target: simplify backend driver registration
drivers/target/target_core_configfs.c
/*******************************************************************************
 * Filename:  target_core_configfs.c
 *
 * This file contains ConfigFS logic for the Generic Target Engine project.
 *
 * (c) Copyright 2008-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <linux/spinlock.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
#include "target_core_xcopy.h"

#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = _attrs;						\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}

#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops)			\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = tb->ops->tb_##_name##_attrs;			\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}
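
/*
 * For reference, the _DRV variant above expands mechanically for the
 * dev_attrib instantiation further below, i.e.
 * TB_CIT_SETUP_DRV(dev_attrib, &target_core_dev_attrib_ops, NULL)
 * becomes:
 *
 *	static void target_core_setup_dev_attrib_cit(struct target_backend *tb)
 *	{
 *		struct config_item_type *cit = &tb->tb_dev_attrib_cit;
 *
 *		cit->ct_item_ops = &target_core_dev_attrib_ops;
 *		cit->ct_group_ops = NULL;
 *		cit->ct_attrs = tb->ops->tb_dev_attrib_attrs;
 *		cit->ct_owner = tb->ops->owner;
 *		pr_debug("Setup generic %s\n", __stringify(dev_attrib));
 *	}
 *
 * so the backend driver supplies its own attribute list via
 * tb->ops->tb_dev_attrib_attrs instead of a list fixed at compile time.
 */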

extern struct t10_alua_lu_gp *default_lu_gp;

static LIST_HEAD(g_tf_list);
static DEFINE_MUTEX(g_tf_lock);

struct target_core_configfs_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(void *, char *);
	ssize_t (*store)(void *, const char *, size_t);
};

static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;

static inline struct se_hba *
item_to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}

/*
 * Attributes for /sys/kernel/config/target/
 */
static ssize_t target_core_attr_show(struct config_item *item,
				      struct configfs_attribute *attr,
				      char *page)
{
	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION,
		utsname()->sysname, utsname()->machine);
}

static struct configfs_item_operations target_core_fabric_item_ops = {
	.show_attribute = target_core_attr_show,
};

static struct configfs_attribute target_core_item_attr_version = {
	.ca_owner	= THIS_MODULE,
	.ca_name	= "version",
	.ca_mode	= S_IRUGO,
};
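
/*
 * Example: reading the attribute defined above from a configured host
 * (the output string comes from target_core_attr_show()):
 *
 *	cat /sys/kernel/config/target/version
 */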

static struct target_fabric_configfs *target_core_get_fabric(
	const char *name)
{
	struct target_fabric_configfs *tf;

	if (!name)
		return NULL;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(tf, &g_tf_list, tf_list) {
		if (!strcmp(tf->tf_ops->name, name)) {
			atomic_inc(&tf->tf_access_cnt);
			mutex_unlock(&g_tf_lock);
			return tf;
		}
	}
	mutex_unlock(&g_tf_lock);

	return NULL;
}

/*
 * Called from struct target_core_group_ops->make_group()
 */
static struct config_group *target_core_register_fabric(
	struct config_group *group,
	const char *name)
{
	struct target_fabric_configfs *tf;
	int ret;

	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
			" %s\n", group, name);

	tf = target_core_get_fabric(name);
	if (!tf) {
		pr_debug("target_core_register_fabric() trying autoload for %s\n",
			 name);

		/*
		 * Below are some hardcoded request_module() calls to
		 * automatically load fabric modules when the following is
		 * called:
		 *
		 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
		 *
		 * Note that this does not limit which TCM fabric module can
		 * be registered, but simply provides auto loading logic for
		 * modules with mkdir(2) system calls with known TCM fabric
		 * modules.
		 */

		if (!strncmp(name, "iscsi", 5)) {
			/*
			 * Automatically load the LIO Target fabric module when the
			 * following is called:
			 *
			 * mkdir -p $CONFIGFS/target/iscsi
			 */
			ret = request_module("iscsi_target_mod");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					 " iscsi_target_mod.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		} else if (!strncmp(name, "loopback", 8)) {
			/*
			 * Automatically load the tcm_loop fabric module when the
			 * following is called:
			 *
			 * mkdir -p $CONFIGFS/target/loopback
			 */
			ret = request_module("tcm_loop");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					 " tcm_loop.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		}

		tf = target_core_get_fabric(name);
	}

	if (!tf) {
		pr_debug("target_core_get_fabric() failed for %s\n",
			 name);
		return ERR_PTR(-EINVAL);
	}
	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
			" %s\n", tf->tf_ops->name);
	/*
	 * On a successful target_core_get_fabric() lookup, the returned
	 * struct target_fabric_configfs *tf will contain a usage reference.
	 */
	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
			&tf->tf_wwn_cit);

	tf->tf_group.default_groups = tf->tf_default_groups;
	tf->tf_group.default_groups[0] = &tf->tf_disc_group;
	tf->tf_group.default_groups[1] = NULL;

	config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);
	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
			&tf->tf_discovery_cit);

	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
			" %s\n", tf->tf_group.cg_item.ci_name);
	return &tf->tf_group;
}

/*
 * Called from struct target_core_group_ops->drop_item()
 */
static void target_core_deregister_fabric(
	struct config_group *group,
	struct config_item *item)
{
	struct target_fabric_configfs *tf = container_of(
		to_config_group(item), struct target_fabric_configfs, tf_group);
	struct config_group *tf_group;
	struct config_item *df_item;
	int i;

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
		" tf list\n", config_item_name(item));

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
			" %s\n", tf->tf_ops->name);
	atomic_dec(&tf->tf_access_cnt);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
			" %s\n", config_item_name(item));

	tf_group = &tf->tf_group;
	for (i = 0; tf_group->default_groups[i]; i++) {
		df_item = &tf_group->default_groups[i]->cg_item;
		tf_group->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	config_item_put(item);
}

static struct configfs_group_operations target_core_fabric_group_ops = {
	.make_group	= &target_core_register_fabric,
	.drop_item	= &target_core_deregister_fabric,
};

/*
 * All item attributes appearing in /sys/kernel/config/target/ appear here.
 */
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
	&target_core_item_attr_version,
	NULL,
};

/*
 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
 */
static struct config_item_type target_core_fabrics_item = {
	.ct_item_ops	= &target_core_fabric_item_ops,
	.ct_group_ops	= &target_core_fabric_group_ops,
	.ct_attrs	= target_core_fabric_item_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem target_core_fabrics = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "target",
			.ci_type = &target_core_fabrics_item,
		},
	},
};
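
/*
 * The resulting configfs layout, rooted at the "target" subsystem group
 * above. Fabric directories are created by userspace mkdir(2); the "iscsi"
 * name below is just an example:
 *
 *	/sys/kernel/config/target/
 *	|-- version
 *	`-- iscsi/
 *	    `-- discovery_auth/
 */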

int target_depend_item(struct config_item *item)
{
	return configfs_depend_item(&target_core_fabrics, item);
}
EXPORT_SYMBOL(target_depend_item);

void target_undepend_item(struct config_item *item)
{
	return configfs_undepend_item(&target_core_fabrics, item);
}
EXPORT_SYMBOL(target_undepend_item);

/*##############################################################################
// Start functions called by external Target Fabrics Modules
//############################################################################*/

static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
{
	if (!tfo->name) {
		pr_err("Missing tfo->name\n");
		return -EINVAL;
	}
	if (strlen(tfo->name) >= TARGET_FABRIC_NAME_SIZE) {
		pr_err("Passed name: %s exceeds TARGET_FABRIC"
			"_NAME_SIZE\n", tfo->name);
		return -EINVAL;
	}
	if (!tfo->get_fabric_name) {
		pr_err("Missing tfo->get_fabric_name()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_wwn) {
		pr_err("Missing tfo->tpg_get_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_tag) {
		pr_err("Missing tfo->tpg_get_tag()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode) {
		pr_err("Missing tfo->tpg_check_demo_mode()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode_cache) {
		pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode_write_protect) {
		pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_prod_mode_write_protect) {
		pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_inst_index) {
		pr_err("Missing tfo->tpg_get_inst_index()\n");
		return -EINVAL;
	}
	if (!tfo->release_cmd) {
		pr_err("Missing tfo->release_cmd()\n");
		return -EINVAL;
	}
	if (!tfo->shutdown_session) {
		pr_err("Missing tfo->shutdown_session()\n");
		return -EINVAL;
	}
	if (!tfo->close_session) {
		pr_err("Missing tfo->close_session()\n");
		return -EINVAL;
	}
	if (!tfo->sess_get_index) {
		pr_err("Missing tfo->sess_get_index()\n");
		return -EINVAL;
	}
	if (!tfo->write_pending) {
		pr_err("Missing tfo->write_pending()\n");
		return -EINVAL;
	}
	if (!tfo->write_pending_status) {
		pr_err("Missing tfo->write_pending_status()\n");
		return -EINVAL;
	}
	if (!tfo->set_default_node_attributes) {
		pr_err("Missing tfo->set_default_node_attributes()\n");
		return -EINVAL;
	}
	if (!tfo->get_cmd_state) {
		pr_err("Missing tfo->get_cmd_state()\n");
		return -EINVAL;
	}
	if (!tfo->queue_data_in) {
		pr_err("Missing tfo->queue_data_in()\n");
		return -EINVAL;
	}
	if (!tfo->queue_status) {
		pr_err("Missing tfo->queue_status()\n");
		return -EINVAL;
	}
	if (!tfo->queue_tm_rsp) {
		pr_err("Missing tfo->queue_tm_rsp()\n");
		return -EINVAL;
	}
	if (!tfo->aborted_task) {
		pr_err("Missing tfo->aborted_task()\n");
		return -EINVAL;
	}
	/*
	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
	 * target_core_fabric_configfs.c WWN+TPG group context code.
	 */
	if (!tfo->fabric_make_wwn) {
		pr_err("Missing tfo->fabric_make_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_wwn) {
		pr_err("Missing tfo->fabric_drop_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_make_tpg) {
		pr_err("Missing tfo->fabric_make_tpg()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_tpg) {
		pr_err("Missing tfo->fabric_drop_tpg()\n");
		return -EINVAL;
	}

	return 0;
}

int target_register_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *tf;
	int ret;

	ret = target_fabric_tf_ops_check(fo);
	if (ret)
		return ret;

	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
	if (!tf) {
		pr_err("%s: could not allocate memory!\n", __func__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&tf->tf_list);
	atomic_set(&tf->tf_access_cnt, 0);
	tf->tf_ops = fo;
	target_fabric_setup_cits(tf);

	mutex_lock(&g_tf_lock);
	list_add_tail(&tf->tf_list, &g_tf_list);
	mutex_unlock(&g_tf_lock);

	return 0;
}
EXPORT_SYMBOL(target_register_template);
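
/*
 * A minimal (hypothetical) caller sketch: an external fabric module fills
 * in every callback that target_fabric_tf_ops_check() demands, then
 * registers/unregisters the template from its module init/exit. All
 * "demo_*" names below are invented for illustration:
 *
 *	static const struct target_core_fabric_ops demo_fabric_ops = {
 *		.name			= "demo",
 *		.get_fabric_name	= demo_get_fabric_name,
 *		.tpg_get_wwn		= demo_tpg_get_wwn,
 *		.tpg_get_tag		= demo_tpg_get_tag,
 *		...
 *		.fabric_make_wwn	= demo_make_wwn,
 *		.fabric_drop_tpg	= demo_drop_tpg,
 *	};
 *
 *	static int __init demo_init(void)
 *	{
 *		return target_register_template(&demo_fabric_ops);
 *	}
 *
 *	static void __exit demo_exit(void)
 *	{
 *		target_unregister_template(&demo_fabric_ops);
 *	}
 */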

void target_unregister_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *t;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(t, &g_tf_list, tf_list) {
		if (!strcmp(t->tf_ops->name, fo->name)) {
			BUG_ON(atomic_read(&t->tf_access_cnt));
			list_del(&t->tf_list);
			kfree(t);
			break;
		}
	}
	mutex_unlock(&g_tf_lock);
}
EXPORT_SYMBOL(target_unregister_template);

/*##############################################################################
// Stop functions called by external Target Fabrics Modules
//############################################################################*/

/* Start functions for struct config_item_type tb_dev_attrib_cit */

CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);

static struct configfs_item_operations target_core_dev_attrib_ops = {
	.show_attribute		= target_core_dev_attrib_attr_show,
	.store_attribute	= target_core_dev_attrib_attr_store,
};

TB_CIT_SETUP_DRV(dev_attrib, &target_core_dev_attrib_ops, NULL);

/* End functions for struct config_item_type tb_dev_attrib_cit */

/* Start functions for struct config_item_type tb_dev_wwn_cit */

CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
#define SE_DEV_WWN_ATTR(_name, _mode)					\
static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \
	__CONFIGFS_EATTR(_name, _mode,					\
	target_core_dev_wwn_show_attr_##_name,				\
	target_core_dev_wwn_store_attr_##_name);

#define SE_DEV_WWN_ATTR_RO(_name)					\
static struct target_core_dev_wwn_attribute				\
	target_core_dev_wwn_##_name =					\
	__CONFIGFS_EATTR_RO(_name,					\
	target_core_dev_wwn_show_attr_##_name);

/*
 * VPD page 0x80 Unit serial
 */
static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
	struct t10_wwn *t10_wwn,
	char *page)
{
	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
		&t10_wwn->unit_serial[0]);
}

static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
	struct t10_wwn *t10_wwn,
	const char *page,
	size_t count)
{
	struct se_device *dev = t10_wwn->t10_dev;
	unsigned char buf[INQUIRY_VPD_SERIAL_LEN];

	/*
	 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
	 * from the struct scsi_device level firmware, do not allow
	 * VPD Unit Serial to be emulated.
	 *
	 * Note this struct scsi_device could also be emulating VPD
	 * information from its drivers/scsi LLD.  But for now we assume
	 * it is doing 'the right thing' wrt a world wide unique
	 * VPD Unit Serial Number that OS dependent multipath can depend on.
	 */
	if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
		pr_err("Underlying SCSI device firmware provided VPD"
			" Unit Serial, ignoring request\n");
		return -EOPNOTSUPP;
	}

	if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
		pr_err("Emulated VPD Unit Serial exceeds"
		" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
		return -EOVERFLOW;
	}
	/*
	 * Check to see if any active $FABRIC_MOD exports exist.  If they
	 * do exist, fail here as changing this information on the fly
	 * (underneath the initiator side OS dependent multipath code)
	 * could cause negative effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set VPD Unit Serial while"
			" active %d $FABRIC_MOD exports exist\n",
			dev->export_count);
		return -EINVAL;
	}

	/*
	 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
	 *
	 * Also, strip any newline added from the userspace
	 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
	 */
	memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
	snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
			"%s", strstrip(buf));
	dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;

	pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
			" %s\n", dev->t10_wwn.unit_serial);

	return count;
}

SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR);

/*
 * VPD page 0x83 Protocol Identifier
 */
static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
	struct t10_wwn *t10_wwn,
	char *page)
{
	struct t10_vpd *vpd;
	unsigned char buf[VPD_TMP_BUF_SIZE];
	ssize_t len = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	spin_lock(&t10_wwn->t10_vpd_lock);
	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
		if (!vpd->protocol_identifier_set)
			continue;

		transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);

		if (len + strlen(buf) >= PAGE_SIZE)
			break;

		len += sprintf(page+len, "%s", buf);
	}
	spin_unlock(&t10_wwn->t10_vpd_lock);

	return len;
}

static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier(
	struct t10_wwn *t10_wwn,
	const char *page,
	size_t count)
{
	return -ENOSYS;
}

SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR);

/*
 * Generic wrapper for dumping VPD identifiers by association.
 */
#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc)				\
static ssize_t target_core_dev_wwn_show_attr_##_name(			\
	struct t10_wwn *t10_wwn,					\
	char *page)							\
{									\
	struct t10_vpd *vpd;						\
	unsigned char buf[VPD_TMP_BUF_SIZE];				\
	ssize_t len = 0;						\
									\
	spin_lock(&t10_wwn->t10_vpd_lock);				\
	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {	\
		if (vpd->association != _assoc)				\
			continue;					\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);	\
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE);	\
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
	}								\
	spin_unlock(&t10_wwn->t10_vpd_lock);				\
									\
	return len;							\
}

/*
 * VPD page 0x83 Association: Logical Unit
 */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);

static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(
	struct t10_wwn *t10_wwn,
	const char *page,
	size_t count)
{
	return -ENOSYS;
}

SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR);

/*
 * VPD page 0x83 Association: Target Port
 */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);

static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port(
	struct t10_wwn *t10_wwn,
	const char *page,
	size_t count)
{
	return -ENOSYS;
}

SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR);

/*
 * VPD page 0x83 Association: SCSI Target Device
 */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);

static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(
	struct t10_wwn *t10_wwn,
	const char *page,
	size_t count)
{
	return -ENOSYS;
}

SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR);

CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group);

static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
	&target_core_dev_wwn_vpd_unit_serial.attr,
	&target_core_dev_wwn_vpd_protocol_identifier.attr,
	&target_core_dev_wwn_vpd_assoc_logical_unit.attr,
	&target_core_dev_wwn_vpd_assoc_target_port.attr,
	&target_core_dev_wwn_vpd_assoc_scsi_target_device.attr,
	NULL,
};

static struct configfs_item_operations target_core_dev_wwn_ops = {
	.show_attribute		= target_core_dev_wwn_attr_show,
	.store_attribute	= target_core_dev_wwn_attr_store,
};

TB_CIT_SETUP(dev_wwn, &target_core_dev_wwn_ops, NULL, target_core_dev_wwn_attrs);

/* End functions for struct config_item_type tb_dev_wwn_cit */

/* Start functions for struct config_item_type tb_dev_pr_cit */

CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);
#define SE_DEV_PR_ATTR(_name, _mode)					\
static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
	__CONFIGFS_EATTR(_name, _mode,					\
	target_core_dev_pr_show_attr_##_name,				\
	target_core_dev_pr_store_attr_##_name);

#define SE_DEV_PR_ATTR_RO(_name)					\
static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
	__CONFIGFS_EATTR_RO(_name,					\
	target_core_dev_pr_show_attr_##_name);

static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
		char *page)
{
	struct se_node_acl *se_nacl;
	struct t10_pr_registration *pr_reg;
	char i_buf[PR_REG_ISID_ID_LEN];

	memset(i_buf, 0, PR_REG_ISID_ID_LEN);

	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg)
		return sprintf(page, "No SPC-3 Reservation holder\n");

	se_nacl = pr_reg->pr_reg_nacl;
	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);

	return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
		se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
		se_nacl->initiatorname, i_buf);
}

static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
		char *page)
{
	struct se_node_acl *se_nacl;
	ssize_t len;

	se_nacl = dev->dev_reserved_node_acl;
	if (se_nacl) {
		len = sprintf(page,
			      "SPC-2 Reservation: %s Initiator: %s\n",
			      se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
			      se_nacl->initiatorname);
	} else {
		len = sprintf(page, "No SPC-2 Reservation holder\n");
	}
	return len;
}

static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
		char *page)
{
	int ret;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return sprintf(page, "Passthrough\n");

	spin_lock(&dev->dev_reservation_lock);
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		ret = target_core_dev_pr_show_spc2_res(dev, page);
	else
		ret = target_core_dev_pr_show_spc3_res(dev, page);
	spin_unlock(&dev->dev_reservation_lock);
	return ret;
}

SE_DEV_PR_ATTR_RO(res_holder);

static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
		struct se_device *dev, char *page)
{
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	if (!dev->dev_pr_res_holder) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
	} else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
		len = sprintf(page, "SPC-3 Reservation: All Target"
			" Ports registration\n");
	} else {
		len = sprintf(page, "SPC-3 Reservation: Single"
			" Target Port registration\n");
	}

	spin_unlock(&dev->dev_reservation_lock);
	return len;
}

SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);

static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
		struct se_device *dev, char *page)
{
	return sprintf(page, "0x%08x\n", dev->t10_pr.pr_generation);
}

SE_DEV_PR_ATTR_RO(res_pr_generation);

/*
 * res_pr_holder_tg_port
 */
static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
		struct se_device *dev, char *page)
{
	struct se_node_acl *se_nacl;
	struct se_portal_group *se_tpg;
	struct t10_pr_registration *pr_reg;
	const struct target_core_fabric_ops *tfo;
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
		goto out_unlock;
	}

	se_nacl = pr_reg->pr_reg_nacl;
	se_tpg = se_nacl->se_tpg;
	tfo = se_tpg->se_tpg_tfo;

	len += sprintf(page+len, "SPC-3 Reservation: %s"
		" Target Node Endpoint: %s\n", tfo->get_fabric_name(),
		tfo->tpg_get_wwn(se_tpg));
	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
		" Identifier Tag: %hu %s Portal Group Tag: %hu"
		" %s Logical Unit: %u\n", pr_reg->tg_pt_sep_rtpi,
		tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
		tfo->get_fabric_name(), pr_reg->pr_aptpl_target_lun);

out_unlock:
	spin_unlock(&dev->dev_reservation_lock);
	return len;
}

SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);

static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
		struct se_device *dev, char *page)
{
	const struct target_core_fabric_ops *tfo;
	struct t10_pr_registration *pr_reg;
	unsigned char buf[384];
	char i_buf[PR_REG_ISID_ID_LEN];
	ssize_t len = 0;
	int reg_count = 0;

	len += sprintf(page+len, "SPC-3 PR Registrations:\n");

	spin_lock(&dev->t10_pr.registration_lock);
	list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
			pr_reg_list) {

		memset(buf, 0, 384);
		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
		tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
		core_pr_dump_initiator_port(pr_reg, i_buf,
					PR_REG_ISID_ID_LEN);
		sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
			tfo->get_fabric_name(),
			pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
			pr_reg->pr_res_generation);

		if (len + strlen(buf) >= PAGE_SIZE)
			break;

		len += sprintf(page+len, "%s", buf);
		reg_count++;
	}
	spin_unlock(&dev->t10_pr.registration_lock);

	if (!reg_count)
		len += sprintf(page+len, "None\n");

	return len;
}

SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);

static ssize_t target_core_dev_pr_show_attr_res_pr_type(
		struct se_device *dev, char *page)
{
	struct t10_pr_registration *pr_reg;
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	pr_reg = dev->dev_pr_res_holder;
	if (pr_reg) {
		len = sprintf(page, "SPC-3 Reservation Type: %s\n",
			core_scsi3_pr_dump_type(pr_reg->pr_res_type));
	} else {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
	}

	spin_unlock(&dev->dev_reservation_lock);
	return len;
}

SE_DEV_PR_ATTR_RO(res_pr_type);

static ssize_t target_core_dev_pr_show_attr_res_type(
		struct se_device *dev, char *page)
{
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return sprintf(page, "SPC_PASSTHROUGH\n");
	else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		return sprintf(page, "SPC2_RESERVATIONS\n");
	else
		return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
}

SE_DEV_PR_ATTR_RO(res_type);

static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
		struct se_device *dev, char *page)
{
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	return sprintf(page, "APTPL Bit Status: %s\n",
		(dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
}

SE_DEV_PR_ATTR_RO(res_aptpl_active);

/*
 * res_aptpl_metadata
 */
static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
		struct se_device *dev, char *page)
{
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	return sprintf(page, "Ready to process PR APTPL metadata..\n");
}

enum {
	Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
	Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
	Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
	Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
};

static match_table_t tokens = {
	{Opt_initiator_fabric, "initiator_fabric=%s"},
	{Opt_initiator_node, "initiator_node=%s"},
	{Opt_initiator_sid, "initiator_sid=%s"},
	{Opt_sa_res_key, "sa_res_key=%s"},
	{Opt_res_holder, "res_holder=%d"},
	{Opt_res_type, "res_type=%d"},
	{Opt_res_scope, "res_scope=%d"},
	{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
	{Opt_mapped_lun, "mapped_lun=%d"},
	{Opt_target_fabric, "target_fabric=%s"},
	{Opt_target_node, "target_node=%s"},
	{Opt_tpgt, "tpgt=%d"},
	{Opt_port_rtpi, "port_rtpi=%d"},
	{Opt_target_lun, "target_lun=%d"},
	{Opt_err, NULL}
};
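
/*
 * A hypothetical example of the comma/newline separated string that
 * target_core_dev_pr_store_attr_res_aptpl_metadata() below parses against
 * the token table above (all values invented for illustration; i_port,
 * t_port and sa_res_key are mandatory):
 *
 *	initiator_fabric=iSCSI,initiator_node=iqn.1993-08.org.debian:01:abcd,
 *	sa_res_key=0x1234,res_holder=1,res_type=3,mapped_lun=0,
 *	target_fabric=iSCSI,target_node=iqn.2003-01.org.example:target,
 *	tpgt=1,target_lun=0
 */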

static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
	struct se_device *dev,
	const char *page,
	size_t count)
{
	unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
	unsigned char *t_fabric = NULL, *t_port = NULL;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	unsigned long long tmp_ll;
	u64 sa_res_key = 0;
	u32 mapped_lun = 0, target_lun = 0;
	int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
	u16 tpgt = 0;
	u8 type = 0;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		return 0;

	if (dev->export_count) {
		pr_debug("Unable to process APTPL metadata while"
			" active fabric exports exist\n");
		return -EINVAL;
	}

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;
	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_initiator_fabric:
			i_fabric = match_strdup(args);
			if (!i_fabric) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case Opt_initiator_node:
			i_port = match_strdup(args);
			if (!i_port) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
				pr_err("APTPL metadata initiator_node="
					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
					PR_APTPL_MAX_IPORT_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_initiator_sid:
			isid = match_strdup(args);
			if (!isid) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(isid) >= PR_REG_ISID_LEN) {
				pr_err("APTPL metadata initiator_isid"
					"= exceeds PR_REG_ISID_LEN: %d\n",
					PR_REG_ISID_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_sa_res_key:
			ret = kstrtoull(args->from, 0, &tmp_ll);
			if (ret < 0) {
				pr_err("kstrtoull() failed for sa_res_key=\n");
				goto out;
			}
			sa_res_key = (u64)tmp_ll;
			break;
		/*
		 * PR APTPL Metadata for Reservation
		 */
		case Opt_res_holder:
			match_int(args, &arg);
			res_holder = arg;
			break;
		case Opt_res_type:
			match_int(args, &arg);
			type = (u8)arg;
			break;
		case Opt_res_scope:
			match_int(args, &arg);
			break;
		case Opt_res_all_tg_pt:
			match_int(args, &arg);
			all_tg_pt = (int)arg;
			break;
		case Opt_mapped_lun:
			match_int(args, &arg);
			mapped_lun = (u32)arg;
			break;
		/*
		 * PR APTPL Metadata for Target Port
		 */
		case Opt_target_fabric:
			t_fabric = match_strdup(args);
			if (!t_fabric) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case Opt_target_node:
			t_port = match_strdup(args);
			if (!t_port) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
				pr_err("APTPL metadata target_node="
					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
					PR_APTPL_MAX_TPORT_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_tpgt:
			match_int(args, &arg);
			tpgt = (u16)arg;
			break;
		case Opt_port_rtpi:
			match_int(args, &arg);
			break;
		case Opt_target_lun:
			match_int(args, &arg);
			target_lun = (u32)arg;
			break;
		default:
			break;
		}
	}

	if (!i_port || !t_port || !sa_res_key) {
		pr_err("Illegal parameters for APTPL registration\n");
		ret = -EINVAL;
		goto out;
	}

	if (res_holder && !(type)) {
		pr_err("Illegal PR type: 0x%02x for reservation"
				" holder\n", type);
		ret = -EINVAL;
		goto out;
	}

	ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
			res_holder, all_tg_pt, type);
out:
	kfree(i_fabric);
	kfree(i_port);
	kfree(isid);
	kfree(t_fabric);
	kfree(t_port);
	kfree(orig);
	return (ret == 0) ? count : ret;
}

SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);

CONFIGFS_EATTR_OPS(target_core_dev_pr, se_device, dev_pr_group);

static struct configfs_attribute *target_core_dev_pr_attrs[] = {
	&target_core_dev_pr_res_holder.attr,
	&target_core_dev_pr_res_pr_all_tgt_pts.attr,
	&target_core_dev_pr_res_pr_generation.attr,
	&target_core_dev_pr_res_pr_holder_tg_port.attr,
	&target_core_dev_pr_res_pr_registered_i_pts.attr,
	&target_core_dev_pr_res_pr_type.attr,
	&target_core_dev_pr_res_type.attr,
	&target_core_dev_pr_res_aptpl_active.attr,
	&target_core_dev_pr_res_aptpl_metadata.attr,
	NULL,
};

static struct configfs_item_operations target_core_dev_pr_ops = {
	.show_attribute		= target_core_dev_pr_attr_show,
	.store_attribute	= target_core_dev_pr_attr_store,
};

TB_CIT_SETUP(dev_pr, &target_core_dev_pr_ops, NULL, target_core_dev_pr_attrs);

/* End functions for struct config_item_type tb_dev_pr_cit */

/* Start functions for struct config_item_type tb_dev_cit */

static ssize_t target_core_show_dev_info(void *p, char *page)
{
	struct se_device *dev = p;
	int bl = 0;
	ssize_t read_bytes = 0;

	transport_dump_dev_state(dev, page, &bl);
	read_bytes += bl;
	read_bytes += dev->transport->show_configfs_dev_params(dev,
			page+read_bytes);
	return read_bytes;
}

static struct target_core_configfs_attribute target_core_attr_dev_info = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "info",
		    .ca_mode = S_IRUGO },
	.show	= target_core_show_dev_info,
	.store	= NULL,
};

static ssize_t target_core_store_dev_control(
	void *p,
	const char *page,
	size_t count)
{
	struct se_device *dev = p;

	return dev->transport->set_configfs_dev_params(dev, page, count);
}

static struct target_core_configfs_attribute target_core_attr_dev_control = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "control",
		    .ca_mode = S_IWUSR },
	.show	= NULL,
	.store	= target_core_store_dev_control,
};

static ssize_t target_core_show_dev_alias(void *p, char *page)
{
	struct se_device *dev = p;

	if (!(dev->dev_flags & DF_USING_ALIAS))
		return 0;

	return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
}

static ssize_t target_core_store_dev_alias(
	void *p,
	const char *page,
	size_t count)
{
	struct se_device *dev = p;
	struct se_hba *hba = dev->se_hba;
	ssize_t read_bytes;

	if (count > (SE_DEV_ALIAS_LEN-1)) {
		pr_err("alias count: %d exceeds"
			" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
			SE_DEV_ALIAS_LEN-1);
		return -EINVAL;
	}

	read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
	if (!read_bytes)
		return -EINVAL;
	if (dev->dev_alias[read_bytes - 1] == '\n')
		dev->dev_alias[read_bytes - 1] = '\0';

	dev->dev_flags |= DF_USING_ALIAS;

	pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		dev->dev_alias);

	return read_bytes;
}

static struct target_core_configfs_attribute target_core_attr_dev_alias = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "alias",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= target_core_show_dev_alias,
	.store	= target_core_store_dev_alias,
};

static ssize_t target_core_show_dev_udev_path(void *p, char *page)
{
	struct se_device *dev = p;

	if (!(dev->dev_flags & DF_USING_UDEV_PATH))
		return 0;

	return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
}

static ssize_t target_core_store_dev_udev_path(
	void *p,
	const char *page,
	size_t count)
{
	struct se_device *dev = p;
	struct se_hba *hba = dev->se_hba;
	ssize_t read_bytes;

	if (count > (SE_UDEV_PATH_LEN-1)) {
		pr_err("udev_path count: %d exceeds"
			" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
			SE_UDEV_PATH_LEN-1);
		return -EINVAL;
	}

	read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
			"%s", page);
	if (!read_bytes)
		return -EINVAL;
	if (dev->udev_path[read_bytes - 1] == '\n')
		dev->udev_path[read_bytes - 1] = '\0';

	dev->dev_flags |= DF_USING_UDEV_PATH;

	pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		dev->udev_path);

	return read_bytes;
}

static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "udev_path",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= target_core_show_dev_udev_path,
	.store	= target_core_store_dev_udev_path,
};

static ssize_t target_core_show_dev_enable(void *p, char *page)
{
	struct se_device *dev = p;

	return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
}

static ssize_t target_core_store_dev_enable(
	void *p,
	const char *page,
	size_t count)
{
	struct se_device *dev = p;
	char *ptr;
	int ret;

	ptr = strstr(page, "1");
	if (!ptr) {
		pr_err("For dev_enable ops, only valid value"
				" is \"1\"\n");
		return -EINVAL;
	}

	ret = target_configure_device(dev);
	if (ret)
		return ret;
	return count;
}

static struct target_core_configfs_attribute target_core_attr_dev_enable = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "enable",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= target_core_show_dev_enable,
	.store	= target_core_store_dev_enable,
};
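
/*
 * Typical usage (hypothetical fileio backstore path and parameters, shown
 * for illustration only): configure the device via "control", which hands
 * the string to the backend's set_configfs_dev_params(), then flip
 * "enable" to call target_configure_device():
 *
 *	echo "fd_dev_name=/tmp/file,fd_dev_size=1048576" > \
 *		/sys/kernel/config/target/core/fileio_0/dev0/control
 *	echo 1 > /sys/kernel/config/target/core/fileio_0/dev0/enable
 */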

static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
{
	struct se_device *dev = p;
	struct config_item *lu_ci;
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	ssize_t len = 0;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return 0;

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		lu_ci = &lu_gp->lu_gp_group.cg_item;
		len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
			config_item_name(lu_ci), lu_gp->lu_gp_id);
	}
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	return len;
}

static ssize_t target_core_store_alua_lu_gp(
	void *p,
	const char *page,
	size_t count)
{
	struct se_device *dev = p;
	struct se_hba *hba = dev->se_hba;
	struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	unsigned char buf[LU_GROUP_NAME_BUF];
	int move = 0;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return 0;

	if (count >= LU_GROUP_NAME_BUF) {
		pr_err("ALUA LU Group Alias too large!\n");
		return -EINVAL;
	}
	memset(buf, 0, LU_GROUP_NAME_BUF);
	memcpy(buf, page, count);
	/*
	 * Any ALUA logical unit alias besides "NULL" means we will be
	 * making a new group association.
	 */
	if (strcmp(strstrip(buf), "NULL")) {
		/*
		 * core_alua_get_lu_gp_by_name() will increment reference to
		 * struct t10_alua_lu_gp.  This reference is released with
		 * core_alua_put_lu_gp_from_name() below.
		 */
		lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
		if (!lu_gp_new)
			return -ENODEV;
	}

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		/*
		 * Clearing an existing lu_gp association, and replacing
		 * with NULL
		 */
		if (!lu_gp_new) {
			pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
				" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
				" %hu\n",
				config_item_name(&hba->hba_group.cg_item),
				config_item_name(&dev->dev_group.cg_item),
				config_item_name(&lu_gp->lu_gp_group.cg_item),
				lu_gp->lu_gp_id);

			__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

			return count;
		}
		/*
		 * Removing existing association of lu_gp_mem with lu_gp
		 */
		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
		move = 1;
	}
	/*
	 * Associate lu_gp_mem with lu_gp_new.
	 */
	__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
		" core/alua/lu_gps/%s, ID: %hu\n",
		(move) ? "Moving" : "Adding",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		config_item_name(&lu_gp_new->lu_gp_group.cg_item),
		lu_gp_new->lu_gp_id);

	core_alua_put_lu_gp_from_name(lu_gp_new);
	return count;
}

static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "alua_lu_gp",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= target_core_show_alua_lu_gp,
	.store	= target_core_store_alua_lu_gp,
};
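
/*
 * Example: associate the device with an existing ALUA LU group, or write
 * the literal string "NULL" to drop the association (group and device
 * names are illustrative):
 *
 *	echo some_lu_gp > .../target/core/$HBA/$DEV/alua_lu_gp
 *	echo NULL > .../target/core/$HBA/$DEV/alua_lu_gp
 */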

static ssize_t target_core_show_dev_lba_map(void *p, char *page)
{
	struct se_device *dev = p;
	struct t10_alua_lba_map *map;
	struct t10_alua_lba_map_member *mem;
	char *b = page;
	int bl = 0;
	char state;

	spin_lock(&dev->t10_alua.lba_map_lock);
	if (!list_empty(&dev->t10_alua.lba_map_list))
		bl += sprintf(b + bl, "%u %u\n",
			      dev->t10_alua.lba_map_segment_size,
			      dev->t10_alua.lba_map_segment_multiplier);
	list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
		bl += sprintf(b + bl, "%llu %llu",
			      map->lba_map_first_lba, map->lba_map_last_lba);
		list_for_each_entry(mem, &map->lba_map_mem_list,
				    lba_map_mem_list) {
			switch (mem->lba_map_mem_alua_state) {
			case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
				state = 'O';
				break;
			case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
				state = 'A';
				break;
			case ALUA_ACCESS_STATE_STANDBY:
				state = 'S';
				break;
			case ALUA_ACCESS_STATE_UNAVAILABLE:
				state = 'U';
				break;
			default:
				state = '.';
				break;
			}
			bl += sprintf(b + bl, " %d:%c",
				      mem->lba_map_mem_alua_pg_id, state);
		}
		bl += sprintf(b + bl, "\n");
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	return bl;
}

static ssize_t target_core_store_dev_lba_map(
	void *p,
	const char *page,
	size_t count)
{
	struct se_device *dev = p;
	struct t10_alua_lba_map *lba_map = NULL;
	struct list_head lba_list;
	char *map_entries, *ptr;
	char state;
	int pg_num = -1, pg;
	int ret = 0, num = 0, pg_id, alua_state;
	unsigned long start_lba = -1, end_lba = -1;
	unsigned long segment_size = -1, segment_mult = -1;

	map_entries = kstrdup(page, GFP_KERNEL);
	if (!map_entries)
		return -ENOMEM;

	INIT_LIST_HEAD(&lba_list);
	while ((ptr = strsep(&map_entries, "\n")) != NULL) {
		if (!*ptr)
			continue;

		if (num == 0) {
			if (sscanf(ptr, "%lu %lu\n",
				   &segment_size, &segment_mult) != 2) {
				pr_err("Invalid line %d\n", num);
				ret = -EINVAL;
				break;
			}
			num++;
			continue;
		}
		if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
			pr_err("Invalid line %d\n", num);
			ret = -EINVAL;
			break;
		}
		ptr = strchr(ptr, ' ');
		if (!ptr) {
			pr_err("Invalid line %d, missing end lba\n", num);
			ret = -EINVAL;
			break;
		}
		ptr++;
		ptr = strchr(ptr, ' ');
		if (!ptr) {
			pr_err("Invalid line %d, missing state definitions\n",
			       num);
			ret = -EINVAL;
			break;
		}
		ptr++;
		lba_map = core_alua_allocate_lba_map(&lba_list,
						     start_lba, end_lba);
		if (IS_ERR(lba_map)) {
			ret = PTR_ERR(lba_map);
			break;
		}
		pg = 0;
		while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
			switch (state) {
			case 'O':
				alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
				break;
			case 'A':
				alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
				break;
			case 'S':
				alua_state = ALUA_ACCESS_STATE_STANDBY;
				break;
			case 'U':
				alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
				break;
			default:
				pr_err("Invalid ALUA state '%c'\n", state);
				ret = -EINVAL;
				goto out;
			}

			ret = core_alua_allocate_lba_map_mem(lba_map,
							     pg_id, alua_state);
			if (ret) {
				pr_err("Invalid target descriptor %d:%c "
				       "at line %d\n",
				       pg_id, state, num);
				break;
			}
			pg++;
			ptr = strchr(ptr, ' ');
			if (ptr)
				ptr++;
			else
				break;
		}
		if (pg_num == -1)
			pg_num = pg;
		else if (pg != pg_num) {
			pr_err("Only %d from %d port groups definitions "
			       "at line %d\n", pg, pg_num, num);
			ret = -EINVAL;
			break;
		}
		num++;
	}
out:
	if (ret) {
		core_alua_free_lba_map(&lba_list);
		count = ret;
	} else
		core_alua_set_lba_map(dev, &lba_list,
				      segment_size, segment_mult);
	kfree(map_entries);
	return count;
}
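
/*
 * Input format parsed by target_core_store_dev_lba_map() above: a header
 * line "<segment_size> <segment_multiplier>", then one line per LBA range
 * of the form "<first_lba> <last_lba> <pg_id>:<state> ...", where <state>
 * is one of O (active/optimized), A (active/non-optimized), S (standby) or
 * U (unavailable). A hypothetical two-range, two-port-group map:
 *
 *	echo "64 1
 *	0 1023 0:O 1:A
 *	1024 2047 0:A 1:O" > .../target/core/$HBA/$DEV/lba_map
 */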

static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "lba_map",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= target_core_show_dev_lba_map,
	.store	= target_core_store_dev_lba_map,
};

static struct configfs_attribute *target_core_dev_attrs[] = {
	&target_core_attr_dev_info.attr,
	&target_core_attr_dev_control.attr,
	&target_core_attr_dev_alias.attr,
	&target_core_attr_dev_udev_path.attr,
	&target_core_attr_dev_enable.attr,
	&target_core_attr_dev_alua_lu_gp.attr,
	&target_core_attr_dev_lba_map.attr,
	NULL,
};

static void target_core_dev_release(struct config_item *item)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);

	kfree(dev_cg->default_groups);
	target_free_device(dev);
}

static ssize_t target_core_dev_show(struct config_item *item,
				     struct configfs_attribute *attr,
				     char *page)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);
	struct target_core_configfs_attribute *tc_attr = container_of(
			attr, struct target_core_configfs_attribute, attr);

	if (!tc_attr->show)
		return -EINVAL;

	return tc_attr->show(dev, page);
}

static ssize_t target_core_dev_store(struct config_item *item,
				      struct configfs_attribute *attr,
				      const char *page, size_t count)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);
	struct target_core_configfs_attribute *tc_attr = container_of(
			attr, struct target_core_configfs_attribute, attr);

	if (!tc_attr->store)
		return -EINVAL;

	return tc_attr->store(dev, page, count);
}

static struct configfs_item_operations target_core_dev_item_ops = {
	.release		= target_core_dev_release,
	.show_attribute		= target_core_dev_show,
	.store_attribute	= target_core_dev_store,
};

TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);

/* End functions for struct config_item_type tb_dev_cit */

/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */

CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp);
#define SE_DEV_ALUA_LU_ATTR(_name, _mode)				\
static struct target_core_alua_lu_gp_attribute				\
			target_core_alua_lu_gp_##_name =		\
	__CONFIGFS_EATTR(_name, _mode,					\
	target_core_alua_lu_gp_show_attr_##_name,			\
	target_core_alua_lu_gp_store_attr_##_name);

#define SE_DEV_ALUA_LU_ATTR_RO(_name)					\
static struct target_core_alua_lu_gp_attribute				\
			target_core_alua_lu_gp_##_name =		\
	__CONFIGFS_EATTR_RO(_name,					\
	target_core_alua_lu_gp_show_attr_##_name);

/*
 * lu_gp_id
 */
static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
	struct t10_alua_lu_gp *lu_gp,
	char *page)
{
	if (!lu_gp->lu_gp_valid_id)
		return 0;

	return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
}

static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
	struct t10_alua_lu_gp *lu_gp,
	const char *page,
	size_t count)
{
	struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
	unsigned long lu_gp_id;
	int ret;

	ret = kstrtoul(page, 0, &lu_gp_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for"
			" lu_gp_id\n", ret);
		return ret;
	}
	if (lu_gp_id > 0x0000ffff) {
		pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
			" 0x0000ffff\n", lu_gp_id);
		return -EINVAL;
	}

	ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
	if (ret < 0)
		return -EINVAL;

	pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s to ID: %hu\n",
		config_item_name(&alua_lu_gp_cg->cg_item),
		lu_gp->lu_gp_id);

	return count;
}

SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR);

/*
 * members
 */
static ssize_t target_core_alua_lu_gp_show_attr_members(
	struct t10_alua_lu_gp *lu_gp,
	char *page)
{
	struct se_device *dev;
	struct se_hba *hba;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	ssize_t len = 0, cur_len;
	unsigned char buf[LU_GROUP_NAME_BUF];

	memset(buf, 0, LU_GROUP_NAME_BUF);

	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		dev = lu_gp_mem->lu_gp_mem_dev;
		hba = dev->se_hba;

		cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
			config_item_name(&hba->hba_group.cg_item),
			config_item_name(&dev->dev_group.cg_item));
		cur_len++; /* Extra byte for NULL terminator */

		if ((cur_len + len) > PAGE_SIZE) {
			pr_warn("Ran out of lu_gp_show_attr"
				"_members buffer\n");
			break;
		}
		memcpy(page+len, buf, cur_len);
		len += cur_len;
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	return len;
}

SE_DEV_ALUA_LU_ATTR_RO(members);

CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group);

static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
	&target_core_alua_lu_gp_lu_gp_id.attr,
	&target_core_alua_lu_gp_members.attr,
	NULL,
};

static void target_core_alua_lu_gp_release(struct config_item *item)
{
	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
			struct t10_alua_lu_gp, lu_gp_group);

	core_alua_free_lu_gp(lu_gp);
}

static struct configfs_item_operations target_core_alua_lu_gp_ops = {
	.release		= target_core_alua_lu_gp_release,
	.show_attribute		= target_core_alua_lu_gp_attr_show,
	.store_attribute	= target_core_alua_lu_gp_attr_store,
};

static struct config_item_type target_core_alua_lu_gp_cit = {
	.ct_item_ops		= &target_core_alua_lu_gp_ops,
	.ct_attrs		= target_core_alua_lu_gp_attrs,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_lu_gp_cit */

/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */

static struct config_group *target_core_alua_create_lu_gp(
	struct config_group *group,
	const char *name)
{
	struct t10_alua_lu_gp *lu_gp;
	struct config_group *alua_lu_gp_cg = NULL;
	struct config_item *alua_lu_gp_ci = NULL;

	lu_gp = core_alua_allocate_lu_gp(name, 0);
	if (IS_ERR(lu_gp))
		return NULL;

	alua_lu_gp_cg = &lu_gp->lu_gp_group;
	alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;

	config_group_init_type_name(alua_lu_gp_cg, name,
				&target_core_alua_lu_gp_cit);

	pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s\n",
		config_item_name(alua_lu_gp_ci));

	return alua_lu_gp_cg;

}

static void target_core_alua_drop_lu_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
			struct t10_alua_lu_gp, lu_gp_group);

	pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s, ID: %hu\n",
		config_item_name(item), lu_gp->lu_gp_id);
	/*
	 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
	 * -> target_core_alua_lu_gp_release()
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
	.make_group		= &target_core_alua_create_lu_gp,
	.drop_item		= &target_core_alua_drop_lu_gp,
};

static struct config_item_type target_core_alua_lu_gps_cit = {
	.ct_item_ops		= NULL,
	.ct_group_ops		= &target_core_alua_lu_gps_group_ops,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_lu_gps_cit */

/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */

CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp);
#define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode)				\
static struct target_core_alua_tg_pt_gp_attribute			\
			target_core_alua_tg_pt_gp_##_name =		\
	__CONFIGFS_EATTR(_name, _mode,					\
	target_core_alua_tg_pt_gp_show_attr_##_name,			\
	target_core_alua_tg_pt_gp_store_attr_##_name);

#define SE_DEV_ALUA_TG_PT_ATTR_RO(_name)				\
static struct target_core_alua_tg_pt_gp_attribute			\
			target_core_alua_tg_pt_gp_##_name =		\
	__CONFIGFS_EATTR_RO(_name,					\
	target_core_alua_tg_pt_gp_show_attr_##_name);

/*
 * alua_access_state
 */
static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n",
		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state));
}
1927
1928 static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
1929 struct t10_alua_tg_pt_gp *tg_pt_gp,
1930 const char *page,
1931 size_t count)
1932 {
1933 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1934 unsigned long tmp;
1935 int new_state, ret;
1936
1937 if (!tg_pt_gp->tg_pt_gp_valid_id) {
1938 		pr_err("Unable to do implicit ALUA on invalid"
1939 " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
1940 return -EINVAL;
1941 }
1942 if (!(dev->dev_flags & DF_CONFIGURED)) {
1943 pr_err("Unable to set alua_access_state while device is"
1944 " not configured\n");
1945 return -ENODEV;
1946 }
1947
1948 ret = kstrtoul(page, 0, &tmp);
1949 if (ret < 0) {
1950 pr_err("Unable to extract new ALUA access state from"
1951 " %s\n", page);
1952 return ret;
1953 }
1954 new_state = (int)tmp;
1955
1956 if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
1957 pr_err("Unable to process implicit configfs ALUA"
1958 " transition while TPGS_IMPLICIT_ALUA is disabled\n");
1959 return -EINVAL;
1960 }
1961 if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
1962 new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
1963 /* LBA DEPENDENT is only allowed with implicit ALUA */
1964 pr_err("Unable to process implicit configfs ALUA transition"
1965 " while explicit ALUA management is enabled\n");
1966 return -EINVAL;
1967 }
1968
1969 ret = core_alua_do_port_transition(tg_pt_gp, dev,
1970 NULL, NULL, new_state, 0);
1971 return (!ret) ? count : -EINVAL;
1972 }
1973
1974 SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR);
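/*
 * Usage sketch (path segments are illustrative): an implicit ALUA
 * transition is requested by writing the numeric state, e.g.
 *
 *   echo 0 > .../core/$HBA/$DEV/alua/tg_pt_gps/$GP/alua_access_state
 *
 * where 0 is Active/Optimized per SPC-4. The write is only honored once
 * the group has a valid tg_pt_gp_id, the device is configured, and
 * TPGS_IMPLICIT_ALUA is enabled, as checked above.
 */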
1975
1976 /*
1977 * alua_access_status
1978 */
1979 static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status(
1980 struct t10_alua_tg_pt_gp *tg_pt_gp,
1981 char *page)
1982 {
1983 return sprintf(page, "%s\n",
1984 core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
1985 }
1986
1987 static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
1988 struct t10_alua_tg_pt_gp *tg_pt_gp,
1989 const char *page,
1990 size_t count)
1991 {
1992 unsigned long tmp;
1993 int new_status, ret;
1994
1995 if (!tg_pt_gp->tg_pt_gp_valid_id) {
1996 		pr_err("Unable to set ALUA access status on"
1997 			" invalid tg_pt_gp ID: %hu\n",
1998 tg_pt_gp->tg_pt_gp_valid_id);
1999 return -EINVAL;
2000 }
2001
2002 ret = kstrtoul(page, 0, &tmp);
2003 if (ret < 0) {
2004 pr_err("Unable to extract new ALUA access status"
2005 " from %s\n", page);
2006 return ret;
2007 }
2008 new_status = (int)tmp;
2009
2010 if ((new_status != ALUA_STATUS_NONE) &&
2011 (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2012 (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2013 pr_err("Illegal ALUA access status: 0x%02x\n",
2014 new_status);
2015 return -EINVAL;
2016 }
2017
2018 tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
2019 return count;
2020 }
2021
2022 SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR);
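/*
 * For reference, only the three SPC-4 status codes checked above are
 * accepted: ALUA_STATUS_NONE, ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG and
 * ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; anything else fails with
 * -EINVAL.
 */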
2023
2024 /*
2025 * alua_access_type
2026 */
2027 static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type(
2028 struct t10_alua_tg_pt_gp *tg_pt_gp,
2029 char *page)
2030 {
2031 return core_alua_show_access_type(tg_pt_gp, page);
2032 }
2033
2034 static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
2035 struct t10_alua_tg_pt_gp *tg_pt_gp,
2036 const char *page,
2037 size_t count)
2038 {
2039 return core_alua_store_access_type(tg_pt_gp, page, count);
2040 }
2041
2042 SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
2043
2044 /*
2045 * alua_supported_states
2046 */
2047
2048 #define SE_DEV_ALUA_SUPPORT_STATE_SHOW(_name, _var, _bit) \
2049 static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_##_name( \
2050 struct t10_alua_tg_pt_gp *t, char *p) \
2051 { \
2052 return sprintf(p, "%d\n", !!(t->_var & _bit)); \
2053 }
2054
2055 #define SE_DEV_ALUA_SUPPORT_STATE_STORE(_name, _var, _bit) \
2056 static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\
2057 struct t10_alua_tg_pt_gp *t, const char *p, size_t c) \
2058 { \
2059 unsigned long tmp; \
2060 int ret; \
2061 \
2062 if (!t->tg_pt_gp_valid_id) { \
2063 		pr_err("Unable to set " #_name " ALUA state on" \
2064 		" invalid tg_pt_gp ID: %hu\n", \
2065 t->tg_pt_gp_valid_id); \
2066 return -EINVAL; \
2067 } \
2068 \
2069 ret = kstrtoul(p, 0, &tmp); \
2070 if (ret < 0) { \
2071 pr_err("Invalid value '%s', must be '0' or '1'\n", p); \
2072 return -EINVAL; \
2073 } \
2074 if (tmp > 1) { \
2075 pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
2076 return -EINVAL; \
2077 } \
2078 if (tmp) \
2079 t->_var |= _bit; \
2080 else \
2081 t->_var &= ~_bit; \
2082 \
2083 return c; \
2084 }
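/*
 * For reference, a hand-expanded sketch of the two macros above for
 * _name=transitioning, _var=tg_pt_gp_alua_supported_states and
 * _bit=ALUA_T_SUP (error messages elided):
 *
 *   static ssize_t
 *   target_core_alua_tg_pt_gp_store_attr_alua_support_transitioning(
 *		struct t10_alua_tg_pt_gp *t, const char *p, size_t c)
 *   {
 *		unsigned long tmp;
 *
 *		if (!t->tg_pt_gp_valid_id)
 *			return -EINVAL;
 *		if (kstrtoul(p, 0, &tmp) < 0 || tmp > 1)
 *			return -EINVAL;
 *		if (tmp)
 *			t->tg_pt_gp_alua_supported_states |= ALUA_T_SUP;
 *		else
 *			t->tg_pt_gp_alua_supported_states &= ~ALUA_T_SUP;
 *		return c;
 *   }
 */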
2085
2086 SE_DEV_ALUA_SUPPORT_STATE_SHOW(transitioning,
2087 tg_pt_gp_alua_supported_states, ALUA_T_SUP);
2088 SE_DEV_ALUA_SUPPORT_STATE_STORE(transitioning,
2089 tg_pt_gp_alua_supported_states, ALUA_T_SUP);
2090 SE_DEV_ALUA_TG_PT_ATTR(alua_support_transitioning, S_IRUGO | S_IWUSR);
2091
2092 SE_DEV_ALUA_SUPPORT_STATE_SHOW(offline,
2093 tg_pt_gp_alua_supported_states, ALUA_O_SUP);
2094 SE_DEV_ALUA_SUPPORT_STATE_STORE(offline,
2095 tg_pt_gp_alua_supported_states, ALUA_O_SUP);
2096 SE_DEV_ALUA_TG_PT_ATTR(alua_support_offline, S_IRUGO | S_IWUSR);
2097
2098 SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent,
2099 tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
2100 SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent,
2101 tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
2102 SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO);
2103
2104 SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable,
2105 tg_pt_gp_alua_supported_states, ALUA_U_SUP);
2106 SE_DEV_ALUA_SUPPORT_STATE_STORE(unavailable,
2107 tg_pt_gp_alua_supported_states, ALUA_U_SUP);
2108 SE_DEV_ALUA_TG_PT_ATTR(alua_support_unavailable, S_IRUGO | S_IWUSR);
2109
2110 SE_DEV_ALUA_SUPPORT_STATE_SHOW(standby,
2111 tg_pt_gp_alua_supported_states, ALUA_S_SUP);
2112 SE_DEV_ALUA_SUPPORT_STATE_STORE(standby,
2113 tg_pt_gp_alua_supported_states, ALUA_S_SUP);
2114 SE_DEV_ALUA_TG_PT_ATTR(alua_support_standby, S_IRUGO | S_IWUSR);
2115
2116 SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_optimized,
2117 tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
2118 SE_DEV_ALUA_SUPPORT_STATE_STORE(active_optimized,
2119 tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
2120 SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_optimized, S_IRUGO | S_IWUSR);
2121
2122 SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_nonoptimized,
2123 tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
2124 SE_DEV_ALUA_SUPPORT_STATE_STORE(active_nonoptimized,
2125 tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
2126 SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_nonoptimized, S_IRUGO | S_IWUSR);
2127
2128 /*
2129 * alua_write_metadata
2130 */
2131 static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
2132 struct t10_alua_tg_pt_gp *tg_pt_gp,
2133 char *page)
2134 {
2135 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata);
2136 }
2137
2138 static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
2139 struct t10_alua_tg_pt_gp *tg_pt_gp,
2140 const char *page,
2141 size_t count)
2142 {
2143 unsigned long tmp;
2144 int ret;
2145
2146 ret = kstrtoul(page, 0, &tmp);
2147 if (ret < 0) {
2148 pr_err("Unable to extract alua_write_metadata\n");
2149 return ret;
2150 }
2151
2152 if ((tmp != 0) && (tmp != 1)) {
2153 pr_err("Illegal value for alua_write_metadata:"
2154 " %lu\n", tmp);
2155 return -EINVAL;
2156 }
2157 tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
2158
2159 return count;
2160 }
2161
2162 SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR);
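/*
 * Semantics note (behavior implemented in target_core_alua.c): with
 * alua_write_metadata set to 1, ALUA state transitions are also written
 * out as metadata so they can be restored later; with 0 they are kept
 * in memory only.
 */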
2163
2164
2165
2166 /*
2167 * nonop_delay_msecs
2168 */
2169 static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(
2170 struct t10_alua_tg_pt_gp *tg_pt_gp,
2171 char *page)
2172 {
2173 return core_alua_show_nonop_delay_msecs(tg_pt_gp, page);
2174
2175 }
2176
2177 static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(
2178 struct t10_alua_tg_pt_gp *tg_pt_gp,
2179 const char *page,
2180 size_t count)
2181 {
2182 return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count);
2183 }
2184
2185 SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR);
2186
2187 /*
2188 * trans_delay_msecs
2189 */
2190 static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(
2191 struct t10_alua_tg_pt_gp *tg_pt_gp,
2192 char *page)
2193 {
2194 return core_alua_show_trans_delay_msecs(tg_pt_gp, page);
2195 }
2196
2197 static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
2198 struct t10_alua_tg_pt_gp *tg_pt_gp,
2199 const char *page,
2200 size_t count)
2201 {
2202 return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count);
2203 }
2204
2205 SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
2206
2207 /*
2208 * implicit_trans_secs
2209 */
2210 static ssize_t target_core_alua_tg_pt_gp_show_attr_implicit_trans_secs(
2211 struct t10_alua_tg_pt_gp *tg_pt_gp,
2212 char *page)
2213 {
2214 return core_alua_show_implicit_trans_secs(tg_pt_gp, page);
2215 }
2216
2217 static ssize_t target_core_alua_tg_pt_gp_store_attr_implicit_trans_secs(
2218 struct t10_alua_tg_pt_gp *tg_pt_gp,
2219 const char *page,
2220 size_t count)
2221 {
2222 return core_alua_store_implicit_trans_secs(tg_pt_gp, page, count);
2223 }
2224
2225 SE_DEV_ALUA_TG_PT_ATTR(implicit_trans_secs, S_IRUGO | S_IWUSR);
2226
2227 /*
2228 * preferred
2229 */
2230
2231 static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred(
2232 struct t10_alua_tg_pt_gp *tg_pt_gp,
2233 char *page)
2234 {
2235 return core_alua_show_preferred_bit(tg_pt_gp, page);
2236 }
2237
2238 static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred(
2239 struct t10_alua_tg_pt_gp *tg_pt_gp,
2240 const char *page,
2241 size_t count)
2242 {
2243 return core_alua_store_preferred_bit(tg_pt_gp, page, count);
2244 }
2245
2246 SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR);
2247
2248 /*
2249 * tg_pt_gp_id
2250 */
2251 static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
2252 struct t10_alua_tg_pt_gp *tg_pt_gp,
2253 char *page)
2254 {
2255 if (!tg_pt_gp->tg_pt_gp_valid_id)
2256 return 0;
2257
2258 return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
2259 }
2260
2261 static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
2262 struct t10_alua_tg_pt_gp *tg_pt_gp,
2263 const char *page,
2264 size_t count)
2265 {
2266 struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
2267 unsigned long tg_pt_gp_id;
2268 int ret;
2269
2270 ret = kstrtoul(page, 0, &tg_pt_gp_id);
2271 if (ret < 0) {
2272 pr_err("kstrtoul() returned %d for"
2273 " tg_pt_gp_id\n", ret);
2274 return ret;
2275 }
2276 if (tg_pt_gp_id > 0x0000ffff) {
2277 pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
2278 " 0x0000ffff\n", tg_pt_gp_id);
2279 return -EINVAL;
2280 }
2281
2282 ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
2283 if (ret < 0)
2284 return -EINVAL;
2285
2286 pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
2287 "core/alua/tg_pt_gps/%s to ID: %hu\n",
2288 config_item_name(&alua_tg_pt_gp_cg->cg_item),
2289 tg_pt_gp->tg_pt_gp_id);
2290
2291 return count;
2292 }
2293
2294 SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR);
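/*
 * Usage sketch (path and ID are illustrative): a target port group
 * becomes active once an ID in the range 0..0xffff is assigned, e.g.
 *
 *   echo 16 > .../core/$HBA/$DEV/alua/tg_pt_gps/$GP/tg_pt_gp_id
 *
 * Until then the group has no valid ID and most of the stores above
 * reject it with -EINVAL.
 */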
2295
2296 /*
2297 * members
2298 */
2299 static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
2300 struct t10_alua_tg_pt_gp *tg_pt_gp,
2301 char *page)
2302 {
2303 struct se_port *port;
2304 struct se_portal_group *tpg;
2305 struct se_lun *lun;
2306 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
2307 ssize_t len = 0, cur_len;
2308 unsigned char buf[TG_PT_GROUP_NAME_BUF];
2309
2310 memset(buf, 0, TG_PT_GROUP_NAME_BUF);
2311
2312 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
2313 list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
2314 tg_pt_gp_mem_list) {
2315 port = tg_pt_gp_mem->tg_pt;
2316 tpg = port->sep_tpg;
2317 lun = port->sep_lun;
2318
2319 cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
2320 "/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
2321 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2322 tpg->se_tpg_tfo->tpg_get_tag(tpg),
2323 config_item_name(&lun->lun_group.cg_item));
2324 cur_len++; /* Extra byte for NULL terminator */
2325
2326 if ((cur_len + len) > PAGE_SIZE) {
2327 			pr_warn("Ran out of tg_pt_gp_show_attr"
2328 				"_members buffer\n");
2329 break;
2330 }
2331 memcpy(page+len, buf, cur_len);
2332 len += cur_len;
2333 }
2334 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
2335
2336 return len;
2337 }
2338
2339 SE_DEV_ALUA_TG_PT_ATTR_RO(members);
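/*
 * Example output (illustrative; actual names depend on the fabric):
 * each member port is printed as $FABRIC/$WWN/tpgt_$TAG/$LUN, e.g.
 *
 *   iscsi/iqn.2003-01.org.linux-iscsi.example/tpgt_1/lun_0
 *
 * matching the snprintf() format in the show routine above.
 */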
2340
2341 CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp,
2342 tg_pt_gp_group);
2343
2344 static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
2345 &target_core_alua_tg_pt_gp_alua_access_state.attr,
2346 &target_core_alua_tg_pt_gp_alua_access_status.attr,
2347 &target_core_alua_tg_pt_gp_alua_access_type.attr,
2348 &target_core_alua_tg_pt_gp_alua_support_transitioning.attr,
2349 &target_core_alua_tg_pt_gp_alua_support_offline.attr,
2350 &target_core_alua_tg_pt_gp_alua_support_lba_dependent.attr,
2351 &target_core_alua_tg_pt_gp_alua_support_unavailable.attr,
2352 &target_core_alua_tg_pt_gp_alua_support_standby.attr,
2353 &target_core_alua_tg_pt_gp_alua_support_active_nonoptimized.attr,
2354 &target_core_alua_tg_pt_gp_alua_support_active_optimized.attr,
2355 &target_core_alua_tg_pt_gp_alua_write_metadata.attr,
2356 &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
2357 &target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
2358 &target_core_alua_tg_pt_gp_implicit_trans_secs.attr,
2359 &target_core_alua_tg_pt_gp_preferred.attr,
2360 &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
2361 &target_core_alua_tg_pt_gp_members.attr,
2362 NULL,
2363 };
2364
2365 static void target_core_alua_tg_pt_gp_release(struct config_item *item)
2366 {
2367 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
2368 struct t10_alua_tg_pt_gp, tg_pt_gp_group);
2369
2370 core_alua_free_tg_pt_gp(tg_pt_gp);
2371 }
2372
2373 static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
2374 .release = target_core_alua_tg_pt_gp_release,
2375 .show_attribute = target_core_alua_tg_pt_gp_attr_show,
2376 .store_attribute = target_core_alua_tg_pt_gp_attr_store,
2377 };
2378
2379 static struct config_item_type target_core_alua_tg_pt_gp_cit = {
2380 .ct_item_ops = &target_core_alua_tg_pt_gp_ops,
2381 .ct_attrs = target_core_alua_tg_pt_gp_attrs,
2382 .ct_owner = THIS_MODULE,
2383 };
2384
2385 /* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2386
2387 /* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */
2388
2389 static struct config_group *target_core_alua_create_tg_pt_gp(
2390 struct config_group *group,
2391 const char *name)
2392 {
2393 struct t10_alua *alua = container_of(group, struct t10_alua,
2394 alua_tg_pt_gps_group);
2395 struct t10_alua_tg_pt_gp *tg_pt_gp;
2396 struct config_group *alua_tg_pt_gp_cg = NULL;
2397 struct config_item *alua_tg_pt_gp_ci = NULL;
2398
2399 tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
2400 if (!tg_pt_gp)
2401 return NULL;
2402
2403 alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
2404 alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
2405
2406 config_group_init_type_name(alua_tg_pt_gp_cg, name,
2407 &target_core_alua_tg_pt_gp_cit);
2408
2409 pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
2410 " Group: alua/tg_pt_gps/%s\n",
2411 config_item_name(alua_tg_pt_gp_ci));
2412
2413 return alua_tg_pt_gp_cg;
2414 }
2415
2416 static void target_core_alua_drop_tg_pt_gp(
2417 struct config_group *group,
2418 struct config_item *item)
2419 {
2420 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
2421 struct t10_alua_tg_pt_gp, tg_pt_gp_group);
2422
2423 pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
2424 " Group: alua/tg_pt_gps/%s, ID: %hu\n",
2425 config_item_name(item), tg_pt_gp->tg_pt_gp_id);
2426 /*
2427 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
2428 * -> target_core_alua_tg_pt_gp_release().
2429 */
2430 config_item_put(item);
2431 }
2432
2433 static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
2434 .make_group = &target_core_alua_create_tg_pt_gp,
2435 .drop_item = &target_core_alua_drop_tg_pt_gp,
2436 };
2437
2438 TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);
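/*
 * Note on the arguments above: the per-device tg_pt_gps directory gets
 * group operations only; NULL item_ops and NULL attrs mean that
 * creating and removing target port groups is the sole interface at
 * this level.
 */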
2439
2440 /* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
2441
2442 /* Start functions for struct config_item_type target_core_alua_cit */
2443
2444 /*
2445 * target_core_alua_cit is a ConfigFS group that lives under
2446  * /sys/kernel/config/target/core/alua. The default group
2447  * core/alua/lu_gps is attached to target_core_alua_cit in
2448  * target_core_init_configfs() below; tg_pt_gps now live per device.
2449 */
2450 static struct config_item_type target_core_alua_cit = {
2451 .ct_item_ops = NULL,
2452 .ct_attrs = NULL,
2453 .ct_owner = THIS_MODULE,
2454 };
2455
2456 /* End functions for struct config_item_type target_core_alua_cit */
2457
2458 /* Start functions for struct config_item_type tb_dev_stat_cit */
2459
2460 static struct config_group *target_core_stat_mkdir(
2461 struct config_group *group,
2462 const char *name)
2463 {
2464 return ERR_PTR(-ENOSYS);
2465 }
2466
2467 static void target_core_stat_rmdir(
2468 struct config_group *group,
2469 struct config_item *item)
2470 {
2471 return;
2472 }
2473
2474 static struct configfs_group_operations target_core_stat_group_ops = {
2475 .make_group = &target_core_stat_mkdir,
2476 .drop_item = &target_core_stat_rmdir,
2477 };
2478
2479 TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);
2480
2481 /* End functions for struct config_item_type tb_dev_stat_cit */
2482
2483 /* Start functions for struct config_item_type target_core_hba_cit */
2484
2485 static struct config_group *target_core_make_subdev(
2486 struct config_group *group,
2487 const char *name)
2488 {
2489 struct t10_alua_tg_pt_gp *tg_pt_gp;
2490 struct config_item *hba_ci = &group->cg_item;
2491 struct se_hba *hba = item_to_hba(hba_ci);
2492 struct target_backend *tb = hba->backend;
2493 struct se_device *dev;
2494 struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
2495 struct config_group *dev_stat_grp = NULL;
2496 int errno = -ENOMEM, ret;
2497
2498 ret = mutex_lock_interruptible(&hba->hba_access_mutex);
2499 if (ret)
2500 return ERR_PTR(ret);
2501
2502 dev = target_alloc_device(hba, name);
2503 if (!dev)
2504 goto out_unlock;
2505
2506 dev_cg = &dev->dev_group;
2507
2508 dev_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
2509 GFP_KERNEL);
2510 if (!dev_cg->default_groups)
2511 goto out_free_device;
2512
2513 config_group_init_type_name(dev_cg, name, &tb->tb_dev_cit);
2514 config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
2515 &tb->tb_dev_attrib_cit);
2516 config_group_init_type_name(&dev->dev_pr_group, "pr",
2517 &tb->tb_dev_pr_cit);
2518 config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
2519 &tb->tb_dev_wwn_cit);
2520 config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
2521 "alua", &tb->tb_dev_alua_tg_pt_gps_cit);
2522 config_group_init_type_name(&dev->dev_stat_grps.stat_group,
2523 "statistics", &tb->tb_dev_stat_cit);
2524
2525 dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
2526 dev_cg->default_groups[1] = &dev->dev_pr_group;
2527 dev_cg->default_groups[2] = &dev->t10_wwn.t10_wwn_group;
2528 dev_cg->default_groups[3] = &dev->t10_alua.alua_tg_pt_gps_group;
2529 dev_cg->default_groups[4] = &dev->dev_stat_grps.stat_group;
2530 dev_cg->default_groups[5] = NULL;
2531 /*
2532 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
2533 */
2534 tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
2535 if (!tg_pt_gp)
2536 goto out_free_dev_cg_default_groups;
2537 dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
2538
2539 tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
2540 tg_pt_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
2541 GFP_KERNEL);
2542 if (!tg_pt_gp_cg->default_groups) {
2543 pr_err("Unable to allocate tg_pt_gp_cg->"
2544 "default_groups\n");
2545 goto out_free_tg_pt_gp;
2546 }
2547
2548 config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
2549 "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
2550 tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
2551 tg_pt_gp_cg->default_groups[1] = NULL;
2552 /*
2553 * Add core/$HBA/$DEV/statistics/ default groups
2554 */
2555 dev_stat_grp = &dev->dev_stat_grps.stat_group;
2556 dev_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 4,
2557 GFP_KERNEL);
2558 if (!dev_stat_grp->default_groups) {
2559 pr_err("Unable to allocate dev_stat_grp->default_groups\n");
2560 goto out_free_tg_pt_gp_cg_default_groups;
2561 }
2562 target_stat_setup_dev_default_groups(dev);
2563
2564 mutex_unlock(&hba->hba_access_mutex);
2565 return dev_cg;
2566
2567 out_free_tg_pt_gp_cg_default_groups:
2568 kfree(tg_pt_gp_cg->default_groups);
2569 out_free_tg_pt_gp:
2570 core_alua_free_tg_pt_gp(tg_pt_gp);
2571 out_free_dev_cg_default_groups:
2572 kfree(dev_cg->default_groups);
2573 out_free_device:
2574 target_free_device(dev);
2575 out_unlock:
2576 mutex_unlock(&hba->hba_access_mutex);
2577 return ERR_PTR(errno);
2578 }
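/*
 * Resulting layout sketch for e.g. "mkdir .../core/$HBA/my_dev"
 * ("my_dev" is a hypothetical device name):
 *
 *   my_dev/
 *   +-- attrib/
 *   +-- pr/
 *   +-- wwn/
 *   +-- alua/
 *   |   +-- default_tg_pt_gp/
 *   +-- statistics/
 *
 * i.e. the five default groups wired up above plus the implicit
 * default_tg_pt_gp target port group.
 */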
2579
2580 static void target_core_drop_subdev(
2581 struct config_group *group,
2582 struct config_item *item)
2583 {
2584 struct config_group *dev_cg = to_config_group(item);
2585 struct se_device *dev =
2586 container_of(dev_cg, struct se_device, dev_group);
2587 struct se_hba *hba;
2588 struct config_item *df_item;
2589 struct config_group *tg_pt_gp_cg, *dev_stat_grp;
2590 int i;
2591
2592 hba = item_to_hba(&dev->se_hba->hba_group.cg_item);
2593
2594 mutex_lock(&hba->hba_access_mutex);
2595
2596 dev_stat_grp = &dev->dev_stat_grps.stat_group;
2597 for (i = 0; dev_stat_grp->default_groups[i]; i++) {
2598 df_item = &dev_stat_grp->default_groups[i]->cg_item;
2599 dev_stat_grp->default_groups[i] = NULL;
2600 config_item_put(df_item);
2601 }
2602 kfree(dev_stat_grp->default_groups);
2603
2604 tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
2605 for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
2606 df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
2607 tg_pt_gp_cg->default_groups[i] = NULL;
2608 config_item_put(df_item);
2609 }
2610 kfree(tg_pt_gp_cg->default_groups);
2611 /*
2612 	 * The default_tg_pt_gp is freed by core_alua_free_tg_pt_gp(),
2613 	 * called via target_core_alua_tg_pt_gp_release() on final put.
2614 */
2615 dev->t10_alua.default_tg_pt_gp = NULL;
2616
2617 for (i = 0; dev_cg->default_groups[i]; i++) {
2618 df_item = &dev_cg->default_groups[i]->cg_item;
2619 dev_cg->default_groups[i] = NULL;
2620 config_item_put(df_item);
2621 }
2622 /*
2623 * se_dev is released from target_core_dev_item_ops->release()
2624 */
2625 config_item_put(item);
2626 mutex_unlock(&hba->hba_access_mutex);
2627 }
2628
2629 static struct configfs_group_operations target_core_hba_group_ops = {
2630 .make_group = target_core_make_subdev,
2631 .drop_item = target_core_drop_subdev,
2632 };
2633
2634 CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba);
2635 #define SE_HBA_ATTR(_name, _mode) \
2636 static struct target_core_hba_attribute \
2637 target_core_hba_##_name = \
2638 __CONFIGFS_EATTR(_name, _mode, \
2639 target_core_hba_show_attr_##_name, \
2640 target_core_hba_store_attr_##_name);
2641
2642 #define SE_HBA_ATTR_RO(_name) \
2643 static struct target_core_hba_attribute \
2644 target_core_hba_##_name = \
2645 __CONFIGFS_EATTR_RO(_name, \
2646 target_core_hba_show_attr_##_name);
2647
2648 static ssize_t target_core_hba_show_attr_hba_info(
2649 struct se_hba *hba,
2650 char *page)
2651 {
2652 return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
2653 hba->hba_id, hba->backend->ops->name,
2654 TARGET_CORE_CONFIGFS_VERSION);
2655 }
2656
2657 SE_HBA_ATTR_RO(hba_info);
2658
2659 static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
2660 char *page)
2661 {
2662 int hba_mode = 0;
2663
2664 if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
2665 hba_mode = 1;
2666
2667 return sprintf(page, "%d\n", hba_mode);
2668 }
2669
2670 static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
2671 const char *page, size_t count)
2672 {
2673 unsigned long mode_flag;
2674 int ret;
2675
2676 if (hba->backend->ops->pmode_enable_hba == NULL)
2677 return -EINVAL;
2678
2679 ret = kstrtoul(page, 0, &mode_flag);
2680 if (ret < 0) {
2681 pr_err("Unable to extract hba mode flag: %d\n", ret);
2682 return ret;
2683 }
2684
2685 if (hba->dev_count) {
2686 pr_err("Unable to set hba_mode with active devices\n");
2687 return -EINVAL;
2688 }
2689
2690 ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
2691 if (ret < 0)
2692 return -EINVAL;
2693 if (ret > 0)
2694 hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
2695 else if (ret == 0)
2696 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
2697
2698 return count;
2699 }
2700
2701 SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
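/*
 * Usage sketch: hba_mode only has an effect for backends implementing
 * ->pmode_enable_hba (notably pSCSI), and only while no devices are
 * attached, e.g.
 *
 *   echo 1 > /sys/kernel/config/target/core/pscsi_0/hba_mode
 *
 * "pscsi_0" is an illustrative HBA name.
 */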
2702
2703 CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
2704
2705 static void target_core_hba_release(struct config_item *item)
2706 {
2707 struct se_hba *hba = container_of(to_config_group(item),
2708 struct se_hba, hba_group);
2709 core_delete_hba(hba);
2710 }
2711
2712 static struct configfs_attribute *target_core_hba_attrs[] = {
2713 &target_core_hba_hba_info.attr,
2714 &target_core_hba_hba_mode.attr,
2715 NULL,
2716 };
2717
2718 static struct configfs_item_operations target_core_hba_item_ops = {
2719 .release = target_core_hba_release,
2720 .show_attribute = target_core_hba_attr_show,
2721 .store_attribute = target_core_hba_attr_store,
2722 };
2723
2724 static struct config_item_type target_core_hba_cit = {
2725 .ct_item_ops = &target_core_hba_item_ops,
2726 .ct_group_ops = &target_core_hba_group_ops,
2727 .ct_attrs = target_core_hba_attrs,
2728 .ct_owner = THIS_MODULE,
2729 };
2730
2731 static struct config_group *target_core_call_addhbatotarget(
2732 struct config_group *group,
2733 const char *name)
2734 {
2735 char *se_plugin_str, *str, *str2;
2736 struct se_hba *hba;
2737 char buf[TARGET_CORE_NAME_MAX_LEN];
2738 unsigned long plugin_dep_id = 0;
2739 int ret;
2740
2741 memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
2742 if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
2743 pr_err("Passed *name strlen(): %d exceeds"
2744 " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
2745 TARGET_CORE_NAME_MAX_LEN);
2746 return ERR_PTR(-ENAMETOOLONG);
2747 }
2748 snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
2749
2750 str = strstr(buf, "_");
2751 if (!str) {
2752 pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
2753 return ERR_PTR(-EINVAL);
2754 }
2755 se_plugin_str = buf;
2756 /*
2757 * Special case for subsystem plugins that have "_" in their names.
2758 	 * Namely rd_direct and rd_mcp.
2759 */
2760 str2 = strstr(str+1, "_");
2761 if (str2) {
2762 *str2 = '\0'; /* Terminate for *se_plugin_str */
2763 str2++; /* Skip to start of plugin dependent ID */
2764 str = str2;
2765 } else {
2766 *str = '\0'; /* Terminate for *se_plugin_str */
2767 str++; /* Skip to start of plugin dependent ID */
2768 }
2769
2770 ret = kstrtoul(str, 0, &plugin_dep_id);
2771 if (ret < 0) {
2772 pr_err("kstrtoul() returned %d for"
2773 " plugin_dep_id\n", ret);
2774 return ERR_PTR(ret);
2775 }
2776 /*
2777 * Load up TCM subsystem plugins if they have not already been loaded.
2778 */
2779 transport_subsystem_check_init();
2780
2781 hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
2782 if (IS_ERR(hba))
2783 return ERR_CAST(hba);
2784
2785 config_group_init_type_name(&hba->hba_group, name,
2786 &target_core_hba_cit);
2787
2788 return &hba->hba_group;
2789 }
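/*
 * Parsing examples (names are illustrative): "iblock_0" yields
 * se_plugin_str "iblock" with plugin_dep_id 0, while the special-cased
 * "rd_mcp_1" yields "rd_mcp" with plugin_dep_id 1, because there the
 * second '_' (not the first) separates the plugin name from the ID.
 */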
2790
2791 static void target_core_call_delhbafromtarget(
2792 struct config_group *group,
2793 struct config_item *item)
2794 {
2795 /*
2796 * core_delete_hba() is called from target_core_hba_item_ops->release()
2797 * -> target_core_hba_release()
2798 */
2799 config_item_put(item);
2800 }
2801
2802 static struct configfs_group_operations target_core_group_ops = {
2803 .make_group = target_core_call_addhbatotarget,
2804 .drop_item = target_core_call_delhbafromtarget,
2805 };
2806
2807 static struct config_item_type target_core_cit = {
2808 .ct_item_ops = NULL,
2809 .ct_group_ops = &target_core_group_ops,
2810 .ct_attrs = NULL,
2811 .ct_owner = THIS_MODULE,
2812 };
2813
2814 /* End functions for struct config_item_type target_core_hba_cit */
2815
2816 void target_setup_backend_cits(struct target_backend *tb)
2817 {
2818 target_core_setup_dev_cit(tb);
2819 target_core_setup_dev_attrib_cit(tb);
2820 target_core_setup_dev_pr_cit(tb);
2821 target_core_setup_dev_wwn_cit(tb);
2822 target_core_setup_dev_alua_tg_pt_gps_cit(tb);
2823 target_core_setup_dev_stat_cit(tb);
2824 }
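/*
 * A minimal sketch of how a backend driver reaches this function,
 * assuming the transport_backend_register() interface in
 * target_core_transport.c:
 *
 *   static const struct target_backend_ops iblock_ops = {
 *		.name	= "iblock",
 *		.owner	= THIS_MODULE,
 *		...
 *   };
 *
 *   transport_backend_register(&iblock_ops)
 *     -> allocates a struct target_backend and calls
 *        target_setup_backend_cits() to populate its config_item_types.
 */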
2825
2826 static int __init target_core_init_configfs(void)
2827 {
2828 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
2829 struct config_group *lu_gp_cg = NULL;
2830 struct configfs_subsystem *subsys = &target_core_fabrics;
2831 struct t10_alua_lu_gp *lu_gp;
2832 int ret;
2833
2834 pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
2835 " Engine: %s on %s/%s on "UTS_RELEASE"\n",
2836 TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
2837
2838 config_group_init(&subsys->su_group);
2839 mutex_init(&subsys->su_mutex);
2840
2841 ret = init_se_kmem_caches();
2842 if (ret < 0)
2843 return ret;
2844 /*
2845 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
2846 * and ALUA Logical Unit Group and Target Port Group infrastructure.
2847 */
2848 target_cg = &subsys->su_group;
2849 target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
2850 GFP_KERNEL);
2851 if (!target_cg->default_groups) {
2852 pr_err("Unable to allocate target_cg->default_groups\n");
2853 ret = -ENOMEM;
2854 goto out_global;
2855 }
2856
2857 config_group_init_type_name(&target_core_hbagroup,
2858 "core", &target_core_cit);
2859 target_cg->default_groups[0] = &target_core_hbagroup;
2860 target_cg->default_groups[1] = NULL;
2861 /*
2862 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
2863 */
2864 hba_cg = &target_core_hbagroup;
2865 hba_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
2866 GFP_KERNEL);
2867 if (!hba_cg->default_groups) {
2868 pr_err("Unable to allocate hba_cg->default_groups\n");
2869 ret = -ENOMEM;
2870 goto out_global;
2871 }
2872 config_group_init_type_name(&alua_group,
2873 "alua", &target_core_alua_cit);
2874 hba_cg->default_groups[0] = &alua_group;
2875 hba_cg->default_groups[1] = NULL;
2876 /*
2877 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
2878 * groups under /sys/kernel/config/target/core/alua/
2879 */
2880 alua_cg = &alua_group;
2881 alua_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
2882 GFP_KERNEL);
2883 if (!alua_cg->default_groups) {
2884 pr_err("Unable to allocate alua_cg->default_groups\n");
2885 ret = -ENOMEM;
2886 goto out_global;
2887 }
2888
2889 config_group_init_type_name(&alua_lu_gps_group,
2890 "lu_gps", &target_core_alua_lu_gps_cit);
2891 alua_cg->default_groups[0] = &alua_lu_gps_group;
2892 alua_cg->default_groups[1] = NULL;
2893 /*
2894 * Add core/alua/lu_gps/default_lu_gp
2895 */
2896 lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
2897 if (IS_ERR(lu_gp)) {
2898 		ret = PTR_ERR(lu_gp);
2899 goto out_global;
2900 }
2901
2902 lu_gp_cg = &alua_lu_gps_group;
2903 lu_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
2904 GFP_KERNEL);
2905 if (!lu_gp_cg->default_groups) {
2906 pr_err("Unable to allocate lu_gp_cg->default_groups\n");
2907 ret = -ENOMEM;
2908 goto out_global;
2909 }
2910
2911 config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
2912 &target_core_alua_lu_gp_cit);
2913 lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
2914 lu_gp_cg->default_groups[1] = NULL;
2915 default_lu_gp = lu_gp;
2916 /*
2917 * Register the target_core_mod subsystem with configfs.
2918 */
2919 ret = configfs_register_subsystem(subsys);
2920 if (ret < 0) {
2921 pr_err("Error %d while registering subsystem %s\n",
2922 ret, subsys->su_group.cg_item.ci_namebuf);
2923 goto out_global;
2924 }
2925 pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
2926 " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
2927 " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
2928 /*
2929 * Register built-in RAMDISK subsystem logic for virtual LUN 0
2930 */
2931 ret = rd_module_init();
2932 if (ret < 0)
2933 goto out;
2934
2935 ret = core_dev_setup_virtual_lun0();
2936 if (ret < 0)
2937 goto out;
2938
2939 ret = target_xcopy_setup_pt();
2940 if (ret < 0)
2941 goto out;
2942
2943 return 0;
2944
2945 out:
2946 configfs_unregister_subsystem(subsys);
2947 core_dev_release_virtual_lun0();
2948 rd_module_exit();
2949 out_global:
2950 if (default_lu_gp) {
2951 core_alua_free_lu_gp(default_lu_gp);
2952 default_lu_gp = NULL;
2953 }
2954 if (lu_gp_cg)
2955 kfree(lu_gp_cg->default_groups);
2956 if (alua_cg)
2957 kfree(alua_cg->default_groups);
2958 if (hba_cg)
2959 kfree(hba_cg->default_groups);
2960 kfree(target_cg->default_groups);
2961 release_se_kmem_caches();
2962 return ret;
2963 }
2964
2965 static void __exit target_core_exit_configfs(void)
2966 {
2967 struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
2968 struct config_item *item;
2969 int i;
2970
2971 lu_gp_cg = &alua_lu_gps_group;
2972 for (i = 0; lu_gp_cg->default_groups[i]; i++) {
2973 item = &lu_gp_cg->default_groups[i]->cg_item;
2974 lu_gp_cg->default_groups[i] = NULL;
2975 config_item_put(item);
2976 }
2977 kfree(lu_gp_cg->default_groups);
2978 lu_gp_cg->default_groups = NULL;
2979
2980 alua_cg = &alua_group;
2981 for (i = 0; alua_cg->default_groups[i]; i++) {
2982 item = &alua_cg->default_groups[i]->cg_item;
2983 alua_cg->default_groups[i] = NULL;
2984 config_item_put(item);
2985 }
2986 kfree(alua_cg->default_groups);
2987 alua_cg->default_groups = NULL;
2988
2989 hba_cg = &target_core_hbagroup;
2990 for (i = 0; hba_cg->default_groups[i]; i++) {
2991 item = &hba_cg->default_groups[i]->cg_item;
2992 hba_cg->default_groups[i] = NULL;
2993 config_item_put(item);
2994 }
2995 kfree(hba_cg->default_groups);
2996 hba_cg->default_groups = NULL;
2997 /*
2998 * We expect subsys->su_group.default_groups to be released
2999 	 * by configfs subsystem provider logic.
3000 */
3001 configfs_unregister_subsystem(&target_core_fabrics);
3002 kfree(target_core_fabrics.su_group.default_groups);
3003
3004 core_alua_free_lu_gp(default_lu_gp);
3005 default_lu_gp = NULL;
3006
3007 pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
3008 " Infrastructure\n");
3009
3010 core_dev_release_virtual_lun0();
3011 rd_module_exit();
3012 target_xcopy_release_pt();
3013 release_se_kmem_caches();
3014 }
3015
3016 MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
3017 MODULE_AUTHOR("nab@Linux-iSCSI.org");
3018 MODULE_LICENSE("GPL");
3019
3020 module_init(target_core_init_configfs);
3021 module_exit(target_core_exit_configfs);