1 /*******************************************************************************
2 * Filename: target_core_alua.c
3 *
4 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
5 *
6 * Copyright (c) 2009-2010 Rising Tide Systems
7 * Copyright (c) 2009-2010 Linux-iSCSI.org
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 *
25 ******************************************************************************/
26
27 #include <linux/slab.h>
28 #include <linux/spinlock.h>
29 #include <linux/configfs.h>
30 #include <linux/export.h>
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_cmnd.h>
33
34 #include <target/target_core_base.h>
35 #include <target/target_core_device.h>
36 #include <target/target_core_transport.h>
37 #include <target/target_core_fabric_ops.h>
38 #include <target/target_core_configfs.h>
39
40 #include "target_core_alua.h"
41 #include "target_core_hba.h"
42 #include "target_core_ua.h"
43
44 static int core_alua_check_transition(int state, int *primary);
45 static int core_alua_set_tg_pt_secondary_state(
46 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
47 struct se_port *port, int explict, int offline);
48
49 static u16 alua_lu_gps_counter;
50 static u32 alua_lu_gps_count;
51
52 static DEFINE_SPINLOCK(lu_gps_lock);
53 static LIST_HEAD(lu_gps_list);
54
55 struct t10_alua_lu_gp *default_lu_gp;
56
57 /*
58 * REPORT_TARGET_PORT_GROUPS
59 *
60 * See spc4r17 section 6.27
61 */
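/*
 * Response layout emitted below (a sketch based on this code and spc4r17):
 * a 4-byte header carrying the RETURN DATA LENGTH, followed by one
 * descriptor per target port group -- an 8-byte descriptor header plus a
 * 4-byte target port descriptor for each member port, which is why the
 * loop checks "off + 8 + (tg_pt_gp_members * 4)" against the allocation
 * length.
 */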
62 int target_emulate_report_target_port_groups(struct se_task *task)
63 {
64 struct se_cmd *cmd = task->task_se_cmd;
65 struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
66 struct se_port *port;
67 struct t10_alua_tg_pt_gp *tg_pt_gp;
68 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
69 unsigned char *buf;
70 u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
71 Target port group descriptor */
72 /*
73 * Need at least 4 bytes of response data or else we can't
74 * even fit the return data length.
75 */
76 if (cmd->data_length < 4) {
77 pr_warn("REPORT TARGET PORT GROUPS allocation length %u"
78 " too small\n", cmd->data_length);
79 return -EINVAL;
80 }
81
82 buf = transport_kmap_first_data_page(cmd);
83
84 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
85 list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
86 tg_pt_gp_list) {
87 /*
88 * Check if the Target port group and Target port descriptor list
89 * based on tg_pt_gp_members count will fit into the response payload.
90 * Otherwise, bump rd_len to let the initiator know we have exceeded
91 * the allocation length and the response is truncated.
92 */
93 if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
94 cmd->data_length) {
95 rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
96 continue;
97 }
98 /*
99 * PREF: Preferred target port bit, determine if this
100 * bit should be set for port group.
101 */
102 if (tg_pt_gp->tg_pt_gp_pref)
103 buf[off] = 0x80;
104 /*
105 * Set the ASYMMETRIC ACCESS State
106 */
107 buf[off++] |= (atomic_read(
108 &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
109 /*
110 * Set supported ASYMMETRIC ACCESS State bits
111 */
112 buf[off] = 0x80; /* T_SUP */
113 buf[off] |= 0x40; /* O_SUP */
114 buf[off] |= 0x8; /* U_SUP */
115 buf[off] |= 0x4; /* S_SUP */
116 buf[off] |= 0x2; /* AN_SUP */
117 buf[off++] |= 0x1; /* AO_SUP */
118 /*
119 * TARGET PORT GROUP
120 */
121 buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
122 buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
123
124 off++; /* Skip over Reserved */
125 /*
126 * STATUS CODE
127 */
128 buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
129 /*
130 * Vendor Specific field
131 */
132 buf[off++] = 0x00;
133 /*
134 * TARGET PORT COUNT
135 */
136 buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
137 rd_len += 8;
138
139 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
140 list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
141 tg_pt_gp_mem_list) {
142 port = tg_pt_gp_mem->tg_pt;
143 /*
144 * Start Target Port descriptor format
145 *
146 * See spc4r17 section 6.2.7 Table 247
147 */
148 off += 2; /* Skip over Obsolete */
149 /*
150 * Set RELATIVE TARGET PORT IDENTIFIER
151 */
152 buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
153 buf[off++] = (port->sep_rtpi & 0xff);
154 rd_len += 4;
155 }
156 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
157 }
158 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
159 /*
160 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
161 */
162 buf[0] = ((rd_len >> 24) & 0xff);
163 buf[1] = ((rd_len >> 16) & 0xff);
164 buf[2] = ((rd_len >> 8) & 0xff);
165 buf[3] = (rd_len & 0xff);
166
167 transport_kunmap_first_data_page(cmd);
168
169 task->task_scsi_status = GOOD;
170 transport_complete_task(task, 1);
171 return 0;
172 }
173
174 /*
175 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
176 *
177 * See spc4r17 section 6.35
178 */
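/*
 * Parameter list layout consumed below (a sketch based on this code and
 * spc4r17): a 4-byte reserved header, then one 4-byte descriptor per
 * requested change -- byte 0 bits 3:0 carry the ASYMMETRIC ACCESS STATE,
 * and bytes 2-3 carry either the TARGET PORT GROUP identifier (primary
 * states) or the RELATIVE TARGET PORT IDENTIFIER (secondary states).
 */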
179 int target_emulate_set_target_port_groups(struct se_task *task)
180 {
181 struct se_cmd *cmd = task->task_se_cmd;
182 struct se_device *dev = cmd->se_dev;
183 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
184 struct se_port *port, *l_port = cmd->se_lun->lun_sep;
185 struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
186 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
187 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
188 unsigned char *buf;
189 unsigned char *ptr;
190 u32 len = 4; /* Skip over RESERVED area in header */
191 int alua_access_state, primary = 0, rc;
192 u16 tg_pt_id, rtpi;
193
194 if (!l_port) {
195 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
196 return -EINVAL;
197 }
198 buf = transport_kmap_first_data_page(cmd);
199
200 /*
201 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
202 * for the local tg_pt_gp.
203 */
204 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
205 if (!l_tg_pt_gp_mem) {
206 pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
207 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
208 rc = -EINVAL;
209 goto out;
210 }
211 spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
212 l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
213 if (!l_tg_pt_gp) {
214 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
215 pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
216 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
217 rc = -EINVAL;
218 goto out;
219 }
220 rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
221 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
222
223 if (!rc) {
224 pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
225 " while TPGS_EXPLICT_ALUA is disabled\n");
226 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
227 rc = -EINVAL;
228 goto out;
229 }
230
231 ptr = &buf[4]; /* Skip over RESERVED area in header */
232
233 while (len < cmd->data_length) {
234 alua_access_state = (ptr[0] & 0x0f);
235 /*
236 * Check the received ALUA access state, and determine if
237 * the state is a primary or secondary target port asymmetric
238 * access state.
239 */
240 rc = core_alua_check_transition(alua_access_state, &primary);
241 if (rc != 0) {
242 /*
243 * If the SET TARGET PORT GROUPS attempts to establish
244 * an invalid combination of target port asymmetric
245 * access states or attempts to establish an
246 * unsupported target port asymmetric access state,
247 * then the command shall be terminated with CHECK
248 * CONDITION status, with the sense key set to ILLEGAL
249 * REQUEST, and the additional sense code set to INVALID
250 * FIELD IN PARAMETER LIST.
251 */
252 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
253 rc = -EINVAL;
254 goto out;
255 }
256 rc = -1;
257 /*
258 * If the ASYMMETRIC ACCESS STATE field (see table 267)
259 * specifies a primary target port asymmetric access state,
260 * then the TARGET PORT GROUP OR TARGET PORT field specifies
261 * a primary target port group for which the primary target
262 * port asymmetric access state shall be changed. If the
263 * ASYMMETRIC ACCESS STATE field specifies a secondary target
264 * port asymmetric access state, then the TARGET PORT GROUP OR
265 * TARGET PORT field specifies the relative target port
266 * identifier (see 3.1.120) of the target port for which the
267 * secondary target port asymmetric access state shall be
268 * changed.
269 */
270 if (primary) {
271 tg_pt_id = ((ptr[2] << 8) & 0xff00);
272 tg_pt_id |= (ptr[3] & 0xff);
273 /*
274 * Locate the matching target port group ID from
275 * the global tg_pt_gp list
276 */
277 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
278 list_for_each_entry(tg_pt_gp,
279 &su_dev->t10_alua.tg_pt_gps_list,
280 tg_pt_gp_list) {
281 if (!tg_pt_gp->tg_pt_gp_valid_id)
282 continue;
283
284 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
285 continue;
286
287 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
288 smp_mb__after_atomic_inc();
289 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
290
291 rc = core_alua_do_port_transition(tg_pt_gp,
292 dev, l_port, nacl,
293 alua_access_state, 1);
294
295 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
296 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
297 smp_mb__after_atomic_dec();
298 break;
299 }
300 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
301 /*
302 * If no matching target port group ID can be located,
303 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
304 */
305 if (rc != 0) {
306 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
307 rc = -EINVAL;
308 goto out;
309 }
310 } else {
311 /*
312 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
313 * the Target Port in question for the incoming
314 * SET_TARGET_PORT_GROUPS op.
315 */
316 rtpi = ((ptr[2] << 8) & 0xff00);
317 rtpi |= (ptr[3] & 0xff);
318 /*
319 * Locate the matching relative target port identifier
320 * for the struct se_device storage object.
321 */
322 spin_lock(&dev->se_port_lock);
323 list_for_each_entry(port, &dev->dev_sep_list,
324 sep_list) {
325 if (port->sep_rtpi != rtpi)
326 continue;
327
328 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
329 spin_unlock(&dev->se_port_lock);
330
331 rc = core_alua_set_tg_pt_secondary_state(
332 tg_pt_gp_mem, port, 1, 1);
333
334 spin_lock(&dev->se_port_lock);
335 break;
336 }
337 spin_unlock(&dev->se_port_lock);
338 /*
339 * If no matching relative target port identifier can
340 * be located, throw an exception with ASCQ:
341 * INVALID_PARAMETER_LIST
342 */
343 if (rc != 0) {
344 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
345 rc = -EINVAL;
346 goto out;
347 }
348 }
349
350 ptr += 4;
351 len += 4;
352 }
353
354 out:
355 transport_kunmap_first_data_page(cmd);
356 task->task_scsi_status = GOOD;
357 transport_complete_task(task, 1);
358 return 0;
359 }
360
361 static inline int core_alua_state_nonoptimized(
362 struct se_cmd *cmd,
363 unsigned char *cdb,
364 int nonop_delay_msecs,
365 u8 *alua_ascq)
366 {
367 /*
368 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
369 * later to determine if processing of this cmd needs to be
370 * temporarily delayed for the Active/NonOptimized primary access state.
371 */
372 cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
373 cmd->alua_nonop_delay = nonop_delay_msecs;
374 return 0;
375 }
376
377 static inline int core_alua_state_standby(
378 struct se_cmd *cmd,
379 unsigned char *cdb,
380 u8 *alua_ascq)
381 {
382 /*
383 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
384 * spc4r17 section 5.9.2.4.4
385 */
386 switch (cdb[0]) {
387 case INQUIRY:
388 case LOG_SELECT:
389 case LOG_SENSE:
390 case MODE_SELECT:
391 case MODE_SENSE:
392 case REPORT_LUNS:
393 case RECEIVE_DIAGNOSTIC:
394 case SEND_DIAGNOSTIC:
395 case MAINTENANCE_IN:
396 switch (cdb[1]) {
397 case MI_REPORT_TARGET_PGS:
398 return 0;
399 default:
400 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
401 return 1;
402 }
403 case MAINTENANCE_OUT:
404 switch (cdb[1]) {
405 case MO_SET_TARGET_PGS:
406 return 0;
407 default:
408 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
409 return 1;
410 }
411 case REQUEST_SENSE:
412 case PERSISTENT_RESERVE_IN:
413 case PERSISTENT_RESERVE_OUT:
414 case READ_BUFFER:
415 case WRITE_BUFFER:
416 return 0;
417 default:
418 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
419 return 1;
420 }
421
422 return 0;
423 }
424
425 static inline int core_alua_state_unavailable(
426 struct se_cmd *cmd,
427 unsigned char *cdb,
428 u8 *alua_ascq)
429 {
430 /*
431 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
432 * spc4r17 section 5.9.2.4.5
433 */
434 switch (cdb[0]) {
435 case INQUIRY:
436 case REPORT_LUNS:
437 case MAINTENANCE_IN:
438 switch (cdb[1]) {
439 case MI_REPORT_TARGET_PGS:
440 return 0;
441 default:
442 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
443 return 1;
444 }
445 case MAINTENANCE_OUT:
446 switch (cdb[1]) {
447 case MO_SET_TARGET_PGS:
448 return 0;
449 default:
450 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
451 return 1;
452 }
453 case REQUEST_SENSE:
454 case READ_BUFFER:
455 case WRITE_BUFFER:
456 return 0;
457 default:
458 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
459 return 1;
460 }
461
462 return 0;
463 }
464
465 static inline int core_alua_state_transition(
466 struct se_cmd *cmd,
467 unsigned char *cdb,
468 u8 *alua_ascq)
469 {
470 /*
471 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
472 * spc4r17 section 5.9.2.5
473 */
474 switch (cdb[0]) {
475 case INQUIRY:
476 case REPORT_LUNS:
477 case MAINTENANCE_IN:
478 switch (cdb[1]) {
479 case MI_REPORT_TARGET_PGS:
480 return 0;
481 default:
482 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
483 return 1;
484 }
485 case REQUEST_SENSE:
486 case READ_BUFFER:
487 case WRITE_BUFFER:
488 return 0;
489 default:
490 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
491 return 1;
492 }
493
494 return 0;
495 }
496
497 /*
498 * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
499 * in transport_cmd_sequencer(). This function is assigned to
500 * struct t10_alua *->state_check() in core_setup_alua()
501 */
502 static int core_alua_state_check_nop(
503 struct se_cmd *cmd,
504 unsigned char *cdb,
505 u8 *alua_ascq)
506 {
507 return 0;
508 }
509
510 /*
511 * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
512 * This function is assigned to struct t10_alua *->state_check() in
513 * core_setup_alua()
514 *
515 * Also, this function can return three different return codes to
516 * signal transport_generic_cmd_sequencer()
517 *
518 * return 1: Used to signal LUN not accessible, and CHECK CONDITION/NOT READY
519 * return 0: Used to signal success
520 * return -1: Used to signal failure, and invalid CDB field
521 */
522 static int core_alua_state_check(
523 struct se_cmd *cmd,
524 unsigned char *cdb,
525 u8 *alua_ascq)
526 {
527 struct se_lun *lun = cmd->se_lun;
528 struct se_port *port = lun->lun_sep;
529 struct t10_alua_tg_pt_gp *tg_pt_gp;
530 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
531 int out_alua_state, nonop_delay_msecs;
532
533 if (!port)
534 return 0;
535 /*
536 * First, check for a struct se_port specific secondary ALUA target port
537 * access state: OFFLINE
538 */
539 if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
540 *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
541 pr_debug("ALUA: Got secondary offline status for local"
542 " target port\n");
544 return 1;
545 }
546 /*
547 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
548 * ALUA target port group, to obtain current ALUA access state.
549 * Otherwise look for the underlying struct se_device association with
550 * a ALUA logical unit group.
551 */
552 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
553 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
554 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
555 out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
556 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
557 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
558 /*
559 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
560 * statement so the compiler knows explicitly to check this case first.
561 * For the Optimized ALUA access state case, we want to process the
562 * incoming fabric cmd ASAP..
563 */
564 if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
565 return 0;
566
567 switch (out_alua_state) {
568 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
569 return core_alua_state_nonoptimized(cmd, cdb,
570 nonop_delay_msecs, alua_ascq);
571 case ALUA_ACCESS_STATE_STANDBY:
572 return core_alua_state_standby(cmd, cdb, alua_ascq);
573 case ALUA_ACCESS_STATE_UNAVAILABLE:
574 return core_alua_state_unavailable(cmd, cdb, alua_ascq);
575 case ALUA_ACCESS_STATE_TRANSITION:
576 return core_alua_state_transition(cmd, cdb, alua_ascq);
577 /*
578 * OFFLINE is a secondary ALUA target port group access state, that is
579 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
580 */
581 case ALUA_ACCESS_STATE_OFFLINE:
582 default:
583 pr_err("Unknown ALUA access state: 0x%02x\n",
584 out_alua_state);
585 return -EINVAL;
586 }
587
588 return 0;
589 }
590
591 /*
592 * Check implicit and explicit ALUA state change requests.
593 */
594 static int core_alua_check_transition(int state, int *primary)
595 {
596 switch (state) {
597 case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
598 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
599 case ALUA_ACCESS_STATE_STANDBY:
600 case ALUA_ACCESS_STATE_UNAVAILABLE:
601 /*
602 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
603 * defined as primary target port asymmetric access states.
604 */
605 *primary = 1;
606 break;
607 case ALUA_ACCESS_STATE_OFFLINE:
608 /*
609 * OFFLINE state is defined as a secondary target port
610 * asymmetric access state.
611 */
612 *primary = 0;
613 break;
614 default:
615 pr_err("Unknown ALUA access state: 0x%02x\n", state);
616 return -EINVAL;
617 }
618
619 return 0;
620 }
621
622 static char *core_alua_dump_state(int state)
623 {
624 switch (state) {
625 case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
626 return "Active/Optimized";
627 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
628 return "Active/NonOptimized";
629 case ALUA_ACCESS_STATE_STANDBY:
630 return "Standby";
631 case ALUA_ACCESS_STATE_UNAVAILABLE:
632 return "Unavailable";
633 case ALUA_ACCESS_STATE_OFFLINE:
634 return "Offline";
635 default:
636 return "Unknown";
637 }
638
639 return NULL;
640 }
641
642 char *core_alua_dump_status(int status)
643 {
644 switch (status) {
645 case ALUA_STATUS_NONE:
646 return "None";
647 case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
648 return "Altered by Explict STPG";
649 case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
650 return "Altered by Implict ALUA";
651 default:
652 return "Unknown";
653 }
654
655 return NULL;
656 }
657
658 /*
659 * Used by fabric modules to determine when we need to delay processing
660 * for the Active/NonOptimized paths..
661 */
662 int core_alua_check_nonop_delay(
663 struct se_cmd *cmd)
664 {
665 if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
666 return 0;
667 if (in_interrupt())
668 return 0;
669 /*
670 * The ALUA Active/NonOptimized access state delay can be disabled
671 * via configfs with a value of zero
672 */
673 if (!cmd->alua_nonop_delay)
674 return 0;
675 /*
676 * struct se_cmd->alua_nonop_delay gets set by a target port group
677 * defined interval in core_alua_state_nonoptimized()
678 */
679 msleep_interruptible(cmd->alua_nonop_delay);
680 return 0;
681 }
682 EXPORT_SYMBOL(core_alua_check_nonop_delay);
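/*
 * Hypothetical fabric call site sketch (not taken from this file): a
 * fabric module would typically invoke this from process context before
 * handing the command to the backend, e.g.:
 *
 *	core_alua_check_nonop_delay(se_cmd);
 *	transport_handle_cdb_direct(se_cmd);
 *
 * so the configured Active/NonOptimized delay is applied once per command.
 */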
683
684 /*
685 * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
686 *
687 */
688 static int core_alua_write_tpg_metadata(
689 const char *path,
690 unsigned char *md_buf,
691 u32 md_buf_len)
692 {
693 mm_segment_t old_fs;
694 struct file *file;
695 struct iovec iov[1];
696 int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
697
698 memset(iov, 0, sizeof(struct iovec));
699
700 file = filp_open(path, flags, 0600);
701 if (IS_ERR(file) || !file || !file->f_dentry) {
702 pr_err("filp_open(%s) for ALUA metadata failed\n",
703 path);
704 return -ENODEV;
705 }
706
707 iov[0].iov_base = &md_buf[0];
708 iov[0].iov_len = md_buf_len;
709
710 old_fs = get_fs();
711 set_fs(get_ds());
712 ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
713 set_fs(old_fs);
714
715 if (ret < 0) {
716 pr_err("Error writing ALUA metadata file: %s\n", path);
717 filp_close(file, NULL);
718 return -EIO;
719 }
720 filp_close(file, NULL);
721
722 return 0;
723 }
724
725 /*
726 * Called with tg_pt_gp->tg_pt_gp_md_mutex held
727 */
728 static int core_alua_update_tpg_primary_metadata(
729 struct t10_alua_tg_pt_gp *tg_pt_gp,
730 int primary_state,
731 unsigned char *md_buf)
732 {
733 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
734 struct t10_wwn *wwn = &su_dev->t10_wwn;
735 char path[ALUA_METADATA_PATH_LEN];
736 int len;
737
738 memset(path, 0, ALUA_METADATA_PATH_LEN);
739
740 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
741 "tg_pt_gp_id=%hu\n"
742 "alua_access_state=0x%02x\n"
743 "alua_access_status=0x%02x\n",
744 tg_pt_gp->tg_pt_gp_id, primary_state,
745 tg_pt_gp->tg_pt_gp_alua_access_status);
746
747 snprintf(path, ALUA_METADATA_PATH_LEN,
748 "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
749 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
750
751 return core_alua_write_tpg_metadata(path, md_buf, len);
752 }
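/*
 * Illustrative example of the resulting primary metadata file written to
 * /var/target/alua/tpgs_<unit_serial>/<tg_pt_gp_name> (values made up):
 *
 *	tg_pt_gp_id=1
 *	alua_access_state=0x02
 *	alua_access_status=0x02
 */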
753
754 static int core_alua_do_transition_tg_pt(
755 struct t10_alua_tg_pt_gp *tg_pt_gp,
756 struct se_port *l_port,
757 struct se_node_acl *nacl,
758 unsigned char *md_buf,
759 int new_state,
760 int explict)
761 {
762 struct se_dev_entry *se_deve;
763 struct se_lun_acl *lacl;
764 struct se_port *port;
765 struct t10_alua_tg_pt_gp_member *mem;
766 int old_state = 0;
767 /*
768 * Save the old primary ALUA access state, and set the current state
769 * to ALUA_ACCESS_STATE_TRANSITION.
770 */
771 old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
772 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
773 ALUA_ACCESS_STATE_TRANSITION);
774 tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
775 ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
776 ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
777 /*
778 * Check for the optional ALUA primary state transition delay
779 */
780 if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
781 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
782
783 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
784 list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
785 tg_pt_gp_mem_list) {
786 port = mem->tg_pt;
787 /*
788 * After an implicit target port asymmetric access state
789 * change, a device server shall establish a unit attention
790 * condition for the initiator port associated with every I_T
791 * nexus with the additional sense code set to ASYMMETRIC
792 * ACCESS STATE CHANGED.
793 *
794 * After an explicit target port asymmetric access state
795 * change, a device server shall establish a unit attention
796 * condition with the additional sense code set to ASYMMETRIC
797 * ACCESS STATE CHANGED for the initiator port associated with
798 * every I_T nexus other than the I_T nexus on which the SET
799 * TARGET PORT GROUPS command was received.
800 */
801 atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
802 smp_mb__after_atomic_inc();
803 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
804
805 spin_lock_bh(&port->sep_alua_lock);
806 list_for_each_entry(se_deve, &port->sep_alua_list,
807 alua_port_list) {
808 lacl = se_deve->se_lun_acl;
809 /*
810 * se_deve->se_lun_acl pointer may be NULL for an
811 * entry created without explicit Node+MappedLUN ACLs
812 */
813 if (!lacl)
814 continue;
815
816 if (explict &&
817 (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
818 (l_port != NULL) && (l_port == port))
819 continue;
820
821 core_scsi3_ua_allocate(lacl->se_lun_nacl,
822 se_deve->mapped_lun, 0x2A,
823 ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
824 }
825 spin_unlock_bh(&port->sep_alua_lock);
826
827 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
828 atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
829 smp_mb__after_atomic_dec();
830 }
831 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
832 /*
833 * Update the ALUA metadata buf that has been allocated in
834 * core_alua_do_port_transition(), this metadata will be written
835 * to struct file.
836 *
837 * Note that there is the case where we do not want to update the
838 * metadata when the saved metadata is being parsed in userspace
839 * when setting the existing port access state and access status.
840 *
841 * Also note that the failure to write out the ALUA metadata to
842 * struct file does NOT affect the actual ALUA transition.
843 */
844 if (tg_pt_gp->tg_pt_gp_write_metadata) {
845 mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
846 core_alua_update_tpg_primary_metadata(tg_pt_gp,
847 new_state, md_buf);
848 mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
849 }
850 /*
851 * Set the current primary ALUA access state to the requested new state
852 */
853 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
854
855 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
856 " from primary access state %s to %s\n", (explict) ? "explict" :
857 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
858 tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
859 core_alua_dump_state(new_state));
860
861 return 0;
862 }
863
864 int core_alua_do_port_transition(
865 struct t10_alua_tg_pt_gp *l_tg_pt_gp,
866 struct se_device *l_dev,
867 struct se_port *l_port,
868 struct se_node_acl *l_nacl,
869 int new_state,
870 int explict)
871 {
872 struct se_device *dev;
873 struct se_port *port;
874 struct se_subsystem_dev *su_dev;
875 struct se_node_acl *nacl;
876 struct t10_alua_lu_gp *lu_gp;
877 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
878 struct t10_alua_tg_pt_gp *tg_pt_gp;
879 unsigned char *md_buf;
880 int primary;
881
882 if (core_alua_check_transition(new_state, &primary) != 0)
883 return -EINVAL;
884
885 md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
886 if (!md_buf) {
887 pr_err("Unable to allocate buf for ALUA metadata\n");
888 return -ENOMEM;
889 }
890
891 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
892 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
893 lu_gp = local_lu_gp_mem->lu_gp;
894 atomic_inc(&lu_gp->lu_gp_ref_cnt);
895 smp_mb__after_atomic_inc();
896 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
897 /*
898 * For storage objects that are members of the 'default_lu_gp',
899 * we only do transition on the passed *l_tg_pt_gp, and not
900 * on all of the matching target port group IDs in default_lu_gp.
901 */
902 if (!lu_gp->lu_gp_id) {
903 /*
904 * core_alua_do_transition_tg_pt() will always return
905 * success.
906 */
907 core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
908 md_buf, new_state, explict);
909 atomic_dec(&lu_gp->lu_gp_ref_cnt);
910 smp_mb__after_atomic_dec();
911 kfree(md_buf);
912 return 0;
913 }
914 /*
915 * For all other LU groups aside from 'default_lu_gp', walk all of
916 * the associated storage objects looking for a matching target port
917 * group ID from the local target port group.
918 */
919 spin_lock(&lu_gp->lu_gp_lock);
920 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
921 lu_gp_mem_list) {
922
923 dev = lu_gp_mem->lu_gp_mem_dev;
924 su_dev = dev->se_sub_dev;
925 atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
926 smp_mb__after_atomic_inc();
927 spin_unlock(&lu_gp->lu_gp_lock);
928
929 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
930 list_for_each_entry(tg_pt_gp,
931 &su_dev->t10_alua.tg_pt_gps_list,
932 tg_pt_gp_list) {
933
934 if (!tg_pt_gp->tg_pt_gp_valid_id)
935 continue;
936 /*
937 * If the target port asymmetric access state is changed
938 * for any target port group accessible via a logical unit
939 * within a LU group, the target port asymmetric access
940 * state for the same target port group accessible via
941 * other logical units in that LU group will also
942 * change.
943 */
944 if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
945 continue;
946
947 if (l_tg_pt_gp == tg_pt_gp) {
948 port = l_port;
949 nacl = l_nacl;
950 } else {
951 port = NULL;
952 nacl = NULL;
953 }
954 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
955 smp_mb__after_atomic_inc();
956 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
957 /*
958 * core_alua_do_transition_tg_pt() will always return
959 * success.
960 */
961 core_alua_do_transition_tg_pt(tg_pt_gp, port,
962 nacl, md_buf, new_state, explict);
963
964 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
965 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
966 smp_mb__after_atomic_dec();
967 }
968 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
969
970 spin_lock(&lu_gp->lu_gp_lock);
971 atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
972 smp_mb__after_atomic_dec();
973 }
974 spin_unlock(&lu_gp->lu_gp_lock);
975
976 pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
977 " Group IDs: %hu %s transition to primary state: %s\n",
978 config_item_name(&lu_gp->lu_gp_group.cg_item),
979 l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
980 core_alua_dump_state(new_state));
981
982 atomic_dec(&lu_gp->lu_gp_ref_cnt);
983 smp_mb__after_atomic_dec();
984 kfree(md_buf);
985 return 0;
986 }
987
988 /*
989 * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
990 */
991 static int core_alua_update_tpg_secondary_metadata(
992 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
993 struct se_port *port,
994 unsigned char *md_buf,
995 u32 md_buf_len)
996 {
997 struct se_portal_group *se_tpg = port->sep_tpg;
998 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
999 int len;
1000
1001 memset(path, 0, ALUA_METADATA_PATH_LEN);
1002 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
1003
1004 len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
1005 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
1006
1007 if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
1008 snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
1009 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
1010
1011 len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
1012 "alua_tg_pt_status=0x%02x\n",
1013 atomic_read(&port->sep_tg_pt_secondary_offline),
1014 port->sep_tg_pt_secondary_stat);
1015
1016 snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
1017 se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
1018 port->sep_lun->unpacked_lun);
1019
1020 return core_alua_write_tpg_metadata(path, md_buf, len);
1021 }
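/*
 * Illustrative example of the resulting secondary metadata file written
 * to /var/target/alua/<fabric_name>/<tpg_wwn>+<tpgt>/lun_<unpacked_lun>
 * (values made up):
 *
 *	alua_tg_pt_offline=1
 *	alua_tg_pt_status=0x01
 */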
1022
1023 static int core_alua_set_tg_pt_secondary_state(
1024 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1025 struct se_port *port,
1026 int explict,
1027 int offline)
1028 {
1029 struct t10_alua_tg_pt_gp *tg_pt_gp;
1030 unsigned char *md_buf;
1031 u32 md_buf_len;
1032 int trans_delay_msecs;
1033
1034 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1035 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1036 if (!tg_pt_gp) {
1037 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1038 pr_err("Unable to complete secondary state"
1039 " transition\n");
1040 return -EINVAL;
1041 }
1042 trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
1043 /*
1044 * Set the secondary ALUA target port access state to OFFLINE
1045 * or release the previously set secondary state for struct se_port
1046 */
1047 if (offline)
1048 atomic_set(&port->sep_tg_pt_secondary_offline, 1);
1049 else
1050 atomic_set(&port->sep_tg_pt_secondary_offline, 0);
1051
1052 md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
1053 port->sep_tg_pt_secondary_stat = (explict) ?
1054 ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
1055 ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
1056
1057 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1058 " to secondary access state: %s\n", (explict) ? "explict" :
1059 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1060 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1061
1062 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1063 /*
1064 * Do the optional transition delay after we set the secondary
1065 * ALUA access state.
1066 */
1067 if (trans_delay_msecs != 0)
1068 msleep_interruptible(trans_delay_msecs);
1069 /*
1070 * See if we need to update the ALUA fabric port metadata for
1071 * secondary state and status
1072 */
1073 if (port->sep_tg_pt_secondary_write_md) {
1074 md_buf = kzalloc(md_buf_len, GFP_KERNEL);
1075 if (!md_buf) {
1076 pr_err("Unable to allocate md_buf for"
1077 " secondary ALUA access metadata\n");
1078 return -ENOMEM;
1079 }
1080 mutex_lock(&port->sep_tg_pt_md_mutex);
1081 core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
1082 md_buf, md_buf_len);
1083 mutex_unlock(&port->sep_tg_pt_md_mutex);
1084
1085 kfree(md_buf);
1086 }
1087
1088 return 0;
1089 }
1090
1091 struct t10_alua_lu_gp *
1092 core_alua_allocate_lu_gp(const char *name, int def_group)
1093 {
1094 struct t10_alua_lu_gp *lu_gp;
1095
1096 lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1097 if (!lu_gp) {
1098 pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1099 return ERR_PTR(-ENOMEM);
1100 }
1101 INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1102 INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1103 spin_lock_init(&lu_gp->lu_gp_lock);
1104 atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1105
1106 if (def_group) {
1107 lu_gp->lu_gp_id = alua_lu_gps_counter++;
1108 lu_gp->lu_gp_valid_id = 1;
1109 alua_lu_gps_count++;
1110 }
1111
1112 return lu_gp;
1113 }
1114
1115 int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1116 {
1117 struct t10_alua_lu_gp *lu_gp_tmp;
1118 u16 lu_gp_id_tmp;
1119 /*
1120 * The lu_gp->lu_gp_id may only be set once..
1121 */
1122 if (lu_gp->lu_gp_valid_id) {
1123 pr_warn("ALUA LU Group already has a valid ID,"
1124 " ignoring request\n");
1125 return -EINVAL;
1126 }
1127
1128 spin_lock(&lu_gps_lock);
1129 if (alua_lu_gps_count == 0x0000ffff) {
1130 pr_err("Maximum ALUA alua_lu_gps_count:"
1131 " 0x0000ffff reached\n");
1132 spin_unlock(&lu_gps_lock);
1133 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1134 return -ENOSPC;
1135 }
1136 again:
1137 lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1138 alua_lu_gps_counter++;
1139
1140 list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1141 if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1142 if (!lu_gp_id)
1143 goto again;
1144
1145 pr_warn("ALUA Logical Unit Group ID: %hu"
1146 " already exists, ignoring request\n",
1147 lu_gp_id);
1148 spin_unlock(&lu_gps_lock);
1149 return -EINVAL;
1150 }
1151 }
1152
1153 lu_gp->lu_gp_id = lu_gp_id_tmp;
1154 lu_gp->lu_gp_valid_id = 1;
1155 list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1156 alua_lu_gps_count++;
1157 spin_unlock(&lu_gps_lock);
1158
1159 return 0;
1160 }
1161
1162 static struct t10_alua_lu_gp_member *
1163 core_alua_allocate_lu_gp_mem(struct se_device *dev)
1164 {
1165 struct t10_alua_lu_gp_member *lu_gp_mem;
1166
1167 lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1168 if (!lu_gp_mem) {
1169 pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1170 return ERR_PTR(-ENOMEM);
1171 }
1172 INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1173 spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1174 atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1175
1176 lu_gp_mem->lu_gp_mem_dev = dev;
1177 dev->dev_alua_lu_gp_mem = lu_gp_mem;
1178
1179 return lu_gp_mem;
1180 }
1181
1182 void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1183 {
1184 struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1185 /*
1186 * Once we have reached this point, config_item_put() has
1187 * already been called from target_core_alua_drop_lu_gp().
1188 *
1189 * Here, we remove the *lu_gp from the global list so that
1190 * no associations can be made while we are releasing
1191 * struct t10_alua_lu_gp.
1192 */
1193 spin_lock(&lu_gps_lock);
1194 list_del(&lu_gp->lu_gp_node);
1195 alua_lu_gps_count--;
1196 spin_unlock(&lu_gps_lock);
1197 /*
1198 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1199 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1200 * released with core_alua_put_lu_gp_from_name()
1201 */
1202 while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1203 cpu_relax();
1204 /*
1205 * Release reference to struct t10_alua_lu_gp * from all associated
1206 * struct se_device.
1207 */
1208 spin_lock(&lu_gp->lu_gp_lock);
1209 list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1210 &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1211 if (lu_gp_mem->lu_gp_assoc) {
1212 list_del(&lu_gp_mem->lu_gp_mem_list);
1213 lu_gp->lu_gp_members--;
1214 lu_gp_mem->lu_gp_assoc = 0;
1215 }
1216 spin_unlock(&lu_gp->lu_gp_lock);
1217 /*
1218 *
1219 * lu_gp_mem is associated with a single
1220 * struct se_device->dev_alua_lu_gp_mem, and is released when
1221 * struct se_device is released via core_alua_free_lu_gp_mem().
1222 *
1223 * If the passed lu_gp does NOT match the default_lu_gp, assume
1224 * we want to re-assocate a given lu_gp_mem with default_lu_gp.
1225 */
1226 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1227 if (lu_gp != default_lu_gp)
1228 __core_alua_attach_lu_gp_mem(lu_gp_mem,
1229 default_lu_gp);
1230 else
1231 lu_gp_mem->lu_gp = NULL;
1232 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1233
1234 spin_lock(&lu_gp->lu_gp_lock);
1235 }
1236 spin_unlock(&lu_gp->lu_gp_lock);
1237
1238 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1239 }
1240
1241 void core_alua_free_lu_gp_mem(struct se_device *dev)
1242 {
1243 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
1244 struct t10_alua *alua = &su_dev->t10_alua;
1245 struct t10_alua_lu_gp *lu_gp;
1246 struct t10_alua_lu_gp_member *lu_gp_mem;
1247
1248 if (alua->alua_type != SPC3_ALUA_EMULATED)
1249 return;
1250
1251 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1252 if (!lu_gp_mem)
1253 return;
1254
1255 while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1256 cpu_relax();
1257
1258 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1259 lu_gp = lu_gp_mem->lu_gp;
1260 if (lu_gp) {
1261 spin_lock(&lu_gp->lu_gp_lock);
1262 if (lu_gp_mem->lu_gp_assoc) {
1263 list_del(&lu_gp_mem->lu_gp_mem_list);
1264 lu_gp->lu_gp_members--;
1265 lu_gp_mem->lu_gp_assoc = 0;
1266 }
1267 spin_unlock(&lu_gp->lu_gp_lock);
1268 lu_gp_mem->lu_gp = NULL;
1269 }
1270 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1271
1272 kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1273 }
1274
1275 struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1276 {
1277 struct t10_alua_lu_gp *lu_gp;
1278 struct config_item *ci;
1279
1280 spin_lock(&lu_gps_lock);
1281 list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1282 if (!lu_gp->lu_gp_valid_id)
1283 continue;
1284 ci = &lu_gp->lu_gp_group.cg_item;
1285 if (!strcmp(config_item_name(ci), name)) {
1286 atomic_inc(&lu_gp->lu_gp_ref_cnt);
1287 spin_unlock(&lu_gps_lock);
1288 return lu_gp;
1289 }
1290 }
1291 spin_unlock(&lu_gps_lock);
1292
1293 return NULL;
1294 }
1295
1296 void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1297 {
1298 spin_lock(&lu_gps_lock);
1299 atomic_dec(&lu_gp->lu_gp_ref_cnt);
1300 spin_unlock(&lu_gps_lock);
1301 }
1302
1303 /*
1304 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1305 */
1306 void __core_alua_attach_lu_gp_mem(
1307 struct t10_alua_lu_gp_member *lu_gp_mem,
1308 struct t10_alua_lu_gp *lu_gp)
1309 {
1310 spin_lock(&lu_gp->lu_gp_lock);
1311 lu_gp_mem->lu_gp = lu_gp;
1312 lu_gp_mem->lu_gp_assoc = 1;
1313 list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1314 lu_gp->lu_gp_members++;
1315 spin_unlock(&lu_gp->lu_gp_lock);
1316 }
1317
1318 /*
1319 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1320 */
1321 void __core_alua_drop_lu_gp_mem(
1322 struct t10_alua_lu_gp_member *lu_gp_mem,
1323 struct t10_alua_lu_gp *lu_gp)
1324 {
1325 spin_lock(&lu_gp->lu_gp_lock);
1326 list_del(&lu_gp_mem->lu_gp_mem_list);
1327 lu_gp_mem->lu_gp = NULL;
1328 lu_gp_mem->lu_gp_assoc = 0;
1329 lu_gp->lu_gp_members--;
1330 spin_unlock(&lu_gp->lu_gp_lock);
1331 }
1332
1333 struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
1334 struct se_subsystem_dev *su_dev,
1335 const char *name,
1336 int def_group)
1337 {
1338 struct t10_alua_tg_pt_gp *tg_pt_gp;
1339
1340 tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1341 if (!tg_pt_gp) {
1342 pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1343 return NULL;
1344 }
1345 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1346 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
1347 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
1348 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1349 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1350 tg_pt_gp->tg_pt_gp_su_dev = su_dev;
1351 tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
1352 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1353 ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
1354 /*
1355 * Enable both explicit and implicit ALUA support by default
1356 */
1357 tg_pt_gp->tg_pt_gp_alua_access_type =
1358 TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
1359 /*
1360 * Set the default Active/NonOptimized Delay in milliseconds
1361 */
1362 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1363 tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1364
1365 if (def_group) {
1366 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1367 tg_pt_gp->tg_pt_gp_id =
1368 su_dev->t10_alua.alua_tg_pt_gps_counter++;
1369 tg_pt_gp->tg_pt_gp_valid_id = 1;
1370 su_dev->t10_alua.alua_tg_pt_gps_count++;
1371 list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1372 &su_dev->t10_alua.tg_pt_gps_list);
1373 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1374 }
1375
1376 return tg_pt_gp;
1377 }
1378
1379 int core_alua_set_tg_pt_gp_id(
1380 struct t10_alua_tg_pt_gp *tg_pt_gp,
1381 u16 tg_pt_gp_id)
1382 {
1383 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1384 struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1385 u16 tg_pt_gp_id_tmp;
1386 /*
1387 * The tg_pt_gp->tg_pt_gp_id may only be set once..
1388 */
1389 if (tg_pt_gp->tg_pt_gp_valid_id) {
1390 pr_warn("ALUA TG PT Group already has a valid ID,"
1391 " ignoring request\n");
1392 return -EINVAL;
1393 }
1394
1395 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1396 if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1397 pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1398 " 0x0000ffff reached\n");
1399 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1400 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1401 return -ENOSPC;
1402 }
1403 again:
1404 tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1405 su_dev->t10_alua.alua_tg_pt_gps_counter++;
1406
1407 list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
1408 tg_pt_gp_list) {
1409 if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1410 if (!tg_pt_gp_id)
1411 goto again;
1412
1413 pr_err("ALUA Target Port Group ID: %hu already"
1414 " exists, ignoring request\n", tg_pt_gp_id);
1415 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1416 return -EINVAL;
1417 }
1418 }
1419
1420 tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1421 tg_pt_gp->tg_pt_gp_valid_id = 1;
1422 list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1423 &su_dev->t10_alua.tg_pt_gps_list);
1424 su_dev->t10_alua.alua_tg_pt_gps_count++;
1425 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1426
1427 return 0;
1428 }
1429
1430 struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
1431 struct se_port *port)
1432 {
1433 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1434
1435 tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
1436 GFP_KERNEL);
1437 if (!tg_pt_gp_mem) {
1438 pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
1439 return ERR_PTR(-ENOMEM);
1440 }
1441 INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1442 spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1443 atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
1444
1445 tg_pt_gp_mem->tg_pt = port;
1446 port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
1447
1448 return tg_pt_gp_mem;
1449 }
1450
1451 void core_alua_free_tg_pt_gp(
1452 struct t10_alua_tg_pt_gp *tg_pt_gp)
1453 {
1454 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1455 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
1456 /*
1457 * Once we have reached this point, config_item_put() has already
1458 * been called from target_core_alua_drop_tg_pt_gp().
1459 *
1460 * Here we remove *tg_pt_gp from the global list so that
1461 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1462 * can be made while we are releasing struct t10_alua_tg_pt_gp.
1463 */
1464 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1465 list_del(&tg_pt_gp->tg_pt_gp_list);
1466 su_dev->t10_alua.alua_tg_pt_gps_count--;
1467 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1468 /*
1469 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
1470 * core_alua_get_tg_pt_gp_by_name() in
1471 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1472 * to be released with core_alua_put_tg_pt_gp_from_name().
1473 */
1474 while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1475 cpu_relax();
1476 /*
1477 * Release reference to struct t10_alua_tg_pt_gp from all associated
1478 * struct se_port.
1479 */
1480 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1481 list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
1482 &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
1483 if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1484 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1485 tg_pt_gp->tg_pt_gp_members--;
1486 tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1487 }
1488 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1489 /*
1490 * tg_pt_gp_mem is associated with a single
1491 * se_port->sep_alua_tg_pt_gp_mem, and is released via
1492 * core_alua_free_tg_pt_gp_mem().
1493 *
1494 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1495 * assume we want to re-associate a given tg_pt_gp_mem with
1496 * default_tg_pt_gp.
1497 */
1498 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1499 if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
1500 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1501 su_dev->t10_alua.default_tg_pt_gp);
1502 } else
1503 tg_pt_gp_mem->tg_pt_gp = NULL;
1504 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1505
1506 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1507 }
1508 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1509
1510 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1511 }
1512
1513 void core_alua_free_tg_pt_gp_mem(struct se_port *port)
1514 {
1515 struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1516 struct t10_alua *alua = &su_dev->t10_alua;
1517 struct t10_alua_tg_pt_gp *tg_pt_gp;
1518 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1519
1520 if (alua->alua_type != SPC3_ALUA_EMULATED)
1521 return;
1522
1523 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1524 if (!tg_pt_gp_mem)
1525 return;
1526
1527 while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
1528 cpu_relax();
1529
1530 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1531 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1532 if (tg_pt_gp) {
1533 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1534 if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1535 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1536 tg_pt_gp->tg_pt_gp_members--;
1537 tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1538 }
1539 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1540 tg_pt_gp_mem->tg_pt_gp = NULL;
1541 }
1542 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1543
1544 kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
1545 }
1546
1547 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1548 struct se_subsystem_dev *su_dev,
1549 const char *name)
1550 {
1551 struct t10_alua_tg_pt_gp *tg_pt_gp;
1552 struct config_item *ci;
1553
1554 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1555 list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
1556 tg_pt_gp_list) {
1557 if (!tg_pt_gp->tg_pt_gp_valid_id)
1558 continue;
1559 ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1560 if (!strcmp(config_item_name(ci), name)) {
1561 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1562 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1563 return tg_pt_gp;
1564 }
1565 }
1566 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1567
1568 return NULL;
1569 }
1570
1571 static void core_alua_put_tg_pt_gp_from_name(
1572 struct t10_alua_tg_pt_gp *tg_pt_gp)
1573 {
1574 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1575
1576 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1577 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1578 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1579 }
1580
1581 /*
1582 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1583 */
1584 void __core_alua_attach_tg_pt_gp_mem(
1585 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1586 struct t10_alua_tg_pt_gp *tg_pt_gp)
1587 {
1588 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1589 tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
1590 tg_pt_gp_mem->tg_pt_gp_assoc = 1;
1591 list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
1592 &tg_pt_gp->tg_pt_gp_mem_list);
1593 tg_pt_gp->tg_pt_gp_members++;
1594 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1595 }
1596
1597 /*
1598 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1599 */
1600 static void __core_alua_drop_tg_pt_gp_mem(
1601 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1602 struct t10_alua_tg_pt_gp *tg_pt_gp)
1603 {
1604 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1605 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1606 tg_pt_gp_mem->tg_pt_gp = NULL;
1607 tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1608 tg_pt_gp->tg_pt_gp_members--;
1609 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1610 }
1611
1612 ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
1613 {
1614 struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1615 struct config_item *tg_pt_ci;
1616 struct t10_alua *alua = &su_dev->t10_alua;
1617 struct t10_alua_tg_pt_gp *tg_pt_gp;
1618 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1619 ssize_t len = 0;
1620
1621 if (alua->alua_type != SPC3_ALUA_EMULATED)
1622 return len;
1623
1624 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1625 if (!tg_pt_gp_mem)
1626 return len;
1627
1628 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1629 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1630 if (tg_pt_gp) {
1631 tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1632 len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
1633 " %hu\nTG Port Primary Access State: %s\nTG Port "
1634 "Primary Access Status: %s\nTG Port Secondary Access"
1635 " State: %s\nTG Port Secondary Access Status: %s\n",
1636 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1637 core_alua_dump_state(atomic_read(
1638 &tg_pt_gp->tg_pt_gp_alua_access_state)),
1639 core_alua_dump_status(
1640 tg_pt_gp->tg_pt_gp_alua_access_status),
1641 (atomic_read(&port->sep_tg_pt_secondary_offline)) ?
1642 "Offline" : "None",
1643 core_alua_dump_status(port->sep_tg_pt_secondary_stat));
1644 }
1645 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1646
1647 return len;
1648 }
1649
1650 ssize_t core_alua_store_tg_pt_gp_info(
1651 struct se_port *port,
1652 const char *page,
1653 size_t count)
1654 {
1655 struct se_portal_group *tpg;
1656 struct se_lun *lun;
1657 struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1658 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1659 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1660 unsigned char buf[TG_PT_GROUP_NAME_BUF];
1661 int move = 0;
1662
1663 tpg = port->sep_tpg;
1664 lun = port->sep_lun;
1665
1666 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
1667 pr_warn("SPC3_ALUA_EMULATED not enabled for"
1668 " %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1669 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1670 config_item_name(&lun->lun_group.cg_item));
1671 return -EINVAL;
1672 }
1673
1674 if (count > TG_PT_GROUP_NAME_BUF) {
1675 pr_err("ALUA Target Port Group alias too large!\n");
1676 return -EINVAL;
1677 }
1678 memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1679 memcpy(buf, page, count);
1680 /*
1681 * Any ALUA target port group alias besides "NULL" means we will be
1682 * making a new group association.
1683 */
1684 if (strcmp(strstrip(buf), "NULL")) {
1685 /*
1686 * core_alua_get_tg_pt_gp_by_name() will increment reference to
1687 * struct t10_alua_tg_pt_gp. This reference is released with
1688 * core_alua_put_tg_pt_gp_from_name() below.
1689 */
1690 tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
1691 strstrip(buf));
1692 if (!tg_pt_gp_new)
1693 return -ENODEV;
1694 }
1695 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1696 if (!tg_pt_gp_mem) {
1697 if (tg_pt_gp_new)
1698 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1699 pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
1700 return -EINVAL;
1701 }
1702
1703 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1704 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1705 if (tg_pt_gp) {
1706 /*
1707 * Clearing an existing tg_pt_gp association, and replacing
1708 * with the default_tg_pt_gp.
1709 */
1710 if (!tg_pt_gp_new) {
1711 pr_debug("Target_Core_ConfigFS: Moving"
1712 " %s/tpgt_%hu/%s from ALUA Target Port Group:"
1713 " alua/%s, ID: %hu back to"
1714 " default_tg_pt_gp\n",
1715 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1716 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1717 config_item_name(&lun->lun_group.cg_item),
1718 config_item_name(
1719 &tg_pt_gp->tg_pt_gp_group.cg_item),
1720 tg_pt_gp->tg_pt_gp_id);
1721
1722 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
1723 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1724 su_dev->t10_alua.default_tg_pt_gp);
1725 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1726
1727 return count;
1728 }
1729 /*
1730 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
1731 */
1732 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
1733 move = 1;
1734 }
1735 /*
1736 * Associate tg_pt_gp_mem with tg_pt_gp_new.
1737 */
1738 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
1739 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1740 pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
1741 " Target Port Group: alua/%s, ID: %hu\n", (move) ?
1742 "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1743 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1744 config_item_name(&lun->lun_group.cg_item),
1745 config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
1746 tg_pt_gp_new->tg_pt_gp_id);
1747
1748 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1749 return count;
1750 }
1751
1752 ssize_t core_alua_show_access_type(
1753 struct t10_alua_tg_pt_gp *tg_pt_gp,
1754 char *page)
1755 {
1756 if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
1757 (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
1758 return sprintf(page, "Implict and Explict\n");
1759 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
1760 return sprintf(page, "Implict\n");
1761 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
1762 return sprintf(page, "Explict\n");
1763 else
1764 return sprintf(page, "None\n");
1765 }
1766
1767 ssize_t core_alua_store_access_type(
1768 struct t10_alua_tg_pt_gp *tg_pt_gp,
1769 const char *page,
1770 size_t count)
1771 {
1772 unsigned long tmp;
1773 int ret;
1774
1775 ret = strict_strtoul(page, 0, &tmp);
1776 if (ret < 0) {
1777 pr_err("Unable to extract alua_access_type\n");
1778 return -EINVAL;
1779 }
1780 if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
1781 pr_err("Illegal value for alua_access_type:"
1782 " %lu\n", tmp);
1783 return -EINVAL;
1784 }
1785 if (tmp == 3)
1786 tg_pt_gp->tg_pt_gp_alua_access_type =
1787 TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
1788 else if (tmp == 2)
1789 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
1790 else if (tmp == 1)
1791 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
1792 else
1793 tg_pt_gp->tg_pt_gp_alua_access_type = 0;
1794
1795 return count;
1796 }
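/*
 * The value written maps to the access type bits handled above:
 * 0 = none, 1 = implicit, 2 = explicit, 3 = implicit and explicit.
 * Example usage from userspace (configfs path assumed from the usual
 * target layout, shown only for illustration):
 *
 *	echo 3 > /sys/kernel/config/target/core/$HBA/$DEV/alua/$TG_PT_GP/alua_access_type
 */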
1797
1798 ssize_t core_alua_show_nonop_delay_msecs(
1799 struct t10_alua_tg_pt_gp *tg_pt_gp,
1800 char *page)
1801 {
1802 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
1803 }
1804
1805 ssize_t core_alua_store_nonop_delay_msecs(
1806 struct t10_alua_tg_pt_gp *tg_pt_gp,
1807 const char *page,
1808 size_t count)
1809 {
1810 unsigned long tmp;
1811 int ret;
1812
1813 ret = strict_strtoul(page, 0, &tmp);
1814 if (ret < 0) {
1815 pr_err("Unable to extract nonop_delay_msecs\n");
1816 return -EINVAL;
1817 }
1818 if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
1819 pr_err("Passed nonop_delay_msecs: %lu, exceeds"
1820 " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
1821 ALUA_MAX_NONOP_DELAY_MSECS);
1822 return -EINVAL;
1823 }
1824 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
1825
1826 return count;
1827 }
1828
1829 ssize_t core_alua_show_trans_delay_msecs(
1830 struct t10_alua_tg_pt_gp *tg_pt_gp,
1831 char *page)
1832 {
1833 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1834 }
1835
1836 ssize_t core_alua_store_trans_delay_msecs(
1837 struct t10_alua_tg_pt_gp *tg_pt_gp,
1838 const char *page,
1839 size_t count)
1840 {
1841 unsigned long tmp;
1842 int ret;
1843
1844 ret = strict_strtoul(page, 0, &tmp);
1845 if (ret < 0) {
1846 pr_err("Unable to extract trans_delay_msecs\n");
1847 return -EINVAL;
1848 }
1849 if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
1850 pr_err("Passed trans_delay_msecs: %lu, exceeds"
1851 " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
1852 ALUA_MAX_TRANS_DELAY_MSECS);
1853 return -EINVAL;
1854 }
1855 tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
1856
1857 return count;
1858 }
1859
1860 ssize_t core_alua_show_preferred_bit(
1861 struct t10_alua_tg_pt_gp *tg_pt_gp,
1862 char *page)
1863 {
1864 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
1865 }
1866
1867 ssize_t core_alua_store_preferred_bit(
1868 struct t10_alua_tg_pt_gp *tg_pt_gp,
1869 const char *page,
1870 size_t count)
1871 {
1872 unsigned long tmp;
1873 int ret;
1874
1875 ret = strict_strtoul(page, 0, &tmp);
1876 if (ret < 0) {
1877 pr_err("Unable to extract preferred ALUA value\n");
1878 return -EINVAL;
1879 }
1880 if ((tmp != 0) && (tmp != 1)) {
1881 pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
1882 return -EINVAL;
1883 }
1884 tg_pt_gp->tg_pt_gp_pref = (int)tmp;
1885
1886 return count;
1887 }
1888
1889 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
1890 {
1891 if (!lun->lun_sep)
1892 return -ENODEV;
1893
1894 return sprintf(page, "%d\n",
1895 atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
1896 }
1897
1898 ssize_t core_alua_store_offline_bit(
1899 struct se_lun *lun,
1900 const char *page,
1901 size_t count)
1902 {
1903 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1904 unsigned long tmp;
1905 int ret;
1906
1907 if (!lun->lun_sep)
1908 return -ENODEV;
1909
1910 ret = strict_strtoul(page, 0, &tmp);
1911 if (ret < 0) {
1912 pr_err("Unable to extract alua_tg_pt_offline value\n");
1913 return -EINVAL;
1914 }
1915 if ((tmp != 0) && (tmp != 1)) {
1916 pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
1917 tmp);
1918 return -EINVAL;
1919 }
1920 tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
1921 if (!tg_pt_gp_mem) {
1922 pr_err("Unable to locate *tg_pt_gp_mem\n");
1923 return -EINVAL;
1924 }
1925
1926 ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
1927 lun->lun_sep, 0, (int)tmp);
1928 if (ret < 0)
1929 return -EINVAL;
1930
1931 return count;
1932 }
1933
1934 ssize_t core_alua_show_secondary_status(
1935 struct se_lun *lun,
1936 char *page)
1937 {
1938 return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
1939 }
1940
1941 ssize_t core_alua_store_secondary_status(
1942 struct se_lun *lun,
1943 const char *page,
1944 size_t count)
1945 {
1946 unsigned long tmp;
1947 int ret;
1948
1949 ret = strict_strtoul(page, 0, &tmp);
1950 if (ret < 0) {
1951 pr_err("Unable to extract alua_tg_pt_status\n");
1952 return -EINVAL;
1953 }
1954 if ((tmp != ALUA_STATUS_NONE) &&
1955 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
1956 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
1957 pr_err("Illegal value for alua_tg_pt_status: %lu\n",
1958 tmp);
1959 return -EINVAL;
1960 }
1961 lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
1962
1963 return count;
1964 }
1965
1966 ssize_t core_alua_show_secondary_write_metadata(
1967 struct se_lun *lun,
1968 char *page)
1969 {
1970 return sprintf(page, "%d\n",
1971 lun->lun_sep->sep_tg_pt_secondary_write_md);
1972 }
1973
1974 ssize_t core_alua_store_secondary_write_metadata(
1975 struct se_lun *lun,
1976 const char *page,
1977 size_t count)
1978 {
1979 unsigned long tmp;
1980 int ret;
1981
1982 ret = strict_strtoul(page, 0, &tmp);
1983 if (ret < 0) {
1984 pr_err("Unable to extract alua_tg_pt_write_md\n");
1985 return -EINVAL;
1986 }
1987 if ((tmp != 0) && (tmp != 1)) {
1988 pr_err("Illegal value for alua_tg_pt_write_md:"
1989 " %lu\n", tmp);
1990 return -EINVAL;
1991 }
1992 lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
1993
1994 return count;
1995 }
1996
1997 int core_setup_alua(struct se_device *dev, int force_pt)
1998 {
1999 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2000 struct t10_alua *alua = &su_dev->t10_alua;
2001 struct t10_alua_lu_gp_member *lu_gp_mem;
2002 /*
2003 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
2004 * of the Underlying SCSI hardware. In Linux/SCSI terms, this can
2005 * cause a problem because libata and some SATA RAID HBAs appear
2006 * under Linux/SCSI, but emulate SCSI logic themselves.
2007 */
2008 if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
2009 !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
2010 alua->alua_type = SPC_ALUA_PASSTHROUGH;
2011 alua->alua_state_check = &core_alua_state_check_nop;
2012 pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
2013 " emulation\n", dev->transport->name);
2014 return 0;
2015 }
2016 /*
2017 * If SPC-3 or above is reported by real or emulated struct se_device,
2018 * use emulated ALUA.
2019 */
2020 if (dev->transport->get_device_rev(dev) >= SCSI_3) {
2021 pr_debug("%s: Enabling ALUA Emulation for SPC-3"
2022 " device\n", dev->transport->name);
2023 /*
2024 * Associate this struct se_device with the default ALUA
2025 * LUN Group.
2026 */
2027 lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2028 if (IS_ERR(lu_gp_mem))
2029 return PTR_ERR(lu_gp_mem);
2030
2031 alua->alua_type = SPC3_ALUA_EMULATED;
2032 alua->alua_state_check = &core_alua_state_check;
2033 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2034 __core_alua_attach_lu_gp_mem(lu_gp_mem,
2035 default_lu_gp);
2036 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2037
2038 pr_debug("%s: Adding to default ALUA LU Group:"
2039 " core/alua/lu_gps/default_lu_gp\n",
2040 dev->transport->name);
2041 } else {
2042 alua->alua_type = SPC2_ALUA_DISABLED;
2043 alua->alua_state_check = &core_alua_state_check_nop;
2044 pr_debug("%s: Disabling ALUA Emulation for SPC-2"
2045 " device\n", dev->transport->name);
2046 }
2047
2048 return 0;
2049 }