target: Rename core_tpg_{pre,post}_addlun for clarity
[deliverable/linux.git] / drivers/target/target_core_device.c
1 /*******************************************************************************
2 * Filename: target_core_device.c (based on iscsi_target_device.c)
3 *
4 * This file contains the TCM Virtual Device and Disk Transport
5 * agnostic related functions.
6 *
7 * (c) Copyright 2003-2013 Datera, Inc.
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 *
25 ******************************************************************************/
26
27 #include <linux/net.h>
28 #include <linux/string.h>
29 #include <linux/delay.h>
30 #include <linux/timer.h>
31 #include <linux/slab.h>
32 #include <linux/spinlock.h>
33 #include <linux/kthread.h>
34 #include <linux/in.h>
35 #include <linux/export.h>
36 #include <net/sock.h>
37 #include <net/tcp.h>
38 #include <scsi/scsi.h>
39 #include <scsi/scsi_device.h>
40
41 #include <target/target_core_base.h>
42 #include <target/target_core_backend.h>
43 #include <target/target_core_fabric.h>
44
45 #include "target_core_internal.h"
46 #include "target_core_alua.h"
47 #include "target_core_pr.h"
48 #include "target_core_ua.h"
49
50 DEFINE_MUTEX(g_device_mutex);
51 LIST_HEAD(g_device_list);
52
53 static struct se_hba *lun0_hba;
54 /* not static, needed by tpg.c */
55 struct se_device *g_lun0_dev;
56
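/*
 * transport_lookup_cmd_lun():
 *
 * Resolve unpacked_lun through the session's node ACL device list and
 * attach the matching struct se_lun + struct se_device to se_cmd,
 * taking a percpu lun_ref. Falls back to the TPG's write-protected
 * virtual LUN 0 when no MappedLUN=0 exists, so that REPORT LUNS et al
 * still complete for this initiator port.
 */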
57 sense_reason_t
58 transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
59 {
60 struct se_lun *se_lun = NULL;
61 struct se_session *se_sess = se_cmd->se_sess;
62 struct se_device *dev;
63 unsigned long flags;
64
65 if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
66 return TCM_NON_EXISTENT_LUN;
67
68 spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
69 se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
70 if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
71 struct se_dev_entry *deve = se_cmd->se_deve;
72
73 deve->total_cmds++;
74
75 if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
76 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
77 pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
78 " Access for 0x%08x\n",
79 se_cmd->se_tfo->get_fabric_name(),
80 unpacked_lun);
81 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
82 return TCM_WRITE_PROTECTED;
83 }
84
85 if (se_cmd->data_direction == DMA_TO_DEVICE)
86 deve->write_bytes += se_cmd->data_length;
87 else if (se_cmd->data_direction == DMA_FROM_DEVICE)
88 deve->read_bytes += se_cmd->data_length;
89
90 se_lun = deve->se_lun;
91 se_cmd->se_lun = deve->se_lun;
92 se_cmd->pr_res_key = deve->pr_res_key;
93 se_cmd->orig_fe_lun = unpacked_lun;
94 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
95
96 percpu_ref_get(&se_lun->lun_ref);
97 se_cmd->lun_ref_active = true;
98 }
99 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
100
101 if (!se_lun) {
102 /*
103 * Use the se_portal_group->tpg_virt_lun0 to allow for
104 * REPORT_LUNS, et al to be returned when no active
105 * MappedLUN=0 exists for this Initiator Port.
106 */
107 if (unpacked_lun != 0) {
108 pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
109 " Access for 0x%08x\n",
110 se_cmd->se_tfo->get_fabric_name(),
111 unpacked_lun);
112 return TCM_NON_EXISTENT_LUN;
113 }
114 /*
115 * Force WRITE PROTECT for virtual LUN 0
116 */
117 if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
118 (se_cmd->data_direction != DMA_NONE))
119 return TCM_WRITE_PROTECTED;
120
121 se_lun = &se_sess->se_tpg->tpg_virt_lun0;
122 se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
123 se_cmd->orig_fe_lun = 0;
124 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
125
126 percpu_ref_get(&se_lun->lun_ref);
127 se_cmd->lun_ref_active = true;
128 }
129
130 /* Directly associate cmd with se_dev */
131 se_cmd->se_dev = se_lun->lun_se_dev;
132
133 dev = se_lun->lun_se_dev;
134 atomic_long_inc(&dev->num_cmds);
135 if (se_cmd->data_direction == DMA_TO_DEVICE)
136 atomic_long_add(se_cmd->data_length, &dev->write_bytes);
137 else if (se_cmd->data_direction == DMA_FROM_DEVICE)
138 atomic_long_add(se_cmd->data_length, &dev->read_bytes);
139
140 return 0;
141 }
142 EXPORT_SYMBOL(transport_lookup_cmd_lun);
143
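/*
 * transport_lookup_tmr_lun():
 *
 * Resolve unpacked_lun for a task management request, attach the
 * struct se_lun + struct se_device to se_cmd, and add the associated
 * struct se_tmr_req to the device's dev_tmr_list.
 */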
144 int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
145 {
146 struct se_dev_entry *deve;
147 struct se_lun *se_lun = NULL;
148 struct se_session *se_sess = se_cmd->se_sess;
149 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
150 unsigned long flags;
151
152 if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
153 return -ENODEV;
154
155 spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
156 se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
157 deve = se_cmd->se_deve;
158
159 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
160 se_tmr->tmr_lun = deve->se_lun;
161 se_cmd->se_lun = deve->se_lun;
162 se_lun = deve->se_lun;
163 se_cmd->pr_res_key = deve->pr_res_key;
164 se_cmd->orig_fe_lun = unpacked_lun;
165 }
166 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
167
168 if (!se_lun) {
169 pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
170 " Access for 0x%08x\n",
171 se_cmd->se_tfo->get_fabric_name(),
172 unpacked_lun);
173 return -ENODEV;
174 }
175
176 /* Directly associate cmd with se_dev */
177 se_cmd->se_dev = se_lun->lun_se_dev;
178 se_tmr->tmr_dev = se_lun->lun_se_dev;
179
180 spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
181 list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
182 spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
183
184 return 0;
185 }
186 EXPORT_SYMBOL(transport_lookup_tmr_lun);
187
188 /*
189 * This function is called from core_scsi3_emulate_pro_register_and_move()
190 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
191 * when a matching rtpi is found.
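 * Callers are expected to drop the reference again once the PR
 * operation completes; core_disable_device_list_for_node() busy-waits
 * for pr_ref_count to reach zero before tearing down the entry.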
192 */
193 struct se_dev_entry *core_get_se_deve_from_rtpi(
194 struct se_node_acl *nacl,
195 u16 rtpi)
196 {
197 struct se_dev_entry *deve;
198 struct se_lun *lun;
199 struct se_port *port;
200 struct se_portal_group *tpg = nacl->se_tpg;
201 u32 i;
202
203 spin_lock_irq(&nacl->device_list_lock);
204 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
205 deve = nacl->device_list[i];
206
207 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
208 continue;
209
210 lun = deve->se_lun;
211 if (!lun) {
212 pr_err("%s device entries device pointer is"
213 " NULL, but Initiator has access.\n",
214 tpg->se_tpg_tfo->get_fabric_name());
215 continue;
216 }
217 port = lun->lun_sep;
218 if (!port) {
219 pr_err("%s device entries device pointer is"
220 " NULL, but Initiator has access.\n",
221 tpg->se_tpg_tfo->get_fabric_name());
222 continue;
223 }
224 if (port->sep_rtpi != rtpi)
225 continue;
226
227 atomic_inc(&deve->pr_ref_count);
228 smp_mb__after_atomic_inc();
229 spin_unlock_irq(&nacl->device_list_lock);
230
231 return deve;
232 }
233 spin_unlock_irq(&nacl->device_list_lock);
234
235 return NULL;
236 }
237
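/*
 * core_free_device_list_for_node():
 *
 * Disable every active mapped LUN entry for the node ACL and free the
 * device_list array itself, typically when the node ACL is released.
 */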
238 int core_free_device_list_for_node(
239 struct se_node_acl *nacl,
240 struct se_portal_group *tpg)
241 {
242 struct se_dev_entry *deve;
243 struct se_lun *lun;
244 u32 i;
245
246 if (!nacl->device_list)
247 return 0;
248
249 spin_lock_irq(&nacl->device_list_lock);
250 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
251 deve = nacl->device_list[i];
252
253 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
254 continue;
255
256 if (!deve->se_lun) {
257 pr_err("%s device entries device pointer is"
258 " NULL, but Initiator has access.\n",
259 tpg->se_tpg_tfo->get_fabric_name());
260 continue;
261 }
262 lun = deve->se_lun;
263
264 spin_unlock_irq(&nacl->device_list_lock);
265 core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
266 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
267 spin_lock_irq(&nacl->device_list_lock);
268 }
269 spin_unlock_irq(&nacl->device_list_lock);
270
271 array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
272 nacl->device_list = NULL;
273
274 return 0;
275 }
276
277 void core_update_device_list_access(
278 u32 mapped_lun,
279 u32 lun_access,
280 struct se_node_acl *nacl)
281 {
282 struct se_dev_entry *deve;
283
284 spin_lock_irq(&nacl->device_list_lock);
285 deve = nacl->device_list[mapped_lun];
286 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
287 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
288 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
289 } else {
290 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
291 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
292 }
293 spin_unlock_irq(&nacl->device_list_lock);
294 }
295
296 /* core_enable_device_list_for_node():
297 * Map a struct se_lun into nacl->device_list[mapped_lun] with the
298 * requested access flags; handles the demo mode -> explicit ACL case.
299 */
300 int core_enable_device_list_for_node(
301 struct se_lun *lun,
302 struct se_lun_acl *lun_acl,
303 u32 mapped_lun,
304 u32 lun_access,
305 struct se_node_acl *nacl,
306 struct se_portal_group *tpg)
307 {
308 struct se_port *port = lun->lun_sep;
309 struct se_dev_entry *deve;
310
311 spin_lock_irq(&nacl->device_list_lock);
312
313 deve = nacl->device_list[mapped_lun];
314
315 /*
316 * Check if the call is handling demo mode -> explicit LUN ACL
317 * transition. This transition must be for the same struct se_lun
318 * + mapped_lun that was set up in demo mode.
319 */
320 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
321 if (deve->se_lun_acl != NULL) {
322 pr_err("struct se_dev_entry->se_lun_acl"
323 " already set for demo mode -> explicit"
324 " LUN ACL transition\n");
325 spin_unlock_irq(&nacl->device_list_lock);
326 return -EINVAL;
327 }
328 if (deve->se_lun != lun) {
329 pr_err("struct se_dev_entry->se_lun does"
330 " match passed struct se_lun for demo mode"
331 " -> explicit LUN ACL transition\n");
332 spin_unlock_irq(&nacl->device_list_lock);
333 return -EINVAL;
334 }
335 deve->se_lun_acl = lun_acl;
336
337 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
338 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
339 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
340 } else {
341 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
342 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
343 }
344
345 spin_unlock_irq(&nacl->device_list_lock);
346 return 0;
347 }
348
349 deve->se_lun = lun;
350 deve->se_lun_acl = lun_acl;
351 deve->mapped_lun = mapped_lun;
352 deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
353
354 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
355 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
356 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
357 } else {
358 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
359 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
360 }
361
362 deve->creation_time = get_jiffies_64();
363 deve->attach_count++;
364 spin_unlock_irq(&nacl->device_list_lock);
365
366 spin_lock_bh(&port->sep_alua_lock);
367 list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
368 spin_unlock_bh(&port->sep_alua_lock);
369
370 return 0;
371 }
372
373 /* core_disable_device_list_for_node():
374 * Tear down nacl->device_list[mapped_lun]: remove the ALUA port
375 * list entry, release UAs and NodeACL PR state, and clear the entry.
376 */
377 int core_disable_device_list_for_node(
378 struct se_lun *lun,
379 struct se_lun_acl *lun_acl,
380 u32 mapped_lun,
381 u32 lun_access,
382 struct se_node_acl *nacl,
383 struct se_portal_group *tpg)
384 {
385 struct se_port *port = lun->lun_sep;
386 struct se_dev_entry *deve = nacl->device_list[mapped_lun];
387
388 /*
389 * If the MappedLUN entry is being disabled, the entry in
390 * port->sep_alua_list must be removed now before clearing the
391 * struct se_dev_entry pointers below as logic in
392 * core_alua_do_transition_tg_pt() depends on these being present.
393 *
394 * deve->se_lun_acl will be NULL for demo-mode created LUNs
395 * that have not been explicitly converted to MappedLUNs ->
396 * struct se_lun_acl, but we remove deve->alua_port_list from
397 * port->sep_alua_list. This also means that active UAs and
398 * NodeACL context specific PR metadata for demo-mode
399 * MappedLUN *deve will be released below.
400 */
401 spin_lock_bh(&port->sep_alua_lock);
402 list_del(&deve->alua_port_list);
403 spin_unlock_bh(&port->sep_alua_lock);
404 /*
405 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
406 * PR operation to complete.
407 */
408 while (atomic_read(&deve->pr_ref_count) != 0)
409 cpu_relax();
410
411 spin_lock_irq(&nacl->device_list_lock);
412 /*
413 * Disable struct se_dev_entry LUN ACL mapping
414 */
415 core_scsi3_ua_release_all(deve);
416 deve->se_lun = NULL;
417 deve->se_lun_acl = NULL;
418 deve->lun_flags = 0;
419 deve->creation_time = 0;
420 deve->attach_count--;
421 spin_unlock_irq(&nacl->device_list_lock);
422
423 core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
424 return 0;
425 }
426
427 /* core_clear_lun_from_tpg():
428 * Walk every node ACL in the TPG and disable any mapped LUN entry
429 * that still references the struct se_lun being removed.
430 */
431 void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
432 {
433 struct se_node_acl *nacl;
434 struct se_dev_entry *deve;
435 u32 i;
436
437 spin_lock_irq(&tpg->acl_node_lock);
438 list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
439 spin_unlock_irq(&tpg->acl_node_lock);
440
441 spin_lock_irq(&nacl->device_list_lock);
442 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
443 deve = nacl->device_list[i];
444 if (lun != deve->se_lun)
445 continue;
446 spin_unlock_irq(&nacl->device_list_lock);
447
448 core_disable_device_list_for_node(lun, NULL,
449 deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
450 nacl, tpg);
451
452 spin_lock_irq(&nacl->device_list_lock);
453 }
454 spin_unlock_irq(&nacl->device_list_lock);
455
456 spin_lock_irq(&tpg->acl_node_lock);
457 }
458 spin_unlock_irq(&tpg->acl_node_lock);
459 }
460
461 static struct se_port *core_alloc_port(struct se_device *dev)
462 {
463 struct se_port *port, *port_tmp;
464
465 port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
466 if (!port) {
467 pr_err("Unable to allocate struct se_port\n");
468 return ERR_PTR(-ENOMEM);
469 }
470 INIT_LIST_HEAD(&port->sep_alua_list);
471 INIT_LIST_HEAD(&port->sep_list);
472 atomic_set(&port->sep_tg_pt_secondary_offline, 0);
473 spin_lock_init(&port->sep_alua_lock);
474 mutex_init(&port->sep_tg_pt_md_mutex);
475
476 spin_lock(&dev->se_port_lock);
477 if (dev->dev_port_count == 0x0000ffff) {
478 pr_warn("Reached dev->dev_port_count =="
479 " 0x0000ffff\n");
480 spin_unlock(&dev->se_port_lock);
481 return ERR_PTR(-ENOSPC);
482 }
483 again:
484 /*
485 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
486 * Here is the table from spc4r17 section 7.7.3.8.
487 *
488 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
489 *
490 * Code Description
491 * 0h Reserved
492 * 1h Relative port 1, historically known as port A
493 * 2h Relative port 2, historically known as port B
494 * 3h to FFFFh Relative port 3 through 65 535
495 */
496 port->sep_rtpi = dev->dev_rpti_counter++;
497 if (!port->sep_rtpi)
498 goto again;
499
500 list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
501 /*
502 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
503 * across a 16-bit counter wrap.
504 */
505 if (port->sep_rtpi == port_tmp->sep_rtpi)
506 goto again;
507 }
508 spin_unlock(&dev->se_port_lock);
509
510 return port;
511 }
512
513 static void core_export_port(
514 struct se_device *dev,
515 struct se_portal_group *tpg,
516 struct se_port *port,
517 struct se_lun *lun)
518 {
519 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
520
521 spin_lock(&dev->se_port_lock);
522 spin_lock(&lun->lun_sep_lock);
523 port->sep_tpg = tpg;
524 port->sep_lun = lun;
525 lun->lun_sep = port;
526 spin_unlock(&lun->lun_sep_lock);
527
528 list_add_tail(&port->sep_list, &dev->dev_sep_list);
529 spin_unlock(&dev->se_port_lock);
530
531 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
532 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
533 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
534 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
535 pr_err("Unable to allocate t10_alua_tg_pt"
536 "_gp_member_t\n");
537 return;
538 }
539 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
540 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
541 dev->t10_alua.default_tg_pt_gp);
542 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
543 pr_debug("%s/%s: Adding to default ALUA Target Port"
544 " Group: alua/default_tg_pt_gp\n",
545 dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
546 }
547
548 dev->dev_port_count++;
549 port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
550 }
551
552 /*
553 * Called with struct se_device->se_port_lock spinlock held.
554 */
555 static void core_release_port(struct se_device *dev, struct se_port *port)
556 __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
557 {
558 /*
559 * Wait for any port reference for PR ALL_TG_PT=1 operation
560 * to complete in __core_scsi3_alloc_registration()
561 */
562 spin_unlock(&dev->se_port_lock);
563 while (atomic_read(&port->sep_tg_pt_ref_cnt))
564 cpu_relax();
565 spin_lock(&dev->se_port_lock);
566
567 core_alua_free_tg_pt_gp_mem(port);
568
569 list_del(&port->sep_list);
570 dev->dev_port_count--;
571 kfree(port);
572 }
573
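/*
 * core_dev_export():
 *
 * Export dev as a LUN within tpg: allocate a struct se_port with a
 * unique RELATIVE TARGET PORT IDENTIFIER, bump export_count under
 * hba->device_lock, and (for non-passthrough backends) attach the
 * port to the device's default ALUA target port group via
 * core_export_port().
 */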
574 int core_dev_export(
575 struct se_device *dev,
576 struct se_portal_group *tpg,
577 struct se_lun *lun)
578 {
579 struct se_hba *hba = dev->se_hba;
580 struct se_port *port;
581
582 port = core_alloc_port(dev);
583 if (IS_ERR(port))
584 return PTR_ERR(port);
585
586 lun->lun_se_dev = dev;
587
588 spin_lock(&hba->device_lock);
589 dev->export_count++;
590 spin_unlock(&hba->device_lock);
591
592 core_export_port(dev, tpg, port, lun);
593 return 0;
594 }
595
596 void core_dev_unexport(
597 struct se_device *dev,
598 struct se_portal_group *tpg,
599 struct se_lun *lun)
600 {
601 struct se_hba *hba = dev->se_hba;
602 struct se_port *port = lun->lun_sep;
603
604 spin_lock(&lun->lun_sep_lock);
605 if (lun->lun_se_dev == NULL) {
606 spin_unlock(&lun->lun_sep_lock);
607 return;
608 }
609 spin_unlock(&lun->lun_sep_lock);
610
611 spin_lock(&dev->se_port_lock);
612 core_release_port(dev, port);
613 spin_unlock(&dev->se_port_lock);
614
615 spin_lock(&hba->device_lock);
616 dev->export_count--;
617 spin_unlock(&hba->device_lock);
618
619 lun->lun_se_dev = NULL;
620 }
621
622 static void se_release_vpd_for_dev(struct se_device *dev)
623 {
624 struct t10_vpd *vpd, *vpd_tmp;
625
626 spin_lock(&dev->t10_wwn.t10_vpd_lock);
627 list_for_each_entry_safe(vpd, vpd_tmp,
628 &dev->t10_wwn.t10_vpd_list, vpd_list) {
629 list_del(&vpd->vpd_list);
630 kfree(vpd);
631 }
632 spin_unlock(&dev->t10_wwn.t10_vpd_lock);
633 }
634
635 static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
636 {
637 u32 aligned_max_sectors;
638 u32 alignment;
639 /*
640 * Limit max_sectors to a PAGE_SIZE aligned value for modern
641 * transport_allocate_data_tasks() operation.
642 */
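/*
 * e.g. with a 512-byte block_size on a kernel using 4 KiB pages,
 * alignment = 8, so a max_sectors of 1023 rounds down to 1016.
 */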
643 alignment = max(1ul, PAGE_SIZE / block_size);
644 aligned_max_sectors = rounddown(max_sectors, alignment);
645
646 if (max_sectors != aligned_max_sectors)
647 pr_info("Rounding down aligned max_sectors from %u to %u\n",
648 max_sectors, aligned_max_sectors);
649
650 return aligned_max_sectors;
651 }
652
653 int se_dev_set_max_unmap_lba_count(
654 struct se_device *dev,
655 u32 max_unmap_lba_count)
656 {
657 dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
658 pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
659 dev, dev->dev_attrib.max_unmap_lba_count);
660 return 0;
661 }
662
663 int se_dev_set_max_unmap_block_desc_count(
664 struct se_device *dev,
665 u32 max_unmap_block_desc_count)
666 {
667 dev->dev_attrib.max_unmap_block_desc_count =
668 max_unmap_block_desc_count;
669 pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
670 dev, dev->dev_attrib.max_unmap_block_desc_count);
671 return 0;
672 }
673
674 int se_dev_set_unmap_granularity(
675 struct se_device *dev,
676 u32 unmap_granularity)
677 {
678 dev->dev_attrib.unmap_granularity = unmap_granularity;
679 pr_debug("dev[%p]: Set unmap_granularity: %u\n",
680 dev, dev->dev_attrib.unmap_granularity);
681 return 0;
682 }
683
684 int se_dev_set_unmap_granularity_alignment(
685 struct se_device *dev,
686 u32 unmap_granularity_alignment)
687 {
688 dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
689 pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
690 dev, dev->dev_attrib.unmap_granularity_alignment);
691 return 0;
692 }
693
694 int se_dev_set_max_write_same_len(
695 struct se_device *dev,
696 u32 max_write_same_len)
697 {
698 dev->dev_attrib.max_write_same_len = max_write_same_len;
699 pr_debug("dev[%p]: Set max_write_same_len: %u\n",
700 dev, dev->dev_attrib.max_write_same_len);
701 return 0;
702 }
703
704 static void dev_set_t10_wwn_model_alias(struct se_device *dev)
705 {
706 const char *configname;
707
708 configname = config_item_name(&dev->dev_group.cg_item);
709 if (strlen(configname) >= 16) {
710 pr_warn("dev[%p]: Backstore name '%s' is too long for "
711 "INQUIRY_MODEL, truncating to 16 bytes\n", dev,
712 configname);
713 }
714 snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
715 }
716
717 int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
718 {
719 if (dev->export_count) {
720 pr_err("dev[%p]: Unable to change model alias"
721 " while export_count is %d\n",
722 dev, dev->export_count);
723 return -EINVAL;
724 }
725
726 if (flag != 0 && flag != 1) {
727 pr_err("Illegal value %d\n", flag);
728 return -EINVAL;
729 }
730
731 if (flag) {
732 dev_set_t10_wwn_model_alias(dev);
733 } else {
734 strncpy(&dev->t10_wwn.model[0],
735 dev->transport->inquiry_prod, 16);
736 }
737 dev->dev_attrib.emulate_model_alias = flag;
738
739 return 0;
740 }
741
742 int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
743 {
744 if (flag != 0 && flag != 1) {
745 pr_err("Illegal value %d\n", flag);
746 return -EINVAL;
747 }
748
749 if (flag) {
750 pr_err("dpo_emulated not supported\n");
751 return -EINVAL;
752 }
753
754 return 0;
755 }
756
757 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
758 {
759 if (flag != 0 && flag != 1) {
760 pr_err("Illegal value %d\n", flag);
761 return -EINVAL;
762 }
763
764 if (flag &&
765 dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
766 pr_err("emulate_fua_write not supported for pSCSI\n");
767 return -EINVAL;
768 }
769 dev->dev_attrib.emulate_fua_write = flag;
770 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
771 dev, dev->dev_attrib.emulate_fua_write);
772 return 0;
773 }
774
775 int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
776 {
777 if (flag != 0 && flag != 1) {
778 pr_err("Illegal value %d\n", flag);
779 return -EINVAL;
780 }
781
782 if (flag) {
783 pr_err("ua read emulated not supported\n");
784 return -EINVAL;
785 }
786
787 return 0;
788 }
789
790 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
791 {
792 if (flag != 0 && flag != 1) {
793 pr_err("Illegal value %d\n", flag);
794 return -EINVAL;
795 }
796 if (flag &&
797 dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
798 pr_err("emulate_write_cache not supported for pSCSI\n");
799 return -EINVAL;
800 }
801 if (dev->transport->get_write_cache) {
802 pr_warn("emulate_write_cache cannot be changed when underlying"
803 " HW reports WriteCacheEnabled, ignoring request\n");
804 return 0;
805 }
806
807 dev->dev_attrib.emulate_write_cache = flag;
808 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
809 dev, dev->dev_attrib.emulate_write_cache);
810 return 0;
811 }
812
813 int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
814 {
815 if ((flag != 0) && (flag != 1) && (flag != 2)) {
816 pr_err("Illegal value %d\n", flag);
817 return -EINVAL;
818 }
819
820 if (dev->export_count) {
821 pr_err("dev[%p]: Unable to change SE Device"
822 " UA_INTRLCK_CTRL while export_count is %d\n",
823 dev, dev->export_count);
824 return -EINVAL;
825 }
826 dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
827 pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
828 dev, dev->dev_attrib.emulate_ua_intlck_ctrl);
829
830 return 0;
831 }
832
833 int se_dev_set_emulate_tas(struct se_device *dev, int flag)
834 {
835 if ((flag != 0) && (flag != 1)) {
836 pr_err("Illegal value %d\n", flag);
837 return -EINVAL;
838 }
839
840 if (dev->export_count) {
841 pr_err("dev[%p]: Unable to change SE Device TAS while"
842 " export_count is %d\n",
843 dev, dev->export_count);
844 return -EINVAL;
845 }
846 dev->dev_attrib.emulate_tas = flag;
847 pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
848 dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
849
850 return 0;
851 }
852
853 int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
854 {
855 if ((flag != 0) && (flag != 1)) {
856 pr_err("Illegal value %d\n", flag);
857 return -EINVAL;
858 }
859 /*
860 * We expect this value to be non-zero when generic Block Layer
861 * Discard support is detected in iblock_create_virtdevice().
862 */
863 if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
864 pr_err("Generic Block Discard not supported\n");
865 return -ENOSYS;
866 }
867
868 dev->dev_attrib.emulate_tpu = flag;
869 pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
870 dev, flag);
871 return 0;
872 }
873
874 int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
875 {
876 if ((flag != 0) && (flag != 1)) {
877 pr_err("Illegal value %d\n", flag);
878 return -EINVAL;
879 }
880 /*
881 * We expect this value to be non-zero when generic Block Layer
882 * Discard support is detected in iblock_create_virtdevice().
883 */
884 if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
885 pr_err("Generic Block Discard not supported\n");
886 return -ENOSYS;
887 }
888
889 dev->dev_attrib.emulate_tpws = flag;
890 pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
891 dev, flag);
892 return 0;
893 }
894
895 int se_dev_set_emulate_caw(struct se_device *dev, int flag)
896 {
897 if (flag != 0 && flag != 1) {
898 pr_err("Illegal value %d\n", flag);
899 return -EINVAL;
900 }
901 dev->dev_attrib.emulate_caw = flag;
902 pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
903 dev, flag);
904
905 return 0;
906 }
907
908 int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
909 {
910 if (flag != 0 && flag != 1) {
911 pr_err("Illegal value %d\n", flag);
912 return -EINVAL;
913 }
914 dev->dev_attrib.emulate_3pc = flag;
915 pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
916 dev, flag);
917
918 return 0;
919 }
920
921 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
922 {
923 if ((flag != 0) && (flag != 1)) {
924 pr_err("Illegal value %d\n", flag);
925 return -EINVAL;
926 }
927 dev->dev_attrib.enforce_pr_isids = flag;
928 pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
929 (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
930 return 0;
931 }
932
933 int se_dev_set_is_nonrot(struct se_device *dev, int flag)
934 {
935 if ((flag != 0) && (flag != 1)) {
936 printk(KERN_ERR "Illegal value %d\n", flag);
937 return -EINVAL;
938 }
939 dev->dev_attrib.is_nonrot = flag;
940 pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
941 dev, flag);
942 return 0;
943 }
944
945 int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
946 {
947 if (flag != 0) {
948 printk(KERN_ERR "dev[%p]: SE Device emulatation of restricted"
949 " reordering not implemented\n", dev);
950 return -ENOSYS;
951 }
952 dev->dev_attrib.emulate_rest_reord = flag;
953 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
954 return 0;
955 }
956
957 /*
958 * Note, this can only be called on unexported SE Device Object.
959 */
960 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
961 {
962 if (dev->export_count) {
963 pr_err("dev[%p]: Unable to change SE Device TCQ while"
964 " export_count is %d\n",
965 dev, dev->export_count);
966 return -EINVAL;
967 }
968 if (!queue_depth) {
969 pr_err("dev[%p]: Illegal ZERO value for queue"
970 "_depth\n", dev);
971 return -EINVAL;
972 }
973
974 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
975 if (queue_depth > dev->dev_attrib.hw_queue_depth) {
976 pr_err("dev[%p]: Passed queue_depth: %u"
977 " exceeds TCM/SE_Device TCQ: %u\n",
978 dev, queue_depth,
979 dev->dev_attrib.hw_queue_depth);
980 return -EINVAL;
981 }
982 } else {
983 if (queue_depth > dev->dev_attrib.queue_depth) {
984 if (queue_depth > dev->dev_attrib.hw_queue_depth) {
985 pr_err("dev[%p]: Passed queue_depth:"
986 " %u exceeds TCM/SE_Device MAX"
987 " TCQ: %u\n", dev, queue_depth,
988 dev->dev_attrib.hw_queue_depth);
989 return -EINVAL;
990 }
991 }
992 }
993
994 dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
995 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
996 dev, queue_depth);
997 return 0;
998 }
999
1000 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1001 {
1002 int block_size = dev->dev_attrib.block_size;
1003
1004 if (dev->export_count) {
1005 pr_err("dev[%p]: Unable to change SE Device"
1006 " fabric_max_sectors while export_count is %d\n",
1007 dev, dev->export_count);
1008 return -EINVAL;
1009 }
1010 if (!fabric_max_sectors) {
1011 pr_err("dev[%p]: Illegal ZERO value for"
1012 " fabric_max_sectors\n", dev);
1013 return -EINVAL;
1014 }
1015 if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
1016 pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
1017 " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
1018 DA_STATUS_MAX_SECTORS_MIN);
1019 return -EINVAL;
1020 }
1021 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1022 if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
1023 pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1024 " greater than TCM/SE_Device max_sectors:"
1025 " %u\n", dev, fabric_max_sectors,
1026 dev->dev_attrib.hw_max_sectors);
1027 return -EINVAL;
1028 }
1029 } else {
1030 if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1031 pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1032 " greater than DA_STATUS_MAX_SECTORS_MAX:"
1033 " %u\n", dev, fabric_max_sectors,
1034 DA_STATUS_MAX_SECTORS_MAX);
1035 return -EINVAL;
1036 }
1037 }
1038 /*
1039 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
1040 */
1041 if (!block_size) {
1042 block_size = 512;
1043 pr_warn("Defaulting to 512 for zero block_size\n");
1044 }
1045 fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
1046 block_size);
1047
1048 dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
1049 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
1050 dev, fabric_max_sectors);
1051 return 0;
1052 }
1053
1054 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1055 {
1056 if (dev->export_count) {
1057 pr_err("dev[%p]: Unable to change SE Device"
1058 " optimal_sectors while export_count is %d\n",
1059 dev, dev->export_count);
1060 return -EINVAL;
1061 }
1062 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1063 pr_err("dev[%p]: Passed optimal_sectors cannot be"
1064 " changed for TCM/pSCSI\n", dev);
1065 return -EINVAL;
1066 }
1067 if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
1068 pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1069 " greater than fabric_max_sectors: %u\n", dev,
1070 optimal_sectors, dev->dev_attrib.fabric_max_sectors);
1071 return -EINVAL;
1072 }
1073
1074 dev->dev_attrib.optimal_sectors = optimal_sectors;
1075 pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
1076 dev, optimal_sectors);
1077 return 0;
1078 }
1079
1080 int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1081 {
1082 if (dev->export_count) {
1083 pr_err("dev[%p]: Unable to change SE Device block_size"
1084 " while export_count is %d\n",
1085 dev, dev->export_count);
1086 return -EINVAL;
1087 }
1088
1089 if ((block_size != 512) &&
1090 (block_size != 1024) &&
1091 (block_size != 2048) &&
1092 (block_size != 4096)) {
1093 pr_err("dev[%p]: Illegal value for block_device: %u"
1094 " for SE device, must be 512, 1024, 2048 or 4096\n",
1095 dev, block_size);
1096 return -EINVAL;
1097 }
1098
1099 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1100 pr_err("dev[%p]: Not allowed to change block_size for"
1101 " Physical Device, use for Linux/SCSI to change"
1102 " block_size for underlying hardware\n", dev);
1103 return -EINVAL;
1104 }
1105
1106 dev->dev_attrib.block_size = block_size;
1107 pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1108 dev, block_size);
1109 return 0;
1110 }
1111
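/*
 * core_dev_add_lun():
 *
 * Allocate and activate a read-write struct se_lun for dev at
 * unpacked_lun within tpg, then refresh the LUN maps of any dynamic
 * (demo mode generated) node ACLs in the TPG.
 */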
1112 struct se_lun *core_dev_add_lun(
1113 struct se_portal_group *tpg,
1114 struct se_device *dev,
1115 u32 unpacked_lun)
1116 {
1117 struct se_lun *lun;
1118 int rc;
1119
1120 lun = core_tpg_alloc_lun(tpg, unpacked_lun);
1121 if (IS_ERR(lun))
1122 return lun;
1123
1124 rc = core_tpg_add_lun(tpg, lun,
1125 TRANSPORT_LUNFLAGS_READ_WRITE, dev);
1126 if (rc < 0)
1127 return ERR_PTR(rc);
1128
1129 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
1130 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1131 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1132 tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
1133 /*
1134 * Update LUN maps for dynamically added initiators when
1135 * generate_node_acl is enabled.
1136 */
1137 if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
1138 struct se_node_acl *acl;
1139 spin_lock_irq(&tpg->acl_node_lock);
1140 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
1141 if (acl->dynamic_node_acl &&
1142 (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
1143 !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
1144 spin_unlock_irq(&tpg->acl_node_lock);
1145 core_tpg_add_node_to_devs(acl, tpg);
1146 spin_lock_irq(&tpg->acl_node_lock);
1147 }
1148 }
1149 spin_unlock_irq(&tpg->acl_node_lock);
1150 }
1151
1152 return lun;
1153 }
1154
1155 /* core_dev_del_lun():
1156 * Deactivate and release the struct se_lun at unpacked_lun within
1157 * the TPG via core_tpg_pre_dellun() and core_tpg_post_dellun().
1158 */
1159 int core_dev_del_lun(
1160 struct se_portal_group *tpg,
1161 u32 unpacked_lun)
1162 {
1163 struct se_lun *lun;
1164
1165 lun = core_tpg_pre_dellun(tpg, unpacked_lun);
1166 if (IS_ERR(lun))
1167 return PTR_ERR(lun);
1168
1169 core_tpg_post_dellun(tpg, lun);
1170
1171 pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
1172 " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
1173 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
1174 tpg->se_tpg_tfo->get_fabric_name());
1175
1176 return 0;
1177 }
1178
1179 struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
1180 {
1181 struct se_lun *lun;
1182
1183 spin_lock(&tpg->tpg_lun_lock);
1184 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1185 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
1186 "_PER_TPG-1: %u for Target Portal Group: %hu\n",
1187 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1188 TRANSPORT_MAX_LUNS_PER_TPG-1,
1189 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1190 spin_unlock(&tpg->tpg_lun_lock);
1191 return NULL;
1192 }
1193 lun = tpg->tpg_lun_list[unpacked_lun];
1194
1195 if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
1196 pr_err("%s Logical Unit Number: %u is not free on"
1197 " Target Portal Group: %hu, ignoring request.\n",
1198 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1199 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1200 spin_unlock(&tpg->tpg_lun_lock);
1201 return NULL;
1202 }
1203 spin_unlock(&tpg->tpg_lun_lock);
1204
1205 return lun;
1206 }
1207
1208 /* core_dev_get_lun():
1209 * Return the struct se_lun at unpacked_lun if it is currently
1210 * ACTIVE within the TPG, or NULL otherwise.
1211 */
1212 static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
1213 {
1214 struct se_lun *lun;
1215
1216 spin_lock(&tpg->tpg_lun_lock);
1217 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1218 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
1219 "_TPG-1: %u for Target Portal Group: %hu\n",
1220 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1221 TRANSPORT_MAX_LUNS_PER_TPG-1,
1222 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1223 spin_unlock(&tpg->tpg_lun_lock);
1224 return NULL;
1225 }
1226 lun = tpg->tpg_lun_list[unpacked_lun];
1227
1228 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
1229 pr_err("%s Logical Unit Number: %u is not active on"
1230 " Target Portal Group: %hu, ignoring request.\n",
1231 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1232 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1233 spin_unlock(&tpg->tpg_lun_lock);
1234 return NULL;
1235 }
1236 spin_unlock(&tpg->tpg_lun_lock);
1237
1238 return lun;
1239 }
1240
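/*
 * core_dev_init_initiator_node_lun_acl():
 *
 * Allocate a struct se_lun_acl for nacl at mapped_lun; returns NULL
 * and sets *ret to a -errno value on failure.
 */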
1241 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1242 struct se_portal_group *tpg,
1243 struct se_node_acl *nacl,
1244 u32 mapped_lun,
1245 int *ret)
1246 {
1247 struct se_lun_acl *lacl;
1248
1249 if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
1250 pr_err("%s InitiatorName exceeds maximum size.\n",
1251 tpg->se_tpg_tfo->get_fabric_name());
1252 *ret = -EOVERFLOW;
1253 return NULL;
1254 }
1255 lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
1256 if (!lacl) {
1257 pr_err("Unable to allocate memory for struct se_lun_acl.\n");
1258 *ret = -ENOMEM;
1259 return NULL;
1260 }
1261
1262 INIT_LIST_HEAD(&lacl->lacl_list);
1263 lacl->mapped_lun = mapped_lun;
1264 lacl->se_lun_nacl = nacl;
1265 snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
1266 nacl->initiatorname);
1267
1268 return lacl;
1269 }
1270
1271 int core_dev_add_initiator_node_lun_acl(
1272 struct se_portal_group *tpg,
1273 struct se_lun_acl *lacl,
1274 u32 unpacked_lun,
1275 u32 lun_access)
1276 {
1277 struct se_lun *lun;
1278 struct se_node_acl *nacl;
1279
1280 lun = core_dev_get_lun(tpg, unpacked_lun);
1281 if (!lun) {
1282 pr_err("%s Logical Unit Number: %u is not active on"
1283 " Target Portal Group: %hu, ignoring request.\n",
1284 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1285 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1286 return -EINVAL;
1287 }
1288
1289 nacl = lacl->se_lun_nacl;
1290 if (!nacl)
1291 return -EINVAL;
1292
1293 if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
1294 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
1295 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
1296
1297 lacl->se_lun = lun;
1298
1299 if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
1300 lun_access, nacl, tpg) < 0)
1301 return -EINVAL;
1302
1303 spin_lock(&lun->lun_acl_lock);
1304 list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
1305 atomic_inc(&lun->lun_acl_count);
1306 smp_mb__after_atomic_inc();
1307 spin_unlock(&lun->lun_acl_lock);
1308
1309 pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
1310 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
1311 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
1312 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
1313 lacl->initiatorname);
1314 /*
1315 * Check to see if there are any existing persistent reservation APTPL
1316 * pre-registrations that need to be enabled for this LUN ACL..
1317 */
1318 core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
1319 return 0;
1320 }
1321
1322 /* core_dev_del_initiator_node_lun_acl():
1323 * Unlink the LUN ACL from its struct se_lun and disable the
1324 * initiator node's mapped LUN entry.
1325 */
1326 int core_dev_del_initiator_node_lun_acl(
1327 struct se_portal_group *tpg,
1328 struct se_lun *lun,
1329 struct se_lun_acl *lacl)
1330 {
1331 struct se_node_acl *nacl;
1332
1333 nacl = lacl->se_lun_nacl;
1334 if (!nacl)
1335 return -EINVAL;
1336
1337 spin_lock(&lun->lun_acl_lock);
1338 list_del(&lacl->lacl_list);
1339 atomic_dec(&lun->lun_acl_count);
1340 smp_mb__after_atomic_dec();
1341 spin_unlock(&lun->lun_acl_lock);
1342
1343 core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
1344 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
1345
1346 lacl->se_lun = NULL;
1347
1348 pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
1349 " InitiatorNode: %s Mapped LUN: %u\n",
1350 tpg->se_tpg_tfo->get_fabric_name(),
1351 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1352 lacl->initiatorname, lacl->mapped_lun);
1353
1354 return 0;
1355 }
1356
1357 void core_dev_free_initiator_node_lun_acl(
1358 struct se_portal_group *tpg,
1359 struct se_lun_acl *lacl)
1360 {
1361 pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
1362 " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1363 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1364 tpg->se_tpg_tfo->get_fabric_name(),
1365 lacl->initiatorname, lacl->mapped_lun);
1366
1367 kfree(lacl);
1368 }
1369
1370 static void scsi_dump_inquiry(struct se_device *dev)
1371 {
1372 struct t10_wwn *wwn = &dev->t10_wwn;
1373 char buf[17];
1374 int i, device_type;
1375 /*
1376 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1377 */
1378 for (i = 0; i < 8; i++)
1379 if (wwn->vendor[i] >= 0x20)
1380 buf[i] = wwn->vendor[i];
1381 else
1382 buf[i] = ' ';
1383 buf[i] = '\0';
1384 pr_debug(" Vendor: %s\n", buf);
1385
1386 for (i = 0; i < 16; i++)
1387 if (wwn->model[i] >= 0x20)
1388 buf[i] = wwn->model[i];
1389 else
1390 buf[i] = ' ';
1391 buf[i] = '\0';
1392 pr_debug(" Model: %s\n", buf);
1393
1394 for (i = 0; i < 4; i++)
1395 if (wwn->revision[i] >= 0x20)
1396 buf[i] = wwn->revision[i];
1397 else
1398 buf[i] = ' ';
1399 buf[i] = '\0';
1400 pr_debug(" Revision: %s\n", buf);
1401
1402 device_type = dev->transport->get_device_type(dev);
1403 pr_debug(" Type: %s ", scsi_device_type(device_type));
1404 }
1405
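/*
 * target_alloc_device():
 *
 * Ask the backend to allocate a struct se_device for hba, then
 * initialize the generic lists, locks, default device attributes and
 * the embedded xcopy_lun used for local EXTENDED_COPY I/O.
 */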
1406 struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1407 {
1408 struct se_device *dev;
1409 struct se_lun *xcopy_lun;
1410
1411 dev = hba->transport->alloc_device(hba, name);
1412 if (!dev)
1413 return NULL;
1414
1415 dev->dev_link_magic = SE_DEV_LINK_MAGIC;
1416 dev->se_hba = hba;
1417 dev->transport = hba->transport;
1418
1419 INIT_LIST_HEAD(&dev->dev_list);
1420 INIT_LIST_HEAD(&dev->dev_sep_list);
1421 INIT_LIST_HEAD(&dev->dev_tmr_list);
1422 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1423 INIT_LIST_HEAD(&dev->state_list);
1424 INIT_LIST_HEAD(&dev->qf_cmd_list);
1425 INIT_LIST_HEAD(&dev->g_dev_node);
1426 spin_lock_init(&dev->execute_task_lock);
1427 spin_lock_init(&dev->delayed_cmd_lock);
1428 spin_lock_init(&dev->dev_reservation_lock);
1429 spin_lock_init(&dev->se_port_lock);
1430 spin_lock_init(&dev->se_tmr_lock);
1431 spin_lock_init(&dev->qf_cmd_lock);
1432 sema_init(&dev->caw_sem, 1);
1433 atomic_set(&dev->dev_ordered_id, 0);
1434 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
1435 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
1436 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
1437 INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
1438 spin_lock_init(&dev->t10_pr.registration_lock);
1439 spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
1440 INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
1441 spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
1442
1443 dev->t10_wwn.t10_dev = dev;
1444 dev->t10_alua.t10_dev = dev;
1445
1446 dev->dev_attrib.da_dev = dev;
1447 dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
1448 dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
1449 dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
1450 dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
1451 dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
1452 dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
1453 dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
1454 dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
1455 dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
1456 dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
1457 dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
1458 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
1459 dev->dev_attrib.is_nonrot = DA_IS_NONROT;
1460 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
1461 dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
1462 dev->dev_attrib.max_unmap_block_desc_count =
1463 DA_MAX_UNMAP_BLOCK_DESC_COUNT;
1464 dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
1465 dev->dev_attrib.unmap_granularity_alignment =
1466 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
1467 dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
1468 dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
1469 dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
1470
1471 xcopy_lun = &dev->xcopy_lun;
1472 xcopy_lun->lun_se_dev = dev;
1473 init_completion(&xcopy_lun->lun_shutdown_comp);
1474 INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
1475 spin_lock_init(&xcopy_lun->lun_acl_lock);
1476 spin_lock_init(&xcopy_lun->lun_sep_lock);
1477 init_completion(&xcopy_lun->lun_ref_comp);
1478
1479 return dev;
1480 }
1481
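/*
 * target_configure_device():
 *
 * Second stage setup after target_alloc_device(): let the backend
 * configure itself, align hw_max_sectors, set up ALUA and the tmr
 * workqueue, preload INQUIRY defaults for virtual backends, and link
 * the device onto the global g_device_list.
 */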
1482 int target_configure_device(struct se_device *dev)
1483 {
1484 struct se_hba *hba = dev->se_hba;
1485 int ret;
1486
1487 if (dev->dev_flags & DF_CONFIGURED) {
1488 pr_err("se_dev->se_dev_ptr already set for storage"
1489 " object\n");
1490 return -EEXIST;
1491 }
1492
1493 ret = dev->transport->configure_device(dev);
1494 if (ret)
1495 goto out;
1496 dev->dev_flags |= DF_CONFIGURED;
1497
1498 /*
1499 * XXX: there is not much point to have two different values here..
1500 */
1501 dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
1502 dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
1503
1504 /*
1505 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
1506 */
1507 dev->dev_attrib.hw_max_sectors =
1508 se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
1509 dev->dev_attrib.hw_block_size);
1510
1511 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1512 dev->creation_time = get_jiffies_64();
1513
1514 ret = core_setup_alua(dev);
1515 if (ret)
1516 goto out;
1517
1518 /*
1519 * Startup the struct se_device processing thread
1520 */
1521 dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
1522 dev->transport->name);
1523 if (!dev->tmr_wq) {
1524 pr_err("Unable to create tmr workqueue for %s\n",
1525 dev->transport->name);
1526 ret = -ENOMEM;
1527 goto out_free_alua;
1528 }
1529
1530 /*
1531 * Setup work_queue for QUEUE_FULL
1532 */
1533 INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1534
1535 /*
1536 * Preload the initial INQUIRY const values if we are doing
1537 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1538 * passthrough because this is being provided by the backend LLD.
1539 */
1540 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1541 strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1542 strncpy(&dev->t10_wwn.model[0],
1543 dev->transport->inquiry_prod, 16);
1544 strncpy(&dev->t10_wwn.revision[0],
1545 dev->transport->inquiry_rev, 4);
1546 }
1547
1548 scsi_dump_inquiry(dev);
1549
1550 spin_lock(&hba->device_lock);
1551 hba->dev_count++;
1552 spin_unlock(&hba->device_lock);
1553
1554 mutex_lock(&g_device_mutex);
1555 list_add_tail(&dev->g_dev_node, &g_device_list);
1556 mutex_unlock(&g_device_mutex);
1557
1558 return 0;
1559
1560 out_free_alua:
1561 core_alua_free_lu_gp_mem(dev);
1562 out:
1563 se_release_vpd_for_dev(dev);
1564 return ret;
1565 }
1566
1567 void target_free_device(struct se_device *dev)
1568 {
1569 struct se_hba *hba = dev->se_hba;
1570
1571 WARN_ON(!list_empty(&dev->dev_sep_list));
1572
1573 if (dev->dev_flags & DF_CONFIGURED) {
1574 destroy_workqueue(dev->tmr_wq);
1575
1576 mutex_lock(&g_device_mutex);
1577 list_del(&dev->g_dev_node);
1578 mutex_unlock(&g_device_mutex);
1579
1580 spin_lock(&hba->device_lock);
1581 hba->dev_count--;
1582 spin_unlock(&hba->device_lock);
1583 }
1584
1585 core_alua_free_lu_gp_mem(dev);
1586 core_scsi3_free_all_registrations(dev);
1587 se_release_vpd_for_dev(dev);
1588
1589 dev->transport->free_device(dev);
1590 }
1591
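/*
 * core_dev_setup_virtual_lun0():
 *
 * Create the global rd_mcp backed "virt_lun0" device on an internal
 * HBA; it backs each TPG's tpg_virt_lun0 so that LUN 0 can always
 * answer REPORT LUNS for initiators without an explicit MappedLUN=0.
 */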
1592 int core_dev_setup_virtual_lun0(void)
1593 {
1594 struct se_hba *hba;
1595 struct se_device *dev;
1596 char buf[] = "rd_pages=8,rd_nullio=1";
1597 int ret;
1598
1599 hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
1600 if (IS_ERR(hba))
1601 return PTR_ERR(hba);
1602
1603 dev = target_alloc_device(hba, "virt_lun0");
1604 if (!dev) {
1605 ret = -ENOMEM;
1606 goto out_free_hba;
1607 }
1608
1609 hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
1610
1611 ret = target_configure_device(dev);
1612 if (ret)
1613 goto out_free_se_dev;
1614
1615 lun0_hba = hba;
1616 g_lun0_dev = dev;
1617 return 0;
1618
1619 out_free_se_dev:
1620 target_free_device(dev);
1621 out_free_hba:
1622 core_delete_hba(hba);
1623 return ret;
1624 }
1625
1626
1627 void core_dev_release_virtual_lun0(void)
1628 {
1629 struct se_hba *hba = lun0_hba;
1630
1631 if (!hba)
1632 return;
1633
1634 if (g_lun0_dev)
1635 target_free_device(g_lun0_dev);
1636 core_delete_hba(hba);
1637 }