target: Convert se_node_acl->device_list[] to RCU hlist
drivers/target/target_core_device.c (deliverable/linux.git)
1 /*******************************************************************************
2 * Filename: target_core_device.c (based on iscsi_target_device.c)
3 *
4 * This file contains the TCM Virtual Device and Disk Transport
5 * agnostic related functions.
6 *
7 * (c) Copyright 2003-2013 Datera, Inc.
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 *
25 ******************************************************************************/
26
27 #include <linux/net.h>
28 #include <linux/string.h>
29 #include <linux/delay.h>
30 #include <linux/timer.h>
31 #include <linux/slab.h>
32 #include <linux/spinlock.h>
33 #include <linux/kthread.h>
34 #include <linux/in.h>
35 #include <linux/export.h>
36 #include <asm/unaligned.h>
37 #include <net/sock.h>
38 #include <net/tcp.h>
39 #include <scsi/scsi.h>
40 #include <scsi/scsi_device.h>
41
42 #include <target/target_core_base.h>
43 #include <target/target_core_backend.h>
44 #include <target/target_core_fabric.h>
45
46 #include "target_core_internal.h"
47 #include "target_core_alua.h"
48 #include "target_core_pr.h"
49 #include "target_core_ua.h"
50
51 DEFINE_MUTEX(g_device_mutex);
52 LIST_HEAD(g_device_list);
53
54 static struct se_hba *lun0_hba;
55 /* not static, needed by tpg.c */
56 struct se_device *g_lun0_dev;
57
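/*
 * Mapped LUN lookup fast path: the per-ACL se_dev_entry is found via
 * target_nacl_find_deve() under rcu_read_lock(), the se_lun pointer is
 * loaded with rcu_dereference(), and a percpu lun_ref is taken before
 * the read-side critical section ends so the LUN cannot go away while
 * the command is in flight.
 */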
58 sense_reason_t
59 transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
60 {
61 struct se_lun *se_lun = NULL;
62 struct se_session *se_sess = se_cmd->se_sess;
63 struct se_node_acl *nacl = se_sess->se_node_acl;
64 struct se_device *dev;
65 struct se_dev_entry *deve;
66
67 if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
68 return TCM_NON_EXISTENT_LUN;
69
70 rcu_read_lock();
71 deve = target_nacl_find_deve(nacl, unpacked_lun);
72 if (deve) {
73 atomic_long_inc(&deve->total_cmds);
74
75 if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
76 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
77 pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
78 " Access for 0x%08x\n",
79 se_cmd->se_tfo->get_fabric_name(),
80 unpacked_lun);
81 rcu_read_unlock();
82 return TCM_WRITE_PROTECTED;
83 }
84
85 if (se_cmd->data_direction == DMA_TO_DEVICE)
86 atomic_long_add(se_cmd->data_length,
87 &deve->write_bytes);
88 else if (se_cmd->data_direction == DMA_FROM_DEVICE)
89 atomic_long_add(se_cmd->data_length,
90 &deve->read_bytes);
91
92 se_lun = rcu_dereference(deve->se_lun);
93 se_cmd->se_lun = rcu_dereference(deve->se_lun);
94 se_cmd->pr_res_key = deve->pr_res_key;
95 se_cmd->orig_fe_lun = unpacked_lun;
96 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
97
98 percpu_ref_get(&se_lun->lun_ref);
99 se_cmd->lun_ref_active = true;
100 }
101 rcu_read_unlock();
102
103 if (!se_lun) {
104 /*
105 * Use the se_portal_group->tpg_virt_lun0 to allow for
106 * REPORT_LUNS, et al to be returned when no active
107 * MappedLUN=0 exists for this Initiator Port.
108 */
109 if (unpacked_lun != 0) {
110 pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
111 " Access for 0x%08x\n",
112 se_cmd->se_tfo->get_fabric_name(),
113 unpacked_lun);
114 return TCM_NON_EXISTENT_LUN;
115 }
116 /*
117 * Force WRITE PROTECT for virtual LUN 0
118 */
119 if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
120 (se_cmd->data_direction != DMA_NONE))
121 return TCM_WRITE_PROTECTED;
122
123 se_lun = &se_sess->se_tpg->tpg_virt_lun0;
124 se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
125 se_cmd->orig_fe_lun = 0;
126 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
127
128 percpu_ref_get(&se_lun->lun_ref);
129 se_cmd->lun_ref_active = true;
130 }
131
132 /* Directly associate cmd with se_dev */
133 se_cmd->se_dev = se_lun->lun_se_dev;
134
135 dev = se_lun->lun_se_dev;
136 atomic_long_inc(&dev->num_cmds);
137 if (se_cmd->data_direction == DMA_TO_DEVICE)
138 atomic_long_add(se_cmd->data_length, &dev->write_bytes);
139 else if (se_cmd->data_direction == DMA_FROM_DEVICE)
140 atomic_long_add(se_cmd->data_length, &dev->read_bytes);
141
142 return 0;
143 }
144 EXPORT_SYMBOL(transport_lookup_cmd_lun);
145
146 int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
147 {
148 struct se_dev_entry *deve;
149 struct se_lun *se_lun = NULL;
150 struct se_session *se_sess = se_cmd->se_sess;
151 struct se_node_acl *nacl = se_sess->se_node_acl;
152 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
153 unsigned long flags;
154
155 if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
156 return -ENODEV;
157
158 rcu_read_lock();
159 deve = target_nacl_find_deve(nacl, unpacked_lun);
160 if (deve) {
161 se_tmr->tmr_lun = rcu_dereference(deve->se_lun);
162 se_cmd->se_lun = rcu_dereference(deve->se_lun);
163 se_lun = rcu_dereference(deve->se_lun);
164 se_cmd->pr_res_key = deve->pr_res_key;
165 se_cmd->orig_fe_lun = unpacked_lun;
166 }
167 rcu_read_unlock();
168
169 if (!se_lun) {
170 pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
171 " Access for 0x%08x\n",
172 se_cmd->se_tfo->get_fabric_name(),
173 unpacked_lun);
174 return -ENODEV;
175 }
176
177 /* Directly associate cmd with se_dev */
178 se_cmd->se_dev = se_lun->lun_se_dev;
179 se_tmr->tmr_dev = se_lun->lun_se_dev;
180
181 spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
182 list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
183 spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
184
185 return 0;
186 }
187 EXPORT_SYMBOL(transport_lookup_tmr_lun);
188
189 bool target_lun_is_rdonly(struct se_cmd *cmd)
190 {
191 struct se_session *se_sess = cmd->se_sess;
192 struct se_dev_entry *deve;
193 bool ret;
194
195 if (cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY)
196 return true;
197
198 rcu_read_lock();
199 deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
200 ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
201 rcu_read_unlock();
202
203 return ret;
204 }
205 EXPORT_SYMBOL(target_lun_is_rdonly);
206
207 /*
208 * This function is called from core_scsi3_emulate_pro_register_and_move()
209 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
210 * when a matching rtpi is found.
211 */
212 struct se_dev_entry *core_get_se_deve_from_rtpi(
213 struct se_node_acl *nacl,
214 u16 rtpi)
215 {
216 struct se_dev_entry *deve;
217 struct se_lun *lun;
218 struct se_portal_group *tpg = nacl->se_tpg;
219
220 rcu_read_lock();
221 hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
222 lun = rcu_dereference(deve->se_lun);
223 if (!lun) {
224 pr_err("%s device entries device pointer is"
225 " NULL, but Initiator has access.\n",
226 tpg->se_tpg_tfo->get_fabric_name());
227 continue;
228 }
229 if (lun->lun_rtpi != rtpi)
230 continue;
231
232 kref_get(&deve->pr_kref);
233 rcu_read_unlock();
234
235 return deve;
236 }
237 rcu_read_unlock();
238
239 return NULL;
240 }
241
242 void core_free_device_list_for_node(
243 struct se_node_acl *nacl,
244 struct se_portal_group *tpg)
245 {
246 struct se_dev_entry *deve;
247
248 mutex_lock(&nacl->lun_entry_mutex);
249 hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
250 struct se_lun *lun = rcu_dereference_check(deve->se_lun,
251 lockdep_is_held(&nacl->lun_entry_mutex));
252 core_disable_device_list_for_node(lun, deve, nacl, tpg);
253 }
254 mutex_unlock(&nacl->lun_entry_mutex);
255 }
256
257 void core_update_device_list_access(
258 u32 mapped_lun,
259 u32 lun_access,
260 struct se_node_acl *nacl)
261 {
262 struct se_dev_entry *deve;
263
264 mutex_lock(&nacl->lun_entry_mutex);
265 deve = target_nacl_find_deve(nacl, mapped_lun);
266 if (deve) {
267 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
268 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
269 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
270 } else {
271 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
272 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
273 }
274 }
275 mutex_unlock(&nacl->lun_entry_mutex);
276 }
277
278 /*
279  * Called with rcu_read_lock() or nacl->lun_entry_mutex held.
280 */
281 struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u32 mapped_lun)
282 {
283 struct se_dev_entry *deve;
284
285 hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
286 if (deve->mapped_lun == mapped_lun)
287 return deve;
288
289 return NULL;
290 }
291 EXPORT_SYMBOL(target_nacl_find_deve);
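/*
 * Minimal reader-side sketch for target_nacl_find_deve(), matching the
 * usage in transport_lookup_cmd_lun() above:
 *
 *	rcu_read_lock();
 *	deve = target_nacl_find_deve(nacl, mapped_lun);
 *	if (deve)
 *		lun = rcu_dereference(deve->se_lun);
 *	rcu_read_unlock();
 *
 * Pointers loaded from *deve must not be used after rcu_read_unlock()
 * unless a reference (e.g. lun_ref or pr_kref) was taken inside the
 * critical section.
 */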
292
293 void target_pr_kref_release(struct kref *kref)
294 {
295 struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
296 pr_kref);
297 complete(&deve->pr_comp);
298 }
299
300 /* core_enable_device_list_for_node():
301 *
302  * Set up and publish the RCU-managed se_dev_entry mapping @mapped_lun to @lun for @nacl.
303 */
304 int core_enable_device_list_for_node(
305 struct se_lun *lun,
306 struct se_lun_acl *lun_acl,
307 u32 mapped_lun,
308 u32 lun_access,
309 struct se_node_acl *nacl,
310 struct se_portal_group *tpg)
311 {
312 struct se_port *port = lun->lun_sep;
313 struct se_dev_entry *orig, *new;
314
315 new = kzalloc(sizeof(*new), GFP_KERNEL);
316 if (!new) {
317 pr_err("Unable to allocate se_dev_entry memory\n");
318 return -ENOMEM;
319 }
320
321 atomic_set(&new->ua_count, 0);
322 spin_lock_init(&new->ua_lock);
323 INIT_LIST_HEAD(&new->alua_port_list);
324 INIT_LIST_HEAD(&new->ua_list);
325
326 new->mapped_lun = mapped_lun;
327 kref_init(&new->pr_kref);
328 init_completion(&new->pr_comp);
329
330 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
331 new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
332 else
333 new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
334
335 new->creation_time = get_jiffies_64();
336 new->attach_count++;
337
338 mutex_lock(&nacl->lun_entry_mutex);
339 orig = target_nacl_find_deve(nacl, mapped_lun);
340 if (orig && orig->se_lun) {
341 struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
342 lockdep_is_held(&nacl->lun_entry_mutex));
343
344 if (orig_lun != lun) {
345 pr_err("Existing orig->se_lun doesn't match new lun"
346 " for dynamic -> explicit NodeACL conversion:"
347 " %s\n", nacl->initiatorname);
348 mutex_unlock(&nacl->lun_entry_mutex);
349 kfree(new);
350 return -EINVAL;
351 }
352 BUG_ON(orig->se_lun_acl != NULL);
353
354 rcu_assign_pointer(new->se_lun, lun);
355 rcu_assign_pointer(new->se_lun_acl, lun_acl);
356 hlist_del_rcu(&orig->link);
357 hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
358 mutex_unlock(&nacl->lun_entry_mutex);
359
360 spin_lock_bh(&port->sep_alua_lock);
361 list_del(&orig->alua_port_list);
362 list_add_tail(&new->alua_port_list, &port->sep_alua_list);
363 spin_unlock_bh(&port->sep_alua_lock);
364
365 kref_put(&orig->pr_kref, target_pr_kref_release);
366 wait_for_completion(&orig->pr_comp);
367
368 kfree_rcu(orig, rcu_head);
369 return 0;
370 }
371
372 rcu_assign_pointer(new->se_lun, lun);
373 rcu_assign_pointer(new->se_lun_acl, lun_acl);
374 hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
375 mutex_unlock(&nacl->lun_entry_mutex);
376
377 spin_lock_bh(&port->sep_alua_lock);
378 list_add_tail(&new->alua_port_list, &port->sep_alua_list);
379 spin_unlock_bh(&port->sep_alua_lock);
380
381 return 0;
382 }
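/*
 * Update-side pattern used above, roughly:
 *
 *	rcu_assign_pointer(new->se_lun, lun);
 *	rcu_assign_pointer(new->se_lun_acl, lun_acl);
 *	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
 *
 * The new se_dev_entry is fully initialized before it is published, so
 * readers under rcu_read_lock() never observe a half-constructed entry.
 * A replaced (dynamic) entry is unhooked with hlist_del_rcu() and only
 * freed via kfree_rcu() after its pr_kref drops and pr_comp signals
 * that in-flight SPEC_I_PT / REGISTER_AND_MOVE users are done.
 */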
383
384 /*
385 * Called with se_node_acl->lun_entry_mutex held.
386 */
387 void core_disable_device_list_for_node(
388 struct se_lun *lun,
389 struct se_dev_entry *orig,
390 struct se_node_acl *nacl,
391 struct se_portal_group *tpg)
392 {
393 struct se_port *port = lun->lun_sep;
394 /*
395 * If the MappedLUN entry is being disabled, the entry in
396 * port->sep_alua_list must be removed now before clearing the
397 * struct se_dev_entry pointers below as logic in
398 * core_alua_do_transition_tg_pt() depends on these being present.
399 *
400 * deve->se_lun_acl will be NULL for demo-mode created LUNs
401 * that have not been explicitly converted to MappedLUNs ->
402 * struct se_lun_acl, but we remove deve->alua_port_list from
403 * port->sep_alua_list. This also means that active UAs and
404	 * NodeACL context-specific PR metadata for demo-mode
405	 * MappedLUN *deve will be released below.
406 */
407 spin_lock_bh(&port->sep_alua_lock);
408 list_del(&orig->alua_port_list);
409 spin_unlock_bh(&port->sep_alua_lock);
410 /*
411 * Disable struct se_dev_entry LUN ACL mapping
412 */
413 core_scsi3_ua_release_all(orig);
414
415 hlist_del_rcu(&orig->link);
416 rcu_assign_pointer(orig->se_lun, NULL);
417 rcu_assign_pointer(orig->se_lun_acl, NULL);
418 orig->lun_flags = 0;
419 orig->creation_time = 0;
420 orig->attach_count--;
421 /*
422	 * Before firing off the RCU callback, wait for any in-flight SPEC_I_PT=1
423 * or REGISTER_AND_MOVE PR operation to complete.
424 */
425 kref_put(&orig->pr_kref, target_pr_kref_release);
426 wait_for_completion(&orig->pr_comp);
427
428 kfree_rcu(orig, rcu_head);
429
430 core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
431 }
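/*
 * Readers that looked up @orig under rcu_read_lock() before the
 * hlist_del_rcu() above may keep dereferencing it until they leave
 * their critical section; the memory is only reclaimed after an RCU
 * grace period via kfree_rcu().
 */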
432
433 /* core_clear_lun_from_tpg():
434 *
435  * Disable every se_dev_entry in this TPG's node ACLs that maps to @lun.
436 */
437 void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
438 {
439 struct se_node_acl *nacl;
440 struct se_dev_entry *deve;
441
442 spin_lock_irq(&tpg->acl_node_lock);
443 list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
444 spin_unlock_irq(&tpg->acl_node_lock);
445
446 mutex_lock(&nacl->lun_entry_mutex);
447 hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
448 struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
449 lockdep_is_held(&nacl->lun_entry_mutex));
450
451 if (lun != tmp_lun)
452 continue;
453
454 core_disable_device_list_for_node(lun, deve, nacl, tpg);
455 }
456 mutex_unlock(&nacl->lun_entry_mutex);
457
458 spin_lock_irq(&tpg->acl_node_lock);
459 }
460 spin_unlock_irq(&tpg->acl_node_lock);
461 }
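/*
 * tpg->acl_node_lock is dropped and re-taken around the per-ACL work
 * above because core_disable_device_list_for_node() sleeps (it takes
 * nacl->lun_entry_mutex and waits for a completion), which is not
 * allowed under a spinlock held with interrupts disabled.
 */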
462
463 static struct se_port *core_alloc_port(struct se_device *dev)
464 {
465 struct se_port *port, *port_tmp;
466
467 port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
468 if (!port) {
469 pr_err("Unable to allocate struct se_port\n");
470 return ERR_PTR(-ENOMEM);
471 }
472 INIT_LIST_HEAD(&port->sep_alua_list);
473 INIT_LIST_HEAD(&port->sep_list);
474 atomic_set(&port->sep_tg_pt_secondary_offline, 0);
475 spin_lock_init(&port->sep_alua_lock);
476 mutex_init(&port->sep_tg_pt_md_mutex);
477
478 spin_lock(&dev->se_port_lock);
479 if (dev->dev_port_count == 0x0000ffff) {
480 pr_warn("Reached dev->dev_port_count =="
481 " 0x0000ffff\n");
482 spin_unlock(&dev->se_port_lock);
483 return ERR_PTR(-ENOSPC);
484 }
485 again:
486 /*
487 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
488 * Here is the table from spc4r17 section 7.7.3.8.
489 *
490 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
491 *
492 * Code Description
493 * 0h Reserved
494 * 1h Relative port 1, historically known as port A
495 * 2h Relative port 2, historically known as port B
496 * 3h to FFFFh Relative port 3 through 65 535
497 */
498 port->sep_rtpi = dev->dev_rpti_counter++;
499 if (!port->sep_rtpi)
500 goto again;
501
502 list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
503 /*
504 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
505 * for 16-bit wrap..
506 */
507 if (port->sep_rtpi == port_tmp->sep_rtpi)
508 goto again;
509 }
510 spin_unlock(&dev->se_port_lock);
511
512 return port;
513 }
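/*
 * RTPI allocation note: the value 0 is reserved (see the table above),
 * so it is skipped via the "goto again" path, and once the counter
 * wraps around the 16-bit RTPI space the dev_sep_list scan rejects any
 * identifier that is still in use by another port.
 */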
514
515 static void core_export_port(
516 struct se_device *dev,
517 struct se_portal_group *tpg,
518 struct se_port *port,
519 struct se_lun *lun)
520 {
521 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
522
523 spin_lock(&dev->se_port_lock);
524 spin_lock(&lun->lun_sep_lock);
525 port->sep_tpg = tpg;
526 port->sep_lun = lun;
527 lun->lun_sep = port;
528 spin_unlock(&lun->lun_sep_lock);
529
530 list_add_tail(&port->sep_list, &dev->dev_sep_list);
531 spin_unlock(&dev->se_port_lock);
532
533 if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
534 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
535 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
536 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
537 pr_err("Unable to allocate t10_alua_tg_pt"
538 "_gp_member_t\n");
539 return;
540 }
541 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
542 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
543 dev->t10_alua.default_tg_pt_gp);
544 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
545 pr_debug("%s/%s: Adding to default ALUA Target Port"
546 " Group: alua/default_tg_pt_gp\n",
547 dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
548 }
549
550 dev->dev_port_count++;
551 port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
552 }
553
554 /*
555 * Called with struct se_device->se_port_lock spinlock held.
556 */
557 static void core_release_port(struct se_device *dev, struct se_port *port)
558 __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
559 {
560 /*
561 * Wait for any port reference for PR ALL_TG_PT=1 operation
562 * to complete in __core_scsi3_alloc_registration()
563 */
564 spin_unlock(&dev->se_port_lock);
565 if (atomic_read(&port->sep_tg_pt_ref_cnt))
566 cpu_relax();
567 spin_lock(&dev->se_port_lock);
568
569 core_alua_free_tg_pt_gp_mem(port);
570
571 list_del(&port->sep_list);
572 dev->dev_port_count--;
573 kfree(port);
574 }
575
576 int core_dev_export(
577 struct se_device *dev,
578 struct se_portal_group *tpg,
579 struct se_lun *lun)
580 {
581 struct se_hba *hba = dev->se_hba;
582 struct se_port *port;
583
584 port = core_alloc_port(dev);
585 if (IS_ERR(port))
586 return PTR_ERR(port);
587
588 lun->lun_index = dev->dev_index;
589 lun->lun_se_dev = dev;
590 lun->lun_rtpi = port->sep_rtpi;
591
592 spin_lock(&hba->device_lock);
593 dev->export_count++;
594 spin_unlock(&hba->device_lock);
595
596 core_export_port(dev, tpg, port, lun);
597 return 0;
598 }
599
600 void core_dev_unexport(
601 struct se_device *dev,
602 struct se_portal_group *tpg,
603 struct se_lun *lun)
604 {
605 struct se_hba *hba = dev->se_hba;
606 struct se_port *port = lun->lun_sep;
607
608 spin_lock(&lun->lun_sep_lock);
609 if (lun->lun_se_dev == NULL) {
610 spin_unlock(&lun->lun_sep_lock);
611 return;
612 }
613 spin_unlock(&lun->lun_sep_lock);
614
615 spin_lock(&dev->se_port_lock);
616 core_release_port(dev, port);
617 spin_unlock(&dev->se_port_lock);
618
619 spin_lock(&hba->device_lock);
620 dev->export_count--;
621 spin_unlock(&hba->device_lock);
622
623 lun->lun_sep = NULL;
624 lun->lun_se_dev = NULL;
625 }
626
627 static void se_release_vpd_for_dev(struct se_device *dev)
628 {
629 struct t10_vpd *vpd, *vpd_tmp;
630
631 spin_lock(&dev->t10_wwn.t10_vpd_lock);
632 list_for_each_entry_safe(vpd, vpd_tmp,
633 &dev->t10_wwn.t10_vpd_list, vpd_list) {
634 list_del(&vpd->vpd_list);
635 kfree(vpd);
636 }
637 spin_unlock(&dev->t10_wwn.t10_vpd_lock);
638 }
639
640 static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
641 {
642 u32 aligned_max_sectors;
643 u32 alignment;
644 /*
645 * Limit max_sectors to a PAGE_SIZE aligned value for modern
646 * transport_allocate_data_tasks() operation.
647 */
648 alignment = max(1ul, PAGE_SIZE / block_size);
649 aligned_max_sectors = rounddown(max_sectors, alignment);
650
651 if (max_sectors != aligned_max_sectors)
652 pr_info("Rounding down aligned max_sectors from %u to %u\n",
653 max_sectors, aligned_max_sectors);
654
655 return aligned_max_sectors;
656 }
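/*
 * Example: with 512-byte blocks and 4 KiB pages the alignment is 8
 * sectors, so a hardware limit of 1025 sectors is rounded down to 1024.
 */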
657
658 bool se_dev_check_wce(struct se_device *dev)
659 {
660 bool wce = false;
661
662 if (dev->transport->get_write_cache)
663 wce = dev->transport->get_write_cache(dev);
664 else if (dev->dev_attrib.emulate_write_cache > 0)
665 wce = true;
666
667 return wce;
668 }
669
670 int se_dev_set_max_unmap_lba_count(
671 struct se_device *dev,
672 u32 max_unmap_lba_count)
673 {
674 dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
675 pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
676 dev, dev->dev_attrib.max_unmap_lba_count);
677 return 0;
678 }
679 EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);
680
681 int se_dev_set_max_unmap_block_desc_count(
682 struct se_device *dev,
683 u32 max_unmap_block_desc_count)
684 {
685 dev->dev_attrib.max_unmap_block_desc_count =
686 max_unmap_block_desc_count;
687 pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
688 dev, dev->dev_attrib.max_unmap_block_desc_count);
689 return 0;
690 }
691 EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);
692
693 int se_dev_set_unmap_granularity(
694 struct se_device *dev,
695 u32 unmap_granularity)
696 {
697 dev->dev_attrib.unmap_granularity = unmap_granularity;
698 pr_debug("dev[%p]: Set unmap_granularity: %u\n",
699 dev, dev->dev_attrib.unmap_granularity);
700 return 0;
701 }
702 EXPORT_SYMBOL(se_dev_set_unmap_granularity);
703
704 int se_dev_set_unmap_granularity_alignment(
705 struct se_device *dev,
706 u32 unmap_granularity_alignment)
707 {
708 dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
709 pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
710 dev, dev->dev_attrib.unmap_granularity_alignment);
711 return 0;
712 }
713 EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);
714
715 int se_dev_set_max_write_same_len(
716 struct se_device *dev,
717 u32 max_write_same_len)
718 {
719 dev->dev_attrib.max_write_same_len = max_write_same_len;
720 pr_debug("dev[%p]: Set max_write_same_len: %u\n",
721 dev, dev->dev_attrib.max_write_same_len);
722 return 0;
723 }
724 EXPORT_SYMBOL(se_dev_set_max_write_same_len);
725
726 static void dev_set_t10_wwn_model_alias(struct se_device *dev)
727 {
728 const char *configname;
729
730 configname = config_item_name(&dev->dev_group.cg_item);
731 if (strlen(configname) >= 16) {
732 pr_warn("dev[%p]: Backstore name '%s' is too long for "
733 "INQUIRY_MODEL, truncating to 16 bytes\n", dev,
734 configname);
735 }
736 snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
737 }
738
739 int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
740 {
741 if (dev->export_count) {
742 pr_err("dev[%p]: Unable to change model alias"
743 " while export_count is %d\n",
744 dev, dev->export_count);
745 return -EINVAL;
746 }
747
748 if (flag != 0 && flag != 1) {
749 pr_err("Illegal value %d\n", flag);
750 return -EINVAL;
751 }
752
753 if (flag) {
754 dev_set_t10_wwn_model_alias(dev);
755 } else {
756 strncpy(&dev->t10_wwn.model[0],
757 dev->transport->inquiry_prod, 16);
758 }
759 dev->dev_attrib.emulate_model_alias = flag;
760
761 return 0;
762 }
763 EXPORT_SYMBOL(se_dev_set_emulate_model_alias);
764
765 int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
766 {
767 printk_once(KERN_WARNING
768 "ignoring deprecated emulate_dpo attribute\n");
769 return 0;
770 }
771 EXPORT_SYMBOL(se_dev_set_emulate_dpo);
772
773 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
774 {
775 if (flag != 0 && flag != 1) {
776 pr_err("Illegal value %d\n", flag);
777 return -EINVAL;
778 }
779 if (flag &&
780 dev->transport->get_write_cache) {
781 pr_warn("emulate_fua_write not supported for this device, ignoring\n");
782 return 0;
783 }
784 if (dev->export_count) {
785 pr_err("emulate_fua_write cannot be changed with active"
786 " exports: %d\n", dev->export_count);
787 return -EINVAL;
788 }
789 dev->dev_attrib.emulate_fua_write = flag;
790 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
791 dev, dev->dev_attrib.emulate_fua_write);
792 return 0;
793 }
794 EXPORT_SYMBOL(se_dev_set_emulate_fua_write);
795
796 int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
797 {
798 printk_once(KERN_WARNING
799 "ignoring deprecated emulate_fua_read attribute\n");
800 return 0;
801 }
802 EXPORT_SYMBOL(se_dev_set_emulate_fua_read);
803
804 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
805 {
806 if (flag != 0 && flag != 1) {
807 pr_err("Illegal value %d\n", flag);
808 return -EINVAL;
809 }
810 if (flag &&
811 dev->transport->get_write_cache) {
812 pr_err("emulate_write_cache not supported for this device\n");
813 return -EINVAL;
814 }
815 if (dev->export_count) {
816 pr_err("emulate_write_cache cannot be changed with active"
817 " exports: %d\n", dev->export_count);
818 return -EINVAL;
819 }
820 dev->dev_attrib.emulate_write_cache = flag;
821 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
822 dev, dev->dev_attrib.emulate_write_cache);
823 return 0;
824 }
825 EXPORT_SYMBOL(se_dev_set_emulate_write_cache);
826
827 int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
828 {
829 if ((flag != 0) && (flag != 1) && (flag != 2)) {
830 pr_err("Illegal value %d\n", flag);
831 return -EINVAL;
832 }
833
834 if (dev->export_count) {
835 pr_err("dev[%p]: Unable to change SE Device"
836 " UA_INTRLCK_CTRL while export_count is %d\n",
837 dev, dev->export_count);
838 return -EINVAL;
839 }
840 dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
841 pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
842 dev, dev->dev_attrib.emulate_ua_intlck_ctrl);
843
844 return 0;
845 }
846 EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);
847
848 int se_dev_set_emulate_tas(struct se_device *dev, int flag)
849 {
850 if ((flag != 0) && (flag != 1)) {
851 pr_err("Illegal value %d\n", flag);
852 return -EINVAL;
853 }
854
855 if (dev->export_count) {
856 pr_err("dev[%p]: Unable to change SE Device TAS while"
857 " export_count is %d\n",
858 dev, dev->export_count);
859 return -EINVAL;
860 }
861 dev->dev_attrib.emulate_tas = flag;
862 pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
863 dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
864
865 return 0;
866 }
867 EXPORT_SYMBOL(se_dev_set_emulate_tas);
868
869 int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
870 {
871 if ((flag != 0) && (flag != 1)) {
872 pr_err("Illegal value %d\n", flag);
873 return -EINVAL;
874 }
875 /*
876 * We expect this value to be non-zero when generic Block Layer
877	 * Discard support is detected in iblock_create_virtdevice().
878 */
879 if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
880 pr_err("Generic Block Discard not supported\n");
881 return -ENOSYS;
882 }
883
884 dev->dev_attrib.emulate_tpu = flag;
885 pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
886 dev, flag);
887 return 0;
888 }
889 EXPORT_SYMBOL(se_dev_set_emulate_tpu);
890
891 int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
892 {
893 if ((flag != 0) && (flag != 1)) {
894 pr_err("Illegal value %d\n", flag);
895 return -EINVAL;
896 }
897 /*
898 * We expect this value to be non-zero when generic Block Layer
899	 * Discard support is detected in iblock_create_virtdevice().
900 */
901 if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
902 pr_err("Generic Block Discard not supported\n");
903 return -ENOSYS;
904 }
905
906 dev->dev_attrib.emulate_tpws = flag;
907 pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
908 dev, flag);
909 return 0;
910 }
911 EXPORT_SYMBOL(se_dev_set_emulate_tpws);
912
913 int se_dev_set_emulate_caw(struct se_device *dev, int flag)
914 {
915 if (flag != 0 && flag != 1) {
916 pr_err("Illegal value %d\n", flag);
917 return -EINVAL;
918 }
919 dev->dev_attrib.emulate_caw = flag;
920 pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
921 dev, flag);
922
923 return 0;
924 }
925 EXPORT_SYMBOL(se_dev_set_emulate_caw);
926
927 int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
928 {
929 if (flag != 0 && flag != 1) {
930 pr_err("Illegal value %d\n", flag);
931 return -EINVAL;
932 }
933 dev->dev_attrib.emulate_3pc = flag;
934 pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
935 dev, flag);
936
937 return 0;
938 }
939 EXPORT_SYMBOL(se_dev_set_emulate_3pc);
940
941 int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
942 {
943 int rc, old_prot = dev->dev_attrib.pi_prot_type;
944
945 if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
946 pr_err("Illegal value %d for pi_prot_type\n", flag);
947 return -EINVAL;
948 }
949 if (flag == 2) {
950 pr_err("DIF TYPE2 protection currently not supported\n");
951 return -ENOSYS;
952 }
953 if (dev->dev_attrib.hw_pi_prot_type) {
954 pr_warn("DIF protection enabled on underlying hardware,"
955 " ignoring\n");
956 return 0;
957 }
958 if (!dev->transport->init_prot || !dev->transport->free_prot) {
959 /* 0 is only allowed value for non-supporting backends */
960 if (flag == 0)
961 return 0;
962
963 pr_err("DIF protection not supported by backend: %s\n",
964 dev->transport->name);
965 return -ENOSYS;
966 }
967 if (!(dev->dev_flags & DF_CONFIGURED)) {
968 pr_err("DIF protection requires device to be configured\n");
969 return -ENODEV;
970 }
971 if (dev->export_count) {
972 pr_err("dev[%p]: Unable to change SE Device PROT type while"
973 " export_count is %d\n", dev, dev->export_count);
974 return -EINVAL;
975 }
976
977 dev->dev_attrib.pi_prot_type = flag;
978
979 if (flag && !old_prot) {
980 rc = dev->transport->init_prot(dev);
981 if (rc) {
982 dev->dev_attrib.pi_prot_type = old_prot;
983 return rc;
984 }
985
986 } else if (!flag && old_prot) {
987 dev->transport->free_prot(dev);
988 }
989 pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
990
991 return 0;
992 }
993 EXPORT_SYMBOL(se_dev_set_pi_prot_type);
994
995 int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
996 {
997 int rc;
998
999 if (!flag)
1000 return 0;
1001
1002 if (flag != 1) {
1003 pr_err("Illegal value %d for pi_prot_format\n", flag);
1004 return -EINVAL;
1005 }
1006 if (!dev->transport->format_prot) {
1007 pr_err("DIF protection format not supported by backend %s\n",
1008 dev->transport->name);
1009 return -ENOSYS;
1010 }
1011 if (!(dev->dev_flags & DF_CONFIGURED)) {
1012 pr_err("DIF protection format requires device to be configured\n");
1013 return -ENODEV;
1014 }
1015 if (dev->export_count) {
1016 pr_err("dev[%p]: Unable to format SE Device PROT type while"
1017 " export_count is %d\n", dev, dev->export_count);
1018 return -EINVAL;
1019 }
1020
1021 rc = dev->transport->format_prot(dev);
1022 if (rc)
1023 return rc;
1024
1025 pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
1026
1027 return 0;
1028 }
1029 EXPORT_SYMBOL(se_dev_set_pi_prot_format);
1030
1031 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1032 {
1033 if ((flag != 0) && (flag != 1)) {
1034 pr_err("Illegal value %d\n", flag);
1035 return -EINVAL;
1036 }
1037 dev->dev_attrib.enforce_pr_isids = flag;
1038 pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
1039 (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
1040 return 0;
1041 }
1042 EXPORT_SYMBOL(se_dev_set_enforce_pr_isids);
1043
1044 int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
1045 {
1046 if ((flag != 0) && (flag != 1)) {
1047 printk(KERN_ERR "Illegal value %d\n", flag);
1048 return -EINVAL;
1049 }
1050 if (dev->export_count) {
1051 pr_err("dev[%p]: Unable to set force_pr_aptpl while"
1052 " export_count is %d\n", dev, dev->export_count);
1053 return -EINVAL;
1054 }
1055
1056 dev->dev_attrib.force_pr_aptpl = flag;
1057 pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
1058 return 0;
1059 }
1060 EXPORT_SYMBOL(se_dev_set_force_pr_aptpl);
1061
1062 int se_dev_set_is_nonrot(struct se_device *dev, int flag)
1063 {
1064 if ((flag != 0) && (flag != 1)) {
1065 printk(KERN_ERR "Illegal value %d\n", flag);
1066 return -EINVAL;
1067 }
1068 dev->dev_attrib.is_nonrot = flag;
1069 pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
1070 dev, flag);
1071 return 0;
1072 }
1073 EXPORT_SYMBOL(se_dev_set_is_nonrot);
1074
1075 int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
1076 {
1077 if (flag != 0) {
1078 printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
1079 " reordering not implemented\n", dev);
1080 return -ENOSYS;
1081 }
1082 dev->dev_attrib.emulate_rest_reord = flag;
1083 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
1084 return 0;
1085 }
1086 EXPORT_SYMBOL(se_dev_set_emulate_rest_reord);
1087
1088 /*
1089 * Note, this can only be called on unexported SE Device Object.
1090 */
1091 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1092 {
1093 if (dev->export_count) {
1094 pr_err("dev[%p]: Unable to change SE Device TCQ while"
1095 " export_count is %d\n",
1096 dev, dev->export_count);
1097 return -EINVAL;
1098 }
1099 if (!queue_depth) {
1100 pr_err("dev[%p]: Illegal ZERO value for queue"
1101 "_depth\n", dev);
1102 return -EINVAL;
1103 }
1104
1105 if (queue_depth > dev->dev_attrib.queue_depth) {
1106 if (queue_depth > dev->dev_attrib.hw_queue_depth) {
1107 pr_err("dev[%p]: Passed queue_depth:"
1108 " %u exceeds TCM/SE_Device MAX"
1109 " TCQ: %u\n", dev, queue_depth,
1110 dev->dev_attrib.hw_queue_depth);
1111 return -EINVAL;
1112 }
1113 }
1114 dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
1115 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
1116 dev, queue_depth);
1117 return 0;
1118 }
1119 EXPORT_SYMBOL(se_dev_set_queue_depth);
1120
1121 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1122 {
1123 if (dev->export_count) {
1124 pr_err("dev[%p]: Unable to change SE Device"
1125 " optimal_sectors while export_count is %d\n",
1126 dev, dev->export_count);
1127 return -EINVAL;
1128 }
1129 if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
1130 pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1131 " greater than hw_max_sectors: %u\n", dev,
1132 optimal_sectors, dev->dev_attrib.hw_max_sectors);
1133 return -EINVAL;
1134 }
1135
1136 dev->dev_attrib.optimal_sectors = optimal_sectors;
1137 pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
1138 dev, optimal_sectors);
1139 return 0;
1140 }
1141 EXPORT_SYMBOL(se_dev_set_optimal_sectors);
1142
1143 int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1144 {
1145 if (dev->export_count) {
1146 pr_err("dev[%p]: Unable to change SE Device block_size"
1147 " while export_count is %d\n",
1148 dev, dev->export_count);
1149 return -EINVAL;
1150 }
1151
1152 if ((block_size != 512) &&
1153 (block_size != 1024) &&
1154 (block_size != 2048) &&
1155 (block_size != 4096)) {
1156 pr_err("dev[%p]: Illegal value for block_device: %u"
1157 " for SE device, must be 512, 1024, 2048 or 4096\n",
1158 dev, block_size);
1159 return -EINVAL;
1160 }
1161
1162 dev->dev_attrib.block_size = block_size;
1163 pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1164 dev, block_size);
1165
1166 if (dev->dev_attrib.max_bytes_per_io)
1167 dev->dev_attrib.hw_max_sectors =
1168 dev->dev_attrib.max_bytes_per_io / block_size;
1169
1170 return 0;
1171 }
1172 EXPORT_SYMBOL(se_dev_set_block_size);
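/*
 * Backends that cap transfers in bytes (max_bytes_per_io) have
 * hw_max_sectors rescaled here; e.g. a hypothetical 1 MiB per-I/O limit
 * with 4096-byte blocks yields hw_max_sectors = 256.
 */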
1173
1174 struct se_lun *core_dev_add_lun(
1175 struct se_portal_group *tpg,
1176 struct se_device *dev,
1177 u32 unpacked_lun)
1178 {
1179 struct se_lun *lun;
1180 int rc;
1181
1182 lun = core_tpg_alloc_lun(tpg, unpacked_lun);
1183 if (IS_ERR(lun))
1184 return lun;
1185
1186 rc = core_tpg_add_lun(tpg, lun,
1187 TRANSPORT_LUNFLAGS_READ_WRITE, dev);
1188 if (rc < 0)
1189 return ERR_PTR(rc);
1190
1191 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
1192 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1193 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1194 tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
1195 /*
1196 * Update LUN maps for dynamically added initiators when
1197 * generate_node_acl is enabled.
1198 */
1199 if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
1200 struct se_node_acl *acl;
1201 spin_lock_irq(&tpg->acl_node_lock);
1202 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
1203 if (acl->dynamic_node_acl &&
1204 (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
1205 !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
1206 spin_unlock_irq(&tpg->acl_node_lock);
1207 core_tpg_add_node_to_devs(acl, tpg);
1208 spin_lock_irq(&tpg->acl_node_lock);
1209 }
1210 }
1211 spin_unlock_irq(&tpg->acl_node_lock);
1212 }
1213
1214 return lun;
1215 }
1216
1217 /* core_dev_del_lun():
1218 *
1219  * Deactivate @lun and remove it from @tpg via core_tpg_remove_lun().
1220 */
1221 void core_dev_del_lun(
1222 struct se_portal_group *tpg,
1223 struct se_lun *lun)
1224 {
1225 pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from"
1226 " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
1227 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1228 tpg->se_tpg_tfo->get_fabric_name());
1229
1230 core_tpg_remove_lun(tpg, lun);
1231 }
1232
1233 struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
1234 {
1235 struct se_lun *lun;
1236
1237 spin_lock(&tpg->tpg_lun_lock);
1238 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1239 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
1240 "_PER_TPG-1: %u for Target Portal Group: %hu\n",
1241 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1242 TRANSPORT_MAX_LUNS_PER_TPG-1,
1243 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1244 spin_unlock(&tpg->tpg_lun_lock);
1245 return NULL;
1246 }
1247 lun = tpg->tpg_lun_list[unpacked_lun];
1248
1249 if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
1250 pr_err("%s Logical Unit Number: %u is not free on"
1251 " Target Portal Group: %hu, ignoring request.\n",
1252 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1253 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1254 spin_unlock(&tpg->tpg_lun_lock);
1255 return NULL;
1256 }
1257 spin_unlock(&tpg->tpg_lun_lock);
1258
1259 return lun;
1260 }
1261
1262 /* core_dev_get_lun():
1263 *
1264  * Return @tpg's se_lun for @unpacked_lun if it is ACTIVE, or NULL.
1265 */
1266 static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
1267 {
1268 struct se_lun *lun;
1269
1270 spin_lock(&tpg->tpg_lun_lock);
1271 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1272 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
1273 "_TPG-1: %u for Target Portal Group: %hu\n",
1274 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1275 TRANSPORT_MAX_LUNS_PER_TPG-1,
1276 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1277 spin_unlock(&tpg->tpg_lun_lock);
1278 return NULL;
1279 }
1280 lun = tpg->tpg_lun_list[unpacked_lun];
1281
1282 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
1283 pr_err("%s Logical Unit Number: %u is not active on"
1284 " Target Portal Group: %hu, ignoring request.\n",
1285 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1286 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1287 spin_unlock(&tpg->tpg_lun_lock);
1288 return NULL;
1289 }
1290 spin_unlock(&tpg->tpg_lun_lock);
1291
1292 return lun;
1293 }
1294
1295 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1296 struct se_portal_group *tpg,
1297 struct se_node_acl *nacl,
1298 u32 mapped_lun,
1299 int *ret)
1300 {
1301 struct se_lun_acl *lacl;
1302
1303 if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
1304 pr_err("%s InitiatorName exceeds maximum size.\n",
1305 tpg->se_tpg_tfo->get_fabric_name());
1306 *ret = -EOVERFLOW;
1307 return NULL;
1308 }
1309 lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
1310 if (!lacl) {
1311 pr_err("Unable to allocate memory for struct se_lun_acl.\n");
1312 *ret = -ENOMEM;
1313 return NULL;
1314 }
1315
1316 INIT_LIST_HEAD(&lacl->lacl_list);
1317 lacl->mapped_lun = mapped_lun;
1318 lacl->se_lun_nacl = nacl;
1319 snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
1320 nacl->initiatorname);
1321
1322 return lacl;
1323 }
1324
1325 int core_dev_add_initiator_node_lun_acl(
1326 struct se_portal_group *tpg,
1327 struct se_lun_acl *lacl,
1328 u32 unpacked_lun,
1329 u32 lun_access)
1330 {
1331 struct se_lun *lun;
1332 struct se_node_acl *nacl;
1333
1334 lun = core_dev_get_lun(tpg, unpacked_lun);
1335 if (!lun) {
1336 pr_err("%s Logical Unit Number: %u is not active on"
1337 " Target Portal Group: %hu, ignoring request.\n",
1338 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1339 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1340 return -EINVAL;
1341 }
1342
1343 nacl = lacl->se_lun_nacl;
1344 if (!nacl)
1345 return -EINVAL;
1346
1347 if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
1348 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
1349 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
1350
1351 lacl->se_lun = lun;
1352
1353 if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
1354 lun_access, nacl, tpg) < 0)
1355 return -EINVAL;
1356
1357 spin_lock(&lun->lun_acl_lock);
1358 list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
1359 atomic_inc_mb(&lun->lun_acl_count);
1360 spin_unlock(&lun->lun_acl_lock);
1361
1362 pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
1363 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
1364 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
1365 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
1366 lacl->initiatorname);
1367 /*
1368 * Check to see if there are any existing persistent reservation APTPL
1369 * pre-registrations that need to be enabled for this LUN ACL..
1370 */
1371 core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
1372 lacl->mapped_lun);
1373 return 0;
1374 }
1375
1376 int core_dev_del_initiator_node_lun_acl(
1377 struct se_portal_group *tpg,
1378 struct se_lun *lun,
1379 struct se_lun_acl *lacl)
1380 {
1381 struct se_node_acl *nacl;
1382 struct se_dev_entry *deve;
1383
1384 nacl = lacl->se_lun_nacl;
1385 if (!nacl)
1386 return -EINVAL;
1387
1388 spin_lock(&lun->lun_acl_lock);
1389 list_del(&lacl->lacl_list);
1390 atomic_dec_mb(&lun->lun_acl_count);
1391 spin_unlock(&lun->lun_acl_lock);
1392
1393 mutex_lock(&nacl->lun_entry_mutex);
1394 deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
1395 if (deve)
1396 core_disable_device_list_for_node(lun, deve, nacl, tpg);
1397 mutex_unlock(&nacl->lun_entry_mutex);
1398
1399 lacl->se_lun = NULL;
1400
1401 pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
1402 " InitiatorNode: %s Mapped LUN: %u\n",
1403 tpg->se_tpg_tfo->get_fabric_name(),
1404 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1405 lacl->initiatorname, lacl->mapped_lun);
1406
1407 return 0;
1408 }
1409
1410 void core_dev_free_initiator_node_lun_acl(
1411 struct se_portal_group *tpg,
1412 struct se_lun_acl *lacl)
1413 {
1414 pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
1415 " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1416 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1417 tpg->se_tpg_tfo->get_fabric_name(),
1418 lacl->initiatorname, lacl->mapped_lun);
1419
1420 kfree(lacl);
1421 }
1422
1423 static void scsi_dump_inquiry(struct se_device *dev)
1424 {
1425 struct t10_wwn *wwn = &dev->t10_wwn;
1426 char buf[17];
1427 int i, device_type;
1428 /*
1429 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1430 */
1431 for (i = 0; i < 8; i++)
1432 if (wwn->vendor[i] >= 0x20)
1433 buf[i] = wwn->vendor[i];
1434 else
1435 buf[i] = ' ';
1436 buf[i] = '\0';
1437 pr_debug(" Vendor: %s\n", buf);
1438
1439 for (i = 0; i < 16; i++)
1440 if (wwn->model[i] >= 0x20)
1441 buf[i] = wwn->model[i];
1442 else
1443 buf[i] = ' ';
1444 buf[i] = '\0';
1445 pr_debug(" Model: %s\n", buf);
1446
1447 for (i = 0; i < 4; i++)
1448 if (wwn->revision[i] >= 0x20)
1449 buf[i] = wwn->revision[i];
1450 else
1451 buf[i] = ' ';
1452 buf[i] = '\0';
1453 pr_debug(" Revision: %s\n", buf);
1454
1455 device_type = dev->transport->get_device_type(dev);
1456 pr_debug(" Type: %s ", scsi_device_type(device_type));
1457 }
1458
1459 struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1460 {
1461 struct se_device *dev;
1462 struct se_lun *xcopy_lun;
1463
1464 dev = hba->transport->alloc_device(hba, name);
1465 if (!dev)
1466 return NULL;
1467
1468 dev->dev_link_magic = SE_DEV_LINK_MAGIC;
1469 dev->se_hba = hba;
1470 dev->transport = hba->transport;
1471 dev->prot_length = sizeof(struct se_dif_v1_tuple);
1472
1473 INIT_LIST_HEAD(&dev->dev_list);
1474 INIT_LIST_HEAD(&dev->dev_sep_list);
1475 INIT_LIST_HEAD(&dev->dev_tmr_list);
1476 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1477 INIT_LIST_HEAD(&dev->state_list);
1478 INIT_LIST_HEAD(&dev->qf_cmd_list);
1479 INIT_LIST_HEAD(&dev->g_dev_node);
1480 spin_lock_init(&dev->execute_task_lock);
1481 spin_lock_init(&dev->delayed_cmd_lock);
1482 spin_lock_init(&dev->dev_reservation_lock);
1483 spin_lock_init(&dev->se_port_lock);
1484 spin_lock_init(&dev->se_tmr_lock);
1485 spin_lock_init(&dev->qf_cmd_lock);
1486 sema_init(&dev->caw_sem, 1);
1487 atomic_set(&dev->dev_ordered_id, 0);
1488 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
1489 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
1490 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
1491 INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
1492 spin_lock_init(&dev->t10_pr.registration_lock);
1493 spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
1494 INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
1495 spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
1496 INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
1497 spin_lock_init(&dev->t10_alua.lba_map_lock);
1498
1499 dev->t10_wwn.t10_dev = dev;
1500 dev->t10_alua.t10_dev = dev;
1501
1502 dev->dev_attrib.da_dev = dev;
1503 dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
1504 dev->dev_attrib.emulate_dpo = 1;
1505 dev->dev_attrib.emulate_fua_write = 1;
1506 dev->dev_attrib.emulate_fua_read = 1;
1507 dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
1508 dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
1509 dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
1510 dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
1511 dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
1512 dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
1513 dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
1514 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
1515 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
1516 dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
1517 dev->dev_attrib.is_nonrot = DA_IS_NONROT;
1518 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
1519 dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
1520 dev->dev_attrib.max_unmap_block_desc_count =
1521 DA_MAX_UNMAP_BLOCK_DESC_COUNT;
1522 dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
1523 dev->dev_attrib.unmap_granularity_alignment =
1524 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
1525 dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
1526
1527 xcopy_lun = &dev->xcopy_lun;
1528 xcopy_lun->lun_se_dev = dev;
1529 init_completion(&xcopy_lun->lun_shutdown_comp);
1530 INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
1531 spin_lock_init(&xcopy_lun->lun_acl_lock);
1532 spin_lock_init(&xcopy_lun->lun_sep_lock);
1533 init_completion(&xcopy_lun->lun_ref_comp);
1534
1535 return dev;
1536 }
1537
1538 int target_configure_device(struct se_device *dev)
1539 {
1540 struct se_hba *hba = dev->se_hba;
1541 int ret;
1542
1543 if (dev->dev_flags & DF_CONFIGURED) {
1544 pr_err("se_dev->se_dev_ptr already set for storage"
1545 " object\n");
1546 return -EEXIST;
1547 }
1548
1549 ret = dev->transport->configure_device(dev);
1550 if (ret)
1551 goto out;
1552 /*
1553 * XXX: there is not much point to have two different values here..
1554 */
1555 dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
1556 dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
1557
1558 /*
1559 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
1560 */
1561 dev->dev_attrib.hw_max_sectors =
1562 se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
1563 dev->dev_attrib.hw_block_size);
1564 dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
1565
1566 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1567 dev->creation_time = get_jiffies_64();
1568
1569 ret = core_setup_alua(dev);
1570 if (ret)
1571 goto out;
1572
1573 /*
1574 * Startup the struct se_device processing thread
1575 */
1576 dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
1577 dev->transport->name);
1578 if (!dev->tmr_wq) {
1579 pr_err("Unable to create tmr workqueue for %s\n",
1580 dev->transport->name);
1581 ret = -ENOMEM;
1582 goto out_free_alua;
1583 }
1584
1585 /*
1586 * Setup work_queue for QUEUE_FULL
1587 */
1588 INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1589
1590 /*
1591 * Preload the initial INQUIRY const values if we are doing
1592 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1593 * passthrough because this is being provided by the backend LLD.
1594 */
1595 if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
1596 strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1597 strncpy(&dev->t10_wwn.model[0],
1598 dev->transport->inquiry_prod, 16);
1599 strncpy(&dev->t10_wwn.revision[0],
1600 dev->transport->inquiry_rev, 4);
1601 }
1602
1603 scsi_dump_inquiry(dev);
1604
1605 spin_lock(&hba->device_lock);
1606 hba->dev_count++;
1607 spin_unlock(&hba->device_lock);
1608
1609 mutex_lock(&g_device_mutex);
1610 list_add_tail(&dev->g_dev_node, &g_device_list);
1611 mutex_unlock(&g_device_mutex);
1612
1613 dev->dev_flags |= DF_CONFIGURED;
1614
1615 return 0;
1616
1617 out_free_alua:
1618 core_alua_free_lu_gp_mem(dev);
1619 out:
1620 se_release_vpd_for_dev(dev);
1621 return ret;
1622 }
1623
1624 void target_free_device(struct se_device *dev)
1625 {
1626 struct se_hba *hba = dev->se_hba;
1627
1628 WARN_ON(!list_empty(&dev->dev_sep_list));
1629
1630 if (dev->dev_flags & DF_CONFIGURED) {
1631 destroy_workqueue(dev->tmr_wq);
1632
1633 mutex_lock(&g_device_mutex);
1634 list_del(&dev->g_dev_node);
1635 mutex_unlock(&g_device_mutex);
1636
1637 spin_lock(&hba->device_lock);
1638 hba->dev_count--;
1639 spin_unlock(&hba->device_lock);
1640 }
1641
1642 core_alua_free_lu_gp_mem(dev);
1643 core_alua_set_lba_map(dev, NULL, 0, 0);
1644 core_scsi3_free_all_registrations(dev);
1645 se_release_vpd_for_dev(dev);
1646
1647 if (dev->transport->free_prot)
1648 dev->transport->free_prot(dev);
1649
1650 dev->transport->free_device(dev);
1651 }
1652
1653 int core_dev_setup_virtual_lun0(void)
1654 {
1655 struct se_hba *hba;
1656 struct se_device *dev;
1657 char buf[] = "rd_pages=8,rd_nullio=1";
1658 int ret;
1659
1660 hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
1661 if (IS_ERR(hba))
1662 return PTR_ERR(hba);
1663
1664 dev = target_alloc_device(hba, "virt_lun0");
1665 if (!dev) {
1666 ret = -ENOMEM;
1667 goto out_free_hba;
1668 }
1669
1670 hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
1671
1672 ret = target_configure_device(dev);
1673 if (ret)
1674 goto out_free_se_dev;
1675
1676 lun0_hba = hba;
1677 g_lun0_dev = dev;
1678 return 0;
1679
1680 out_free_se_dev:
1681 target_free_device(dev);
1682 out_free_hba:
1683 core_delete_hba(hba);
1684 return ret;
1685 }
1686
1687
1688 void core_dev_release_virtual_lun0(void)
1689 {
1690 struct se_hba *hba = lun0_hba;
1691
1692 if (!hba)
1693 return;
1694
1695 if (g_lun0_dev)
1696 target_free_device(g_lun0_dev);
1697 core_delete_hba(hba);
1698 }
1699
1700 /*
1701 * Common CDB parsing for kernel and user passthrough.
1702 */
1703 sense_reason_t
1704 passthrough_parse_cdb(struct se_cmd *cmd,
1705 sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
1706 {
1707 unsigned char *cdb = cmd->t_task_cdb;
1708
1709 /*
1710	 * Clear a LUN set in the CDB (byte 1, bits 5-7) if the initiator
1711	 * talking to us spoke an old standards version, as we can't assume
1712	 * the underlying device won't choke up on it.
1713 */
1714 switch (cdb[0]) {
1715 case READ_10: /* SBC - RDProtect */
1716 case READ_12: /* SBC - RDProtect */
1717 case READ_16: /* SBC - RDProtect */
1718 case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1719 case VERIFY: /* SBC - VRProtect */
1720 case VERIFY_16: /* SBC - VRProtect */
1721 case WRITE_VERIFY: /* SBC - VRProtect */
1722 case WRITE_VERIFY_12: /* SBC - VRProtect */
1723 case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
1724 break;
1725 default:
1726 cdb[1] &= 0x1f; /* clear logical unit number */
1727 break;
1728 }
1729
1730 /*
1731 * For REPORT LUNS we always need to emulate the response, for everything
1732 * else, pass it up.
1733 */
1734 if (cdb[0] == REPORT_LUNS) {
1735 cmd->execute_cmd = spc_emulate_report_luns;
1736 return TCM_NO_SENSE;
1737 }
1738
1739 /* Set DATA_CDB flag for ops that should have it */
1740 switch (cdb[0]) {
1741 case READ_6:
1742 case READ_10:
1743 case READ_12:
1744 case READ_16:
1745 case WRITE_6:
1746 case WRITE_10:
1747 case WRITE_12:
1748 case WRITE_16:
1749 case WRITE_VERIFY:
1750 case WRITE_VERIFY_12:
1751 case 0x8e: /* WRITE_VERIFY_16 */
1752 case COMPARE_AND_WRITE:
1753 case XDWRITEREAD_10:
1754 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1755 break;
1756 case VARIABLE_LENGTH_CMD:
1757 switch (get_unaligned_be16(&cdb[8])) {
1758 case READ_32:
1759 case WRITE_32:
1760 case 0x0c: /* WRITE_VERIFY_32 */
1761 case XDWRITEREAD_32:
1762 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1763 break;
1764 }
1765 }
1766
1767 cmd->execute_cmd = exec_cmd;
1768
1769 return TCM_NO_SENSE;
1770 }
1771 EXPORT_SYMBOL(passthrough_parse_cdb);