/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

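/*
 * Resolve an incoming unpacked LUN to this session's mapped se_lun and
 * backing se_device for regular I/O. The lookup runs under rcu_read_lock(),
 * rejects writes to read-only mappings, pins the LUN via its percpu
 * lun_ref, and falls back to the TPG's virtual LUN 0 so that REPORT LUNS
 * still works when no MappedLUN=0 exists for the initiator.
 */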
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

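/*
 * Resolve the LUN for a task management request. Unlike the command path
 * above, no percpu lun_ref is taken (see the XXX below); the se_tmr_req is
 * linked onto the device's dev_tmr_list under se_tmr_lock so TMR processing
 * can find it later.
 */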
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_tmr->tmr_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}
	/*
	 * XXX: Add percpu se_lun->lun_ref reference count for TMR
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

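/*
 * Return true if this session's mapping for cmd->orig_fe_lun is currently
 * flagged TRANSPORT_LUNFLAGS_READ_ONLY. A missing se_dev_entry yields
 * false; the lookup paths above already reject unknown LUNs.
 */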
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

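/*
 * Flip an existing se_dev_entry between read-write and read-only under
 * nacl->lun_entry_mutex, so a MappedLUN's write protection can be changed
 * after the entry was created. A missing entry is silently ignored.
 */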
void core_update_device_list_access(
	u64 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve) {
		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

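/*
 * Queue a REPORTED LUNS DATA HAS CHANGED unit attention (ASC 0x3F) on every
 * se_dev_entry mapped by this NodeACL, optionally skipping the newly added
 * entry itself.
 */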
static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

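/*
 * Allocate and publish a new se_dev_entry mapping for this NodeACL via RCU.
 * If an entry for mapped_lun already exists (a demo-mode LUN being converted
 * to an explicit MappedLUN), the original is unhashed and replaced, with a
 * kref_put()/wait_for_completion() pair ensuring in-flight PR logic drops
 * its reference before the old entry is freed through kfree_rcu().
 */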
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	atomic_set(&new->ua_count, 0);
	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	else
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;

	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		BUG_ON(orig->se_lun_acl != NULL);

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

/*
 * Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);
	orig->lun_flags = 0;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/* core_clear_lun_from_tpg():
 *
 * Walk every NodeACL in the TPG and disable any se_dev_entry that still
 * maps the given lun.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

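/*
 * Assign the next free RELATIVE TARGET PORT IDENTIFIER to a LUN being
 * exported. The counter skips 0 (reserved by SPC-4) and the list walk below
 * guards against handing out a duplicate after the 16-bit counter wraps.
 */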
int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code			Description
	 * 0h			Reserved
	 * 1h			Relative port 1, historically known as port A
	 * 2h			Relative port 2, historically known as port B
	 * 3h to FFFFh		Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

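/*
 * Round max_sectors down to a multiple of PAGE_SIZE / block_size so each
 * request maps to whole pages. For example, with 4k pages and 512-byte
 * blocks the alignment is 8 sectors, so a max_sectors of 1029 would be
 * rounded down to 1024.
 */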
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

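/*
 * Activate a LUN for a device within a TPG. After core_tpg_add_lun()
 * succeeds, any dynamically generated (demo-mode) NodeACLs are remapped so
 * existing initiators immediately see the new LUN.
 */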
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/* core_dev_del_lun():
 *
 * Deactivate a Logical Unit and remove it from its portal group.
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		 nacl->initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	u32 lun_access)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

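/*
 * Allocate a backend se_device via the HBA's alloc_device() hook and
 * initialize every list, lock and default attribute the core relies on,
 * including the built-in xcopy_lun used for local EXTENDED COPY I/O.
 * The device is not usable until target_configure_device() runs.
 */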
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->prot_length = sizeof(struct se_dif_v1_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_ref_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	return dev;
}

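/*
 * Transition a device from allocated to configured: let the backend probe
 * its block size and limits, set up ALUA and the TMR workqueue, preload
 * INQUIRY defaults for non-passthrough backends, and finally link the
 * device into g_device_list and mark it DF_CONFIGURED.
 */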
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
			" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

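/*
 * Build the global virtual LUN 0 backed by an internal rd_mcp ramdisk
 * (rd_nullio=1, so it carries no data) on a hidden HBA, so the
 * tpg_virt_lun0 fallback in transport_lookup_cmd_lun() above always has a
 * device to return.
 */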
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * Clear a lun set in the cdb if the initiator talking to us spoke
	 * an old standards version, as we can't assume the underlying device
	 * won't choke up on it.
	 */
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case 0x8e: /* WRITE_VERIFY_16 */
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case 0x0c: /* WRITE_VERIFY_32 */
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);