/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);

static struct se_hba *lun0_hba;
static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

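/*
 * Resolve the fabric-supplied unpacked LUN to this session's se_dev_entry,
 * attach the backing se_lun/se_device to the command, and account the I/O.
 * Falls back to the TPG's virtual LUN 0 so discovery CDBs such as
 * REPORT LUNS still work when no MappedLUN=0 exists for the initiator.
 */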
int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;
		deve->total_bytes += se_cmd->data_length;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return -EACCES;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		deve->deve_cmds++;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return -ENODEV;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			return -EACCES;
		}

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irqsave(&dev->stats_lock, flags);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irqrestore(&dev->stats_lock, flags);

	/*
	 * Add this se_cmd to the struct se_lun's cmd list.  This list is used
	 * for tracking state of struct se_cmds during LUN shutdown events.
	 */
	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
	atomic_set(&se_cmd->transport_lun_active, 1);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

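/*
 * TMR variant of the lookup above: resolve unpacked_lun for a task
 * management request and link the se_tmr_req onto the device's TMR list.
 */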
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries lun->lun_sep port pointer"
				" is NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}

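/*
 * Release every active MappedLUN for a NodeACL and free the backing
 * device_list array; used when the NodeACL itself is being torn down.
 */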
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	kfree(nacl->device_list);
	nacl->device_list = NULL;

	return 0;
}

void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&se_nacl->device_list_lock);
	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
	deve->deve_cmds--;
	spin_unlock_irq(&se_nacl->device_list_lock);
}

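/*
 * Toggle an existing MappedLUN between read-write and read-only under
 * the NodeACL's device_list_lock.
 */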
void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* core_update_device_list_for_node():
 *
 *	Create, update or disable the struct se_dev_entry for a MappedLUN.
 */
int core_update_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg,
	int enable)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
	int trans = 0;
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 */
	if (!enable) {
		/*
		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
		 * that have not been explicitly converted to MappedLUNs ->
		 * struct se_lun_acl, but we remove deve->alua_port_list from
		 * port->sep_alua_list.  This also means that active UAs and
		 * NodeACL context specific PR metadata for demo-mode
		 * MappedLUN *deve will be released below..
		 */
		spin_lock_bh(&port->sep_alua_lock);
		list_del(&deve->alua_port_list);
		spin_unlock_bh(&port->sep_alua_lock);
	}

	spin_lock_irq(&nacl->device_list_lock);
	if (enable) {
		/*
		 * Check if the call is handling demo mode -> explicit LUN ACL
		 * transition.  This transition must be for the same struct se_lun
		 * + mapped_lun that was setup in demo mode..
		 */
		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
			if (deve->se_lun_acl != NULL) {
				pr_err("struct se_dev_entry->se_lun_acl"
					" already set for demo mode -> explicit"
					" LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -EINVAL;
			}
			if (deve->se_lun != lun) {
				pr_err("struct se_dev_entry->se_lun does"
					" not match passed struct se_lun for"
					" demo mode -> explicit LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -EINVAL;
			}
			deve->se_lun_acl = lun_acl;
			trans = 1;
		} else {
			deve->se_lun = lun;
			deve->se_lun_acl = lun_acl;
			deve->mapped_lun = mapped_lun;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
		}

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		if (trans) {
			spin_unlock_irq(&nacl->device_list_lock);
			return 0;
		}
		deve->creation_time = get_jiffies_64();
		deve->attach_count++;
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&port->sep_alua_lock);
		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
		spin_unlock_bh(&port->sep_alua_lock);

		return 0;
	}
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	spin_unlock_irq(&nacl->device_list_lock);
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();
	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

/* core_clear_lun_from_tpg():
 *
 *	Disable every NodeACL MappedLUN that references the passed se_lun.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = &nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_update_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg, 0);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}

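/*
 * Allocate a struct se_port and assign the next free RELATIVE TARGET PORT
 * IDENTIFIER (RTPI) for the device, skipping 0h (reserved) and any value
 * already in use on dev->dev_sep_list after a 16-bit counter wrap.
 */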
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code           Description
	 * 0h             Reserved
	 * 1h             Relative port 1, historically known as port A
	 * 2h             Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}

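/*
 * Bind an allocated se_port to a TPG + LUN and, when SPC-3 ALUA emulation
 * is active, attach it to the subsystem's default ALUA target port group.
 */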
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
				"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			su_dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	while (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

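/*
 * Export/unexport entry points used when a LUN is activated or removed in a
 * TPG: core_dev_export() allocates an RTPI-backed se_port, marks the device
 * started, and bumps dev_export_obj; core_dev_unexport() reverses all three.
 */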
int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;
	se_dev_start(dev);

	atomic_inc(&dev->dev_export_obj.obj_access_count);
	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	atomic_dec(&dev->dev_export_obj.obj_access_count);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	se_dev_stop(dev);
	lun->lun_se_dev = NULL;
}

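/*
 * Emulated REPORT LUNS (SPC): the response carries an 8-byte header whose
 * first 4 bytes hold the LUN LIST LENGTH (number of entries * 8), followed
 * by one 8-byte LUN entry per MappedLUN visible to this session.
 */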
int target_report_luns(struct se_task *se_task)
{
	struct se_cmd *se_cmd = se_task->task_se_cmd;
	struct se_dev_entry *deve;
	struct se_lun *se_lun;
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned char *buf;
	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;

	buf = transport_kmap_first_data_page(se_cmd);

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!se_sess) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &se_sess->se_node_acl->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		se_lun = deve->se_lun;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if ((cdb_offset + 8) >= se_cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
		cdb_offset += 8;
	}
	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);

	transport_kunmap_first_data_page(se_cmd);

	se_task->task_scsi_status = GOOD;
	transport_complete_task(se_task, 1);
	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

/* se_release_device_for_hba():
 *
 *	Stop the device, detach it from its HBA, and free all associated state.
 */
void se_release_device_for_hba(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
		se_dev_stop(dev);

	if (dev->dev_ptr) {
		kthread_stop(dev->process_thread);
		if (dev->transport->free_device)
			dev->transport->free_device(dev->dev_ptr);
	}

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	kfree(dev);
}

void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
}

/* se_free_virtual_device():
 *
 *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
	if (!list_empty(&dev->dev_sep_list))
		dump_stack();

	core_alua_free_lu_gp_mem(dev);
	se_release_device_for_hba(dev);

	return 0;
}

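/*
 * se_dev_start()/se_dev_stop() refcount exports via dev_obj.obj_access_count;
 * only the first start and the last stop actually flip dev_status between
 * the (OFFLINE_)DEACTIVATED and (OFFLINE_)ACTIVATED states.
 */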
static void se_dev_start(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_inc(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
			dev->dev_status &=
				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

static void se_dev_stop(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_dec(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

int se_dev_check_online(struct se_device *dev)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->dev_status_lock, flags);
	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
	spin_unlock_irqrestore(&dev->dev_status_lock, flags);

	return ret;
}

int se_dev_check_shutdown(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}

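/*
 * Clamp max_sectors so that (max_sectors * block_size) is PAGE_SIZE aligned,
 * as expected by transport_allocate_data_tasks().  For example, with a
 * 512-byte block size and 4K pages, max_sectors = 1003 (513536 bytes) is
 * rounded down to 1000 (512000 bytes, a whole number of pages).
 */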
u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 tmp, aligned_max_sectors;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
	aligned_max_sectors = (tmp / block_size);
	if (max_sectors != aligned_max_sectors) {
		printk(KERN_INFO "Rounding down aligned max_sectors from %u"
			" to %u\n", max_sectors, aligned_max_sectors);
		return aligned_max_sectors;
	}

	return max_sectors;
}

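/*
 * Seed a new device's se_dev_attrib with the DA_* defaults, then derive
 * block size, max/optimal sectors and queue depth from the subsystem
 * plugin's struct se_dev_limits.
 */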
void se_dev_set_default_attribs(
	struct se_device *dev,
	struct se_dev_limits *dev_limits)
{
	struct queue_limits *limits = &dev_limits->limits;

	dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
	dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	/*
	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
	 * iblock_create_virtdevice() from struct queue_limits values
	 * if blk_queue_discard()==1
	 */
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	/*
	 * block_size is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
	dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
	/*
	 * max_sectors is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
						limits->logical_block_size);
	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
	/*
	 * Set optimal_sectors from max_sectors, which can be lowered via
	 * configfs.
	 */
	dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
	/*
	 * queue_depth is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
	dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}

int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
		dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
	return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	pr_err("dpo_emulated not supported\n");
	return -EINVAL;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->transport->fua_write_emulated == 0) {
		pr_err("fua_write_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	pr_err("fua_read emulated not supported\n");
	return -EINVAL;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->transport->write_cache_emulated == 0) {
		pr_err("write_cache_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
			" exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard supported is detected iblock_create_virtdevice().
	 */
	if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard supported is detected iblock_create_virtdevice().
	 */
	if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	u32 orig_queue_depth = dev->queue_depth;

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
			if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	if (queue_depth > orig_queue_depth)
		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
	else if (queue_depth < orig_queue_depth)
		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);

	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
		dev, queue_depth);
	return 0;
}

int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
{
	int force = 0; /* Force setting for VDEVS */

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" max_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" max_sectors\n", dev);
		return -EINVAL;
	}
	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, max_sectors,
				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (!force && (max_sectors >
				 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
			pr_err("dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors"
				": %u, use force=1 to override.\n", dev,
				max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	max_sectors = se_dev_align_max_sectors(max_sectors,
				dev->se_sub_dev->se_dev_attrib.block_size);

	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
		dev, max_sectors);
	return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
			" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than max_sectors: %u\n", dev,
			optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
		dev, optimal_sectors);
	return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device, use for Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
		dev, block_size);
	return 0;
}

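/*
 * Activate a new LUN for a device within a TPG and, when the fabric runs in
 * demo mode, refresh the LUN maps of any dynamically generated NodeACLs so
 * they immediately see the new Logical Unit.
 */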
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_hba *hba,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	u32 lun_access = 0;

	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
		pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
			atomic_read(&dev->dev_access_obj.obj_access_count));
		return NULL;
	}

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if ((IS_ERR(lun_p)) || !lun_p)
		return NULL;

	if (dev->dev_flags & DF_READ_ONLY)
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	else
		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;

	if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
		return NULL;

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun_p;
}

/* core_dev_del_lun():
 *
 *	Deactivate and remove a LUN from a TPG.
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;
	int ret = 0;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
	if (!lun)
		return ret;

	core_tpg_post_dellun(tpg, lun);

	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}

1371 | struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun) | |
1372 | { | |
1373 | struct se_lun *lun; | |
1374 | ||
1375 | spin_lock(&tpg->tpg_lun_lock); | |
1376 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | |
6708bb27 | 1377 | pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS" |
c66ac9db | 1378 | "_PER_TPG-1: %u for Target Portal Group: %hu\n", |
e3d6f909 | 1379 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
c66ac9db | 1380 | TRANSPORT_MAX_LUNS_PER_TPG-1, |
e3d6f909 | 1381 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
c66ac9db NB |
1382 | spin_unlock(&tpg->tpg_lun_lock); |
1383 | return NULL; | |
1384 | } | |
1385 | lun = &tpg->tpg_lun_list[unpacked_lun]; | |
1386 | ||
1387 | if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { | |
6708bb27 | 1388 | pr_err("%s Logical Unit Number: %u is not free on" |
c66ac9db | 1389 | " Target Portal Group: %hu, ignoring request.\n", |
e3d6f909 AG |
1390 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
1391 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); | |
c66ac9db NB |
1392 | spin_unlock(&tpg->tpg_lun_lock); |
1393 | return NULL; | |
1394 | } | |
1395 | spin_unlock(&tpg->tpg_lun_lock); | |
1396 | ||
1397 | return lun; | |
1398 | } | |
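/*
 * Note: core_get_lun_from_tpg() demands a FREE slot and is used when
 * creating a new LUN, while core_dev_get_lun() below demands an ACTIVE
 * one and is used when mapping initiator ACLs onto an existing LUN.
 * Apart from that status check, the two lookups are identical.
 */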
1399 | ||
1400 | /* core_dev_get_lun(): | |
1401 | * | |
1402 | * Return the ACTIVE struct se_lun at @unpacked_lun in @tpg, or NULL. | |
1403 | */ | |
1404 | static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun) | |
1405 | { | |
1406 | struct se_lun *lun; | |
1407 | ||
1408 | spin_lock(&tpg->tpg_lun_lock); | |
1409 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | |
6708bb27 | 1410 | pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG-1:"
c66ac9db | 1411 | " %u for Target Portal Group: %hu\n",
e3d6f909 | 1412 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
c66ac9db | 1413 | TRANSPORT_MAX_LUNS_PER_TPG-1, |
e3d6f909 | 1414 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
c66ac9db NB |
1415 | spin_unlock(&tpg->tpg_lun_lock); |
1416 | return NULL; | |
1417 | } | |
1418 | lun = &tpg->tpg_lun_list[unpacked_lun]; | |
1419 | ||
1420 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { | |
6708bb27 | 1421 | pr_err("%s Logical Unit Number: %u is not active on" |
c66ac9db | 1422 | " Target Portal Group: %hu, ignoring request.\n", |
e3d6f909 AG |
1423 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
1424 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); | |
c66ac9db NB |
1425 | spin_unlock(&tpg->tpg_lun_lock); |
1426 | return NULL; | |
1427 | } | |
1428 | spin_unlock(&tpg->tpg_lun_lock); | |
1429 | ||
1430 | return lun; | |
1431 | } | |
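/*
 * core_dev_get_lun() is static; its caller in this file is
 * core_dev_add_initiator_node_lun_acl() below, which relies on the
 * ACTIVE check above to reject MappedLUNs pointing at unexported LUNs.
 */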
1432 | ||
1433 | struct se_lun_acl *core_dev_init_initiator_node_lun_acl( | |
1434 | struct se_portal_group *tpg, | |
1435 | u32 mapped_lun, | |
1436 | char *initiatorname, | |
1437 | int *ret) | |
1438 | { | |
1439 | struct se_lun_acl *lacl; | |
1440 | struct se_node_acl *nacl; | |
1441 | ||
60d645a4 | 1442 | if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { |
6708bb27 | 1443 | pr_err("%s InitiatorName exceeds maximum size.\n", |
e3d6f909 | 1444 | tpg->se_tpg_tfo->get_fabric_name()); |
c66ac9db NB |
1445 | *ret = -EOVERFLOW; |
1446 | return NULL; | |
1447 | } | |
1448 | nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); | |
6708bb27 | 1449 | if (!nacl) { |
c66ac9db NB |
1450 | *ret = -EINVAL; |
1451 | return NULL; | |
1452 | } | |
1453 | lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); | |
6708bb27 AG |
1454 | if (!lacl) { |
1455 | pr_err("Unable to allocate memory for struct se_lun_acl.\n"); | |
c66ac9db NB |
1456 | *ret = -ENOMEM; |
1457 | return NULL; | |
1458 | } | |
1459 | ||
1460 | INIT_LIST_HEAD(&lacl->lacl_list); | |
1461 | lacl->mapped_lun = mapped_lun; | |
1462 | lacl->se_lun_nacl = nacl; | |
1463 | snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | |
1464 | ||
1465 | return lacl; | |
1466 | } | |
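/*
 * The returned se_lun_acl is only allocated and named here; it is not
 * wired into any LUN until core_dev_add_initiator_node_lun_acl() runs,
 * so on a later failure the caller can still release it via
 * core_dev_free_initiator_node_lun_acl(). See the sketch after that
 * function.
 */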
1467 | ||
1468 | int core_dev_add_initiator_node_lun_acl( | |
1469 | struct se_portal_group *tpg, | |
1470 | struct se_lun_acl *lacl, | |
1471 | u32 unpacked_lun, | |
1472 | u32 lun_access) | |
1473 | { | |
1474 | struct se_lun *lun; | |
1475 | struct se_node_acl *nacl; | |
1476 | ||
1477 | lun = core_dev_get_lun(tpg, unpacked_lun); | |
6708bb27 AG |
1478 | if (!lun) { |
1479 | pr_err("%s Logical Unit Number: %u is not active on" | |
c66ac9db | 1480 | " Target Portal Group: %hu, ignoring request.\n", |
e3d6f909 AG |
1481 | tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, |
1482 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); | |
c66ac9db NB |
1483 | return -EINVAL; |
1484 | } | |
1485 | ||
1486 | nacl = lacl->se_lun_nacl; | |
6708bb27 | 1487 | if (!nacl) |
c66ac9db NB |
1488 | return -EINVAL; |
1489 | ||
1490 | if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && | |
1491 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)) | |
1492 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | |
1493 | ||
1494 | lacl->se_lun = lun; | |
1495 | ||
1496 | if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun, | |
1497 | lun_access, nacl, tpg, 1) < 0) | |
1498 | return -EINVAL; | |
1499 | ||
1500 | spin_lock(&lun->lun_acl_lock); | |
1501 | list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); | |
1502 | atomic_inc(&lun->lun_acl_count); | |
1503 | smp_mb__after_atomic_inc(); | |
1504 | spin_unlock(&lun->lun_acl_lock); | |
1505 | ||
6708bb27 | 1506 | pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
e3d6f909 AG |
1507 | " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
1508 | tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, | |
c66ac9db NB |
1509 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", |
1510 | lacl->initiatorname); | |
1511 | /* | |
1512 | * Check to see if there are any existing persistent reservation APTPL | |
1513 | * pre-registrations that need to be enabled for this LUN ACL. | |
1514 | */ | |
1515 | core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl); | |
1516 | return 0; | |
1517 | } | |
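/*
 * Two-step MappedLUN setup sketch (editor's illustration; "se_tpg" and
 * the initiator IQN are assumed, and the mapped and TPG LUN are both 0):
 *
 *	int ret;
 *	struct se_lun_acl *lacl;
 *
 *	lacl = core_dev_init_initiator_node_lun_acl(se_tpg, 0,
 *			"iqn.1993-08.org.debian:01:abcdef", &ret);
 *	if (!lacl)
 *		return ret;
 *	ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl, 0,
 *			TRANSPORT_LUNFLAGS_READ_WRITE);
 *	if (ret < 0)
 *		core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
 *
 * Note that a READ_WRITE request is silently demoted to READ_ONLY above
 * when the underlying LUN itself is read-only.
 */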
1518 | ||
1519 | /* core_dev_del_initiator_node_lun_acl(): | |
1520 | * | |
1521 | * Unmap @lacl->mapped_lun from @lun for the ACL's initiator node. | |
1522 | */ | |
1523 | int core_dev_del_initiator_node_lun_acl( | |
1524 | struct se_portal_group *tpg, | |
1525 | struct se_lun *lun, | |
1526 | struct se_lun_acl *lacl) | |
1527 | { | |
1528 | struct se_node_acl *nacl; | |
1529 | ||
1530 | nacl = lacl->se_lun_nacl; | |
6708bb27 | 1531 | if (!nacl) |
c66ac9db NB |
1532 | return -EINVAL; |
1533 | ||
1534 | spin_lock(&lun->lun_acl_lock); | |
1535 | list_del(&lacl->lacl_list); | |
1536 | atomic_dec(&lun->lun_acl_count); | |
1537 | smp_mb__after_atomic_dec(); | |
1538 | spin_unlock(&lun->lun_acl_lock); | |
1539 | ||
1540 | core_update_device_list_for_node(lun, NULL, lacl->mapped_lun, | |
1541 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); | |
1542 | ||
1543 | lacl->se_lun = NULL; | |
1544 | ||
6708bb27 | 1545 | pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for" |
c66ac9db | 1546 | " InitiatorNode: %s Mapped LUN: %u\n", |
e3d6f909 AG |
1547 | tpg->se_tpg_tfo->get_fabric_name(), |
1548 | tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, | |
c66ac9db NB |
1549 | lacl->initiatorname, lacl->mapped_lun); |
1550 | ||
1551 | return 0; | |
1552 | } | |
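/*
 * Removal sketch, the inverse of the add path above: the entry is
 * unlinked and the node's device list demoted to NO_ACCESS here, then
 * freed separately:
 *
 *	core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
 *	core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
 */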
1553 | ||
1554 | void core_dev_free_initiator_node_lun_acl( | |
1555 | struct se_portal_group *tpg, | |
1556 | struct se_lun_acl *lacl) | |
1557 | { | |
6708bb27 | 1558 | pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" |
e3d6f909 AG |
1559 | " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(), |
1560 | tpg->se_tpg_tfo->tpg_get_tag(tpg), | |
1561 | tpg->se_tpg_tfo->get_fabric_name(), | |
c66ac9db NB |
1562 | lacl->initiatorname, lacl->mapped_lun); |
1563 | ||
1564 | kfree(lacl); | |
1565 | } | |
1566 | ||
1567 | int core_dev_setup_virtual_lun0(void) | |
1568 | { | |
1569 | struct se_hba *hba; | |
1570 | struct se_device *dev; | |
1571 | struct se_subsystem_dev *se_dev = NULL; | |
1572 | struct se_subsystem_api *t; | |
1573 | char buf[16]; | |
1574 | int ret; | |
1575 | ||
6708bb27 | 1576 | hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE); |
c66ac9db NB |
1577 | if (IS_ERR(hba)) |
1578 | return PTR_ERR(hba); | |
1579 | ||
e3d6f909 | 1580 | lun0_hba = hba; |
c66ac9db NB |
1581 | t = hba->transport; |
1582 | ||
1583 | se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); | |
6708bb27 AG |
1584 | if (!se_dev) { |
1585 | pr_err("Unable to allocate memory for" | |
c66ac9db NB |
1586 | " struct se_subsystem_dev\n"); |
1587 | ret = -ENOMEM; | |
1588 | goto out; | |
1589 | } | |
e3d6f909 | 1590 | INIT_LIST_HEAD(&se_dev->se_dev_node); |
c66ac9db NB |
1591 | INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); |
1592 | spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); | |
e3d6f909 AG |
1593 | INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); |
1594 | INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list); | |
1595 | spin_lock_init(&se_dev->t10_pr.registration_lock); | |
1596 | spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock); | |
c66ac9db NB |
1597 | INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); |
1598 | spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); | |
1599 | spin_lock_init(&se_dev->se_dev_lock); | |
e3d6f909 | 1600 | se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; |
c66ac9db NB |
1601 | se_dev->t10_wwn.t10_sub_dev = se_dev; |
1602 | se_dev->t10_alua.t10_sub_dev = se_dev; | |
1603 | se_dev->se_dev_attrib.da_sub_dev = se_dev; | |
1604 | se_dev->se_dev_hba = hba; | |
1605 | ||
1606 | se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); | |
6708bb27 AG |
1607 | if (!se_dev->se_dev_su_ptr) { |
1608 | pr_err("Unable to allocate subsystem dependent pointer"
c66ac9db NB |
1609 | " from allocate_virtdevice()\n"); |
1610 | ret = -ENOMEM; | |
1611 | goto out; | |
1612 | } | |
e3d6f909 | 1613 | lun0_su_dev = se_dev; |
c66ac9db NB |
1614 | |
1615 | memset(buf, 0, sizeof(buf)); | |
1616 | snprintf(buf, sizeof(buf), "rd_pages=8"); | |
1617 | t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); | |
1618 | ||
1619 | dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); | |
e3d6f909 AG |
1620 | if (IS_ERR(dev)) { |
1621 | ret = PTR_ERR(dev); | |
c66ac9db NB |
1622 | goto out; |
1623 | } | |
1624 | se_dev->se_dev_ptr = dev; | |
e3d6f909 | 1625 | g_lun0_dev = dev; |
c66ac9db NB |
1626 | |
1627 | return 0; | |
1628 | out: | |
e3d6f909 | 1629 | lun0_su_dev = NULL; |
c66ac9db | 1630 | kfree(se_dev); |
e3d6f909 AG |
1631 | if (lun0_hba) { |
1632 | core_delete_hba(lun0_hba); | |
1633 | lun0_hba = NULL; | |
c66ac9db NB |
1634 | } |
1635 | return ret; | |
1636 | } | |
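/*
 * Summary of the setup above: an internal rd_mcp (ramdisk) HBA is
 * created with HBA_FLAGS_INTERNAL_USE, a struct se_subsystem_dev is
 * hand-initialized (normally configfs does this), and an 8-page
 * ramdisk backstore is configured via the "rd_pages=8" control string.
 * tpg.c then exposes the resulting g_lun0_dev as the demo-mode LUN 0.
 */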
1637 | ||
1638 | ||
1639 | void core_dev_release_virtual_lun0(void) | |
1640 | { | |
e3d6f909 AG |
1641 | struct se_hba *hba = lun0_hba; |
1642 | struct se_subsystem_dev *su_dev = lun0_su_dev; | |
c66ac9db | 1643 | |
6708bb27 | 1644 | if (!hba) |
c66ac9db NB |
1645 | return; |
1646 | ||
e3d6f909 AG |
1647 | if (g_lun0_dev) |
1648 | se_free_virtual_device(g_lun0_dev, hba); | |
c66ac9db NB |
1649 | |
1650 | kfree(su_dev); | |
1651 | core_delete_hba(hba); | |
1652 | } |
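/*
 * Teardown mirrors setup: the virtual device is released before its
 * internal HBA is deleted, and the se_subsystem_dev allocated in
 * core_dev_setup_virtual_lun0() is kfree()'d here because no configfs
 * entry ever owned it.
 */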