target: replace ->get_cdb with a target_get_task_cdb helper
[deliverable/linux.git] drivers/target/target_core_cdb.c
1/*
2 * CDB emulation for non-READ/WRITE commands.
3 *
4 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
5 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
6 * Copyright (c) 2007-2010 Rising Tide Systems
7 * Copyright (c) 2008-2010 Linux-iSCSI.org
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 */
25
11650b85 26#include <linux/kernel.h>
27#include <asm/unaligned.h>
28#include <scsi/scsi.h>
29
30#include <target/target_core_base.h>
31#include <target/target_core_transport.h>
32#include <target/target_core_fabric_ops.h>
33#include "target_core_ua.h"
34
35static void
36target_fill_alua_data(struct se_port *port, unsigned char *buf)
37{
38 struct t10_alua_tg_pt_gp *tg_pt_gp;
39 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
40
41 /*
42 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
43 */
44 buf[5] = 0x80;
45
46 /*
47 * Set TPGS field for explicit and/or implicit ALUA access type
48 * and operation.
49 *
50 * See spc4r17 section 6.4.2 Table 135
51 */
52 if (!port)
53 return;
54 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
55 if (!tg_pt_gp_mem)
56 return;
57
58 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
59 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
60 if (tg_pt_gp)
61 buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
62 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
63}
64
65static int
66target_emulate_inquiry_std(struct se_cmd *cmd)
67{
e3d6f909 68 struct se_lun *lun = cmd->se_lun;
5951146d 69 struct se_device *dev = cmd->se_dev;
052605c6 70 struct se_portal_group *tpg = lun->lun_sep->sep_tpg;
05d1c7c0 71 unsigned char *buf;
72
73 /*
74 * Make sure we at least have 6 bytes of INQUIRY response
75 * payload going back for EVPD=0
76 */
77 if (cmd->data_length < 6) {
6708bb27 78 pr_err("SCSI Inquiry payload length: %u"
c66ac9db 79 " too small for EVPD=0\n", cmd->data_length);
e3d6f909 80 return -EINVAL;
81 }
82
83 buf = transport_kmap_first_data_page(cmd);
84
85 if (dev == tpg->tpg_virt_lun0.lun_se_dev) {
86 buf[0] = 0x3f; /* Not connected */
87 } else {
88 buf[0] = dev->transport->get_device_type(dev);
89 if (buf[0] == TYPE_TAPE)
90 buf[1] = 0x80;
91 }
92 buf[2] = dev->transport->get_device_rev(dev);
93
94 /*
95 * Enable SCCS and TPGS fields for Emulated ALUA
96 */
e3d6f909 97 if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
98 target_fill_alua_data(lun->lun_sep, buf);
99
100 if (cmd->data_length < 8) {
101 buf[4] = 1; /* Set additional length to 1 */
05d1c7c0 102 goto out;
103 }
104
105 buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
106
107 /*
108 * Do not include vendor, product, revision info in INQUIRY
109 * response payload for CDBs with a small allocation length.
110 */
111 if (cmd->data_length < 36) {
112 buf[4] = 3; /* Set additional length to 3 */
05d1c7c0 113 goto out;
114 }
115
116 snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
117 snprintf((unsigned char *)&buf[16], 16, "%s",
e3d6f909 118 &dev->se_sub_dev->t10_wwn.model[0]);
c66ac9db 119 snprintf((unsigned char *)&buf[32], 4, "%s",
e3d6f909 120 &dev->se_sub_dev->t10_wwn.revision[0]);
c66ac9db 121 buf[4] = 31; /* Set additional length to 31 */
122
123out:
124 transport_kunmap_first_data_page(cmd);
125 return 0;
126}
127
128/* unit serial number */
129static int
130target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
131{
5951146d 132 struct se_device *dev = cmd->se_dev;
133 u16 len = 0;
134
135 if (dev->se_sub_dev->su_dev_flags &
136 SDF_EMULATED_VPD_UNIT_SERIAL) {
137 u32 unit_serial_len;
138
139 unit_serial_len =
e3d6f909 140 strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
141 unit_serial_len++; /* For NULL Terminator */
142
143 if (((len + 4) + unit_serial_len) > cmd->data_length) {
144 len += unit_serial_len;
145 buf[2] = ((len >> 8) & 0xff);
146 buf[3] = (len & 0xff);
147 return 0;
148 }
149 len += sprintf((unsigned char *)&buf[4], "%s",
e3d6f909 150 &dev->se_sub_dev->t10_wwn.unit_serial[0]);
151 len++; /* Extra Byte for NULL Terminator */
152 buf[3] = len;
153 }
154 return 0;
155}
156
784eb99e 157static void
b6b4e61f 158target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf)
159{
160 unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
161 int cnt;
162 bool next = true;
163
164 /*
165 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
166 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
167 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
168 * to complete the payload. These are based on VPD=0x80 PRODUCT SERIAL
169 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
170 * per-device uniqueness.
171 */
172 for (cnt = 0; *p && cnt < 13; p++) {
173 int val = hex_to_bin(*p);
174
175 if (val < 0)
784eb99e 176 continue;
177
178 if (next) {
179 next = false;
180 buf[cnt++] |= val;
784eb99e 181 } else {
182 next = true;
183 buf[cnt] = val << 4;
184 }
185 }
186}
187
188/*
189 * Device identification VPD, for a complete list of
190 * DESIGNATOR TYPEs see spc4r17 Table 459.
191 */
192static int
193target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
194{
5951146d 195 struct se_device *dev = cmd->se_dev;
e3d6f909 196 struct se_lun *lun = cmd->se_lun;
197 struct se_port *port = NULL;
198 struct se_portal_group *tpg = NULL;
199 struct t10_alua_lu_gp_member *lu_gp_mem;
200 struct t10_alua_tg_pt_gp *tg_pt_gp;
201 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
e3d6f909 202 unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
203 u32 prod_len;
204 u32 unit_serial_len, off = 0;
205 u16 len = 0, id_len;
206
207 off = 4;
208
209 /*
210 * NAA IEEE Registered Extended Assigned designator format, see
211 * spc4r17 section 7.7.3.6.5
212 *
213 * We depend upon a target_core_mod/ConfigFS provided
214 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
215 * value in order to return the NAA id.
216 */
217 if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
218 goto check_t10_vend_desc;
219
220 if (off + 20 > cmd->data_length)
221 goto check_t10_vend_desc;
222
223 /* CODE SET == Binary */
224 buf[off++] = 0x1;
225
163cd5fa 226 /* Set ASSOCIATION == addressed logical unit: 00b */
227 buf[off] = 0x00;
228
229 /* Identifier/Designator type == NAA identifier */
163cd5fa 230 buf[off++] |= 0x3;
231 off++;
232
233 /* Identifier/Designator length */
234 buf[off++] = 0x10;
235
236 /*
237 * Start NAA IEEE Registered Extended Identifier/Designator
238 */
239 buf[off++] = (0x6 << 4);
240
241 /*
242 * Use OpenFabrics IEEE Company ID: 00 14 05
243 */
244 buf[off++] = 0x01;
245 buf[off++] = 0x40;
246 buf[off] = (0x5 << 4);
247
248 /*
249 * Return ConfigFS Unit Serial Number information for
250 * VENDOR_SPECIFIC_IDENTIFIER and
251 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
252 */
784eb99e 253 target_parse_naa_6h_vendor_specific(dev, &buf[off]);
11650b85 254
255 len = 20;
256 off = (len + 4);
257
258check_t10_vend_desc:
259 /*
260 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
261 */
262 id_len = 8; /* For Vendor field */
263 prod_len = 4; /* For VPD Header */
264 prod_len += 8; /* For Vendor field */
265 prod_len += strlen(prod);
266 prod_len++; /* For : */
267
268 if (dev->se_sub_dev->su_dev_flags &
269 SDF_EMULATED_VPD_UNIT_SERIAL) {
270 unit_serial_len =
e3d6f909 271 strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
272 unit_serial_len++; /* For NULL Terminator */
273
274 if ((len + (id_len + 4) +
275 (prod_len + unit_serial_len)) >
276 cmd->data_length) {
277 len += (prod_len + unit_serial_len);
278 goto check_port;
279 }
280 id_len += sprintf((unsigned char *)&buf[off+12],
281 "%s:%s", prod,
e3d6f909 282 &dev->se_sub_dev->t10_wwn.unit_serial[0]);
283 }
284 buf[off] = 0x2; /* ASCII */
285 buf[off+1] = 0x1; /* T10 Vendor ID */
286 buf[off+2] = 0x0;
287 memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8);
288 /* Extra Byte for NULL Terminator */
289 id_len++;
290 /* Identifier Length */
291 buf[off+3] = id_len;
292 /* Header size for Designation descriptor */
293 len += (id_len + 4);
294 off += (id_len + 4);
295 /*
296 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
297 */
298check_port:
299 port = lun->lun_sep;
300 if (port) {
301 struct t10_alua_lu_gp *lu_gp;
302 u32 padding, scsi_name_len;
303 u16 lu_gp_id = 0;
304 u16 tg_pt_gp_id = 0;
305 u16 tpgt;
306
307 tpg = port->sep_tpg;
308 /*
309 * Relative target port identifier, see spc4r17
310 * section 7.7.3.7
311 *
312 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
313 * section 7.5.1 Table 362
314 */
315 if (((len + 4) + 8) > cmd->data_length) {
316 len += 8;
317 goto check_tpgi;
318 }
319 buf[off] =
e3d6f909 320 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
321 buf[off++] |= 0x1; /* CODE SET == Binary */
322 buf[off] = 0x80; /* Set PIV=1 */
163cd5fa 323 /* Set ASSOCIATION == target port: 01b */
324 buf[off] |= 0x10;
325 /* DESIGNATOR TYPE == Relative target port identifier */
326 buf[off++] |= 0x4;
327 off++; /* Skip over Reserved */
328 buf[off++] = 4; /* DESIGNATOR LENGTH */
329 /* Skip over Obsolete field in RTPI payload
330 * in Table 472 */
331 off += 2;
332 buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
333 buf[off++] = (port->sep_rtpi & 0xff);
334 len += 8; /* Header size + Designation descriptor */
335 /*
336 * Target port group identifier, see spc4r17
337 * section 7.7.3.8
338 *
339 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
340 * section 7.5.1 Table 362
341 */
342check_tpgi:
e3d6f909 343 if (dev->se_sub_dev->t10_alua.alua_type !=
344 SPC3_ALUA_EMULATED)
345 goto check_scsi_name;
346
347 if (((len + 4) + 8) > cmd->data_length) {
348 len += 8;
349 goto check_lu_gp;
350 }
351 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
352 if (!tg_pt_gp_mem)
353 goto check_lu_gp;
354
355 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
356 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
6708bb27 357 if (!tg_pt_gp) {
358 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
359 goto check_lu_gp;
360 }
361 tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
362 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
363
364 buf[off] =
e3d6f909 365 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
366 buf[off++] |= 0x1; /* CODE SET == Binary */
367 buf[off] = 0x80; /* Set PIV=1 */
163cd5fa 368 /* Set ASSOCIATION == target port: 01b */
369 buf[off] |= 0x10;
370 /* DESIGNATOR TYPE == Target port group identifier */
371 buf[off++] |= 0x5;
372 off++; /* Skip over Reserved */
373 buf[off++] = 4; /* DESIGNATOR LENGTH */
374 off += 2; /* Skip over Reserved Field */
375 buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
376 buf[off++] = (tg_pt_gp_id & 0xff);
377 len += 8; /* Header size + Designation descriptor */
378 /*
379 * Logical Unit Group identifier, see spc4r17
380 * section 7.7.3.8
381 */
382check_lu_gp:
383 if (((len + 4) + 8) > cmd->data_length) {
384 len += 8;
385 goto check_scsi_name;
386 }
387 lu_gp_mem = dev->dev_alua_lu_gp_mem;
6708bb27 388 if (!lu_gp_mem)
389 goto check_scsi_name;
390
391 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
392 lu_gp = lu_gp_mem->lu_gp;
6708bb27 393 if (!lu_gp) {
394 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
395 goto check_scsi_name;
396 }
397 lu_gp_id = lu_gp->lu_gp_id;
398 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
399
400 buf[off++] |= 0x1; /* CODE SET == Binary */
401 /* DESIGNATOR TYPE == Logical Unit Group identifier */
402 buf[off++] |= 0x6;
403 off++; /* Skip over Reserved */
404 buf[off++] = 4; /* DESIGNATOR LENGTH */
405 off += 2; /* Skip over Reserved Field */
406 buf[off++] = ((lu_gp_id >> 8) & 0xff);
407 buf[off++] = (lu_gp_id & 0xff);
408 len += 8; /* Header size + Designation descriptor */
409 /*
410 * SCSI name string designator, see spc4r17
411 * section 7.7.3.11
412 *
413 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
414 * section 7.5.1 Table 362
415 */
416check_scsi_name:
e3d6f909 417 scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
418 /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
419 scsi_name_len += 10;
420 /* Check for 4-byte padding */
421 padding = ((-scsi_name_len) & 3);
422 if (padding != 0)
423 scsi_name_len += padding;
424 /* Header size + Designation descriptor */
425 scsi_name_len += 4;
426
427 if (((len + 4) + scsi_name_len) > cmd->data_length) {
428 len += scsi_name_len;
429 goto set_len;
430 }
431 buf[off] =
e3d6f909 432 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
433 buf[off++] |= 0x3; /* CODE SET == UTF-8 */
434 buf[off] = 0x80; /* Set PIV=1 */
163cd5fa 435 /* Set ASSOCIATION == target port: 01b */
436 buf[off] |= 0x10;
437 /* DESIGNATOR TYPE == SCSI name string */
438 buf[off++] |= 0x8;
439 off += 2; /* Skip over Reserved and length */
440 /*
441 * SCSI name string identifier containing $FABRIC_MOD
442 * dependent information. For LIO-Target and iSCSI
443 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
444 * UTF-8 encoding.
445 */
e3d6f909 446 tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
c66ac9db 447 scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
e3d6f909 448 tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
449 scsi_name_len += 1 /* Include NULL terminator */;
450 /*
451 * The null-terminated, null-padded (see 4.4.2) SCSI
452 * NAME STRING field contains a UTF-8 format string.
453 * The number of bytes in the SCSI NAME STRING field
454 * (i.e., the value in the DESIGNATOR LENGTH field)
455 * shall be no larger than 256 and shall be a multiple
456 * of four.
457 */
458 if (padding)
459 scsi_name_len += padding;
460
461 buf[off-1] = scsi_name_len;
462 off += scsi_name_len;
463 /* Header size + Designation descriptor */
464 len += (scsi_name_len + 4);
465 }
466set_len:
467 buf[2] = ((len >> 8) & 0xff);
468 buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
469 return 0;
470}
471
472/* Extended INQUIRY Data VPD Page */
473static int
474target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
475{
476 if (cmd->data_length < 60)
477 return 0;
478
479 buf[2] = 0x3c;
480 /* Set HEADSUP, ORDSUP, SIMPSUP */
481 buf[5] = 0x07;
482
483 /* If WriteCache emulation is enabled, set V_SUP */
5951146d 484 if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
485 buf[6] = 0x01;
486 return 0;
487}
488
489/* Block Limits VPD page */
490static int
491target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
492{
5951146d 493 struct se_device *dev = cmd->se_dev;
494 int have_tp = 0;
495
496 /*
497 * Following sbc3r22 section 6.5.3 Block Limits VPD page, when
498 * emulate_tpu=1 or emulate_tpws=1 we expect a
499 * different page length for Thin Provisioning.
500 */
e3d6f909 501 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
502 have_tp = 1;
503
504 if (cmd->data_length < (0x10 + 4)) {
6708bb27 505 pr_debug("Received data_length: %u"
506 " too small for EVPD 0xb0\n",
507 cmd->data_length);
e3d6f909 508 return -EINVAL;
509 }
510
511 if (have_tp && cmd->data_length < (0x3c + 4)) {
6708bb27 512 pr_debug("Received data_length: %u"
513 " too small for TPE=1 EVPD 0xb0\n",
514 cmd->data_length);
515 have_tp = 0;
516 }
517
518 buf[0] = dev->transport->get_device_type(dev);
519 buf[3] = have_tp ? 0x3c : 0x10;
520
521 /* Set WSNZ to 1 */
522 buf[4] = 0x01;
523
524 /*
525 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
526 */
527 put_unaligned_be16(1, &buf[6]);
528
529 /*
530 * Set MAXIMUM TRANSFER LENGTH
531 */
e3d6f909 532 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]);
533
534 /*
535 * Set OPTIMAL TRANSFER LENGTH
536 */
e3d6f909 537 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);
538
539 /*
540 * Exit now if we don't support TP or the initiator sent a too
541 * short buffer.
542 */
543 if (!have_tp || cmd->data_length < (0x3c + 4))
544 return 0;
545
546 /*
547 * Set MAXIMUM UNMAP LBA COUNT
548 */
e3d6f909 549 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]);
550
551 /*
552 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
553 */
e3d6f909 554 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count,
555 &buf[24]);
556
557 /*
558 * Set OPTIMAL UNMAP GRANULARITY
559 */
e3d6f909 560 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]);
561
562 /*
563 * UNMAP GRANULARITY ALIGNMENT
564 */
e3d6f909 565 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment,
c66ac9db 566 &buf[32]);
e3d6f909 567 if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0)
568 buf[32] |= 0x80; /* Set the UGAVALID bit */
569
570 return 0;
571}
572
573/* Block Device Characteristics VPD page */
574static int
575target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
576{
577 struct se_device *dev = cmd->se_dev;
578
579 buf[0] = dev->transport->get_device_type(dev);
580 buf[3] = 0x3c;
581
582 if (cmd->data_length >= 5 &&
583 dev->se_sub_dev->se_dev_attrib.is_nonrot)
584 buf[5] = 1;
585
586 return 0;
587}
588
589/* Thin Provisioning VPD */
590static int
591target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
592{
5951146d 593 struct se_device *dev = cmd->se_dev;
594
595 /*
596 * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
597 *
598 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
599 * zero, then the page length shall be set to 0004h. If the DP bit
600 * is set to one, then the page length shall be set to the value
601 * defined in table 162.
602 */
603 buf[0] = dev->transport->get_device_type(dev);
604
605 /*
606 * Set Hardcoded length mentioned above for DP=0
607 */
608 put_unaligned_be16(0x0004, &buf[2]);
609
610 /*
611 * The THRESHOLD EXPONENT field indicates the threshold set size in
612 * LBAs as a power of 2 (i.e., the threshold set size is equal to
613 * 2(threshold exponent)).
614 *
615 * Note that this is currently set to 0x00 as mkp says it will be
616 * changing again. We can enable this once it has settled in T10
617 * and is actually used by Linux/SCSI ML code.
618 */
619 buf[4] = 0x00;
620
621 /*
622 * A TPU bit set to one indicates that the device server supports
623 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
624 * that the device server does not support the UNMAP command.
625 */
e3d6f909 626 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0)
627 buf[5] = 0x80;
628
629 /*
630 * A TPWS bit set to one indicates that the device server supports
631 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
632 * A TPWS bit set to zero indicates that the device server does not
633 * support the use of the WRITE SAME (16) command to unmap LBAs.
634 */
e3d6f909 635 if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0)
636 buf[5] |= 0x40;
637
638 return 0;
639}
640
641static int
642target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
643
644static struct {
645 uint8_t page;
646 int (*emulate)(struct se_cmd *, unsigned char *);
647} evpd_handlers[] = {
648 { .page = 0x00, .emulate = target_emulate_evpd_00 },
649 { .page = 0x80, .emulate = target_emulate_evpd_80 },
650 { .page = 0x83, .emulate = target_emulate_evpd_83 },
651 { .page = 0x86, .emulate = target_emulate_evpd_86 },
652 { .page = 0xb0, .emulate = target_emulate_evpd_b0 },
e22a7f07 653 { .page = 0xb1, .emulate = target_emulate_evpd_b1 },
654 { .page = 0xb2, .emulate = target_emulate_evpd_b2 },
655};
656
657/* supported vital product data pages */
658static int
659target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
660{
661 int p;
662
663 if (cmd->data_length < 8)
664 return 0;
665 /*
666 * Only report the INQUIRY EVPD=1 pages after a valid NAA
667 * Registered Extended LUN WWN has been set via ConfigFS
668 * during device creation/restart.
669 */
670 if (cmd->se_dev->se_sub_dev->su_dev_flags &
671 SDF_EMULATED_VPD_UNIT_SERIAL) {
672 buf[3] = ARRAY_SIZE(evpd_handlers);
673 for (p = 0; p < min_t(int, ARRAY_SIZE(evpd_handlers),
674 cmd->data_length - 4); ++p)
675 buf[p + 4] = evpd_handlers[p].page;
676 }
677
678 return 0;
679}
680
681static int
682target_emulate_inquiry(struct se_cmd *cmd)
683{
5951146d 684 struct se_device *dev = cmd->se_dev;
05d1c7c0 685 unsigned char *buf;
a1d8b49a 686 unsigned char *cdb = cmd->t_task_cdb;
05d1c7c0 687 int p, ret;
688
689 if (!(cdb[1] & 0x1))
690 return target_emulate_inquiry_std(cmd);
691
692 /*
693 * Make sure we at least have 4 bytes of INQUIRY response
694 * payload for 0x00 going back for EVPD=1. Note that 0x80
695 * and 0x83 will check for enough payload data length and
696 * jump to set_len: label when there is not enough inquiry EVPD
697 * payload length left for the next outgoing EVPD metadata
698 */
699 if (cmd->data_length < 4) {
6708bb27 700 pr_err("SCSI Inquiry payload length: %u"
c66ac9db 701 " too small for EVPD=1\n", cmd->data_length);
e3d6f909 702 return -EINVAL;
c66ac9db 703 }
704
705 buf = transport_kmap_first_data_page(cmd);
706
707 buf[0] = dev->transport->get_device_type(dev);
708
709 for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
710 if (cdb[2] == evpd_handlers[p].page) {
711 buf[1] = cdb[2];
712 ret = evpd_handlers[p].emulate(cmd, buf);
713 transport_kunmap_first_data_page(cmd);
714 return ret;
b2eb705e 715 }
c66ac9db 716
05d1c7c0 717 transport_kunmap_first_data_page(cmd);
6708bb27 718 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
b2eb705e 719 return -EINVAL;
720}
721
722static int
723target_emulate_readcapacity(struct se_cmd *cmd)
724{
5951146d 725 struct se_device *dev = cmd->se_dev;
05d1c7c0 726 unsigned char *buf;
727 unsigned long long blocks_long = dev->transport->get_blocks(dev);
728 u32 blocks;
729
730 if (blocks_long >= 0x00000000ffffffff)
731 blocks = 0xffffffff;
732 else
733 blocks = (u32)blocks_long;
c66ac9db 734
735 buf = transport_kmap_first_data_page(cmd);
736
737 buf[0] = (blocks >> 24) & 0xff;
738 buf[1] = (blocks >> 16) & 0xff;
739 buf[2] = (blocks >> 8) & 0xff;
740 buf[3] = blocks & 0xff;
741 buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
742 buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
743 buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
744 buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
745 /*
746 * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16
747 */
e3d6f909 748 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
749 put_unaligned_be32(0xFFFFFFFF, &buf[0]);
750
751 transport_kunmap_first_data_page(cmd);
752
753 return 0;
754}
755
756static int
757target_emulate_readcapacity_16(struct se_cmd *cmd)
758{
5951146d 759 struct se_device *dev = cmd->se_dev;
05d1c7c0 760 unsigned char *buf;
761 unsigned long long blocks = dev->transport->get_blocks(dev);
762
763 buf = transport_kmap_first_data_page(cmd);
764
765 buf[0] = (blocks >> 56) & 0xff;
766 buf[1] = (blocks >> 48) & 0xff;
767 buf[2] = (blocks >> 40) & 0xff;
768 buf[3] = (blocks >> 32) & 0xff;
769 buf[4] = (blocks >> 24) & 0xff;
770 buf[5] = (blocks >> 16) & 0xff;
771 buf[6] = (blocks >> 8) & 0xff;
772 buf[7] = blocks & 0xff;
773 buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
774 buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
775 buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
776 buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
777 /*
778 * Set Thin Provisioning Enable bit following sbc3r22 in section
779 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
780 */
e3d6f909 781 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
782 buf[14] = 0x80;
783
784 transport_kunmap_first_data_page(cmd);
785
786 return 0;
787}
788
789static int
790target_modesense_rwrecovery(unsigned char *p)
791{
792 p[0] = 0x01;
793 p[1] = 0x0a;
794
795 return 12;
796}
797
798static int
799target_modesense_control(struct se_device *dev, unsigned char *p)
800{
801 p[0] = 0x0a;
802 p[1] = 0x0a;
803 p[2] = 2;
804 /*
805 * From spc4r23, 7.4.7 Control mode page
806 *
807 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
808 * restrictions on the algorithm used for reordering commands
809 * having the SIMPLE task attribute (see SAM-4).
810 *
811 * Table 368 -- QUEUE ALGORITHM MODIFIER field
812 * Code Description
813 * 0h Restricted reordering
814 * 1h Unrestricted reordering allowed
815 * 2h to 7h Reserved
816 * 8h to Fh Vendor specific
817 *
818 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
819 * the device server shall order the processing sequence of commands
820 * having the SIMPLE task attribute such that data integrity is maintained
821 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
822 * requests is halted at any time, the final value of all data observable
823 * on the medium shall be the same as if all the commands had been processed
824 * with the ORDERED task attribute).
825 *
826 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
827 * device server may reorder the processing sequence of commands having the
828 * SIMPLE task attribute in any manner. Any data integrity exposures related to
829 * command sequence order shall be explicitly handled by the application client
830 * through the selection of appropriate commands and task attributes.
831 */
832 p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
833 /*
834 * From spc4r17, section 7.4.6 Control mode Page
835 *
836 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
837 *
838 * 00b: The logical unit shall clear any unit attention condition
839 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
840 * status and shall not establish a unit attention condition when a com-
841 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
842 * status.
843 *
844 * 10b: The logical unit shall not clear any unit attention condition
845 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
846 * status and shall not establish a unit attention condition when
847 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
848 * CONFLICT status.
849 *
850 * 11b: The logical unit shall not clear any unit attention condition
851 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
852 * status and shall establish a unit attention condition for the
853 * initiator port associated with the I_T nexus on which the BUSY,
854 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
855 * Depending on the status, the additional sense code shall be set to
856 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
857 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
858 * command, a unit attention condition shall be established only once
859 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
860 * to the number of commands completed with one of those status codes.
861 */
862 p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
863 (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
864 /*
865 * From spc4r17, section 7.4.6 Control mode Page
866 *
867 * Task Aborted Status (TAS) bit set to zero.
868 *
869 * A task aborted status (TAS) bit set to zero specifies that aborted
870 * tasks shall be terminated by the device server without any response
871 * to the application client. A TAS bit set to one specifies that tasks
872 * aborted by the actions of an I_T nexus other than the I_T nexus on
873 * which the command was received shall be completed with TASK ABORTED
874 * status (see SAM-4).
875 */
e3d6f909 876 p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00;
877 p[8] = 0xff;
878 p[9] = 0xff;
879 p[11] = 30;
880
881 return 12;
882}
883
884static int
885target_modesense_caching(struct se_device *dev, unsigned char *p)
886{
887 p[0] = 0x08;
888 p[1] = 0x12;
e3d6f909 889 if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
890 p[2] = 0x04; /* Write Cache Enable */
891 p[12] = 0x20; /* Disabled Read Ahead */
892
893 return 20;
894}
895
896static void
897target_modesense_write_protect(unsigned char *buf, int type)
898{
899 /*
900 * I believe that the WP bit (bit 7) in the mode header is the same for
901 * all device types.
902 */
903 switch (type) {
904 case TYPE_DISK:
905 case TYPE_TAPE:
906 default:
907 buf[0] |= 0x80; /* WP bit */
908 break;
909 }
910}
911
912static void
913target_modesense_dpofua(unsigned char *buf, int type)
914{
915 switch (type) {
916 case TYPE_DISK:
917 buf[0] |= 0x10; /* DPOFUA bit */
918 break;
919 default:
920 break;
921 }
922}
923
924static int
925target_emulate_modesense(struct se_cmd *cmd, int ten)
926{
5951146d 927 struct se_device *dev = cmd->se_dev;
a1d8b49a 928 char *cdb = cmd->t_task_cdb;
05d1c7c0 929 unsigned char *rbuf;
930 int type = dev->transport->get_device_type(dev);
931 int offset = (ten) ? 8 : 4;
932 int length = 0;
933 unsigned char buf[SE_MODE_PAGE_BUF];
934
935 memset(buf, 0, SE_MODE_PAGE_BUF);
936
937 switch (cdb[2] & 0x3f) {
938 case 0x01:
939 length = target_modesense_rwrecovery(&buf[offset]);
940 break;
941 case 0x08:
942 length = target_modesense_caching(dev, &buf[offset]);
943 break;
944 case 0x0a:
945 length = target_modesense_control(dev, &buf[offset]);
946 break;
947 case 0x3f:
948 length = target_modesense_rwrecovery(&buf[offset]);
949 length += target_modesense_caching(dev, &buf[offset+length]);
950 length += target_modesense_control(dev, &buf[offset+length]);
951 break;
952 default:
953 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
954 cdb[2] & 0x3f, cdb[3]);
955 return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
956 }
957 offset += length;
958
959 if (ten) {
960 offset -= 2;
961 buf[0] = (offset >> 8) & 0xff;
962 buf[1] = offset & 0xff;
963
e3d6f909 964 if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
965 (cmd->se_deve &&
966 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
967 target_modesense_write_protect(&buf[3], type);
968
969 if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
970 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
971 target_modesense_dpofua(&buf[3], type);
972
973 if ((offset + 2) > cmd->data_length)
974 offset = cmd->data_length;
975
976 } else {
977 offset -= 1;
978 buf[0] = offset & 0xff;
979
e3d6f909 980 if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
981 (cmd->se_deve &&
982 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
983 target_modesense_write_protect(&buf[2], type);
984
985 if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
986 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
987 target_modesense_dpofua(&buf[2], type);
988
989 if ((offset + 1) > cmd->data_length)
990 offset = cmd->data_length;
991 }
992
993 rbuf = transport_kmap_first_data_page(cmd);
c66ac9db 994 memcpy(rbuf, buf, offset);
05d1c7c0 995 transport_kunmap_first_data_page(cmd);
996
997 return 0;
998}
999
1000static int
1001target_emulate_request_sense(struct se_cmd *cmd)
1002{
a1d8b49a 1003 unsigned char *cdb = cmd->t_task_cdb;
05d1c7c0 1004 unsigned char *buf;
c66ac9db 1005 u8 ua_asc = 0, ua_ascq = 0;
05d1c7c0 1006 int err = 0;
1007
1008 if (cdb[1] & 0x01) {
6708bb27 1009 pr_err("REQUEST_SENSE description emulation not"
1010 " supported\n");
1011 return PYX_TRANSPORT_INVALID_CDB_FIELD;
1012 }
1013
1014 buf = transport_kmap_first_data_page(cmd);
1015
6708bb27 1016 if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
1017 /*
1018 * CURRENT ERROR, UNIT ATTENTION
1019 */
1020 buf[0] = 0x70;
1021 buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
1022 /*
1023 * Make sure request data length is enough for additional
1024 * sense data.
1025 */
1026 if (cmd->data_length <= 18) {
1027 buf[7] = 0x00;
1028 err = -EINVAL;
1029 goto end;
1030 }
1031 /*
1032 * The Additional Sense Code (ASC) from the UNIT ATTENTION
1033 */
1034 buf[SPC_ASC_KEY_OFFSET] = ua_asc;
1035 buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
1036 buf[7] = 0x0A;
1037 } else {
1038 /*
1039 * CURRENT ERROR, NO SENSE
1040 */
1041 buf[0] = 0x70;
1042 buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
1043 /*
1044 * Make sure request data length is enough for additional
1045 * sense data.
1046 */
1047 if (cmd->data_length <= 18) {
1048 buf[7] = 0x00;
1049 err = -EINVAL;
1050 goto end;
1051 }
1052 /*
1053 * NO ADDITIONAL SENSE INFORMATION
1054 */
1055 buf[SPC_ASC_KEY_OFFSET] = 0x00;
1056 buf[7] = 0x0A;
1057 }
1058
1059end:
1060 transport_kunmap_first_data_page(cmd);
1061
1062 return 0;
1063}
1064
1065/*
1066 * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
1067 * Note this is not used for TCM/pSCSI passthrough
1068 */
1069static int
1070target_emulate_unmap(struct se_task *task)
1071{
e3d6f909 1072 struct se_cmd *cmd = task->task_se_cmd;
5951146d 1073 struct se_device *dev = cmd->se_dev;
05d1c7c0 1074 unsigned char *buf, *ptr = NULL;
a1d8b49a 1075 unsigned char *cdb = &cmd->t_task_cdb[0];
1076 sector_t lba;
1077 unsigned int size = cmd->data_length, range;
05d1c7c0 1078 int ret = 0, offset;
1079 unsigned short dl, bd_dl;
1080
1081 /* First UNMAP block descriptor starts at 8 byte offset */
1082 offset = 8;
1083 size -= 8;
1084 dl = get_unaligned_be16(&cdb[0]);
1085 bd_dl = get_unaligned_be16(&cdb[2]);
1086
1087 buf = transport_kmap_first_data_page(cmd);
1088
c66ac9db 1089 ptr = &buf[offset];
6708bb27 1090 pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
1091 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
1092
1093 while (size) {
1094 lba = get_unaligned_be64(&ptr[0]);
1095 range = get_unaligned_be32(&ptr[8]);
6708bb27 1096 pr_debug("UNMAP: Using lba: %llu and range: %u\n",
1097 (unsigned long long)lba, range);
1098
1099 ret = dev->transport->do_discard(dev, lba, range);
1100 if (ret < 0) {
6708bb27 1101 pr_err("blkdev_issue_discard() failed: %d\n",
c66ac9db 1102 ret);
05d1c7c0 1103 goto err;
1104 }
1105
1106 ptr += 16;
1107 size -= 16;
1108 }
1109
1110err:
1111 transport_kunmap_first_data_page(cmd);
1112
1113 return ret;
1114}
1115
1116/*
1117 * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
1118 * Note this is not used for TCM/pSCSI passthrough
1119 */
1120static int
706d5860 1121target_emulate_write_same(struct se_task *task, u32 num_blocks)
c66ac9db 1122{
e3d6f909 1123 struct se_cmd *cmd = task->task_se_cmd;
5951146d 1124 struct se_device *dev = cmd->se_dev;
1125 sector_t range;
1126 sector_t lba = cmd->t_task_lba;
c66ac9db 1127 int ret;
dd3a5ad8 1128 /*
1129 * Use the explicit range when non-zero is supplied, otherwise calculate
1130 * the remaining range based on ->get_blocks() - starting LBA.
dd3a5ad8 1131 */
1132 if (num_blocks != 0)
1133 range = num_blocks;
1134 else
1135 range = (dev->transport->get_blocks(dev) - lba);
c66ac9db 1136
6708bb27 1137 pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
dd3a5ad8 1138 (unsigned long long)lba, (unsigned long long)range);
1139
1140 ret = dev->transport->do_discard(dev, lba, range);
1141 if (ret < 0) {
6708bb27 1142 pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
e3d6f909 1143 return ret;
1144 }
1145
1146 return 0;
1147}
1148
1149int
1150transport_emulate_control_cdb(struct se_task *task)
1151{
e3d6f909 1152 struct se_cmd *cmd = task->task_se_cmd;
5951146d 1153 struct se_device *dev = cmd->se_dev;
1154 unsigned short service_action;
1155 int ret = 0;
1156
a1d8b49a 1157 switch (cmd->t_task_cdb[0]) {
1158 case INQUIRY:
1159 ret = target_emulate_inquiry(cmd);
1160 break;
1161 case READ_CAPACITY:
1162 ret = target_emulate_readcapacity(cmd);
1163 break;
1164 case MODE_SENSE:
1165 ret = target_emulate_modesense(cmd, 0);
1166 break;
1167 case MODE_SENSE_10:
1168 ret = target_emulate_modesense(cmd, 1);
1169 break;
1170 case SERVICE_ACTION_IN:
a1d8b49a 1171 switch (cmd->t_task_cdb[1] & 0x1f) {
1172 case SAI_READ_CAPACITY_16:
1173 ret = target_emulate_readcapacity_16(cmd);
1174 break;
1175 default:
6708bb27 1176 pr_err("Unsupported SA: 0x%02x\n",
a1d8b49a 1177 cmd->t_task_cdb[1] & 0x1f);
1178 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1179 }
1180 break;
1181 case REQUEST_SENSE:
1182 ret = target_emulate_request_sense(cmd);
1183 break;
1184 case UNMAP:
1185 if (!dev->transport->do_discard) {
6708bb27 1186 pr_err("UNMAP emulation not supported for: %s\n",
1187 dev->transport->name);
1188 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1189 }
1190 ret = target_emulate_unmap(task);
1191 break;
1192 case WRITE_SAME:
1193 if (!dev->transport->do_discard) {
1194 pr_err("WRITE_SAME emulation not supported"
1195 " for: %s\n", dev->transport->name);
1196 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1197 }
1198 ret = target_emulate_write_same(task,
1199 get_unaligned_be16(&cmd->t_task_cdb[7]));
1200 break;
1201 case WRITE_SAME_16:
1202 if (!dev->transport->do_discard) {
6708bb27 1203 pr_err("WRITE_SAME_16 emulation not supported"
1204 " for: %s\n", dev->transport->name);
1205 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1206 }
1207 ret = target_emulate_write_same(task,
1208 get_unaligned_be32(&cmd->t_task_cdb[10]));
1209 break;
1210 case VARIABLE_LENGTH_CMD:
1211 service_action =
a1d8b49a 1212 get_unaligned_be16(&cmd->t_task_cdb[8]);
1213 switch (service_action) {
1214 case WRITE_SAME_32:
1215 if (!dev->transport->do_discard) {
6708bb27 1216 pr_err("WRITE_SAME_32 SA emulation not"
1217 " supported for: %s\n",
1218 dev->transport->name);
1219 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1220 }
1221 ret = target_emulate_write_same(task,
1222 get_unaligned_be32(&cmd->t_task_cdb[28]));
1223 break;
1224 default:
6708bb27 1225 pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
1226 " 0x%02x\n", service_action);
1227 break;
1228 }
1229 break;
1230 case SYNCHRONIZE_CACHE:
1231 case 0x91: /* SYNCHRONIZE_CACHE_16: */
1232 if (!dev->transport->do_sync_cache) {
6708bb27 1233 pr_err("SYNCHRONIZE_CACHE emulation not supported"
1234 " for: %s\n", dev->transport->name);
1235 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1236 }
1237 dev->transport->do_sync_cache(task);
1238 break;
1239 case ALLOW_MEDIUM_REMOVAL:
1240 case ERASE:
1241 case REZERO_UNIT:
1242 case SEEK_10:
1243 case SPACE:
1244 case START_STOP:
1245 case TEST_UNIT_READY:
1246 case VERIFY:
1247 case WRITE_FILEMARKS:
1248 break;
1249 default:
6708bb27 1250 pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n",
a1d8b49a 1251 cmd->t_task_cdb[0], dev->transport->name);
1252 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1253 }
1254
1255 if (ret < 0)
1256 return ret;
1257 /*
1258 * Handle the successful completion here unless a caller
1259 * has explicitly requested an asynchronous completion.
1260 */
1261 if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
1262 task->task_scsi_status = GOOD;
1263 transport_complete_task(task, 1);
1264 }
1265
1266 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
1267}
1268
1269/*
1270 * Write a CDB into @cdb that is based on the one the initiator sent us,
1271 * but updated to only cover the sectors that the current task handles.
1272 */
1273void target_get_task_cdb(struct se_task *task, unsigned char *cdb)
1274{
1275 struct se_cmd *cmd = task->task_se_cmd;
1276
1277 memcpy(cdb, cmd->t_task_cdb, scsi_command_size(cmd->t_task_cdb));
1278 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
1279 cmd->transport_split_cdb(task->task_lba, task->task_sectors,
1280 cdb);
1281 }
1282}
1283EXPORT_SYMBOL(target_get_task_cdb);
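
Usage note: target_get_task_cdb() is what backend drivers are expected to call in place of the old ->get_cdb() hook. Below is a minimal, hypothetical caller sketch; the function name, local buffer, and its 32-byte size are illustrative assumptions and are not part of this file or commit.

/* Hypothetical backend task-preparation path built on target_get_task_cdb(). */
static void example_backend_setup_task(struct se_task *task)
{
	/* Assumed scratch buffer; 32 bytes covers the fixed-size CDBs emulated here. */
	unsigned char cdb[32];

	/*
	 * Copies cmd->t_task_cdb into 'cdb' and, for SCF_SCSI_DATA_SG_IO_CDB
	 * commands, rewrites the LBA/length fields to this task's
	 * task_lba/task_sectors via cmd->transport_split_cdb().
	 */
	target_get_task_cdb(task, cdb);

	/* ... hand 'cdb' to the backend-specific request structure here ... */
}

Splitting the CDB per task keeps each se_task's command consistent with the subset of sectors that task actually carries.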