drivers/target/target_core_spc.c
1 /*
2 * SCSI Primary Commands (SPC) parsing and emulation.
3 *
4 * (c) Copyright 2002-2013 Datera, Inc.
5 *
6 * Nicholas A. Bellinger <nab@kernel.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <asm/unaligned.h>
26
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_tcq.h>
29
30 #include <target/target_core_base.h>
31 #include <target/target_core_backend.h>
32 #include <target/target_core_fabric.h>
33
34 #include "target_core_internal.h"
35 #include "target_core_alua.h"
36 #include "target_core_pr.h"
37 #include "target_core_ua.h"
38 #include "target_core_xcopy.h"
39
40 static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
41 {
42 struct t10_alua_tg_pt_gp *tg_pt_gp;
43 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
44
45 /*
46 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
47 */
48 buf[5] = 0x80;
49
50 /*
51 * Set TPGS field for explicit and/or implicit ALUA access type
52 * and operation.
53 *
54 * See spc4r17 section 6.4.2 Table 135
55 */
56 if (!port)
57 return;
58 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
59 if (!tg_pt_gp_mem)
60 return;
61
62 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
63 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
64 if (tg_pt_gp)
65 buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
66 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
67 }
68
69 sense_reason_t
70 spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
71 {
72 struct se_lun *lun = cmd->se_lun;
73 struct se_device *dev = cmd->se_dev;
74 struct se_session *sess = cmd->se_sess;
75
76 /* Set RMB (removable media) for tape devices */
77 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
78 buf[1] = 0x80;
79
80 buf[2] = 0x05; /* SPC-3 */
81
82 /*
83 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
84 *
85 * SPC4 says:
86 * A RESPONSE DATA FORMAT field set to 2h indicates that the
87 * standard INQUIRY data is in the format defined in this
88 * standard. Response data format values less than 2h are
89 * obsolete. Response data format values greater than 2h are
90 * reserved.
91 */
92 buf[3] = 2;
93
94 /*
95 * Enable SCCS and TPGS fields for Emulated ALUA
96 */
97 spc_fill_alua_data(lun->lun_sep, buf);
98
99 /*
100 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
101 */
102 if (dev->dev_attrib.emulate_3pc)
103 buf[5] |= 0x8;
104 /*
105 * Set Protection (PROTECT) bit when DIF has been enabled on the
106 * device, and the transport supports VERIFY + PASS.
107 */
108 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
109 if (dev->dev_attrib.pi_prot_type)
110 buf[5] |= 0x1;
111 }
112
113 buf[7] = 0x2; /* CmdQue=1 */
114
115 memcpy(&buf[8], "LIO-ORG ", 8);
116 memset(&buf[16], 0x20, 16);
117 memcpy(&buf[16], dev->t10_wwn.model,
118 min_t(size_t, strlen(dev->t10_wwn.model), 16));
119 memcpy(&buf[32], dev->t10_wwn.revision,
120 min_t(size_t, strlen(dev->t10_wwn.revision), 4));
121 buf[4] = 31; /* Set additional length to 31 */
122
123 return 0;
124 }
125 EXPORT_SYMBOL(spc_emulate_inquiry_std);
126
127 /* unit serial number */
128 static sense_reason_t
129 spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
130 {
131 struct se_device *dev = cmd->se_dev;
132 u16 len = 0;
133
134 if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
135 u32 unit_serial_len;
136
137 unit_serial_len = strlen(dev->t10_wwn.unit_serial);
138 unit_serial_len++; /* For NULL Terminator */
139
140 len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
141 len++; /* Extra Byte for NULL Terminator */
142 buf[3] = len;
143 }
144 return 0;
145 }
146
147 void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
148 unsigned char *buf)
149 {
150 unsigned char *p = &dev->t10_wwn.unit_serial[0];
151 int cnt;
152 bool next = true;
153
154 /*
155 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
156 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
157 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
158 * to complete the payload. These are based on the VPD=0x80 PRODUCT SERIAL
159 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
160 * per-device uniqueness.
161 */
162 for (cnt = 0; *p && cnt < 13; p++) {
163 int val = hex_to_bin(*p);
164
165 if (val < 0)
166 continue;
167
168 if (next) {
169 next = false;
170 buf[cnt++] |= val;
171 } else {
172 next = true;
173 buf[cnt] = val << 4;
174 }
175 }
176 }
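/*
 * Worked example (hypothetical vpd_unit_serial, purely for illustration):
 * the caller passes &buf[off] with buf[off] already holding (0x5 << 4), so
 * for a serial beginning "fedcba98" the loop above packs the nibbles as
 * buf[0] = 0x5f, buf[1] = 0xed, buf[2] = 0xcb, buf[3] = 0xa9, buf[4] = 0x80.
 * The first hex digit fills the low nibble of the company-ID byte, each
 * following pair fills one byte high-nibble first, and non-hex characters
 * such as '-' in the serial are skipped.
 */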
177
178 /*
179 * Device identification VPD, for a complete list of
180 * DESIGNATOR TYPEs see spc4r17 Table 459.
181 */
182 sense_reason_t
183 spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
184 {
185 struct se_device *dev = cmd->se_dev;
186 struct se_lun *lun = cmd->se_lun;
187 struct se_port *port = NULL;
188 struct se_portal_group *tpg = NULL;
189 struct t10_alua_lu_gp_member *lu_gp_mem;
190 struct t10_alua_tg_pt_gp *tg_pt_gp;
191 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
192 unsigned char *prod = &dev->t10_wwn.model[0];
193 u32 prod_len;
194 u32 unit_serial_len, off = 0;
195 u16 len = 0, id_len;
196
197 off = 4;
198
199 /*
200 * NAA IEEE Registered Extended Assigned designator format, see
201 * spc4r17 section 7.7.3.6.5
202 *
203 * We depend upon a target_core_mod/ConfigFS provided
204 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
205 * value in order to return the NAA id.
206 */
207 if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
208 goto check_t10_vend_desc;
209
210 /* CODE SET == Binary */
211 buf[off++] = 0x1;
212
213 /* Set ASSOCIATION == addressed logical unit: 00b */
214 buf[off] = 0x00;
215
216 /* Identifier/Designator type == NAA identifier */
217 buf[off++] |= 0x3;
218 off++;
219
220 /* Identifier/Designator length */
221 buf[off++] = 0x10;
222
223 /*
224 * Start NAA IEEE Registered Extended Identifier/Designator
225 */
226 buf[off++] = (0x6 << 4);
227
228 /*
229 * Use OpenFabrics IEEE Company ID: 00 14 05
230 */
231 buf[off++] = 0x01;
232 buf[off++] = 0x40;
233 buf[off] = (0x5 << 4);
234
235 /*
236 * Return ConfigFS Unit Serial Number information for
237 * VENDOR_SPECIFIC_IDENTIFIER and
238 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
239 */
240 spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
241
242 len = 20;
243 off = (len + 4);
244
245 check_t10_vend_desc:
246 /*
247 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
248 */
249 id_len = 8; /* For Vendor field */
250 prod_len = 4; /* For VPD Header */
251 prod_len += 8; /* For Vendor field */
252 prod_len += strlen(prod);
253 prod_len++; /* For : */
254
255 if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
256 unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
257 unit_serial_len++; /* For NULL Terminator */
258
259 id_len += sprintf(&buf[off+12], "%s:%s", prod,
260 &dev->t10_wwn.unit_serial[0]);
261 }
262 buf[off] = 0x2; /* ASCII */
263 buf[off+1] = 0x1; /* T10 Vendor ID */
264 buf[off+2] = 0x0;
265 memcpy(&buf[off+4], "LIO-ORG", 8);
266 /* Extra Byte for NULL Terminator */
267 id_len++;
268 /* Identifier Length */
269 buf[off+3] = id_len;
270 /* Header size for Designation descriptor */
271 len += (id_len + 4);
272 off += (id_len + 4);
273 /*
274 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
275 */
276 port = lun->lun_sep;
277 if (port) {
278 struct t10_alua_lu_gp *lu_gp;
279 u32 padding, scsi_name_len, scsi_target_len;
280 u16 lu_gp_id = 0;
281 u16 tg_pt_gp_id = 0;
282 u16 tpgt;
283
284 tpg = port->sep_tpg;
285 /*
286 * Relative target port identifier, see spc4r17
287 * section 7.7.3.7
288 *
289 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
290 * section 7.5.1 Table 362
291 */
292 buf[off] =
293 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
294 buf[off++] |= 0x1; /* CODE SET == Binary */
295 buf[off] = 0x80; /* Set PIV=1 */
296 /* Set ASSOCIATION == target port: 01b */
297 buf[off] |= 0x10;
298 /* DESIGNATOR TYPE == Relative target port identifier */
299 buf[off++] |= 0x4;
300 off++; /* Skip over Reserved */
301 buf[off++] = 4; /* DESIGNATOR LENGTH */
302 /* Skip over Obsolete field in RTPI payload
303 * in Table 472 */
304 off += 2;
305 buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
306 buf[off++] = (port->sep_rtpi & 0xff);
307 len += 8; /* Header size + Designation descriptor */
308 /*
309 * Target port group identifier, see spc4r17
310 * section 7.7.3.8
311 *
312 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
313 * section 7.5.1 Table 362
314 */
315 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
316 if (!tg_pt_gp_mem)
317 goto check_lu_gp;
318
319 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
320 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
321 if (!tg_pt_gp) {
322 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
323 goto check_lu_gp;
324 }
325 tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
326 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
327
328 buf[off] =
329 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
330 buf[off++] |= 0x1; /* CODE SET == Binary */
331 buf[off] = 0x80; /* Set PIV=1 */
332 /* Set ASSOCIATION == target port: 01b */
333 buf[off] |= 0x10;
334 /* DESIGNATOR TYPE == Target port group identifier */
335 buf[off++] |= 0x5;
336 off++; /* Skip over Reserved */
337 buf[off++] = 4; /* DESIGNATOR LENGTH */
338 off += 2; /* Skip over Reserved Field */
339 buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
340 buf[off++] = (tg_pt_gp_id & 0xff);
341 len += 8; /* Header size + Designation descriptor */
342 /*
343 * Logical Unit Group identifier, see spc4r17
344 * section 7.7.3.8
345 */
346 check_lu_gp:
347 lu_gp_mem = dev->dev_alua_lu_gp_mem;
348 if (!lu_gp_mem)
349 goto check_scsi_name;
350
351 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
352 lu_gp = lu_gp_mem->lu_gp;
353 if (!lu_gp) {
354 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
355 goto check_scsi_name;
356 }
357 lu_gp_id = lu_gp->lu_gp_id;
358 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
359
360 buf[off++] |= 0x1; /* CODE SET == Binary */
361 /* DESIGNATOR TYPE == Logical Unit Group identifier */
362 buf[off++] |= 0x6;
363 off++; /* Skip over Reserved */
364 buf[off++] = 4; /* DESIGNATOR LENGTH */
365 off += 2; /* Skip over Reserved Field */
366 buf[off++] = ((lu_gp_id >> 8) & 0xff);
367 buf[off++] = (lu_gp_id & 0xff);
368 len += 8; /* Header size + Designation descriptor */
369 /*
370 * SCSI name string designator, see spc4r17
371 * section 7.7.3.11
372 *
373 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
374 * section 7.5.1 Table 362
375 */
376 check_scsi_name:
377 buf[off] =
378 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
379 buf[off++] |= 0x3; /* CODE SET == UTF-8 */
380 buf[off] = 0x80; /* Set PIV=1 */
381 /* Set ASSOCIATION == target port: 01b */
382 buf[off] |= 0x10;
383 /* DESIGNATOR TYPE == SCSI name string */
384 buf[off++] |= 0x8;
385 off += 2; /* Skip over Reserved and length */
386 /*
387 * SCSI name string identifier containing $FABRIC_MOD
388 * dependent information. For LIO-Target and iSCSI
389 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
390 * UTF-8 encoding.
391 */
392 tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
393 scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
394 tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
395 scsi_name_len += 1 /* Include NULL terminator */;
396 /*
397 * The null-terminated, null-padded (see 4.4.2) SCSI
398 * NAME STRING field contains a UTF-8 format string.
399 * The number of bytes in the SCSI NAME STRING field
400 * (i.e., the value in the DESIGNATOR LENGTH field)
401 * shall be no larger than 256 and shall be a multiple
402 * of four.
403 */
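/*
 * For example (hypothetical length): a 54-byte name including the NULL
 * terminator gives padding = (-54) & 3 = 2, so the designator is padded
 * to 56 bytes, the next multiple of four.
 */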
404 padding = ((-scsi_name_len) & 3);
405 if (padding)
406 scsi_name_len += padding;
407 if (scsi_name_len > 256)
408 scsi_name_len = 256;
409
410 buf[off-1] = scsi_name_len;
411 off += scsi_name_len;
412 /* Header size + Designation descriptor */
413 len += (scsi_name_len + 4);
414
415 /*
416 * Target device designator
417 */
418 buf[off] =
419 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
420 buf[off++] |= 0x3; /* CODE SET == UTF-8 */
421 buf[off] = 0x80; /* Set PIV=1 */
422 /* Set ASSOCIATION == target device: 10b */
423 buf[off] |= 0x20;
424 /* DESIGNATOR TYPE == SCSI name string */
425 buf[off++] |= 0x8;
426 off += 2; /* Skip over Reserved and length */
427 /*
428 * SCSI name string identifier containing $FABRIC_MOD
429 * dependent information. For LIO-Target and iSCSI
430 * Target Port, this means "<iSCSI name>" in
431 * UTF-8 encoding.
432 */
433 scsi_target_len = sprintf(&buf[off], "%s",
434 tpg->se_tpg_tfo->tpg_get_wwn(tpg));
435 scsi_target_len += 1 /* Include NULL terminator */;
436 /*
437 * The null-terminated, null-padded (see 4.4.2) SCSI
438 * NAME STRING field contains a UTF-8 format string.
439 * The number of bytes in the SCSI NAME STRING field
440 * (i.e., the value in the DESIGNATOR LENGTH field)
441 * shall be no larger than 256 and shall be a multiple
442 * of four.
443 */
444 padding = ((-scsi_target_len) & 3);
445 if (padding)
446 scsi_target_len += padding;
447 if (scsi_target_len > 256)
448 scsi_target_len = 256;
449
450 buf[off-1] = scsi_target_len;
451 off += scsi_target_len;
452
453 /* Header size + Designation descriptor */
454 len += (scsi_target_len + 4);
455 }
456 buf[2] = ((len >> 8) & 0xff);
457 buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
458 return 0;
459 }
460 EXPORT_SYMBOL(spc_emulate_evpd_83);
461
462 static bool
463 spc_check_dev_wce(struct se_device *dev)
464 {
465 bool wce = false;
466
467 if (dev->transport->get_write_cache)
468 wce = dev->transport->get_write_cache(dev);
469 else if (dev->dev_attrib.emulate_write_cache > 0)
470 wce = true;
471
472 return wce;
473 }
474
475 /* Extended INQUIRY Data VPD Page */
476 static sense_reason_t
477 spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
478 {
479 struct se_device *dev = cmd->se_dev;
480 struct se_session *sess = cmd->se_sess;
481
482 buf[3] = 0x3c;
483 /*
484 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
485 * only for TYPE3 protection.
486 */
487 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
488 if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
489 buf[4] = 0x5;
490 else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
491 buf[4] = 0x4;
492 }
493
494 /* Set HEADSUP, ORDSUP, SIMPSUP */
495 buf[5] = 0x07;
496
497 /* If WriteCache emulation is enabled, set V_SUP */
498 if (spc_check_dev_wce(dev))
499 buf[6] = 0x01;
500 /* If an LBA map is present set R_SUP */
501 spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
502 if (!list_empty(&dev->t10_alua.lba_map_list))
503 buf[8] = 0x10;
504 spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
505 return 0;
506 }
507
508 /* Block Limits VPD page */
509 static sense_reason_t
510 spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
511 {
512 struct se_device *dev = cmd->se_dev;
513 u32 max_sectors;
514 int have_tp = 0;
515 int opt, min;
516
517 /*
518 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
519 * emulate_tpu=1 or emulate_tpws=1 we expect a
520 * different page length for Thin Provisioning.
521 */
522 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
523 have_tp = 1;
524
525 buf[0] = dev->transport->get_device_type(dev);
526 buf[3] = have_tp ? 0x3c : 0x10;
527
528 /* Set WSNZ to 1 */
529 buf[4] = 0x01;
530 /*
531 * Set MAXIMUM COMPARE AND WRITE LENGTH
532 */
533 if (dev->dev_attrib.emulate_caw)
534 buf[5] = 0x01;
535
536 /*
537 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
538 */
539 if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
540 put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
541 else
542 put_unaligned_be16(1, &buf[6]);
543
544 /*
545 * Set MAXIMUM TRANSFER LENGTH
546 */
547 max_sectors = min(dev->dev_attrib.fabric_max_sectors,
548 dev->dev_attrib.hw_max_sectors);
549 put_unaligned_be32(max_sectors, &buf[8]);
550
551 /*
552 * Set OPTIMAL TRANSFER LENGTH
553 */
554 if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
555 put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
556 else
557 put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
558
559 /*
560 * Exit now if we don't support TP.
561 */
562 if (!have_tp)
563 goto max_write_same;
564
565 /*
566 * Set MAXIMUM UNMAP LBA COUNT
567 */
568 put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);
569
570 /*
571 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
572 */
573 put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
574 &buf[24]);
575
576 /*
577 * Set OPTIMAL UNMAP GRANULARITY
578 */
579 put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);
580
581 /*
582 * UNMAP GRANULARITY ALIGNMENT
583 */
584 put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
585 &buf[32]);
586 if (dev->dev_attrib.unmap_granularity_alignment != 0)
587 buf[32] |= 0x80; /* Set the UGAVALID bit */
588
589 /*
590 * MAXIMUM WRITE SAME LENGTH
591 */
592 max_write_same:
593 put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
594
595 return 0;
596 }
597
598 /* Block Device Characteristics VPD page */
599 static sense_reason_t
600 spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
601 {
602 struct se_device *dev = cmd->se_dev;
603
604 buf[0] = dev->transport->get_device_type(dev);
605 buf[3] = 0x3c;
606 buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;
607
608 return 0;
609 }
610
611 /* Thin Provisioning VPD */
612 static sense_reason_t
613 spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
614 {
615 struct se_device *dev = cmd->se_dev;
616
617 /*
618 * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
619 *
620 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
621 * zero, then the page length shall be set to 0004h. If the DP bit
622 * is set to one, then the page length shall be set to the value
623 * defined in table 162.
624 */
625 buf[0] = dev->transport->get_device_type(dev);
626
627 /*
628 * Set Hardcoded length mentioned above for DP=0
629 */
630 put_unaligned_be16(0x0004, &buf[2]);
631
632 /*
633 * The THRESHOLD EXPONENT field indicates the threshold set size in
634 * LBAs as a power of 2 (i.e., the threshold set size is equal to
635 * 2(threshold exponent)).
636 *
637 * Note that this is currently set to 0x00 as mkp says it will be
638 * changing again. We can enable this once it has settled in T10
639 * and is actually used by Linux/SCSI ML code.
640 */
641 buf[4] = 0x00;
642
643 /*
644 * A TPU bit set to one indicates that the device server supports
645 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
646 * that the device server does not support the UNMAP command.
647 */
648 if (dev->dev_attrib.emulate_tpu != 0)
649 buf[5] = 0x80;
650
651 /*
652 * A TPWS bit set to one indicates that the device server supports
653 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
654 * A TPWS bit set to zero indicates that the device server does not
655 * support the use of the WRITE SAME (16) command to unmap LBAs.
656 */
657 if (dev->dev_attrib.emulate_tpws != 0)
658 buf[5] |= 0x40;
659
660 return 0;
661 }
662
663 /* Referrals VPD page */
664 static sense_reason_t
665 spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
666 {
667 struct se_device *dev = cmd->se_dev;
668
669 buf[0] = dev->transport->get_device_type(dev);
670 buf[3] = 0x0c;
671 put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
672 put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]);
673
674 return 0;
675 }
676
677 static sense_reason_t
678 spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
679
680 static struct {
681 uint8_t page;
682 sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
683 } evpd_handlers[] = {
684 { .page = 0x00, .emulate = spc_emulate_evpd_00 },
685 { .page = 0x80, .emulate = spc_emulate_evpd_80 },
686 { .page = 0x83, .emulate = spc_emulate_evpd_83 },
687 { .page = 0x86, .emulate = spc_emulate_evpd_86 },
688 { .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
689 { .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
690 { .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
691 { .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
692 };
693
694 /* supported vital product data pages */
695 static sense_reason_t
696 spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
697 {
698 int p;
699
700 /*
701 * Only report the INQUIRY EVPD=1 pages after a valid NAA
702 * Registered Extended LUN WWN has been set via ConfigFS
703 * during device creation/restart.
704 */
705 if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
706 buf[3] = ARRAY_SIZE(evpd_handlers);
707 for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
708 buf[p + 4] = evpd_handlers[p].page;
709 }
710
711 return 0;
712 }
713
714 static sense_reason_t
715 spc_emulate_inquiry(struct se_cmd *cmd)
716 {
717 struct se_device *dev = cmd->se_dev;
718 struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
719 unsigned char *rbuf;
720 unsigned char *cdb = cmd->t_task_cdb;
721 unsigned char *buf;
722 sense_reason_t ret;
723 int p;
724
725 buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
726 if (!buf) {
727 pr_err("Unable to allocate response buffer for INQUIRY\n");
728 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
729 }
730
731 if (dev == tpg->tpg_virt_lun0.lun_se_dev)
732 buf[0] = 0x3f; /* Not connected */
733 else
734 buf[0] = dev->transport->get_device_type(dev);
735
736 if (!(cdb[1] & 0x1)) {
737 if (cdb[2]) {
738 pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
739 cdb[2]);
740 ret = TCM_INVALID_CDB_FIELD;
741 goto out;
742 }
743
744 ret = spc_emulate_inquiry_std(cmd, buf);
745 goto out;
746 }
747
748 for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
749 if (cdb[2] == evpd_handlers[p].page) {
750 buf[1] = cdb[2];
751 ret = evpd_handlers[p].emulate(cmd, buf);
752 goto out;
753 }
754 }
755
756 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
757 ret = TCM_INVALID_CDB_FIELD;
758
759 out:
760 rbuf = transport_kmap_data_sg(cmd);
761 if (rbuf) {
762 memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
763 transport_kunmap_data_sg(cmd);
764 }
765 kfree(buf);
766
767 if (!ret)
768 target_complete_cmd(cmd, GOOD);
769 return ret;
770 }
771
772 static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
773 {
774 p[0] = 0x01;
775 p[1] = 0x0a;
776
777 /* No changeable values for now */
778 if (pc == 1)
779 goto out;
780
781 out:
782 return 12;
783 }
784
785 static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
786 {
787 struct se_device *dev = cmd->se_dev;
788 struct se_session *sess = cmd->se_sess;
789
790 p[0] = 0x0a;
791 p[1] = 0x0a;
792
793 /* No changeable values for now */
794 if (pc == 1)
795 goto out;
796
797 p[2] = 2;
798 /*
799 * From spc4r23, 7.4.7 Control mode page
800 *
801 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
802 * restrictions on the algorithm used for reordering commands
803 * having the SIMPLE task attribute (see SAM-4).
804 *
805 * Table 368 -- QUEUE ALGORITHM MODIFIER field
806 * Code Description
807 * 0h Restricted reordering
808 * 1h Unrestricted reordering allowed
809 * 2h to 7h Reserved
810 * 8h to Fh Vendor specific
811 *
812 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
813 * the device server shall order the processing sequence of commands
814 * having the SIMPLE task attribute such that data integrity is maintained
815 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
816 * requests is halted at any time, the final value of all data observable
817 * on the medium shall be the same as if all the commands had been processed
818 * with the ORDERED task attribute).
819 *
820 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
821 * device server may reorder the processing sequence of commands having the
822 * SIMPLE task attribute in any manner. Any data integrity exposures related to
823 * command sequence order shall be explicitly handled by the application client
824 * through the selection of appropriate commands and task attributes.
825 */
826 p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
827 /*
828 * From spc4r17, section 7.4.6 Control mode Page
829 *
830 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
831 *
832 * 00b: The logical unit shall clear any unit attention condition
833 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
834 * status and shall not establish a unit attention condition when a com-
835 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
836 * status.
837 *
838 * 10b: The logical unit shall not clear any unit attention condition
839 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
840 * status and shall not establish a unit attention condition when
841 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
842 * CONFLICT status.
843 *
844 * 11b: The logical unit shall not clear any unit attention condition
845 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
846 * status and shall establish a unit attention condition for the
847 * initiator port associated with the I_T nexus on which the BUSY,
848 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
849 * Depending on the status, the additional sense code shall be set to
850 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
851 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
852 * command, a unit attention condition shall be established only once
853 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
854 * of the number of commands completed with one of those status codes.
855 */
856 p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
857 (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
858 /*
859 * From spc4r17, section 7.4.6 Control mode Page
860 *
861 * Task Aborted Status (TAS) bit set to zero.
862 *
863 * A task aborted status (TAS) bit set to zero specifies that aborted
864 * tasks shall be terminated by the device server without any response
865 * to the application client. A TAS bit set to one specifies that tasks
866 * aborted by the actions of an I_T nexus other than the I_T nexus on
867 * which the command was received shall be completed with TASK ABORTED
868 * status (see SAM-4).
869 */
870 p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
871 /*
872 * From spc4r30, section 7.5.7 Control mode page
873 *
874 * Application Tag Owner (ATO) bit set to one.
875 *
876 * If the ATO bit is set to one the device server shall not modify the
877 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
878 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
879 * TAG field.
880 */
881 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
882 if (dev->dev_attrib.pi_prot_type)
883 p[5] |= 0x80;
884 }
885
886 p[8] = 0xff;
887 p[9] = 0xff;
888 p[11] = 30;
889
890 out:
891 return 12;
892 }
893
894 static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
895 {
896 struct se_device *dev = cmd->se_dev;
897
898 p[0] = 0x08;
899 p[1] = 0x12;
900
901 /* No changeable values for now */
902 if (pc == 1)
903 goto out;
904
905 if (spc_check_dev_wce(dev))
906 p[2] = 0x04; /* Write Cache Enable */
907 p[12] = 0x20; /* Disabled Read Ahead */
908
909 out:
910 return 20;
911 }
912
913 static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
914 {
915 p[0] = 0x1c;
916 p[1] = 0x0a;
917
918 /* No changeable values for now */
919 if (pc == 1)
920 goto out;
921
922 out:
923 return 12;
924 }
925
926 static struct {
927 uint8_t page;
928 uint8_t subpage;
929 int (*emulate)(struct se_cmd *, u8, unsigned char *);
930 } modesense_handlers[] = {
931 { .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
932 { .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
933 { .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
934 { .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
935 };
936
937 static void spc_modesense_write_protect(unsigned char *buf, int type)
938 {
939 /*
940 * I believe that the WP bit (bit 7) in the mode header is the same for
941 * all device types.
942 */
943 switch (type) {
944 case TYPE_DISK:
945 case TYPE_TAPE:
946 default:
947 buf[0] |= 0x80; /* WP bit */
948 break;
949 }
950 }
951
952 static void spc_modesense_dpofua(unsigned char *buf, int type)
953 {
954 switch (type) {
955 case TYPE_DISK:
956 buf[0] |= 0x10; /* DPOFUA bit */
957 break;
958 default:
959 break;
960 }
961 }
962
963 static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
964 {
965 *buf++ = 8;
966 put_unaligned_be32(min(blocks, 0xffffffffull), buf);
967 buf += 4;
968 put_unaligned_be32(block_size, buf);
969 return 9;
970 }
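/*
 * Note on the return value above: callers position buf on the (low byte
 * of the) BLOCK DESCRIPTOR LENGTH field of the mode parameter header, so
 * this helper writes that length byte (8) followed by the 8-byte short
 * block descriptor, i.e. 9 bytes in total.
 */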
971
972 static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
973 {
974 if (blocks <= 0xffffffff)
975 return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;
976
977 *buf++ = 1; /* LONGLBA */
978 buf += 2;
979 *buf++ = 16;
980 put_unaligned_be64(blocks, buf);
981 buf += 12;
982 put_unaligned_be32(block_size, buf);
983
984 return 17;
985 }
986
987 static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
988 {
989 struct se_device *dev = cmd->se_dev;
990 char *cdb = cmd->t_task_cdb;
991 unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
992 int type = dev->transport->get_device_type(dev);
993 int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
994 bool dbd = !!(cdb[1] & 0x08);
995 bool llba = ten ? !!(cdb[1] & 0x10) : false;
996 u8 pc = cdb[2] >> 6;
997 u8 page = cdb[2] & 0x3f;
998 u8 subpage = cdb[3];
999 int length = 0;
1000 int ret;
1001 int i;
1002
1003 memset(buf, 0, SE_MODE_PAGE_BUF);
1004
1005 /*
1006 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
1007 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
1008 */
1009 length = ten ? 3 : 2;
1010
1011 /* DEVICE-SPECIFIC PARAMETER */
1012 if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
1013 (cmd->se_deve &&
1014 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
1015 spc_modesense_write_protect(&buf[length], type);
1016
1017 if ((spc_check_dev_wce(dev)) &&
1018 (dev->dev_attrib.emulate_fua_write > 0))
1019 spc_modesense_dpofua(&buf[length], type);
1020
1021 ++length;
1022
1023 /* BLOCK DESCRIPTOR */
1024
1025 /*
1026 * For now we only include a block descriptor for disk (SBC)
1027 * devices; other command sets use a slightly different format.
1028 */
1029 if (!dbd && type == TYPE_DISK) {
1030 u64 blocks = dev->transport->get_blocks(dev);
1031 u32 block_size = dev->dev_attrib.block_size;
1032
1033 if (ten) {
1034 if (llba) {
1035 length += spc_modesense_long_blockdesc(&buf[length],
1036 blocks, block_size);
1037 } else {
1038 length += 3;
1039 length += spc_modesense_blockdesc(&buf[length],
1040 blocks, block_size);
1041 }
1042 } else {
1043 length += spc_modesense_blockdesc(&buf[length], blocks,
1044 block_size);
1045 }
1046 } else {
1047 if (ten)
1048 length += 4;
1049 else
1050 length += 1;
1051 }
1052
1053 if (page == 0x3f) {
1054 if (subpage != 0x00 && subpage != 0xff) {
1055 pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
1056 return TCM_INVALID_CDB_FIELD;
1057 }
1058
1059 for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
1060 /*
1061 * Tricky way to say all subpage 00h for
1062 * subpage==0, all subpages for subpage==0xff
1063 * (and we just checked above that those are
1064 * the only two possibilities).
1065 */
1066 if ((modesense_handlers[i].subpage & ~subpage) == 0) {
1067 ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
1068 if (!ten && length + ret >= 255)
1069 break;
1070 length += ret;
1071 }
1072 }
1073
1074 goto set_length;
1075 }
1076
1077 for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
1078 if (modesense_handlers[i].page == page &&
1079 modesense_handlers[i].subpage == subpage) {
1080 length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
1081 goto set_length;
1082 }
1083
1084 /*
1085 * We don't intend to implement:
1086 * - obsolete page 03h "format parameters" (checked by Solaris)
1087 */
1088 if (page != 0x03)
1089 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
1090 page, subpage);
1091
1092 return TCM_UNKNOWN_MODE_PAGE;
1093
1094 set_length:
1095 if (ten)
1096 put_unaligned_be16(length - 2, buf);
1097 else
1098 buf[0] = length - 1;
1099
1100 rbuf = transport_kmap_data_sg(cmd);
1101 if (rbuf) {
1102 memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
1103 transport_kunmap_data_sg(cmd);
1104 }
1105
1106 target_complete_cmd(cmd, GOOD);
1107 return 0;
1108 }
1109
1110 static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
1111 {
1112 char *cdb = cmd->t_task_cdb;
1113 bool ten = cdb[0] == MODE_SELECT_10;
1114 int off = ten ? 8 : 4;
1115 bool pf = !!(cdb[1] & 0x10);
1116 u8 page, subpage;
1117 unsigned char *buf;
1118 unsigned char tbuf[SE_MODE_PAGE_BUF];
1119 int length;
1120 int ret = 0;
1121 int i;
1122
1123 if (!cmd->data_length) {
1124 target_complete_cmd(cmd, GOOD);
1125 return 0;
1126 }
1127
1128 if (cmd->data_length < off + 2)
1129 return TCM_PARAMETER_LIST_LENGTH_ERROR;
1130
1131 buf = transport_kmap_data_sg(cmd);
1132 if (!buf)
1133 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1134
1135 if (!pf) {
1136 ret = TCM_INVALID_CDB_FIELD;
1137 goto out;
1138 }
1139
1140 page = buf[off] & 0x3f;
1141 subpage = buf[off] & 0x40 ? buf[off + 1] : 0;
1142
1143 for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
1144 if (modesense_handlers[i].page == page &&
1145 modesense_handlers[i].subpage == subpage) {
1146 memset(tbuf, 0, SE_MODE_PAGE_BUF);
1147 length = modesense_handlers[i].emulate(cmd, 0, tbuf);
1148 goto check_contents;
1149 }
1150
1151 ret = TCM_UNKNOWN_MODE_PAGE;
1152 goto out;
1153
1154 check_contents:
1155 if (cmd->data_length < off + length) {
1156 ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
1157 goto out;
1158 }
1159
1160 if (memcmp(buf + off, tbuf, length))
1161 ret = TCM_INVALID_PARAMETER_LIST;
1162
1163 out:
1164 transport_kunmap_data_sg(cmd);
1165
1166 if (!ret)
1167 target_complete_cmd(cmd, GOOD);
1168 return ret;
1169 }
1170
1171 static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
1172 {
1173 unsigned char *cdb = cmd->t_task_cdb;
1174 unsigned char *rbuf;
1175 u8 ua_asc = 0, ua_ascq = 0;
1176 unsigned char buf[SE_SENSE_BUF];
1177
1178 memset(buf, 0, SE_SENSE_BUF);
1179
1180 if (cdb[1] & 0x01) {
1181 pr_err("REQUEST_SENSE description emulation not"
1182 " supported\n");
1183 return TCM_INVALID_CDB_FIELD;
1184 }
1185
1186 rbuf = transport_kmap_data_sg(cmd);
1187 if (!rbuf)
1188 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1189
1190 if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
1191 /*
1192 * CURRENT ERROR, UNIT ATTENTION
1193 */
1194 buf[0] = 0x70;
1195 buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
1196
1197 /*
1198 * The Additional Sense Code (ASC) from the UNIT ATTENTION
1199 */
1200 buf[SPC_ASC_KEY_OFFSET] = ua_asc;
1201 buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
1202 buf[7] = 0x0A;
1203 } else {
1204 /*
1205 * CURRENT ERROR, NO SENSE
1206 */
1207 buf[0] = 0x70;
1208 buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
1209
1210 /*
1211 * NO ADDITIONAL SENSE INFORMATION
1212 */
1213 buf[SPC_ASC_KEY_OFFSET] = 0x00;
1214 buf[7] = 0x0A;
1215 }
1216
1217 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
1218 transport_kunmap_data_sg(cmd);
1219
1220 target_complete_cmd(cmd, GOOD);
1221 return 0;
1222 }
1223
1224 sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
1225 {
1226 struct se_dev_entry *deve;
1227 struct se_session *sess = cmd->se_sess;
1228 unsigned char *buf;
1229 u32 lun_count = 0, offset = 8, i;
1230
1231 if (cmd->data_length < 16) {
1232 pr_warn("REPORT LUNS allocation length %u too small\n",
1233 cmd->data_length);
1234 return TCM_INVALID_CDB_FIELD;
1235 }
1236
1237 buf = transport_kmap_data_sg(cmd);
1238 if (!buf)
1239 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1240
1241 /*
1242 * If no struct se_session pointer is present, this struct se_cmd is
1243 * coming via a target_core_mod PASSTHROUGH op, and not through
1244 * a $FABRIC_MOD. In that case, report LUN=0 only.
1245 */
1246 if (!sess) {
1247 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
1248 lun_count = 1;
1249 goto done;
1250 }
1251
1252 spin_lock_irq(&sess->se_node_acl->device_list_lock);
1253 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
1254 deve = sess->se_node_acl->device_list[i];
1255 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
1256 continue;
1257 /*
1258 * We determine the correct LUN LIST LENGTH even once we
1259 * have reached the initial allocation length.
1260 * See SPC2-R20 7.19.
1261 */
1262 lun_count++;
1263 if ((offset + 8) > cmd->data_length)
1264 continue;
1265
1266 int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
1267 offset += 8;
1268 }
1269 spin_unlock_irq(&sess->se_node_acl->device_list_lock);
1270
1271 /*
1272 * See SPC3 r07, page 159.
1273 */
1274 done:
1275 lun_count *= 8;
1276 buf[0] = ((lun_count >> 24) & 0xff);
1277 buf[1] = ((lun_count >> 16) & 0xff);
1278 buf[2] = ((lun_count >> 8) & 0xff);
1279 buf[3] = (lun_count & 0xff);
1280 transport_kunmap_data_sg(cmd);
1281
1282 target_complete_cmd(cmd, GOOD);
1283 return 0;
1284 }
1285 EXPORT_SYMBOL(spc_emulate_report_luns);
1286
1287 static sense_reason_t
1288 spc_emulate_testunitready(struct se_cmd *cmd)
1289 {
1290 target_complete_cmd(cmd, GOOD);
1291 return 0;
1292 }
1293
1294 sense_reason_t
1295 spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1296 {
1297 struct se_device *dev = cmd->se_dev;
1298 unsigned char *cdb = cmd->t_task_cdb;
1299
1300 switch (cdb[0]) {
1301 case MODE_SELECT:
1302 *size = cdb[4];
1303 cmd->execute_cmd = spc_emulate_modeselect;
1304 break;
1305 case MODE_SELECT_10:
1306 *size = (cdb[7] << 8) + cdb[8];
1307 cmd->execute_cmd = spc_emulate_modeselect;
1308 break;
1309 case MODE_SENSE:
1310 *size = cdb[4];
1311 cmd->execute_cmd = spc_emulate_modesense;
1312 break;
1313 case MODE_SENSE_10:
1314 *size = (cdb[7] << 8) + cdb[8];
1315 cmd->execute_cmd = spc_emulate_modesense;
1316 break;
1317 case LOG_SELECT:
1318 case LOG_SENSE:
1319 *size = (cdb[7] << 8) + cdb[8];
1320 break;
1321 case PERSISTENT_RESERVE_IN:
1322 *size = (cdb[7] << 8) + cdb[8];
1323 cmd->execute_cmd = target_scsi3_emulate_pr_in;
1324 break;
1325 case PERSISTENT_RESERVE_OUT:
1326 *size = (cdb[7] << 8) + cdb[8];
1327 cmd->execute_cmd = target_scsi3_emulate_pr_out;
1328 break;
1329 case RELEASE:
1330 case RELEASE_10:
1331 if (cdb[0] == RELEASE_10)
1332 *size = (cdb[7] << 8) | cdb[8];
1333 else
1334 *size = cmd->data_length;
1335
1336 cmd->execute_cmd = target_scsi2_reservation_release;
1337 break;
1338 case RESERVE:
1339 case RESERVE_10:
1340 /*
1341 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
1342 * Assume the passthrough or $FABRIC_MOD will tell us about it.
1343 */
1344 if (cdb[0] == RESERVE_10)
1345 *size = (cdb[7] << 8) | cdb[8];
1346 else
1347 *size = cmd->data_length;
1348
1349 cmd->execute_cmd = target_scsi2_reservation_reserve;
1350 break;
1351 case REQUEST_SENSE:
1352 *size = cdb[4];
1353 cmd->execute_cmd = spc_emulate_request_sense;
1354 break;
1355 case INQUIRY:
1356 *size = (cdb[3] << 8) + cdb[4];
1357
1358 /*
1359 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
1360 * See spc4r17 section 5.3
1361 */
1362 cmd->sam_task_attr = MSG_HEAD_TAG;
1363 cmd->execute_cmd = spc_emulate_inquiry;
1364 break;
1365 case SECURITY_PROTOCOL_IN:
1366 case SECURITY_PROTOCOL_OUT:
1367 *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1368 break;
1369 case EXTENDED_COPY:
1370 *size = get_unaligned_be32(&cdb[10]);
1371 cmd->execute_cmd = target_do_xcopy;
1372 break;
1373 case RECEIVE_COPY_RESULTS:
1374 *size = get_unaligned_be32(&cdb[10]);
1375 cmd->execute_cmd = target_do_receive_copy_results;
1376 break;
1377 case READ_ATTRIBUTE:
1378 case WRITE_ATTRIBUTE:
1379 *size = (cdb[10] << 24) | (cdb[11] << 16) |
1380 (cdb[12] << 8) | cdb[13];
1381 break;
1382 case RECEIVE_DIAGNOSTIC:
1383 case SEND_DIAGNOSTIC:
1384 *size = (cdb[3] << 8) | cdb[4];
1385 break;
1386 case WRITE_BUFFER:
1387 *size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
1388 break;
1389 case REPORT_LUNS:
1390 cmd->execute_cmd = spc_emulate_report_luns;
1391 *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1392 /*
1393 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
1394 * See spc4r17 section 5.3
1395 */
1396 cmd->sam_task_attr = MSG_HEAD_TAG;
1397 break;
1398 case TEST_UNIT_READY:
1399 cmd->execute_cmd = spc_emulate_testunitready;
1400 *size = 0;
1401 break;
1402 case MAINTENANCE_IN:
1403 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
1404 /*
1405 * MAINTENANCE_IN from SCC-2
1406 * Check for emulated MI_REPORT_TARGET_PGS
1407 */
1408 if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
1409 cmd->execute_cmd =
1410 target_emulate_report_target_port_groups;
1411 }
1412 *size = get_unaligned_be32(&cdb[6]);
1413 } else {
1414 /*
1415 * GPCMD_SEND_KEY from multi media commands
1416 */
1417 *size = get_unaligned_be16(&cdb[8]);
1418 }
1419 break;
1420 case MAINTENANCE_OUT:
1421 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
1422 /*
1423 * MAINTENANCE_OUT from SCC-2
1424 * Check for emulated MO_SET_TARGET_PGS.
1425 */
1426 if (cdb[1] == MO_SET_TARGET_PGS) {
1427 cmd->execute_cmd =
1428 target_emulate_set_target_port_groups;
1429 }
1430 *size = get_unaligned_be32(&cdb[6]);
1431 } else {
1432 /*
1433 * GPCMD_SEND_KEY from multi media commands
1434 */
1435 *size = get_unaligned_be16(&cdb[8]);
1436 }
1437 break;
1438 default:
1439 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
1440 " 0x%02x, sending CHECK_CONDITION.\n",
1441 cmd->se_tfo->get_fabric_name(), cdb[0]);
1442 return TCM_UNSUPPORTED_SCSI_OPCODE;
1443 }
1444
1445 return 0;
1446 }
1447 EXPORT_SYMBOL(spc_parse_cdb);