target/spc: Only expose PI mode page bits when supported by fabric
[deliverable/linux.git] drivers/target/target_core_sbc.c
/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"

static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

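/*
 * READ CAPACITY (16) returns a 32-byte parameter data buffer, filled in
 * below: bytes 0-7 carry the RETURNED LOGICAL BLOCK ADDRESS (the last
 * addressable LBA), bytes 8-11 the LOGICAL BLOCK LENGTH IN BYTES, byte 12
 * the P_TYPE/PROT_EN protection bits, byte 13 the LOGICAL BLOCKS PER
 * PHYSICAL BLOCK EXPONENT, byte 14 the thin-provisioning enable bit plus
 * the upper bits of the LOWEST ALIGNED LOGICAL BLOCK ADDRESS, and byte 15
 * its low byte.  All multi-byte fields are big-endian, which is why the
 * shifts below store the most significant byte first.
 */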
static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set P_TYPE and PROT_EN bits for DIF support
	 */
	if (dev->dev_attrib.pi_prot_type)
		buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
		buf[14] = (lalba >> 8) & 0x3f;
		buf[15] = lalba & 0xff;
	}

	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] |= 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

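/*
 * The NUMBER OF LOGICAL BLOCKS field sits at a different CDB offset for
 * each WRITE SAME variant: bytes 7-8 for WRITE SAME (10), bytes 10-13 for
 * WRITE SAME (16), and bytes 28-31 for WRITE SAME (32) carried inside a
 * VARIABLE LENGTH CDB.  A value of zero means "to the end of the device",
 * which is resolved below from ->get_blocks() and the starting LBA.
 */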
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when non zero is supplied, otherwise calculate
	 * the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static int sbc_check_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long long end_lba;
	u32 sectors;

	sectors = cmd->data_length / dev->dev_attrib.block_size;
	end_lba = dev->transport->get_blocks(dev) + 1;

	if (cmd->t_task_lba + sectors > end_lba) {
		pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
			cmd->t_task_lba, sectors, end_lba);
		return -EINVAL;
	}

	return 0;
}

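/*
 * Helpers for pulling the transfer length and LBA out of the fixed,
 * big-endian CDB offsets defined by SBC.  For example, a READ (10) CDB
 * carries a four-byte LBA in bytes 2-5 and a two-byte transfer length in
 * bytes 7-8, which transport_lba_32() and transport_get_sectors_10()
 * decode below.
 */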
static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

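/*
 * sbc_setup_write_same() inspects the WRITE SAME flags byte (CDB byte 1):
 * bit 1 (LBDATA) and bit 2 (PBDATA) are rejected, bit 3 (UNMAP) routes the
 * command to the backend discard path when one is provided, and bit 4
 * (ANCHOR) is rejected since ANC_SUP is never reported.
 */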
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	unsigned int sectors = sbc_get_write_same_sectors(cmd);

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

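/*
 * Example of the XOR step performed below: if a byte read from media is
 * 0x3c and the corresponding data-out byte is 0xa5, the data-in buffer
 * returned to the initiator holds 0x99 (0x3c ^ 0xa5).
 */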
static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

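/*
 * COMPARE AND WRITE is emulated in two phases: sbc_compare_and_write()
 * takes dev->caw_sem and submits a backend READ into cmd->t_bidi_data_sg,
 * then compare_and_write_callback() compares that data against the verify
 * payload and, on a match, re-dispatches the command as a normal write of
 * the write portion of the data-out buffer.  compare_and_write_post() runs
 * after that write completes and releases caw_sem; the semaphore keeps the
 * read-compare-write sequence atomic with respect to other COMPARE AND
 * WRITE commands on the same device.
 */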
static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}

static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet..
	 */
	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
		return TCM_NO_SENSE;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    0);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = MSG_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg..
	 */
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_done() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}

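/*
 * Translate the three-bit RDPROTECT/WRPROTECT value from the CDB into the
 * protection operation (INSERT on write or STRIP on read when the initiator
 * transfers no protection information, PASS when it does) and the set of
 * guard/reference tag checks the backend should perform.  The reference tag
 * is only checked here for Type 1 protection formats.
 */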
static int
sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
		       bool is_write, struct se_cmd *cmd)
{
	if (is_write) {
		cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
					 TARGET_PROT_DOUT_INSERT;
		switch (protect) {
		case 0x0:
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	} else {
		cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
					 TARGET_PROT_DIN_STRIP;
		switch (protect) {
		case 0x0:
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	}

	return 0;
}

static bool
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors, bool is_write)
{
	u8 protect = cdb[1] >> 5;

	if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
		return true;

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (protect)
			return false;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
	default:
		return true;
	}

	if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
				   is_write, cmd))
		return false;

	cmd->prot_type = dev->dev_attrib.pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;
	pr_debug("%s: prot_type=%d, prot_length=%d prot_op=%d prot_checks=%d\n",
		 __func__, cmd->prot_type, cmd->prot_length,
		 cmd->prot_op, cmd->prot_checks);

	return true;
}

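/*
 * Main SBC CDB parsing entry point: decode the opcode, extract the LBA and
 * sector count, wire up the matching ->execute_cmd() handler from the
 * backend's sbc_ops, and validate the request (protection settings, maximum
 * transfer length, end-of-device range) before the expected data length is
 * checked via target_cmd_size_check().
 */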
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
			       " 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
			       cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (!ops->execute_sync_cache) {
			size = 0;
			cmd->execute_cmd = sbc_emulate_noop;
			break;
		}

		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}

		size = sbc_get_size(cmd, sectors);

		/*
		 * Check to ensure that LBA + Range does not extend past the end
		 * of the device for IBLOCK and FILEIO ->do_sync_cache() backend calls
		 */
		if (cmd->t_task_lba || sectors) {
			if (sbc_check_valid_sectors(cmd) < 0)
				return TCM_ADDRESS_OUT_OF_RANGE;
		}
		cmd->execute_cmd = ops->execute_sync_cache;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}

		end_lba = dev->transport->get_blocks(dev) + 1;
		if (cmd->t_task_lba + sectors > end_lba) {
			pr_err("cmd exceeds last lba %llu "
			       "(lba %llu, sectors %u)\n",
			       end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

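/*
 * UNMAP parameter list layout, as consumed below: an 8-byte header with the
 * UNMAP DATA LENGTH in bytes 0-1 and the UNMAP BLOCK DESCRIPTOR DATA LENGTH
 * in bytes 2-3, followed by 16-byte block descriptors each holding an
 * 8-byte starting LBA and a 4-byte NUMBER OF LOGICAL BLOCKS (the remaining
 * 4 bytes are reserved).
 */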
sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);

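/*
 * Each struct se_dif_v1_tuple carries the 8 bytes of T10 protection
 * information for one logical block: a CRC-T10DIF guard tag computed over
 * the block data, a 16-bit application tag, and a 32-bit reference tag that
 * must match the lower 32 bits of the LBA for Type 1 or the expected
 * initial LBA (ei_lba) for Type 2.
 */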
static sense_reason_t
sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
		  const void *p, sector_t sector, unsigned int ei_lba)
{
	int block_size = dev->dev_attrib.block_size;
	__be16 csum;

	csum = cpu_to_be16(crc_t10dif(p, block_size));

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
			" csum 0x%04x\n", (unsigned long long)sector,
			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}

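/*
 * Copy protection information between the command's t_prot_sg and the
 * scatterlist supplied by the caller: with @read set the tuples in @sg are
 * copied into cmd->t_prot_sg, otherwise cmd->t_prot_sg is copied out
 * into @sg.
 */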
static void
sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		  struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			if (offset >= sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
			kunmap_atomic(addr);
		}
		kunmap_atomic(paddr);
	}
}

sense_reason_t
sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = 0;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_write);

sense_reason_t
sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = sg_off;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + sg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			if (sdt->app_tag == cpu_to_be16(0xffff)) {
				sector++;
				offset += sizeof(struct se_dif_v1_tuple);
				continue;
			}

			rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_read);