target/spc: Add protection related bits to INQUIRY EVPD=0x86
drivers/target/target_core_sbc.c
1/*
2 * SCSI Block Commands (SBC) parsing and emulation.
3 *
4c76251e 4 * (c) Copyright 2002-2013 Datera, Inc.
5 *
6 * Nicholas A. Bellinger <nab@kernel.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/ratelimit.h>
41861fa8 26#include <linux/crc-t10dif.h>
27#include <asm/unaligned.h>
28#include <scsi/scsi.h>
68ff9b9b 29#include <scsi/scsi_tcq.h>
30
31#include <target/target_core_base.h>
32#include <target/target_core_backend.h>
33#include <target/target_core_fabric.h>
34
35#include "target_core_internal.h"
36#include "target_core_ua.h"
c66094bf 37#include "target_core_alua.h"
d6e0175c 38
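/*
 * READ CAPACITY (10) returns an 8-byte parameter block: bytes 0-3 carry the
 * big-endian LBA of the last logical block (0xffffffff when the capacity does
 * not fit in 32 bits, directing the initiator to READ CAPACITY (16)) and
 * bytes 4-7 carry the logical block length in bytes, as filled in below.
 */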
39static sense_reason_t
40sbc_emulate_readcapacity(struct se_cmd *cmd)
41{
42 struct se_device *dev = cmd->se_dev;
8dc8632a 43 unsigned char *cdb = cmd->t_task_cdb;
1fd032ee 44 unsigned long long blocks_long = dev->transport->get_blocks(dev);
45 unsigned char *rbuf;
46 unsigned char buf[8];
47 u32 blocks;
48
49 /*
50 * SBC-2 says:
51 * If the PMI bit is set to zero and the LOGICAL BLOCK
52 * ADDRESS field is not set to zero, the device server shall
53 * terminate the command with CHECK CONDITION status with
54 * the sense key set to ILLEGAL REQUEST and the additional
55 * sense code set to INVALID FIELD IN CDB.
56 *
57 * In SBC-3, these fields are obsolete, but some SCSI
58 * compliance tests actually check this, so we might as well
59 * follow SBC-2.
60 */
61 if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
62 return TCM_INVALID_CDB_FIELD;
63
64 if (blocks_long >= 0x00000000ffffffff)
65 blocks = 0xffffffff;
66 else
67 blocks = (u32)blocks_long;
68
69 buf[0] = (blocks >> 24) & 0xff;
70 buf[1] = (blocks >> 16) & 0xff;
71 buf[2] = (blocks >> 8) & 0xff;
72 buf[3] = blocks & 0xff;
73 buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
74 buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
75 buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
76 buf[7] = dev->dev_attrib.block_size & 0xff;
1fd032ee 77
a50da144 78 rbuf = transport_kmap_data_sg(cmd);
79 if (rbuf) {
80 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
81 transport_kunmap_data_sg(cmd);
82 }
83
84 target_complete_cmd(cmd, GOOD);
85 return 0;
86}
87
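/*
 * READ CAPACITY (16) parameter data as built below: bytes 0-7 last LBA,
 * bytes 8-11 logical block length, byte 13 low nibble LOGICAL BLOCKS PER
 * PHYSICAL BLOCK EXPONENT, byte 14 bit 7 LBPME (thin provisioning enabled)
 * plus the top bits of LOWEST ALIGNED LBA, byte 15 the remaining LOWEST
 * ALIGNED LBA bits.
 */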
88static sense_reason_t
89sbc_emulate_readcapacity_16(struct se_cmd *cmd)
90{
91 struct se_device *dev = cmd->se_dev;
92 unsigned char *rbuf;
93 unsigned char buf[32];
94 unsigned long long blocks = dev->transport->get_blocks(dev);
95
a50da144 96 memset(buf, 0, sizeof(buf));
97 buf[0] = (blocks >> 56) & 0xff;
98 buf[1] = (blocks >> 48) & 0xff;
99 buf[2] = (blocks >> 40) & 0xff;
100 buf[3] = (blocks >> 32) & 0xff;
101 buf[4] = (blocks >> 24) & 0xff;
102 buf[5] = (blocks >> 16) & 0xff;
103 buf[6] = (blocks >> 8) & 0xff;
104 buf[7] = blocks & 0xff;
105 buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
106 buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
107 buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
108 buf[11] = dev->dev_attrib.block_size & 0xff;
109
110 if (dev->transport->get_lbppbe)
111 buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
112
113 if (dev->transport->get_alignment_offset_lbas) {
114 u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
115 buf[14] = (lalba >> 8) & 0x3f;
116 buf[15] = lalba & 0xff;
117 }
118
119 /*
 120 * Set the Thin Provisioning Enable bit in READ CAPACITY (16) byte 14,
 121 * per sbc3r22, if emulate_tpu or emulate_tpws is enabled.
122 */
0fd97ccf 123 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
7f7caf6a 124 buf[14] |= 0x80;
1fd032ee 125
a50da144 126 rbuf = transport_kmap_data_sg(cmd);
127 if (rbuf) {
128 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
129 transport_kunmap_data_sg(cmd);
130 }
131
132 target_complete_cmd(cmd, GOOD);
133 return 0;
134}
135
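/*
 * Pull NUMBER OF LOGICAL BLOCKS out of the WRITE SAME (10)/(16)/(32) CDB.
 * A value of zero means "write through the last block of the medium", e.g.
 * with a last LBA of 1023 and a starting LBA of 1000 this works out to
 * 1023 - 1000 + 1 = 24 blocks.
 */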
972b29c8 136sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
1fd032ee 137{
1fd032ee 138 u32 num_blocks;
139
140 if (cmd->t_task_cdb[0] == WRITE_SAME)
141 num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
142 else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
143 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
144 else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
145 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
146
147 /*
148 * Use the explicit range when non zero is supplied, otherwise calculate
149 * the remaining range based on ->get_blocks() - starting LBA.
150 */
151 if (num_blocks)
152 return num_blocks;
1fd032ee 153
154 return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
155 cmd->t_task_lba + 1;
1fd032ee 156}
972b29c8 157EXPORT_SYMBOL(sbc_get_write_same_sectors);
1fd032ee 158
de103c93 159static sense_reason_t
1920ed61 160sbc_emulate_noop(struct se_cmd *cmd)
161{
162 target_complete_cmd(cmd, GOOD);
163 return 0;
164}
165
166static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
167{
0fd97ccf 168 return cmd->se_dev->dev_attrib.block_size * sectors;
169}
170
171static int sbc_check_valid_sectors(struct se_cmd *cmd)
172{
173 struct se_device *dev = cmd->se_dev;
174 unsigned long long end_lba;
175 u32 sectors;
176
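	/*
	 * ->get_blocks() returns the last addressable LBA, so the device
	 * exposes get_blocks() + 1 logical blocks in total.
	 */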
0fd97ccf 177 sectors = cmd->data_length / dev->dev_attrib.block_size;
178 end_lba = dev->transport->get_blocks(dev) + 1;
179
180 if (cmd->t_task_lba + sectors > end_lba) {
181 pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
182 cmd->t_task_lba, sectors, end_lba);
183 return -EINVAL;
184 }
185
186 return 0;
187}
188
189static inline u32 transport_get_sectors_6(unsigned char *cdb)
190{
191 /*
192 * Use 8-bit sector value. SBC-3 says:
193 *
194 * A TRANSFER LENGTH field set to zero specifies that 256
195 * logical blocks shall be written. Any other value
196 * specifies the number of logical blocks that shall be
197 * written.
198 */
199 return cdb[4] ? : 256;
200}
201
202static inline u32 transport_get_sectors_10(unsigned char *cdb)
203{
204 return (u32)(cdb[7] << 8) + cdb[8];
205}
206
207static inline u32 transport_get_sectors_12(unsigned char *cdb)
208{
209 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
210}
211
212static inline u32 transport_get_sectors_16(unsigned char *cdb)
213{
214 return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
215 (cdb[12] << 8) + cdb[13];
216}
217
218/*
219 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
220 */
221static inline u32 transport_get_sectors_32(unsigned char *cdb)
222{
223 return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
224 (cdb[30] << 8) + cdb[31];
225
226}
227
228static inline u32 transport_lba_21(unsigned char *cdb)
229{
230 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
231}
232
233static inline u32 transport_lba_32(unsigned char *cdb)
234{
235 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
236}
237
238static inline unsigned long long transport_lba_64(unsigned char *cdb)
239{
240 unsigned int __v1, __v2;
241
242 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
243 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
244
245 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
246}
247
248/*
249 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
250 */
251static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
252{
253 unsigned int __v1, __v2;
254
255 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
256 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
257
258 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
259}
260
261static sense_reason_t
262sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
d6e0175c 263{
972b29c8 264 unsigned int sectors = sbc_get_write_same_sectors(cmd);
773cbaf7 265
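	/*
	 * WRITE SAME CDB flag bits checked below (SBC-3): 0x02 LBDATA,
	 * 0x04 PBDATA, 0x08 UNMAP, 0x10 ANCHOR.  Only the UNMAP case is
	 * translated into backend discard; LBDATA/PBDATA and ANCHOR are
	 * rejected.
	 */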
266 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
267 pr_err("WRITE_SAME PBDATA and LBDATA"
268 " bits not supported for Block Discard"
269 " Emulation\n");
cd063bef 270 return TCM_UNSUPPORTED_SCSI_OPCODE;
d6e0175c 271 }
272 if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
273 pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
274 sectors, cmd->se_dev->dev_attrib.max_write_same_len);
275 return TCM_INVALID_CDB_FIELD;
276 }
277 /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
278 if (flags[0] & 0x10) {
279 pr_warn("WRITE SAME with ANCHOR not supported\n");
280 return TCM_INVALID_CDB_FIELD;
281 }
d6e0175c 282 /*
283 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
284 * translated into block discard requests within backend code.
d6e0175c 285 */
286 if (flags[0] & 0x08) {
287 if (!ops->execute_write_same_unmap)
288 return TCM_UNSUPPORTED_SCSI_OPCODE;
289
290 cmd->execute_cmd = ops->execute_write_same_unmap;
291 return 0;
d6e0175c 292 }
293 if (!ops->execute_write_same)
294 return TCM_UNSUPPORTED_SCSI_OPCODE;
d6e0175c 295
cd063bef 296 cmd->execute_cmd = ops->execute_write_same;
297 return 0;
298}
299
a6b0133c 300static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
301{
302 unsigned char *buf, *addr;
303 struct scatterlist *sg;
304 unsigned int offset;
305 sense_reason_t ret = TCM_NO_SENSE;
306 int i, count;
307 /*
308 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
309 *
310 * 1) read the specified logical block(s);
311 * 2) transfer logical blocks from the data-out buffer;
312 * 3) XOR the logical blocks transferred from the data-out buffer with
313 * the logical blocks read, storing the resulting XOR data in a buffer;
314 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
315 * blocks transferred from the data-out buffer; and
316 * 5) transfer the resulting XOR data to the data-in buffer.
317 */
318 buf = kmalloc(cmd->data_length, GFP_KERNEL);
319 if (!buf) {
320 pr_err("Unable to allocate xor_callback buf\n");
a6b0133c 321 return TCM_OUT_OF_RESOURCES;
322 }
323 /*
324 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
325 * into the locally allocated *buf
326 */
327 sg_copy_to_buffer(cmd->t_data_sg,
328 cmd->t_data_nents,
329 buf,
330 cmd->data_length);
331
332 /*
333 * Now perform the XOR against the BIDI read memory located at
 334 * cmd->t_bidi_data_sg
335 */
336
337 offset = 0;
338 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
339 addr = kmap_atomic(sg_page(sg));
340 if (!addr) {
341 ret = TCM_OUT_OF_RESOURCES;
d6e0175c 342 goto out;
a6b0133c 343 }
344
345 for (i = 0; i < sg->length; i++)
346 *(addr + sg->offset + i) ^= *(buf + offset + i);
347
348 offset += sg->length;
349 kunmap_atomic(addr);
350 }
351
352out:
353 kfree(buf);
a6b0133c 354 return ret;
355}
356
357static sense_reason_t
358sbc_execute_rw(struct se_cmd *cmd)
359{
360 return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
361 cmd->data_direction);
362}
363
364static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
365{
366 struct se_device *dev = cmd->se_dev;
367
368 /*
369 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
370 * within target_complete_ok_work() if the command was successfully
371 * sent to the backend driver.
372 */
373 spin_lock_irq(&cmd->t_state_lock);
374 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
375 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
376 spin_unlock_irq(&cmd->t_state_lock);
377
378 /*
379 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
380 * before the original READ I/O submission.
381 */
382 up(&dev->caw_sem);
383
384 return TCM_NO_SENSE;
385}
386
387static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
388{
389 struct se_device *dev = cmd->se_dev;
390 struct scatterlist *write_sg = NULL, *sg;
db60df88 391 unsigned char *buf = NULL, *addr;
392 struct sg_mapping_iter m;
393 unsigned int offset = 0, len;
394 unsigned int nlbas = cmd->t_task_nolb;
395 unsigned int block_size = dev->dev_attrib.block_size;
396 unsigned int compare_len = (nlbas * block_size);
397 sense_reason_t ret = TCM_NO_SENSE;
398 int rc, i;
399
400 /*
401 * Handle early failure in transport_generic_request_failure(),
 402 * which will not have taken ->caw_sem yet.
403 */
404 if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
405 return TCM_NO_SENSE;
406 /*
407 * Immediately exit + release dev->caw_sem if command has already
408 * been failed with a non-zero SCSI status.
409 */
410 if (cmd->scsi_status) {
411 pr_err("compare_and_write_callback: non zero scsi_status:"
412 " 0x%02x\n", cmd->scsi_status);
413 goto out;
414 }
cf6d1f09 415
416 buf = kzalloc(cmd->data_length, GFP_KERNEL);
417 if (!buf) {
418 pr_err("Unable to allocate compare_and_write buf\n");
419 ret = TCM_OUT_OF_RESOURCES;
420 goto out;
421 }
422
423 write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
424 GFP_KERNEL);
425 if (!write_sg) {
426 pr_err("Unable to allocate compare_and_write sg\n");
427 ret = TCM_OUT_OF_RESOURCES;
428 goto out;
429 }
430 /*
431 * Setup verify and write data payloads from total NumberLBAs.
432 */
433 rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
434 cmd->data_length);
435 if (!rc) {
436 pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
437 ret = TCM_OUT_OF_RESOURCES;
438 goto out;
439 }
440 /*
 441 * Compare the SCSI READ payload against the verify payload
442 */
443 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
444 addr = (unsigned char *)kmap_atomic(sg_page(sg));
445 if (!addr) {
446 ret = TCM_OUT_OF_RESOURCES;
447 goto out;
448 }
449
450 len = min(sg->length, compare_len);
451
452 if (memcmp(addr, buf + offset, len)) {
453 pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
454 addr, buf + offset);
455 kunmap_atomic(addr);
456 goto miscompare;
457 }
458 kunmap_atomic(addr);
459
460 offset += len;
461 compare_len -= len;
462 if (!compare_len)
463 break;
464 }
465
466 i = 0;
467 len = cmd->t_task_nolb * block_size;
468 sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
469 /*
470 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
471 */
472 while (len) {
473 sg_miter_next(&m);
474
475 if (block_size < PAGE_SIZE) {
476 sg_set_page(&write_sg[i], m.page, block_size,
477 block_size);
478 } else {
479 sg_miter_next(&m);
480 sg_set_page(&write_sg[i], m.page, block_size,
481 0);
482 }
483 len -= block_size;
484 i++;
485 }
486 sg_miter_stop(&m);
487 /*
488 * Save the original SGL + nents values before updating to new
489 * assignments, to be released in transport_free_pages() ->
490 * transport_reset_sgl_orig()
491 */
492 cmd->t_data_sg_orig = cmd->t_data_sg;
493 cmd->t_data_sg = write_sg;
494 cmd->t_data_nents_orig = cmd->t_data_nents;
495 cmd->t_data_nents = 1;
496
497 cmd->sam_task_attr = MSG_HEAD_TAG;
498 cmd->transport_complete_callback = compare_and_write_post;
499 /*
500 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
501 * for submitting the adjusted SGL to write instance user-data.
502 */
503 cmd->execute_cmd = sbc_execute_rw;
504
505 spin_lock_irq(&cmd->t_state_lock);
506 cmd->t_state = TRANSPORT_PROCESSING;
507 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
508 spin_unlock_irq(&cmd->t_state_lock);
509
510 __target_execute_cmd(cmd);
511
512 kfree(buf);
513 return ret;
514
515miscompare:
516 pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
517 dev->transport->name);
518 ret = TCM_MISCOMPARE_VERIFY;
519out:
520 /*
521 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
522 * sbc_compare_and_write() before the original READ I/O submission.
523 */
524 up(&dev->caw_sem);
525 kfree(write_sg);
526 kfree(buf);
527 return ret;
528}
529
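/*
 * COMPARE AND WRITE emulation overview: the data-out buffer carries the
 * verify payload followed by the write payload (hence the doubled size in
 * sbc_parse_cdb).  sbc_compare_and_write() submits the backend READ into
 * t_bidi_data_sg; compare_and_write_callback() compares that data against
 * the verify payload and, on a match, repoints t_data_sg at the write
 * payload and re-executes the command as a normal write.  A mismatch is
 * reported as MISCOMPARE sense.  E.g. with NoLB=1 and a 512-byte block size
 * the initiator sends 1024 bytes and cmd->data_length is reset to 512 for
 * the backend I/O.
 */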
530static sense_reason_t
531sbc_compare_and_write(struct se_cmd *cmd)
532{
533 struct se_device *dev = cmd->se_dev;
534 sense_reason_t ret;
535 int rc;
536 /*
537 * Submit the READ first for COMPARE_AND_WRITE to perform the
 538 * comparison using SGLs at cmd->t_bidi_data_sg.
539 */
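	/*
	 * dev->caw_sem serializes COMPARE AND WRITE emulation per device so
	 * that the backend read, compare and write below execute atomically
	 * with respect to other CAW commands.
	 */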
540 rc = down_interruptible(&dev->caw_sem);
541 if ((rc != 0) || signal_pending(current)) {
542 cmd->transport_complete_callback = NULL;
543 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
544 }
545 /*
546 * Reset cmd->data_length to individual block_size in order to not
547 * confuse backend drivers that depend on this value matching the
548 * size of the I/O being submitted.
549 */
550 cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
551
552 ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
553 DMA_FROM_DEVICE);
554 if (ret) {
555 cmd->transport_complete_callback = NULL;
556 up(&dev->caw_sem);
557 return ret;
558 }
559 /*
560 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
561 * upon MISCOMPARE, or in compare_and_write_done() upon completion
562 * of WRITE instance user-data.
563 */
564 return TCM_NO_SENSE;
565}
566
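/*
 * Decide whether DIF protection applies to this READ/WRITE.  The 0xe0 mask
 * below is the RDPROTECT/WRPROTECT field (CDB byte 1, bits 7-5); depending
 * on the configured DIF type the command either proceeds without protection,
 * is rejected (Type 2 with a non-zero protect field), or has its reference
 * tag seed and cmd->prot_* fields set up for the backend.
 */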
567static bool
568sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
569 u32 sectors)
570{
571 if (!cmd->t_prot_sg || !cmd->t_prot_nents)
572 return true;
573
574 switch (dev->dev_attrib.pi_prot_type) {
575 case TARGET_DIF_TYPE3_PROT:
576 if (!(cdb[1] & 0xe0))
577 return true;
578
579 cmd->reftag_seed = 0xffffffff;
580 break;
581 case TARGET_DIF_TYPE2_PROT:
582 if (cdb[1] & 0xe0)
583 return false;
584
585 cmd->reftag_seed = cmd->t_task_lba;
586 break;
587 case TARGET_DIF_TYPE1_PROT:
588 if (!(cdb[1] & 0xe0))
589 return true;
590
591 cmd->reftag_seed = cmd->t_task_lba;
592 break;
593 case TARGET_DIF_TYPE0_PROT:
594 default:
595 return true;
596 }
597
598 cmd->prot_type = dev->dev_attrib.pi_prot_type;
599 cmd->prot_length = dev->prot_length * sectors;
600 cmd->prot_handover = PROT_SEPERATED;
601
602 return true;
603}
604
605sense_reason_t
606sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
d6e0175c 607{
608 struct se_device *dev = cmd->se_dev;
609 unsigned char *cdb = cmd->t_task_cdb;
1fd032ee 610 unsigned int size;
d6e0175c 611 u32 sectors = 0;
de103c93 612 sense_reason_t ret;
613
614 switch (cdb[0]) {
615 case READ_6:
616 sectors = transport_get_sectors_6(cdb);
617 cmd->t_task_lba = transport_lba_21(cdb);
618 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
619 cmd->execute_rw = ops->execute_rw;
620 cmd->execute_cmd = sbc_execute_rw;
621 break;
622 case READ_10:
623 sectors = transport_get_sectors_10(cdb);
624 cmd->t_task_lba = transport_lba_32(cdb);
625
626 if (!sbc_check_prot(dev, cmd, cdb, sectors))
627 return TCM_UNSUPPORTED_SCSI_OPCODE;
628
d6e0175c 629 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
630 cmd->execute_rw = ops->execute_rw;
631 cmd->execute_cmd = sbc_execute_rw;
632 break;
633 case READ_12:
634 sectors = transport_get_sectors_12(cdb);
635 cmd->t_task_lba = transport_lba_32(cdb);
636
637 if (!sbc_check_prot(dev, cmd, cdb, sectors))
638 return TCM_UNSUPPORTED_SCSI_OPCODE;
639
d6e0175c 640 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
641 cmd->execute_rw = ops->execute_rw;
642 cmd->execute_cmd = sbc_execute_rw;
643 break;
644 case READ_16:
645 sectors = transport_get_sectors_16(cdb);
646 cmd->t_task_lba = transport_lba_64(cdb);
647
648 if (!sbc_check_prot(dev, cmd, cdb, sectors))
649 return TCM_UNSUPPORTED_SCSI_OPCODE;
650
d6e0175c 651 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
652 cmd->execute_rw = ops->execute_rw;
653 cmd->execute_cmd = sbc_execute_rw;
654 break;
655 case WRITE_6:
656 sectors = transport_get_sectors_6(cdb);
657 cmd->t_task_lba = transport_lba_21(cdb);
658 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
659 cmd->execute_rw = ops->execute_rw;
660 cmd->execute_cmd = sbc_execute_rw;
661 break;
662 case WRITE_10:
663 case WRITE_VERIFY:
664 sectors = transport_get_sectors_10(cdb);
665 cmd->t_task_lba = transport_lba_32(cdb);
666
667 if (!sbc_check_prot(dev, cmd, cdb, sectors))
668 return TCM_UNSUPPORTED_SCSI_OPCODE;
669
670 if (cdb[1] & 0x8)
671 cmd->se_cmd_flags |= SCF_FUA;
672 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
673 cmd->execute_rw = ops->execute_rw;
674 cmd->execute_cmd = sbc_execute_rw;
675 break;
676 case WRITE_12:
677 sectors = transport_get_sectors_12(cdb);
678 cmd->t_task_lba = transport_lba_32(cdb);
679
680 if (!sbc_check_prot(dev, cmd, cdb, sectors))
681 return TCM_UNSUPPORTED_SCSI_OPCODE;
682
683 if (cdb[1] & 0x8)
684 cmd->se_cmd_flags |= SCF_FUA;
685 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
686 cmd->execute_rw = ops->execute_rw;
687 cmd->execute_cmd = sbc_execute_rw;
688 break;
689 case WRITE_16:
690 sectors = transport_get_sectors_16(cdb);
691 cmd->t_task_lba = transport_lba_64(cdb);
692
693 if (!sbc_check_prot(dev, cmd, cdb, sectors))
694 return TCM_UNSUPPORTED_SCSI_OPCODE;
695
696 if (cdb[1] & 0x8)
697 cmd->se_cmd_flags |= SCF_FUA;
698 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
699 cmd->execute_rw = ops->execute_rw;
700 cmd->execute_cmd = sbc_execute_rw;
701 break;
702 case XDWRITEREAD_10:
de103c93 703 if (cmd->data_direction != DMA_TO_DEVICE ||
d6e0175c 704 !(cmd->se_cmd_flags & SCF_BIDI))
de103c93 705 return TCM_INVALID_CDB_FIELD;
706 sectors = transport_get_sectors_10(cdb);
707
708 cmd->t_task_lba = transport_lba_32(cdb);
709 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
710
711 /*
712 * Setup BIDI XOR callback to be run after I/O completion.
713 */
714 cmd->execute_rw = ops->execute_rw;
715 cmd->execute_cmd = sbc_execute_rw;
716 cmd->transport_complete_callback = &xdreadwrite_callback;
717 if (cdb[1] & 0x8)
718 cmd->se_cmd_flags |= SCF_FUA;
719 break;
720 case VARIABLE_LENGTH_CMD:
721 {
722 u16 service_action = get_unaligned_be16(&cdb[8]);
723 switch (service_action) {
724 case XDWRITEREAD_32:
725 sectors = transport_get_sectors_32(cdb);
726
727 /*
728 * Use WRITE_32 and READ_32 opcodes for the emulated
729 * XDWRITE_READ_32 logic.
730 */
731 cmd->t_task_lba = transport_lba_64_ext(cdb);
732 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
733
734 /*
 735 * Setup BIDI XOR callback to be run after I/O
736 * completion.
737 */
738 cmd->execute_rw = ops->execute_rw;
739 cmd->execute_cmd = sbc_execute_rw;
740 cmd->transport_complete_callback = &xdreadwrite_callback;
741 if (cdb[1] & 0x8)
742 cmd->se_cmd_flags |= SCF_FUA;
743 break;
744 case WRITE_SAME_32:
745 sectors = transport_get_sectors_32(cdb);
746 if (!sectors) {
747 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
748 " supported\n");
de103c93 749 return TCM_INVALID_CDB_FIELD;
750 }
751
1fd032ee 752 size = sbc_get_size(cmd, 1);
753 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
754
cd063bef 755 ret = sbc_setup_write_same(cmd, &cdb[10], ops);
6b64e1fe 756 if (ret)
cd063bef 757 return ret;
758 break;
759 default:
760 pr_err("VARIABLE_LENGTH_CMD service action"
761 " 0x%04x not supported\n", service_action);
de103c93 762 return TCM_UNSUPPORTED_SCSI_OPCODE;
763 }
764 break;
765 }
766 case COMPARE_AND_WRITE:
767 sectors = cdb[13];
768 /*
769 * Currently enforce COMPARE_AND_WRITE for a single sector
770 */
771 if (sectors > 1) {
772 pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
773 " than 1\n", sectors);
774 return TCM_INVALID_CDB_FIELD;
775 }
776 /*
777 * Double size because we have two buffers, note that
778 * zero is not an error..
779 */
780 size = 2 * sbc_get_size(cmd, sectors);
781 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
782 cmd->t_task_nolb = sectors;
783 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
784 cmd->execute_rw = ops->execute_rw;
785 cmd->execute_cmd = sbc_compare_and_write;
786 cmd->transport_complete_callback = compare_and_write_callback;
787 break;
d6e0175c 788 case READ_CAPACITY:
789 size = READ_CAP_LEN;
790 cmd->execute_cmd = sbc_emulate_readcapacity;
791 break;
792 case SERVICE_ACTION_IN:
793 switch (cmd->t_task_cdb[1] & 0x1f) {
794 case SAI_READ_CAPACITY_16:
1fd032ee 795 cmd->execute_cmd = sbc_emulate_readcapacity_16;
d6e0175c 796 break;
797 case SAI_REPORT_REFERRALS:
798 cmd->execute_cmd = target_emulate_report_referrals;
799 break;
800 default:
801 pr_err("Unsupported SA: 0x%02x\n",
802 cmd->t_task_cdb[1] & 0x1f);
de103c93 803 return TCM_INVALID_CDB_FIELD;
d6e0175c 804 }
1fd032ee 805 size = (cdb[10] << 24) | (cdb[11] << 16) |
806 (cdb[12] << 8) | cdb[13];
807 break;
808 case SYNCHRONIZE_CACHE:
809 case SYNCHRONIZE_CACHE_16:
810 if (!ops->execute_sync_cache) {
811 size = 0;
812 cmd->execute_cmd = sbc_emulate_noop;
813 break;
814 }
ad67f0d9 815
816 /*
817 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
818 */
819 if (cdb[0] == SYNCHRONIZE_CACHE) {
820 sectors = transport_get_sectors_10(cdb);
821 cmd->t_task_lba = transport_lba_32(cdb);
822 } else {
823 sectors = transport_get_sectors_16(cdb);
824 cmd->t_task_lba = transport_lba_64(cdb);
825 }
826
1fd032ee 827 size = sbc_get_size(cmd, sectors);
828
829 /*
 830 * Check to ensure that LBA + Range does not exceed the end of the
831 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
832 */
833 if (cmd->t_task_lba || sectors) {
834 if (sbc_check_valid_sectors(cmd) < 0)
33633676 835 return TCM_ADDRESS_OUT_OF_RANGE;
d6e0175c 836 }
ad67f0d9 837 cmd->execute_cmd = ops->execute_sync_cache;
838 break;
839 case UNMAP:
14150a6b 840 if (!ops->execute_unmap)
de103c93 841 return TCM_UNSUPPORTED_SCSI_OPCODE;
14150a6b 842
1fd032ee 843 size = get_unaligned_be16(&cdb[7]);
14150a6b 844 cmd->execute_cmd = ops->execute_unmap;
845 break;
846 case WRITE_SAME_16:
847 sectors = transport_get_sectors_16(cdb);
848 if (!sectors) {
849 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
de103c93 850 return TCM_INVALID_CDB_FIELD;
851 }
852
1fd032ee 853 size = sbc_get_size(cmd, 1);
854 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
855
cd063bef 856 ret = sbc_setup_write_same(cmd, &cdb[1], ops);
6b64e1fe 857 if (ret)
cd063bef 858 return ret;
859 break;
860 case WRITE_SAME:
861 sectors = transport_get_sectors_10(cdb);
862 if (!sectors) {
863 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
de103c93 864 return TCM_INVALID_CDB_FIELD;
865 }
866
1fd032ee 867 size = sbc_get_size(cmd, 1);
868 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
869
870 /*
871 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
872 * of byte 1 bit 3 UNMAP instead of original reserved field
873 */
cd063bef 874 ret = sbc_setup_write_same(cmd, &cdb[1], ops);
6b64e1fe 875 if (ret)
cd063bef 876 return ret;
877 break;
878 case VERIFY:
1fd032ee 879 size = 0;
1920ed61 880 cmd->execute_cmd = sbc_emulate_noop;
d6e0175c 881 break;
882 case REZERO_UNIT:
883 case SEEK_6:
884 case SEEK_10:
885 /*
886 * There are still clients out there which use these old SCSI-2
887 * commands. This mainly happens when running VMs with legacy
888 * guest systems, connected via SCSI command pass-through to
889 * iSCSI targets. Make them happy and return status GOOD.
890 */
891 size = 0;
892 cmd->execute_cmd = sbc_emulate_noop;
893 break;
d6e0175c 894 default:
1fd032ee 895 ret = spc_parse_cdb(cmd, &size);
896 if (ret)
897 return ret;
898 }
899
900 /* reject any command that we don't have a handler for */
901 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
de103c93 902 return TCM_UNSUPPORTED_SCSI_OPCODE;
903
904 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
905 unsigned long long end_lba;
906
0fd97ccf 907 if (sectors > dev->dev_attrib.fabric_max_sectors) {
908 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
909 " big sectors %u exceeds fabric_max_sectors:"
910 " %u\n", cdb[0], sectors,
0fd97ccf 911 dev->dev_attrib.fabric_max_sectors);
de103c93 912 return TCM_INVALID_CDB_FIELD;
d6e0175c 913 }
0fd97ccf 914 if (sectors > dev->dev_attrib.hw_max_sectors) {
915 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
916 " big sectors %u exceeds backend hw_max_sectors:"
917 " %u\n", cdb[0], sectors,
0fd97ccf 918 dev->dev_attrib.hw_max_sectors);
de103c93 919 return TCM_INVALID_CDB_FIELD;
920 }
921
922 end_lba = dev->transport->get_blocks(dev) + 1;
923 if (cmd->t_task_lba + sectors > end_lba) {
924 pr_err("cmd exceeds last lba %llu "
925 "(lba %llu, sectors %u)\n",
926 end_lba, cmd->t_task_lba, sectors);
09ceadc7 927 return TCM_ADDRESS_OUT_OF_RANGE;
928 }
929
930 if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
931 size = sbc_get_size(cmd, sectors);
932 }
933
de103c93 934 return target_cmd_size_check(cmd, size);
935}
936EXPORT_SYMBOL(sbc_parse_cdb);
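/*
 * Typical usage (sketch, not part of this file): a backend's parse_cdb
 * method forwards here with its sbc_ops table, roughly:
 *
 *	static struct sbc_ops foo_sbc_ops = {
 *		.execute_rw		= foo_execute_rw,
 *		.execute_sync_cache	= foo_execute_sync_cache,
 *	};
 *
 *	static sense_reason_t foo_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return sbc_parse_cdb(cmd, &foo_sbc_ops);
 *	}
 *
 * The foo_* names are illustrative; see the IBLOCK and FILEIO backends for
 * the real callers.
 */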
6f23ac8a 937
938u32 sbc_get_device_type(struct se_device *dev)
939{
940 return TYPE_DISK;
941}
942EXPORT_SYMBOL(sbc_get_device_type);
943
944sense_reason_t
945sbc_execute_unmap(struct se_cmd *cmd,
946 sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
947 sector_t, sector_t),
948 void *priv)
949{
950 struct se_device *dev = cmd->se_dev;
951 unsigned char *buf, *ptr = NULL;
952 sector_t lba;
953 int size;
954 u32 range;
955 sense_reason_t ret = 0;
956 int dl, bd_dl;
957
958 /* We never set ANC_SUP */
959 if (cmd->t_task_cdb[1])
960 return TCM_INVALID_CDB_FIELD;
961
962 if (cmd->data_length == 0) {
963 target_complete_cmd(cmd, SAM_STAT_GOOD);
964 return 0;
965 }
966
967 if (cmd->data_length < 8) {
968 pr_warn("UNMAP parameter list length %u too small\n",
969 cmd->data_length);
970 return TCM_PARAMETER_LIST_LENGTH_ERROR;
971 }
972
973 buf = transport_kmap_data_sg(cmd);
974 if (!buf)
975 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
976
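	/*
	 * UNMAP parameter list layout (SBC-3): bytes 0-1 UNMAP DATA LENGTH,
	 * bytes 2-3 UNMAP BLOCK DESCRIPTOR DATA LENGTH, bytes 4-7 reserved,
	 * followed by 16-byte block descriptors starting at byte 8, each
	 * holding an 8-byte LBA, a 4-byte block count and 4 reserved bytes.
	 */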
977 dl = get_unaligned_be16(&buf[0]);
978 bd_dl = get_unaligned_be16(&buf[2]);
979
980 size = cmd->data_length - 8;
981 if (bd_dl > size)
982 pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
983 cmd->data_length, bd_dl);
984 else
985 size = bd_dl;
986
987 if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
988 ret = TCM_INVALID_PARAMETER_LIST;
989 goto err;
990 }
991
992 /* First UNMAP block descriptor starts at 8 byte offset */
993 ptr = &buf[8];
994 pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
995 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
996
997 while (size >= 16) {
998 lba = get_unaligned_be64(&ptr[0]);
999 range = get_unaligned_be32(&ptr[8]);
1000 pr_debug("UNMAP: Using lba: %llu and range: %u\n",
1001 (unsigned long long)lba, range);
1002
1003 if (range > dev->dev_attrib.max_unmap_lba_count) {
1004 ret = TCM_INVALID_PARAMETER_LIST;
1005 goto err;
1006 }
1007
1008 if (lba + range > dev->transport->get_blocks(dev) + 1) {
1009 ret = TCM_ADDRESS_OUT_OF_RANGE;
1010 goto err;
1011 }
1012
1013 ret = do_unmap_fn(cmd, priv, lba, range);
1014 if (ret)
1015 goto err;
1016
1017 ptr += 16;
1018 size -= 16;
1019 }
1020
1021err:
1022 transport_kunmap_data_sg(cmd);
1023 if (!ret)
1024 target_complete_cmd(cmd, GOOD);
1025 return ret;
1026}
1027EXPORT_SYMBOL(sbc_execute_unmap);
1028
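/*
 * Each protection interval carries an 8-byte DIF tuple: a 2-byte guard tag
 * (CRC16 T10-DIF over the data block), a 2-byte application tag and a 4-byte
 * reference tag.  Type 1 expects the reference tag to match the low 32 bits
 * of the LBA, Type 2 checks it against the expected initial LBA, and Type 3
 * does not check it at all.
 */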
1029static sense_reason_t
1030sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
1031 const void *p, sector_t sector, unsigned int ei_lba)
1032{
1033 int block_size = dev->dev_attrib.block_size;
1034 __be16 csum;
1035
1036 csum = cpu_to_be16(crc_t10dif(p, block_size));
1037
1038 if (sdt->guard_tag != csum) {
1039 pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
1040 " csum 0x%04x\n", (unsigned long long)sector,
1041 be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
1042 return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1043 }
1044
1045 if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
1046 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
1047 pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
1048 " sector MSB: 0x%08x\n", (unsigned long long)sector,
1049 be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
1050 return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1051 }
1052
1053 if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
1054 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1055 pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
1056 " ei_lba: 0x%08x\n", (unsigned long long)sector,
1057 be32_to_cpu(sdt->ref_tag), ei_lba);
1058 return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1059 }
1060
1061 return 0;
1062}
1063
1064static void
1065sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
1066 struct scatterlist *sg, int sg_off)
1067{
1068 struct se_device *dev = cmd->se_dev;
1069 struct scatterlist *psg;
1070 void *paddr, *addr;
1071 unsigned int i, len, left;
1072
1073 left = sectors * dev->prot_length;
1074
1075 for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
1076
1077 len = min(psg->length, left);
1078 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1079 addr = kmap_atomic(sg_page(sg)) + sg_off;
1080
1081 if (read)
1082 memcpy(paddr, addr, len);
1083 else
1084 memcpy(addr, paddr, len);
1085
1086 left -= len;
1087 kunmap_atomic(paddr);
1088 kunmap_atomic(addr);
1089 }
1090}
1091
1092sense_reason_t
1093sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
1094 unsigned int ei_lba, struct scatterlist *sg, int sg_off)
1095{
1096 struct se_device *dev = cmd->se_dev;
1097 struct se_dif_v1_tuple *sdt;
1098 struct scatterlist *dsg, *psg = cmd->t_prot_sg;
1099 sector_t sector = start;
1100 void *daddr, *paddr;
1101 int i, j, offset = 0;
1102 sense_reason_t rc;
1103
1104 for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
1105 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
1106 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1107
1108 for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
1109
1110 if (offset >= psg->length) {
1111 kunmap_atomic(paddr);
1112 psg = sg_next(psg);
1113 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1114 offset = 0;
1115 }
1116
1117 sdt = paddr + offset;
1118
1119 pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
1120 " app_tag: 0x%04x ref_tag: %u\n",
1121 (unsigned long long)sector, sdt->guard_tag,
1122 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
1123
1124 rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
1125 ei_lba);
1126 if (rc) {
1127 kunmap_atomic(paddr);
1128 kunmap_atomic(daddr);
1129 return rc;
1130 }
1131
1132 sector++;
1133 ei_lba++;
1134 offset += sizeof(struct se_dif_v1_tuple);
1135 }
1136
1137 kunmap_atomic(paddr);
1138 kunmap_atomic(daddr);
1139 }
1140 sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);
1141
1142 return 0;
1143}
1144EXPORT_SYMBOL(sbc_dif_verify_write);
1145
1146sense_reason_t
1147sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
1148 unsigned int ei_lba, struct scatterlist *sg, int sg_off)
1149{
1150 struct se_device *dev = cmd->se_dev;
1151 struct se_dif_v1_tuple *sdt;
1152 struct scatterlist *dsg;
1153 sector_t sector = start;
1154 void *daddr, *paddr;
1155 int i, j, offset = sg_off;
1156 sense_reason_t rc;
1157
1158 for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
1159 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
1160 paddr = kmap_atomic(sg_page(sg)) + sg->offset;
1161
1162 for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
1163
1164 if (offset >= sg->length) {
1165 kunmap_atomic(paddr);
1166 sg = sg_next(sg);
1167 paddr = kmap_atomic(sg_page(sg)) + sg->offset;
1168 offset = 0;
1169 }
1170
1171 sdt = paddr + offset;
1172
1173 pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
1174 " app_tag: 0x%04x ref_tag: %u\n",
1175 (unsigned long long)sector, sdt->guard_tag,
1176 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
1177
1178 if (sdt->app_tag == cpu_to_be16(0xffff)) {
1179 sector++;
1180 offset += sizeof(struct se_dif_v1_tuple);
1181 continue;
1182 }
1183
1184 rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
1185 ei_lba);
1186 if (rc) {
1187 kunmap_atomic(paddr);
1188 kunmap_atomic(daddr);
1189 return rc;
1190 }
1191
1192 sector++;
1193 ei_lba++;
1194 offset += sizeof(struct se_dif_v1_tuple);
1195 }
1196
1197 kunmap_atomic(paddr);
1198 kunmap_atomic(daddr);
1199 }
1200 sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
1201
1202 return 0;
1203}
1204EXPORT_SYMBOL(sbc_dif_verify_read);