/*
 * sd_dif.c - SCSI Data Integrity Field
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
23 #include <linux/blkdev.h>
24 #include <linux/crc-t10dif.h>
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_cmnd.h>
28 #include <scsi/scsi_dbg.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_driver.h>
31 #include <scsi/scsi_eh.h>
32 #include <scsi/scsi_host.h>
33 #include <scsi/scsi_ioctl.h>
34 #include <scsi/scsicam.h>
36 #include <net/checksum.h>
40 typedef __u16 (csum_fn
) (void *, unsigned int);
42 static __u16
sd_dif_crc_fn(void *data
, unsigned int len
)
44 return cpu_to_be16(crc_t10dif(data
, len
));
47 static __u16
sd_dif_ip_fn(void *data
, unsigned int len
)
49 return ip_compute_csum(data
, len
);
53 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
54 * 16 bit app tag, 32 bit reference tag.
56 static void sd_dif_type1_generate(struct blk_integrity_exchg
*bix
, csum_fn
*fn
)
58 void *buf
= bix
->data_buf
;
59 struct sd_dif_tuple
*sdt
= bix
->prot_buf
;
60 sector_t sector
= bix
->sector
;
63 for (i
= 0 ; i
< bix
->data_size
; i
+= bix
->sector_size
, sdt
++) {
64 sdt
->guard_tag
= fn(buf
, bix
->sector_size
);
65 sdt
->ref_tag
= cpu_to_be32(sector
& 0xffffffff);
68 buf
+= bix
->sector_size
;
73 static void sd_dif_type1_generate_crc(struct blk_integrity_exchg
*bix
)
75 sd_dif_type1_generate(bix
, sd_dif_crc_fn
);
78 static void sd_dif_type1_generate_ip(struct blk_integrity_exchg
*bix
)
80 sd_dif_type1_generate(bix
, sd_dif_ip_fn
);
83 static int sd_dif_type1_verify(struct blk_integrity_exchg
*bix
, csum_fn
*fn
)
85 void *buf
= bix
->data_buf
;
86 struct sd_dif_tuple
*sdt
= bix
->prot_buf
;
87 sector_t sector
= bix
->sector
;
91 for (i
= 0 ; i
< bix
->data_size
; i
+= bix
->sector_size
, sdt
++) {
92 /* Unwritten sectors */
93 if (sdt
->app_tag
== 0xffff)
96 if (be32_to_cpu(sdt
->ref_tag
) != (sector
& 0xffffffff)) {
98 "%s: ref tag error on sector %lu (rcvd %u)\n",
99 bix
->disk_name
, (unsigned long)sector
,
100 be32_to_cpu(sdt
->ref_tag
));
104 csum
= fn(buf
, bix
->sector_size
);
106 if (sdt
->guard_tag
!= csum
) {
107 printk(KERN_ERR
"%s: guard tag error on sector %lu " \
108 "(rcvd %04x, data %04x)\n", bix
->disk_name
,
109 (unsigned long)sector
,
110 be16_to_cpu(sdt
->guard_tag
), be16_to_cpu(csum
));
114 buf
+= bix
->sector_size
;
121 static int sd_dif_type1_verify_crc(struct blk_integrity_exchg
*bix
)
123 return sd_dif_type1_verify(bix
, sd_dif_crc_fn
);
126 static int sd_dif_type1_verify_ip(struct blk_integrity_exchg
*bix
)
128 return sd_dif_type1_verify(bix
, sd_dif_ip_fn
);
131 static struct blk_integrity dif_type1_integrity_crc
= {
132 .name
= "T10-DIF-TYPE1-CRC",
133 .generate_fn
= sd_dif_type1_generate_crc
,
134 .verify_fn
= sd_dif_type1_verify_crc
,
135 .tuple_size
= sizeof(struct sd_dif_tuple
),
139 static struct blk_integrity dif_type1_integrity_ip
= {
140 .name
= "T10-DIF-TYPE1-IP",
141 .generate_fn
= sd_dif_type1_generate_ip
,
142 .verify_fn
= sd_dif_type1_verify_ip
,
143 .tuple_size
= sizeof(struct sd_dif_tuple
),
149 * Type 3 protection has a 16-bit guard tag and 16 + 32 bits of opaque
152 static void sd_dif_type3_generate(struct blk_integrity_exchg
*bix
, csum_fn
*fn
)
154 void *buf
= bix
->data_buf
;
155 struct sd_dif_tuple
*sdt
= bix
->prot_buf
;
158 for (i
= 0 ; i
< bix
->data_size
; i
+= bix
->sector_size
, sdt
++) {
159 sdt
->guard_tag
= fn(buf
, bix
->sector_size
);
163 buf
+= bix
->sector_size
;
167 static void sd_dif_type3_generate_crc(struct blk_integrity_exchg
*bix
)
169 sd_dif_type3_generate(bix
, sd_dif_crc_fn
);
172 static void sd_dif_type3_generate_ip(struct blk_integrity_exchg
*bix
)
174 sd_dif_type3_generate(bix
, sd_dif_ip_fn
);
177 static int sd_dif_type3_verify(struct blk_integrity_exchg
*bix
, csum_fn
*fn
)
179 void *buf
= bix
->data_buf
;
180 struct sd_dif_tuple
*sdt
= bix
->prot_buf
;
181 sector_t sector
= bix
->sector
;
185 for (i
= 0 ; i
< bix
->data_size
; i
+= bix
->sector_size
, sdt
++) {
186 /* Unwritten sectors */
187 if (sdt
->app_tag
== 0xffff && sdt
->ref_tag
== 0xffffffff)
190 csum
= fn(buf
, bix
->sector_size
);
192 if (sdt
->guard_tag
!= csum
) {
193 printk(KERN_ERR
"%s: guard tag error on sector %lu " \
194 "(rcvd %04x, data %04x)\n", bix
->disk_name
,
195 (unsigned long)sector
,
196 be16_to_cpu(sdt
->guard_tag
), be16_to_cpu(csum
));
200 buf
+= bix
->sector_size
;
207 static int sd_dif_type3_verify_crc(struct blk_integrity_exchg
*bix
)
209 return sd_dif_type3_verify(bix
, sd_dif_crc_fn
);
212 static int sd_dif_type3_verify_ip(struct blk_integrity_exchg
*bix
)
214 return sd_dif_type3_verify(bix
, sd_dif_ip_fn
);
217 static struct blk_integrity dif_type3_integrity_crc
= {
218 .name
= "T10-DIF-TYPE3-CRC",
219 .generate_fn
= sd_dif_type3_generate_crc
,
220 .verify_fn
= sd_dif_type3_verify_crc
,
221 .tuple_size
= sizeof(struct sd_dif_tuple
),
225 static struct blk_integrity dif_type3_integrity_ip
= {
226 .name
= "T10-DIF-TYPE3-IP",
227 .generate_fn
= sd_dif_type3_generate_ip
,
228 .verify_fn
= sd_dif_type3_verify_ip
,
229 .tuple_size
= sizeof(struct sd_dif_tuple
),
234 * Configure exchange of protection information between OS and HBA.
236 void sd_dif_config_host(struct scsi_disk
*sdkp
)
238 struct scsi_device
*sdp
= sdkp
->device
;
239 struct gendisk
*disk
= sdkp
->disk
;
240 u8 type
= sdkp
->protection_type
;
243 dif
= scsi_host_dif_capable(sdp
->host
, type
);
244 dix
= scsi_host_dix_capable(sdp
->host
, type
);
246 if (!dix
&& scsi_host_dix_capable(sdp
->host
, 0)) {
253 /* Enable DMA of protection information */
254 if (scsi_host_get_guard(sdkp
->device
->host
) & SHOST_DIX_GUARD_IP
)
255 if (type
== SD_DIF_TYPE3_PROTECTION
)
256 blk_integrity_register(disk
, &dif_type3_integrity_ip
);
258 blk_integrity_register(disk
, &dif_type1_integrity_ip
);
260 if (type
== SD_DIF_TYPE3_PROTECTION
)
261 blk_integrity_register(disk
, &dif_type3_integrity_crc
);
263 blk_integrity_register(disk
, &dif_type1_integrity_crc
);
265 sd_printk(KERN_NOTICE
, sdkp
,
266 "Enabling DIX %s protection\n", disk
->integrity
->name
);
268 /* Signal to block layer that we support sector tagging */
269 if (dif
&& type
&& sdkp
->ATO
) {
270 if (type
== SD_DIF_TYPE3_PROTECTION
)
271 disk
->integrity
->tag_size
= sizeof(u16
) + sizeof(u32
);
273 disk
->integrity
->tag_size
= sizeof(u16
);
275 sd_printk(KERN_NOTICE
, sdkp
, "DIF application tag size %u\n",
276 disk
->integrity
->tag_size
);
281 * The virtual start sector is the one that was originally submitted
282 * by the block layer. Due to partitioning, MD/DM cloning, etc. the
283 * actual physical start sector is likely to be different. Remap
284 * protection information to match the physical LBA.
286 * From a protocol perspective there's a slight difference between
287 * Type 1 and 2. The latter uses 32-byte CDBs exclusively, and the
288 * reference tag is seeded in the CDB. This gives us the potential to
289 * avoid virt->phys remapping during write. However, at read time we
290 * don't know whether the virt sector is the same as when we wrote it
291 * (we could be reading from real disk as opposed to MD/DM device. So
292 * we always remap Type 2 making it identical to Type 1.
294 * Type 3 does not have a reference tag so no remapping is required.
296 void sd_dif_prepare(struct request
*rq
, sector_t hw_sector
,
297 unsigned int sector_sz
)
299 const int tuple_sz
= sizeof(struct sd_dif_tuple
);
301 struct scsi_disk
*sdkp
;
302 struct sd_dif_tuple
*sdt
;
305 sdkp
= rq
->bio
->bi_bdev
->bd_disk
->private_data
;
307 if (sdkp
->protection_type
== SD_DIF_TYPE3_PROTECTION
)
310 phys
= hw_sector
& 0xffffffff;
312 __rq_for_each_bio(bio
, rq
) {
314 struct bvec_iter iter
;
317 /* Already remapped? */
318 if (bio_flagged(bio
, BIO_MAPPED_INTEGRITY
))
321 virt
= bio_integrity(bio
)->bip_iter
.bi_sector
& 0xffffffff;
323 bip_for_each_vec(iv
, bio_integrity(bio
), iter
) {
324 sdt
= kmap_atomic(iv
.bv_page
)
327 for (j
= 0; j
< iv
.bv_len
; j
+= tuple_sz
, sdt
++) {
329 if (be32_to_cpu(sdt
->ref_tag
) == virt
)
330 sdt
->ref_tag
= cpu_to_be32(phys
);
339 bio
->bi_flags
|= (1 << BIO_MAPPED_INTEGRITY
);
/*
 * Remap physical sector values in the reference tag to the virtual
 * values expected by the block layer.
 */
347 void sd_dif_complete(struct scsi_cmnd
*scmd
, unsigned int good_bytes
)
349 const int tuple_sz
= sizeof(struct sd_dif_tuple
);
350 struct scsi_disk
*sdkp
;
352 struct sd_dif_tuple
*sdt
;
353 unsigned int j
, sectors
, sector_sz
;
356 sdkp
= scsi_disk(scmd
->request
->rq_disk
);
358 if (sdkp
->protection_type
== SD_DIF_TYPE3_PROTECTION
|| good_bytes
== 0)
361 sector_sz
= scmd
->device
->sector_size
;
362 sectors
= good_bytes
/ sector_sz
;
364 phys
= blk_rq_pos(scmd
->request
) & 0xffffffff;
365 if (sector_sz
== 4096)
368 __rq_for_each_bio(bio
, scmd
->request
) {
370 struct bvec_iter iter
;
372 virt
= bio_integrity(bio
)->bip_iter
.bi_sector
& 0xffffffff;
374 bip_for_each_vec(iv
, bio_integrity(bio
), iter
) {
375 sdt
= kmap_atomic(iv
.bv_page
)
378 for (j
= 0; j
< iv
.bv_len
; j
+= tuple_sz
, sdt
++) {
385 if (be32_to_cpu(sdt
->ref_tag
) == phys
)
386 sdt
->ref_tag
= cpu_to_be32(virt
);