/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	sector_t cc_sector;
	atomic_t cc_pending;
	struct ablkcipher_request *req;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	int error;
	sector_t sector;
} CRYPTO_MINALIGN_ATTR;

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, DM_CRYPT_SAME_CPU };

/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;
	struct mutex bio_alloc_lock;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher **tfms;
	unsigned tfms_count;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	u8 key[0];
};

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return cc->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop_fish2 devices. Do not use for new devices.
 *
 * lmk: Compatible implementation of the block chaining mode used
 *      by the Loop-AES block device encryption system
 *      designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *      It operates on full 512 byte sectors and uses CBC
 *      with an IV derived from the sector number, the data and
 *      optionally an extra IV seed.
 *      This means that after decryption the first block
 *      of the sector must be tweaked according to the decrypted data.
 *      Loop-AES can use three encryption schemes:
 *      version 1: is plain aes-cbc mode
 *      version 2: uses a 64-multikey scheme with the lmk IV generator
 *      version 3: the same as version 2 with an additional IV seed
 *                 (it uses 65 keys, the last key is used as IV seed)
 *
 * tcw: Compatible implementation of the block chaining mode used
 *      by the TrueCrypt device encryption system (prior to version 4.1).
 *      For more info see: http://www.truecrypt.org
 *      It operates on full 512 byte sectors and uses CBC
 *      with an IV derived from the initial key and the sector number.
 *      In addition, a whitening value is applied on every sector; the
 *      whitening is calculated from the initial key and sector number
 *      and mixed using CRC32.
 *      Note that this encryption scheme is vulnerable to watermarking
 *      attacks and should be used only to access old compatible containers.
 *
 * plumb: unimplemented, see:
 *   http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

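/*
 * Worked example for the two simplest generators, assuming a 16-byte IV:
 * for sector number 0x1122334455 the generated IVs are
 *
 *   plain:   55 44 33 22 00 00 00 00 00 00 00 00 00 00 00 00
 *   plain64: 55 44 33 22 11 00 00 00 00 00 00 00 00 00 00 00
 *
 * plain truncates the sector number to 32 bits before the little-endian
 * store, so sectors 0x1122334455 and 0x22334455 would share an IV;
 * plain64 keeps all 64 bits and is therefore the safer choice for
 * devices with more than 2^32 sectors.
 */
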
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				   crypto_hash_digestsize(essiv->hash_tfm));
	if (err)
		return err;

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int r;

	memset(essiv->salt, 0, salt_size);

	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);

	return r;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	essiv_tfm = cc->iv_private;

	if (essiv_tfm)
		crypto_free_cipher(essiv_tfm);

	cc->iv_private = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
				    crypto_hash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
	cc->iv_private = essiv_tfm;

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

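/*
 * Worked benbi example, assuming a cipher with 16-byte blocks (e.g. AES):
 * bs = 16, log = 4, shift = 9 - 4 = 5, i.e. 32 cipher blocks per 512-byte
 * sector. Sector 2 therefore starts at block count (2 << 5) + 1 = 65, and
 * the big-endian value 65 is stored in the last 8 bytes of the IV.
 */
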
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kzfree(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kzfree(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}

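/*
 * In other words: the 16-byte whitening seed is XORed with two copies of
 * the 64-bit sector number, each 32-bit quarter is replaced by its own
 * CRC32, the four CRCs are folded down to 8 bytes (buf[0..3] ^= buf[12..15],
 * buf[4..7] ^= buf[8..11]), and the resulting 8-byte pattern is XORed over
 * all 64 eight-byte words of the 512-byte sector.
 */
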
static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

	return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
	kunmap_atomic(dst);

	return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr       = crypt_iv_lmk_ctr,
	.dtr       = crypt_iv_lmk_dtr,
	.init      = crypt_iv_lmk_init,
	.wipe      = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post      = crypt_iv_lmk_post
};

static struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr       = crypt_iv_tcw_ctr,
	.dtr       = crypt_iv_tcw_dtr,
	.init      = crypt_iv_tcw_init,
	.wipe      = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post      = crypt_iv_tcw_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
		    bv_in.bv_offset);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
		    bv_out.bv_offset);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

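/*
 * Each call above converts exactly one 512-byte sector with its own IV:
 * a 4 KiB bio therefore takes eight passes through crypt_convert_block(),
 * with iter_in/iter_out advancing by 1 << SECTOR_SHIFT between passes.
 */
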
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->req)
		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
	ablkcipher_request_set_callback(ctx->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}

static void crypt_free_req(struct crypt_config *cc,
			   struct ablkcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct ablkcipher_request *)(io + 1) != req)
		mempool_free(req, cc->req_pool);
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, ctx->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			ctx->req = NULL;
			ctx->cc_sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}

	return 0;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * a mempool of 256 pages and two processes, each wanting 256 pages, allocate
 * from the mempool concurrently, it may deadlock in a situation where both
 * processes have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first but on failure we fall back
 * to blocking allocations with a mutex.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned i, len, remaining_size;
	struct page *page;
	struct bio_vec *bvec;

retry:
	if (unlikely(gfp_mask & __GFP_WAIT))
		mutex_lock(&cc->bio_alloc_lock);

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		goto return_clone;

	clone_init(io, clone);

	remaining_size = size;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			gfp_mask |= __GFP_WAIT;
			goto retry;
		}

		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;

		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
		bvec->bv_page = page;
		bvec->bv_len = len;
		bvec->bv_offset = 0;

		clone->bi_iter.bi_size += len;

		remaining_size -= len;
	}

return_clone:
	if (unlikely(gfp_mask & __GFP_WAIT))
		mutex_unlock(&cc->bio_alloc_lock);

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->ctx.req = NULL;
	atomic_set(&io->io_pending, 0);
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	int error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (io->ctx.req)
		crypt_free_req(cc, io->ctx.req, base_bio);

	bio_endio(base_bio, error);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_clone_bioset(base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_iter.bi_sector = cc->start + io->sector;

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ) {
		crypt_inc_pending(io);
		if (kcryptd_io_read(io, GFP_NOIO))
			io->error = -ENOMEM;
		crypt_dec_pending(io);
	} else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;

	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.iter_out.bi_size);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	int crypt_finished;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
	if (unlikely(!clone)) {
		io->error = -EIO;
		goto dec;
	}

	io->ctx.bio_out = clone;
	io->ctx.iter_out = clone->bi_iter;

	sector += bio_sectors(clone);

	crypt_inc_pending(io);
	r = crypt_convert(cc, &io->ctx);
	if (r)
		io->error = -EIO;
	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

	/* Encryption was already finished, submit io now */
	if (crypt_finished) {
		kcryptd_crypt_write_io_submit(io, 0);
		io->sector = sector;
	}

dec:
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = -EIO;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	if (error < 0)
		io->error = -EIO;

	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		if (kstrtou8(buffer, 16, &key[i]))
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

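/*
 * Example: crypt_decode_key(key, "deadbeef", 4) stores the bytes
 * { 0xde, 0xad, 0xbe, 0xef } in key and returns 0; any non-hex character
 * or a hex string longer than 2 * size makes it return -EINVAL.
 */
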
static void crypt_free_tfms(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
			crypto_free_ablkcipher(cc->tfms[i]);
			cc->tfms[i] = NULL;
		}

	kfree(cc->tfms);
	cc->tfms = NULL;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
			   GFP_KERNEL);
	if (!cc->tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->tfms[i])) {
			err = PTR_ERR(cc->tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	return 0;
}

static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size;
	int err = 0, i, r;

	/* Ignore extra keys (which are used for IV etc) */
	subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);

	for (i = 0; i < cc->tfms_count; i++) {
		r = crypto_ablkcipher_setkey(cc->tfms[i],
					     cc->key + (i * subkey_size),
					     subkey_size);
		if (r)
			err = r;
	}

	return err;
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* The key size may not be changed. */
	if (cc->key_size != (key_string_len >> 1))
		goto out;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	r = crypt_setkey_allcpus(cc);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

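/*
 * Example: a 256-bit AES key arrives as 64 hex characters, so key_size
 * (derived as strlen(argv[1]) / 2 in crypt_ctr) is 32, and a key string
 * of any other length is rejected; the single character "-" stands for
 * an empty key (key_size 0).
 */
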
static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;
	cc->key_extra_size = 0;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Versions 2 and 3 are recognised according
		 * to length of provided multi-key string.
		 * If present (version 3), last key is used as IV seed.
		 * All keys (including IV seed) are always the same size.
		 */
		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2; /* IV + whitening */
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size, opt_params;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding;
	struct dm_arg_set as;
	const char *opt_string;
	char dummy;

	static struct dm_arg _args[] = {
		{0, 2, "Invalid number of feature args"},
	};

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& crypto_ablkcipher_alignmask(any_tfm(cc));
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
	}

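	/*
	 * Worked example for the "allocate exactly" branch above, assuming
	 * cc->dmreq_start + sizeof(struct dm_crypt_request) == 200 and an
	 * alignmask of 15 (16-byte alignment): -200 & 15 == 8, so 8 bytes
	 * of padding place the IV at offset 208 == 13 * 16.
	 */
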
	ret = -ENOMEM;
	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->per_bio_data_size = ti->per_bio_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
		      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
		      ARCH_KMALLOC_MINALIGN);

	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	mutex_init(&cc->bio_alloc_lock);

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		while (opt_params--) {
			opt_string = dm_shift_arg(&as);
			if (!opt_string) {
				ti->error = "Not enough feature arguments";
				goto bad;
			}

			if (!strcasecmp(opt_string, "allow_discards"))
				ti->num_discard_bios = 1;

			else if (!strcasecmp(opt_string, "same_cpu_crypt"))
				set_bit(DM_CRYPT_SAME_CPU, &cc->flags);

			else {
				ti->error = "Invalid feature arguments";
				goto bad;
			}
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	else
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						  num_online_cpus());
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->discard_zeroes_data_unsupported = true;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
	io->ctx.req = (struct ablkcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;
	int num_feature_args = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0)
			for (i = 0; i < cc->key_size; i++)
				DMEMIT("%02x", cc->key[i]);
		else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
				DMEMIT(" allow_discards");
			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
				DMEMIT(" same_cpu_crypt");
		}

		break;
	}
}

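/*
 * Example STATUSTYPE_TABLE output for a mapping with one feature enabled
 * (key bytes elided here, device path illustrative):
 *
 *	aes-cbc-essiv:sha256 <key in hex> 0 /dev/sdb1 0 1 allow_discards
 *
 * i.e. <cipher> <key> <iv_offset> <dev_path> <start> followed by the
 * optional feature count and names, matching the constructor arguments.
 */
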
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

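/*
 * The message interface is driven from user space, for example with
 * dmsetup (the device name here is illustrative):
 *
 *	dmsetup suspend cryptdev
 *	dmsetup message cryptdev 0 key wipe
 *	dmsetup message cryptdev 0 key set <key in hex>
 *	dmsetup resume cryptdev
 *
 * Both operations refuse to run unless the device is suspended.
 */
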
static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 14, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	r = dm_register_target(&crypt_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");