/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/percpu.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/unaligned.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)
/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};
/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};
struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
};
struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};
struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
/*
 * Duplicated per-CPU state for cipher.
 */
struct crypt_cpu {
	struct ablkcipher_request *req;
	struct crypto_ablkcipher *tfm;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
};
/*
 * The fields in here must be read only after initialization,
 * changing state should be in crypt_cpu.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Duplicated per cpu state. Access through
	 * per_cpu_ptr() only.
	 */
	struct crypt_cpu __percpu *cpu;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};
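/*
 * Worked example of the request layout (illustrative numbers only): with
 * a cipher whose request context needs 96 bytes and whose context
 * alignment is 16, dmreq_start is ALIGN(sizeof(struct ablkcipher_request)
 * + 96, 16), so the dm_crypt_request always begins on an alignment
 * boundary and the IV sits directly behind it.  See the dmreq_start
 * computation in crypt_ctr() and the iv pointer arithmetic in
 * crypt_convert_block().
 */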
#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
{
	return this_cpu_ptr(cc->cpu);
}

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return __this_cpu_ptr(cc->cpu)->tfm;
}
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
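/*
 * Worked example (illustrative, not part of the mapping interface): for
 * sector 0x12345678 and a 16-byte IV, plain and plain64 both produce
 *
 *   78 56 34 12 00 00 00 00 00 00 00 00 00 00 00 00
 *
 * (little-endian sector number, zero padded).  plain truncates the sector
 * number to 32 bits and therefore repeats IVs on devices with more than
 * 2^32 sectors; plain64 uses the full 64-bit sector number.  essiv
 * additionally encrypts this zero-padded sector number with the block
 * cipher, keyed with a hash of the volume key.
 */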
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}
static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);

	return 0;
}
/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err, cpu;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;

		err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
					   crypto_hash_digestsize(essiv->hash_tfm));
		if (err)
			return err;
	}

	return 0;
}
/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int cpu, r, err = 0;

	memset(essiv->salt, 0, salt_size);

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
		r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
		if (r)
			err = r;
	}

	return err;
}
/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	int cpu;
	struct crypt_cpu *cpu_cc;
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	for_each_possible_cpu(cpu) {
		cpu_cc = per_cpu_ptr(cc->cpu, cpu);
		essiv_tfm = cpu_cc->iv_private;

		if (essiv_tfm)
			crypto_free_cipher(essiv_tfm);

		cpu_cc->iv_private = NULL;
	}
}
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err, cpu;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	for_each_possible_cpu(cpu) {
		essiv_tfm = setup_essiv_cpu(cc, ti, salt,
					    crypto_hash_digestsize(hash_tfm));
		if (IS_ERR(essiv_tfm)) {
			crypt_iv_essiv_dtr(cc);
			return PTR_ERR(essiv_tfm);
		}
		per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
	}

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;

	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}
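/*
 * Example of the shift above (illustrative): a cipher with 16-byte wide
 * blocks has log = 4, so shift = 9 - 4 = 5, and crypt_iv_benbi_gen()
 * turns sector n into the big-endian narrow-block count (n << 5) + 1,
 * i.e. thirty-two 16-byte blocks per 512-byte sector, counted from 1.
 */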
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}
static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};
static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}
static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}
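/*
 * dmreq_of_req() and req_of_dmreq() are exact inverses: the
 * dm_crypt_request always lives cc->dmreq_start bytes behind its
 * ablkcipher_request, so (illustratively) a request at address p maps to
 * the dm_crypt_request at p + cc->dmreq_start and back again.
 */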
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = dmreq_of_req(cc, req);
	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
			 crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);

	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	return r;
}
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);

	if (!this_cc->req)
		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfm);
	ablkcipher_request_set_callback(this_cc->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
}
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	int r;

	atomic_set(&ctx->pending, 1);

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->pending);

		r = crypt_convert_block(cc, ctx, this_cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			this_cc->req = NULL;
			ctx->sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->pending);
			ctx->sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->pending);
			return r;
		}
	}

	return 0;
}
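/*
 * Note on the pending protocol above (a summary of the code, not new
 * behaviour): ctx->pending starts at 1 and is incremented before each
 * block is processed.  Synchronously completed blocks drop their
 * reference immediately; async blocks keep it until kcryptd_async_done()
 * runs.  The surviving initial reference is dropped by the caller, so
 * whoever brings the count to zero knows the whole bio has been
 * converted.
 */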
static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}
static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->pending, 0);

	return io;
}
static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}
/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}
/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}
static void kcryptd_io_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	crypt_inc_pending(io);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		io->error = -ENOMEM;
		crypt_dec_pending(io);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}
static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}
static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_io_read(io);
	else
		kcryptd_io_write(io);
}
static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
					  int error, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);
		r = crypt_convert(cc, &io->ctx);
		crypt_finished = atomic_dec_and_test(&io->ctx.pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, r, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->target, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}
static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	if (atomic_dec_and_test(&io->ctx.pending))
		kcryptd_crypt_read_done(io, r);

	crypt_dec_pending(io);
}
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io, error);
	else
		kcryptd_crypt_write_io_submit(io, error, 1);
}
static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}
static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}
/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}
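/*
 * For example, decoding the hex string "0a1f" with size 2 yields
 * key[0] = 0x0a and key[1] = 0x1f; any non-hex character makes
 * simple_strtoul() stop before &buffer[2] and the key is rejected.
 */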
/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}
static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	int cpu, err = 0, r;

	for_each_possible_cpu(cpu) {
		r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfm,
					     cc->key, cc->key_size);
		if (r)
			err = r;
	}

	return err;
}
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	/* The key size may not be changed. */
	if (cc->key_size != (strlen(key) >> 1))
		return -EINVAL;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		return -EINVAL;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return crypt_setkey_allcpus(cc);
}
static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;
	struct crypt_cpu *cpu_cc;
	int cpu;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	if (cc->cpu)
		for_each_possible_cpu(cpu) {
			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
			if (cpu_cc->req)
				mempool_free(cpu_cc->req, cc->req_pool);
			if (cpu_cc->tfm)
				crypto_free_ablkcipher(cpu_cc->tfm);
		}

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	if (cc->cpu)
		free_percpu(cc->cpu);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}
static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	struct crypto_ablkcipher *tfm;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts;
	char *cipher_api = NULL;
	int cpu, ret = -EINVAL;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher-mode-iv:ivopts
	 */
	tmp = cipher_in;
	cipher = strsep(&tmp, "-");

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	cc->cpu = alloc_percpu(struct crypt_cpu);
	if (!cc->cpu) {
		ti->error = "Cannot allocate per cpu state";
		goto bad_mem;
	}

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	for_each_possible_cpu(cpu) {
		tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0);
		if (IS_ERR(tfm)) {
			ret = PTR_ERR(tfm);
			ti->error = "Error allocating crypto tfm";
			goto bad;
		}
		per_cpu_ptr(cc->cpu, cpu)->tfm = tfm;
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Initialize IV */
	ret = -EINVAL;
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
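/*
 * For example (illustrative values), a 1 GiB mapping over /dev/sdb with
 * AES in CBC mode and ESSIV would use a table line like:
 *
 *   0 2097152 crypt aes-cbc-essiv:sha256 <64-hex-digit-key> 0 /dev/sdb 0
 *
 * typically loaded with "dmsetup create <name>".
 */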
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size;
	unsigned long long tmpll;
	int ret;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io",
				       WQ_NON_REENTRANT|
				       WQ_MEM_RECLAIM,
				       1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_NON_REENTRANT|
					  WQ_CPU_INTENSIVE|
					  WQ_MEM_RECLAIM,
					  1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_requests = 1;
	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}
static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc;

	if (bio->bi_rw & REQ_FLUSH) {
		cc = ti->private;
		bio->bi_bdev = cc->dev->bdev;
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}
static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}
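/*
 * The STATUSTYPE_TABLE output mirrors the constructor arguments, e.g.
 * (illustrative): "aes-cbc-essiv:sha256 <hex key> 0 /dev/sdb 0", so a
 * table can be saved and reloaded verbatim.
 */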
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}
static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}
static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 9, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};
static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);
MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");