/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>

#include "dm.h"
#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)
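/*
 * MESG_STR(x) expands to the argument pair "x, sizeof(x)", so e.g.
 * strnicmp(argv[0], MESG_STR("key")) becomes
 * strnicmp(argv[0], "key", sizeof("key")) - see crypt_message() below.
 */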
/*
 * per bio private data
 */
struct crypt_io {
	struct dm_target *target;
	struct bio *bio;
	struct bio *first_clone;
	struct work_struct work;
	atomic_t pending;
	int error;
};
/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	int write;
};
struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data and
	 * for encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *page_pool;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	struct crypto_cipher *iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_blkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS        256
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static kmem_cache_t *_crypt_io_pool;
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * plumb: unimplemented, see:
 *   http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
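/*
 * Worked example (illustrative, not part of the driver): with a 16-byte IV
 * and sector number 5, the "plain" generator yields
 *
 *	05 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 *
 * i.e. the little-endian sector number zero-padded to iv_size. "essiv"
 * starts from the same zero-padded (64-bit) sector number but then
 * encrypts that block in place with the salt-keyed cipher, so the
 * resulting IVs are not predictable without the key.
 */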
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm;
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
	int err;

	if (opts == NULL) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		return PTR_ERR(hash_tfm);
	}

	saltsize = crypto_hash_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		crypto_free_hash(hash_tfm);
		return -ENOMEM;
	}

	sg_set_buf(&sg, cc->key, cc->key_size);
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		kfree(salt);
		return err;
	}

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return PTR_ERR(essiv_tfm);
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_blkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return err;
	}
	kfree(salt);

	cc->iv_gen_private = essiv_tfm;
	return 0;
}
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private);
	cc->iv_gen_private = NULL;
}
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv);
	return 0;
}
static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};
static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                          struct scatterlist *in, unsigned int length,
                          int write, sector_t sector)
{
	u8 iv[cc->iv_size];
	struct blkcipher_desc desc = {
		.tfm = cc->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
	} else {
		if (write)
			r = crypto_blkcipher_encrypt(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt(&desc, out, in, length);
	}

	return r;
}
static void
crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
                   struct bio *bio_out, struct bio *bio_in,
                   sector_t sector, int write)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	ctx->write = write;
}
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
{
	int r = 0;

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
		struct scatterlist sg_in = {
			.page = bv_in->bv_page,
			.offset = bv_in->bv_offset + ctx->offset_in,
			.length = 1 << SECTOR_SHIFT
		};
		struct scatterlist sg_out = {
			.page = bv_out->bv_page,
			.offset = bv_out->bv_offset + ctx->offset_out,
			.length = 1 << SECTOR_SHIFT
		};

		/* advance to the next sector in each bio */
		ctx->offset_in += sg_in.length;
		if (ctx->offset_in >= bv_in->bv_len) {
			ctx->offset_in = 0;
			ctx->idx_in++;
		}

		ctx->offset_out += sg_out.length;
		if (ctx->offset_out >= bv_out->bv_len) {
			ctx->offset_out = 0;
			ctx->idx_out++;
		}

		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
		                              ctx->write, ctx->sector);
		if (r < 0)
			break;

		ctx->sector++;
	}

	return r;
}
/*
 * Generate a new unfragmented bio with the given size.
 * This should never violate the device limitations.
 * May return a smaller bio when running out of pages.
 */
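/*
 * Example (illustrative): a 10240-byte request on a machine with 4096-byte
 * pages needs nr_iovecs = (10240 + 4095) >> PAGE_SHIFT = 3 pages; if the
 * pool runs dry after two pages, the partial two-page bio is returned and
 * the remainder is covered by a later clone.
 */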
static struct bio *
crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
                   struct bio *base_bio, unsigned int *bio_vec_idx)
{
	struct bio *bio;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned int i;

	/*
	 * Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and
	 * to fail earlier. This is not necessary but increases throughput.
	 * FIXME: Is this really intelligent?
	 */
	if (base_bio)
		bio = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
	else
		bio = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
	if (!bio)
		return NULL;

	/* if the last bio was not complete, continue where that one ended */
	bio->bi_idx = *bio_vec_idx;
	bio->bi_vcnt = *bio_vec_idx;
	bio->bi_size = 0;
	bio->bi_flags &= ~(1 << BIO_SEG_VALID);

	/* bio->bi_idx pages have already been allocated */
	size -= bio->bi_idx * PAGE_SIZE;

	for(i = bio->bi_idx; i < nr_iovecs; i++) {
		struct bio_vec *bv = bio_iovec_idx(bio, i);

		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!bv->bv_page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if ((i - bio->bi_idx) == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		bv->bv_offset = 0;
		if (size > PAGE_SIZE)
			bv->bv_len = PAGE_SIZE;
		else
			bv->bv_len = size;

		bio->bi_size += bv->bv_len;
		bio->bi_vcnt++;
		size -= bv->bv_len;
	}

	if (!bio->bi_size) {
		bio_put(bio);
		return NULL;
	}

	/*
	 * Remember the last bio_vec allocated to be able
	 * to correctly continue after the splitting.
	 */
	*bio_vec_idx = bio->bi_vcnt;

	return bio;
}
static void crypt_free_buffer_pages(struct crypt_config *cc,
                                    struct bio *bio, unsigned int bytes)
{
	unsigned int i, start, end;
	struct bio_vec *bv;

	/*
	 * This is ugly, but Jens Axboe thinks that using bi_idx in the
	 * endio function is too dangerous at the moment, so I calculate the
	 * correct position using bi_vcnt and bi_size.
	 * The bv_offset and bv_len fields might already be modified but we
	 * know that we always allocated whole pages.
	 * A fix to the bi_idx issue in the kernel is in the works, so
	 * we will hopefully be able to revert to the cleaner solution soon.
	 */
	i = bio->bi_vcnt - 1;
	bv = bio_iovec_idx(bio, i);
	end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - bio->bi_size;
	start = end - bytes;

	start >>= PAGE_SHIFT;
	if (!(end & (PAGE_SIZE - 1)))
		end >>= PAGE_SHIFT;
	else
		end = (end >> PAGE_SHIFT) + 1;

	for(i = start; i < end; i++) {
		bv = bio_iovec_idx(bio, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}
/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void dec_pending(struct crypt_io *io, int error)
{
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (error < 0)
		io->error = error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	if (io->first_clone)
		bio_put(io->first_clone);

	bio_endio(io->bio, io->bio->bi_size, io->error);

	mempool_free(io, cc->io_pool);
}
/*
 * kcryptd:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context, so bios returning from read requests get
 * queued here.
 */
static struct workqueue_struct *_kcryptd_workqueue;

static void kcryptd_do_work(void *data)
{
	struct crypt_io *io = (struct crypt_io *) data;
	struct crypt_config *cc = (struct crypt_config *) io->target->private;
	struct convert_context ctx;
	int r;

	crypt_convert_init(cc, &ctx, io->bio, io->bio,
	                   io->bio->bi_sector - io->target->begin, 0);
	r = crypt_convert(cc, &ctx);

	dec_pending(io, r);
}

static void kcryptd_queue_io(struct crypt_io *io)
{
	INIT_WORK(&io->work, kcryptd_do_work, io);
	queue_work(_kcryptd_workqueue, &io->work);
}
/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for(i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}
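/*
 * Example (illustrative): the hex string "2a0f" decodes to key[0] = 0x2a,
 * key[1] = 0x0f; any non-hex character makes simple_strtoul() stop before
 * &buffer[2], so the key is rejected with -EINVAL.
 */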
/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for(i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}
static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
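/*
 * Example table line (illustrative; device, size and key are made up):
 *
 *	# map 524288 sectors of /dev/sdb1, AES in CBC mode with ESSIV
 *	echo "0 524288 crypt aes-cbc-essiv:sha256 <64-hex-digit-key> 0 /dev/sdb1 0" \
 *		| dmsetup create secure
 */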
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_blkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad1;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad1;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode,
		     cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad1;
	}

	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad1;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>".
	 * See comments at iv code
	 */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad2;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad2;

	cc->iv_size = crypto_blkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
		                  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
			DMWARN("Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad3;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad4;
	}

	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad5;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad5;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad5;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
	                  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad5;
	}

	if (ivmode && cc->iv_gen_ops) {
		/* undo the strsep() split so iv_mode reads "mode:opts" again */
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad5;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	ti->private = cc;
	return 0;

bad5:
	mempool_destroy(cc->page_pool);
bad4:
	mempool_destroy(cc->io_pool);
bad3:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad2:
	crypto_free_blkcipher(tfm);
bad1:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
	return -EINVAL;
}
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
	crypto_free_blkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
}
static int crypt_endio(struct bio *bio, unsigned int done, int error)
{
	struct crypt_io *io = (struct crypt_io *) bio->bi_private;
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (bio_data_dir(bio) == WRITE) {
		/*
		 * free the processed pages, even if
		 * it's only a partially completed write
		 */
		crypt_free_buffer_pages(cc, bio, done);
	}

	if (bio->bi_size)
		return 1;

	bio_put(bio);

	/*
	 * successful reads are decrypted by the worker thread
	 */
	if ((bio_data_dir(bio) == READ)
	    && bio_flagged(bio, BIO_UPTODATE)) {
		kcryptd_queue_io(io);
		return 0;
	}

	dec_pending(io, error);
	return error;
}
static inline struct bio *
crypt_clone(struct crypt_config *cc, struct crypt_io *io, struct bio *bio,
            sector_t sector, unsigned int *bvec_idx,
            struct convert_context *ctx)
{
	struct bio *clone;

	if (bio_data_dir(bio) == WRITE) {
		clone = crypt_alloc_buffer(cc, bio->bi_size,
		                           io->first_clone, bvec_idx);
		if (clone) {
			ctx->bio_out = clone;
			if (crypt_convert(cc, ctx) < 0) {
				crypt_free_buffer_pages(cc, clone,
				                        clone->bi_size);
				bio_put(clone);
				return NULL;
			}
		}
	} else {
		/*
		 * The block layer might modify the bvec array, so always
		 * copy the required bvecs because we need the original
		 * one in order to decrypt the whole bio data *afterwards*.
		 */
		clone = bio_alloc(GFP_NOIO, bio_segments(bio));
		if (clone) {
			clone->bi_idx = 0;
			clone->bi_vcnt = bio_segments(bio);
			clone->bi_size = bio->bi_size;
			memcpy(clone->bi_io_vec, bio_iovec(bio),
			       sizeof(struct bio_vec) * clone->bi_vcnt);
		}
	}

	if (!clone)
		return NULL;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_sector = cc->start + sector;
	clone->bi_rw = bio->bi_rw;

	return clone;
}
static int crypt_map(struct dm_target *ti, struct bio *bio,
                     union map_info *map_context)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	struct crypt_io *io;
	struct convert_context ctx;
	struct bio *clone;
	unsigned int remaining = bio->bi_size;
	sector_t sector = bio->bi_sector - ti->begin;
	unsigned int bvec_idx = 0;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->bio = bio;
	io->first_clone = NULL;
	io->error = 0;
	atomic_set(&io->pending, 1); /* hold a reference */

	if (bio_data_dir(bio) == WRITE)
		crypt_convert_init(cc, &ctx, NULL, bio, sector, 1);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_clone(cc, io, bio, sector, &bvec_idx, &ctx);
		if (!clone)
			goto cleanup;

		if (!io->first_clone) {
			/*
			 * hold a reference to the first clone, because it
			 * holds the bio_vec array and that can't be freed
			 * before all other clones are released
			 */
			bio_get(clone);
			io->first_clone = clone;
		}
		atomic_inc(&io->pending);

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		generic_make_request(clone);

		/* out of memory -> run queues */
		if (remaining)
			blk_congestion_wait(bio_data_dir(clone), HZ/100);
	}

	/* drop reference, clones could have returned before we reach this */
	dec_pending(io, 0);
	return 0;

cleanup:
	if (io->first_clone) {
		dec_pending(io, -ENOMEM);
		return 0;
	}

	/* if no bio has been dispatched yet, we can directly return the error */
	mempool_free(io, cc->io_pool);
	return -ENOMEM;
}
static int crypt_status(struct dm_target *ti, status_type_t type,
                        char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	const char *cipher;
	const char *chainmode = NULL;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		cipher = crypto_blkcipher_name(cc->tfm);

		chainmode = cc->chainmode;

		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode);
		else
			DMEMIT("%s-%s ", cipher, chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}
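/*
 * Example (illustrative; the mapping name "secure" is made up): wiping and
 * replacing the key of a suspended mapping from userspace:
 *
 *	dmsetup suspend secure
 *	dmsetup message secure 0 key wipe
 *	dmsetup message secure 0 key set <new-hex-key>
 *	dmsetup resume secure
 */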
static struct target_type crypt_target = {
	.name   = "crypt",
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
};
static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = kmem_cache_create("dm-crypt_io",
	                                   sizeof(struct crypt_io),
	                                   0, 0, NULL, NULL);
	if (!_crypt_io_pool)
		return -ENOMEM;

	_kcryptd_workqueue = create_workqueue("kcryptd");
	if (!_kcryptd_workqueue) {
		r = -ENOMEM;
		DMERR("couldn't create kcryptd");
		goto bad1;
	}

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		goto bad2;
	}

	return 0;

bad2:
	destroy_workqueue(_kcryptd_workqueue);
bad1:
	kmem_cache_destroy(_crypt_io_pool);
	return r;
}
static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR("unregister failed %d", r);

	destroy_workqueue(_kcryptd_workqueue);
	kmem_cache_destroy(_crypt_io_pool);
}
module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");