dm crypt: add async request mempool
[deliverable/linux.git] / drivers/md/dm-crypt.c
/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2007 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include "dm.h"

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
        struct bio *bio_in;
        struct bio *bio_out;
        unsigned int offset_in;
        unsigned int offset_out;
        unsigned int idx_in;
        unsigned int idx_out;
        sector_t sector;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
        struct dm_target *target;
        struct bio *base_bio;
        struct work_struct work;

        struct convert_context ctx;

        atomic_t pending;
        int error;
        sector_t sector;
};

struct dm_crypt_request {
        struct scatterlist sg_in;
        struct scatterlist sg_out;
};

struct crypt_config;

struct crypt_iv_operations {
        int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
                   const char *opts);
        void (*dtr)(struct crypt_config *cc);
        const char *(*status)(struct crypt_config *cc);
        int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

struct crypt_config {
        struct dm_dev *dev;
        sector_t start;

        /*
         * pool for per bio private data, crypto requests and
         * encryption requests/buffer pages
         */
        mempool_t *io_pool;
        mempool_t *req_pool;
        mempool_t *page_pool;
        struct bio_set *bs;

        struct workqueue_struct *io_queue;
        struct workqueue_struct *crypt_queue;
        /*
         * crypto related data
         */
        struct crypt_iv_operations *iv_gen_ops;
        char *iv_mode;
        union {
                struct crypto_cipher *essiv_tfm;
                int benbi_shift;
        } iv_gen_private;
        sector_t iv_offset;
        unsigned int iv_size;

        /*
         * Layout of each crypto request:
         *
         *    struct ablkcipher_request
         *       context
         *       padding
         *    struct dm_crypt_request
         *       padding
         *    IV
         *
         * The padding is added so that dm_crypt_request and the IV are
         * correctly aligned.
         */
        unsigned int dmreq_start;
        struct ablkcipher_request *req;

        char cipher[CRYPTO_MAX_ALG_NAME];
        char chainmode[CRYPTO_MAX_ALG_NAME];
        struct crypto_blkcipher *tfm;
        unsigned long flags;
        unsigned int key_size;
        u8 key[0];
};

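/*
 * Illustrative sketch (hypothetical, not code from this file) of how a
 * request laid out as above would be carved up:
 *
 *      struct dm_crypt_request *dmreq =
 *              (void *)((char *)req + cc->dmreq_start);
 *      u8 *iv = (u8 *)(dmreq + 1);   (the IV bytes follow dm_crypt_request)
 */
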
#define MIN_IOS 16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES 8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

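/*
 * Worked example (for illustration only): with a 16-byte IV and sector 5,
 * the "plain" generator below produces 05 00 00 00 followed by twelve
 * zero bytes, i.e. the low 32 bits of the sector number, little-endian.
 */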
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        memset(iv, 0, cc->iv_size);
        *(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

        return 0;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
{
        struct crypto_cipher *essiv_tfm;
        struct crypto_hash *hash_tfm;
        struct hash_desc desc;
        struct scatterlist sg;
        unsigned int saltsize;
        u8 *salt;
        int err;

        if (opts == NULL) {
                ti->error = "Digest algorithm missing for ESSIV mode";
                return -EINVAL;
        }

        /* Hash the cipher key with the given hash algorithm */
        hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(hash_tfm)) {
                ti->error = "Error initializing ESSIV hash";
                return PTR_ERR(hash_tfm);
        }

        saltsize = crypto_hash_digestsize(hash_tfm);
        salt = kmalloc(saltsize, GFP_KERNEL);
        if (salt == NULL) {
                ti->error = "Error kmallocing salt storage in ESSIV";
                crypto_free_hash(hash_tfm);
                return -ENOMEM;
        }

        sg_init_one(&sg, cc->key, cc->key_size);
        desc.tfm = hash_tfm;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
        crypto_free_hash(hash_tfm);

        if (err) {
                ti->error = "Error calculating hash in ESSIV";
                kfree(salt);
                return err;
        }

        /* Setup the essiv_tfm with the given salt */
        essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(essiv_tfm)) {
                ti->error = "Error allocating crypto tfm for ESSIV";
                kfree(salt);
                return PTR_ERR(essiv_tfm);
        }
        if (crypto_cipher_blocksize(essiv_tfm) !=
            crypto_blkcipher_ivsize(cc->tfm)) {
                ti->error = "Block size of ESSIV cipher does "
                            "not match IV size of block cipher";
                crypto_free_cipher(essiv_tfm);
                kfree(salt);
                return -EINVAL;
        }
        err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
        if (err) {
                ti->error = "Failed to set key for ESSIV cipher";
                crypto_free_cipher(essiv_tfm);
                kfree(salt);
                return err;
        }
        kfree(salt);

        cc->iv_gen_private.essiv_tfm = essiv_tfm;
        return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
        crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
        cc->iv_gen_private.essiv_tfm = NULL;
}

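/*
 * ESSIV: the IV is the sector number encrypted with the single-block
 * cipher that was keyed with a hash of the bulk key in
 * crypt_iv_essiv_ctr() above.
 */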
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        memset(iv, 0, cc->iv_size);
        *(u64 *)iv = cpu_to_le64(sector);
        crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
        return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
{
        unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
        int log = ilog2(bs);

        /* we need to calculate how far we must shift the sector count
         * to get the cipher block count, we use this shift in _gen */

        if (1 << log != bs) {
                ti->error = "cipher blocksize is not a power of 2";
                return -EINVAL;
        }

        if (log > 9) {
                ti->error = "cipher blocksize is > 512";
                return -EINVAL;
        }

        cc->iv_gen_private.benbi_shift = 9 - log;

        return 0;
}
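
/*
 * Worked example (for illustration only): a cipher with 16-byte blocks
 * gives log = 4 and benbi_shift = 5 above, so sector N starts at narrow
 * block number (N << 5) + 1: there are 32 cipher blocks per 512-byte
 * sector, and the count starts at 1 as LRW expects.
 */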

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        __be64 val;

        memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

        val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
        put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

        return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        memset(iv, 0, cc->iv_size);

        return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
        .generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
        .ctr = crypt_iv_essiv_ctr,
        .dtr = crypt_iv_essiv_dtr,
        .generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
        .ctr = crypt_iv_benbi_ctr,
        .dtr = crypt_iv_benbi_dtr,
        .generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
        .generator = crypt_iv_null_gen
};

static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                          struct scatterlist *in, unsigned int length,
                          int write, sector_t sector)
{
        u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
        struct blkcipher_desc desc = {
                .tfm = cc->tfm,
                .info = iv,
                .flags = CRYPTO_TFM_REQ_MAY_SLEEP,
        };
        int r;

        if (cc->iv_gen_ops) {
                r = cc->iv_gen_ops->generator(cc, iv, sector);
                if (r < 0)
                        return r;

                if (write)
                        r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
                else
                        r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
        } else {
                if (write)
                        r = crypto_blkcipher_encrypt(&desc, out, in, length);
                else
                        r = crypto_blkcipher_decrypt(&desc, out, in, length);
        }

        return r;
}

static void crypt_convert_init(struct crypt_config *cc,
                               struct convert_context *ctx,
                               struct bio *bio_out, struct bio *bio_in,
                               sector_t sector)
{
        ctx->bio_in = bio_in;
        ctx->bio_out = bio_out;
        ctx->offset_in = 0;
        ctx->offset_out = 0;
        ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
        ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
        ctx->sector = sector + cc->iv_offset;
}

static int crypt_convert_block(struct crypt_config *cc,
                               struct convert_context *ctx)
{
        struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
        struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
        struct dm_crypt_request dmreq;

        sg_init_table(&dmreq.sg_in, 1);
        sg_set_page(&dmreq.sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
                    bv_in->bv_offset + ctx->offset_in);

        sg_init_table(&dmreq.sg_out, 1);
        sg_set_page(&dmreq.sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
                    bv_out->bv_offset + ctx->offset_out);

        ctx->offset_in += 1 << SECTOR_SHIFT;
        if (ctx->offset_in >= bv_in->bv_len) {
                ctx->offset_in = 0;
                ctx->idx_in++;
        }

        ctx->offset_out += 1 << SECTOR_SHIFT;
        if (ctx->offset_out >= bv_out->bv_len) {
                ctx->offset_out = 0;
                ctx->idx_out++;
        }

        return crypt_convert_scatterlist(cc, &dmreq.sg_out, &dmreq.sg_in,
                                         dmreq.sg_in.length,
                                         bio_data_dir(ctx->bio_in) == WRITE,
                                         ctx->sector);
}

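/*
 * Allocate an ablkcipher request from the new request mempool and cache
 * it in cc->req.  Nothing in this version of the file calls the helper
 * yet; presumably the asynchronous conversion path introduced by
 * follow-up patches consumes it, reusing a cached request instead of
 * re-allocating one on every block.
 */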
static void crypt_alloc_req(struct crypt_config *cc,
                            struct convert_context *ctx)
{
        if (!cc->req)
                cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
{
        int r = 0;

        while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
              ctx->idx_out < ctx->bio_out->bi_vcnt) {
                r = crypt_convert_block(cc, ctx);
                if (r < 0)
                        break;

                ctx->sector++;
        }

        return r;
}

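/*
 * Clone bios are allocated from the private bioset cc->bs, so they must
 * be freed back into that bioset; clone_init() below wires this
 * destructor up for every clone.
 */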
static void dm_crypt_bio_destructor(struct bio *bio)
{
        struct dm_crypt_io *io = bio->bi_private;
        struct crypt_config *cc = io->target->private;

        bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
        struct crypt_config *cc = io->target->private;
        struct bio *clone;
        unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
        unsigned i, len;
        struct page *page;

        clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
        if (!clone)
                return NULL;

        clone_init(io, clone);

        for (i = 0; i < nr_iovecs; i++) {
                page = mempool_alloc(cc->page_pool, gfp_mask);
                if (!page)
                        break;

                /*
                 * if additional pages cannot be allocated without waiting,
                 * return a partially allocated bio, the caller will then try
                 * to allocate additional bios while submitting this partial bio
                 */
                if (i == (MIN_BIO_PAGES - 1))
                        gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

                len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

                if (!bio_add_page(clone, page, len, 0)) {
                        mempool_free(page, cc->page_pool);
                        break;
                }

                size -= len;
        }

        if (!clone->bi_size) {
                bio_put(clone);
                return NULL;
        }

        return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
        unsigned int i;
        struct bio_vec *bv;

        for (i = 0; i < clone->bi_vcnt; i++) {
                bv = bio_iovec_idx(clone, i);
                BUG_ON(!bv->bv_page);
                mempool_free(bv->bv_page, cc->page_pool);
                bv->bv_page = NULL;
        }
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;

        if (!atomic_dec_and_test(&io->pending))
                return;

        bio_endio(io->base_bio, io->error);
        mempool_free(io, cc->io_pool);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
static void crypt_endio(struct bio *clone, int error)
{
        struct dm_crypt_io *io = clone->bi_private;
        struct crypt_config *cc = io->target->private;
        unsigned rw = bio_data_dir(clone);

        if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
                error = -EIO;

        /*
         * free the processed pages
         */
        if (rw == WRITE)
                crypt_free_buffer_pages(cc, clone);

        bio_put(clone);

        if (rw == READ && !error) {
                kcryptd_queue_crypt(io);
                return;
        }

        if (unlikely(error))
                io->error = error;

        crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
        struct crypt_config *cc = io->target->private;

        clone->bi_private = io;
        clone->bi_end_io = crypt_endio;
        clone->bi_bdev = cc->dev->bdev;
        clone->bi_rw = io->base_bio->bi_rw;
        clone->bi_destructor = dm_crypt_bio_destructor;
}

static void kcryptd_io_read(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;
        struct bio *base_bio = io->base_bio;
        struct bio *clone;

        atomic_inc(&io->pending);

        /*
         * The block layer might modify the bvec array, so always
         * copy the required bvecs because we need the original
         * one in order to decrypt the whole bio data *afterwards*.
         */
        clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
        if (unlikely(!clone)) {
                io->error = -ENOMEM;
                crypt_dec_pending(io);
                return;
        }

        clone_init(io, clone);
        clone->bi_idx = 0;
        clone->bi_vcnt = bio_segments(base_bio);
        clone->bi_size = base_bio->bi_size;
        clone->bi_sector = cc->start + io->sector;
        memcpy(clone->bi_io_vec, bio_iovec(base_bio),
               sizeof(struct bio_vec) * clone->bi_vcnt);

        generic_make_request(clone);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
}

static void kcryptd_io(struct work_struct *work)
{
        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

        if (bio_data_dir(io->base_bio) == READ)
                kcryptd_io_read(io);
        else
                kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;

        INIT_WORK(&io->work, kcryptd_io);
        queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int error)
{
        struct bio *clone = io->ctx.bio_out;
        struct crypt_config *cc = io->target->private;

        if (unlikely(error < 0)) {
                crypt_free_buffer_pages(cc, clone);
                bio_put(clone);
                io->error = -EIO;
                return;
        }

        /* crypt_convert should have filled the clone bio */
        BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

        clone->bi_sector = cc->start + io->sector;
        io->sector += bio_sectors(clone);

        atomic_inc(&io->pending);
        generic_make_request(clone);
}

static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;
        struct bio *clone;
        unsigned remaining = io->base_bio->bi_size;
        int r;

        /*
         * The allocated buffers can be smaller than the whole bio,
         * so repeat the whole process until all the data can be handled.
         */
        while (remaining) {
                clone = crypt_alloc_buffer(io, remaining);
                if (unlikely(!clone)) {
                        io->error = -ENOMEM;
                        return;
                }

                io->ctx.bio_out = clone;
                io->ctx.idx_out = 0;

                remaining -= clone->bi_size;

                r = crypt_convert(cc, &io->ctx);

                kcryptd_crypt_write_io_submit(io, r);
                if (unlikely(r < 0))
                        return;

                /* out of memory -> run queues */
                if (unlikely(remaining))
                        congestion_wait(WRITE, HZ/100);
        }
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;

        /*
         * Prevent io from disappearing until this function completes.
         */
        atomic_inc(&io->pending);

        crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
        kcryptd_crypt_write_convert_loop(io);

        crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
        if (unlikely(error < 0))
                io->error = -EIO;

        crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;
        int r = 0;

        crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
                           io->sector);

        r = crypt_convert(cc, &io->ctx);

        kcryptd_crypt_read_done(io, r);
}

static void kcryptd_crypt(struct work_struct *work)
{
        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

        if (bio_data_dir(io->base_bio) == READ)
                kcryptd_crypt_read_convert(io);
        else
                kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;

        INIT_WORK(&io->work, kcryptd_crypt);
        queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
        char buffer[3];
        char *endp;
        unsigned int i;

        buffer[2] = '\0';

        for (i = 0; i < size; i++) {
                buffer[0] = *hex++;
                buffer[1] = *hex++;

                key[i] = (u8)simple_strtoul(buffer, &endp, 16);

                if (endp != &buffer[2])
                        return -EINVAL;
        }

        if (*hex != '\0')
                return -EINVAL;

        return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
        unsigned int i;

        for (i = 0; i < size; i++) {
                sprintf(hex, "%02x", *key);
                hex += 2;
                key++;
        }
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
        unsigned key_size = strlen(key) >> 1;

        if (cc->key_size && cc->key_size != key_size)
                return -EINVAL;

        cc->key_size = key_size; /* initial settings */

        if ((!key_size && strcmp(key, "-")) ||
            (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
                return -EINVAL;

        set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

        return 0;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
        clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
        memset(&cc->key, 0, cc->key_size * sizeof(u8));
        return 0;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct crypt_config *cc;
        struct crypto_blkcipher *tfm;
        char *tmp;
        char *cipher;
        char *chainmode;
        char *ivmode;
        char *ivopts;
        unsigned int key_size;
        unsigned long long tmpll;

        if (argc != 5) {
                ti->error = "Not enough arguments";
                return -EINVAL;
        }

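        /*
         * Example table line (illustrative only; the device and sizes
         * are made up):
         *   0 409600 crypt aes-cbc-essiv:sha256 <hex key> 0 /dev/sda1 0
         * argv[0] is parsed below as cipher[-chainmode[-ivmode[:ivopts]]].
         */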
        tmp = argv[0];
        cipher = strsep(&tmp, "-");
        chainmode = strsep(&tmp, "-");
        ivopts = strsep(&tmp, "-");
        ivmode = strsep(&ivopts, ":");

        if (tmp)
                DMWARN("Unexpected additional cipher options");

        key_size = strlen(argv[1]) >> 1;

        cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
        if (cc == NULL) {
                ti->error =
                        "Cannot allocate transparent encryption context";
                return -ENOMEM;
        }

        if (crypt_set_key(cc, argv[1])) {
                ti->error = "Error decoding key";
                goto bad_cipher;
        }

        /* Compatibility mode for old dm-crypt cipher strings */
        if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
                chainmode = "cbc";
                ivmode = "plain";
        }

        if (strcmp(chainmode, "ecb") && !ivmode) {
                ti->error = "This chaining mode requires an IV mechanism";
                goto bad_cipher;
        }

        if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
                ti->error = "Chain mode + cipher name is too long";
                goto bad_cipher;
        }

        tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm)) {
                ti->error = "Error allocating crypto tfm";
                goto bad_cipher;
        }

        strcpy(cc->cipher, cipher);
        strcpy(cc->chainmode, chainmode);
        cc->tfm = tfm;

        /*
         * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi",
         * "null".  See comments at iv code
         */

        if (ivmode == NULL)
                cc->iv_gen_ops = NULL;
        else if (strcmp(ivmode, "plain") == 0)
                cc->iv_gen_ops = &crypt_iv_plain_ops;
        else if (strcmp(ivmode, "essiv") == 0)
                cc->iv_gen_ops = &crypt_iv_essiv_ops;
        else if (strcmp(ivmode, "benbi") == 0)
                cc->iv_gen_ops = &crypt_iv_benbi_ops;
        else if (strcmp(ivmode, "null") == 0)
                cc->iv_gen_ops = &crypt_iv_null_ops;
        else {
                ti->error = "Invalid IV mode";
                goto bad_ivmode;
        }

        if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
            cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
                goto bad_ivmode;

        cc->iv_size = crypto_blkcipher_ivsize(tfm);
        if (cc->iv_size)
                /* at least a 64 bit sector number should fit in our buffer */
                cc->iv_size = max(cc->iv_size,
                                  (unsigned int)(sizeof(u64) / sizeof(u8)));
        else {
                if (cc->iv_gen_ops) {
                        DMWARN("Selected cipher does not support IVs");
                        if (cc->iv_gen_ops->dtr)
                                cc->iv_gen_ops->dtr(cc);
                        cc->iv_gen_ops = NULL;
                }
        }

        cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
        if (!cc->io_pool) {
                ti->error = "Cannot allocate crypt io mempool";
                goto bad_slab_pool;
        }

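        /*
         * Size of each element in the new request mempool: an
         * ablkcipher_request, ALIGN()ed so that what follows sits at the
         * crypto layer's context alignment, then one dm_crypt_request
         * plus cc->iv_size bytes of IV, matching the layout sketched in
         * struct crypt_config above.
         */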
        cc->dmreq_start = sizeof(struct ablkcipher_request);
        cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());

        cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
                        sizeof(struct dm_crypt_request) + cc->iv_size);
        if (!cc->req_pool) {
                ti->error = "Cannot allocate crypt request mempool";
                goto bad_req_pool;
        }
        cc->req = NULL;

        cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
        if (!cc->page_pool) {
                ti->error = "Cannot allocate page mempool";
                goto bad_page_pool;
        }

        cc->bs = bioset_create(MIN_IOS, MIN_IOS);
        if (!cc->bs) {
                ti->error = "Cannot allocate crypt bioset";
                goto bad_bs;
        }

        if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
                ti->error = "Error setting key";
                goto bad_device;
        }

        if (sscanf(argv[2], "%llu", &tmpll) != 1) {
                ti->error = "Invalid iv_offset sector";
                goto bad_device;
        }
        cc->iv_offset = tmpll;

        if (sscanf(argv[4], "%llu", &tmpll) != 1) {
                ti->error = "Invalid device sector";
                goto bad_device;
        }
        cc->start = tmpll;

        if (dm_get_device(ti, argv[3], cc->start, ti->len,
                          dm_table_get_mode(ti->table), &cc->dev)) {
                ti->error = "Device lookup failed";
                goto bad_device;
        }

        if (ivmode && cc->iv_gen_ops) {
                if (ivopts)
                        *(ivopts - 1) = ':';
                cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
                if (!cc->iv_mode) {
                        ti->error = "Error kmallocing iv_mode string";
                        goto bad_ivmode_string;
                }
                strcpy(cc->iv_mode, ivmode);
        } else
                cc->iv_mode = NULL;

        cc->io_queue = create_singlethread_workqueue("kcryptd_io");
        if (!cc->io_queue) {
                ti->error = "Couldn't create kcryptd io queue";
                goto bad_io_queue;
        }

        cc->crypt_queue = create_singlethread_workqueue("kcryptd");
        if (!cc->crypt_queue) {
                ti->error = "Couldn't create kcryptd queue";
                goto bad_crypt_queue;
        }

        ti->private = cc;
        return 0;

bad_crypt_queue:
        destroy_workqueue(cc->io_queue);
bad_io_queue:
        kfree(cc->iv_mode);
bad_ivmode_string:
        dm_put_device(ti, cc->dev);
bad_device:
        bioset_free(cc->bs);
bad_bs:
        mempool_destroy(cc->page_pool);
bad_page_pool:
        mempool_destroy(cc->req_pool);
bad_req_pool:
        mempool_destroy(cc->io_pool);
bad_slab_pool:
        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);
bad_ivmode:
        crypto_free_blkcipher(tfm);
bad_cipher:
        /* Must zero key material before freeing */
        memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
        kfree(cc);
        return -EINVAL;
}

static void crypt_dtr(struct dm_target *ti)
{
        struct crypt_config *cc = (struct crypt_config *) ti->private;

        destroy_workqueue(cc->io_queue);
        destroy_workqueue(cc->crypt_queue);

        if (cc->req)
                mempool_free(cc->req, cc->req_pool);

        bioset_free(cc->bs);
        mempool_destroy(cc->page_pool);
        mempool_destroy(cc->req_pool);
        mempool_destroy(cc->io_pool);

        kfree(cc->iv_mode);
        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);
        crypto_free_blkcipher(cc->tfm);
        dm_put_device(ti, cc->dev);

        /* Must zero key material before freeing */
        memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
        kfree(cc);
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
                     union map_info *map_context)
{
        struct crypt_config *cc = ti->private;
        struct dm_crypt_io *io;

        io = mempool_alloc(cc->io_pool, GFP_NOIO);
        io->target = ti;
        io->base_bio = bio;
        io->sector = bio->bi_sector - ti->begin;
        io->error = 0;
        atomic_set(&io->pending, 0);

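        /*
         * Reads are queued to kcryptd_io first: the ciphertext must be
         * read from the device before it can be decrypted.  Writes go
         * straight to kcryptd, which encrypts into a freshly allocated
         * clone and submits that clone itself.
         */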
        if (bio_data_dir(io->base_bio) == READ)
                kcryptd_queue_io(io);
        else
                kcryptd_queue_crypt(io);

        return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
                        char *result, unsigned int maxlen)
{
        struct crypt_config *cc = (struct crypt_config *) ti->private;
        unsigned int sz = 0;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                if (cc->iv_mode)
                        DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
                               cc->iv_mode);
                else
                        DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

                if (cc->key_size > 0) {
                        if ((maxlen - sz) < ((cc->key_size << 1) + 1))
                                return -ENOMEM;

                        crypt_encode_key(result + sz, cc->key, cc->key_size);
                        sz += cc->key_size << 1;
                } else {
                        if (sz >= maxlen)
                                return -ENOMEM;
                        result[sz++] = '-';
                }

                DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
                       cc->dev->name, (unsigned long long)cc->start);
                break;
        }
        return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
                DMERR("aborting resume - crypt key is not set.");
                return -EAGAIN;
        }

        return 0;
}

static void crypt_resume(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 * key set <key>
 * key wipe
 */
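/*
 * Example usage (illustrative only; the device name is made up):
 *   dmsetup suspend cryptvol
 *   dmsetup message cryptvol 0 key wipe
 * A later "dmsetup resume cryptvol" fails until a new key is set,
 * per crypt_preresume() above.
 */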
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
        struct crypt_config *cc = ti->private;

        if (argc < 2)
                goto error;

        if (!strnicmp(argv[0], MESG_STR("key"))) {
                if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
                        DMWARN("not suspended during key manipulation.");
                        return -EINVAL;
                }
                if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
                        return crypt_set_key(cc, argv[2]);
                if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
                        return crypt_wipe_key(cc);
        }

error:
        DMWARN("unrecognised message received.");
        return -EINVAL;
}

static struct target_type crypt_target = {
        .name   = "crypt",
        .version= {1, 5, 0},
        .module = THIS_MODULE,
        .ctr    = crypt_ctr,
        .dtr    = crypt_dtr,
        .map    = crypt_map,
        .status = crypt_status,
        .postsuspend = crypt_postsuspend,
        .preresume = crypt_preresume,
        .resume = crypt_resume,
        .message = crypt_message,
};

static int __init dm_crypt_init(void)
{
        int r;

        _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
        if (!_crypt_io_pool)
                return -ENOMEM;

        r = dm_register_target(&crypt_target);
        if (r < 0) {
                DMERR("register failed %d", r);
                kmem_cache_destroy(_crypt_io_pool);
        }

        return r;
}

static void __exit dm_crypt_exit(void)
{
        int r = dm_unregister_target(&crypt_target);

        if (r < 0)
                DMERR("unregister failed %d", r);

        kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");