/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/percpu.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)
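
/*
 * Editorial note: MESG_STR("key") expands to the argument pair
 * "key", sizeof("key").  Because sizeof() counts the trailing NUL,
 * the strnicmp() calls in crypt_message() below end up requiring an
 * exact (case-insensitive) match rather than a prefix match.
 */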

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

/*
 * Duplicated per-CPU state for cipher.
 */
struct crypt_cpu {
	struct ablkcipher_request *req;
	struct crypto_ablkcipher *tfm;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
};

/*
 * The fields in here must be read-only after initialization;
 * mutable state belongs in struct crypt_cpu.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Duplicated per cpu state. Access through
	 * per_cpu_ptr() only.
	 */
	struct crypt_cpu __percpu *cpu;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);

static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
{
	return this_cpu_ptr(cc->cpu);
}

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return __this_cpu_ptr(cc->cpu)->tfm;
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
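
/*
 * Editorial illustration (not part of the original source): for sector
 * 0x0102030405060708 and a 16-byte IV, "plain" produces
 *   08 07 06 05 00 00 00 00 00 00 00 00 00 00 00 00
 * (only the low 32 bits of the sector number survive), while "plain64"
 * produces
 *   08 07 06 05 04 03 02 01 00 00 00 00 00 00 00 00.
 */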

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err, cpu;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;

		err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
					   crypto_hash_digestsize(essiv->hash_tfm));
		if (err)
			return err;
	}

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int cpu, r, err = 0;

	memset(essiv->salt, 0, salt_size);

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
		r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
		if (r)
			err = r;
	}

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	int cpu;
	struct crypt_cpu *cpu_cc;
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	for_each_possible_cpu(cpu) {
		cpu_cc = per_cpu_ptr(cc->cpu, cpu);
		essiv_tfm = cpu_cc->iv_private;

		if (essiv_tfm)
			crypto_free_cipher(essiv_tfm);

		cpu_cc->iv_private = NULL;
	}
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err, cpu;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error allocating salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	for_each_possible_cpu(cpu) {
		essiv_tfm = setup_essiv_cpu(cc, ti, salt,
					    crypto_hash_digestsize(hash_tfm));
		if (IS_ERR(essiv_tfm)) {
			crypt_iv_essiv_dtr(cc);
			return PTR_ERR(essiv_tfm);
		}
		per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
	}

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;

	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count; we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}
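
/*
 * Editorial example (not from the original source): for a 16-byte-wide
 * narrow block mode such as LRW, bs = 16, log = 4 and shift = 9 - 4 = 5,
 * so crypt_iv_benbi_gen() below maps sector 2 to the 1-based big-endian
 * block count (2 << 5) + 1 = 65.
 */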

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* remaining bytes are set below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr = crypt_iv_essiv_ctr,
	.dtr = crypt_iv_essiv_dtr,
	.init = crypt_iv_essiv_init,
	.wipe = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr = crypt_iv_benbi_ctr,
	.dtr = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = dmreq_of_req(cc, req);
	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
			 crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);

	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);

	if (!this_cc->req)
		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfm);
	ablkcipher_request_set_callback(this_cc->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	int r;

	atomic_set(&ctx->pending, 1);

	while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
	       ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->pending);

		r = crypt_convert_block(cc, ctx, this_cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			this_cc->req = NULL;
			ctx->sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->pending);
			ctx->sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->pending);
			return r;
		}
	}

	return 0;
}
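
/*
 * Editorial note on the reference counting above: ctx->pending starts at 1
 * and is incremented once per block.  Synchronous completions (case 0) drop
 * their reference immediately; asynchronous ones (-EINPROGRESS/-EBUSY) drop
 * it later in kcryptd_async_done().  The caller drops the initial reference
 * with atomic_dec_and_test(), so whichever side reaches zero finishes the io.
 */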

static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size.
 * This should never violate the device limitations.
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio; the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_rw = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static void kcryptd_io_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	crypt_inc_pending(io);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		io->error = -ENOMEM;
		crypt_dec_pending(io);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_io_read(io);
	else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
					  int error, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);
		r = crypt_convert(cc, &io->ctx);
		crypt_finished = atomic_dec_and_test(&io->ctx.pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, r, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->target, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	if (atomic_dec_and_test(&io->ctx.pending))
		kcryptd_crypt_read_done(io, r);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io, error);
	else
		kcryptd_crypt_write_io_submit(io, error, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}
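
/*
 * Editorial example: crypt_decode_key(key, "deadbeef", 4) fills key with
 * { 0xde, 0xad, 0xbe, 0xef } and returns 0; any non-hex character makes
 * the endp check fail and the function return -EINVAL.
 */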

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	int cpu, err = 0, r;

	for_each_possible_cpu(cpu) {
		r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfm,
					     cc->key, cc->key_size);
		if (r)
			err = r;
	}

	return err;
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	/* The key size may not be changed. */
	if (cc->key_size != (strlen(key) >> 1))
		return -EINVAL;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		return -EINVAL;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return crypt_setkey_allcpus(cc);
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;
	struct crypt_cpu *cpu_cc;
	int cpu;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	if (cc->cpu)
		for_each_possible_cpu(cpu) {
			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
			if (cpu_cc->req)
				mempool_free(cpu_cc->req, cc->req_pool);
			if (cpu_cc->tfm)
				crypto_free_ablkcipher(cpu_cc->tfm);
		}

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	if (cc->cpu)
		free_percpu(cc->cpu);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	struct crypto_ablkcipher *tfm;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts;
	char *cipher_api = NULL;
	int cpu, ret = -EINVAL;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher-mode-iv:ivopts
	 */
	tmp = cipher_in;
	cipher = strsep(&tmp, "-");

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	cc->cpu = alloc_percpu(struct crypt_cpu);
	if (!cc->cpu) {
		ti->error = "Cannot allocate per cpu state";
		goto bad_mem;
	}

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	for_each_possible_cpu(cpu) {
		tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0);
		if (IS_ERR(tfm)) {
			ret = PTR_ERR(tfm);
			ti->error = "Error allocating crypto tfm";
			goto bad;
		}
		per_cpu_ptr(cc->cpu, cpu)->tfm = tfm;
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
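/*
 * Editorial example of a matching table line (device and key are
 * hypothetical):
 *
 *   dmsetup create cryptvol --table \
 *     "0 2097152 crypt aes-cbc-essiv:sha256 <64-hex-digit-key> 0 /dev/sdb1 0"
 */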
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size;
	unsigned long long tmpll;
	int ret;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
			   ~(crypto_tfm_ctx_alignment() - 1);
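
	/*
	 * Editorial note: dmreq_start is now the byte offset of struct
	 * dm_crypt_request inside each pool element.  With hypothetical
	 * sizes (a 64-byte ablkcipher_request, a 32-byte tfm request
	 * context, 16-byte context alignment) this gives
	 * ALIGN(64 + 32, 16) = 96, plus whatever the alignmask term adds.
	 */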

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io",
				       WQ_NON_REENTRANT|
				       WQ_MEM_RECLAIM,
				       1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_NON_REENTRANT|
					  WQ_CPU_INTENSIVE|
					  WQ_MEM_RECLAIM,
					  1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_requests = 1;
	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc;

	if (bio->bi_rw & REQ_FLUSH) {
		cc = ti->private;
		bio->bi_bdev = cc->dev->bdev;
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
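/*
 * Editorial example (hypothetical device name): while the device is
 * suspended, the key can be wiped and replaced via:
 *
 *   dmsetup suspend cryptvol
 *   dmsetup message cryptvol 0 key wipe
 *   dmsetup message cryptvol 0 key set <64-hex-digit-key>
 *   dmsetup resume cryptvol
 */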
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 9, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");