dm crypt: remove unused io_pool and _crypt_io_pool
drivers/md/dm-crypt.c
1 /*
2 * Copyright (C) 2003 Jana Saout <jana@saout.de>
3 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
4 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
5 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
6 *
7 * This file is released under the GPL.
8 */
9
10 #include <linux/completion.h>
11 #include <linux/err.h>
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/kernel.h>
15 #include <linux/bio.h>
16 #include <linux/blkdev.h>
17 #include <linux/mempool.h>
18 #include <linux/slab.h>
19 #include <linux/crypto.h>
20 #include <linux/workqueue.h>
21 #include <linux/backing-dev.h>
22 #include <linux/atomic.h>
23 #include <linux/scatterlist.h>
24 #include <asm/page.h>
25 #include <asm/unaligned.h>
26 #include <crypto/hash.h>
27 #include <crypto/md5.h>
28 #include <crypto/algapi.h>
29
30 #include <linux/device-mapper.h>
31
32 #define DM_MSG_PREFIX "crypt"
33
34 /*
35 * context holding the current state of a multi-part conversion
36 */
37 struct convert_context {
38 struct completion restart;
39 struct bio *bio_in;
40 struct bio *bio_out;
41 struct bvec_iter iter_in;
42 struct bvec_iter iter_out;
43 sector_t cc_sector;
44 atomic_t cc_pending;
45 struct ablkcipher_request *req;
46 };
47
48 /*
49 * per bio private data
50 */
51 struct dm_crypt_io {
52 struct crypt_config *cc;
53 struct bio *base_bio;
54 struct work_struct work;
55
56 struct convert_context ctx;
57
58 atomic_t io_pending;
59 int error;
60 sector_t sector;
61 } CRYPTO_MINALIGN_ATTR;
62
63 struct dm_crypt_request {
64 struct convert_context *ctx;
65 struct scatterlist sg_in;
66 struct scatterlist sg_out;
67 sector_t iv_sector;
68 };
69
70 struct crypt_config;
71
72 struct crypt_iv_operations {
73 int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
74 const char *opts);
75 void (*dtr)(struct crypt_config *cc);
76 int (*init)(struct crypt_config *cc);
77 int (*wipe)(struct crypt_config *cc);
78 int (*generator)(struct crypt_config *cc, u8 *iv,
79 struct dm_crypt_request *dmreq);
80 int (*post)(struct crypt_config *cc, u8 *iv,
81 struct dm_crypt_request *dmreq);
82 };
83
84 struct iv_essiv_private {
85 struct crypto_hash *hash_tfm;
86 u8 *salt;
87 };
88
89 struct iv_benbi_private {
90 int shift;
91 };
92
93 #define LMK_SEED_SIZE 64 /* hash + 0 */
94 struct iv_lmk_private {
95 struct crypto_shash *hash_tfm;
96 u8 *seed;
97 };
98
99 #define TCW_WHITENING_SIZE 16
100 struct iv_tcw_private {
101 struct crypto_shash *crc32_tfm;
102 u8 *iv_seed;
103 u8 *whitening;
104 };
105
106 /*
107 * Crypt: maps a linear range of a block device
108 * and encrypts / decrypts at the same time.
109 */
110 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, DM_CRYPT_SAME_CPU };
111
112 /*
113 * The fields in here must be read only after initialization.
114 */
115 struct crypt_config {
116 struct dm_dev *dev;
117 sector_t start;
118
119 /*
120 * mempools for crypto requests and
121 * encryption buffer pages
122 */
123 mempool_t *req_pool;
124 mempool_t *page_pool;
125 struct bio_set *bs;
126 struct mutex bio_alloc_lock;
127
128 struct workqueue_struct *io_queue;
129 struct workqueue_struct *crypt_queue;
130
131 char *cipher;
132 char *cipher_string;
133
134 struct crypt_iv_operations *iv_gen_ops;
135 union {
136 struct iv_essiv_private essiv;
137 struct iv_benbi_private benbi;
138 struct iv_lmk_private lmk;
139 struct iv_tcw_private tcw;
140 } iv_gen_private;
141 sector_t iv_offset;
142 unsigned int iv_size;
143
144 /* ESSIV: struct crypto_cipher *essiv_tfm */
145 void *iv_private;
146 struct crypto_ablkcipher **tfms;
147 unsigned tfms_count;
148
149 /*
150 * Layout of each crypto request:
151 *
152 * struct ablkcipher_request
153 * context
154 * padding
155 * struct dm_crypt_request
156 * padding
157 * IV
158 *
159 * The padding is added so that dm_crypt_request and the IV are
160 * correctly aligned.
161 */
162 unsigned int dmreq_start;
163
164 unsigned int per_bio_data_size;
165
166 unsigned long flags;
167 unsigned int key_size;
168 unsigned int key_parts; /* independent parts in key buffer */
169 unsigned int key_extra_size; /* additional keys length */
170 u8 key[0];
171 };
172
173 #define MIN_IOS 16
174
175 static void clone_init(struct dm_crypt_io *, struct bio *);
176 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
177 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
178
179 /*
180 * Use this to access cipher attributes that are the same for each CPU.
181 */
182 static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
183 {
184 return cc->tfms[0];
185 }
186
187 /*
188 * Different IV generation algorithms:
189 *
190 * plain: the initial vector is the 32-bit little-endian version of the sector
191 * number, padded with zeros if necessary.
192 *
193 * plain64: the initial vector is the 64-bit little-endian version of the sector
194 * number, padded with zeros if necessary.
195 *
196 * essiv: "encrypted sector|salt initial vector", the sector number is
197 * encrypted with the bulk cipher using a salt as key. The salt
198 * should be derived from the bulk cipher's key via hashing.
199 *
200 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
201 * (needed for LRW-32-AES and possibly other narrow block modes)
202 *
203 * null: the initial vector is always zero. Provides compatibility with
204 * obsolete loop_fish2 devices. Do not use for new devices.
205 *
206 * lmk: Compatible implementation of the block chaining mode used
207 * by the Loop-AES block device encryption system
208 * designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
209 * It operates on full 512 byte sectors and uses CBC
210 * with an IV derived from the sector number, the data and
211 * optionally an extra IV seed.
212 * This means that after decryption the first block
213 * of the sector must be tweaked according to the decrypted data.
214 * Loop-AES can use three encryption schemes:
215 * version 1: plain aes-cbc mode
216 * version 2: uses a 64-key (multikey) scheme with the lmk IV generator
217 * version 3: the same as version 2 with additional IV seed
218 * (it uses 65 keys, last key is used as IV seed)
219 *
220 * tcw: Compatible implementation of the block chaining mode used
221 * by the TrueCrypt device encryption system (prior to version 4.1).
222 * For more info see: http://www.truecrypt.org
223 * It operates on full 512 byte sectors and uses CBC
224 * with an IV derived from initial key and the sector number.
225 * In addition, a whitening value is applied to every sector; the whitening
226 * is calculated from the initial key and the sector number and mixed using CRC32.
227 * Note that this encryption scheme is vulnerable to watermarking attacks
228 * and should only be used to access old, compatible containers.
229 *
230 * plumb: unimplemented, see:
231 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
232 */
233
234 static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
235 struct dm_crypt_request *dmreq)
236 {
237 memset(iv, 0, cc->iv_size);
238 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
239
240 return 0;
241 }
242
243 static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
244 struct dm_crypt_request *dmreq)
245 {
246 memset(iv, 0, cc->iv_size);
247 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
248
249 return 0;
250 }
251
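
/*
 * The two generators above only encode the sector number into a zeroed IV.
 * A minimal userspace sketch follows (illustrative only: the 16-byte IV size
 * and the sector value are assumptions, and byte-wise shifts stand in for
 * cpu_to_le32()/cpu_to_le64()).
 */

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void plain_iv(uint8_t *iv, size_t iv_size, uint64_t sector)
{
	memset(iv, 0, iv_size);
	for (int i = 0; i < 4; i++)		/* 32-bit little endian */
		iv[i] = (uint8_t)(sector >> (8 * i));
}

static void plain64_iv(uint8_t *iv, size_t iv_size, uint64_t sector)
{
	memset(iv, 0, iv_size);
	for (int i = 0; i < 8; i++)		/* 64-bit little endian */
		iv[i] = (uint8_t)(sector >> (8 * i));
}

int main(void)
{
	uint8_t iv[16];
	size_t i;

	plain_iv(iv, sizeof(iv), 0x0123456789abcdefULL);
	for (i = 0; i < sizeof(iv); i++)
		printf("%02x", iv[i]);
	printf("\n");	/* efcdab89 then zeros: the high 32 bits are dropped */

	plain64_iv(iv, sizeof(iv), 0x0123456789abcdefULL);
	for (i = 0; i < sizeof(iv); i++)
		printf("%02x", iv[i]);
	printf("\n");	/* efcdab8967452301 then zeros */
	return 0;
}
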
252 /* Initialise ESSIV - compute salt but no local memory allocations */
253 static int crypt_iv_essiv_init(struct crypt_config *cc)
254 {
255 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
256 struct hash_desc desc;
257 struct scatterlist sg;
258 struct crypto_cipher *essiv_tfm;
259 int err;
260
261 sg_init_one(&sg, cc->key, cc->key_size);
262 desc.tfm = essiv->hash_tfm;
263 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
264
265 err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
266 if (err)
267 return err;
268
269 essiv_tfm = cc->iv_private;
270
271 err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
272 crypto_hash_digestsize(essiv->hash_tfm));
273 if (err)
274 return err;
275
276 return 0;
277 }
278
279 /* Wipe salt and reset key derived from volume key */
280 static int crypt_iv_essiv_wipe(struct crypt_config *cc)
281 {
282 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
283 unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
284 struct crypto_cipher *essiv_tfm;
285 int r, err = 0;
286
287 memset(essiv->salt, 0, salt_size);
288
289 essiv_tfm = cc->iv_private;
290 r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
291 if (r)
292 err = r;
293
294 return err;
295 }
296
297 /* Set up per cpu cipher state */
298 static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
299 struct dm_target *ti,
300 u8 *salt, unsigned saltsize)
301 {
302 struct crypto_cipher *essiv_tfm;
303 int err;
304
305 /* Setup the essiv_tfm with the given salt */
306 essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
307 if (IS_ERR(essiv_tfm)) {
308 ti->error = "Error allocating crypto tfm for ESSIV";
309 return essiv_tfm;
310 }
311
312 if (crypto_cipher_blocksize(essiv_tfm) !=
313 crypto_ablkcipher_ivsize(any_tfm(cc))) {
314 ti->error = "Block size of ESSIV cipher does "
315 "not match IV size of block cipher";
316 crypto_free_cipher(essiv_tfm);
317 return ERR_PTR(-EINVAL);
318 }
319
320 err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
321 if (err) {
322 ti->error = "Failed to set key for ESSIV cipher";
323 crypto_free_cipher(essiv_tfm);
324 return ERR_PTR(err);
325 }
326
327 return essiv_tfm;
328 }
329
330 static void crypt_iv_essiv_dtr(struct crypt_config *cc)
331 {
332 struct crypto_cipher *essiv_tfm;
333 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
334
335 crypto_free_hash(essiv->hash_tfm);
336 essiv->hash_tfm = NULL;
337
338 kzfree(essiv->salt);
339 essiv->salt = NULL;
340
341 essiv_tfm = cc->iv_private;
342
343 if (essiv_tfm)
344 crypto_free_cipher(essiv_tfm);
345
346 cc->iv_private = NULL;
347 }
348
349 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
350 const char *opts)
351 {
352 struct crypto_cipher *essiv_tfm = NULL;
353 struct crypto_hash *hash_tfm = NULL;
354 u8 *salt = NULL;
355 int err;
356
357 if (!opts) {
358 ti->error = "Digest algorithm missing for ESSIV mode";
359 return -EINVAL;
360 }
361
362 /* Allocate hash algorithm */
363 hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
364 if (IS_ERR(hash_tfm)) {
365 ti->error = "Error initializing ESSIV hash";
366 err = PTR_ERR(hash_tfm);
367 goto bad;
368 }
369
370 salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
371 if (!salt) {
372 ti->error = "Error kmallocing salt storage in ESSIV";
373 err = -ENOMEM;
374 goto bad;
375 }
376
377 cc->iv_gen_private.essiv.salt = salt;
378 cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
379
380 essiv_tfm = setup_essiv_cpu(cc, ti, salt,
381 crypto_hash_digestsize(hash_tfm));
382 if (IS_ERR(essiv_tfm)) {
383 crypt_iv_essiv_dtr(cc);
384 return PTR_ERR(essiv_tfm);
385 }
386 cc->iv_private = essiv_tfm;
387
388 return 0;
389
390 bad:
391 if (hash_tfm && !IS_ERR(hash_tfm))
392 crypto_free_hash(hash_tfm);
393 kfree(salt);
394 return err;
395 }
396
397 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
398 struct dm_crypt_request *dmreq)
399 {
400 struct crypto_cipher *essiv_tfm = cc->iv_private;
401
402 memset(iv, 0, cc->iv_size);
403 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
404 crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
405
406 return 0;
407 }
408
409 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
410 const char *opts)
411 {
412 unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
413 int log = ilog2(bs);
414
415 /* We need to calculate how far we must shift the sector count
416 * to get the cipher block count; we use this shift in _gen. */
417
418 if (1 << log != bs) {
419 ti->error = "cipher blocksize is not a power of 2";
420 return -EINVAL;
421 }
422
423 if (log > 9) {
424 ti->error = "cipher blocksize is > 512";
425 return -EINVAL;
426 }
427
428 cc->iv_gen_private.benbi.shift = 9 - log;
429
430 return 0;
431 }
432
433 static void crypt_iv_benbi_dtr(struct crypt_config *cc)
434 {
435 }
436
437 static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
438 struct dm_crypt_request *dmreq)
439 {
440 __be64 val;
441
442 memset(iv, 0, cc->iv_size - sizeof(u64)); /* the last u64 is set below */
443
444 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
445 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
446
447 return 0;
448 }
449
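
/*
 * A worked example of the benbi arithmetic (standalone sketch; the 16-byte
 * cipher block size is an assumption matching AES): shift = 9 - log2(16) = 5,
 * so sector 3 maps to the big-endian block count (3 << 5) + 1 = 97 stored in
 * the IV tail.
 */

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int bs = 16;		/* assumed cipher block size */
	int log = 0;
	uint64_t sector = 3, count;

	while ((1u << log) < bs)	/* ilog2() for a power of two */
		log++;

	/* 512-byte sector -> narrow-block count, counting from 1 */
	count = (sector << (9 - log)) + 1;

	printf("shift=%d count=%llu\n", 9 - log, (unsigned long long)count);
	/* prints: shift=5 count=97 */
	return 0;
}
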
450 static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
451 struct dm_crypt_request *dmreq)
452 {
453 memset(iv, 0, cc->iv_size);
454
455 return 0;
456 }
457
458 static void crypt_iv_lmk_dtr(struct crypt_config *cc)
459 {
460 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
461
462 if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
463 crypto_free_shash(lmk->hash_tfm);
464 lmk->hash_tfm = NULL;
465
466 kzfree(lmk->seed);
467 lmk->seed = NULL;
468 }
469
470 static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
471 const char *opts)
472 {
473 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
474
475 lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
476 if (IS_ERR(lmk->hash_tfm)) {
477 ti->error = "Error initializing LMK hash";
478 return PTR_ERR(lmk->hash_tfm);
479 }
480
481 /* No seed in LMK version 2 */
482 if (cc->key_parts == cc->tfms_count) {
483 lmk->seed = NULL;
484 return 0;
485 }
486
487 lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
488 if (!lmk->seed) {
489 crypt_iv_lmk_dtr(cc);
490 ti->error = "Error kmallocing seed storage in LMK";
491 return -ENOMEM;
492 }
493
494 return 0;
495 }
496
497 static int crypt_iv_lmk_init(struct crypt_config *cc)
498 {
499 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
500 int subkey_size = cc->key_size / cc->key_parts;
501
502 /* The LMK seed is stored as key number LMK_KEYS + 1 (the last key) */
503 if (lmk->seed)
504 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
505 crypto_shash_digestsize(lmk->hash_tfm));
506
507 return 0;
508 }
509
510 static int crypt_iv_lmk_wipe(struct crypt_config *cc)
511 {
512 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
513
514 if (lmk->seed)
515 memset(lmk->seed, 0, LMK_SEED_SIZE);
516
517 return 0;
518 }
519
520 static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
521 struct dm_crypt_request *dmreq,
522 u8 *data)
523 {
524 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
525 SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
526 struct md5_state md5state;
527 __le32 buf[4];
528 int i, r;
529
530 desc->tfm = lmk->hash_tfm;
531 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
532
533 r = crypto_shash_init(desc);
534 if (r)
535 return r;
536
537 if (lmk->seed) {
538 r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
539 if (r)
540 return r;
541 }
542
543 /* Sector is always 512B, block size 16, add data of blocks 1-31 */
544 r = crypto_shash_update(desc, data + 16, 16 * 31);
545 if (r)
546 return r;
547
548 /* Sector is cropped to 56 bits here */
549 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
550 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
551 buf[2] = cpu_to_le32(4024);
552 buf[3] = 0;
553 r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
554 if (r)
555 return r;
556
557 /* No MD5 padding here */
558 r = crypto_shash_export(desc, &md5state);
559 if (r)
560 return r;
561
562 for (i = 0; i < MD5_HASH_WORDS; i++)
563 __cpu_to_le32s(&md5state.hash[i]);
564 memcpy(iv, &md5state.hash, cc->iv_size);
565
566 return 0;
567 }
568
569 static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
570 struct dm_crypt_request *dmreq)
571 {
572 u8 *src;
573 int r = 0;
574
575 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
576 src = kmap_atomic(sg_page(&dmreq->sg_in));
577 r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
578 kunmap_atomic(src);
579 } else
580 memset(iv, 0, cc->iv_size);
581
582 return r;
583 }
584
585 static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
586 struct dm_crypt_request *dmreq)
587 {
588 u8 *dst;
589 int r;
590
591 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
592 return 0;
593
594 dst = kmap_atomic(sg_page(&dmreq->sg_out));
595 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
596
597 /* Tweak the first block of plaintext sector */
598 if (!r)
599 crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
600
601 kunmap_atomic(dst);
602 return r;
603 }
604
605 static void crypt_iv_tcw_dtr(struct crypt_config *cc)
606 {
607 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
608
609 kzfree(tcw->iv_seed);
610 tcw->iv_seed = NULL;
611 kzfree(tcw->whitening);
612 tcw->whitening = NULL;
613
614 if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
615 crypto_free_shash(tcw->crc32_tfm);
616 tcw->crc32_tfm = NULL;
617 }
618
619 static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
620 const char *opts)
621 {
622 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
623
624 if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
625 ti->error = "Wrong key size for TCW";
626 return -EINVAL;
627 }
628
629 tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
630 if (IS_ERR(tcw->crc32_tfm)) {
631 ti->error = "Error initializing CRC32 in TCW";
632 return PTR_ERR(tcw->crc32_tfm);
633 }
634
635 tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
636 tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
637 if (!tcw->iv_seed || !tcw->whitening) {
638 crypt_iv_tcw_dtr(cc);
639 ti->error = "Error allocating seed storage in TCW";
640 return -ENOMEM;
641 }
642
643 return 0;
644 }
645
646 static int crypt_iv_tcw_init(struct crypt_config *cc)
647 {
648 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
649 int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
650
651 memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
652 memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
653 TCW_WHITENING_SIZE);
654
655 return 0;
656 }
657
658 static int crypt_iv_tcw_wipe(struct crypt_config *cc)
659 {
660 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
661
662 memset(tcw->iv_seed, 0, cc->iv_size);
663 memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
664
665 return 0;
666 }
667
668 static int crypt_iv_tcw_whitening(struct crypt_config *cc,
669 struct dm_crypt_request *dmreq,
670 u8 *data)
671 {
672 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
673 u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
674 u8 buf[TCW_WHITENING_SIZE];
675 SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
676 int i, r;
677
678 /* xor whitening with sector number */
679 memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
680 crypto_xor(buf, (u8 *)&sector, 8);
681 crypto_xor(&buf[8], (u8 *)&sector, 8);
682
683 /* calculate crc32 for every 32-bit part and xor it */
684 desc->tfm = tcw->crc32_tfm;
685 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
686 for (i = 0; i < 4; i++) {
687 r = crypto_shash_init(desc);
688 if (r)
689 goto out;
690 r = crypto_shash_update(desc, &buf[i * 4], 4);
691 if (r)
692 goto out;
693 r = crypto_shash_final(desc, &buf[i * 4]);
694 if (r)
695 goto out;
696 }
697 crypto_xor(&buf[0], &buf[12], 4);
698 crypto_xor(&buf[4], &buf[8], 4);
699
700 /* apply whitening (8 bytes) to whole sector */
701 for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
702 crypto_xor(data + i * 8, buf, 8);
703 out:
704 memzero_explicit(buf, sizeof(buf));
705 return r;
706 }
707
708 static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
709 struct dm_crypt_request *dmreq)
710 {
711 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
712 u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
713 u8 *src;
714 int r = 0;
715
716 /* Remove whitening from ciphertext */
717 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
718 src = kmap_atomic(sg_page(&dmreq->sg_in));
719 r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
720 kunmap_atomic(src);
721 }
722
723 /* Calculate IV */
724 memcpy(iv, tcw->iv_seed, cc->iv_size);
725 crypto_xor(iv, (u8 *)&sector, 8);
726 if (cc->iv_size > 8)
727 crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);
728
729 return r;
730 }
731
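
/*
 * A rough userspace sketch of the IV derivation just above, with the
 * whitening step omitted; the all-zero 16-byte seed, the IV size and the
 * sector number are made-up values.
 */

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t seed[16] = { 0 };	/* made-up iv_seed (normally taken from the key tail) */
	uint8_t iv[16], sec[8];
	uint64_t sector = 42;
	int i;

	for (i = 0; i < 8; i++)		/* little-endian sector bytes */
		sec[i] = (uint8_t)(sector >> (8 * i));

	memcpy(iv, seed, sizeof(iv));
	for (i = 0; i < 8; i++)		/* xor sector into the first 8 bytes */
		iv[i] ^= sec[i];
	for (i = 0; i < (int)sizeof(iv) - 8; i++)	/* iv_size > 8: xor into the rest too */
		iv[8 + i] ^= sec[i];

	for (i = 0; i < (int)sizeof(iv); i++)
		printf("%02x", iv[i]);
	printf("\n");			/* 2a000000000000002a00000000000000 */
	return 0;
}
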
732 static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
733 struct dm_crypt_request *dmreq)
734 {
735 u8 *dst;
736 int r;
737
738 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
739 return 0;
740
741 /* Apply whitening on ciphertext */
742 dst = kmap_atomic(sg_page(&dmreq->sg_out));
743 r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
744 kunmap_atomic(dst);
745
746 return r;
747 }
748
749 static struct crypt_iv_operations crypt_iv_plain_ops = {
750 .generator = crypt_iv_plain_gen
751 };
752
753 static struct crypt_iv_operations crypt_iv_plain64_ops = {
754 .generator = crypt_iv_plain64_gen
755 };
756
757 static struct crypt_iv_operations crypt_iv_essiv_ops = {
758 .ctr = crypt_iv_essiv_ctr,
759 .dtr = crypt_iv_essiv_dtr,
760 .init = crypt_iv_essiv_init,
761 .wipe = crypt_iv_essiv_wipe,
762 .generator = crypt_iv_essiv_gen
763 };
764
765 static struct crypt_iv_operations crypt_iv_benbi_ops = {
766 .ctr = crypt_iv_benbi_ctr,
767 .dtr = crypt_iv_benbi_dtr,
768 .generator = crypt_iv_benbi_gen
769 };
770
771 static struct crypt_iv_operations crypt_iv_null_ops = {
772 .generator = crypt_iv_null_gen
773 };
774
775 static struct crypt_iv_operations crypt_iv_lmk_ops = {
776 .ctr = crypt_iv_lmk_ctr,
777 .dtr = crypt_iv_lmk_dtr,
778 .init = crypt_iv_lmk_init,
779 .wipe = crypt_iv_lmk_wipe,
780 .generator = crypt_iv_lmk_gen,
781 .post = crypt_iv_lmk_post
782 };
783
784 static struct crypt_iv_operations crypt_iv_tcw_ops = {
785 .ctr = crypt_iv_tcw_ctr,
786 .dtr = crypt_iv_tcw_dtr,
787 .init = crypt_iv_tcw_init,
788 .wipe = crypt_iv_tcw_wipe,
789 .generator = crypt_iv_tcw_gen,
790 .post = crypt_iv_tcw_post
791 };
792
793 static void crypt_convert_init(struct crypt_config *cc,
794 struct convert_context *ctx,
795 struct bio *bio_out, struct bio *bio_in,
796 sector_t sector)
797 {
798 ctx->bio_in = bio_in;
799 ctx->bio_out = bio_out;
800 if (bio_in)
801 ctx->iter_in = bio_in->bi_iter;
802 if (bio_out)
803 ctx->iter_out = bio_out->bi_iter;
804 ctx->cc_sector = sector + cc->iv_offset;
805 init_completion(&ctx->restart);
806 }
807
808 static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
809 struct ablkcipher_request *req)
810 {
811 return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
812 }
813
814 static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
815 struct dm_crypt_request *dmreq)
816 {
817 return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
818 }
819
820 static u8 *iv_of_dmreq(struct crypt_config *cc,
821 struct dm_crypt_request *dmreq)
822 {
823 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
824 crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
825 }
826
827 static int crypt_convert_block(struct crypt_config *cc,
828 struct convert_context *ctx,
829 struct ablkcipher_request *req)
830 {
831 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
832 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
833 struct dm_crypt_request *dmreq;
834 u8 *iv;
835 int r;
836
837 dmreq = dmreq_of_req(cc, req);
838 iv = iv_of_dmreq(cc, dmreq);
839
840 dmreq->iv_sector = ctx->cc_sector;
841 dmreq->ctx = ctx;
842 sg_init_table(&dmreq->sg_in, 1);
843 sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
844 bv_in.bv_offset);
845
846 sg_init_table(&dmreq->sg_out, 1);
847 sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
848 bv_out.bv_offset);
849
850 bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
851 bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
852
853 if (cc->iv_gen_ops) {
854 r = cc->iv_gen_ops->generator(cc, iv, dmreq);
855 if (r < 0)
856 return r;
857 }
858
859 ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
860 1 << SECTOR_SHIFT, iv);
861
862 if (bio_data_dir(ctx->bio_in) == WRITE)
863 r = crypto_ablkcipher_encrypt(req);
864 else
865 r = crypto_ablkcipher_decrypt(req);
866
867 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
868 r = cc->iv_gen_ops->post(cc, iv, dmreq);
869
870 return r;
871 }
872
873 static void kcryptd_async_done(struct crypto_async_request *async_req,
874 int error);
875
876 static void crypt_alloc_req(struct crypt_config *cc,
877 struct convert_context *ctx)
878 {
879 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
880
881 if (!ctx->req)
882 ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
883
884 ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
885 ablkcipher_request_set_callback(ctx->req,
886 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
887 kcryptd_async_done, dmreq_of_req(cc, ctx->req));
888 }
889
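/*
 * Note: for the common case, crypt_map() points ctx->req at the
 * ablkcipher_request embedded directly behind struct dm_crypt_io in the
 * per-bio data (see cc->per_bio_data_size in crypt_ctr()). Only requests
 * that were allocated separately from cc->req_pool are returned to the
 * mempool below.
 */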
890 static void crypt_free_req(struct crypt_config *cc,
891 struct ablkcipher_request *req, struct bio *base_bio)
892 {
893 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
894
895 if ((struct ablkcipher_request *)(io + 1) != req)
896 mempool_free(req, cc->req_pool);
897 }
898
899 /*
900 * Encrypt / decrypt data from one bio to another one (can be the same one)
901 */
902 static int crypt_convert(struct crypt_config *cc,
903 struct convert_context *ctx)
904 {
905 int r;
906
907 atomic_set(&ctx->cc_pending, 1);
908
909 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
910
911 crypt_alloc_req(cc, ctx);
912
913 atomic_inc(&ctx->cc_pending);
914
915 r = crypt_convert_block(cc, ctx, ctx->req);
916
917 switch (r) {
918 /* async */
919 case -EBUSY:
920 wait_for_completion(&ctx->restart);
921 reinit_completion(&ctx->restart);
922 /* fall through*/
923 case -EINPROGRESS:
924 ctx->req = NULL;
925 ctx->cc_sector++;
926 continue;
927
928 /* sync */
929 case 0:
930 atomic_dec(&ctx->cc_pending);
931 ctx->cc_sector++;
932 cond_resched();
933 continue;
934
935 /* error */
936 default:
937 atomic_dec(&ctx->cc_pending);
938 return r;
939 }
940 }
941
942 return 0;
943 }
944
945 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
946
947 /*
948 * Generate a new unfragmented bio with the given size.
949 * This should never violate the device limitations.
950 *
951 * This function may be called concurrently. If we allocate from the mempool
952 * concurrently, there is a possibility of deadlock. For example, if we have
953 * a mempool of 256 pages and two processes, each wanting 256 pages, allocate
954 * from the mempool concurrently, it may deadlock in a situation where both
955 * processes have allocated 128 pages and the mempool is exhausted.
956 *
957 * In order to avoid this scenario we allocate the pages under a mutex.
958 *
959 * In order to not degrade performance with excessive locking, we try
960 * non-blocking allocations without a mutex first but on failure we fall back
961 * to blocking allocations with a mutex.
962 */
963 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
964 {
965 struct crypt_config *cc = io->cc;
966 struct bio *clone;
967 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
968 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
969 unsigned i, len, remaining_size;
970 struct page *page;
971 struct bio_vec *bvec;
972
973 retry:
974 if (unlikely(gfp_mask & __GFP_WAIT))
975 mutex_lock(&cc->bio_alloc_lock);
976
977 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
978 if (!clone)
979 goto return_clone;
980
981 clone_init(io, clone);
982
983 remaining_size = size;
984
985 for (i = 0; i < nr_iovecs; i++) {
986 page = mempool_alloc(cc->page_pool, gfp_mask);
987 if (!page) {
988 crypt_free_buffer_pages(cc, clone);
989 bio_put(clone);
990 gfp_mask |= __GFP_WAIT;
991 goto retry;
992 }
993
994 len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
995
996 bvec = &clone->bi_io_vec[clone->bi_vcnt++];
997 bvec->bv_page = page;
998 bvec->bv_len = len;
999 bvec->bv_offset = 0;
1000
1001 clone->bi_iter.bi_size += len;
1002
1003 remaining_size -= len;
1004 }
1005
1006 return_clone:
1007 if (unlikely(gfp_mask & __GFP_WAIT))
1008 mutex_unlock(&cc->bio_alloc_lock);
1009
1010 return clone;
1011 }
1012
1013 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
1014 {
1015 unsigned int i;
1016 struct bio_vec *bv;
1017
1018 bio_for_each_segment_all(bv, clone, i) {
1019 BUG_ON(!bv->bv_page);
1020 mempool_free(bv->bv_page, cc->page_pool);
1021 bv->bv_page = NULL;
1022 }
1023 }
1024
1025 static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1026 struct bio *bio, sector_t sector)
1027 {
1028 io->cc = cc;
1029 io->base_bio = bio;
1030 io->sector = sector;
1031 io->error = 0;
1032 io->ctx.req = NULL;
1033 atomic_set(&io->io_pending, 0);
1034 }
1035
1036 static void crypt_inc_pending(struct dm_crypt_io *io)
1037 {
1038 atomic_inc(&io->io_pending);
1039 }
1040
1041 /*
1042 * One of the bios was finished. Check for completion of
1043 * the whole request and correctly clean up the buffer.
1044 */
1045 static void crypt_dec_pending(struct dm_crypt_io *io)
1046 {
1047 struct crypt_config *cc = io->cc;
1048 struct bio *base_bio = io->base_bio;
1049 int error = io->error;
1050
1051 if (!atomic_dec_and_test(&io->io_pending))
1052 return;
1053
1054 if (io->ctx.req)
1055 crypt_free_req(cc, io->ctx.req, base_bio);
1056
1057 bio_endio(base_bio, error);
1058 }
1059
1060 /*
1061 * kcryptd/kcryptd_io:
1062 *
1063 * Needed because it would be very unwise to do decryption in an
1064 * interrupt context.
1065 *
1066 * kcryptd performs the actual encryption or decryption.
1067 *
1068 * kcryptd_io performs the IO submission.
1069 *
1070 * They must be separated as otherwise the final stages could be
1071 * starved by new requests which can block in the first stages due
1072 * to memory allocation.
1073 *
1074 * The work is done per-CPU, globally for all dm-crypt instances.
1075 * The stages should not depend on each other and do not block.
1076 */
1077 static void crypt_endio(struct bio *clone, int error)
1078 {
1079 struct dm_crypt_io *io = clone->bi_private;
1080 struct crypt_config *cc = io->cc;
1081 unsigned rw = bio_data_dir(clone);
1082
1083 if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
1084 error = -EIO;
1085
1086 /*
1087 * free the processed pages
1088 */
1089 if (rw == WRITE)
1090 crypt_free_buffer_pages(cc, clone);
1091
1092 bio_put(clone);
1093
1094 if (rw == READ && !error) {
1095 kcryptd_queue_crypt(io);
1096 return;
1097 }
1098
1099 if (unlikely(error))
1100 io->error = error;
1101
1102 crypt_dec_pending(io);
1103 }
1104
1105 static void clone_init(struct dm_crypt_io *io, struct bio *clone)
1106 {
1107 struct crypt_config *cc = io->cc;
1108
1109 clone->bi_private = io;
1110 clone->bi_end_io = crypt_endio;
1111 clone->bi_bdev = cc->dev->bdev;
1112 clone->bi_rw = io->base_bio->bi_rw;
1113 }
1114
1115 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1116 {
1117 struct crypt_config *cc = io->cc;
1118 struct bio *base_bio = io->base_bio;
1119 struct bio *clone;
1120
1121 /*
1122 * The block layer might modify the bvec array, so always
1123 * copy the required bvecs because we need the original
1124 * one in order to decrypt the whole bio data *afterwards*.
1125 */
1126 clone = bio_clone_bioset(base_bio, gfp, cc->bs);
1127 if (!clone)
1128 return 1;
1129
1130 crypt_inc_pending(io);
1131
1132 clone_init(io, clone);
1133 clone->bi_iter.bi_sector = cc->start + io->sector;
1134
1135 generic_make_request(clone);
1136 return 0;
1137 }
1138
1139 static void kcryptd_io_write(struct dm_crypt_io *io)
1140 {
1141 struct bio *clone = io->ctx.bio_out;
1142 generic_make_request(clone);
1143 }
1144
1145 static void kcryptd_io(struct work_struct *work)
1146 {
1147 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1148
1149 if (bio_data_dir(io->base_bio) == READ) {
1150 crypt_inc_pending(io);
1151 if (kcryptd_io_read(io, GFP_NOIO))
1152 io->error = -ENOMEM;
1153 crypt_dec_pending(io);
1154 } else
1155 kcryptd_io_write(io);
1156 }
1157
1158 static void kcryptd_queue_io(struct dm_crypt_io *io)
1159 {
1160 struct crypt_config *cc = io->cc;
1161
1162 INIT_WORK(&io->work, kcryptd_io);
1163 queue_work(cc->io_queue, &io->work);
1164 }
1165
1166 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1167 {
1168 struct bio *clone = io->ctx.bio_out;
1169 struct crypt_config *cc = io->cc;
1170
1171 if (unlikely(io->error < 0)) {
1172 crypt_free_buffer_pages(cc, clone);
1173 bio_put(clone);
1174 crypt_dec_pending(io);
1175 return;
1176 }
1177
1178 /* crypt_convert should have filled the clone bio */
1179 BUG_ON(io->ctx.iter_out.bi_size);
1180
1181 clone->bi_iter.bi_sector = cc->start + io->sector;
1182
1183 if (async)
1184 kcryptd_queue_io(io);
1185 else
1186 generic_make_request(clone);
1187 }
1188
1189 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1190 {
1191 struct crypt_config *cc = io->cc;
1192 struct bio *clone;
1193 int crypt_finished;
1194 sector_t sector = io->sector;
1195 int r;
1196
1197 /*
1198 * Prevent io from disappearing until this function completes.
1199 */
1200 crypt_inc_pending(io);
1201 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
1202
1203 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
1204 if (unlikely(!clone)) {
1205 io->error = -EIO;
1206 goto dec;
1207 }
1208
1209 io->ctx.bio_out = clone;
1210 io->ctx.iter_out = clone->bi_iter;
1211
1212 sector += bio_sectors(clone);
1213
1214 crypt_inc_pending(io);
1215 r = crypt_convert(cc, &io->ctx);
1216 if (r)
1217 io->error = -EIO;
1218 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
1219
1220 /* Encryption was already finished, submit io now */
1221 if (crypt_finished) {
1222 kcryptd_crypt_write_io_submit(io, 0);
1223 io->sector = sector;
1224 }
1225
1226 dec:
1227 crypt_dec_pending(io);
1228 }
1229
1230 static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
1231 {
1232 crypt_dec_pending(io);
1233 }
1234
1235 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
1236 {
1237 struct crypt_config *cc = io->cc;
1238 int r = 0;
1239
1240 crypt_inc_pending(io);
1241
1242 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
1243 io->sector);
1244
1245 r = crypt_convert(cc, &io->ctx);
1246 if (r < 0)
1247 io->error = -EIO;
1248
1249 if (atomic_dec_and_test(&io->ctx.cc_pending))
1250 kcryptd_crypt_read_done(io);
1251
1252 crypt_dec_pending(io);
1253 }
1254
1255 static void kcryptd_async_done(struct crypto_async_request *async_req,
1256 int error)
1257 {
1258 struct dm_crypt_request *dmreq = async_req->data;
1259 struct convert_context *ctx = dmreq->ctx;
1260 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
1261 struct crypt_config *cc = io->cc;
1262
1263 if (error == -EINPROGRESS) {
1264 complete(&ctx->restart);
1265 return;
1266 }
1267
1268 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
1269 error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
1270
1271 if (error < 0)
1272 io->error = -EIO;
1273
1274 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
1275
1276 if (!atomic_dec_and_test(&ctx->cc_pending))
1277 return;
1278
1279 if (bio_data_dir(io->base_bio) == READ)
1280 kcryptd_crypt_read_done(io);
1281 else
1282 kcryptd_crypt_write_io_submit(io, 1);
1283 }
1284
1285 static void kcryptd_crypt(struct work_struct *work)
1286 {
1287 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1288
1289 if (bio_data_dir(io->base_bio) == READ)
1290 kcryptd_crypt_read_convert(io);
1291 else
1292 kcryptd_crypt_write_convert(io);
1293 }
1294
1295 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
1296 {
1297 struct crypt_config *cc = io->cc;
1298
1299 INIT_WORK(&io->work, kcryptd_crypt);
1300 queue_work(cc->crypt_queue, &io->work);
1301 }
1302
1303 /*
1304 * Decode key from its hex representation
1305 */
1306 static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
1307 {
1308 char buffer[3];
1309 unsigned int i;
1310
1311 buffer[2] = '\0';
1312
1313 for (i = 0; i < size; i++) {
1314 buffer[0] = *hex++;
1315 buffer[1] = *hex++;
1316
1317 if (kstrtou8(buffer, 16, &key[i]))
1318 return -EINVAL;
1319 }
1320
1321 if (*hex != '\0')
1322 return -EINVAL;
1323
1324 return 0;
1325 }
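
/*
 * A userspace equivalent of the decoding above, for illustration; sscanf()
 * stands in for kstrtou8() and the sample key string is arbitrary.
 */

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Decode exactly 2 * size hex digits into key[]; 0 on success, -1 on error */
static int decode_key(uint8_t *key, const char *hex, size_t size)
{
	size_t i;

	if (strlen(hex) != 2 * size)
		return -1;

	for (i = 0; i < size; i++) {
		unsigned int byte;

		if (sscanf(hex + 2 * i, "%2x", &byte) != 1)
			return -1;
		key[i] = (uint8_t)byte;
	}
	return 0;
}

int main(void)
{
	uint8_t key[4];

	if (!decode_key(key, "deadbeef", sizeof(key)))
		printf("%02x %02x %02x %02x\n", key[0], key[1], key[2], key[3]);
	return 0;	/* prints: de ad be ef */
}
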
1326
1327 static void crypt_free_tfms(struct crypt_config *cc)
1328 {
1329 unsigned i;
1330
1331 if (!cc->tfms)
1332 return;
1333
1334 for (i = 0; i < cc->tfms_count; i++)
1335 if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
1336 crypto_free_ablkcipher(cc->tfms[i]);
1337 cc->tfms[i] = NULL;
1338 }
1339
1340 kfree(cc->tfms);
1341 cc->tfms = NULL;
1342 }
1343
1344 static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
1345 {
1346 unsigned i;
1347 int err;
1348
1349 cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
1350 GFP_KERNEL);
1351 if (!cc->tfms)
1352 return -ENOMEM;
1353
1354 for (i = 0; i < cc->tfms_count; i++) {
1355 cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
1356 if (IS_ERR(cc->tfms[i])) {
1357 err = PTR_ERR(cc->tfms[i]);
1358 crypt_free_tfms(cc);
1359 return err;
1360 }
1361 }
1362
1363 return 0;
1364 }
1365
1366 static int crypt_setkey_allcpus(struct crypt_config *cc)
1367 {
1368 unsigned subkey_size;
1369 int err = 0, i, r;
1370
1371 /* Ignore extra keys (which are used for IV etc) */
1372 subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
1373
1374 for (i = 0; i < cc->tfms_count; i++) {
1375 r = crypto_ablkcipher_setkey(cc->tfms[i],
1376 cc->key + (i * subkey_size),
1377 subkey_size);
1378 if (r)
1379 err = r;
1380 }
1381
1382 return err;
1383 }
1384
1385 static int crypt_set_key(struct crypt_config *cc, char *key)
1386 {
1387 int r = -EINVAL;
1388 int key_string_len = strlen(key);
1389
1390 /* The key size may not be changed. */
1391 if (cc->key_size != (key_string_len >> 1))
1392 goto out;
1393
1394 /* Hyphen (which gives a key_size of zero) means there is no key. */
1395 if (!cc->key_size && strcmp(key, "-"))
1396 goto out;
1397
1398 if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
1399 goto out;
1400
1401 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
1402
1403 r = crypt_setkey_allcpus(cc);
1404
1405 out:
1406 /* Hex key string not needed after here, so wipe it. */
1407 memset(key, '0', key_string_len);
1408
1409 return r;
1410 }
1411
1412 static int crypt_wipe_key(struct crypt_config *cc)
1413 {
1414 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
1415 memset(&cc->key, 0, cc->key_size * sizeof(u8));
1416
1417 return crypt_setkey_allcpus(cc);
1418 }
1419
1420 static void crypt_dtr(struct dm_target *ti)
1421 {
1422 struct crypt_config *cc = ti->private;
1423
1424 ti->private = NULL;
1425
1426 if (!cc)
1427 return;
1428
1429 if (cc->io_queue)
1430 destroy_workqueue(cc->io_queue);
1431 if (cc->crypt_queue)
1432 destroy_workqueue(cc->crypt_queue);
1433
1434 crypt_free_tfms(cc);
1435
1436 if (cc->bs)
1437 bioset_free(cc->bs);
1438
1439 if (cc->page_pool)
1440 mempool_destroy(cc->page_pool);
1441 if (cc->req_pool)
1442 mempool_destroy(cc->req_pool);
1443
1444 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
1445 cc->iv_gen_ops->dtr(cc);
1446
1447 if (cc->dev)
1448 dm_put_device(ti, cc->dev);
1449
1450 kzfree(cc->cipher);
1451 kzfree(cc->cipher_string);
1452
1453 /* Must zero key material before freeing */
1454 kzfree(cc);
1455 }
1456
1457 static int crypt_ctr_cipher(struct dm_target *ti,
1458 char *cipher_in, char *key)
1459 {
1460 struct crypt_config *cc = ti->private;
1461 char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
1462 char *cipher_api = NULL;
1463 int ret = -EINVAL;
1464 char dummy;
1465
1466 /* Convert to crypto api definition? */
1467 if (strchr(cipher_in, '(')) {
1468 ti->error = "Bad cipher specification";
1469 return -EINVAL;
1470 }
1471
1472 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
1473 if (!cc->cipher_string)
1474 goto bad_mem;
1475
1476 /*
1477 * Legacy dm-crypt cipher specification
1478 * cipher[:keycount]-mode-iv:ivopts
1479 */
1480 tmp = cipher_in;
1481 keycount = strsep(&tmp, "-");
1482 cipher = strsep(&keycount, ":");
1483
1484 if (!keycount)
1485 cc->tfms_count = 1;
1486 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
1487 !is_power_of_2(cc->tfms_count)) {
1488 ti->error = "Bad cipher key count specification";
1489 return -EINVAL;
1490 }
1491 cc->key_parts = cc->tfms_count;
1492 cc->key_extra_size = 0;
1493
1494 cc->cipher = kstrdup(cipher, GFP_KERNEL);
1495 if (!cc->cipher)
1496 goto bad_mem;
1497
1498 chainmode = strsep(&tmp, "-");
1499 ivopts = strsep(&tmp, "-");
1500 ivmode = strsep(&ivopts, ":");
1501
1502 if (tmp)
1503 DMWARN("Ignoring unexpected additional cipher options");
1504
1505 /*
1506 * For compatibility with the original dm-crypt mapping format, if
1507 * only the cipher name is supplied, use cbc-plain.
1508 */
1509 if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
1510 chainmode = "cbc";
1511 ivmode = "plain";
1512 }
1513
1514 if (strcmp(chainmode, "ecb") && !ivmode) {
1515 ti->error = "IV mechanism required";
1516 return -EINVAL;
1517 }
1518
1519 cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
1520 if (!cipher_api)
1521 goto bad_mem;
1522
1523 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
1524 "%s(%s)", chainmode, cipher);
1525 if (ret < 0) {
1526 kfree(cipher_api);
1527 goto bad_mem;
1528 }
1529
1530 /* Allocate cipher */
1531 ret = crypt_alloc_tfms(cc, cipher_api);
1532 if (ret < 0) {
1533 ti->error = "Error allocating crypto tfm";
1534 goto bad;
1535 }
1536
1537 /* Initialize IV */
1538 cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
1539 if (cc->iv_size)
1540 /* at least a 64 bit sector number should fit in our buffer */
1541 cc->iv_size = max(cc->iv_size,
1542 (unsigned int)(sizeof(u64) / sizeof(u8)));
1543 else if (ivmode) {
1544 DMWARN("Selected cipher does not support IVs");
1545 ivmode = NULL;
1546 }
1547
1548 /* Choose ivmode, see comments at iv code. */
1549 if (ivmode == NULL)
1550 cc->iv_gen_ops = NULL;
1551 else if (strcmp(ivmode, "plain") == 0)
1552 cc->iv_gen_ops = &crypt_iv_plain_ops;
1553 else if (strcmp(ivmode, "plain64") == 0)
1554 cc->iv_gen_ops = &crypt_iv_plain64_ops;
1555 else if (strcmp(ivmode, "essiv") == 0)
1556 cc->iv_gen_ops = &crypt_iv_essiv_ops;
1557 else if (strcmp(ivmode, "benbi") == 0)
1558 cc->iv_gen_ops = &crypt_iv_benbi_ops;
1559 else if (strcmp(ivmode, "null") == 0)
1560 cc->iv_gen_ops = &crypt_iv_null_ops;
1561 else if (strcmp(ivmode, "lmk") == 0) {
1562 cc->iv_gen_ops = &crypt_iv_lmk_ops;
1563 /*
1564 * Versions 2 and 3 are recognised according
1565 * to the length of the provided multi-key string.
1566 * If present (version 3), the last key is used as the IV seed.
1567 * All keys (including IV seed) are always the same size.
1568 */
1569 if (cc->key_size % cc->key_parts) {
1570 cc->key_parts++;
1571 cc->key_extra_size = cc->key_size / cc->key_parts;
1572 }
1573 } else if (strcmp(ivmode, "tcw") == 0) {
1574 cc->iv_gen_ops = &crypt_iv_tcw_ops;
1575 cc->key_parts += 2; /* IV + whitening */
1576 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
1577 } else {
1578 ret = -EINVAL;
1579 ti->error = "Invalid IV mode";
1580 goto bad;
1581 }
1582
1583 /* Initialize and set key */
1584 ret = crypt_set_key(cc, key);
1585 if (ret < 0) {
1586 ti->error = "Error decoding and setting key";
1587 goto bad;
1588 }
1589
1590 /* Allocate IV */
1591 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
1592 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
1593 if (ret < 0) {
1594 ti->error = "Error creating IV";
1595 goto bad;
1596 }
1597 }
1598
1599 /* Initialize IV (set keys for ESSIV etc) */
1600 if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
1601 ret = cc->iv_gen_ops->init(cc);
1602 if (ret < 0) {
1603 ti->error = "Error initialising IV";
1604 goto bad;
1605 }
1606 }
1607
1608 ret = 0;
1609 bad:
1610 kfree(cipher_api);
1611 return ret;
1612
1613 bad_mem:
1614 ti->error = "Cannot allocate cipher strings";
1615 return -ENOMEM;
1616 }
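
/*
 * For context, a constructor (see crypt_ctr() below) receives the whole
 * "<cipher> <key> <iv_offset> <dev_path> <start>" argument list and hands only
 * the first field to this function, e.g. "aes-cbc-essiv:sha256" for a typical
 * LUKS1-style mapping. The userspace sketch below mirrors the strsep()
 * splitting performed above (glibc; the spec string is just an example).
 */

#define _DEFAULT_SOURCE		/* for strsep() */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char spec[] = "aes-cbc-essiv:sha256";	/* example cipher specification */
	char *tmp = spec, *cipher, *keycount, *chainmode, *ivopts, *ivmode;
	char cipher_api[64];

	keycount = strsep(&tmp, "-");		/* "aes" (no ":<count>" suffix here) */
	cipher = strsep(&keycount, ":");	/* cipher name; keycount becomes NULL */
	chainmode = strsep(&tmp, "-");		/* "cbc" */
	ivopts = strsep(&tmp, "-");		/* "essiv:sha256" */
	ivmode = strsep(&ivopts, ":");		/* "essiv", leaving ivopts = "sha256" */

	snprintf(cipher_api, sizeof(cipher_api), "%s(%s)", chainmode, cipher);

	printf("cipher=%s keycount=%s mode=%s ivmode=%s ivopts=%s api=%s\n",
	       cipher, keycount ? keycount : "(default 1)", chainmode,
	       ivmode, ivopts ? ivopts : "(none)", cipher_api);
	/* prints: cipher=aes keycount=(default 1) mode=cbc ivmode=essiv
	 *         ivopts=sha256 api=cbc(aes) */
	return 0;
}
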
1617
1618 /*
1619 * Construct an encryption mapping:
1620 * <cipher> <key> <iv_offset> <dev_path> <start>
1621 */
1622 static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1623 {
1624 struct crypt_config *cc;
1625 unsigned int key_size, opt_params;
1626 unsigned long long tmpll;
1627 int ret;
1628 size_t iv_size_padding;
1629 struct dm_arg_set as;
1630 const char *opt_string;
1631 char dummy;
1632
1633 static struct dm_arg _args[] = {
1634 {0, 2, "Invalid number of feature args"},
1635 };
1636
1637 if (argc < 5) {
1638 ti->error = "Not enough arguments";
1639 return -EINVAL;
1640 }
1641
1642 key_size = strlen(argv[1]) >> 1;
1643
1644 cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
1645 if (!cc) {
1646 ti->error = "Cannot allocate encryption context";
1647 return -ENOMEM;
1648 }
1649 cc->key_size = key_size;
1650
1651 ti->private = cc;
1652 ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
1653 if (ret < 0)
1654 goto bad;
1655
1656 cc->dmreq_start = sizeof(struct ablkcipher_request);
1657 cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
1658 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
1659
1660 if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
1661 /* Allocate the padding exactly */
1662 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
1663 & crypto_ablkcipher_alignmask(any_tfm(cc));
1664 } else {
1665 /*
1666 * If the cipher requires greater alignment than kmalloc
1667 * alignment, we don't know the exact position of the
1668 * initialization vector. We must assume worst case.
1669 */
1670 iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
1671 }
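	/*
	 * Illustrative arithmetic (sizes are examples only): if the request plus
	 * tfm context take 61 bytes and dm_crypt_request needs 8-byte alignment,
	 * dmreq_start = ALIGN(61, 8) = 64.  With a cipher alignmask of 3 (below
	 * kmalloc alignment), iv_size_padding = -(64 + sizeof(struct
	 * dm_crypt_request)) & 3, i.e. just enough to round the IV up to a
	 * 4-byte boundary; otherwise the full alignmask is reserved as the
	 * worst case.
	 */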
1672
1673 ret = -ENOMEM;
1674 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
1675 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
1676 if (!cc->req_pool) {
1677 ti->error = "Cannot allocate crypt request mempool";
1678 goto bad;
1679 }
1680
1681 cc->per_bio_data_size = ti->per_bio_data_size =
1682 ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
1683 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
1684 ARCH_KMALLOC_MINALIGN);
1685
1686 cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
1687 if (!cc->page_pool) {
1688 ti->error = "Cannot allocate page mempool";
1689 goto bad;
1690 }
1691
1692 cc->bs = bioset_create(MIN_IOS, 0);
1693 if (!cc->bs) {
1694 ti->error = "Cannot allocate crypt bioset";
1695 goto bad;
1696 }
1697
1698 mutex_init(&cc->bio_alloc_lock);
1699
1700 ret = -EINVAL;
1701 if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
1702 ti->error = "Invalid iv_offset sector";
1703 goto bad;
1704 }
1705 cc->iv_offset = tmpll;
1706
1707 if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
1708 ti->error = "Device lookup failed";
1709 goto bad;
1710 }
1711
1712 if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
1713 ti->error = "Invalid device sector";
1714 goto bad;
1715 }
1716 cc->start = tmpll;
1717
1718 argv += 5;
1719 argc -= 5;
1720
1721 /* Optional parameters */
1722 if (argc) {
1723 as.argc = argc;
1724 as.argv = argv;
1725
1726 ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
1727 if (ret)
1728 goto bad;
1729
1730 while (opt_params--) {
1731 opt_string = dm_shift_arg(&as);
1732 if (!opt_string) {
1733 ti->error = "Not enough feature arguments";
1734 goto bad;
1735 }
1736
1737 if (!strcasecmp(opt_string, "allow_discards"))
1738 ti->num_discard_bios = 1;
1739
1740 else if (!strcasecmp(opt_string, "same_cpu_crypt"))
1741 set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
1742
1743 else {
1744 ti->error = "Invalid feature arguments";
1745 goto bad;
1746 }
1747 }
1748 }
1749
1750 ret = -ENOMEM;
1751 cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
1752 if (!cc->io_queue) {
1753 ti->error = "Couldn't create kcryptd io queue";
1754 goto bad;
1755 }
1756
1757 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
1758 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
1759 else
1760 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
1761 num_online_cpus());
1762 if (!cc->crypt_queue) {
1763 ti->error = "Couldn't create kcryptd queue";
1764 goto bad;
1765 }
1766
1767 ti->num_flush_bios = 1;
1768 ti->discard_zeroes_data_unsupported = true;
1769
1770 return 0;
1771
1772 bad:
1773 crypt_dtr(ti);
1774 return ret;
1775 }
1776
1777 static int crypt_map(struct dm_target *ti, struct bio *bio)
1778 {
1779 struct dm_crypt_io *io;
1780 struct crypt_config *cc = ti->private;
1781
1782 /*
1783 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
1784 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
1785 * - for REQ_DISCARD caller must use flush if IO ordering matters
1786 */
1787 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
1788 bio->bi_bdev = cc->dev->bdev;
1789 if (bio_sectors(bio))
1790 bio->bi_iter.bi_sector = cc->start +
1791 dm_target_offset(ti, bio->bi_iter.bi_sector);
1792 return DM_MAPIO_REMAPPED;
1793 }
1794
1795 io = dm_per_bio_data(bio, cc->per_bio_data_size);
1796 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
1797 io->ctx.req = (struct ablkcipher_request *)(io + 1);
1798
1799 if (bio_data_dir(io->base_bio) == READ) {
1800 if (kcryptd_io_read(io, GFP_NOWAIT))
1801 kcryptd_queue_io(io);
1802 } else
1803 kcryptd_queue_crypt(io);
1804
1805 return DM_MAPIO_SUBMITTED;
1806 }
1807
1808 static void crypt_status(struct dm_target *ti, status_type_t type,
1809 unsigned status_flags, char *result, unsigned maxlen)
1810 {
1811 struct crypt_config *cc = ti->private;
1812 unsigned i, sz = 0;
1813 int num_feature_args = 0;
1814
1815 switch (type) {
1816 case STATUSTYPE_INFO:
1817 result[0] = '\0';
1818 break;
1819
1820 case STATUSTYPE_TABLE:
1821 DMEMIT("%s ", cc->cipher_string);
1822
1823 if (cc->key_size > 0)
1824 for (i = 0; i < cc->key_size; i++)
1825 DMEMIT("%02x", cc->key[i]);
1826 else
1827 DMEMIT("-");
1828
1829 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
1830 cc->dev->name, (unsigned long long)cc->start);
1831
1832 num_feature_args += !!ti->num_discard_bios;
1833 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
1834 if (num_feature_args) {
1835 DMEMIT(" %d", num_feature_args);
1836 if (ti->num_discard_bios)
1837 DMEMIT(" allow_discards");
1838 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
1839 DMEMIT(" same_cpu_crypt");
1840 }
1841
1842 break;
1843 }
1844 }
1845
1846 static void crypt_postsuspend(struct dm_target *ti)
1847 {
1848 struct crypt_config *cc = ti->private;
1849
1850 set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1851 }
1852
1853 static int crypt_preresume(struct dm_target *ti)
1854 {
1855 struct crypt_config *cc = ti->private;
1856
1857 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
1858 DMERR("aborting resume - crypt key is not set.");
1859 return -EAGAIN;
1860 }
1861
1862 return 0;
1863 }
1864
1865 static void crypt_resume(struct dm_target *ti)
1866 {
1867 struct crypt_config *cc = ti->private;
1868
1869 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1870 }
1871
1872 /* Message interface
1873 * key set <key>
1874 * key wipe
1875 */
1876 static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
1877 {
1878 struct crypt_config *cc = ti->private;
1879 int ret = -EINVAL;
1880
1881 if (argc < 2)
1882 goto error;
1883
1884 if (!strcasecmp(argv[0], "key")) {
1885 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
1886 DMWARN("not suspended during key manipulation.");
1887 return -EINVAL;
1888 }
1889 if (argc == 3 && !strcasecmp(argv[1], "set")) {
1890 ret = crypt_set_key(cc, argv[2]);
1891 if (ret)
1892 return ret;
1893 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
1894 ret = cc->iv_gen_ops->init(cc);
1895 return ret;
1896 }
1897 if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
1898 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
1899 ret = cc->iv_gen_ops->wipe(cc);
1900 if (ret)
1901 return ret;
1902 }
1903 return crypt_wipe_key(cc);
1904 }
1905 }
1906
1907 error:
1908 DMWARN("unrecognised message received.");
1909 return -EINVAL;
1910 }
1911
1912 static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
1913 struct bio_vec *biovec, int max_size)
1914 {
1915 struct crypt_config *cc = ti->private;
1916 struct request_queue *q = bdev_get_queue(cc->dev->bdev);
1917
1918 if (!q->merge_bvec_fn)
1919 return max_size;
1920
1921 bvm->bi_bdev = cc->dev->bdev;
1922 bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);
1923
1924 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
1925 }
1926
1927 static int crypt_iterate_devices(struct dm_target *ti,
1928 iterate_devices_callout_fn fn, void *data)
1929 {
1930 struct crypt_config *cc = ti->private;
1931
1932 return fn(ti, cc->dev, cc->start, ti->len, data);
1933 }
1934
1935 static struct target_type crypt_target = {
1936 .name = "crypt",
1937 .version = {1, 14, 0},
1938 .module = THIS_MODULE,
1939 .ctr = crypt_ctr,
1940 .dtr = crypt_dtr,
1941 .map = crypt_map,
1942 .status = crypt_status,
1943 .postsuspend = crypt_postsuspend,
1944 .preresume = crypt_preresume,
1945 .resume = crypt_resume,
1946 .message = crypt_message,
1947 .merge = crypt_merge,
1948 .iterate_devices = crypt_iterate_devices,
1949 };
1950
1951 static int __init dm_crypt_init(void)
1952 {
1953 int r;
1954
1955 r = dm_register_target(&crypt_target);
1956 if (r < 0)
1957 DMERR("register failed %d", r);
1958
1959 return r;
1960 }
1961
1962 static void __exit dm_crypt_exit(void)
1963 {
1964 dm_unregister_target(&crypt_target);
1965 }
1966
1967 module_init(dm_crypt_init);
1968 module_exit(dm_crypt_exit);
1969
1970 MODULE_AUTHOR("Jana Saout <jana@saout.de>");
1971 MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
1972 MODULE_LICENSE("GPL");