ext4 crypto: require CONFIG_CRYPTO_CTR if ext4 encryption is enabled
[deliverable/linux.git] / fs/ext4/crypto.c
/*
 * linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * This contains encryption functions for ext4
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *        Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *        Ildar Muslukhov, 2014
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/crypto.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>

#include "ext4_extents.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
                 "Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
                 "Number of crypto contexts to preallocate");

static mempool_t *ext4_bounce_page_pool;

static LIST_HEAD(ext4_free_crypto_ctxs);
static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);

static struct kmem_cache *ext4_crypto_ctx_cachep;
struct kmem_cache *ext4_crypt_info_cachep;

/**
 * ext4_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
{
        unsigned long flags;

        if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page) {
                if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
                        __free_page(ctx->w.bounce_page);
                else
                        mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
        }
        ctx->w.bounce_page = NULL;
        ctx->w.control_page = NULL;
        if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
                if (ctx->tfm)
                        crypto_free_tfm(ctx->tfm);
                kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
        } else {
                spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
                list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
                spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
        }
}

/**
 * ext4_get_crypto_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
{
        struct ext4_crypto_ctx *ctx = NULL;
        int res = 0;
        unsigned long flags;
        struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;

        BUG_ON(ci == NULL);

        /*
         * We first try getting the ctx from a free list because in
         * the common case the ctx will have an allocated and
         * initialized crypto tfm, so it's probably a worthwhile
         * optimization. For the bounce page, we first try getting it
         * from the kernel allocator because that's just about as fast
         * as getting it from a list and because a cache of free pages
         * should generally be a "last resort" option for a filesystem
         * to be able to do its job.
         */
        spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
        ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
                                       struct ext4_crypto_ctx, free_list);
        if (ctx)
                list_del(&ctx->free_list);
        spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
        if (!ctx) {
                ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
                if (!ctx) {
                        res = -ENOMEM;
                        goto out;
                }
                ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
        } else {
                ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
        }
        ctx->flags &= ~EXT4_WRITE_PATH_FL;

        /* Allocate a new Crypto API context if we don't already have
         * one or if it isn't the right mode. */
        if (ctx->tfm && (ctx->mode != ci->ci_data_mode)) {
                crypto_free_tfm(ctx->tfm);
                ctx->tfm = NULL;
                ctx->mode = EXT4_ENCRYPTION_MODE_INVALID;
        }
        if (!ctx->tfm) {
                switch (ci->ci_data_mode) {
                case EXT4_ENCRYPTION_MODE_AES_256_XTS:
                        ctx->tfm = crypto_ablkcipher_tfm(
                                crypto_alloc_ablkcipher("xts(aes)", 0, 0));
                        break;
                case EXT4_ENCRYPTION_MODE_AES_256_GCM:
                        /* TODO(mhalcrow): AEAD w/ gcm(aes);
                         * crypto_aead_setauthsize() */
                        ctx->tfm = ERR_PTR(-ENOTSUPP);
                        break;
                default:
                        BUG();
                }
                if (IS_ERR_OR_NULL(ctx->tfm)) {
                        res = PTR_ERR(ctx->tfm);
                        ctx->tfm = NULL;
                        goto out;
                }
                ctx->mode = ci->ci_data_mode;
        }
        BUG_ON(ci->ci_size != ext4_encryption_key_size(ci->ci_data_mode));

out:
        if (res) {
                if (!IS_ERR_OR_NULL(ctx))
                        ext4_release_crypto_ctx(ctx);
                ctx = ERR_PTR(res);
        }
        return ctx;
}
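
/*
 * Minimal usage sketch (illustrative only; the real callers below add
 * more error handling): a context is always obtained and released in
 * pairs around a crypto operation, e.g.
 *
 *        struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);
 *
 *        if (IS_ERR(ctx))
 *                return PTR_ERR(ctx);
 *        ...use ctx->tfm for the inode's data mode...
 *        ext4_release_crypto_ctx(ctx);
 */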

struct workqueue_struct *ext4_read_workqueue;
static DEFINE_MUTEX(crypto_init);

/**
 * ext4_exit_crypto() - Shutdown the ext4 encryption system
 */
void ext4_exit_crypto(void)
{
        struct ext4_crypto_ctx *pos, *n;

        list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) {
                if (pos->tfm)
                        crypto_free_tfm(pos->tfm);
                kmem_cache_free(ext4_crypto_ctx_cachep, pos);
        }
        INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
        if (ext4_bounce_page_pool)
                mempool_destroy(ext4_bounce_page_pool);
        ext4_bounce_page_pool = NULL;
        if (ext4_read_workqueue)
                destroy_workqueue(ext4_read_workqueue);
        ext4_read_workqueue = NULL;
        if (ext4_crypto_ctx_cachep)
                kmem_cache_destroy(ext4_crypto_ctx_cachep);
        ext4_crypto_ctx_cachep = NULL;
        if (ext4_crypt_info_cachep)
                kmem_cache_destroy(ext4_crypt_info_cachep);
        ext4_crypt_info_cachep = NULL;
}

/**
 * ext4_init_crypto() - Set up for ext4 encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_init_crypto(void)
{
        int i, res = -ENOMEM;

        mutex_lock(&crypto_init);
        if (ext4_read_workqueue)
                goto already_initialized;
        ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
        if (!ext4_read_workqueue)
                goto fail;

        ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
                                            SLAB_RECLAIM_ACCOUNT);
        if (!ext4_crypto_ctx_cachep)
                goto fail;

        ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
                                            SLAB_RECLAIM_ACCOUNT);
        if (!ext4_crypt_info_cachep)
                goto fail;

        for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
                struct ext4_crypto_ctx *ctx;

                ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
                if (!ctx) {
                        res = -ENOMEM;
                        goto fail;
                }
                list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
        }

        ext4_bounce_page_pool =
                mempool_create_page_pool(num_prealloc_crypto_pages, 0);
        if (!ext4_bounce_page_pool) {
                res = -ENOMEM;
                goto fail;
        }
already_initialized:
        mutex_unlock(&crypto_init);
        return 0;
fail:
        ext4_exit_crypto();
        mutex_unlock(&crypto_init);
        return res;
}
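
/*
 * Expected call pattern (a hedged sketch; the actual call site lives in
 * the key setup code, outside this file): the first access to an
 * encrypted inode does roughly
 *
 *        res = ext4_init_crypto();
 *        if (res)
 *                return res;
 *
 * Repeated calls are cheap because the crypto_init mutex and the
 * ext4_read_workqueue check above make initialization idempotent.
 */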

void ext4_restore_control_page(struct page *data_page)
{
        struct ext4_crypto_ctx *ctx =
                (struct ext4_crypto_ctx *)page_private(data_page);

        set_page_private(data_page, (unsigned long)NULL);
        ClearPagePrivate(data_page);
        unlock_page(data_page);
        ext4_release_crypto_ctx(ctx);
}

/**
 * ext4_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void ext4_crypt_complete(struct crypto_async_request *req, int res)
{
        struct ext4_completion_result *ecr = req->data;

        if (res == -EINPROGRESS)
                return;
        ecr->res = res;
        complete(&ecr->completion);
}

typedef enum {
        EXT4_DECRYPT = 0,
        EXT4_ENCRYPT,
} ext4_direction_t;

static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
                            struct inode *inode,
                            ext4_direction_t rw,
                            pgoff_t index,
                            struct page *src_page,
                            struct page *dest_page)

{
        u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
        struct ablkcipher_request *req = NULL;
        DECLARE_EXT4_COMPLETION_RESULT(ecr);
        struct scatterlist dst, src;
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm);
        int res = 0;

        BUG_ON(!ctx->tfm);
        BUG_ON(ctx->mode != ei->i_crypt_info->ci_data_mode);

        if (ctx->mode != EXT4_ENCRYPTION_MODE_AES_256_XTS) {
                printk_ratelimited(KERN_ERR
                                   "%s: unsupported crypto algorithm: %d\n",
                                   __func__, ctx->mode);
                return -ENOTSUPP;
        }

        crypto_ablkcipher_clear_flags(atfm, ~0);
        crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY);

        res = crypto_ablkcipher_setkey(atfm, ei->i_crypt_info->ci_raw,
                                       ei->i_crypt_info->ci_size);
        if (res) {
                printk_ratelimited(KERN_ERR
                                   "%s: crypto_ablkcipher_setkey() failed\n",
                                   __func__);
                return res;
        }
        req = ablkcipher_request_alloc(atfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(KERN_ERR
                                   "%s: crypto_request_alloc() failed\n",
                                   __func__);
                return -ENOMEM;
        }
        ablkcipher_request_set_callback(
                req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                ext4_crypt_complete, &ecr);

        BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
        memcpy(xts_tweak, &index, sizeof(index));
        memset(&xts_tweak[sizeof(index)], 0,
               EXT4_XTS_TWEAK_SIZE - sizeof(index));

        sg_init_table(&dst, 1);
        sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
        sg_init_table(&src, 1);
        sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
        ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
                                     xts_tweak);
        if (rw == EXT4_DECRYPT)
                res = crypto_ablkcipher_decrypt(req);
        else
                res = crypto_ablkcipher_encrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                BUG_ON(req->base.data != &ecr);
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
        ablkcipher_request_free(req);
        if (res) {
                printk_ratelimited(
                        KERN_ERR
                        "%s: crypto_ablkcipher_encrypt() returned %d\n",
                        __func__, res);
                return res;
        }
        return 0;
}
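
/*
 * Tweak layout sketch (assuming a little-endian build with an 8-byte
 * pgoff_t): for page index 5, the xts_tweak buffer constructed above
 * would hold
 *
 *        05 00 00 00 00 00 00 00  00 00 00 00 00 00 00 00
 *
 * i.e. the raw bytes of the page index followed by zero padding up to
 * EXT4_XTS_TWEAK_SIZE.
 */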

/**
 * ext4_encrypt() - Encrypts a page
 * @inode: The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
 * encryption context.
 *
 * Called on the page write path. The caller must call
 * ext4_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *ext4_encrypt(struct inode *inode,
                          struct page *plaintext_page)
{
        struct ext4_crypto_ctx *ctx;
        struct page *ciphertext_page = NULL;
        int err;

        BUG_ON(!PageLocked(plaintext_page));

        ctx = ext4_get_crypto_ctx(inode);
        if (IS_ERR(ctx))
                return (struct page *) ctx;

        /* The encryption operation will require a bounce page. */
        ciphertext_page = alloc_page(GFP_NOFS);
        if (!ciphertext_page) {
                /* This is a potential bottleneck, but at least we'll have
                 * forward progress. */
                ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
                                                GFP_NOFS);
                if (WARN_ON_ONCE(!ciphertext_page)) {
                        ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
                                                        GFP_NOFS | __GFP_WAIT);
                }
                ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
        } else {
                ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
        }
        ctx->flags |= EXT4_WRITE_PATH_FL;
        ctx->w.bounce_page = ciphertext_page;
        ctx->w.control_page = plaintext_page;
        err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
                               plaintext_page, ciphertext_page);
        if (err) {
                ext4_release_crypto_ctx(ctx);
                return ERR_PTR(err);
        }
        SetPagePrivate(ciphertext_page);
        set_page_private(ciphertext_page, (unsigned long)ctx);
        lock_page(ciphertext_page);
        return ciphertext_page;
}
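
/*
 * Write-path usage sketch (hedged; the real caller is the ext4 writeback
 * code, not shown in this file):
 *
 *        data_page = ext4_encrypt(inode, page);
 *        if (IS_ERR(data_page))
 *                goto out_error;
 *        ...attach data_page to the outgoing bio in place of page...
 *
 * Once the write completes, the caller must hand data_page back to
 * ext4_restore_control_page() to release the bounce page and the context.
 */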

/**
 * ext4_decrypt() - Decrypts a page in-place
 * @ctx: The encryption context.
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page)
{
        BUG_ON(!PageLocked(page));

        return ext4_page_crypto(ctx, page->mapping->host,
                                EXT4_DECRYPT, page->index, page, page);
}

/*
 * Convenience function which takes care of allocating and
 * deallocating the encryption context
 */
int ext4_decrypt_one(struct inode *inode, struct page *page)
{
        int ret;

        struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);

        if (!ctx)
                return -ENOMEM;
        ret = ext4_decrypt(ctx, page);
        ext4_release_crypto_ctx(ctx);
        return ret;
}
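
/*
 * Read-path usage sketch (hedged; the real caller is the read completion
 * work item in the ext4 bio code): each encrypted page is decrypted in
 * place before being marked up to date, roughly
 *
 *        if (ext4_decrypt_one(inode, page))
 *                SetPageError(page);
 *        else
 *                SetPageUptodate(page);
 *        unlock_page(page);
 */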

int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
        struct ext4_crypto_ctx *ctx;
        struct page *ciphertext_page = NULL;
        struct bio *bio;
        ext4_lblk_t lblk = ex->ee_block;
        ext4_fsblk_t pblk = ext4_ext_pblock(ex);
        unsigned int len = ext4_ext_get_actual_len(ex);
        int err = 0;

        BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

        ctx = ext4_get_crypto_ctx(inode);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ciphertext_page = alloc_page(GFP_NOFS);
        if (!ciphertext_page) {
                /* This is a potential bottleneck, but at least we'll have
                 * forward progress. */
                ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
                                                GFP_NOFS);
                if (WARN_ON_ONCE(!ciphertext_page)) {
                        ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
                                                        GFP_NOFS | __GFP_WAIT);
                }
                ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
        } else {
                ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
        }
        ctx->w.bounce_page = ciphertext_page;

        while (len--) {
                err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
                                       ZERO_PAGE(0), ciphertext_page);
                if (err)
                        goto errout;

                bio = bio_alloc(GFP_KERNEL, 1);
                if (!bio) {
                        err = -ENOMEM;
                        goto errout;
                }
                bio->bi_bdev = inode->i_sb->s_bdev;
                bio->bi_iter.bi_sector = pblk;
                err = bio_add_page(bio, ciphertext_page,
                                   inode->i_sb->s_blocksize, 0);
                if (err) {
                        bio_put(bio);
                        goto errout;
                }
                err = submit_bio_wait(WRITE, bio);
                if (err)
                        goto errout;
        }
        err = 0;
errout:
        ext4_release_crypto_ctx(ctx);
        return err;
}

bool ext4_valid_contents_enc_mode(uint32_t mode)
{
        return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
}

/**
 * ext4_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
        if (size == ext4_encryption_key_size(mode))
                return size;
        return 0;
}
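
/*
 * Example (hedged on the exact constant): for
 * EXT4_ENCRYPTION_MODE_AES_256_XTS the expected key size is 64 bytes
 * (two AES-256 keys), so only size == 64 is returned unchanged; any
 * other size yields 0 and the key is rejected by the caller.
 */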