/*
 * linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * This contains encryption functions for ext4
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */
#include <crypto/skcipher.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>

#include "ext4_extents.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */
static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		 "Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		 "Number of crypto contexts to preallocate");
static mempool_t *ext4_bounce_page_pool;

static LIST_HEAD(ext4_free_crypto_ctxs);
static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);

static struct kmem_cache *ext4_crypto_ctx_cachep;
struct kmem_cache *ext4_crypt_info_cachep;
/**
 * ext4_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page)
		mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
	ctx->w.bounce_page = NULL;
	ctx->w.control_page = NULL;
	if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
		spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	}
}
/**
 * ext4_get_crypto_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
{
	struct ext4_crypto_ctx *ctx = NULL;
	int res = 0;
	unsigned long flags;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
				       struct ext4_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto out;
		}
		ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~EXT4_WRITE_PATH_FL;

out:
	if (res) {
		if (!IS_ERR_OR_NULL(ctx))
			ext4_release_crypto_ctx(ctx);
		ctx = ERR_PTR(res);
	}
	return ctx;
}
struct workqueue_struct *ext4_read_workqueue;
static DEFINE_MUTEX(crypto_init);	/* serializes ext4_init_crypto() */
/**
 * ext4_exit_crypto() - Shutdown the ext4 encryption system
 */
void ext4_exit_crypto(void)
{
	struct ext4_crypto_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list)
		kmem_cache_free(ext4_crypto_ctx_cachep, pos);
	INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
	if (ext4_bounce_page_pool)
		mempool_destroy(ext4_bounce_page_pool);
	ext4_bounce_page_pool = NULL;
	if (ext4_read_workqueue)
		destroy_workqueue(ext4_read_workqueue);
	ext4_read_workqueue = NULL;
	if (ext4_crypto_ctx_cachep)
		kmem_cache_destroy(ext4_crypto_ctx_cachep);
	ext4_crypto_ctx_cachep = NULL;
	if (ext4_crypt_info_cachep)
		kmem_cache_destroy(ext4_crypt_info_cachep);
	ext4_crypt_info_cachep = NULL;
}
/**
 * ext4_init_crypto() - Set up for ext4 encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_init_crypto(void)
{
	int i, res = -ENOMEM;

	mutex_lock(&crypto_init);
	if (ext4_read_workqueue)
		goto already_initialized;
	ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
	if (!ext4_read_workqueue)
		goto fail;

	ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypto_ctx_cachep)
		goto fail;

	ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypt_info_cachep)
		goto fail;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct ext4_crypto_ctx *ctx;

		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto fail;
		}
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
	}

	ext4_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!ext4_bounce_page_pool) {
		res = -ENOMEM;
		goto fail;
	}
already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	ext4_exit_crypto();
	mutex_unlock(&crypto_init);
	return res;
}
void ext4_restore_control_page(struct page *data_page)
{
	struct ext4_crypto_ctx *ctx =
		(struct ext4_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	ext4_release_crypto_ctx(ctx);
}
/**
 * ext4_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void ext4_crypt_complete(struct crypto_async_request *req, int res)
{
	struct ext4_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}
static int ext4_page_crypto(struct inode *inode,
			    ext4_direction_t rw,
			    pgoff_t index,
			    struct page *src_page,
			    struct page *dest_page)
{
	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
	struct skcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
				   "%s: crypto_request_alloc() failed\n",
				   __func__);
		return -ENOMEM;
	}
	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_crypt_complete, &ecr);

	/* The page's index, zero-padded, serves as the XTS tweak. */
	BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
	       EXT4_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
				   xts_tweak);
	if (rw == EXT4_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(
			KERN_ERR
			"%s: crypto_skcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}
static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
{
	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= EXT4_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}
/**
 * ext4_encrypt() - Encrypts a page
 * @inode:          The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
 * encryption context.
 *
 * Called on the page write path.  The caller must call
 * ext4_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *ext4_encrypt(struct inode *inode,
			  struct page *plaintext_page)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return (struct page *) ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page))
		goto errout;
	ctx->w.control_page = plaintext_page;
	err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
			       plaintext_page, ciphertext_page);
	if (err) {
		ciphertext_page = ERR_PTR(err);
	errout:
		ext4_release_crypto_ctx(ctx);
		return ciphertext_page;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;
}
/**
 * ext4_decrypt() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the inode's encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_decrypt(struct page *page)
{
	BUG_ON(!PageLocked(page));

	return ext4_page_crypto(page->mapping->host,
				EXT4_DECRYPT, page->index, page, page);
}
int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
			   ext4_fsblk_t pblk, ext4_lblk_t len)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

#if 0
	ext4_msg(inode->i_sb, KERN_CRIT,
		 "ext4_encrypted_zeroout ino %lu lblk %u len %u",
		 (unsigned long) inode->i_ino, lblk, len);
#endif

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			ext4_msg(inode->i_sb, KERN_ERR,
				 "bio_add_page failed: %d", ret);
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++; pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}
bool ext4_valid_contents_enc_mode(uint32_t mode)
{
	return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
}
/**
 * ext4_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
	if (size == ext4_encryption_key_size(mode))
		return size;
	return 0;
}
/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct inode *dir = d_inode(dentry->d_parent);
	struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
	int dir_has_key, cached_with_key;

	if (!ext4_encrypted_inode(dir))
		return 0;

	/* Treat an unusable keyring key the same as having no key at all. */
	if (ci && ci->ci_keyring_key &&
	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
					  (1 << KEY_FLAG_REVOKED) |
					  (1 << KEY_FLAG_DEAD))))
		ci = NULL;

	/* this should eventually be a flag in d_flags */
	cached_with_key = dentry->d_fsdata != NULL;
	dir_has_key = (ci != NULL);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name.  We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
	    (!cached_with_key && dir_has_key) ||
	    (cached_with_key && !dir_has_key)) {
#if 0				/* Revalidation debug */
		char buf[80];
		char *cp = simple_dname(dentry, buf, sizeof(buf));

		if (IS_ERR(cp))
			cp = "???";
		pr_err("revalidate: %s %p %d %d %d\n", cp, dentry->d_fsdata,
		       cached_with_key, d_is_negative(dentry),
		       dir_has_key);
#endif
		return 0;
	}
	return 1;
}

const struct dentry_operations ext4_encrypted_d_ops = {
	.d_revalidate = ext4_d_revalidate,
};
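/*
 * These dentry_operations are installed (via d_set_d_op() in
 * ext4_lookup()) on dentries under encrypted directories, so the
 * revalidation above runs on every cached-lookup hit for them.
 */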