/*
 * linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * This contains encryption functions for ext4
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <crypto/skcipher.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>

#include "ext4_extents.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		 "Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		 "Number of crypto contexts to preallocate");

static mempool_t *ext4_bounce_page_pool;

static LIST_HEAD(ext4_free_crypto_ctxs);
static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);

static struct kmem_cache *ext4_crypto_ctx_cachep;
struct kmem_cache *ext4_crypt_info_cachep;

/**
 * ext4_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page)
		mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
	ctx->w.bounce_page = NULL;
	ctx->w.control_page = NULL;
	if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
		spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	}
}

/**
 * ext4_get_crypto_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR() value on failure.
 */
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
{
	struct ext4_crypto_ctx *ctx = NULL;
	int res = 0;
	unsigned long flags;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;

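	/* i_crypt_info is only set up once the inode's key is available. */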
	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
				       struct ext4_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto out;
		}
		ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~EXT4_WRITE_PATH_FL;

out:
	if (res) {
		if (!IS_ERR_OR_NULL(ctx))
			ext4_release_crypto_ctx(ctx);
		ctx = ERR_PTR(res);
	}
	return ctx;
}

struct workqueue_struct *ext4_read_workqueue;
static DEFINE_MUTEX(crypto_init);

/**
 * ext4_exit_crypto() - Shutdown the ext4 encryption system
 */
void ext4_exit_crypto(void)
{
	struct ext4_crypto_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list)
		kmem_cache_free(ext4_crypto_ctx_cachep, pos);
	INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
	if (ext4_bounce_page_pool)
		mempool_destroy(ext4_bounce_page_pool);
	ext4_bounce_page_pool = NULL;
	if (ext4_read_workqueue)
		destroy_workqueue(ext4_read_workqueue);
	ext4_read_workqueue = NULL;
	if (ext4_crypto_ctx_cachep)
		kmem_cache_destroy(ext4_crypto_ctx_cachep);
	ext4_crypto_ctx_cachep = NULL;
	if (ext4_crypt_info_cachep)
		kmem_cache_destroy(ext4_crypt_info_cachep);
	ext4_crypt_info_cachep = NULL;
}

/**
 * ext4_init_crypto() - Set up for ext4 encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_init_crypto(void)
{
	int i, res = -ENOMEM;

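	/* Serialize first-time setup; later callers return immediately. */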
	mutex_lock(&crypto_init);
	if (ext4_read_workqueue)
		goto already_initialized;
	ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
	if (!ext4_read_workqueue)
		goto fail;

	ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypto_ctx_cachep)
		goto fail;

	ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypt_info_cachep)
		goto fail;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct ext4_crypto_ctx *ctx;

		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto fail;
		}
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
	}

	ext4_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!ext4_bounce_page_pool) {
		res = -ENOMEM;
		goto fail;
	}
already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	ext4_exit_crypto();
	mutex_unlock(&crypto_init);
	return res;
}

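/**
 * ext4_restore_control_page() - Release a bounce page and its context
 * @data_page: The bounce page previously returned by ext4_encrypt()
 *
 * Detaches the encryption context stored in page_private(), unlocks the
 * bounce page, and releases the context (which returns the bounce page
 * to the mempool).
 */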
void ext4_restore_control_page(struct page *data_page)
{
	struct ext4_crypto_ctx *ctx =
		(struct ext4_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	ext4_release_crypto_ctx(ctx);
}

/**
 * ext4_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void ext4_crypt_complete(struct crypto_async_request *req, int res)
{
	struct ext4_completion_result *ecr = req->data;

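	/*
	 * With CRYPTO_TFM_REQ_MAY_BACKLOG set, the crypto layer invokes
	 * this callback with -EINPROGRESS once a backlogged request is
	 * actually queued; only the final invocation carries the real
	 * result, so the first one is ignored.
	 */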
	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	EXT4_DECRYPT = 0,
	EXT4_ENCRYPT,
} ext4_direction_t;

static int ext4_page_crypto(struct inode *inode,
			    ext4_direction_t rw,
			    pgoff_t index,
			    struct page *src_page,
			    struct page *dest_page)
{
	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
	struct skcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
				   "%s: skcipher_request_alloc() failed\n",
				   __func__);
		return -ENOMEM;
	}
	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_crypt_complete, &ecr);

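	/*
	 * The XTS tweak is simply the page's index in the file, padded
	 * with zeroes to EXT4_XTS_TWEAK_SIZE bytes.  The BUILD_BUG_ON
	 * below guarantees that a pgoff_t always fits in the buffer.
	 */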
	BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
	       EXT4_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
				   xts_tweak);
	if (rw == EXT4_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
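	/*
	 * -EINPROGRESS means the request was queued and -EBUSY that it
	 * was backlogged (permitted by CRYPTO_TFM_REQ_MAY_BACKLOG); in
	 * both cases ext4_crypt_complete() delivers the final result.
	 */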
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(
			KERN_ERR
			"%s: crypto_skcipher_encrypt/decrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}

static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
{
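	/*
	 * GFP_NOWAIT keeps this path from sleeping: if the page
	 * allocator cannot satisfy the request without blocking,
	 * mempool_alloc() falls back to one of the pages preallocated
	 * by ext4_init_crypto().
	 */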
	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= EXT4_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}

/**
 * ext4_encrypt() - Encrypts a page
 * @inode: The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
 * encryption context.
 *
 * Called on the page write path. The caller must call
 * ext4_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * ERR_PTR() value.
 */
struct page *ext4_encrypt(struct inode *inode,
			  struct page *plaintext_page)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return (struct page *) ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page))
		goto errout;
	ctx->w.control_page = plaintext_page;
	err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
			       plaintext_page, ciphertext_page);
	if (err) {
		ciphertext_page = ERR_PTR(err);
	errout:
		ext4_release_crypto_ctx(ctx);
		return ciphertext_page;
	}
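	/*
	 * Stash the context in page_private() so that
	 * ext4_restore_control_page() can find it and release it once
	 * the bounce page has been written out.
	 */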
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;
}
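
/*
 * A minimal sketch of the intended calling pattern on the write path.
 * Everything except ext4_encrypt() and ext4_restore_control_page() is
 * illustrative, and the real writeback plumbing is elided:
 *
 *	struct page *bounce = ext4_encrypt(inode, plaintext_page);
 *
 *	if (IS_ERR(bounce))
 *		return PTR_ERR(bounce);
 *	submit_encrypted_page(bounce);	// hypothetical submission step
 *	// ... then, from the I/O completion path:
 *	ext4_restore_control_page(bounce);
 */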

/**
 * ext4_decrypt() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the inode's encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_decrypt(struct page *page)
{
	BUG_ON(!PageLocked(page));

	return ext4_page_crypto(page->mapping->host,
				EXT4_DECRYPT, page->index, page, page);
}

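/**
 * ext4_encrypted_zeroout() - Zero out a range of an encrypted file
 * @inode: The inode owning the range
 * @lblk: First logical block of the range
 * @pblk: First physical block of the range
 * @len: Number of blocks in the range
 *
 * Writes the encryption of ZERO_PAGE(0) to each block in the range, so
 * that reading the range back through the decryption path yields zeroes.
 *
 * Return: Zero on success, non-zero otherwise.
 */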
int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
			   ext4_fsblk_t pblk, ext4_lblk_t len)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

#if 0
	ext4_msg(inode->i_sb, KERN_CRIT,
		 "ext4_encrypted_zeroout ino %lu lblk %u len %u",
		 (unsigned long) inode->i_ino, lblk, len);
#endif

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
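		/* Convert the filesystem block number to 512-byte sectors. */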
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			ext4_msg(inode->i_sb, KERN_ERR,
				 "bio_add_page failed: %d", ret);
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++; pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}

bool ext4_valid_contents_enc_mode(uint32_t mode)
{
	return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
}

/**
 * ext4_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
	if (size == ext4_encryption_key_size(mode))
		return size;
	return 0;
}

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct inode *dir = d_inode(dentry->d_parent);
	struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
	int dir_has_key, cached_with_key;

	if (!ext4_encrypted_inode(dir))
		return 0;

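	/*
	 * A keyring key that has been revoked, invalidated, or killed
	 * is treated the same as having no key at all.
	 */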
	if (ci && ci->ci_keyring_key &&
	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
					  (1 << KEY_FLAG_REVOKED) |
					  (1 << KEY_FLAG_DEAD))))
		ci = NULL;

	/* this should eventually be a flag in d_flags */
	cached_with_key = dentry->d_fsdata != NULL;
	dir_has_key = (ci != NULL);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name. We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
	    (!cached_with_key && dir_has_key) ||
	    (cached_with_key && !dir_has_key)) {
#if 0	/* Revalidation debug */
		char buf[80];
		char *cp = simple_dname(dentry, buf, sizeof(buf));

		if (IS_ERR(cp))
			cp = (char *) "???";
		pr_err("revalidate: %s %p %d %d %d\n", cp, dentry->d_fsdata,
		       cached_with_key, d_is_negative(dentry),
		       dir_has_key);
#endif
		return 0;
	}
	return 1;
}

const struct dentry_operations ext4_encrypted_d_ops = {
	.d_revalidate = ext4_d_revalidate,
};