/* fs/overlayfs/readdir.c — ovl: check whiteout on lowest layer as well */
1 /*
2 *
3 * Copyright (C) 2011 Novell Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 */
9
10 #include <linux/fs.h>
11 #include <linux/slab.h>
12 #include <linux/namei.h>
13 #include <linux/file.h>
14 #include <linux/xattr.h>
15 #include <linux/rbtree.h>
16 #include <linux/security.h>
17 #include <linux/cred.h>
18 #include "overlayfs.h"
19
/*
 * One merged directory entry, cached between readdir calls.  Entries live
 * both on a list (iteration order) and in an rb tree (lookup by name
 * while merging layers).
 */
struct ovl_cache_entry {
	unsigned int len;		/* name length, excluding the NUL */
	unsigned int type;		/* DT_* type reported by the layer */
	u64 ino;			/* inode number reported by the layer */
	struct list_head l_node;	/* position in the merged entry list */
	struct rb_node node;		/* name-indexed lookup during merge */
	bool is_whiteout;		/* whiteout: hidden from readdir */
	bool is_cursor;			/* fake entry marking a reader's position */
	char name[];			/* NUL-terminated entry name */
};
30
/* Shared, refcounted cache of a merged directory's entries. */
struct ovl_dir_cache {
	long refcount;			/* open files currently using this cache */
	u64 version;			/* dentry version the cache was built against */
	struct list_head entries;	/* merged list of ovl_cache_entry */
};
36
/* State passed to the ovl_fill_merge() actor while reading one layer. */
struct ovl_readdir_data {
	struct dir_context ctx;
	bool is_merge;			/* false: rb-tree pass; true: lowest-layer merge pass */
	struct rb_root root;		/* entries indexed by name */
	struct list_head *list;		/* destination list for new entries */
	struct list_head middle;	/* lower-layer entries collected mid-list */
	struct dentry *dir;		/* directory being read (for whiteout lookup) */
	int count;			/* entries seen in the current iterate_dir() batch */
	int err;			/* first error hit by the actor */
};
47
/* Per-open-file state for an overlay directory. */
struct ovl_dir_file {
	bool is_real;			/* not merged: iterate the real dir directly */
	bool is_upper;			/* realfile comes from the upper layer */
	struct ovl_dir_cache *cache;	/* merged entries; NULL until first use */
	struct ovl_cache_entry cursor;	/* this reader's position in the cache list */
	struct file *realfile;		/* backing real directory file */
	struct file *upperfile;		/* upper dir file, opened lazily for fsync */
};
56
/* Convert an rb_node back to its containing cache entry. */
static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
{
	return container_of(n, struct ovl_cache_entry, node);
}
61
62 static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
63 const char *name, int len)
64 {
65 struct rb_node *node = root->rb_node;
66 int cmp;
67
68 while (node) {
69 struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);
70
71 cmp = strncmp(name, p->name, len);
72 if (cmp > 0)
73 node = p->node.rb_right;
74 else if (cmp < 0 || len < p->len)
75 node = p->node.rb_left;
76 else
77 return p;
78 }
79
80 return NULL;
81 }
82
/*
 * Allocate and initialize a cache entry for one directory entry.
 *
 * Whiteouts are represented as character devices, so for DT_CHR entries
 * the dentry is looked up and checked with ovl_is_whiteout().  The
 * lookup runs with CAP_DAC_OVERRIDE raised in a temporary cred
 * (presumably because the current creds may lack permission on the real
 * directory — TODO confirm against ovl callers).  A failed lookup is
 * ignored and the entry is simply not marked as a whiteout.
 *
 * Returns the new entry or NULL on allocation failure.
 */
static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir,
						   const char *name, int len,
						   u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;
	/* flexible array member: one allocation holds entry + name + NUL */
	size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);

	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		return NULL;

	memcpy(p->name, name, len);
	p->name[len] = '\0';
	p->len = len;
	p->type = d_type;
	p->ino = ino;
	p->is_whiteout = false;
	p->is_cursor = false;

	if (d_type == DT_CHR) {
		struct dentry *dentry;
		const struct cred *old_cred;
		struct cred *override_cred;

		override_cred = prepare_creds();
		if (!override_cred) {
			kfree(p);
			return NULL;
		}

		/*
		 * CAP_DAC_OVERRIDE for lookup
		 */
		cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
		old_cred = override_creds(override_cred);

		dentry = lookup_one_len(name, dir, len);
		if (!IS_ERR(dentry)) {
			p->is_whiteout = ovl_is_whiteout(dentry);
			dput(dentry);
		}
		revert_creds(old_cred);
		put_cred(override_cred);
	}
	return p;
}
129
130 static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
131 const char *name, int len, u64 ino,
132 unsigned int d_type)
133 {
134 struct rb_node **newp = &rdd->root.rb_node;
135 struct rb_node *parent = NULL;
136 struct ovl_cache_entry *p;
137
138 while (*newp) {
139 int cmp;
140 struct ovl_cache_entry *tmp;
141
142 parent = *newp;
143 tmp = ovl_cache_entry_from_node(*newp);
144 cmp = strncmp(name, tmp->name, len);
145 if (cmp > 0)
146 newp = &tmp->node.rb_right;
147 else if (cmp < 0 || len < tmp->len)
148 newp = &tmp->node.rb_left;
149 else
150 return 0;
151 }
152
153 p = ovl_cache_entry_new(rdd->dir, name, len, ino, d_type);
154 if (p == NULL)
155 return -ENOMEM;
156
157 list_add_tail(&p->l_node, rdd->list);
158 rb_link_node(&p->node, parent, newp);
159 rb_insert_color(&p->node, &rdd->root);
160
161 return 0;
162 }
163
164 static int ovl_fill_lower(struct ovl_readdir_data *rdd,
165 const char *name, int namelen,
166 loff_t offset, u64 ino, unsigned int d_type)
167 {
168 struct ovl_cache_entry *p;
169
170 p = ovl_cache_entry_find(&rdd->root, name, namelen);
171 if (p) {
172 list_move_tail(&p->l_node, &rdd->middle);
173 } else {
174 p = ovl_cache_entry_new(rdd->dir, name, namelen, ino, d_type);
175 if (p == NULL)
176 rdd->err = -ENOMEM;
177 else
178 list_add_tail(&p->l_node, &rdd->middle);
179 }
180
181 return rdd->err;
182 }
183
184 void ovl_cache_free(struct list_head *list)
185 {
186 struct ovl_cache_entry *p;
187 struct ovl_cache_entry *n;
188
189 list_for_each_entry_safe(p, n, list, l_node)
190 kfree(p);
191
192 INIT_LIST_HEAD(list);
193 }
194
/*
 * Drop one reference on this open file's directory cache, detaching the
 * file's cursor from the entry list.  When the last reference goes away
 * the cache is freed and, if the dentry still points at it, unhooked
 * from the dentry as well.
 */
static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
{
	struct ovl_dir_cache *cache = od->cache;

	list_del_init(&od->cursor.l_node);
	WARN_ON(cache->refcount <= 0);
	cache->refcount--;
	if (!cache->refcount) {
		/* the dentry may already point at a newer cache */
		if (ovl_dir_cache(dentry) == cache)
			ovl_set_dir_cache(dentry, NULL);

		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}
210
211 static int ovl_fill_merge(void *buf, const char *name, int namelen,
212 loff_t offset, u64 ino, unsigned int d_type)
213 {
214 struct ovl_readdir_data *rdd = buf;
215
216 rdd->count++;
217 if (!rdd->is_merge)
218 return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
219 else
220 return ovl_fill_lower(rdd, name, namelen, offset, ino, d_type);
221 }
222
/*
 * Open the real directory at @realpath and feed all of its entries to
 * rdd->ctx.actor.  iterate_dir() is called repeatedly until a pass
 * delivers no entries (rdd->count stays 0) or an error occurs, so the
 * whole directory is consumed even if a single call stops early.
 * Returns 0 or a negative error.
 */
static inline int ovl_dir_read(struct path *realpath,
			       struct ovl_readdir_data *rdd)
{
	struct file *realfile;
	int err;

	realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
	if (IS_ERR(realfile))
		return PTR_ERR(realfile);

	rdd->dir = realpath->dentry;
	rdd->ctx.pos = 0;
	do {
		rdd->count = 0;
		rdd->err = 0;
		err = iterate_dir(realfile, &rdd->ctx);
		/* an actor error (rdd->err) takes precedence once set */
		if (err >= 0)
			err = rdd->err;
	} while (!err && rdd->count);
	fput(realfile);

	return err;
}
246
/*
 * Called on rewind (f_pos == 0): drop a cache whose version no longer
 * matches the dentry, and if the path type has become merged since the
 * file was opened, stop treating the directory as a plain real one.
 */
static void ovl_dir_reset(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct ovl_dir_cache *cache = od->cache;
	struct dentry *dentry = file->f_path.dentry;
	enum ovl_path_type type = ovl_path_type(dentry);

	if (cache && ovl_dentry_version_get(dentry) != cache->version) {
		ovl_cache_put(od, dentry);
		od->cache = NULL;
	}
	/* a merged dir must never flip back to non-merged */
	WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type));
	if (od->is_real && OVL_TYPE_MERGE(type))
		od->is_real = false;
}
262
/*
 * Build the merged entry list for @dentry by reading every layer from
 * the top down.  All layers except the lowest go through the rb-tree
 * pass (upper entries shadow lower ones); the lowest layer is read in
 * merge mode, with its entries collected on the temporary "middle" list
 * spliced at the head of the result.  Returns 0 or a negative error.
 */
static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
{
	int err;
	struct path realpath;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.list = list,
		.root = RB_ROOT,
		.is_merge = false,
	};
	int idx, next;

	/* ovl_path_next() returns -1 once the lowest layer is reached */
	for (idx = 0; idx != -1; idx = next) {
		next = ovl_path_next(idx, dentry, &realpath);

		if (next != -1) {
			err = ovl_dir_read(&realpath, &rdd);
			if (err)
				break;
		} else {
			/*
			 * Insert lowest layer entries before upper ones, this
			 * allows offsets to be reasonably constant
			 */
			list_add(&rdd.middle, rdd.list);
			rdd.is_merge = true;
			err = ovl_dir_read(&realpath, &rdd);
			list_del(&rdd.middle);
		}
	}
	return err;
}
295
/*
 * Position this file's cursor so that the next entry returned is the
 * pos'th non-cursor entry of the cache.  Other readers' cursor entries
 * are skipped while counting; if pos is beyond the end, the cursor ends
 * up at the tail of the list.
 */
static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
{
	struct ovl_cache_entry *p;
	loff_t off = 0;

	list_for_each_entry(p, &od->cache->entries, l_node) {
		if (p->is_cursor)
			continue;
		if (off >= pos)
			break;
		off++;
	}
	/*
	 * Insert the cursor just before the entry found; if the loop ran
	 * off the end, &p->l_node is the list head, putting the cursor
	 * at the tail.
	 */
	list_move_tail(&od->cursor.l_node, &p->l_node);
}
310
311 static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
312 {
313 int res;
314 struct ovl_dir_cache *cache;
315
316 cache = ovl_dir_cache(dentry);
317 if (cache && ovl_dentry_version_get(dentry) == cache->version) {
318 cache->refcount++;
319 return cache;
320 }
321 ovl_set_dir_cache(dentry, NULL);
322
323 cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
324 if (!cache)
325 return ERR_PTR(-ENOMEM);
326
327 cache->refcount = 1;
328 INIT_LIST_HEAD(&cache->entries);
329
330 res = ovl_dir_read_merged(dentry, &cache->entries);
331 if (res) {
332 ovl_cache_free(&cache->entries);
333 kfree(cache);
334 return ERR_PTR(res);
335 }
336
337 cache->version = ovl_dentry_version_get(dentry);
338 ovl_set_dir_cache(dentry, cache);
339
340 return cache;
341 }
342
/*
 * ->iterate() for overlay directories.
 *
 * Non-merged directories delegate straight to the real directory file.
 * Merged directories are emitted from the entry cache by walking this
 * file's cursor through the list: other readers' cursors are skipped
 * entirely, and whiteouts are not emitted but still advance ctx->pos so
 * offsets stay stable.
 */
static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;

	if (!ctx->pos)
		ovl_dir_reset(file);

	if (od->is_real)
		return iterate_dir(od->realfile, ctx);

	/* build (or re-reference) the merged cache on first use */
	if (!od->cache) {
		struct ovl_dir_cache *cache;

		cache = ovl_cache_get(dentry);
		if (IS_ERR(cache))
			return PTR_ERR(cache);

		od->cache = cache;
		ovl_seek_cursor(od, ctx->pos);
	}

	while (od->cursor.l_node.next != &od->cache->entries) {
		struct ovl_cache_entry *p;

		p = list_entry(od->cursor.l_node.next, struct ovl_cache_entry, l_node);
		/* Skip cursors */
		if (!p->is_cursor) {
			if (!p->is_whiteout) {
				/* stop without advancing if the buffer is full */
				if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
					break;
			}
			ctx->pos++;
		}
		list_move(&od->cursor.l_node, &p->l_node);
	}
	return 0;
}
381
/*
 * ->llseek() for overlay directories.  Real directories delegate to the
 * backing file and mirror its f_pos.  Merged directories treat the
 * offset as an index into the merged entry list and reposition the
 * cursor; only SEEK_SET and SEEK_CUR are supported in that case.
 */
static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t res;
	struct ovl_dir_file *od = file->private_data;

	mutex_lock(&file_inode(file)->i_mutex);
	if (!file->f_pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		res = vfs_llseek(od->realfile, offset, origin);
		file->f_pos = od->realfile->f_pos;
	} else {
		res = -EINVAL;

		switch (origin) {
		case SEEK_CUR:
			offset += file->f_pos;
			break;
		case SEEK_SET:
			break;
		default:
			goto out_unlock;
		}
		if (offset < 0)
			goto out_unlock;

		if (offset != file->f_pos) {
			file->f_pos = offset;
			/* the cursor only exists once the cache does */
			if (od->cache)
				ovl_seek_cursor(od, offset);
		}
		res = offset;
	}
out_unlock:
	mutex_unlock(&file_inode(file)->i_mutex);

	return res;
}
421
/*
 * ->fsync() for overlay directories.
 *
 * Normally syncs the real directory file opened at open time.  If the
 * directory started out lower but has since been copied up, an upper
 * directory file is opened lazily — at most once, raced under i_mutex —
 * and the sync targets that instead.  Returns 0 or a negative error.
 */
static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct file *realfile = od->realfile;

	/*
	 * Need to check if we started out being a lower dir, but got copied up
	 */
	if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
		struct inode *inode = file_inode(file);

		/* lockless fast path: another fsync may have set upperfile */
		realfile = lockless_dereference(od->upperfile);
		if (!realfile) {
			struct path upperpath;

			ovl_path_upper(dentry, &upperpath);
			realfile = ovl_path_open(&upperpath, O_RDONLY);
			/* ordering before taking i_mutex — pairs with the
			 * lockless read above; NOTE(review): confirm intent */
			smp_mb__before_spinlock();
			mutex_lock(&inode->i_mutex);
			if (!od->upperfile) {
				if (IS_ERR(realfile)) {
					mutex_unlock(&inode->i_mutex);
					return PTR_ERR(realfile);
				}
				od->upperfile = realfile;
			} else {
				/* somebody has beaten us to it */
				if (!IS_ERR(realfile))
					fput(realfile);
				realfile = od->upperfile;
			}
			mutex_unlock(&inode->i_mutex);
		}
	}

	return vfs_fsync_range(realfile, start, end, datasync);
}
461
/*
 * ->release() for overlay directories: drop the cache reference (under
 * i_mutex, matching the other cache users) and close the backing files.
 */
static int ovl_dir_release(struct inode *inode, struct file *file)
{
	struct ovl_dir_file *od = file->private_data;

	if (od->cache) {
		mutex_lock(&inode->i_mutex);
		ovl_cache_put(od, file->f_path.dentry);
		mutex_unlock(&inode->i_mutex);
	}
	fput(od->realfile);
	/* upperfile is only opened lazily by ovl_dir_fsync() */
	if (od->upperfile)
		fput(od->upperfile);
	kfree(od);

	return 0;
}
478
479 static int ovl_dir_open(struct inode *inode, struct file *file)
480 {
481 struct path realpath;
482 struct file *realfile;
483 struct ovl_dir_file *od;
484 enum ovl_path_type type;
485
486 od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
487 if (!od)
488 return -ENOMEM;
489
490 type = ovl_path_real(file->f_path.dentry, &realpath);
491 realfile = ovl_path_open(&realpath, file->f_flags);
492 if (IS_ERR(realfile)) {
493 kfree(od);
494 return PTR_ERR(realfile);
495 }
496 INIT_LIST_HEAD(&od->cursor.l_node);
497 od->realfile = realfile;
498 od->is_real = !OVL_TYPE_MERGE(type);
499 od->is_upper = OVL_TYPE_UPPER(type);
500 od->cursor.is_cursor = true;
501 file->private_data = od;
502
503 return 0;
504 }
505
/* File operations for overlay directories. */
const struct file_operations ovl_dir_operations = {
	.read = generic_read_dir,
	.open = ovl_dir_open,
	.iterate = ovl_iterate,
	.llseek = ovl_dir_llseek,
	.fsync = ovl_dir_fsync,
	.release = ovl_dir_release,
};
514
515 int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
516 {
517 int err;
518 struct ovl_cache_entry *p;
519
520 err = ovl_dir_read_merged(dentry, list);
521 if (err)
522 return err;
523
524 err = 0;
525
526 list_for_each_entry(p, list, l_node) {
527 if (p->is_whiteout)
528 continue;
529
530 if (p->name[0] == '.') {
531 if (p->len == 1)
532 continue;
533 if (p->len == 2 && p->name[1] == '.')
534 continue;
535 }
536 err = -ENOTEMPTY;
537 break;
538 }
539
540 return err;
541 }
542
/*
 * Remove from the upper directory @upper every entry on @list that was
 * marked as a whiteout.  Lookup failures are logged and skipped, making
 * the cleanup best-effort.  Takes @upper's i_mutex (nested, child class).
 */
void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
{
	struct ovl_cache_entry *p;

	mutex_lock_nested(&upper->d_inode->i_mutex, I_MUTEX_CHILD);
	list_for_each_entry(p, list, l_node) {
		struct dentry *dentry;

		if (!p->is_whiteout)
			continue;

		dentry = lookup_one_len(p->name, upper, p->len);
		if (IS_ERR(dentry)) {
			pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
			       upper->d_name.name, p->len, p->name,
			       (int) PTR_ERR(dentry));
			continue;
		}
		ovl_cleanup(upper->d_inode, dentry);
		dput(dentry);
	}
	mutex_unlock(&upper->d_inode->i_mutex);
}