nfs: fix xattr inode op pointers when disabled
[deliverable/linux.git] / fs / namei.c
1 /*
2 * linux/fs/namei.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7 /*
8 * Some corrections by tytso.
9 */
10
11 /* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname
12 * lookup logic.
13 */
14 /* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture.
15 */
16
17 #include <linux/init.h>
18 #include <linux/export.h>
19 #include <linux/kernel.h>
20 #include <linux/slab.h>
21 #include <linux/fs.h>
22 #include <linux/namei.h>
23 #include <linux/pagemap.h>
24 #include <linux/fsnotify.h>
25 #include <linux/personality.h>
26 #include <linux/security.h>
27 #include <linux/ima.h>
28 #include <linux/syscalls.h>
29 #include <linux/mount.h>
30 #include <linux/audit.h>
31 #include <linux/capability.h>
32 #include <linux/file.h>
33 #include <linux/fcntl.h>
34 #include <linux/device_cgroup.h>
35 #include <linux/fs_struct.h>
36 #include <linux/posix_acl.h>
37 #include <asm/uaccess.h>
38
39 #include "internal.h"
40 #include "mount.h"
41
42 /* [Feb-1997 T. Schoebel-Theuer]
43 * Fundamental changes in the pathname lookup mechanisms (namei)
44 * were necessary because of omirr. The reason is that omirr needs
45 * to know the _real_ pathname, not the user-supplied one, in case
46 * of symlinks (and also when transname replacements occur).
47 *
48 * The new code replaces the old recursive symlink resolution with
49 * an iterative one (in case of non-nested symlink chains). It does
50 * this with calls to <fs>_follow_link().
51 * As a side effect, dir_namei(), _namei() and follow_link() are now
52 * replaced with a single function lookup_dentry() that can handle all
53 * the special cases of the former code.
54 *
55 * With the new dcache, the pathname is stored at each inode, at least as
56 * long as the refcount of the inode is positive. As a side effect, the
57 * size of the dcache depends on the inode cache and thus is dynamic.
58 *
59 * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink
60 * resolution to correspond with current state of the code.
61 *
62 * Note that the symlink resolution is not *completely* iterative.
63 * There is still a significant amount of tail- and mid- recursion in
64 * the algorithm. Also, note that <fs>_readlink() is not used in
65 * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink()
66 * may return different results than <fs>_follow_link(). Many virtual
67 * filesystems (including /proc) exhibit this behavior.
68 */
69
70 /* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation:
71 * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL
72 * and the name already exists in form of a symlink, try to create the new
73 * name indicated by the symlink. The old code always complained that the
74 * name already exists, due to not following the symlink even if its target
75 * is nonexistent. The new semantics affects also mknod() and link() when
76 * the name is a symlink pointing to a non-existent name.
77 *
78 * I don't know which semantics is the right one, since I have no access
79 * to standards. But I found by trial that HP-UX 9.0 has the full "new"
80 * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the
81 * "old" one. Personally, I think the new semantics is much more logical.
82 * Note that "ln old new" where "new" is a symlink pointing to a non-existing
83 * file does succeed in both HP-UX and SunOs, but not in Solaris
84 * and in the old Linux semantics.
85 */
86
87 /* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink
88 * semantics. See the comments in "open_namei" and "do_link" below.
89 *
90 * [10-Sep-98 Alan Modra] Another symlink change.
91 */
92
93 /* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks:
94 * inside the path - always follow.
95 * in the last component in creation/removal/renaming - never follow.
96 * if LOOKUP_FOLLOW passed - follow.
97 * if the pathname has trailing slashes - follow.
98 * otherwise - don't follow.
99 * (applied in that order).
100 *
101 * [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT
102 * restored for 2.4. This is the last surviving part of old 4.2BSD bug.
103 * During the 2.4 we need to fix the userland stuff depending on it -
104 * hopefully we will be able to get rid of that wart in 2.5. So far only
105 * XEmacs seems to be relying on it...
106 */
107 /*
108 * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
109 * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives
110 * any extra contention...
111 */
112
113 /* In order to reduce some races, while at the same time doing additional
114 * checking and hopefully speeding things up, we copy filenames to the
115 * kernel data space before using them..
116 *
117 * POSIX.1 2.4: an empty pathname is invalid (ENOENT).
118 * PATH_MAX includes the nul terminator --RR.
119 */
120 void final_putname(struct filename *name)
121 {
122 if (name->separate) {
123 __putname(name->name);
124 kfree(name);
125 } else {
126 __putname(name);
127 }
128 }
129
130 #define EMBEDDED_NAME_MAX (PATH_MAX - sizeof(struct filename))
131
132 static struct filename *
133 getname_flags(const char __user *filename, int flags, int *empty)
134 {
135 struct filename *result, *err;
136 int len;
137 long max;
138 char *kname;
139
140 result = audit_reusename(filename);
141 if (result)
142 return result;
143
144 result = __getname();
145 if (unlikely(!result))
146 return ERR_PTR(-ENOMEM);
147
148 /*
149 * First, try to embed the struct filename inside the names_cache
150 * allocation
151 */
152 kname = (char *)result + sizeof(*result);
153 result->name = kname;
154 result->separate = false;
155 max = EMBEDDED_NAME_MAX;
156
157 recopy:
158 len = strncpy_from_user(kname, filename, max);
159 if (unlikely(len < 0)) {
160 err = ERR_PTR(len);
161 goto error;
162 }
163
164 /*
165 * Uh-oh. We have a name that's approaching PATH_MAX. Allocate a
166 * separate struct filename so we can dedicate the entire
167 * names_cache allocation for the pathname, and re-do the copy from
168 * userland.
169 */
170 if (len == EMBEDDED_NAME_MAX && max == EMBEDDED_NAME_MAX) {
171 kname = (char *)result;
172
173 result = kzalloc(sizeof(*result), GFP_KERNEL);
174 if (!result) {
175 err = ERR_PTR(-ENOMEM);
176 result = (struct filename *)kname;
177 goto error;
178 }
179 result->name = kname;
180 result->separate = true;
181 max = PATH_MAX;
182 goto recopy;
183 }
184
185 /* The empty path is special. */
186 if (unlikely(!len)) {
187 if (empty)
188 *empty = 1;
189 err = ERR_PTR(-ENOENT);
190 if (!(flags & LOOKUP_EMPTY))
191 goto error;
192 }
193
194 err = ERR_PTR(-ENAMETOOLONG);
195 if (unlikely(len >= PATH_MAX))
196 goto error;
197
198 result->uptr = filename;
199 audit_getname(result);
200 return result;
201
202 error:
203 final_putname(result);
204 return err;
205 }
206
207 struct filename *
208 getname(const char __user * filename)
209 {
210 return getname_flags(filename, 0, NULL);
211 }
212 EXPORT_SYMBOL(getname);
213
214 #ifdef CONFIG_AUDITSYSCALL
215 void putname(struct filename *name)
216 {
217 if (unlikely(!audit_dummy_context()))
218 return audit_putname(name);
219 final_putname(name);
220 }
221 #endif
222
223 static int check_acl(struct inode *inode, int mask)
224 {
225 #ifdef CONFIG_FS_POSIX_ACL
226 struct posix_acl *acl;
227
228 if (mask & MAY_NOT_BLOCK) {
229 acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
230 if (!acl)
231 return -EAGAIN;
232 /* no ->get_acl() calls in RCU mode... */
233 if (acl == ACL_NOT_CACHED)
234 return -ECHILD;
235 return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
236 }
237
238 acl = get_acl(inode, ACL_TYPE_ACCESS);
239 if (IS_ERR(acl))
240 return PTR_ERR(acl);
241 if (acl) {
242 int error = posix_acl_permission(inode, acl, mask);
243 posix_acl_release(acl);
244 return error;
245 }
246 #endif
247
248 return -EAGAIN;
249 }
250
251 /*
252 * This does the basic permission checking
253 */
254 static int acl_permission_check(struct inode *inode, int mask)
255 {
256 unsigned int mode = inode->i_mode;
257
258 if (likely(uid_eq(current_fsuid(), inode->i_uid)))
259 mode >>= 6;
260 else {
261 if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
262 int error = check_acl(inode, mask);
263 if (error != -EAGAIN)
264 return error;
265 }
266
267 if (in_group_p(inode->i_gid))
268 mode >>= 3;
269 }
270
271 /*
272 * If the DACs are ok we don't need any capability check.
273 */
274 if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
275 return 0;
276 return -EACCES;
277 }
278
279 /**
280 * generic_permission - check for access rights on a Posix-like filesystem
281 * @inode: inode to check access rights for
282 * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
283 *
284 * Used to check for read/write/execute permissions on a file.
285 * We use "fsuid" for this, letting us set arbitrary permissions
286 * for filesystem access without changing the "normal" uids which
287 * are used for other things.
288 *
289 * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk
290 * request cannot be satisfied (eg. requires blocking or too much complexity).
291 * It would then be called again in ref-walk mode.
292 */
293 int generic_permission(struct inode *inode, int mask)
294 {
295 int ret;
296
297 /*
298 * Do the basic permission checks.
299 */
300 ret = acl_permission_check(inode, mask);
301 if (ret != -EACCES)
302 return ret;
303
304 if (S_ISDIR(inode->i_mode)) {
305 /* DACs are overridable for directories */
306 if (inode_capable(inode, CAP_DAC_OVERRIDE))
307 return 0;
308 if (!(mask & MAY_WRITE))
309 if (inode_capable(inode, CAP_DAC_READ_SEARCH))
310 return 0;
311 return -EACCES;
312 }
313 /*
314 * Read/write DACs are always overridable.
315 * Executable DACs are overridable when there is
316 * at least one exec bit set.
317 */
318 if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
319 if (inode_capable(inode, CAP_DAC_OVERRIDE))
320 return 0;
321
322 /*
323 * Searching includes executable on directories, else just read.
324 */
325 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
326 if (mask == MAY_READ)
327 if (inode_capable(inode, CAP_DAC_READ_SEARCH))
328 return 0;
329
330 return -EACCES;
331 }
332
333 /*
334 * We _really_ want to just do "generic_permission()" without
335 * even looking at the inode->i_op values. So we keep a cache
336 * flag in inode->i_opflags, that says "this has not special
337 * permission function, use the fast case".
338 */
339 static inline int do_inode_permission(struct inode *inode, int mask)
340 {
341 if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
342 if (likely(inode->i_op->permission))
343 return inode->i_op->permission(inode, mask);
344
345 /* This gets set once for the inode lifetime */
346 spin_lock(&inode->i_lock);
347 inode->i_opflags |= IOP_FASTPERM;
348 spin_unlock(&inode->i_lock);
349 }
350 return generic_permission(inode, mask);
351 }
352
353 /**
354 * __inode_permission - Check for access rights to a given inode
355 * @inode: Inode to check permission on
356 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
357 *
358 * Check for read/write/execute permissions on an inode.
359 *
360 * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
361 *
362 * This does not check for a read-only file system. You probably want
363 * inode_permission().
364 */
365 int __inode_permission(struct inode *inode, int mask)
366 {
367 int retval;
368
369 if (unlikely(mask & MAY_WRITE)) {
370 /*
371 * Nobody gets write access to an immutable file.
372 */
373 if (IS_IMMUTABLE(inode))
374 return -EACCES;
375 }
376
377 retval = do_inode_permission(inode, mask);
378 if (retval)
379 return retval;
380
381 retval = devcgroup_inode_permission(inode, mask);
382 if (retval)
383 return retval;
384
385 return security_inode_permission(inode, mask);
386 }
387
388 /**
389 * sb_permission - Check superblock-level permissions
390 * @sb: Superblock of inode to check permission on
391 * @inode: Inode to check permission on
392 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
393 *
394 * Separate out file-system wide checks from inode-specific permission checks.
395 */
396 static int sb_permission(struct super_block *sb, struct inode *inode, int mask)
397 {
398 if (unlikely(mask & MAY_WRITE)) {
399 umode_t mode = inode->i_mode;
400
401 /* Nobody gets write access to a read-only fs. */
402 if ((sb->s_flags & MS_RDONLY) &&
403 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
404 return -EROFS;
405 }
406 return 0;
407 }
408
409 /**
410 * inode_permission - Check for access rights to a given inode
411 * @inode: Inode to check permission on
412 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
413 *
414 * Check for read/write/execute permissions on an inode. We use fs[ug]id for
415 * this, letting us set arbitrary permissions for filesystem access without
416 * changing the "normal" UIDs which are used for other things.
417 *
418 * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
419 */
420 int inode_permission(struct inode *inode, int mask)
421 {
422 int retval;
423
424 retval = sb_permission(inode->i_sb, inode, mask);
425 if (retval)
426 return retval;
427 return __inode_permission(inode, mask);
428 }
429
430 /**
431 * path_get - get a reference to a path
432 * @path: path to get the reference to
433 *
434 * Given a path increment the reference count to the dentry and the vfsmount.
435 */
436 void path_get(const struct path *path)
437 {
438 mntget(path->mnt);
439 dget(path->dentry);
440 }
441 EXPORT_SYMBOL(path_get);
442
443 /**
444 * path_put - put a reference to a path
445 * @path: path to put the reference to
446 *
447 * Given a path decrement the reference count to the dentry and the vfsmount.
448 */
449 void path_put(const struct path *path)
450 {
451 dput(path->dentry);
452 mntput(path->mnt);
453 }
454 EXPORT_SYMBOL(path_put);
455
456 /*
457 * Path walking has 2 modes, rcu-walk and ref-walk (see
458 * Documentation/filesystems/path-lookup.txt). In situations when we can't
459 * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
460 * normal reference counts on dentries and vfsmounts to transition to rcu-walk
461 * mode. Refcounts are grabbed at the last known good point before rcu-walk
462 * got stuck, so ref-walk may continue from there. If this is not successful
463 * (eg. a seqcount has changed), then failure is returned and it's up to caller
464 * to restart the path walk from the beginning in ref-walk mode.
465 */
466
467 /**
468 * unlazy_walk - try to switch to ref-walk mode.
469 * @nd: nameidata pathwalk data
470 * @dentry: child of nd->path.dentry or NULL
471 * Returns: 0 on success, -ECHILD on failure
472 *
473 * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry
474 * for ref-walk mode. @dentry must be a path found by a do_lookup call on
475 * @nd or NULL. Must be called from rcu-walk context.
476 */
477 static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
478 {
479 struct fs_struct *fs = current->fs;
480 struct dentry *parent = nd->path.dentry;
481
482 BUG_ON(!(nd->flags & LOOKUP_RCU));
483
484 /*
485 * After legitimizing the bastards, terminate_walk()
486 * will do the right thing for non-RCU mode, and all our
487 * subsequent exit cases should rcu_read_unlock()
488 * before returning. Do vfsmount first; if dentry
489 * can't be legitimized, just set nd->path.dentry to NULL
490 * and rely on dput(NULL) being a no-op.
491 */
492 if (!legitimize_mnt(nd->path.mnt, nd->m_seq))
493 return -ECHILD;
494 nd->flags &= ~LOOKUP_RCU;
495
496 if (!lockref_get_not_dead(&parent->d_lockref)) {
497 nd->path.dentry = NULL;
498 goto out;
499 }
500
501 /*
502 * For a negative lookup, the lookup sequence point is the parents
503 * sequence point, and it only needs to revalidate the parent dentry.
504 *
505 * For a positive lookup, we need to move both the parent and the
506 * dentry from the RCU domain to be properly refcounted. And the
507 * sequence number in the dentry validates *both* dentry counters,
508 * since we checked the sequence number of the parent after we got
509 * the child sequence number. So we know the parent must still
510 * be valid if the child sequence number is still valid.
511 */
512 if (!dentry) {
513 if (read_seqcount_retry(&parent->d_seq, nd->seq))
514 goto out;
515 BUG_ON(nd->inode != parent->d_inode);
516 } else {
517 if (!lockref_get_not_dead(&dentry->d_lockref))
518 goto out;
519 if (read_seqcount_retry(&dentry->d_seq, nd->seq))
520 goto drop_dentry;
521 }
522
523 /*
524 * Sequence counts matched. Now make sure that the root is
525 * still valid and get it if required.
526 */
527 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
528 spin_lock(&fs->lock);
529 if (nd->root.mnt != fs->root.mnt || nd->root.dentry != fs->root.dentry)
530 goto unlock_and_drop_dentry;
531 path_get(&nd->root);
532 spin_unlock(&fs->lock);
533 }
534
535 rcu_read_unlock();
536 return 0;
537
538 unlock_and_drop_dentry:
539 spin_unlock(&fs->lock);
540 drop_dentry:
541 rcu_read_unlock();
542 dput(dentry);
543 goto drop_root_mnt;
544 out:
545 rcu_read_unlock();
546 drop_root_mnt:
547 if (!(nd->flags & LOOKUP_ROOT))
548 nd->root.mnt = NULL;
549 return -ECHILD;
550 }
551
552 static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
553 {
554 return dentry->d_op->d_revalidate(dentry, flags);
555 }
556
557 /**
558 * complete_walk - successful completion of path walk
559 * @nd: pointer nameidata
560 *
561 * If we had been in RCU mode, drop out of it and legitimize nd->path.
562 * Revalidate the final result, unless we'd already done that during
563 * the path walk or the filesystem doesn't ask for it. Return 0 on
564 * success, -error on failure. In case of failure caller does not
565 * need to drop nd->path.
566 */
567 static int complete_walk(struct nameidata *nd)
568 {
569 struct dentry *dentry = nd->path.dentry;
570 int status;
571
572 if (nd->flags & LOOKUP_RCU) {
573 nd->flags &= ~LOOKUP_RCU;
574 if (!(nd->flags & LOOKUP_ROOT))
575 nd->root.mnt = NULL;
576
577 if (!legitimize_mnt(nd->path.mnt, nd->m_seq)) {
578 rcu_read_unlock();
579 return -ECHILD;
580 }
581 if (unlikely(!lockref_get_not_dead(&dentry->d_lockref))) {
582 rcu_read_unlock();
583 mntput(nd->path.mnt);
584 return -ECHILD;
585 }
586 if (read_seqcount_retry(&dentry->d_seq, nd->seq)) {
587 rcu_read_unlock();
588 dput(dentry);
589 mntput(nd->path.mnt);
590 return -ECHILD;
591 }
592 rcu_read_unlock();
593 }
594
595 if (likely(!(nd->flags & LOOKUP_JUMPED)))
596 return 0;
597
598 if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE)))
599 return 0;
600
601 status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
602 if (status > 0)
603 return 0;
604
605 if (!status)
606 status = -ESTALE;
607
608 path_put(&nd->path);
609 return status;
610 }
611
612 static __always_inline void set_root(struct nameidata *nd)
613 {
614 if (!nd->root.mnt)
615 get_fs_root(current->fs, &nd->root);
616 }
617
618 static int link_path_walk(const char *, struct nameidata *);
619
620 static __always_inline void set_root_rcu(struct nameidata *nd)
621 {
622 if (!nd->root.mnt) {
623 struct fs_struct *fs = current->fs;
624 unsigned seq;
625
626 do {
627 seq = read_seqcount_begin(&fs->seq);
628 nd->root = fs->root;
629 nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
630 } while (read_seqcount_retry(&fs->seq, seq));
631 }
632 }
633
634 static void path_put_conditional(struct path *path, struct nameidata *nd)
635 {
636 dput(path->dentry);
637 if (path->mnt != nd->path.mnt)
638 mntput(path->mnt);
639 }
640
641 static inline void path_to_nameidata(const struct path *path,
642 struct nameidata *nd)
643 {
644 if (!(nd->flags & LOOKUP_RCU)) {
645 dput(nd->path.dentry);
646 if (nd->path.mnt != path->mnt)
647 mntput(nd->path.mnt);
648 }
649 nd->path.mnt = path->mnt;
650 nd->path.dentry = path->dentry;
651 }
652
653 /*
654 * Helper to directly jump to a known parsed path from ->follow_link,
655 * caller must have taken a reference to path beforehand.
656 */
657 void nd_jump_link(struct nameidata *nd, struct path *path)
658 {
659 path_put(&nd->path);
660
661 nd->path = *path;
662 nd->inode = nd->path.dentry->d_inode;
663 nd->flags |= LOOKUP_JUMPED;
664 }
665
666 static inline void put_link(struct nameidata *nd, struct path *link, void *cookie)
667 {
668 struct inode *inode = link->dentry->d_inode;
669 if (inode->i_op->put_link)
670 inode->i_op->put_link(link->dentry, nd, cookie);
671 path_put(link);
672 }
673
674 int sysctl_protected_symlinks __read_mostly = 0;
675 int sysctl_protected_hardlinks __read_mostly = 0;
676
677 /**
678 * may_follow_link - Check symlink following for unsafe situations
679 * @link: The path of the symlink
680 * @nd: nameidata pathwalk data
681 *
682 * In the case of the sysctl_protected_symlinks sysctl being enabled,
683 * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
684 * in a sticky world-writable directory. This is to protect privileged
685 * processes from failing races against path names that may change out
686 * from under them by way of other users creating malicious symlinks.
687 * It will permit symlinks to be followed only when outside a sticky
688 * world-writable directory, or when the uid of the symlink and follower
689 * match, or when the directory owner matches the symlink's owner.
690 *
691 * Returns 0 if following the symlink is allowed, -ve on error.
692 */
693 static inline int may_follow_link(struct path *link, struct nameidata *nd)
694 {
695 const struct inode *inode;
696 const struct inode *parent;
697
698 if (!sysctl_protected_symlinks)
699 return 0;
700
701 /* Allowed if owner and follower match. */
702 inode = link->dentry->d_inode;
703 if (uid_eq(current_cred()->fsuid, inode->i_uid))
704 return 0;
705
706 /* Allowed if parent directory not sticky and world-writable. */
707 parent = nd->path.dentry->d_inode;
708 if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
709 return 0;
710
711 /* Allowed if parent directory and link owner match. */
712 if (uid_eq(parent->i_uid, inode->i_uid))
713 return 0;
714
715 audit_log_link_denied("follow_link", link);
716 path_put_conditional(link, nd);
717 path_put(&nd->path);
718 return -EACCES;
719 }
720
721 /**
722 * safe_hardlink_source - Check for safe hardlink conditions
723 * @inode: the source inode to hardlink from
724 *
725 * Return false if at least one of the following conditions:
726 * - inode is not a regular file
727 * - inode is setuid
728 * - inode is setgid and group-exec
729 * - access failure for read and write
730 *
731 * Otherwise returns true.
732 */
733 static bool safe_hardlink_source(struct inode *inode)
734 {
735 umode_t mode = inode->i_mode;
736
737 /* Special files should not get pinned to the filesystem. */
738 if (!S_ISREG(mode))
739 return false;
740
741 /* Setuid files should not get pinned to the filesystem. */
742 if (mode & S_ISUID)
743 return false;
744
745 /* Executable setgid files should not get pinned to the filesystem. */
746 if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
747 return false;
748
749 /* Hardlinking to unreadable or unwritable sources is dangerous. */
750 if (inode_permission(inode, MAY_READ | MAY_WRITE))
751 return false;
752
753 return true;
754 }
755
756 /**
757 * may_linkat - Check permissions for creating a hardlink
758 * @link: the source to hardlink from
759 *
760 * Block hardlink when all of:
761 * - sysctl_protected_hardlinks enabled
762 * - fsuid does not match inode
763 * - hardlink source is unsafe (see safe_hardlink_source() above)
764 * - not CAP_FOWNER
765 *
766 * Returns 0 if successful, -ve on error.
767 */
768 static int may_linkat(struct path *link)
769 {
770 const struct cred *cred;
771 struct inode *inode;
772
773 if (!sysctl_protected_hardlinks)
774 return 0;
775
776 cred = current_cred();
777 inode = link->dentry->d_inode;
778
779 /* Source inode owner (or CAP_FOWNER) can hardlink all they like,
780 * otherwise, it must be a safe source.
781 */
782 if (uid_eq(cred->fsuid, inode->i_uid) || safe_hardlink_source(inode) ||
783 capable(CAP_FOWNER))
784 return 0;
785
786 audit_log_link_denied("linkat", link);
787 return -EPERM;
788 }
789
790 static __always_inline int
791 follow_link(struct path *link, struct nameidata *nd, void **p)
792 {
793 struct dentry *dentry = link->dentry;
794 int error;
795 char *s;
796
797 BUG_ON(nd->flags & LOOKUP_RCU);
798
799 if (link->mnt == nd->path.mnt)
800 mntget(link->mnt);
801
802 error = -ELOOP;
803 if (unlikely(current->total_link_count >= 40))
804 goto out_put_nd_path;
805
806 cond_resched();
807 current->total_link_count++;
808
809 touch_atime(link);
810 nd_set_link(nd, NULL);
811
812 error = security_inode_follow_link(link->dentry, nd);
813 if (error)
814 goto out_put_nd_path;
815
816 nd->last_type = LAST_BIND;
817 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
818 error = PTR_ERR(*p);
819 if (IS_ERR(*p))
820 goto out_put_nd_path;
821
822 error = 0;
823 s = nd_get_link(nd);
824 if (s) {
825 if (unlikely(IS_ERR(s))) {
826 path_put(&nd->path);
827 put_link(nd, link, *p);
828 return PTR_ERR(s);
829 }
830 if (*s == '/') {
831 set_root(nd);
832 path_put(&nd->path);
833 nd->path = nd->root;
834 path_get(&nd->root);
835 nd->flags |= LOOKUP_JUMPED;
836 }
837 nd->inode = nd->path.dentry->d_inode;
838 error = link_path_walk(s, nd);
839 if (unlikely(error))
840 put_link(nd, link, *p);
841 }
842
843 return error;
844
845 out_put_nd_path:
846 *p = NULL;
847 path_put(&nd->path);
848 path_put(link);
849 return error;
850 }
851
852 static int follow_up_rcu(struct path *path)
853 {
854 struct mount *mnt = real_mount(path->mnt);
855 struct mount *parent;
856 struct dentry *mountpoint;
857
858 parent = mnt->mnt_parent;
859 if (&parent->mnt == path->mnt)
860 return 0;
861 mountpoint = mnt->mnt_mountpoint;
862 path->dentry = mountpoint;
863 path->mnt = &parent->mnt;
864 return 1;
865 }
866
867 /*
868 * follow_up - Find the mountpoint of path's vfsmount
869 *
870 * Given a path, find the mountpoint of its source file system.
871 * Replace @path with the path of the mountpoint in the parent mount.
872 * Up is towards /.
873 *
874 * Return 1 if we went up a level and 0 if we were already at the
875 * root.
876 */
877 int follow_up(struct path *path)
878 {
879 struct mount *mnt = real_mount(path->mnt);
880 struct mount *parent;
881 struct dentry *mountpoint;
882
883 read_seqlock_excl(&mount_lock);
884 parent = mnt->mnt_parent;
885 if (parent == mnt) {
886 read_sequnlock_excl(&mount_lock);
887 return 0;
888 }
889 mntget(&parent->mnt);
890 mountpoint = dget(mnt->mnt_mountpoint);
891 read_sequnlock_excl(&mount_lock);
892 dput(path->dentry);
893 path->dentry = mountpoint;
894 mntput(path->mnt);
895 path->mnt = &parent->mnt;
896 return 1;
897 }
898
899 /*
900 * Perform an automount
901 * - return -EISDIR to tell follow_managed() to stop and return the path we
902 * were called with.
903 */
904 static int follow_automount(struct path *path, unsigned flags,
905 bool *need_mntput)
906 {
907 struct vfsmount *mnt;
908 int err;
909
910 if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
911 return -EREMOTE;
912
913 /* We don't want to mount if someone's just doing a stat -
914 * unless they're stat'ing a directory and appended a '/' to
915 * the name.
916 *
917 * We do, however, want to mount if someone wants to open or
918 * create a file of any type under the mountpoint, wants to
919 * traverse through the mountpoint or wants to open the
920 * mounted directory. Also, autofs may mark negative dentries
921 * as being automount points. These will need the attentions
922 * of the daemon to instantiate them before they can be used.
923 */
924 if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
925 LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
926 path->dentry->d_inode)
927 return -EISDIR;
928
929 current->total_link_count++;
930 if (current->total_link_count >= 40)
931 return -ELOOP;
932
933 mnt = path->dentry->d_op->d_automount(path);
934 if (IS_ERR(mnt)) {
935 /*
936 * The filesystem is allowed to return -EISDIR here to indicate
937 * it doesn't want to automount. For instance, autofs would do
938 * this so that its userspace daemon can mount on this dentry.
939 *
940 * However, we can only permit this if it's a terminal point in
941 * the path being looked up; if it wasn't then the remainder of
942 * the path is inaccessible and we should say so.
943 */
944 if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_PARENT))
945 return -EREMOTE;
946 return PTR_ERR(mnt);
947 }
948
949 if (!mnt) /* mount collision */
950 return 0;
951
952 if (!*need_mntput) {
953 /* lock_mount() may release path->mnt on error */
954 mntget(path->mnt);
955 *need_mntput = true;
956 }
957 err = finish_automount(mnt, path);
958
959 switch (err) {
960 case -EBUSY:
961 /* Someone else made a mount here whilst we were busy */
962 return 0;
963 case 0:
964 path_put(path);
965 path->mnt = mnt;
966 path->dentry = dget(mnt->mnt_root);
967 return 0;
968 default:
969 return err;
970 }
971
972 }
973
974 /*
975 * Handle a dentry that is managed in some way.
976 * - Flagged for transit management (autofs)
977 * - Flagged as mountpoint
978 * - Flagged as automount point
979 *
980 * This may only be called in refwalk mode.
981 *
982 * Serialization is taken care of in namespace.c
983 */
984 static int follow_managed(struct path *path, unsigned flags)
985 {
986 struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
987 unsigned managed;
988 bool need_mntput = false;
989 int ret = 0;
990
991 /* Given that we're not holding a lock here, we retain the value in a
992 * local variable for each dentry as we look at it so that we don't see
993 * the components of that value change under us */
994 while (managed = ACCESS_ONCE(path->dentry->d_flags),
995 managed &= DCACHE_MANAGED_DENTRY,
996 unlikely(managed != 0)) {
997 /* Allow the filesystem to manage the transit without i_mutex
998 * being held. */
999 if (managed & DCACHE_MANAGE_TRANSIT) {
1000 BUG_ON(!path->dentry->d_op);
1001 BUG_ON(!path->dentry->d_op->d_manage);
1002 ret = path->dentry->d_op->d_manage(path->dentry, false);
1003 if (ret < 0)
1004 break;
1005 }
1006
1007 /* Transit to a mounted filesystem. */
1008 if (managed & DCACHE_MOUNTED) {
1009 struct vfsmount *mounted = lookup_mnt(path);
1010 if (mounted) {
1011 dput(path->dentry);
1012 if (need_mntput)
1013 mntput(path->mnt);
1014 path->mnt = mounted;
1015 path->dentry = dget(mounted->mnt_root);
1016 need_mntput = true;
1017 continue;
1018 }
1019
1020 /* Something is mounted on this dentry in another
1021 * namespace and/or whatever was mounted there in this
1022 * namespace got unmounted before lookup_mnt() could
1023 * get it */
1024 }
1025
1026 /* Handle an automount point */
1027 if (managed & DCACHE_NEED_AUTOMOUNT) {
1028 ret = follow_automount(path, flags, &need_mntput);
1029 if (ret < 0)
1030 break;
1031 continue;
1032 }
1033
1034 /* We didn't change the current path point */
1035 break;
1036 }
1037
1038 if (need_mntput && path->mnt == mnt)
1039 mntput(path->mnt);
1040 if (ret == -EISDIR)
1041 ret = 0;
1042 return ret < 0 ? ret : need_mntput;
1043 }
1044
1045 int follow_down_one(struct path *path)
1046 {
1047 struct vfsmount *mounted;
1048
1049 mounted = lookup_mnt(path);
1050 if (mounted) {
1051 dput(path->dentry);
1052 mntput(path->mnt);
1053 path->mnt = mounted;
1054 path->dentry = dget(mounted->mnt_root);
1055 return 1;
1056 }
1057 return 0;
1058 }
1059
1060 static inline bool managed_dentry_might_block(struct dentry *dentry)
1061 {
1062 return (dentry->d_flags & DCACHE_MANAGE_TRANSIT &&
1063 dentry->d_op->d_manage(dentry, true) < 0);
1064 }
1065
1066 /*
1067 * Try to skip to top of mountpoint pile in rcuwalk mode. Fail if
1068 * we meet a managed dentry that would need blocking.
1069 */
1070 static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
1071 struct inode **inode)
1072 {
1073 for (;;) {
1074 struct mount *mounted;
1075 /*
1076 * Don't forget we might have a non-mountpoint managed dentry
1077 * that wants to block transit.
1078 */
1079 if (unlikely(managed_dentry_might_block(path->dentry)))
1080 return false;
1081
1082 if (!d_mountpoint(path->dentry))
1083 break;
1084
1085 mounted = __lookup_mnt(path->mnt, path->dentry);
1086 if (!mounted)
1087 break;
1088 path->mnt = &mounted->mnt;
1089 path->dentry = mounted->mnt.mnt_root;
1090 nd->flags |= LOOKUP_JUMPED;
1091 nd->seq = read_seqcount_begin(&path->dentry->d_seq);
1092 /*
1093 * Update the inode too. We don't need to re-check the
1094 * dentry sequence number here after this d_inode read,
1095 * because a mount-point is always pinned.
1096 */
1097 *inode = path->dentry->d_inode;
1098 }
1099 return true;
1100 }
1101
1102 static void follow_mount_rcu(struct nameidata *nd)
1103 {
1104 while (d_mountpoint(nd->path.dentry)) {
1105 struct mount *mounted;
1106 mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
1107 if (!mounted)
1108 break;
1109 nd->path.mnt = &mounted->mnt;
1110 nd->path.dentry = mounted->mnt.mnt_root;
1111 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
1112 }
1113 }
1114
1115 static int follow_dotdot_rcu(struct nameidata *nd)
1116 {
1117 set_root_rcu(nd);
1118
1119 while (1) {
1120 if (nd->path.dentry == nd->root.dentry &&
1121 nd->path.mnt == nd->root.mnt) {
1122 break;
1123 }
1124 if (nd->path.dentry != nd->path.mnt->mnt_root) {
1125 struct dentry *old = nd->path.dentry;
1126 struct dentry *parent = old->d_parent;
1127 unsigned seq;
1128
1129 seq = read_seqcount_begin(&parent->d_seq);
1130 if (read_seqcount_retry(&old->d_seq, nd->seq))
1131 goto failed;
1132 nd->path.dentry = parent;
1133 nd->seq = seq;
1134 break;
1135 }
1136 if (!follow_up_rcu(&nd->path))
1137 break;
1138 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
1139 }
1140 follow_mount_rcu(nd);
1141 nd->inode = nd->path.dentry->d_inode;
1142 return 0;
1143
1144 failed:
1145 nd->flags &= ~LOOKUP_RCU;
1146 if (!(nd->flags & LOOKUP_ROOT))
1147 nd->root.mnt = NULL;
1148 rcu_read_unlock();
1149 return -ECHILD;
1150 }
1151
1152 /*
1153 * Follow down to the covering mount currently visible to userspace. At each
1154 * point, the filesystem owning that dentry may be queried as to whether the
1155 * caller is permitted to proceed or not.
1156 */
1157 int follow_down(struct path *path)
1158 {
1159 unsigned managed;
1160 int ret;
1161
1162 while (managed = ACCESS_ONCE(path->dentry->d_flags),
1163 unlikely(managed & DCACHE_MANAGED_DENTRY)) {
1164 /* Allow the filesystem to manage the transit without i_mutex
1165 * being held.
1166 *
1167 * We indicate to the filesystem if someone is trying to mount
1168 * something here. This gives autofs the chance to deny anyone
1169 * other than its daemon the right to mount on its
1170 * superstructure.
1171 *
1172 * The filesystem may sleep at this point.
1173 */
1174 if (managed & DCACHE_MANAGE_TRANSIT) {
1175 BUG_ON(!path->dentry->d_op);
1176 BUG_ON(!path->dentry->d_op->d_manage);
1177 ret = path->dentry->d_op->d_manage(
1178 path->dentry, false);
1179 if (ret < 0)
1180 return ret == -EISDIR ? 0 : ret;
1181 }
1182
1183 /* Transit to a mounted filesystem. */
1184 if (managed & DCACHE_MOUNTED) {
1185 struct vfsmount *mounted = lookup_mnt(path);
1186 if (!mounted)
1187 break;
1188 dput(path->dentry);
1189 mntput(path->mnt);
1190 path->mnt = mounted;
1191 path->dentry = dget(mounted->mnt_root);
1192 continue;
1193 }
1194
1195 /* Don't handle automount points here */
1196 break;
1197 }
1198 return 0;
1199 }
1200
1201 /*
1202 * Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
1203 */
1204 static void follow_mount(struct path *path)
1205 {
1206 while (d_mountpoint(path->dentry)) {
1207 struct vfsmount *mounted = lookup_mnt(path);
1208 if (!mounted)
1209 break;
1210 dput(path->dentry);
1211 mntput(path->mnt);
1212 path->mnt = mounted;
1213 path->dentry = dget(mounted->mnt_root);
1214 }
1215 }
1216
1217 static void follow_dotdot(struct nameidata *nd)
1218 {
1219 set_root(nd);
1220
1221 while(1) {
1222 struct dentry *old = nd->path.dentry;
1223
1224 if (nd->path.dentry == nd->root.dentry &&
1225 nd->path.mnt == nd->root.mnt) {
1226 break;
1227 }
1228 if (nd->path.dentry != nd->path.mnt->mnt_root) {
1229 /* rare case of legitimate dget_parent()... */
1230 nd->path.dentry = dget_parent(nd->path.dentry);
1231 dput(old);
1232 break;
1233 }
1234 if (!follow_up(&nd->path))
1235 break;
1236 }
1237 follow_mount(&nd->path);
1238 nd->inode = nd->path.dentry->d_inode;
1239 }
1240
1241 /*
1242 * This looks up the name in dcache, possibly revalidates the old dentry and
1243 * allocates a new one if not found or not valid. In the need_lookup argument
1244 * returns whether i_op->lookup is necessary.
1245 *
1246 * dir->d_inode->i_mutex must be held
1247 */
1248 static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
1249 unsigned int flags, bool *need_lookup)
1250 {
1251 struct dentry *dentry;
1252 int error;
1253
1254 *need_lookup = false;
1255 dentry = d_lookup(dir, name);
1256 if (dentry) {
1257 if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
1258 error = d_revalidate(dentry, flags);
1259 if (unlikely(error <= 0)) {
1260 if (error < 0) {
1261 dput(dentry);
1262 return ERR_PTR(error);
1263 } else if (!d_invalidate(dentry)) {
1264 dput(dentry);
1265 dentry = NULL;
1266 }
1267 }
1268 }
1269 }
1270
1271 if (!dentry) {
1272 dentry = d_alloc(dir, name);
1273 if (unlikely(!dentry))
1274 return ERR_PTR(-ENOMEM);
1275
1276 *need_lookup = true;
1277 }
1278 return dentry;
1279 }
1280
1281 /*
1282 * Call i_op->lookup on the dentry. The dentry must be negative and
1283 * unhashed.
1284 *
1285 * dir->d_inode->i_mutex must be held
1286 */
1287 static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
1288 unsigned int flags)
1289 {
1290 struct dentry *old;
1291
1292 /* Don't create child dentry for a dead directory. */
1293 if (unlikely(IS_DEADDIR(dir))) {
1294 dput(dentry);
1295 return ERR_PTR(-ENOENT);
1296 }
1297
1298 old = dir->i_op->lookup(dir, dentry, flags);
1299 if (unlikely(old)) {
1300 dput(dentry);
1301 dentry = old;
1302 }
1303 return dentry;
1304 }
1305
1306 static struct dentry *__lookup_hash(struct qstr *name,
1307 struct dentry *base, unsigned int flags)
1308 {
1309 bool need_lookup;
1310 struct dentry *dentry;
1311
1312 dentry = lookup_dcache(name, base, flags, &need_lookup);
1313 if (!need_lookup)
1314 return dentry;
1315
1316 return lookup_real(base->d_inode, dentry, flags);
1317 }
1318
1319 /*
1320 * It's more convoluted than I'd like it to be, but... it's still fairly
1321 * small and for now I'd prefer to have fast path as straight as possible.
1322 * It _is_ time-critical.
1323 */
1324 static int lookup_fast(struct nameidata *nd,
1325 struct path *path, struct inode **inode)
1326 {
1327 struct vfsmount *mnt = nd->path.mnt;
1328 struct dentry *dentry, *parent = nd->path.dentry;
1329 int need_reval = 1;
1330 int status = 1;
1331 int err;
1332
1333 /*
1334 * Rename seqlock is not required here because in the off chance
1335 * of a false negative due to a concurrent rename, we're going to
1336 * do the non-racy lookup, below.
1337 */
1338 if (nd->flags & LOOKUP_RCU) {
1339 unsigned seq;
1340 dentry = __d_lookup_rcu(parent, &nd->last, &seq);
1341 if (!dentry)
1342 goto unlazy;
1343
1344 /*
1345 * This sequence count validates that the inode matches
1346 * the dentry name information from lookup.
1347 */
1348 *inode = dentry->d_inode;
1349 if (read_seqcount_retry(&dentry->d_seq, seq))
1350 return -ECHILD;
1351
1352 /*
1353 * This sequence count validates that the parent had no
1354 * changes while we did the lookup of the dentry above.
1355 *
1356 * The memory barrier in read_seqcount_begin of child is
1357 * enough, we can use __read_seqcount_retry here.
1358 */
1359 if (__read_seqcount_retry(&parent->d_seq, nd->seq))
1360 return -ECHILD;
1361 nd->seq = seq;
1362
1363 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
1364 status = d_revalidate(dentry, nd->flags);
1365 if (unlikely(status <= 0)) {
1366 if (status != -ECHILD)
1367 need_reval = 0;
1368 goto unlazy;
1369 }
1370 }
1371 path->mnt = mnt;
1372 path->dentry = dentry;
1373 if (unlikely(!__follow_mount_rcu(nd, path, inode)))
1374 goto unlazy;
1375 if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
1376 goto unlazy;
1377 return 0;
1378 unlazy:
1379 if (unlazy_walk(nd, dentry))
1380 return -ECHILD;
1381 } else {
1382 dentry = __d_lookup(parent, &nd->last);
1383 }
1384
1385 if (unlikely(!dentry))
1386 goto need_lookup;
1387
1388 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
1389 status = d_revalidate(dentry, nd->flags);
1390 if (unlikely(status <= 0)) {
1391 if (status < 0) {
1392 dput(dentry);
1393 return status;
1394 }
1395 if (!d_invalidate(dentry)) {
1396 dput(dentry);
1397 goto need_lookup;
1398 }
1399 }
1400
1401 path->mnt = mnt;
1402 path->dentry = dentry;
1403 err = follow_managed(path, nd->flags);
1404 if (unlikely(err < 0)) {
1405 path_put_conditional(path, nd);
1406 return err;
1407 }
1408 if (err)
1409 nd->flags |= LOOKUP_JUMPED;
1410 *inode = path->dentry->d_inode;
1411 return 0;
1412
1413 need_lookup:
1414 return 1;
1415 }
1416
1417 /* Fast lookup failed, do it the slow way */
1418 static int lookup_slow(struct nameidata *nd, struct path *path)
1419 {
1420 struct dentry *dentry, *parent;
1421 int err;
1422
1423 parent = nd->path.dentry;
1424 BUG_ON(nd->inode != parent->d_inode);
1425
1426 mutex_lock(&parent->d_inode->i_mutex);
1427 dentry = __lookup_hash(&nd->last, parent, nd->flags);
1428 mutex_unlock(&parent->d_inode->i_mutex);
1429 if (IS_ERR(dentry))
1430 return PTR_ERR(dentry);
1431 path->mnt = nd->path.mnt;
1432 path->dentry = dentry;
1433 err = follow_managed(path, nd->flags);
1434 if (unlikely(err < 0)) {
1435 path_put_conditional(path, nd);
1436 return err;
1437 }
1438 if (err)
1439 nd->flags |= LOOKUP_JUMPED;
1440 return 0;
1441 }
1442
1443 static inline int may_lookup(struct nameidata *nd)
1444 {
1445 if (nd->flags & LOOKUP_RCU) {
1446 int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
1447 if (err != -ECHILD)
1448 return err;
1449 if (unlazy_walk(nd, NULL))
1450 return -ECHILD;
1451 }
1452 return inode_permission(nd->inode, MAY_EXEC);
1453 }
1454
1455 static inline int handle_dots(struct nameidata *nd, int type)
1456 {
1457 if (type == LAST_DOTDOT) {
1458 if (nd->flags & LOOKUP_RCU) {
1459 if (follow_dotdot_rcu(nd))
1460 return -ECHILD;
1461 } else
1462 follow_dotdot(nd);
1463 }
1464 return 0;
1465 }
1466
1467 static void terminate_walk(struct nameidata *nd)
1468 {
1469 if (!(nd->flags & LOOKUP_RCU)) {
1470 path_put(&nd->path);
1471 } else {
1472 nd->flags &= ~LOOKUP_RCU;
1473 if (!(nd->flags & LOOKUP_ROOT))
1474 nd->root.mnt = NULL;
1475 rcu_read_unlock();
1476 }
1477 }
1478
1479 /*
1480 * Do we need to follow links? We _really_ want to be able
1481 * to do this check without having to look at inode->i_op,
1482 * so we keep a cache of "no, this doesn't need follow_link"
1483 * for the common case.
1484 */
1485 static inline int should_follow_link(struct dentry *dentry, int follow)
1486 {
1487 return unlikely(d_is_symlink(dentry)) ? follow : 0;
1488 }
1489
1490 static inline int walk_component(struct nameidata *nd, struct path *path,
1491 int follow)
1492 {
1493 struct inode *inode;
1494 int err;
1495 /*
1496 * "." and ".." are special - ".." especially so because it has
1497 * to be able to know about the current root directory and
1498 * parent relationships.
1499 */
1500 if (unlikely(nd->last_type != LAST_NORM))
1501 return handle_dots(nd, nd->last_type);
1502 err = lookup_fast(nd, path, &inode);
1503 if (unlikely(err)) {
1504 if (err < 0)
1505 goto out_err;
1506
1507 err = lookup_slow(nd, path);
1508 if (err < 0)
1509 goto out_err;
1510
1511 inode = path->dentry->d_inode;
1512 }
1513 err = -ENOENT;
1514 if (!inode)
1515 goto out_path_put;
1516
1517 if (should_follow_link(path->dentry, follow)) {
1518 if (nd->flags & LOOKUP_RCU) {
1519 if (unlikely(unlazy_walk(nd, path->dentry))) {
1520 err = -ECHILD;
1521 goto out_err;
1522 }
1523 }
1524 BUG_ON(inode != path->dentry->d_inode);
1525 return 1;
1526 }
1527 path_to_nameidata(path, nd);
1528 nd->inode = inode;
1529 return 0;
1530
1531 out_path_put:
1532 path_to_nameidata(path, nd);
1533 out_err:
1534 terminate_walk(nd);
1535 return err;
1536 }
1537
1538 /*
1539 * This limits recursive symlink follows to 8, while
1540 * limiting consecutive symlinks to 40.
1541 *
1542 * Without that kind of total limit, nasty chains of consecutive
1543 * symlinks can cause almost arbitrarily long lookups.
1544 */
1545 static inline int nested_symlink(struct path *path, struct nameidata *nd)
1546 {
1547 int res;
1548
1549 if (unlikely(current->link_count >= MAX_NESTED_LINKS)) {
1550 path_put_conditional(path, nd);
1551 path_put(&nd->path);
1552 return -ELOOP;
1553 }
1554 BUG_ON(nd->depth >= MAX_NESTED_LINKS);
1555
1556 nd->depth++;
1557 current->link_count++;
1558
1559 do {
1560 struct path link = *path;
1561 void *cookie;
1562
1563 res = follow_link(&link, nd, &cookie);
1564 if (res)
1565 break;
1566 res = walk_component(nd, path, LOOKUP_FOLLOW);
1567 put_link(nd, &link, cookie);
1568 } while (res > 0);
1569
1570 current->link_count--;
1571 nd->depth--;
1572 return res;
1573 }
1574
1575 /*
1576 * We can do the critical dentry name comparison and hashing
1577 * operations one word at a time, but we are limited to:
1578 *
1579 * - Architectures with fast unaligned word accesses. We could
1580 * do a "get_unaligned()" if this helps and is sufficiently
1581 * fast.
1582 *
1583 * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
1584 * do not trap on the (extremely unlikely) case of a page
1585 * crossing operation.
1586 *
1587 * - Furthermore, we need an efficient 64-bit compile for the
1588 * 64-bit case in order to generate the "number of bytes in
1589 * the final mask". Again, that could be replaced with a
1590 * efficient population count instruction or similar.
1591 */
1592 #ifdef CONFIG_DCACHE_WORD_ACCESS
1593
1594 #include <asm/word-at-a-time.h>
1595
1596 #ifdef CONFIG_64BIT
1597
1598 static inline unsigned int fold_hash(unsigned long hash)
1599 {
1600 hash += hash >> (8*sizeof(int));
1601 return hash;
1602 }
1603
1604 #else /* 32-bit case */
1605
1606 #define fold_hash(x) (x)
1607
1608 #endif
1609
1610 unsigned int full_name_hash(const unsigned char *name, unsigned int len)
1611 {
1612 unsigned long a, mask;
1613 unsigned long hash = 0;
1614
1615 for (;;) {
1616 a = load_unaligned_zeropad(name);
1617 if (len < sizeof(unsigned long))
1618 break;
1619 hash += a;
1620 hash *= 9;
1621 name += sizeof(unsigned long);
1622 len -= sizeof(unsigned long);
1623 if (!len)
1624 goto done;
1625 }
1626 mask = bytemask_from_count(len);
1627 hash += mask & a;
1628 done:
1629 return fold_hash(hash);
1630 }
1631 EXPORT_SYMBOL(full_name_hash);
1632
1633 /*
1634 * Calculate the length and hash of the path component, and
1635 * return the length of the component;
1636 */
1637 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
1638 {
1639 unsigned long a, b, adata, bdata, mask, hash, len;
1640 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
1641
1642 hash = a = 0;
1643 len = -sizeof(unsigned long);
1644 do {
1645 hash = (hash + a) * 9;
1646 len += sizeof(unsigned long);
1647 a = load_unaligned_zeropad(name+len);
1648 b = a ^ REPEAT_BYTE('/');
1649 } while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
1650
1651 adata = prep_zero_mask(a, adata, &constants);
1652 bdata = prep_zero_mask(b, bdata, &constants);
1653
1654 mask = create_zero_mask(adata | bdata);
1655
1656 hash += a & zero_bytemask(mask);
1657 *hashp = fold_hash(hash);
1658
1659 return len + find_zero(mask);
1660 }
1661
1662 #else
1663
1664 unsigned int full_name_hash(const unsigned char *name, unsigned int len)
1665 {
1666 unsigned long hash = init_name_hash();
1667 while (len--)
1668 hash = partial_name_hash(*name++, hash);
1669 return end_name_hash(hash);
1670 }
1671 EXPORT_SYMBOL(full_name_hash);
1672
1673 /*
1674 * We know there's a real path component here of at least
1675 * one character.
1676 */
1677 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
1678 {
1679 unsigned long hash = init_name_hash();
1680 unsigned long len = 0, c;
1681
1682 c = (unsigned char)*name;
1683 do {
1684 len++;
1685 hash = partial_name_hash(c, hash);
1686 c = (unsigned char)name[len];
1687 } while (c && c != '/');
1688 *hashp = end_name_hash(hash);
1689 return len;
1690 }
1691
1692 #endif
1693
1694 /*
1695 * Name resolution.
1696 * This is the basic name resolution function, turning a pathname into
1697 * the final dentry. We expect 'base' to be positive and a directory.
1698 *
1699 * Returns 0 and nd will have valid dentry and mnt on success.
1700 * Returns error and drops reference to input namei data on failure.
1701 */
1702 static int link_path_walk(const char *name, struct nameidata *nd)
1703 {
1704 struct path next;
1705 int err;
1706
1707 while (*name=='/')
1708 name++;
1709 if (!*name)
1710 return 0;
1711
1712 /* At this point we know we have a real path component. */
1713 for(;;) {
1714 struct qstr this;
1715 long len;
1716 int type;
1717
1718 err = may_lookup(nd);
1719 if (err)
1720 break;
1721
1722 len = hash_name(name, &this.hash);
1723 this.name = name;
1724 this.len = len;
1725
1726 type = LAST_NORM;
1727 if (name[0] == '.') switch (len) {
1728 case 2:
1729 if (name[1] == '.') {
1730 type = LAST_DOTDOT;
1731 nd->flags |= LOOKUP_JUMPED;
1732 }
1733 break;
1734 case 1:
1735 type = LAST_DOT;
1736 }
1737 if (likely(type == LAST_NORM)) {
1738 struct dentry *parent = nd->path.dentry;
1739 nd->flags &= ~LOOKUP_JUMPED;
1740 if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
1741 err = parent->d_op->d_hash(parent, &this);
1742 if (err < 0)
1743 break;
1744 }
1745 }
1746
1747 nd->last = this;
1748 nd->last_type = type;
1749
1750 if (!name[len])
1751 return 0;
1752 /*
1753 * If it wasn't NUL, we know it was '/'. Skip that
1754 * slash, and continue until no more slashes.
1755 */
1756 do {
1757 len++;
1758 } while (unlikely(name[len] == '/'));
1759 if (!name[len])
1760 return 0;
1761
1762 name += len;
1763
1764 err = walk_component(nd, &next, LOOKUP_FOLLOW);
1765 if (err < 0)
1766 return err;
1767
1768 if (err) {
1769 err = nested_symlink(&next, nd);
1770 if (err)
1771 return err;
1772 }
1773 if (!d_is_directory(nd->path.dentry)) {
1774 err = -ENOTDIR;
1775 break;
1776 }
1777 }
1778 terminate_walk(nd);
1779 return err;
1780 }
1781
1782 static int path_init(int dfd, const char *name, unsigned int flags,
1783 struct nameidata *nd, struct file **fp)
1784 {
1785 int retval = 0;
1786
1787 nd->last_type = LAST_ROOT; /* if there are only slashes... */
1788 nd->flags = flags | LOOKUP_JUMPED;
1789 nd->depth = 0;
1790 if (flags & LOOKUP_ROOT) {
1791 struct dentry *root = nd->root.dentry;
1792 struct inode *inode = root->d_inode;
1793 if (*name) {
1794 if (!d_is_directory(root))
1795 return -ENOTDIR;
1796 retval = inode_permission(inode, MAY_EXEC);
1797 if (retval)
1798 return retval;
1799 }
1800 nd->path = nd->root;
1801 nd->inode = inode;
1802 if (flags & LOOKUP_RCU) {
1803 rcu_read_lock();
1804 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1805 nd->m_seq = read_seqbegin(&mount_lock);
1806 } else {
1807 path_get(&nd->path);
1808 }
1809 return 0;
1810 }
1811
1812 nd->root.mnt = NULL;
1813
1814 nd->m_seq = read_seqbegin(&mount_lock);
1815 if (*name=='/') {
1816 if (flags & LOOKUP_RCU) {
1817 rcu_read_lock();
1818 set_root_rcu(nd);
1819 } else {
1820 set_root(nd);
1821 path_get(&nd->root);
1822 }
1823 nd->path = nd->root;
1824 } else if (dfd == AT_FDCWD) {
1825 if (flags & LOOKUP_RCU) {
1826 struct fs_struct *fs = current->fs;
1827 unsigned seq;
1828
1829 rcu_read_lock();
1830
1831 do {
1832 seq = read_seqcount_begin(&fs->seq);
1833 nd->path = fs->pwd;
1834 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1835 } while (read_seqcount_retry(&fs->seq, seq));
1836 } else {
1837 get_fs_pwd(current->fs, &nd->path);
1838 }
1839 } else {
1840 /* Caller must check execute permissions on the starting path component */
1841 struct fd f = fdget_raw(dfd);
1842 struct dentry *dentry;
1843
1844 if (!f.file)
1845 return -EBADF;
1846
1847 dentry = f.file->f_path.dentry;
1848
1849 if (*name) {
1850 if (!d_is_directory(dentry)) {
1851 fdput(f);
1852 return -ENOTDIR;
1853 }
1854 }
1855
1856 nd->path = f.file->f_path;
1857 if (flags & LOOKUP_RCU) {
1858 if (f.need_put)
1859 *fp = f.file;
1860 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1861 rcu_read_lock();
1862 } else {
1863 path_get(&nd->path);
1864 fdput(f);
1865 }
1866 }
1867
1868 nd->inode = nd->path.dentry->d_inode;
1869 return 0;
1870 }
1871
1872 static inline int lookup_last(struct nameidata *nd, struct path *path)
1873 {
1874 if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
1875 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
1876
1877 nd->flags &= ~LOOKUP_PARENT;
1878 return walk_component(nd, path, nd->flags & LOOKUP_FOLLOW);
1879 }
1880
1881 /* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
1882 static int path_lookupat(int dfd, const char *name,
1883 unsigned int flags, struct nameidata *nd)
1884 {
1885 struct file *base = NULL;
1886 struct path path;
1887 int err;
1888
1889 /*
1890 * Path walking is largely split up into 2 different synchronisation
1891 * schemes, rcu-walk and ref-walk (explained in
1892 * Documentation/filesystems/path-lookup.txt). These share much of the
1893 * path walk code, but some things particularly setup, cleanup, and
1894 * following mounts are sufficiently divergent that functions are
1895 * duplicated. Typically there is a function foo(), and its RCU
1896 * analogue, foo_rcu().
1897 *
1898 * -ECHILD is the error number of choice (just to avoid clashes) that
1899 * is returned if some aspect of an rcu-walk fails. Such an error must
1900 * be handled by restarting a traditional ref-walk (which will always
1901 * be able to complete).
1902 */
1903 err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base);
1904
1905 if (unlikely(err))
1906 return err;
1907
1908 current->total_link_count = 0;
1909 err = link_path_walk(name, nd);
1910
1911 if (!err && !(flags & LOOKUP_PARENT)) {
1912 err = lookup_last(nd, &path);
1913 while (err > 0) {
1914 void *cookie;
1915 struct path link = path;
1916 err = may_follow_link(&link, nd);
1917 if (unlikely(err))
1918 break;
1919 nd->flags |= LOOKUP_PARENT;
1920 err = follow_link(&link, nd, &cookie);
1921 if (err)
1922 break;
1923 err = lookup_last(nd, &path);
1924 put_link(nd, &link, cookie);
1925 }
1926 }
1927
1928 if (!err)
1929 err = complete_walk(nd);
1930
1931 if (!err && nd->flags & LOOKUP_DIRECTORY) {
1932 if (!d_is_directory(nd->path.dentry)) {
1933 path_put(&nd->path);
1934 err = -ENOTDIR;
1935 }
1936 }
1937
1938 if (base)
1939 fput(base);
1940
1941 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
1942 path_put(&nd->root);
1943 nd->root.mnt = NULL;
1944 }
1945 return err;
1946 }
1947
1948 static int filename_lookup(int dfd, struct filename *name,
1949 unsigned int flags, struct nameidata *nd)
1950 {
1951 int retval = path_lookupat(dfd, name->name, flags | LOOKUP_RCU, nd);
1952 if (unlikely(retval == -ECHILD))
1953 retval = path_lookupat(dfd, name->name, flags, nd);
1954 if (unlikely(retval == -ESTALE))
1955 retval = path_lookupat(dfd, name->name,
1956 flags | LOOKUP_REVAL, nd);
1957
1958 if (likely(!retval))
1959 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
1960 return retval;
1961 }
1962
1963 static int do_path_lookup(int dfd, const char *name,
1964 unsigned int flags, struct nameidata *nd)
1965 {
1966 struct filename filename = { .name = name };
1967
1968 return filename_lookup(dfd, &filename, flags, nd);
1969 }
1970
1971 /* does lookup, returns the object with parent locked */
1972 struct dentry *kern_path_locked(const char *name, struct path *path)
1973 {
1974 struct nameidata nd;
1975 struct dentry *d;
1976 int err = do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, &nd);
1977 if (err)
1978 return ERR_PTR(err);
1979 if (nd.last_type != LAST_NORM) {
1980 path_put(&nd.path);
1981 return ERR_PTR(-EINVAL);
1982 }
1983 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
1984 d = __lookup_hash(&nd.last, nd.path.dentry, 0);
1985 if (IS_ERR(d)) {
1986 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
1987 path_put(&nd.path);
1988 return d;
1989 }
1990 *path = nd.path;
1991 return d;
1992 }
1993
1994 int kern_path(const char *name, unsigned int flags, struct path *path)
1995 {
1996 struct nameidata nd;
1997 int res = do_path_lookup(AT_FDCWD, name, flags, &nd);
1998 if (!res)
1999 *path = nd.path;
2000 return res;
2001 }
2002
2003 /**
2004 * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
2005 * @dentry: pointer to dentry of the base directory
2006 * @mnt: pointer to vfs mount of the base directory
2007 * @name: pointer to file name
2008 * @flags: lookup flags
2009 * @path: pointer to struct path to fill
2010 */
2011 int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
2012 const char *name, unsigned int flags,
2013 struct path *path)
2014 {
2015 struct nameidata nd;
2016 int err;
2017 nd.root.dentry = dentry;
2018 nd.root.mnt = mnt;
2019 BUG_ON(flags & LOOKUP_PARENT);
2020 /* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */
2021 err = do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, &nd);
2022 if (!err)
2023 *path = nd.path;
2024 return err;
2025 }
2026
2027 /*
2028 * Restricted form of lookup. Doesn't follow links, single-component only,
2029 * needs parent already locked. Doesn't follow mounts.
2030 * SMP-safe.
2031 */
2032 static struct dentry *lookup_hash(struct nameidata *nd)
2033 {
2034 return __lookup_hash(&nd->last, nd->path.dentry, nd->flags);
2035 }
2036
2037 /**
2038 * lookup_one_len - filesystem helper to lookup single pathname component
2039 * @name: pathname component to lookup
2040 * @base: base directory to lookup from
2041 * @len: maximum length @len should be interpreted to
2042 *
2043 * Note that this routine is purely a helper for filesystem usage and should
2044 * not be called by generic code. Also note that by using this function the
2045 * nameidata argument is passed to the filesystem methods and a filesystem
2046 * using this helper needs to be prepared for that.
2047 */
2048 struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
2049 {
2050 struct qstr this;
2051 unsigned int c;
2052 int err;
2053
2054 WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));
2055
2056 this.name = name;
2057 this.len = len;
2058 this.hash = full_name_hash(name, len);
2059 if (!len)
2060 return ERR_PTR(-EACCES);
2061
2062 if (unlikely(name[0] == '.')) {
2063 if (len < 2 || (len == 2 && name[1] == '.'))
2064 return ERR_PTR(-EACCES);
2065 }
2066
2067 while (len--) {
2068 c = *(const unsigned char *)name++;
2069 if (c == '/' || c == '\0')
2070 return ERR_PTR(-EACCES);
2071 }
2072 /*
2073 * See if the low-level filesystem might want
2074 * to use its own hash..
2075 */
2076 if (base->d_flags & DCACHE_OP_HASH) {
2077 int err = base->d_op->d_hash(base, &this);
2078 if (err < 0)
2079 return ERR_PTR(err);
2080 }
2081
2082 err = inode_permission(base->d_inode, MAY_EXEC);
2083 if (err)
2084 return ERR_PTR(err);
2085
2086 return __lookup_hash(&this, base, 0);
2087 }
2088
2089 int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
2090 struct path *path, int *empty)
2091 {
2092 struct nameidata nd;
2093 struct filename *tmp = getname_flags(name, flags, empty);
2094 int err = PTR_ERR(tmp);
2095 if (!IS_ERR(tmp)) {
2096
2097 BUG_ON(flags & LOOKUP_PARENT);
2098
2099 err = filename_lookup(dfd, tmp, flags, &nd);
2100 putname(tmp);
2101 if (!err)
2102 *path = nd.path;
2103 }
2104 return err;
2105 }
2106
2107 int user_path_at(int dfd, const char __user *name, unsigned flags,
2108 struct path *path)
2109 {
2110 return user_path_at_empty(dfd, name, flags, path, NULL);
2111 }
2112
2113 /*
2114 * NB: most callers don't do anything directly with the reference to the
2115 * to struct filename, but the nd->last pointer points into the name string
2116 * allocated by getname. So we must hold the reference to it until all
2117 * path-walking is complete.
2118 */
2119 static struct filename *
2120 user_path_parent(int dfd, const char __user *path, struct nameidata *nd,
2121 unsigned int flags)
2122 {
2123 struct filename *s = getname(path);
2124 int error;
2125
2126 /* only LOOKUP_REVAL is allowed in extra flags */
2127 flags &= LOOKUP_REVAL;
2128
2129 if (IS_ERR(s))
2130 return s;
2131
2132 error = filename_lookup(dfd, s, flags | LOOKUP_PARENT, nd);
2133 if (error) {
2134 putname(s);
2135 return ERR_PTR(error);
2136 }
2137
2138 return s;
2139 }
2140
2141 /**
2142 * mountpoint_last - look up last component for umount
2143 * @nd: pathwalk nameidata - currently pointing at parent directory of "last"
2144 * @path: pointer to container for result
2145 *
2146 * This is a special lookup_last function just for umount. In this case, we
2147 * need to resolve the path without doing any revalidation.
2148 *
2149 * The nameidata should be the result of doing a LOOKUP_PARENT pathwalk. Since
2150 * mountpoints are always pinned in the dcache, their ancestors are too. Thus,
2151 * in almost all cases, this lookup will be served out of the dcache. The only
2152 * cases where it won't are if nd->last refers to a symlink or the path is
2153 * bogus and it doesn't exist.
2154 *
2155 * Returns:
2156 * -error: if there was an error during lookup. This includes -ENOENT if the
2157 * lookup found a negative dentry. The nd->path reference will also be
2158 * put in this case.
2159 *
2160 * 0: if we successfully resolved nd->path and found it to not to be a
2161 * symlink that needs to be followed. "path" will also be populated.
2162 * The nd->path reference will also be put.
2163 *
2164 * 1: if we successfully resolved nd->last and found it to be a symlink
2165 * that needs to be followed. "path" will be populated with the path
2166 * to the link, and nd->path will *not* be put.
2167 */
2168 static int
2169 mountpoint_last(struct nameidata *nd, struct path *path)
2170 {
2171 int error = 0;
2172 struct dentry *dentry;
2173 struct dentry *dir = nd->path.dentry;
2174
2175 /* If we're in rcuwalk, drop out of it to handle last component */
2176 if (nd->flags & LOOKUP_RCU) {
2177 if (unlazy_walk(nd, NULL)) {
2178 error = -ECHILD;
2179 goto out;
2180 }
2181 }
2182
2183 nd->flags &= ~LOOKUP_PARENT;
2184
2185 if (unlikely(nd->last_type != LAST_NORM)) {
2186 error = handle_dots(nd, nd->last_type);
2187 if (error)
2188 goto out;
2189 dentry = dget(nd->path.dentry);
2190 goto done;
2191 }
2192
2193 mutex_lock(&dir->d_inode->i_mutex);
2194 dentry = d_lookup(dir, &nd->last);
2195 if (!dentry) {
2196 /*
2197 * No cached dentry. Mounted dentries are pinned in the cache,
2198 * so that means that this dentry is probably a symlink or the
2199 * path doesn't actually point to a mounted dentry.
2200 */
2201 dentry = d_alloc(dir, &nd->last);
2202 if (!dentry) {
2203 error = -ENOMEM;
2204 mutex_unlock(&dir->d_inode->i_mutex);
2205 goto out;
2206 }
2207 dentry = lookup_real(dir->d_inode, dentry, nd->flags);
2208 error = PTR_ERR(dentry);
2209 if (IS_ERR(dentry)) {
2210 mutex_unlock(&dir->d_inode->i_mutex);
2211 goto out;
2212 }
2213 }
2214 mutex_unlock(&dir->d_inode->i_mutex);
2215
2216 done:
2217 if (!dentry->d_inode) {
2218 error = -ENOENT;
2219 dput(dentry);
2220 goto out;
2221 }
2222 path->dentry = dentry;
2223 path->mnt = mntget(nd->path.mnt);
2224 if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
2225 return 1;
2226 follow_mount(path);
2227 error = 0;
2228 out:
2229 terminate_walk(nd);
2230 return error;
2231 }
2232
2233 /**
2234 * path_mountpoint - look up a path to be umounted
2235 * @dfd: directory file descriptor to start walk from
2236 * @name: full pathname to walk
2237 * @path: pointer to container for result
2238 * @flags: lookup flags
2239 *
2240 * Look up the given name, but don't attempt to revalidate the last component.
2241 * Returns 0 and "path" will be valid on success; Returns error otherwise.
2242 */
2243 static int
2244 path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags)
2245 {
2246 struct file *base = NULL;
2247 struct nameidata nd;
2248 int err;
2249
2250 err = path_init(dfd, name, flags | LOOKUP_PARENT, &nd, &base);
2251 if (unlikely(err))
2252 return err;
2253
2254 current->total_link_count = 0;
2255 err = link_path_walk(name, &nd);
2256 if (err)
2257 goto out;
2258
2259 err = mountpoint_last(&nd, path);
2260 while (err > 0) {
2261 void *cookie;
2262 struct path link = *path;
2263 err = may_follow_link(&link, &nd);
2264 if (unlikely(err))
2265 break;
2266 nd.flags |= LOOKUP_PARENT;
2267 err = follow_link(&link, &nd, &cookie);
2268 if (err)
2269 break;
2270 err = mountpoint_last(&nd, path);
2271 put_link(&nd, &link, cookie);
2272 }
2273 out:
2274 if (base)
2275 fput(base);
2276
2277 if (nd.root.mnt && !(nd.flags & LOOKUP_ROOT))
2278 path_put(&nd.root);
2279
2280 return err;
2281 }
2282
2283 static int
2284 filename_mountpoint(int dfd, struct filename *s, struct path *path,
2285 unsigned int flags)
2286 {
2287 int error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_RCU);
2288 if (unlikely(error == -ECHILD))
2289 error = path_mountpoint(dfd, s->name, path, flags);
2290 if (unlikely(error == -ESTALE))
2291 error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_REVAL);
2292 if (likely(!error))
2293 audit_inode(s, path->dentry, 0);
2294 return error;
2295 }
2296
2297 /**
2298 * user_path_mountpoint_at - lookup a path from userland in order to umount it
2299 * @dfd: directory file descriptor
2300 * @name: pathname from userland
2301 * @flags: lookup flags
2302 * @path: pointer to container to hold result
2303 *
2304 * A umount is a special case for path walking. We're not actually interested
2305 * in the inode in this situation, and ESTALE errors can be a problem. We
2306 * simply want track down the dentry and vfsmount attached at the mountpoint
2307 * and avoid revalidating the last component.
2308 *
2309 * Returns 0 and populates "path" on success.
2310 */
2311 int
2312 user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags,
2313 struct path *path)
2314 {
2315 struct filename *s = getname(name);
2316 int error;
2317 if (IS_ERR(s))
2318 return PTR_ERR(s);
2319 error = filename_mountpoint(dfd, s, path, flags);
2320 putname(s);
2321 return error;
2322 }
2323
2324 int
2325 kern_path_mountpoint(int dfd, const char *name, struct path *path,
2326 unsigned int flags)
2327 {
2328 struct filename s = {.name = name};
2329 return filename_mountpoint(dfd, &s, path, flags);
2330 }
2331 EXPORT_SYMBOL(kern_path_mountpoint);
2332
2333 /*
2334 * It's inline, so penalty for filesystems that don't use sticky bit is
2335 * minimal.
2336 */
2337 static inline int check_sticky(struct inode *dir, struct inode *inode)
2338 {
2339 kuid_t fsuid = current_fsuid();
2340
2341 if (!(dir->i_mode & S_ISVTX))
2342 return 0;
2343 if (uid_eq(inode->i_uid, fsuid))
2344 return 0;
2345 if (uid_eq(dir->i_uid, fsuid))
2346 return 0;
2347 return !inode_capable(inode, CAP_FOWNER);
2348 }
2349
2350 /*
2351 * Check whether we can remove a link victim from directory dir, check
2352 * whether the type of victim is right.
2353 * 1. We can't do it if dir is read-only (done in permission())
2354 * 2. We should have write and exec permissions on dir
2355 * 3. We can't remove anything from append-only dir
2356 * 4. We can't do anything with immutable dir (done in permission())
2357 * 5. If the sticky bit on dir is set we should either
2358 * a. be owner of dir, or
2359 * b. be owner of victim, or
2360 * c. have CAP_FOWNER capability
2361 * 6. If the victim is append-only or immutable we can't do antyhing with
2362 * links pointing to it.
2363 * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
2364 * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
2365 * 9. We can't remove a root or mountpoint.
2366 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
2367 * nfs_async_unlink().
2368 */
2369 static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
2370 {
2371 struct inode *inode = victim->d_inode;
2372 int error;
2373
2374 if (d_is_negative(victim))
2375 return -ENOENT;
2376 BUG_ON(!inode);
2377
2378 BUG_ON(victim->d_parent->d_inode != dir);
2379 audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
2380
2381 error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
2382 if (error)
2383 return error;
2384 if (IS_APPEND(dir))
2385 return -EPERM;
2386
2387 if (check_sticky(dir, inode) || IS_APPEND(inode) ||
2388 IS_IMMUTABLE(inode) || IS_SWAPFILE(inode))
2389 return -EPERM;
2390 if (isdir) {
2391 if (!d_is_directory(victim) && !d_is_autodir(victim))
2392 return -ENOTDIR;
2393 if (IS_ROOT(victim))
2394 return -EBUSY;
2395 } else if (d_is_directory(victim) || d_is_autodir(victim))
2396 return -EISDIR;
2397 if (IS_DEADDIR(dir))
2398 return -ENOENT;
2399 if (victim->d_flags & DCACHE_NFSFS_RENAMED)
2400 return -EBUSY;
2401 return 0;
2402 }
2403
2404 /* Check whether we can create an object with dentry child in directory
2405 * dir.
2406 * 1. We can't do it if child already exists (open has special treatment for
2407 * this case, but since we are inlined it's OK)
2408 * 2. We can't do it if dir is read-only (done in permission())
2409 * 3. We should have write and exec permissions on dir
2410 * 4. We can't do it if dir is immutable (done in permission())
2411 */
2412 static inline int may_create(struct inode *dir, struct dentry *child)
2413 {
2414 audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
2415 if (child->d_inode)
2416 return -EEXIST;
2417 if (IS_DEADDIR(dir))
2418 return -ENOENT;
2419 return inode_permission(dir, MAY_WRITE | MAY_EXEC);
2420 }
2421
2422 /*
2423 * p1 and p2 should be directories on the same fs.
2424 */
2425 struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
2426 {
2427 struct dentry *p;
2428
2429 if (p1 == p2) {
2430 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
2431 return NULL;
2432 }
2433
2434 mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
2435
2436 p = d_ancestor(p2, p1);
2437 if (p) {
2438 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT);
2439 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD);
2440 return p;
2441 }
2442
2443 p = d_ancestor(p1, p2);
2444 if (p) {
2445 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
2446 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
2447 return p;
2448 }
2449
2450 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
2451 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
2452 return NULL;
2453 }
2454
2455 void unlock_rename(struct dentry *p1, struct dentry *p2)
2456 {
2457 mutex_unlock(&p1->d_inode->i_mutex);
2458 if (p1 != p2) {
2459 mutex_unlock(&p2->d_inode->i_mutex);
2460 mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
2461 }
2462 }
2463
2464 int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2465 bool want_excl)
2466 {
2467 int error = may_create(dir, dentry);
2468 if (error)
2469 return error;
2470
2471 if (!dir->i_op->create)
2472 return -EACCES; /* shouldn't it be ENOSYS? */
2473 mode &= S_IALLUGO;
2474 mode |= S_IFREG;
2475 error = security_inode_create(dir, dentry, mode);
2476 if (error)
2477 return error;
2478 error = dir->i_op->create(dir, dentry, mode, want_excl);
2479 if (!error)
2480 fsnotify_create(dir, dentry);
2481 return error;
2482 }
2483
2484 static int may_open(struct path *path, int acc_mode, int flag)
2485 {
2486 struct dentry *dentry = path->dentry;
2487 struct inode *inode = dentry->d_inode;
2488 int error;
2489
2490 /* O_PATH? */
2491 if (!acc_mode)
2492 return 0;
2493
2494 if (!inode)
2495 return -ENOENT;
2496
2497 switch (inode->i_mode & S_IFMT) {
2498 case S_IFLNK:
2499 return -ELOOP;
2500 case S_IFDIR:
2501 if (acc_mode & MAY_WRITE)
2502 return -EISDIR;
2503 break;
2504 case S_IFBLK:
2505 case S_IFCHR:
2506 if (path->mnt->mnt_flags & MNT_NODEV)
2507 return -EACCES;
2508 /*FALLTHRU*/
2509 case S_IFIFO:
2510 case S_IFSOCK:
2511 flag &= ~O_TRUNC;
2512 break;
2513 }
2514
2515 error = inode_permission(inode, acc_mode);
2516 if (error)
2517 return error;
2518
2519 /*
2520 * An append-only file must be opened in append mode for writing.
2521 */
2522 if (IS_APPEND(inode)) {
2523 if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
2524 return -EPERM;
2525 if (flag & O_TRUNC)
2526 return -EPERM;
2527 }
2528
2529 /* O_NOATIME can only be set by the owner or superuser */
2530 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
2531 return -EPERM;
2532
2533 return 0;
2534 }
2535
2536 static int handle_truncate(struct file *filp)
2537 {
2538 struct path *path = &filp->f_path;
2539 struct inode *inode = path->dentry->d_inode;
2540 int error = get_write_access(inode);
2541 if (error)
2542 return error;
2543 /*
2544 * Refuse to truncate files with mandatory locks held on them.
2545 */
2546 error = locks_verify_locked(inode);
2547 if (!error)
2548 error = security_path_truncate(path);
2549 if (!error) {
2550 error = do_truncate(path->dentry, 0,
2551 ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
2552 filp);
2553 }
2554 put_write_access(inode);
2555 return error;
2556 }
2557
2558 static inline int open_to_namei_flags(int flag)
2559 {
2560 if ((flag & O_ACCMODE) == 3)
2561 flag--;
2562 return flag;
2563 }
2564
2565 static int may_o_create(struct path *dir, struct dentry *dentry, umode_t mode)
2566 {
2567 int error = security_path_mknod(dir, dentry, mode, 0);
2568 if (error)
2569 return error;
2570
2571 error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
2572 if (error)
2573 return error;
2574
2575 return security_inode_create(dir->dentry->d_inode, dentry, mode);
2576 }
2577
2578 /*
2579 * Attempt to atomically look up, create and open a file from a negative
2580 * dentry.
2581 *
2582 * Returns 0 if successful. The file will have been created and attached to
2583 * @file by the filesystem calling finish_open().
2584 *
2585 * Returns 1 if the file was looked up only or didn't need creating. The
2586 * caller will need to perform the open themselves. @path will have been
2587 * updated to point to the new dentry. This may be negative.
2588 *
2589 * Returns an error code otherwise.
2590 */
2591 static int atomic_open(struct nameidata *nd, struct dentry *dentry,
2592 struct path *path, struct file *file,
2593 const struct open_flags *op,
2594 bool got_write, bool need_lookup,
2595 int *opened)
2596 {
2597 struct inode *dir = nd->path.dentry->d_inode;
2598 unsigned open_flag = open_to_namei_flags(op->open_flag);
2599 umode_t mode;
2600 int error;
2601 int acc_mode;
2602 int create_error = 0;
2603 struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
2604 bool excl;
2605
2606 BUG_ON(dentry->d_inode);
2607
2608 /* Don't create child dentry for a dead directory. */
2609 if (unlikely(IS_DEADDIR(dir))) {
2610 error = -ENOENT;
2611 goto out;
2612 }
2613
2614 mode = op->mode;
2615 if ((open_flag & O_CREAT) && !IS_POSIXACL(dir))
2616 mode &= ~current_umask();
2617
2618 excl = (open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT);
2619 if (excl)
2620 open_flag &= ~O_TRUNC;
2621
2622 /*
2623 * Checking write permission is tricky, bacuse we don't know if we are
2624 * going to actually need it: O_CREAT opens should work as long as the
2625 * file exists. But checking existence breaks atomicity. The trick is
2626 * to check access and if not granted clear O_CREAT from the flags.
2627 *
2628 * Another problem is returing the "right" error value (e.g. for an
2629 * O_EXCL open we want to return EEXIST not EROFS).
2630 */
2631 if (((open_flag & (O_CREAT | O_TRUNC)) ||
2632 (open_flag & O_ACCMODE) != O_RDONLY) && unlikely(!got_write)) {
2633 if (!(open_flag & O_CREAT)) {
2634 /*
2635 * No O_CREATE -> atomicity not a requirement -> fall
2636 * back to lookup + open
2637 */
2638 goto no_open;
2639 } else if (open_flag & (O_EXCL | O_TRUNC)) {
2640 /* Fall back and fail with the right error */
2641 create_error = -EROFS;
2642 goto no_open;
2643 } else {
2644 /* No side effects, safe to clear O_CREAT */
2645 create_error = -EROFS;
2646 open_flag &= ~O_CREAT;
2647 }
2648 }
2649
2650 if (open_flag & O_CREAT) {
2651 error = may_o_create(&nd->path, dentry, mode);
2652 if (error) {
2653 create_error = error;
2654 if (open_flag & O_EXCL)
2655 goto no_open;
2656 open_flag &= ~O_CREAT;
2657 }
2658 }
2659
2660 if (nd->flags & LOOKUP_DIRECTORY)
2661 open_flag |= O_DIRECTORY;
2662
2663 file->f_path.dentry = DENTRY_NOT_SET;
2664 file->f_path.mnt = nd->path.mnt;
2665 error = dir->i_op->atomic_open(dir, dentry, file, open_flag, mode,
2666 opened);
2667 if (error < 0) {
2668 if (create_error && error == -ENOENT)
2669 error = create_error;
2670 goto out;
2671 }
2672
2673 if (error) { /* returned 1, that is */
2674 if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
2675 error = -EIO;
2676 goto out;
2677 }
2678 if (file->f_path.dentry) {
2679 dput(dentry);
2680 dentry = file->f_path.dentry;
2681 }
2682 if (*opened & FILE_CREATED)
2683 fsnotify_create(dir, dentry);
2684 if (!dentry->d_inode) {
2685 WARN_ON(*opened & FILE_CREATED);
2686 if (create_error) {
2687 error = create_error;
2688 goto out;
2689 }
2690 } else {
2691 if (excl && !(*opened & FILE_CREATED)) {
2692 error = -EEXIST;
2693 goto out;
2694 }
2695 }
2696 goto looked_up;
2697 }
2698
2699 /*
2700 * We didn't have the inode before the open, so check open permission
2701 * here.
2702 */
2703 acc_mode = op->acc_mode;
2704 if (*opened & FILE_CREATED) {
2705 WARN_ON(!(open_flag & O_CREAT));
2706 fsnotify_create(dir, dentry);
2707 acc_mode = MAY_OPEN;
2708 }
2709 error = may_open(&file->f_path, acc_mode, open_flag);
2710 if (error)
2711 fput(file);
2712
2713 out:
2714 dput(dentry);
2715 return error;
2716
2717 no_open:
2718 if (need_lookup) {
2719 dentry = lookup_real(dir, dentry, nd->flags);
2720 if (IS_ERR(dentry))
2721 return PTR_ERR(dentry);
2722
2723 if (create_error) {
2724 int open_flag = op->open_flag;
2725
2726 error = create_error;
2727 if ((open_flag & O_EXCL)) {
2728 if (!dentry->d_inode)
2729 goto out;
2730 } else if (!dentry->d_inode) {
2731 goto out;
2732 } else if ((open_flag & O_TRUNC) &&
2733 S_ISREG(dentry->d_inode->i_mode)) {
2734 goto out;
2735 }
2736 /* will fail later, go on to get the right error */
2737 }
2738 }
2739 looked_up:
2740 path->dentry = dentry;
2741 path->mnt = nd->path.mnt;
2742 return 1;
2743 }
2744
2745 /*
2746 * Look up and maybe create and open the last component.
2747 *
2748 * Must be called with i_mutex held on parent.
2749 *
2750 * Returns 0 if the file was successfully atomically created (if necessary) and
2751 * opened. In this case the file will be returned attached to @file.
2752 *
2753 * Returns 1 if the file was not completely opened at this time, though lookups
2754 * and creations will have been performed and the dentry returned in @path will
2755 * be positive upon return if O_CREAT was specified. If O_CREAT wasn't
2756 * specified then a negative dentry may be returned.
2757 *
2758 * An error code is returned otherwise.
2759 *
2760 * FILE_CREATE will be set in @*opened if the dentry was created and will be
2761 * cleared otherwise prior to returning.
2762 */
2763 static int lookup_open(struct nameidata *nd, struct path *path,
2764 struct file *file,
2765 const struct open_flags *op,
2766 bool got_write, int *opened)
2767 {
2768 struct dentry *dir = nd->path.dentry;
2769 struct inode *dir_inode = dir->d_inode;
2770 struct dentry *dentry;
2771 int error;
2772 bool need_lookup;
2773
2774 *opened &= ~FILE_CREATED;
2775 dentry = lookup_dcache(&nd->last, dir, nd->flags, &need_lookup);
2776 if (IS_ERR(dentry))
2777 return PTR_ERR(dentry);
2778
2779 /* Cached positive dentry: will open in f_op->open */
2780 if (!need_lookup && dentry->d_inode)
2781 goto out_no_open;
2782
2783 if ((nd->flags & LOOKUP_OPEN) && dir_inode->i_op->atomic_open) {
2784 return atomic_open(nd, dentry, path, file, op, got_write,
2785 need_lookup, opened);
2786 }
2787
2788 if (need_lookup) {
2789 BUG_ON(dentry->d_inode);
2790
2791 dentry = lookup_real(dir_inode, dentry, nd->flags);
2792 if (IS_ERR(dentry))
2793 return PTR_ERR(dentry);
2794 }
2795
2796 /* Negative dentry, just create the file */
2797 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
2798 umode_t mode = op->mode;
2799 if (!IS_POSIXACL(dir->d_inode))
2800 mode &= ~current_umask();
2801 /*
2802 * This write is needed to ensure that a
2803 * rw->ro transition does not occur between
2804 * the time when the file is created and when
2805 * a permanent write count is taken through
2806 * the 'struct file' in finish_open().
2807 */
2808 if (!got_write) {
2809 error = -EROFS;
2810 goto out_dput;
2811 }
2812 *opened |= FILE_CREATED;
2813 error = security_path_mknod(&nd->path, dentry, mode, 0);
2814 if (error)
2815 goto out_dput;
2816 error = vfs_create(dir->d_inode, dentry, mode,
2817 nd->flags & LOOKUP_EXCL);
2818 if (error)
2819 goto out_dput;
2820 }
2821 out_no_open:
2822 path->dentry = dentry;
2823 path->mnt = nd->path.mnt;
2824 return 1;
2825
2826 out_dput:
2827 dput(dentry);
2828 return error;
2829 }
2830
2831 /*
2832 * Handle the last step of open()
2833 */
2834 static int do_last(struct nameidata *nd, struct path *path,
2835 struct file *file, const struct open_flags *op,
2836 int *opened, struct filename *name)
2837 {
2838 struct dentry *dir = nd->path.dentry;
2839 int open_flag = op->open_flag;
2840 bool will_truncate = (open_flag & O_TRUNC) != 0;
2841 bool got_write = false;
2842 int acc_mode = op->acc_mode;
2843 struct inode *inode;
2844 bool symlink_ok = false;
2845 struct path save_parent = { .dentry = NULL, .mnt = NULL };
2846 bool retried = false;
2847 int error;
2848
2849 nd->flags &= ~LOOKUP_PARENT;
2850 nd->flags |= op->intent;
2851
2852 if (nd->last_type != LAST_NORM) {
2853 error = handle_dots(nd, nd->last_type);
2854 if (error)
2855 return error;
2856 goto finish_open;
2857 }
2858
2859 if (!(open_flag & O_CREAT)) {
2860 if (nd->last.name[nd->last.len])
2861 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
2862 if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
2863 symlink_ok = true;
2864 /* we _can_ be in RCU mode here */
2865 error = lookup_fast(nd, path, &inode);
2866 if (likely(!error))
2867 goto finish_lookup;
2868
2869 if (error < 0)
2870 goto out;
2871
2872 BUG_ON(nd->inode != dir->d_inode);
2873 } else {
2874 /* create side of things */
2875 /*
2876 * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED
2877 * has been cleared when we got to the last component we are
2878 * about to look up
2879 */
2880 error = complete_walk(nd);
2881 if (error)
2882 return error;
2883
2884 audit_inode(name, dir, LOOKUP_PARENT);
2885 error = -EISDIR;
2886 /* trailing slashes? */
2887 if (nd->last.name[nd->last.len])
2888 goto out;
2889 }
2890
2891 retry_lookup:
2892 if (op->open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
2893 error = mnt_want_write(nd->path.mnt);
2894 if (!error)
2895 got_write = true;
2896 /*
2897 * do _not_ fail yet - we might not need that or fail with
2898 * a different error; let lookup_open() decide; we'll be
2899 * dropping this one anyway.
2900 */
2901 }
2902 mutex_lock(&dir->d_inode->i_mutex);
2903 error = lookup_open(nd, path, file, op, got_write, opened);
2904 mutex_unlock(&dir->d_inode->i_mutex);
2905
2906 if (error <= 0) {
2907 if (error)
2908 goto out;
2909
2910 if ((*opened & FILE_CREATED) ||
2911 !S_ISREG(file_inode(file)->i_mode))
2912 will_truncate = false;
2913
2914 audit_inode(name, file->f_path.dentry, 0);
2915 goto opened;
2916 }
2917
2918 if (*opened & FILE_CREATED) {
2919 /* Don't check for write permission, don't truncate */
2920 open_flag &= ~O_TRUNC;
2921 will_truncate = false;
2922 acc_mode = MAY_OPEN;
2923 path_to_nameidata(path, nd);
2924 goto finish_open_created;
2925 }
2926
2927 /*
2928 * create/update audit record if it already exists.
2929 */
2930 if (d_is_positive(path->dentry))
2931 audit_inode(name, path->dentry, 0);
2932
2933 /*
2934 * If atomic_open() acquired write access it is dropped now due to
2935 * possible mount and symlink following (this might be optimized away if
2936 * necessary...)
2937 */
2938 if (got_write) {
2939 mnt_drop_write(nd->path.mnt);
2940 got_write = false;
2941 }
2942
2943 error = -EEXIST;
2944 if ((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))
2945 goto exit_dput;
2946
2947 error = follow_managed(path, nd->flags);
2948 if (error < 0)
2949 goto exit_dput;
2950
2951 if (error)
2952 nd->flags |= LOOKUP_JUMPED;
2953
2954 BUG_ON(nd->flags & LOOKUP_RCU);
2955 inode = path->dentry->d_inode;
2956 finish_lookup:
2957 /* we _can_ be in RCU mode here */
2958 error = -ENOENT;
2959 if (d_is_negative(path->dentry)) {
2960 path_to_nameidata(path, nd);
2961 goto out;
2962 }
2963
2964 if (should_follow_link(path->dentry, !symlink_ok)) {
2965 if (nd->flags & LOOKUP_RCU) {
2966 if (unlikely(unlazy_walk(nd, path->dentry))) {
2967 error = -ECHILD;
2968 goto out;
2969 }
2970 }
2971 BUG_ON(inode != path->dentry->d_inode);
2972 return 1;
2973 }
2974
2975 if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path->mnt) {
2976 path_to_nameidata(path, nd);
2977 } else {
2978 save_parent.dentry = nd->path.dentry;
2979 save_parent.mnt = mntget(path->mnt);
2980 nd->path.dentry = path->dentry;
2981
2982 }
2983 nd->inode = inode;
2984 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
2985 finish_open:
2986 error = complete_walk(nd);
2987 if (error) {
2988 path_put(&save_parent);
2989 return error;
2990 }
2991 audit_inode(name, nd->path.dentry, 0);
2992 error = -EISDIR;
2993 if ((open_flag & O_CREAT) &&
2994 (d_is_directory(nd->path.dentry) || d_is_autodir(nd->path.dentry)))
2995 goto out;
2996 error = -ENOTDIR;
2997 if ((nd->flags & LOOKUP_DIRECTORY) && !d_is_directory(nd->path.dentry))
2998 goto out;
2999 if (!S_ISREG(nd->inode->i_mode))
3000 will_truncate = false;
3001
3002 if (will_truncate) {
3003 error = mnt_want_write(nd->path.mnt);
3004 if (error)
3005 goto out;
3006 got_write = true;
3007 }
3008 finish_open_created:
3009 error = may_open(&nd->path, acc_mode, open_flag);
3010 if (error)
3011 goto out;
3012 file->f_path.mnt = nd->path.mnt;
3013 error = finish_open(file, nd->path.dentry, NULL, opened);
3014 if (error) {
3015 if (error == -EOPENSTALE)
3016 goto stale_open;
3017 goto out;
3018 }
3019 opened:
3020 error = open_check_o_direct(file);
3021 if (error)
3022 goto exit_fput;
3023 error = ima_file_check(file, op->acc_mode);
3024 if (error)
3025 goto exit_fput;
3026
3027 if (will_truncate) {
3028 error = handle_truncate(file);
3029 if (error)
3030 goto exit_fput;
3031 }
3032 out:
3033 if (got_write)
3034 mnt_drop_write(nd->path.mnt);
3035 path_put(&save_parent);
3036 terminate_walk(nd);
3037 return error;
3038
3039 exit_dput:
3040 path_put_conditional(path, nd);
3041 goto out;
3042 exit_fput:
3043 fput(file);
3044 goto out;
3045
3046 stale_open:
3047 /* If no saved parent or already retried then can't retry */
3048 if (!save_parent.dentry || retried)
3049 goto out;
3050
3051 BUG_ON(save_parent.dentry != dir);
3052 path_put(&nd->path);
3053 nd->path = save_parent;
3054 nd->inode = dir->d_inode;
3055 save_parent.mnt = NULL;
3056 save_parent.dentry = NULL;
3057 if (got_write) {
3058 mnt_drop_write(nd->path.mnt);
3059 got_write = false;
3060 }
3061 retried = true;
3062 goto retry_lookup;
3063 }
3064
3065 static int do_tmpfile(int dfd, struct filename *pathname,
3066 struct nameidata *nd, int flags,
3067 const struct open_flags *op,
3068 struct file *file, int *opened)
3069 {
3070 static const struct qstr name = QSTR_INIT("/", 1);
3071 struct dentry *dentry, *child;
3072 struct inode *dir;
3073 int error = path_lookupat(dfd, pathname->name,
3074 flags | LOOKUP_DIRECTORY, nd);
3075 if (unlikely(error))
3076 return error;
3077 error = mnt_want_write(nd->path.mnt);
3078 if (unlikely(error))
3079 goto out;
3080 /* we want directory to be writable */
3081 error = inode_permission(nd->inode, MAY_WRITE | MAY_EXEC);
3082 if (error)
3083 goto out2;
3084 dentry = nd->path.dentry;
3085 dir = dentry->d_inode;
3086 if (!dir->i_op->tmpfile) {
3087 error = -EOPNOTSUPP;
3088 goto out2;
3089 }
3090 child = d_alloc(dentry, &name);
3091 if (unlikely(!child)) {
3092 error = -ENOMEM;
3093 goto out2;
3094 }
3095 nd->flags &= ~LOOKUP_DIRECTORY;
3096 nd->flags |= op->intent;
3097 dput(nd->path.dentry);
3098 nd->path.dentry = child;
3099 error = dir->i_op->tmpfile(dir, nd->path.dentry, op->mode);
3100 if (error)
3101 goto out2;
3102 audit_inode(pathname, nd->path.dentry, 0);
3103 error = may_open(&nd->path, op->acc_mode, op->open_flag);
3104 if (error)
3105 goto out2;
3106 file->f_path.mnt = nd->path.mnt;
3107 error = finish_open(file, nd->path.dentry, NULL, opened);
3108 if (error)
3109 goto out2;
3110 error = open_check_o_direct(file);
3111 if (error) {
3112 fput(file);
3113 } else if (!(op->open_flag & O_EXCL)) {
3114 struct inode *inode = file_inode(file);
3115 spin_lock(&inode->i_lock);
3116 inode->i_state |= I_LINKABLE;
3117 spin_unlock(&inode->i_lock);
3118 }
3119 out2:
3120 mnt_drop_write(nd->path.mnt);
3121 out:
3122 path_put(&nd->path);
3123 return error;
3124 }
3125
3126 static struct file *path_openat(int dfd, struct filename *pathname,
3127 struct nameidata *nd, const struct open_flags *op, int flags)
3128 {
3129 struct file *base = NULL;
3130 struct file *file;
3131 struct path path;
3132 int opened = 0;
3133 int error;
3134
3135 file = get_empty_filp();
3136 if (IS_ERR(file))
3137 return file;
3138
3139 file->f_flags = op->open_flag;
3140
3141 if (unlikely(file->f_flags & __O_TMPFILE)) {
3142 error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
3143 goto out;
3144 }
3145
3146 error = path_init(dfd, pathname->name, flags | LOOKUP_PARENT, nd, &base);
3147 if (unlikely(error))
3148 goto out;
3149
3150 current->total_link_count = 0;
3151 error = link_path_walk(pathname->name, nd);
3152 if (unlikely(error))
3153 goto out;
3154
3155 error = do_last(nd, &path, file, op, &opened, pathname);
3156 while (unlikely(error > 0)) { /* trailing symlink */
3157 struct path link = path;
3158 void *cookie;
3159 if (!(nd->flags & LOOKUP_FOLLOW)) {
3160 path_put_conditional(&path, nd);
3161 path_put(&nd->path);
3162 error = -ELOOP;
3163 break;
3164 }
3165 error = may_follow_link(&link, nd);
3166 if (unlikely(error))
3167 break;
3168 nd->flags |= LOOKUP_PARENT;
3169 nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
3170 error = follow_link(&link, nd, &cookie);
3171 if (unlikely(error))
3172 break;
3173 error = do_last(nd, &path, file, op, &opened, pathname);
3174 put_link(nd, &link, cookie);
3175 }
3176 out:
3177 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT))
3178 path_put(&nd->root);
3179 if (base)
3180 fput(base);
3181 if (!(opened & FILE_OPENED)) {
3182 BUG_ON(!error);
3183 put_filp(file);
3184 }
3185 if (unlikely(error)) {
3186 if (error == -EOPENSTALE) {
3187 if (flags & LOOKUP_RCU)
3188 error = -ECHILD;
3189 else
3190 error = -ESTALE;
3191 }
3192 file = ERR_PTR(error);
3193 }
3194 return file;
3195 }
3196
3197 struct file *do_filp_open(int dfd, struct filename *pathname,
3198 const struct open_flags *op)
3199 {
3200 struct nameidata nd;
3201 int flags = op->lookup_flags;
3202 struct file *filp;
3203
3204 filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_RCU);
3205 if (unlikely(filp == ERR_PTR(-ECHILD)))
3206 filp = path_openat(dfd, pathname, &nd, op, flags);
3207 if (unlikely(filp == ERR_PTR(-ESTALE)))
3208 filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_REVAL);
3209 return filp;
3210 }
3211
3212 struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
3213 const char *name, const struct open_flags *op)
3214 {
3215 struct nameidata nd;
3216 struct file *file;
3217 struct filename filename = { .name = name };
3218 int flags = op->lookup_flags | LOOKUP_ROOT;
3219
3220 nd.root.mnt = mnt;
3221 nd.root.dentry = dentry;
3222
3223 if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN)
3224 return ERR_PTR(-ELOOP);
3225
3226 file = path_openat(-1, &filename, &nd, op, flags | LOOKUP_RCU);
3227 if (unlikely(file == ERR_PTR(-ECHILD)))
3228 file = path_openat(-1, &filename, &nd, op, flags);
3229 if (unlikely(file == ERR_PTR(-ESTALE)))
3230 file = path_openat(-1, &filename, &nd, op, flags | LOOKUP_REVAL);
3231 return file;
3232 }
3233
3234 struct dentry *kern_path_create(int dfd, const char *pathname,
3235 struct path *path, unsigned int lookup_flags)
3236 {
3237 struct dentry *dentry = ERR_PTR(-EEXIST);
3238 struct nameidata nd;
3239 int err2;
3240 int error;
3241 bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);
3242
3243 /*
3244 * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
3245 * other flags passed in are ignored!
3246 */
3247 lookup_flags &= LOOKUP_REVAL;
3248
3249 error = do_path_lookup(dfd, pathname, LOOKUP_PARENT|lookup_flags, &nd);
3250 if (error)
3251 return ERR_PTR(error);
3252
3253 /*
3254 * Yucky last component or no last component at all?
3255 * (foo/., foo/.., /////)
3256 */
3257 if (nd.last_type != LAST_NORM)
3258 goto out;
3259 nd.flags &= ~LOOKUP_PARENT;
3260 nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL;
3261
3262 /* don't fail immediately if it's r/o, at least try to report other errors */
3263 err2 = mnt_want_write(nd.path.mnt);
3264 /*
3265 * Do the final lookup.
3266 */
3267 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
3268 dentry = lookup_hash(&nd);
3269 if (IS_ERR(dentry))
3270 goto unlock;
3271
3272 error = -EEXIST;
3273 if (d_is_positive(dentry))
3274 goto fail;
3275
3276 /*
3277 * Special case - lookup gave negative, but... we had foo/bar/
3278 * From the vfs_mknod() POV we just have a negative dentry -
3279 * all is fine. Let's be bastards - you had / on the end, you've
3280 * been asking for (non-existent) directory. -ENOENT for you.
3281 */
3282 if (unlikely(!is_dir && nd.last.name[nd.last.len])) {
3283 error = -ENOENT;
3284 goto fail;
3285 }
3286 if (unlikely(err2)) {
3287 error = err2;
3288 goto fail;
3289 }
3290 *path = nd.path;
3291 return dentry;
3292 fail:
3293 dput(dentry);
3294 dentry = ERR_PTR(error);
3295 unlock:
3296 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
3297 if (!err2)
3298 mnt_drop_write(nd.path.mnt);
3299 out:
3300 path_put(&nd.path);
3301 return dentry;
3302 }
3303 EXPORT_SYMBOL(kern_path_create);
3304
3305 void done_path_create(struct path *path, struct dentry *dentry)
3306 {
3307 dput(dentry);
3308 mutex_unlock(&path->dentry->d_inode->i_mutex);
3309 mnt_drop_write(path->mnt);
3310 path_put(path);
3311 }
3312 EXPORT_SYMBOL(done_path_create);
3313
3314 struct dentry *user_path_create(int dfd, const char __user *pathname,
3315 struct path *path, unsigned int lookup_flags)
3316 {
3317 struct filename *tmp = getname(pathname);
3318 struct dentry *res;
3319 if (IS_ERR(tmp))
3320 return ERR_CAST(tmp);
3321 res = kern_path_create(dfd, tmp->name, path, lookup_flags);
3322 putname(tmp);
3323 return res;
3324 }
3325 EXPORT_SYMBOL(user_path_create);
3326
3327 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
3328 {
3329 int error = may_create(dir, dentry);
3330
3331 if (error)
3332 return error;
3333
3334 if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
3335 return -EPERM;
3336
3337 if (!dir->i_op->mknod)
3338 return -EPERM;
3339
3340 error = devcgroup_inode_mknod(mode, dev);
3341 if (error)
3342 return error;
3343
3344 error = security_inode_mknod(dir, dentry, mode, dev);
3345 if (error)
3346 return error;
3347
3348 error = dir->i_op->mknod(dir, dentry, mode, dev);
3349 if (!error)
3350 fsnotify_create(dir, dentry);
3351 return error;
3352 }
3353
3354 static int may_mknod(umode_t mode)
3355 {
3356 switch (mode & S_IFMT) {
3357 case S_IFREG:
3358 case S_IFCHR:
3359 case S_IFBLK:
3360 case S_IFIFO:
3361 case S_IFSOCK:
3362 case 0: /* zero mode translates to S_IFREG */
3363 return 0;
3364 case S_IFDIR:
3365 return -EPERM;
3366 default:
3367 return -EINVAL;
3368 }
3369 }
3370
3371 SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
3372 unsigned, dev)
3373 {
3374 struct dentry *dentry;
3375 struct path path;
3376 int error;
3377 unsigned int lookup_flags = 0;
3378
3379 error = may_mknod(mode);
3380 if (error)
3381 return error;
3382 retry:
3383 dentry = user_path_create(dfd, filename, &path, lookup_flags);
3384 if (IS_ERR(dentry))
3385 return PTR_ERR(dentry);
3386
3387 if (!IS_POSIXACL(path.dentry->d_inode))
3388 mode &= ~current_umask();
3389 error = security_path_mknod(&path, dentry, mode, dev);
3390 if (error)
3391 goto out;
3392 switch (mode & S_IFMT) {
3393 case 0: case S_IFREG:
3394 error = vfs_create(path.dentry->d_inode,dentry,mode,true);
3395 break;
3396 case S_IFCHR: case S_IFBLK:
3397 error = vfs_mknod(path.dentry->d_inode,dentry,mode,
3398 new_decode_dev(dev));
3399 break;
3400 case S_IFIFO: case S_IFSOCK:
3401 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
3402 break;
3403 }
3404 out:
3405 done_path_create(&path, dentry);
3406 if (retry_estale(error, lookup_flags)) {
3407 lookup_flags |= LOOKUP_REVAL;
3408 goto retry;
3409 }
3410 return error;
3411 }
3412
3413 SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
3414 {
3415 return sys_mknodat(AT_FDCWD, filename, mode, dev);
3416 }
3417
3418 int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
3419 {
3420 int error = may_create(dir, dentry);
3421 unsigned max_links = dir->i_sb->s_max_links;
3422
3423 if (error)
3424 return error;
3425
3426 if (!dir->i_op->mkdir)
3427 return -EPERM;
3428
3429 mode &= (S_IRWXUGO|S_ISVTX);
3430 error = security_inode_mkdir(dir, dentry, mode);
3431 if (error)
3432 return error;
3433
3434 if (max_links && dir->i_nlink >= max_links)
3435 return -EMLINK;
3436
3437 error = dir->i_op->mkdir(dir, dentry, mode);
3438 if (!error)
3439 fsnotify_mkdir(dir, dentry);
3440 return error;
3441 }
3442
3443 SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
3444 {
3445 struct dentry *dentry;
3446 struct path path;
3447 int error;
3448 unsigned int lookup_flags = LOOKUP_DIRECTORY;
3449
3450 retry:
3451 dentry = user_path_create(dfd, pathname, &path, lookup_flags);
3452 if (IS_ERR(dentry))
3453 return PTR_ERR(dentry);
3454
3455 if (!IS_POSIXACL(path.dentry->d_inode))
3456 mode &= ~current_umask();
3457 error = security_path_mkdir(&path, dentry, mode);
3458 if (!error)
3459 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
3460 done_path_create(&path, dentry);
3461 if (retry_estale(error, lookup_flags)) {
3462 lookup_flags |= LOOKUP_REVAL;
3463 goto retry;
3464 }
3465 return error;
3466 }
3467
3468 SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
3469 {
3470 return sys_mkdirat(AT_FDCWD, pathname, mode);
3471 }
3472
3473 /*
3474 * The dentry_unhash() helper will try to drop the dentry early: we
3475 * should have a usage count of 1 if we're the only user of this
3476 * dentry, and if that is true (possibly after pruning the dcache),
3477 * then we drop the dentry now.
3478 *
3479 * A low-level filesystem can, if it choses, legally
3480 * do a
3481 *
3482 * if (!d_unhashed(dentry))
3483 * return -EBUSY;
3484 *
3485 * if it cannot handle the case of removing a directory
3486 * that is still in use by something else..
3487 */
3488 void dentry_unhash(struct dentry *dentry)
3489 {
3490 shrink_dcache_parent(dentry);
3491 spin_lock(&dentry->d_lock);
3492 if (dentry->d_lockref.count == 1)
3493 __d_drop(dentry);
3494 spin_unlock(&dentry->d_lock);
3495 }
3496
3497 int vfs_rmdir(struct inode *dir, struct dentry *dentry)
3498 {
3499 int error = may_delete(dir, dentry, 1);
3500
3501 if (error)
3502 return error;
3503
3504 if (!dir->i_op->rmdir)
3505 return -EPERM;
3506
3507 dget(dentry);
3508 mutex_lock(&dentry->d_inode->i_mutex);
3509
3510 error = -EBUSY;
3511 if (d_mountpoint(dentry))
3512 goto out;
3513
3514 error = security_inode_rmdir(dir, dentry);
3515 if (error)
3516 goto out;
3517
3518 shrink_dcache_parent(dentry);
3519 error = dir->i_op->rmdir(dir, dentry);
3520 if (error)
3521 goto out;
3522
3523 dentry->d_inode->i_flags |= S_DEAD;
3524 dont_mount(dentry);
3525
3526 out:
3527 mutex_unlock(&dentry->d_inode->i_mutex);
3528 dput(dentry);
3529 if (!error)
3530 d_delete(dentry);
3531 return error;
3532 }
3533
3534 static long do_rmdir(int dfd, const char __user *pathname)
3535 {
3536 int error = 0;
3537 struct filename *name;
3538 struct dentry *dentry;
3539 struct nameidata nd;
3540 unsigned int lookup_flags = 0;
3541 retry:
3542 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
3543 if (IS_ERR(name))
3544 return PTR_ERR(name);
3545
3546 switch(nd.last_type) {
3547 case LAST_DOTDOT:
3548 error = -ENOTEMPTY;
3549 goto exit1;
3550 case LAST_DOT:
3551 error = -EINVAL;
3552 goto exit1;
3553 case LAST_ROOT:
3554 error = -EBUSY;
3555 goto exit1;
3556 }
3557
3558 nd.flags &= ~LOOKUP_PARENT;
3559 error = mnt_want_write(nd.path.mnt);
3560 if (error)
3561 goto exit1;
3562
3563 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
3564 dentry = lookup_hash(&nd);
3565 error = PTR_ERR(dentry);
3566 if (IS_ERR(dentry))
3567 goto exit2;
3568 if (!dentry->d_inode) {
3569 error = -ENOENT;
3570 goto exit3;
3571 }
3572 error = security_path_rmdir(&nd.path, dentry);
3573 if (error)
3574 goto exit3;
3575 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
3576 exit3:
3577 dput(dentry);
3578 exit2:
3579 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
3580 mnt_drop_write(nd.path.mnt);
3581 exit1:
3582 path_put(&nd.path);
3583 putname(name);
3584 if (retry_estale(error, lookup_flags)) {
3585 lookup_flags |= LOOKUP_REVAL;
3586 goto retry;
3587 }
3588 return error;
3589 }
3590
3591 SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
3592 {
3593 return do_rmdir(AT_FDCWD, pathname);
3594 }
3595
3596 /**
3597 * vfs_unlink - unlink a filesystem object
3598 * @dir: parent directory
3599 * @dentry: victim
3600 * @delegated_inode: returns victim inode, if the inode is delegated.
3601 *
3602 * The caller must hold dir->i_mutex.
3603 *
3604 * If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and
3605 * return a reference to the inode in delegated_inode. The caller
3606 * should then break the delegation on that inode and retry. Because
3607 * breaking a delegation may take a long time, the caller should drop
3608 * dir->i_mutex before doing so.
3609 *
3610 * Alternatively, a caller may pass NULL for delegated_inode. This may
3611 * be appropriate for callers that expect the underlying filesystem not
3612 * to be NFS exported.
3613 */
3614 int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
3615 {
3616 struct inode *target = dentry->d_inode;
3617 int error = may_delete(dir, dentry, 0);
3618
3619 if (error)
3620 return error;
3621
3622 if (!dir->i_op->unlink)
3623 return -EPERM;
3624
3625 mutex_lock(&target->i_mutex);
3626 if (d_mountpoint(dentry))
3627 error = -EBUSY;
3628 else {
3629 error = security_inode_unlink(dir, dentry);
3630 if (!error) {
3631 error = try_break_deleg(target, delegated_inode);
3632 if (error)
3633 goto out;
3634 error = dir->i_op->unlink(dir, dentry);
3635 if (!error)
3636 dont_mount(dentry);
3637 }
3638 }
3639 out:
3640 mutex_unlock(&target->i_mutex);
3641
3642 /* We don't d_delete() NFS sillyrenamed files--they still exist. */
3643 if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
3644 fsnotify_link_count(target);
3645 d_delete(dentry);
3646 }
3647
3648 return error;
3649 }
3650
3651 /*
3652 * Make sure that the actual truncation of the file will occur outside its
3653 * directory's i_mutex. Truncate can take a long time if there is a lot of
3654 * writeout happening, and we don't want to prevent access to the directory
3655 * while waiting on the I/O.
3656 */
3657 static long do_unlinkat(int dfd, const char __user *pathname)
3658 {
3659 int error;
3660 struct filename *name;
3661 struct dentry *dentry;
3662 struct nameidata nd;
3663 struct inode *inode = NULL;
3664 struct inode *delegated_inode = NULL;
3665 unsigned int lookup_flags = 0;
3666 retry:
3667 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
3668 if (IS_ERR(name))
3669 return PTR_ERR(name);
3670
3671 error = -EISDIR;
3672 if (nd.last_type != LAST_NORM)
3673 goto exit1;
3674
3675 nd.flags &= ~LOOKUP_PARENT;
3676 error = mnt_want_write(nd.path.mnt);
3677 if (error)
3678 goto exit1;
3679 retry_deleg:
3680 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
3681 dentry = lookup_hash(&nd);
3682 error = PTR_ERR(dentry);
3683 if (!IS_ERR(dentry)) {
3684 /* Why not before? Because we want correct error value */
3685 if (nd.last.name[nd.last.len])
3686 goto slashes;
3687 inode = dentry->d_inode;
3688 if (d_is_negative(dentry))
3689 goto slashes;
3690 ihold(inode);
3691 error = security_path_unlink(&nd.path, dentry);
3692 if (error)
3693 goto exit2;
3694 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
3695 exit2:
3696 dput(dentry);
3697 }
3698 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
3699 if (inode)
3700 iput(inode); /* truncate the inode here */
3701 inode = NULL;
3702 if (delegated_inode) {
3703 error = break_deleg_wait(&delegated_inode);
3704 if (!error)
3705 goto retry_deleg;
3706 }
3707 mnt_drop_write(nd.path.mnt);
3708 exit1:
3709 path_put(&nd.path);
3710 putname(name);
3711 if (retry_estale(error, lookup_flags)) {
3712 lookup_flags |= LOOKUP_REVAL;
3713 inode = NULL;
3714 goto retry;
3715 }
3716 return error;
3717
3718 slashes:
3719 if (d_is_negative(dentry))
3720 error = -ENOENT;
3721 else if (d_is_directory(dentry) || d_is_autodir(dentry))
3722 error = -EISDIR;
3723 else
3724 error = -ENOTDIR;
3725 goto exit2;
3726 }
3727
3728 SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
3729 {
3730 if ((flag & ~AT_REMOVEDIR) != 0)
3731 return -EINVAL;
3732
3733 if (flag & AT_REMOVEDIR)
3734 return do_rmdir(dfd, pathname);
3735
3736 return do_unlinkat(dfd, pathname);
3737 }
3738
3739 SYSCALL_DEFINE1(unlink, const char __user *, pathname)
3740 {
3741 return do_unlinkat(AT_FDCWD, pathname);
3742 }
3743
3744 int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
3745 {
3746 int error = may_create(dir, dentry);
3747
3748 if (error)
3749 return error;
3750
3751 if (!dir->i_op->symlink)
3752 return -EPERM;
3753
3754 error = security_inode_symlink(dir, dentry, oldname);
3755 if (error)
3756 return error;
3757
3758 error = dir->i_op->symlink(dir, dentry, oldname);
3759 if (!error)
3760 fsnotify_create(dir, dentry);
3761 return error;
3762 }
3763
3764 SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
3765 int, newdfd, const char __user *, newname)
3766 {
3767 int error;
3768 struct filename *from;
3769 struct dentry *dentry;
3770 struct path path;
3771 unsigned int lookup_flags = 0;
3772
3773 from = getname(oldname);
3774 if (IS_ERR(from))
3775 return PTR_ERR(from);
3776 retry:
3777 dentry = user_path_create(newdfd, newname, &path, lookup_flags);
3778 error = PTR_ERR(dentry);
3779 if (IS_ERR(dentry))
3780 goto out_putname;
3781
3782 error = security_path_symlink(&path, dentry, from->name);
3783 if (!error)
3784 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
3785 done_path_create(&path, dentry);
3786 if (retry_estale(error, lookup_flags)) {
3787 lookup_flags |= LOOKUP_REVAL;
3788 goto retry;
3789 }
3790 out_putname:
3791 putname(from);
3792 return error;
3793 }
3794
3795 SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
3796 {
3797 return sys_symlinkat(oldname, AT_FDCWD, newname);
3798 }
3799
3800 /**
3801 * vfs_link - create a new link
3802 * @old_dentry: object to be linked
3803 * @dir: new parent
3804 * @new_dentry: where to create the new link
3805 * @delegated_inode: returns inode needing a delegation break
3806 *
3807 * The caller must hold dir->i_mutex
3808 *
3809 * If vfs_link discovers a delegation on the to-be-linked file in need
3810 * of breaking, it will return -EWOULDBLOCK and return a reference to the
3811 * inode in delegated_inode. The caller should then break the delegation
3812 * and retry. Because breaking a delegation may take a long time, the
3813 * caller should drop the i_mutex before doing so.
3814 *
3815 * Alternatively, a caller may pass NULL for delegated_inode. This may
3816 * be appropriate for callers that expect the underlying filesystem not
3817 * to be NFS exported.
3818 */
3819 int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode)
3820 {
3821 struct inode *inode = old_dentry->d_inode;
3822 unsigned max_links = dir->i_sb->s_max_links;
3823 int error;
3824
3825 if (!inode)
3826 return -ENOENT;
3827
3828 error = may_create(dir, new_dentry);
3829 if (error)
3830 return error;
3831
3832 if (dir->i_sb != inode->i_sb)
3833 return -EXDEV;
3834
3835 /*
3836 * A link to an append-only or immutable file cannot be created.
3837 */
3838 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
3839 return -EPERM;
3840 if (!dir->i_op->link)
3841 return -EPERM;
3842 if (S_ISDIR(inode->i_mode))
3843 return -EPERM;
3844
3845 error = security_inode_link(old_dentry, dir, new_dentry);
3846 if (error)
3847 return error;
3848
3849 mutex_lock(&inode->i_mutex);
3850 /* Make sure we don't allow creating hardlink to an unlinked file */
3851 if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE))
3852 error = -ENOENT;
3853 else if (max_links && inode->i_nlink >= max_links)
3854 error = -EMLINK;
3855 else {
3856 error = try_break_deleg(inode, delegated_inode);
3857 if (!error)
3858 error = dir->i_op->link(old_dentry, dir, new_dentry);
3859 }
3860
3861 if (!error && (inode->i_state & I_LINKABLE)) {
3862 spin_lock(&inode->i_lock);
3863 inode->i_state &= ~I_LINKABLE;
3864 spin_unlock(&inode->i_lock);
3865 }
3866 mutex_unlock(&inode->i_mutex);
3867 if (!error)
3868 fsnotify_link(dir, inode, new_dentry);
3869 return error;
3870 }
3871
3872 /*
3873 * Hardlinks are often used in delicate situations. We avoid
3874 * security-related surprises by not following symlinks on the
3875 * newname. --KAB
3876 *
3877 * We don't follow them on the oldname either to be compatible
3878 * with linux 2.0, and to avoid hard-linking to directories
3879 * and other special files. --ADM
3880 */
3881 SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
3882 int, newdfd, const char __user *, newname, int, flags)
3883 {
3884 struct dentry *new_dentry;
3885 struct path old_path, new_path;
3886 struct inode *delegated_inode = NULL;
3887 int how = 0;
3888 int error;
3889
3890 if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
3891 return -EINVAL;
3892 /*
3893 * To use null names we require CAP_DAC_READ_SEARCH
3894 * This ensures that not everyone will be able to create
3895 * handlink using the passed filedescriptor.
3896 */
3897 if (flags & AT_EMPTY_PATH) {
3898 if (!capable(CAP_DAC_READ_SEARCH))
3899 return -ENOENT;
3900 how = LOOKUP_EMPTY;
3901 }
3902
3903 if (flags & AT_SYMLINK_FOLLOW)
3904 how |= LOOKUP_FOLLOW;
3905 retry:
3906 error = user_path_at(olddfd, oldname, how, &old_path);
3907 if (error)
3908 return error;
3909
3910 new_dentry = user_path_create(newdfd, newname, &new_path,
3911 (how & LOOKUP_REVAL));
3912 error = PTR_ERR(new_dentry);
3913 if (IS_ERR(new_dentry))
3914 goto out;
3915
3916 error = -EXDEV;
3917 if (old_path.mnt != new_path.mnt)
3918 goto out_dput;
3919 error = may_linkat(&old_path);
3920 if (unlikely(error))
3921 goto out_dput;
3922 error = security_path_link(old_path.dentry, &new_path, new_dentry);
3923 if (error)
3924 goto out_dput;
3925 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
3926 out_dput:
3927 done_path_create(&new_path, new_dentry);
3928 if (delegated_inode) {
3929 error = break_deleg_wait(&delegated_inode);
3930 if (!error)
3931 goto retry;
3932 }
3933 if (retry_estale(error, how)) {
3934 how |= LOOKUP_REVAL;
3935 goto retry;
3936 }
3937 out:
3938 path_put(&old_path);
3939
3940 return error;
3941 }
3942
3943 SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname)
3944 {
3945 return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
3946 }
3947
3948 /*
3949 * The worst of all namespace operations - renaming directory. "Perverted"
3950 * doesn't even start to describe it. Somebody in UCB had a heck of a trip...
3951 * Problems:
3952 * a) we can get into loop creation. Check is done in is_subdir().
3953 * b) race potential - two innocent renames can create a loop together.
3954 * That's where 4.4 screws up. Current fix: serialization on
3955 * sb->s_vfs_rename_mutex. We might be more accurate, but that's another
3956 * story.
3957 * c) we have to lock _four_ objects - parents and victim (if it exists),
3958 * and source (if it is not a directory).
3959 * And that - after we got ->i_mutex on parents (until then we don't know
3960 * whether the target exists). Solution: try to be smart with locking
3961 * order for inodes. We rely on the fact that tree topology may change
3962 * only under ->s_vfs_rename_mutex _and_ that parent of the object we
3963 * move will be locked. Thus we can rank directories by the tree
3964 * (ancestors first) and rank all non-directories after them.
3965 * That works since everybody except rename does "lock parent, lookup,
3966 * lock child" and rename is under ->s_vfs_rename_mutex.
3967 * HOWEVER, it relies on the assumption that any object with ->lookup()
3968 * has no more than 1 dentry. If "hybrid" objects will ever appear,
3969 * we'd better make sure that there's no link(2) for them.
3970 * d) conversion from fhandle to dentry may come in the wrong moment - when
3971 * we are removing the target. Solution: we will have to grab ->i_mutex
3972 * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
3973 * ->i_mutex on parents, which works but leads to some truly excessive
3974 * locking].
3975 */
3976 static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
3977 struct inode *new_dir, struct dentry *new_dentry)
3978 {
3979 int error = 0;
3980 struct inode *target = new_dentry->d_inode;
3981 unsigned max_links = new_dir->i_sb->s_max_links;
3982
3983 /*
3984 * If we are going to change the parent - check write permissions,
3985 * we'll need to flip '..'.
3986 */
3987 if (new_dir != old_dir) {
3988 error = inode_permission(old_dentry->d_inode, MAY_WRITE);
3989 if (error)
3990 return error;
3991 }
3992
3993 error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
3994 if (error)
3995 return error;
3996
3997 dget(new_dentry);
3998 if (target)
3999 mutex_lock(&target->i_mutex);
4000
4001 error = -EBUSY;
4002 if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
4003 goto out;
4004
4005 error = -EMLINK;
4006 if (max_links && !target && new_dir != old_dir &&
4007 new_dir->i_nlink >= max_links)
4008 goto out;
4009
4010 if (target)
4011 shrink_dcache_parent(new_dentry);
4012 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
4013 if (error)
4014 goto out;
4015
4016 if (target) {
4017 target->i_flags |= S_DEAD;
4018 dont_mount(new_dentry);
4019 }
4020 out:
4021 if (target)
4022 mutex_unlock(&target->i_mutex);
4023 dput(new_dentry);
4024 if (!error)
4025 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
4026 d_move(old_dentry,new_dentry);
4027 return error;
4028 }
4029
4030 static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
4031 struct inode *new_dir, struct dentry *new_dentry,
4032 struct inode **delegated_inode)
4033 {
4034 struct inode *target = new_dentry->d_inode;
4035 struct inode *source = old_dentry->d_inode;
4036 int error;
4037
4038 error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
4039 if (error)
4040 return error;
4041
4042 dget(new_dentry);
4043 lock_two_nondirectories(source, target);
4044
4045 error = -EBUSY;
4046 if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
4047 goto out;
4048
4049 error = try_break_deleg(source, delegated_inode);
4050 if (error)
4051 goto out;
4052 if (target) {
4053 error = try_break_deleg(target, delegated_inode);
4054 if (error)
4055 goto out;
4056 }
4057 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
4058 if (error)
4059 goto out;
4060
4061 if (target)
4062 dont_mount(new_dentry);
4063 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
4064 d_move(old_dentry, new_dentry);
4065 out:
4066 unlock_two_nondirectories(source, target);
4067 dput(new_dentry);
4068 return error;
4069 }
4070
4071 /**
4072 * vfs_rename - rename a filesystem object
4073 * @old_dir: parent of source
4074 * @old_dentry: source
4075 * @new_dir: parent of destination
4076 * @new_dentry: destination
4077 * @delegated_inode: returns an inode needing a delegation break
4078 *
4079 * The caller must hold multiple mutexes--see lock_rename()).
4080 *
4081 * If vfs_rename discovers a delegation in need of breaking at either
4082 * the source or destination, it will return -EWOULDBLOCK and return a
4083 * reference to the inode in delegated_inode. The caller should then
4084 * break the delegation and retry. Because breaking a delegation may
4085 * take a long time, the caller should drop all locks before doing
4086 * so.
4087 *
4088 * Alternatively, a caller may pass NULL for delegated_inode. This may
4089 * be appropriate for callers that expect the underlying filesystem not
4090 * to be NFS exported.
4091 */
4092 int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4093 struct inode *new_dir, struct dentry *new_dentry,
4094 struct inode **delegated_inode)
4095 {
4096 int error;
4097 int is_dir = d_is_directory(old_dentry) || d_is_autodir(old_dentry);
4098 const unsigned char *old_name;
4099
4100 if (old_dentry->d_inode == new_dentry->d_inode)
4101 return 0;
4102
4103 error = may_delete(old_dir, old_dentry, is_dir);
4104 if (error)
4105 return error;
4106
4107 if (!new_dentry->d_inode)
4108 error = may_create(new_dir, new_dentry);
4109 else
4110 error = may_delete(new_dir, new_dentry, is_dir);
4111 if (error)
4112 return error;
4113
4114 if (!old_dir->i_op->rename)
4115 return -EPERM;
4116
4117 old_name = fsnotify_oldname_init(old_dentry->d_name.name);
4118
4119 if (is_dir)
4120 error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry);
4121 else
4122 error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry,delegated_inode);
4123 if (!error)
4124 fsnotify_move(old_dir, new_dir, old_name, is_dir,
4125 new_dentry->d_inode, old_dentry);
4126 fsnotify_oldname_free(old_name);
4127
4128 return error;
4129 }
4130
4131 SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
4132 int, newdfd, const char __user *, newname)
4133 {
4134 struct dentry *old_dir, *new_dir;
4135 struct dentry *old_dentry, *new_dentry;
4136 struct dentry *trap;
4137 struct nameidata oldnd, newnd;
4138 struct inode *delegated_inode = NULL;
4139 struct filename *from;
4140 struct filename *to;
4141 unsigned int lookup_flags = 0;
4142 bool should_retry = false;
4143 int error;
4144 retry:
4145 from = user_path_parent(olddfd, oldname, &oldnd, lookup_flags);
4146 if (IS_ERR(from)) {
4147 error = PTR_ERR(from);
4148 goto exit;
4149 }
4150
4151 to = user_path_parent(newdfd, newname, &newnd, lookup_flags);
4152 if (IS_ERR(to)) {
4153 error = PTR_ERR(to);
4154 goto exit1;
4155 }
4156
4157 error = -EXDEV;
4158 if (oldnd.path.mnt != newnd.path.mnt)
4159 goto exit2;
4160
4161 old_dir = oldnd.path.dentry;
4162 error = -EBUSY;
4163 if (oldnd.last_type != LAST_NORM)
4164 goto exit2;
4165
4166 new_dir = newnd.path.dentry;
4167 if (newnd.last_type != LAST_NORM)
4168 goto exit2;
4169
4170 error = mnt_want_write(oldnd.path.mnt);
4171 if (error)
4172 goto exit2;
4173
4174 oldnd.flags &= ~LOOKUP_PARENT;
4175 newnd.flags &= ~LOOKUP_PARENT;
4176 newnd.flags |= LOOKUP_RENAME_TARGET;
4177
4178 retry_deleg:
4179 trap = lock_rename(new_dir, old_dir);
4180
4181 old_dentry = lookup_hash(&oldnd);
4182 error = PTR_ERR(old_dentry);
4183 if (IS_ERR(old_dentry))
4184 goto exit3;
4185 /* source must exist */
4186 error = -ENOENT;
4187 if (d_is_negative(old_dentry))
4188 goto exit4;
4189 /* unless the source is a directory trailing slashes give -ENOTDIR */
4190 if (!d_is_directory(old_dentry) && !d_is_autodir(old_dentry)) {
4191 error = -ENOTDIR;
4192 if (oldnd.last.name[oldnd.last.len])
4193 goto exit4;
4194 if (newnd.last.name[newnd.last.len])
4195 goto exit4;
4196 }
4197 /* source should not be ancestor of target */
4198 error = -EINVAL;
4199 if (old_dentry == trap)
4200 goto exit4;
4201 new_dentry = lookup_hash(&newnd);
4202 error = PTR_ERR(new_dentry);
4203 if (IS_ERR(new_dentry))
4204 goto exit4;
4205 /* target should not be an ancestor of source */
4206 error = -ENOTEMPTY;
4207 if (new_dentry == trap)
4208 goto exit5;
4209
4210 error = security_path_rename(&oldnd.path, old_dentry,
4211 &newnd.path, new_dentry);
4212 if (error)
4213 goto exit5;
4214 error = vfs_rename(old_dir->d_inode, old_dentry,
4215 new_dir->d_inode, new_dentry,
4216 &delegated_inode);
4217 exit5:
4218 dput(new_dentry);
4219 exit4:
4220 dput(old_dentry);
4221 exit3:
4222 unlock_rename(new_dir, old_dir);
4223 if (delegated_inode) {
4224 error = break_deleg_wait(&delegated_inode);
4225 if (!error)
4226 goto retry_deleg;
4227 }
4228 mnt_drop_write(oldnd.path.mnt);
4229 exit2:
4230 if (retry_estale(error, lookup_flags))
4231 should_retry = true;
4232 path_put(&newnd.path);
4233 putname(to);
4234 exit1:
4235 path_put(&oldnd.path);
4236 putname(from);
4237 if (should_retry) {
4238 should_retry = false;
4239 lookup_flags |= LOOKUP_REVAL;
4240 goto retry;
4241 }
4242 exit:
4243 return error;
4244 }
4245
4246 SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
4247 {
4248 return sys_renameat(AT_FDCWD, oldname, AT_FDCWD, newname);
4249 }
4250
4251 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
4252 {
4253 int len;
4254
4255 len = PTR_ERR(link);
4256 if (IS_ERR(link))
4257 goto out;
4258
4259 len = strlen(link);
4260 if (len > (unsigned) buflen)
4261 len = buflen;
4262 if (copy_to_user(buffer, link, len))
4263 len = -EFAULT;
4264 out:
4265 return len;
4266 }
4267
4268 /*
4269 * A helper for ->readlink(). This should be used *ONLY* for symlinks that
4270 * have ->follow_link() touching nd only in nd_set_link(). Using (or not
4271 * using) it for any given inode is up to filesystem.
4272 */
4273 int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
4274 {
4275 struct nameidata nd;
4276 void *cookie;
4277 int res;
4278
4279 nd.depth = 0;
4280 cookie = dentry->d_inode->i_op->follow_link(dentry, &nd);
4281 if (IS_ERR(cookie))
4282 return PTR_ERR(cookie);
4283
4284 res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd));
4285 if (dentry->d_inode->i_op->put_link)
4286 dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
4287 return res;
4288 }
4289
4290 /* get the link contents into pagecache */
4291 static char *page_getlink(struct dentry * dentry, struct page **ppage)
4292 {
4293 char *kaddr;
4294 struct page *page;
4295 struct address_space *mapping = dentry->d_inode->i_mapping;
4296 page = read_mapping_page(mapping, 0, NULL);
4297 if (IS_ERR(page))
4298 return (char*)page;
4299 *ppage = page;
4300 kaddr = kmap(page);
4301 nd_terminate_link(kaddr, dentry->d_inode->i_size, PAGE_SIZE - 1);
4302 return kaddr;
4303 }
4304
4305 int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
4306 {
4307 struct page *page = NULL;
4308 char *s = page_getlink(dentry, &page);
4309 int res = vfs_readlink(dentry,buffer,buflen,s);
4310 if (page) {
4311 kunmap(page);
4312 page_cache_release(page);
4313 }
4314 return res;
4315 }
4316
4317 void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
4318 {
4319 struct page *page = NULL;
4320 nd_set_link(nd, page_getlink(dentry, &page));
4321 return page;
4322 }
4323
4324 void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
4325 {
4326 struct page *page = cookie;
4327
4328 if (page) {
4329 kunmap(page);
4330 page_cache_release(page);
4331 }
4332 }
4333
4334 /*
4335 * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
4336 */
4337 int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
4338 {
4339 struct address_space *mapping = inode->i_mapping;
4340 struct page *page;
4341 void *fsdata;
4342 int err;
4343 char *kaddr;
4344 unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE;
4345 if (nofs)
4346 flags |= AOP_FLAG_NOFS;
4347
4348 retry:
4349 err = pagecache_write_begin(NULL, mapping, 0, len-1,
4350 flags, &page, &fsdata);
4351 if (err)
4352 goto fail;
4353
4354 kaddr = kmap_atomic(page);
4355 memcpy(kaddr, symname, len-1);
4356 kunmap_atomic(kaddr);
4357
4358 err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
4359 page, fsdata);
4360 if (err < 0)
4361 goto fail;
4362 if (err < len-1)
4363 goto retry;
4364
4365 mark_inode_dirty(inode);
4366 return 0;
4367 fail:
4368 return err;
4369 }
4370
4371 int page_symlink(struct inode *inode, const char *symname, int len)
4372 {
4373 return __page_symlink(inode, symname, len,
4374 !(mapping_gfp_mask(inode->i_mapping) & __GFP_FS));
4375 }
4376
4377 const struct inode_operations page_symlink_inode_operations = {
4378 .readlink = generic_readlink,
4379 .follow_link = page_follow_link_light,
4380 .put_link = page_put_link,
4381 };
4382
4383 EXPORT_SYMBOL(user_path_at);
4384 EXPORT_SYMBOL(follow_down_one);
4385 EXPORT_SYMBOL(follow_down);
4386 EXPORT_SYMBOL(follow_up);
4387 EXPORT_SYMBOL(get_write_access); /* nfsd */
4388 EXPORT_SYMBOL(lock_rename);
4389 EXPORT_SYMBOL(lookup_one_len);
4390 EXPORT_SYMBOL(page_follow_link_light);
4391 EXPORT_SYMBOL(page_put_link);
4392 EXPORT_SYMBOL(page_readlink);
4393 EXPORT_SYMBOL(__page_symlink);
4394 EXPORT_SYMBOL(page_symlink);
4395 EXPORT_SYMBOL(page_symlink_inode_operations);
4396 EXPORT_SYMBOL(kern_path);
4397 EXPORT_SYMBOL(vfs_path_lookup);
4398 EXPORT_SYMBOL(inode_permission);
4399 EXPORT_SYMBOL(unlock_rename);
4400 EXPORT_SYMBOL(vfs_create);
4401 EXPORT_SYMBOL(vfs_link);
4402 EXPORT_SYMBOL(vfs_mkdir);
4403 EXPORT_SYMBOL(vfs_mknod);
4404 EXPORT_SYMBOL(generic_permission);
4405 EXPORT_SYMBOL(vfs_readlink);
4406 EXPORT_SYMBOL(vfs_rename);
4407 EXPORT_SYMBOL(vfs_rmdir);
4408 EXPORT_SYMBOL(vfs_symlink);
4409 EXPORT_SYMBOL(vfs_unlink);
4410 EXPORT_SYMBOL(dentry_unhash);
4411 EXPORT_SYMBOL(generic_readlink);
This page took 0.123928 seconds and 5 git commands to generate.