/* fs/fs_struct.c */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/fs_struct.h>

/*
 * Replace fs->root with *path and put the old root.
 * It can block.
 */
void set_fs_root(struct fs_struct *fs, struct path *path)
{
	struct path old_root;

	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_root = fs->root;
	fs->root = *path;
	path_get(path);
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);
	if (old_root.dentry)
		path_put(&old_root);
}

/*
 * Replace fs->pwd with *path and put the old pwd.
 * It can block.
 */
void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
	struct path old_pwd;

	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put(&old_pwd);
}
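
/*
 * set_fs_root() and set_fs_pwd() bump fs->seq around the update so that
 * lockless (rcu-walk style) readers can snapshot fs->root or fs->pwd and
 * detect a concurrent change.  A minimal reader sketch follows; the
 * function name is illustrative only, and the snapshot is valid only
 * under rcu_read_lock() since no reference is taken on the path.
 */
static inline void get_fs_root_rcu_sketch(struct fs_struct *fs,
					  struct path *root)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
	} while (read_seqcount_retry(&fs->seq, seq));
}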
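/*
 * Used by pivot_root(): switch every task whose root or pwd still
 * points at old_root over to new_root.  The references moved off
 * old_root are dropped once all locks have been released.
 */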
void chroot_fs_refs(struct path *old_root, struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;
	int count = 0;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			spin_lock(&fs->lock);
			write_seqcount_begin(&fs->seq);
			if (fs->root.dentry == old_root->dentry
			    && fs->root.mnt == old_root->mnt) {
				path_get(new_root);
				fs->root = *new_root;
				count++;
			}
			if (fs->pwd.dentry == old_root->dentry
			    && fs->pwd.mnt == old_root->mnt) {
				path_get(new_root);
				fs->pwd = *new_root;
				count++;
			}
			write_seqcount_end(&fs->seq);
			spin_unlock(&fs->lock);
		}
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
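	/*
	 * path_put() can sleep, so drop the old references only after
	 * tasklist_lock and the per-fs locks have been released.
	 */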
	while (count--)
		path_put(old_root);
}

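/*
 * Drop the references held on root and pwd and free the fs_struct.
 * The caller must own the last reference.
 */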
void free_fs_struct(struct fs_struct *fs)
{
	path_put(&fs->root);
	path_put(&fs->pwd);
	kmem_cache_free(fs_cachep, fs);
}

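/*
 * Detach the exiting task from its fs_struct and free it once the
 * last user is gone.
 */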
void exit_fs(struct task_struct *tsk)
{
	struct fs_struct *fs = tsk->fs;

	if (fs) {
		int kill;
		task_lock(tsk);
		spin_lock(&fs->lock);
		write_seqcount_begin(&fs->seq);
		tsk->fs = NULL;
		kill = !--fs->users;
		write_seqcount_end(&fs->seq);
		spin_unlock(&fs->lock);
		task_unlock(tsk);
		if (kill)
			free_fs_struct(fs);
	}
}

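/*
 * Allocate a new fs_struct whose root, pwd and umask are copied from
 * 'old'.  The copy starts with a single user.  Returns NULL on
 * allocation failure.
 */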
struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* No need to lock the new fs: it is not visible to any other task yet. */
	if (fs) {
		fs->users = 1;
		fs->in_exec = 0;
		spin_lock_init(&fs->lock);
		seqcount_init(&fs->seq);
		fs->umask = old->umask;
		get_fs_root_and_pwd(old, &fs->root, &fs->pwd);
	}
	return fs;
}

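/*
 * Give the current task a private copy of its fs_struct and drop its
 * reference on the previously shared one.
 */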
int unshare_fs_struct(void)
{
	struct fs_struct *fs = current->fs;
	struct fs_struct *new_fs = copy_fs_struct(fs);
	int kill;

	if (!new_fs)
		return -ENOMEM;

	task_lock(current);
	spin_lock(&fs->lock);
	kill = !--fs->users;
	current->fs = new_fs;
	spin_unlock(&fs->lock);
	task_unlock(current);

	if (kill)
		free_fs_struct(fs);

	return 0;
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);

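/* Return the current task's file mode creation mask. */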
int current_umask(void)
{
	return current->fs->umask;
}
EXPORT_SYMBOL(current_umask);

/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
	.users		= 1,
	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
	.seq		= SEQCNT_ZERO,
	.umask		= 0022,
};

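/*
 * Called when a kernel thread daemonizes: switch the current task over
 * to init_fs and drop its reference on the old fs_struct.
 */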
void daemonize_fs_struct(void)
{
	struct fs_struct *fs = current->fs;

	if (fs) {
		int kill;

		task_lock(current);

		spin_lock(&init_fs.lock);
		init_fs.users++;
		spin_unlock(&init_fs.lock);

		spin_lock(&fs->lock);
		current->fs = &init_fs;
		kill = !--fs->users;
		spin_unlock(&fs->lock);

		task_unlock(current);
		if (kill)
			free_fs_struct(fs);
	}
}