/*
 *  linux/fs/proc/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/completion.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "internal.h"

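/*
 * Take a reference on a proc_dir_entry.  A NULL entry is passed
 * through untouched.
 */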
struct proc_dir_entry *de_get(struct proc_dir_entry *de)
{
	if (de)
		atomic_inc(&de->count);
	return de;
}

/*
 * Decrements the use count and checks for deferred deletion.
 */
void de_put(struct proc_dir_entry *de)
{
	if (de) {
		lock_kernel();
		if (!atomic_read(&de->count)) {
			printk("de_put: entry %s already free!\n", de->name);
			unlock_kernel();
			return;
		}

		if (atomic_dec_and_test(&de->count))
			free_proc_entry(de);
		unlock_kernel();
	}
}

/*
 * Called when the inode is evicted: drop the pid reference, the
 * owning module reference and the proc_dir_entry use count.
 */
static void proc_delete_inode(struct inode *inode)
{
	struct proc_dir_entry *de;

	truncate_inode_pages(&inode->i_data, 0);

	/* Stop tracking associated processes */
	put_pid(PROC_I(inode)->pid);

	/* Let go of any associated proc directory entry */
	de = PROC_I(inode)->pde;
	if (de) {
		if (de->owner)
			module_put(de->owner);
		de_put(de);
	}
	clear_inode(inode);
}

struct vfsmount *proc_mnt;

static struct kmem_cache *proc_inode_cachep;

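/*
 * Allocate a proc inode from proc_inode_cachep and reset the
 * proc-specific fields (pid, fd, link helper, pde) before handing
 * back the embedded VFS inode.
 */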
static struct inode *proc_alloc_inode(struct super_block *sb)
{
	struct proc_inode *ei;
	struct inode *inode;

	ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	ei->pid = NULL;
	ei->fd = 0;
	ei->op.proc_get_link = NULL;
	ei->pde = NULL;
	inode = &ei->vfs_inode;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	return inode;
}

static void proc_destroy_inode(struct inode *inode)
{
	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
}

static void init_once(struct kmem_cache *cachep, void *foo)
{
	struct proc_inode *ei = (struct proc_inode *)foo;

	inode_init_once(&ei->vfs_inode);
}

int __init proc_init_inodecache(void)
{
	proc_inode_cachep = kmem_cache_create("proc_inode_cache",
					      sizeof(struct proc_inode),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD|SLAB_PANIC),
					      init_once);
	return 0;
}

static int proc_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_NODIRATIME;
	return 0;
}

static const struct super_operations proc_sops = {
	.alloc_inode	= proc_alloc_inode,
	.destroy_inode	= proc_destroy_inode,
	.drop_inode	= generic_delete_inode,
	.delete_inode	= proc_delete_inode,
	.statfs		= simple_statfs,
	.remount_fs	= proc_remount,
};

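/*
 * Drop one in-flight user of the PDE; if remove_proc_entry() is
 * waiting for the last user to finish, complete its wait.
 */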
static void pde_users_dec(struct proc_dir_entry *pde)
{
	spin_lock(&pde->pde_unload_lock);
	pde->pde_users--;
	if (pde->pde_unload_completion && pde->pde_users == 0)
		complete(pde->pde_unload_completion);
	spin_unlock(&pde->pde_unload_lock);
}

static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	loff_t rv = -EINVAL;
	loff_t (*llseek)(struct file *, loff_t, int);

	spin_lock(&pde->pde_unload_lock);
	/*
	 * remove_proc_entry() is going to delete PDE (as part of module
	 * cleanup sequence). No new callers into module allowed.
	 */
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	/*
	 * Bump refcount so that remove_proc_entry will wait for ->llseek to
	 * complete.
	 */
	pde->pde_users++;
	/*
	 * Save function pointer under lock, to protect against ->proc_fops
	 * being set to NULL right after ->pde_unload_lock is dropped.
	 */
	llseek = pde->proc_fops->llseek;
	spin_unlock(&pde->pde_unload_lock);

	if (!llseek)
		llseek = default_llseek;
	rv = llseek(file, offset, whence);

	pde_users_dec(pde);
	return rv;
}

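/*
 * The remaining proc_reg_* wrappers follow the same pattern as
 * proc_reg_llseek(): take pde_unload_lock, bail out if ->proc_fops is
 * already gone, bump pde_users, sample the method pointer, call it
 * with the lock dropped, then let pde_users_dec() release the count.
 */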
static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	ssize_t rv = -EIO;
	ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	read = pde->proc_fops->read;
	spin_unlock(&pde->pde_unload_lock);

	if (read)
		rv = read(file, buf, count, ppos);

	pde_users_dec(pde);
	return rv;
}

static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	ssize_t rv = -EIO;
	ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	write = pde->proc_fops->write;
	spin_unlock(&pde->pde_unload_lock);

	if (write)
		rv = write(file, buf, count, ppos);

	pde_users_dec(pde);
	return rv;
}

static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	unsigned int rv = DEFAULT_POLLMASK;
	unsigned int (*poll)(struct file *, struct poll_table_struct *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	poll = pde->proc_fops->poll;
	spin_unlock(&pde->pde_unload_lock);

	if (poll)
		rv = poll(file, pts);

	pde_users_dec(pde);
	return rv;
}

static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	long rv = -ENOTTY;
	long (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);
	int (*ioctl)(struct inode *, struct file *, unsigned int, unsigned long);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	unlocked_ioctl = pde->proc_fops->unlocked_ioctl;
	ioctl = pde->proc_fops->ioctl;
	spin_unlock(&pde->pde_unload_lock);

	if (unlocked_ioctl) {
		rv = unlocked_ioctl(file, cmd, arg);
		if (rv == -ENOIOCTLCMD)
			rv = -EINVAL;
	} else if (ioctl) {
		lock_kernel();
		rv = ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
		unlock_kernel();
	}

	pde_users_dec(pde);
	return rv;
}

#ifdef CONFIG_COMPAT
static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	long rv = -ENOTTY;
	long (*compat_ioctl)(struct file *, unsigned int, unsigned long);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	compat_ioctl = pde->proc_fops->compat_ioctl;
	spin_unlock(&pde->pde_unload_lock);

	if (compat_ioctl)
		rv = compat_ioctl(file, cmd, arg);

	pde_users_dec(pde);
	return rv;
}
#endif

static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	int rv = -EIO;
	int (*mmap)(struct file *, struct vm_area_struct *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	mmap = pde->proc_fops->mmap;
	spin_unlock(&pde->pde_unload_lock);

	if (mmap)
		rv = mmap(file, vma);

	pde_users_dec(pde);
	return rv;
}

static int proc_reg_open(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	int (*open)(struct inode *, struct file *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	open = pde->proc_fops->open;
	spin_unlock(&pde->pde_unload_lock);

	if (open)
		rv = open(inode, file);

	pde_users_dec(pde);
	return rv;
}

static int proc_reg_release(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	int (*release)(struct inode *, struct file *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	release = pde->proc_fops->release;
	spin_unlock(&pde->pde_unload_lock);

	if (release)
		rv = release(inode, file);

	pde_users_dec(pde);
	return rv;
}

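/*
 * File operations installed on regular /proc files.  Every method is
 * routed through the proc_reg_* wrappers above so remove_proc_entry()
 * can wait for in-flight callers before the entry (and possibly the
 * module behind it) goes away.
 */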
static const struct file_operations proc_reg_file_ops = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= proc_reg_compat_ioctl,
#endif
	.mmap		= proc_reg_mmap,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};

#ifdef CONFIG_COMPAT
static const struct file_operations proc_reg_file_ops_no_compat = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.mmap		= proc_reg_mmap,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
#endif

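/*
 * Obtain the inode for a proc entry: pin the owning module, look the
 * inode up via iget_locked() and, when it is freshly created, copy
 * mode, ownership, size, link count and the inode/file operations
 * over from the proc_dir_entry.
 */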
struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
				struct proc_dir_entry *de)
{
	struct inode *inode;

	if (de != NULL && !try_module_get(de->owner))
		goto out_mod;

	inode = iget_locked(sb, ino);
	if (!inode)
		goto out_ino;
	if (inode->i_state & I_NEW) {
		inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
		PROC_I(inode)->fd = 0;
		PROC_I(inode)->pde = de;
		if (de) {
			if (de->mode) {
				inode->i_mode = de->mode;
				inode->i_uid = de->uid;
				inode->i_gid = de->gid;
			}
			if (de->size)
				inode->i_size = de->size;
			if (de->nlink)
				inode->i_nlink = de->nlink;
			if (de->proc_iops)
				inode->i_op = de->proc_iops;
			if (de->proc_fops) {
				if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_COMPAT
					if (!de->proc_fops->compat_ioctl)
						inode->i_fop =
							&proc_reg_file_ops_no_compat;
					else
#endif
						inode->i_fop = &proc_reg_file_ops;
				} else {
					inode->i_fop = de->proc_fops;
				}
			}
		}
		unlock_new_inode(inode);
	}
	return inode;

out_ino:
	if (de != NULL)
		module_put(de->owner);
out_mod:
	return NULL;
}

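/*
 * Populate the proc superblock: set the mount flags, block size,
 * magic number and operations, then instantiate the root inode from
 * proc_root.
 */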
int proc_fill_super(struct super_block *s)
{
	struct inode *root_inode;

	s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
	s->s_blocksize = 1024;
	s->s_blocksize_bits = 10;
	s->s_magic = PROC_SUPER_MAGIC;
	s->s_op = &proc_sops;
	s->s_time_gran = 1;

	de_get(&proc_root);
	root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root);
	if (!root_inode)
		goto out_no_root;
	root_inode->i_uid = 0;
	root_inode->i_gid = 0;
	s->s_root = d_alloc_root(root_inode);
	if (!s->s_root)
		goto out_no_root;
	return 0;

out_no_root:
	printk("proc_fill_super: get root inode failed\n");
	iput(root_inode);
	de_put(&proc_root);
	return -ENOMEM;
}