/*
 *  linux/fs/proc/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/pid_namespace.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/completion.h>
#include <linux/poll.h>
#include <linux/printk.h>
#include <linux/file.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/magic.h>

#include <asm/uaccess.h>

#include "internal.h"
30 static void proc_evict_inode(struct inode
*inode
)
32 struct proc_dir_entry
*de
;
33 struct ctl_table_header
*head
;
34 const struct proc_ns_operations
*ns_ops
;
37 truncate_inode_pages(&inode
->i_data
, 0);
40 /* Stop tracking associated processes */
41 put_pid(PROC_I(inode
)->pid
);
43 /* Let go of any associated proc directory entry */
44 de
= PROC_I(inode
)->pde
;
47 head
= PROC_I(inode
)->sysctl
;
49 rcu_assign_pointer(PROC_I(inode
)->sysctl
, NULL
);
50 sysctl_head_put(head
);
52 /* Release any associated namespace */
53 ns_ops
= PROC_I(inode
)->ns_ops
;
54 ns
= PROC_I(inode
)->ns
;
/* Slab cache backing every proc_inode handed out by proc_alloc_inode(). */
static struct kmem_cache *proc_inode_cachep;
61 static struct inode
*proc_alloc_inode(struct super_block
*sb
)
63 struct proc_inode
*ei
;
66 ei
= (struct proc_inode
*)kmem_cache_alloc(proc_inode_cachep
, GFP_KERNEL
);
71 ei
->op
.proc_get_link
= NULL
;
74 ei
->sysctl_entry
= NULL
;
77 inode
= &ei
->vfs_inode
;
78 inode
->i_mtime
= inode
->i_atime
= inode
->i_ctime
= CURRENT_TIME
;
82 static void proc_i_callback(struct rcu_head
*head
)
84 struct inode
*inode
= container_of(head
, struct inode
, i_rcu
);
85 kmem_cache_free(proc_inode_cachep
, PROC_I(inode
));
88 static void proc_destroy_inode(struct inode
*inode
)
90 call_rcu(&inode
->i_rcu
, proc_i_callback
);
93 static void init_once(void *foo
)
95 struct proc_inode
*ei
= (struct proc_inode
*) foo
;
97 inode_init_once(&ei
->vfs_inode
);
100 void __init
proc_init_inodecache(void)
102 proc_inode_cachep
= kmem_cache_create("proc_inode_cache",
103 sizeof(struct proc_inode
),
104 0, (SLAB_RECLAIM_ACCOUNT
|
105 SLAB_MEM_SPREAD
|SLAB_PANIC
),
109 static int proc_show_options(struct seq_file
*seq
, struct dentry
*root
)
111 struct super_block
*sb
= root
->d_sb
;
112 struct pid_namespace
*pid
= sb
->s_fs_info
;
114 if (!gid_eq(pid
->pid_gid
, GLOBAL_ROOT_GID
))
115 seq_printf(seq
, ",gid=%u", from_kgid_munged(&init_user_ns
, pid
->pid_gid
));
116 if (pid
->hide_pid
!= 0)
117 seq_printf(seq
, ",hidepid=%u", pid
->hide_pid
);
122 static const struct super_operations proc_sops
= {
123 .alloc_inode
= proc_alloc_inode
,
124 .destroy_inode
= proc_destroy_inode
,
125 .drop_inode
= generic_delete_inode
,
126 .evict_inode
= proc_evict_inode
,
127 .statfs
= simple_statfs
,
128 .remount_fs
= proc_remount
,
129 .show_options
= proc_show_options
,
/*
 * Bias added to a PDE's ->in_use by proc_entry_rundown() to make the
 * count negative and refuse new users: the sign bit alone (INT_MIN).
 */
enum {BIAS = -1U<<31};
134 static inline int use_pde(struct proc_dir_entry
*pde
)
136 return atomic_inc_unless_negative(&pde
->in_use
);
139 static void unuse_pde(struct proc_dir_entry
*pde
)
141 if (atomic_dec_return(&pde
->in_use
) == BIAS
)
142 complete(pde
->pde_unload_completion
);
146 static void close_pdeo(struct proc_dir_entry
*pde
, struct pde_opener
*pdeo
)
149 /* somebody else is doing that, just wait */
150 DECLARE_COMPLETION_ONSTACK(c
);
152 spin_unlock(&pde
->pde_unload_lock
);
153 wait_for_completion(&c
);
154 spin_lock(&pde
->pde_unload_lock
);
158 spin_unlock(&pde
->pde_unload_lock
);
160 pde
->proc_fops
->release(file_inode(file
), file
);
161 spin_lock(&pde
->pde_unload_lock
);
162 list_del_init(&pdeo
->lh
);
169 void proc_entry_rundown(struct proc_dir_entry
*de
)
171 DECLARE_COMPLETION_ONSTACK(c
);
172 /* Wait until all existing callers into module are done. */
173 de
->pde_unload_completion
= &c
;
174 if (atomic_add_return(BIAS
, &de
->in_use
) != BIAS
)
175 wait_for_completion(&c
);
177 spin_lock(&de
->pde_unload_lock
);
178 while (!list_empty(&de
->pde_openers
)) {
179 struct pde_opener
*pdeo
;
180 pdeo
= list_first_entry(&de
->pde_openers
, struct pde_opener
, lh
);
181 close_pdeo(de
, pdeo
);
183 spin_unlock(&de
->pde_unload_lock
);
186 static loff_t
proc_reg_llseek(struct file
*file
, loff_t offset
, int whence
)
188 struct proc_dir_entry
*pde
= PDE(file_inode(file
));
191 loff_t (*llseek
)(struct file
*, loff_t
, int);
192 llseek
= pde
->proc_fops
->llseek
;
194 llseek
= default_llseek
;
195 rv
= llseek(file
, offset
, whence
);
201 static ssize_t
proc_reg_read(struct file
*file
, char __user
*buf
, size_t count
, loff_t
*ppos
)
203 ssize_t (*read
)(struct file
*, char __user
*, size_t, loff_t
*);
204 struct proc_dir_entry
*pde
= PDE(file_inode(file
));
207 read
= pde
->proc_fops
->read
;
209 rv
= read(file
, buf
, count
, ppos
);
215 static ssize_t
proc_reg_write(struct file
*file
, const char __user
*buf
, size_t count
, loff_t
*ppos
)
217 ssize_t (*write
)(struct file
*, const char __user
*, size_t, loff_t
*);
218 struct proc_dir_entry
*pde
= PDE(file_inode(file
));
221 write
= pde
->proc_fops
->write
;
223 rv
= write(file
, buf
, count
, ppos
);
229 static unsigned int proc_reg_poll(struct file
*file
, struct poll_table_struct
*pts
)
231 struct proc_dir_entry
*pde
= PDE(file_inode(file
));
232 unsigned int rv
= DEFAULT_POLLMASK
;
233 unsigned int (*poll
)(struct file
*, struct poll_table_struct
*);
235 poll
= pde
->proc_fops
->poll
;
237 rv
= poll(file
, pts
);
243 static long proc_reg_unlocked_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
245 struct proc_dir_entry
*pde
= PDE(file_inode(file
));
247 long (*ioctl
)(struct file
*, unsigned int, unsigned long);
249 ioctl
= pde
->proc_fops
->unlocked_ioctl
;
251 rv
= ioctl(file
, cmd
, arg
);
258 static long proc_reg_compat_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
260 struct proc_dir_entry
*pde
= PDE(file_inode(file
));
262 long (*compat_ioctl
)(struct file
*, unsigned int, unsigned long);
264 compat_ioctl
= pde
->proc_fops
->compat_ioctl
;
266 rv
= compat_ioctl(file
, cmd
, arg
);
273 static int proc_reg_mmap(struct file
*file
, struct vm_area_struct
*vma
)
275 struct proc_dir_entry
*pde
= PDE(file_inode(file
));
277 int (*mmap
)(struct file
*, struct vm_area_struct
*);
279 mmap
= pde
->proc_fops
->mmap
;
281 rv
= mmap(file
, vma
);
287 static int proc_reg_open(struct inode
*inode
, struct file
*file
)
289 struct proc_dir_entry
*pde
= PDE(inode
);
291 int (*open
)(struct inode
*, struct file
*);
292 int (*release
)(struct inode
*, struct file
*);
293 struct pde_opener
*pdeo
;
296 * What for, you ask? Well, we can have open, rmmod, remove_proc_entry
297 * sequence. ->release won't be called because ->proc_fops will be
298 * cleared. Depending on complexity of ->release, consequences vary.
300 * We can't wait for mercy when close will be done for real, it's
301 * deadlockable: rmmod foo </proc/foo . So, we're going to do ->release
302 * by hand in remove_proc_entry(). For this, save opener's credentials
305 pdeo
= kzalloc(sizeof(struct pde_opener
), GFP_KERNEL
);
313 open
= pde
->proc_fops
->open
;
314 release
= pde
->proc_fops
->release
;
317 rv
= open(inode
, file
);
319 if (rv
== 0 && release
) {
320 /* To know what to release. */
322 /* Strictly for "too late" ->release in proc_reg_release(). */
323 spin_lock(&pde
->pde_unload_lock
);
324 list_add(&pdeo
->lh
, &pde
->pde_openers
);
325 spin_unlock(&pde
->pde_unload_lock
);
333 static int proc_reg_release(struct inode
*inode
, struct file
*file
)
335 struct proc_dir_entry
*pde
= PDE(inode
);
336 struct pde_opener
*pdeo
;
337 spin_lock(&pde
->pde_unload_lock
);
338 list_for_each_entry(pdeo
, &pde
->pde_openers
, lh
) {
339 if (pdeo
->file
== file
) {
340 close_pdeo(pde
, pdeo
);
344 spin_unlock(&pde
->pde_unload_lock
);
348 static const struct file_operations proc_reg_file_ops
= {
349 .llseek
= proc_reg_llseek
,
350 .read
= proc_reg_read
,
351 .write
= proc_reg_write
,
352 .poll
= proc_reg_poll
,
353 .unlocked_ioctl
= proc_reg_unlocked_ioctl
,
355 .compat_ioctl
= proc_reg_compat_ioctl
,
357 .mmap
= proc_reg_mmap
,
358 .open
= proc_reg_open
,
359 .release
= proc_reg_release
,
363 static const struct file_operations proc_reg_file_ops_no_compat
= {
364 .llseek
= proc_reg_llseek
,
365 .read
= proc_reg_read
,
366 .write
= proc_reg_write
,
367 .poll
= proc_reg_poll
,
368 .unlocked_ioctl
= proc_reg_unlocked_ioctl
,
369 .mmap
= proc_reg_mmap
,
370 .open
= proc_reg_open
,
371 .release
= proc_reg_release
,
375 struct inode
*proc_get_inode(struct super_block
*sb
, struct proc_dir_entry
*de
)
377 struct inode
*inode
= new_inode_pseudo(sb
);
380 inode
->i_ino
= de
->low_ino
;
381 inode
->i_mtime
= inode
->i_atime
= inode
->i_ctime
= CURRENT_TIME
;
382 PROC_I(inode
)->pde
= de
;
385 inode
->i_mode
= de
->mode
;
386 inode
->i_uid
= de
->uid
;
387 inode
->i_gid
= de
->gid
;
390 inode
->i_size
= de
->size
;
392 set_nlink(inode
, de
->nlink
);
393 WARN_ON(!de
->proc_iops
);
394 inode
->i_op
= de
->proc_iops
;
396 if (S_ISREG(inode
->i_mode
)) {
398 if (!de
->proc_fops
->compat_ioctl
)
400 &proc_reg_file_ops_no_compat
;
403 inode
->i_fop
= &proc_reg_file_ops
;
405 inode
->i_fop
= de
->proc_fops
;
413 int proc_fill_super(struct super_block
*s
)
415 struct inode
*root_inode
;
417 s
->s_flags
|= MS_NODIRATIME
| MS_NOSUID
| MS_NOEXEC
;
418 s
->s_blocksize
= 1024;
419 s
->s_blocksize_bits
= 10;
420 s
->s_magic
= PROC_SUPER_MAGIC
;
421 s
->s_op
= &proc_sops
;
425 root_inode
= proc_get_inode(s
, &proc_root
);
427 pr_err("proc_fill_super: get root inode failed\n");
431 s
->s_root
= d_make_root(root_inode
);
433 pr_err("proc_fill_super: allocate dentry failed\n");
437 return proc_setup_self(s
);