/*
 * linux/fs/file_table.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
        .max_files = NR_FILE
};

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static void file_free_rcu(struct rcu_head *head)
{
        struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

        put_cred(f->f_cred);
        kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
        percpu_counter_dec(&nr_files);
        file_check_state(f);
        call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
        return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
        return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        files_stat.nr_files = get_nr_files();
        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return -ENOSYS;
}
#endif

/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, run out of memory or the operation is not
 * permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.  An illustrative sketch of the
 * calling convention follows get_empty_filp() below.
 */
struct file *get_empty_filp(void)
{
        const struct cred *cred = current_cred();
        static long old_max;
        struct file *f;
        int error;

        /*
         * Privileged users can go above max_files
         */
        if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
                /*
                 * percpu_counters are inaccurate.  Do an expensive check before
                 * we go and fail.
                 */
                if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
                        goto over;
        }

        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
        if (unlikely(!f))
                return ERR_PTR(-ENOMEM);

        percpu_counter_inc(&nr_files);
        f->f_cred = get_cred(cred);
        error = security_file_alloc(f);
        if (unlikely(error)) {
                file_free(f);
                return ERR_PTR(error);
        }

        atomic_long_set(&f->f_count, 1);
        rwlock_init(&f->f_owner.lock);
        spin_lock_init(&f->f_lock);
        eventpoll_init_file(f);
        /* f->f_version: 0 */
        return f;

over:
        /* Ran out of filps - report that */
        if (get_nr_files() > old_max) {
                pr_info("VFS: file-max limit %lu reached\n", get_max_files());
                old_max = get_nr_files();
        }
        return ERR_PTR(-ENFILE);
}
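
/*
 * Illustrative sketch of the calling convention described above
 * (hypothetical caller code, not taken from this file): get_empty_filp()
 * returns ERR_PTR() on failure, and a filp that has not yet been installed
 * anywhere can be dropped again with put_filp() below.  If the filp is to
 * be opened for write, the caller must additionally have taken write
 * access on the mount it assigns, as the comment above explains.
 *
 *      struct file *f;
 *
 *      f = get_empty_filp();
 *      if (IS_ERR(f))
 *              return PTR_ERR(f);
 *      ... initialize f->f_path, f->f_op, f->f_mode ...
 *      if (setup_failed) {             setup_failed is a placeholder
 *              put_filp(f);            drop the uninstalled filp again
 *              return -ENODEV;
 *      }
 */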

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the (dentry, vfsmount) pair for the new file
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file', because of the same initialization
 * pitfalls listed for init_file().  This is the
 * preferred interface to init_file().
 *
 * If all the callers of init_file() are eliminated, its
 * code should be moved into this function.
 */
struct file *alloc_file(struct path *path, fmode_t mode,
                const struct file_operations *fop)
{
        struct file *file;

        file = get_empty_filp();
        if (IS_ERR(file))
                return file;

        file->f_path = *path;
        file->f_inode = path->dentry->d_inode;
        file->f_mapping = path->dentry->d_inode->i_mapping;
        file->f_mode = mode;
        file->f_op = fop;

        /*
         * These mounts don't really matter in practice
         * for r/o bind mounts.  They aren't userspace-
         * visible.  We do this for consistency, and so
         * that we can do debugging checks at __fput()
         */
        if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
                file_take_write(file);
                WARN_ON(mnt_clone_write(path->mnt));
        }
        if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_inc(path->dentry->d_inode);
        return file;
}
EXPORT_SYMBOL(alloc_file);
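
/*
 * Illustrative use of alloc_file() by in-kernel code that manufactures a
 * struct file for a pseudo filesystem (hypothetical names my_mnt, my_dentry
 * and my_fops; a sketch, not code from this file):
 *
 *      struct path path;
 *      struct file *file;
 *
 *      path.mnt = mntget(my_mnt);              pin the mount and dentry
 *      path.dentry = dget(my_dentry);
 *      file = alloc_file(&path, FMODE_READ, &my_fops);
 *      if (IS_ERR(file))
 *              path_put(&path);                undo the pins on failure
 *
 * On success the references taken above are owned by the file and are
 * released by __fput() via dput()/mntput().
 */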

/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
static void drop_file_write_access(struct file *file)
{
        struct vfsmount *mnt = file->f_path.mnt;
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;

        put_write_access(inode);

        if (special_file(inode->i_mode))
                return;
        if (file_check_writeable(file) != 0)
                return;
        __mnt_drop_write(mnt);
        file_release_write(file);
}

/* the real guts of fput() - releasing the last reference to file */
static void __fput(struct file *file)
{
        struct dentry *dentry = file->f_path.dentry;
        struct vfsmount *mnt = file->f_path.mnt;
        struct inode *inode = file->f_inode;

        might_sleep();

        fsnotify_close(file);
        /*
         * The function eventpoll_release() should be the first called
         * in the file cleanup chain.
         */
        eventpoll_release(file);
        locks_remove_flock(file);

        if (unlikely(file->f_flags & FASYNC)) {
                if (file->f_op->fasync)
                        file->f_op->fasync(-1, file, 0);
        }
        ima_file_free(file);
        if (file->f_op->release)
                file->f_op->release(inode, file);
        security_file_free(file);
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
                     !(file->f_mode & FMODE_PATH))) {
                cdev_put(inode->i_cdev);
        }
        fops_put(file->f_op);
        put_pid(file->f_owner.pid);
        if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_dec(inode);
        if (file->f_mode & FMODE_WRITE)
                drop_file_write_access(file);
        file->f_path.dentry = NULL;
        file->f_path.mnt = NULL;
        file->f_inode = NULL;
        file_free(file);
        dput(dentry);
        mntput(mnt);
}

static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
        struct llist_node *node = llist_del_all(&delayed_fput_list);
        struct llist_node *next;

        for (; node; node = next) {
                next = llist_next(node);
                __fput(llist_entry(node, struct file, f_u.fu_llist));
        }
}

static void ____fput(struct callback_head *work)
{
        __fput(container_of(work, struct file, f_u.fu_rcuhead));
}

/*
 * If a kernel thread really needs the final fput() it has done to
 * complete, it should call this.  The only user right now is the boot
 * code - we *do* need to make sure our writes to binaries on initramfs
 * have not left us with opened struct file waiting for __fput() -
 * execve() won't work without that.  Please don't add more callers
 * without very good reasons; in particular, never call this with locks
 * held and never call it from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
        delayed_fput(NULL);
}

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);

void fput(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                struct task_struct *task = current;

                if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
                        init_task_work(&file->f_u.fu_rcuhead, ____fput);
                        if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
                                return;
                        /*
                         * After this task has run exit_task_work(),
                         * task_work_add() will fail.  Fall through to delayed
                         * fput to avoid leaking *file.
                         */
                }

                if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
                        schedule_delayed_work(&delayed_fput_work, 1);
        }
}
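
/*
 * Typical reference pairing around fput() (sketch with a hypothetical fd,
 * not code from this file): code that looks up a struct file takes its own
 * reference and drops it with fput() when done; the final fput() defers
 * __fput() to task_work or, from interrupt/kthread context, to the
 * delayed_fput workqueue above.
 *
 *      struct file *file = fget(fd);           takes a reference on f_count
 *      if (!file)
 *              return -EBADF;
 *      ... use file ...
 *      fput(file);                             may run __fput() later
 */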

/*
 * Synchronous analog of fput() for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), that need to wait for completion of __fput() and
 * know that for this specific struct file it won't involve anything
 * that would need them.  Use only if you really need it - at the very
 * least, don't blindly convert fput() calls from a kernel thread to this.
 */
void __fput_sync(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                struct task_struct *task = current;
                BUG_ON(!(task->flags & PF_KTHREAD));
                __fput(file);
        }
}
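
/*
 * Sketch of the intended caller (hypothetical kernel-thread code, not from
 * this file): a PF_KTHREAD thread that must know the final release has
 * completed before it continues can use __fput_sync() instead of fput().
 * Note the BUG_ON() above: this must not be called from ordinary process
 * context.
 *
 *      struct file *file = filp_open("/some/path", O_RDONLY, 0);
 *      if (!IS_ERR(file)) {
 *              ... read from file ...
 *              __fput_sync(file);      __fput() runs here, synchronously
 *      }
 */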

EXPORT_SYMBOL(fput);

void put_filp(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                security_file_free(file);
                file_free(file);
        }
}

void __init files_init(unsigned long mempages)
{
        unsigned long n;

        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

        /*
         * One file with associated inode and dcache is very roughly 1K.
         * Per default don't use more than 10% of our memory for files.
         */

        n = (mempages * (PAGE_SIZE / 1024)) / 10;
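        /*
         * Worked example (illustrative numbers, not from this file): with
         * 4 KiB pages, PAGE_SIZE / 1024 == 4, so a machine with 1 GiB of
         * RAM (mempages == 262144) gets n == 262144 * 4 / 10 == 104857
         * files; at roughly 1K per open file that is about 100 MiB, i.e.
         * about 10% of memory, matching the comment above.
         */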
        files_stat.max_files = max_t(unsigned long, n, NR_FILE);
        files_defer_init();
        percpu_counter_init(&nr_files, 0);
}