/proc/*/environ: wrong placing of ptrace_may_attach() check
1 /*
2 * linux/fs/proc/base.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * proc base directory handling functions
7 *
8 * 1999, Al Viro. Rewritten. Now it covers the whole per-process part.
9 * Instead of using magical inumbers to determine the kind of object
10 * we allocate and fill in-core inodes upon lookup. They don't even
11 * go into icache. We cache the reference to task_struct upon lookup too.
12 * Eventually it should become a filesystem in its own right. We don't use the
13 * rest of procfs anymore.
14 *
15 *
16 * Changelog:
17 * 17-Jan-2005
18 * Allan Bezerra
19 * Bruna Moreira <bruna.moreira@indt.org.br>
20 * Edjard Mota <edjard.mota@indt.org.br>
21 * Ilias Biris <ilias.biris@indt.org.br>
22 * Mauricio Lin <mauricio.lin@indt.org.br>
23 *
24 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
25 *
26 * A new process specific entry (smaps) included in /proc. It shows the
27 * size of rss for each memory area. The maps entry lacks information
28 * about physical memory size (rss) for each mapped file, i.e.,
29 * rss information for executables and library files.
30 * This additional information is useful for any tools that need to know
31 * about physical memory consumption for a process specific library.
32 *
33 * Changelog:
34 * 21-Feb-2005
35 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
36 * Pud inclusion in the page table walking.
37 *
38 * ChangeLog:
39 * 10-Mar-2005
40 * 10LE Instituto Nokia de Tecnologia - INdT:
41 * A better way to walk through the page table, as suggested by Hugh Dickins.
42 *
43 * Simo Piiroinen <simo.piiroinen@nokia.com>:
44 * Smaps information related to shared, private, clean and dirty pages.
45 *
46 * Paul Mundt <paul.mundt@nokia.com>:
47 * Overall revision about smaps.
48 */
49
50 #include <asm/uaccess.h>
51
52 #include <linux/errno.h>
53 #include <linux/time.h>
54 #include <linux/proc_fs.h>
55 #include <linux/stat.h>
56 #include <linux/init.h>
57 #include <linux/capability.h>
58 #include <linux/file.h>
59 #include <linux/string.h>
60 #include <linux/seq_file.h>
61 #include <linux/namei.h>
62 #include <linux/mnt_namespace.h>
63 #include <linux/mm.h>
64 #include <linux/rcupdate.h>
65 #include <linux/kallsyms.h>
66 #include <linux/module.h>
67 #include <linux/mount.h>
68 #include <linux/security.h>
69 #include <linux/ptrace.h>
70 #include <linux/seccomp.h>
71 #include <linux/cpuset.h>
72 #include <linux/audit.h>
73 #include <linux/poll.h>
74 #include <linux/nsproxy.h>
75 #include <linux/oom.h>
76 #include "internal.h"
77
78 /* NOTE:
79 * Implementing inode permission operations in /proc is almost
80 * certainly an error. Permission checks need to happen during
81 * each system call not at open time. The reason is that most of
82 * what we wish to check for permissions in /proc varies at runtime.
83 *
84 * The classic example of a problem is opening file descriptors
85 * in /proc for a task before it execs a suid executable.
86 */
87
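/*
 * A minimal user-space sketch of the race described above (illustrative
 * only; it assumes the execed binary, e.g. /usr/bin/passwd, happens to be
 * setuid on the system):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		// Open one of our own /proc files while still unprivileged...
 *		int fd = open("/proc/self/mem", O_RDONLY);
 *
 *		// ...then exec a setuid binary.  Were permissions only checked
 *		// at open() time, fd would keep working against the new,
 *		// privileged image; hence the checks are redone on every read.
 *		execl("/usr/bin/passwd", "passwd", (char *)NULL);
 *		return fd < 0;	// only reached if the exec fails
 *	}
 */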
88
89 /* Worst case buffer size needed for holding an integer. */
90 #define PROC_NUMBUF 13
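/* That is, room for "%d\n" of the most negative 32-bit value:
 * a sign, ten digits, '\n' and the terminating NUL = 1 + 10 + 1 + 1 = 13. */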
91
92 struct pid_entry {
93 char *name;
94 int len;
95 mode_t mode;
96 const struct inode_operations *iop;
97 const struct file_operations *fop;
98 union proc_op op;
99 };
100
101 #define NOD(NAME, MODE, IOP, FOP, OP) { \
102 .name = (NAME), \
103 .len = sizeof(NAME) - 1, \
104 .mode = MODE, \
105 .iop = IOP, \
106 .fop = FOP, \
107 .op = OP, \
108 }
109
110 #define DIR(NAME, MODE, OTYPE) \
111 NOD(NAME, (S_IFDIR|(MODE)), \
112 &proc_##OTYPE##_inode_operations, &proc_##OTYPE##_operations, \
113 {} )
114 #define LNK(NAME, OTYPE) \
115 NOD(NAME, (S_IFLNK|S_IRWXUGO), \
116 &proc_pid_link_inode_operations, NULL, \
117 { .proc_get_link = &proc_##OTYPE##_link } )
118 #define REG(NAME, MODE, OTYPE) \
119 NOD(NAME, (S_IFREG|(MODE)), NULL, \
120 &proc_##OTYPE##_operations, {})
121 #define INF(NAME, MODE, OTYPE) \
122 NOD(NAME, (S_IFREG|(MODE)), \
123 NULL, &proc_info_file_operations, \
124 { .proc_read = &proc_##OTYPE } )
125
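/*
 * For reference, an entry such as INF("cmdline", S_IRUGO, pid_cmdline)
 * expands through NOD() to roughly:
 *
 *	{
 *		.name = "cmdline",
 *		.len  = sizeof("cmdline") - 1,
 *		.mode = S_IFREG | S_IRUGO,
 *		.iop  = NULL,
 *		.fop  = &proc_info_file_operations,
 *		.op   = { .proc_read = &proc_pid_cmdline },
 *	}
 *
 * so proc_info_read() reaches proc_pid_cmdline() via op.proc_read.
 */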
126 int maps_protect;
127 EXPORT_SYMBOL(maps_protect);
128
129 static struct fs_struct *get_fs_struct(struct task_struct *task)
130 {
131 struct fs_struct *fs;
132 task_lock(task);
133 fs = task->fs;
134 if(fs)
135 atomic_inc(&fs->count);
136 task_unlock(task);
137 return fs;
138 }
139
140 static int get_nr_threads(struct task_struct *tsk)
141 {
142 /* Must be called with the rcu_read_lock held */
143 unsigned long flags;
144 int count = 0;
145
146 if (lock_task_sighand(tsk, &flags)) {
147 count = atomic_read(&tsk->signal->count);
148 unlock_task_sighand(tsk, &flags);
149 }
150 return count;
151 }
152
153 static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
154 {
155 struct task_struct *task = get_proc_task(inode);
156 struct fs_struct *fs = NULL;
157 int result = -ENOENT;
158
159 if (task) {
160 fs = get_fs_struct(task);
161 put_task_struct(task);
162 }
163 if (fs) {
164 read_lock(&fs->lock);
165 *mnt = mntget(fs->pwdmnt);
166 *dentry = dget(fs->pwd);
167 read_unlock(&fs->lock);
168 result = 0;
169 put_fs_struct(fs);
170 }
171 return result;
172 }
173
174 static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
175 {
176 struct task_struct *task = get_proc_task(inode);
177 struct fs_struct *fs = NULL;
178 int result = -ENOENT;
179
180 if (task) {
181 fs = get_fs_struct(task);
182 put_task_struct(task);
183 }
184 if (fs) {
185 read_lock(&fs->lock);
186 *mnt = mntget(fs->rootmnt);
187 *dentry = dget(fs->root);
188 read_unlock(&fs->lock);
189 result = 0;
190 put_fs_struct(fs);
191 }
192 return result;
193 }
194
195 #define MAY_PTRACE(task) \
196 (task == current || \
197 (task->parent == current && \
198 (task->ptrace & PT_PTRACED) && \
199 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
200 security_ptrace(current,task) == 0))
201
202 static int proc_pid_environ(struct task_struct *task, char * buffer)
203 {
204 int res = 0;
205 struct mm_struct *mm = get_task_mm(task);
206 if (mm) {
207 unsigned int len;
208
209 res = -ESRCH;
210 if (!ptrace_may_attach(task))
211 goto out;
212
213 len = mm->env_end - mm->env_start;
214 if (len > PAGE_SIZE)
215 len = PAGE_SIZE;
216 res = access_process_vm(task, mm->env_start, buffer, len, 0);
217 out:
218 mmput(mm);
219 }
220 return res;
221 }
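/*
 * A small user-space sketch of reading this file (illustrative only; as
 * above, at most one page is returned and the reader must pass the same
 * permission check as a ptrace attach):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/self/environ", "r");
 *		char buf[4096];
 *		size_t n, i;
 *
 *		if (!f)
 *			return 1;
 *		n = fread(buf, 1, sizeof(buf) - 1, f);
 *		buf[n] = '\0';
 *		fclose(f);
 *		// Entries are separated by '\0', not '\n'.
 *		for (i = 0; i < n; i += strlen(buf + i) + 1)
 *			printf("%s\n", buf + i);
 *		return 0;
 *	}
 */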
222
223 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
224 {
225 int res = 0;
226 unsigned int len;
227 struct mm_struct *mm = get_task_mm(task);
228 if (!mm)
229 goto out;
230 if (!mm->arg_end)
231 goto out_mm; /* Shh! No looking before we're done */
232
233 len = mm->arg_end - mm->arg_start;
234
235 if (len > PAGE_SIZE)
236 len = PAGE_SIZE;
237
238 res = access_process_vm(task, mm->arg_start, buffer, len, 0);
239
240 // If the nul at the end of args has been overwritten, then
241 // assume application is using setproctitle(3).
242 if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
243 len = strnlen(buffer, res);
244 if (len < res) {
245 res = len;
246 } else {
247 len = mm->env_end - mm->env_start;
248 if (len > PAGE_SIZE - res)
249 len = PAGE_SIZE - res;
250 res += access_process_vm(task, mm->env_start, buffer+res, len, 0);
251 res = strnlen(buffer, res);
252 }
253 }
254 out_mm:
255 mmput(mm);
256 out:
257 return res;
258 }
259
260 static int proc_pid_auxv(struct task_struct *task, char *buffer)
261 {
262 int res = 0;
263 struct mm_struct *mm = get_task_mm(task);
264 if (mm) {
265 unsigned int nwords = 0;
266 do
267 nwords += 2;
268 while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
269 res = nwords * sizeof(mm->saved_auxv[0]);
270 if (res > PAGE_SIZE)
271 res = PAGE_SIZE;
272 memcpy(buffer, mm->saved_auxv, res);
273 mmput(mm);
274 }
275 return res;
276 }
277
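/*
 * The buffer filled in above is just mm->saved_auxv: (type, value) pairs
 * of unsigned longs terminated by an AT_NULL entry.  A user-space sketch
 * of walking it (illustrative only):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/self/auxv", "r");
 *		unsigned long pair[2];
 *
 *		if (!f)
 *			return 1;
 *		while (fread(pair, sizeof(pair), 1, f) == 1 && pair[0] != 0)
 *			printf("type %lu value %#lx\n", pair[0], pair[1]);
 *		fclose(f);
 *		return 0;
 *	}
 */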
278
279 #ifdef CONFIG_KALLSYMS
280 /*
281 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
282 * Returns the resolved symbol. If that fails, simply return the address.
283 */
284 static int proc_pid_wchan(struct task_struct *task, char *buffer)
285 {
286 unsigned long wchan;
287 char symname[KSYM_NAME_LEN+1];
288
289 wchan = get_wchan(task);
290
291 if (lookup_symbol_name(wchan, symname) < 0)
292 return sprintf(buffer, "%lu", wchan);
293 else
294 return sprintf(buffer, "%s", symname);
295 }
296 #endif /* CONFIG_KALLSYMS */
297
298 #ifdef CONFIG_SCHEDSTATS
299 /*
300 * Provides /proc/PID/schedstat
301 */
302 static int proc_pid_schedstat(struct task_struct *task, char *buffer)
303 {
304 return sprintf(buffer, "%llu %llu %lu\n",
305 task->sched_info.cpu_time,
306 task->sched_info.run_delay,
307 task->sched_info.pcnt);
308 }
309 #endif
310
311 /* The badness from the OOM killer */
312 unsigned long badness(struct task_struct *p, unsigned long uptime);
313 static int proc_oom_score(struct task_struct *task, char *buffer)
314 {
315 unsigned long points;
316 struct timespec uptime;
317
318 do_posix_clock_monotonic_gettime(&uptime);
319 read_lock(&tasklist_lock);
320 points = badness(task, uptime.tv_sec);
321 read_unlock(&tasklist_lock);
322 return sprintf(buffer, "%lu\n", points);
323 }
324
325 /************************************************************************/
326 /* Here the fs part begins */
327 /************************************************************************/
328
329 /* permission checks */
330 static int proc_fd_access_allowed(struct inode *inode)
331 {
332 struct task_struct *task;
333 int allowed = 0;
334 /* Allow access to a task's file descriptors if it is us, or if we
335 * may use ptrace to attach to the process and find out that
336 * information.
337 */
338 task = get_proc_task(inode);
339 if (task) {
340 allowed = ptrace_may_attach(task);
341 put_task_struct(task);
342 }
343 return allowed;
344 }
345
346 static int proc_setattr(struct dentry *dentry, struct iattr *attr)
347 {
348 int error;
349 struct inode *inode = dentry->d_inode;
350
351 if (attr->ia_valid & ATTR_MODE)
352 return -EPERM;
353
354 error = inode_change_ok(inode, attr);
355 if (!error)
356 error = inode_setattr(inode, attr);
357 return error;
358 }
359
360 static const struct inode_operations proc_def_inode_operations = {
361 .setattr = proc_setattr,
362 };
363
364 extern struct seq_operations mounts_op;
365 struct proc_mounts {
366 struct seq_file m;
367 int event;
368 };
369
370 static int mounts_open(struct inode *inode, struct file *file)
371 {
372 struct task_struct *task = get_proc_task(inode);
373 struct mnt_namespace *ns = NULL;
374 struct proc_mounts *p;
375 int ret = -EINVAL;
376
377 if (task) {
378 task_lock(task);
379 if (task->nsproxy) {
380 ns = task->nsproxy->mnt_ns;
381 if (ns)
382 get_mnt_ns(ns);
383 }
384 task_unlock(task);
385 put_task_struct(task);
386 }
387
388 if (ns) {
389 ret = -ENOMEM;
390 p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
391 if (p) {
392 file->private_data = &p->m;
393 ret = seq_open(file, &mounts_op);
394 if (!ret) {
395 p->m.private = ns;
396 p->event = ns->event;
397 return 0;
398 }
399 kfree(p);
400 }
401 put_mnt_ns(ns);
402 }
403 return ret;
404 }
405
406 static int mounts_release(struct inode *inode, struct file *file)
407 {
408 struct seq_file *m = file->private_data;
409 struct mnt_namespace *ns = m->private;
410 put_mnt_ns(ns);
411 return seq_release(inode, file);
412 }
413
414 static unsigned mounts_poll(struct file *file, poll_table *wait)
415 {
416 struct proc_mounts *p = file->private_data;
417 struct mnt_namespace *ns = p->m.private;
418 unsigned res = 0;
419
420 poll_wait(file, &ns->poll, wait);
421
422 spin_lock(&vfsmount_lock);
423 if (p->event != ns->event) {
424 p->event = ns->event;
425 res = POLLERR;
426 }
427 spin_unlock(&vfsmount_lock);
428
429 return res;
430 }
431
432 static const struct file_operations proc_mounts_operations = {
433 .open = mounts_open,
434 .read = seq_read,
435 .llseek = seq_lseek,
436 .release = mounts_release,
437 .poll = mounts_poll,
438 };
439
440 extern struct seq_operations mountstats_op;
441 static int mountstats_open(struct inode *inode, struct file *file)
442 {
443 int ret = seq_open(file, &mountstats_op);
444
445 if (!ret) {
446 struct seq_file *m = file->private_data;
447 struct mnt_namespace *mnt_ns = NULL;
448 struct task_struct *task = get_proc_task(inode);
449
450 if (task) {
451 task_lock(task);
452 if (task->nsproxy)
453 mnt_ns = task->nsproxy->mnt_ns;
454 if (mnt_ns)
455 get_mnt_ns(mnt_ns);
456 task_unlock(task);
457 put_task_struct(task);
458 }
459
460 if (mnt_ns)
461 m->private = mnt_ns;
462 else {
463 seq_release(inode, file);
464 ret = -EINVAL;
465 }
466 }
467 return ret;
468 }
469
470 static const struct file_operations proc_mountstats_operations = {
471 .open = mountstats_open,
472 .read = seq_read,
473 .llseek = seq_lseek,
474 .release = mounts_release,
475 };
476
477 #define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */
478
479 static ssize_t proc_info_read(struct file * file, char __user * buf,
480 size_t count, loff_t *ppos)
481 {
482 struct inode * inode = file->f_path.dentry->d_inode;
483 unsigned long page;
484 ssize_t length;
485 struct task_struct *task = get_proc_task(inode);
486
487 length = -ESRCH;
488 if (!task)
489 goto out_no_task;
490
491 if (count > PROC_BLOCK_SIZE)
492 count = PROC_BLOCK_SIZE;
493
494 length = -ENOMEM;
495 if (!(page = __get_free_page(GFP_KERNEL)))
496 goto out;
497
498 length = PROC_I(inode)->op.proc_read(task, (char*)page);
499
500 if (length >= 0)
501 length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
502 free_page(page);
503 out:
504 put_task_struct(task);
505 out_no_task:
506 return length;
507 }
508
509 static const struct file_operations proc_info_file_operations = {
510 .read = proc_info_read,
511 };
512
513 static int mem_open(struct inode* inode, struct file* file)
514 {
515 file->private_data = (void*)((long)current->self_exec_id);
516 return 0;
517 }
518
519 static ssize_t mem_read(struct file * file, char __user * buf,
520 size_t count, loff_t *ppos)
521 {
522 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
523 char *page;
524 unsigned long src = *ppos;
525 int ret = -ESRCH;
526 struct mm_struct *mm;
527
528 if (!task)
529 goto out_no_task;
530
531 if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
532 goto out;
533
534 ret = -ENOMEM;
535 page = (char *)__get_free_page(GFP_USER);
536 if (!page)
537 goto out;
538
539 ret = 0;
540
541 mm = get_task_mm(task);
542 if (!mm)
543 goto out_free;
544
545 ret = -EIO;
546
547 if (file->private_data != (void*)((long)current->self_exec_id))
548 goto out_put;
549
550 ret = 0;
551
552 while (count > 0) {
553 int this_len, retval;
554
555 this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
556 retval = access_process_vm(task, src, page, this_len, 0);
557 if (!retval || !MAY_PTRACE(task) || !ptrace_may_attach(task)) {
558 if (!ret)
559 ret = -EIO;
560 break;
561 }
562
563 if (copy_to_user(buf, page, retval)) {
564 ret = -EFAULT;
565 break;
566 }
567
568 ret += retval;
569 src += retval;
570 buf += retval;
571 count -= retval;
572 }
573 *ppos = src;
574
575 out_put:
576 mmput(mm);
577 out_free:
578 free_page((unsigned long) page);
579 out:
580 put_task_struct(task);
581 out_no_task:
582 return ret;
583 }
584
585 #define mem_write NULL
586
587 #ifndef mem_write
588 /* This is a security hazard */
589 static ssize_t mem_write(struct file * file, const char __user *buf,
590 size_t count, loff_t *ppos)
591 {
592 int copied;
593 char *page;
594 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
595 unsigned long dst = *ppos;
596
597 copied = -ESRCH;
598 if (!task)
599 goto out_no_task;
600
601 if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
602 goto out;
603
604 copied = -ENOMEM;
605 page = (char *)__get_free_page(GFP_USER);
606 if (!page)
607 goto out;
608
609 copied = 0;
610 while (count > 0) {
611 int this_len, retval;
612
613 this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
614 if (copy_from_user(page, buf, this_len)) {
615 copied = -EFAULT;
616 break;
617 }
618 retval = access_process_vm(task, dst, page, this_len, 1);
619 if (!retval) {
620 if (!copied)
621 copied = -EIO;
622 break;
623 }
624 copied += retval;
625 buf += retval;
626 dst += retval;
627 count -= retval;
628 }
629 *ppos = dst;
630 free_page((unsigned long) page);
631 out:
632 put_task_struct(task);
633 out_no_task:
634 return copied;
635 }
636 #endif
637
638 static loff_t mem_lseek(struct file * file, loff_t offset, int orig)
639 {
640 switch (orig) {
641 case 0:
642 file->f_pos = offset;
643 break;
644 case 1:
645 file->f_pos += offset;
646 break;
647 default:
648 return -EINVAL;
649 }
650 force_successful_syscall_return();
651 return file->f_pos;
652 }
653
654 static const struct file_operations proc_mem_operations = {
655 .llseek = mem_lseek,
656 .read = mem_read,
657 .write = mem_write,
658 .open = mem_open,
659 };
660
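/*
 * Per MAY_PTRACE() above, reading another task's /proc/<pid>/mem requires
 * being its ptrace parent while it is stopped.  A rough user-space sketch
 * (illustrative only; error handling is trimmed and the pid and address
 * are taken from the command line):
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		char path[64], buf[16];
 *		ssize_t n = -1;
 *		pid_t pid;
 *		int fd;
 *
 *		if (argc < 3)
 *			return 1;
 *		pid = (pid_t)atoi(argv[1]);
 *		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL))
 *			return 1;
 *		waitpid(pid, NULL, 0);		// wait for the attach stop
 *		snprintf(path, sizeof(path), "/proc/%d/mem", (int)pid);
 *		fd = open(path, O_RDONLY);
 *		if (fd >= 0)
 *			n = pread(fd, buf, sizeof(buf),
 *				  (off_t)strtoul(argv[2], NULL, 0));
 *		if (n > 0)
 *			printf("read %zd bytes\n", n);
 *		ptrace(PTRACE_DETACH, pid, NULL, NULL);
 *		return 0;
 *	}
 */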
661 static ssize_t oom_adjust_read(struct file *file, char __user *buf,
662 size_t count, loff_t *ppos)
663 {
664 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
665 char buffer[PROC_NUMBUF];
666 size_t len;
667 int oom_adjust;
668
669 if (!task)
670 return -ESRCH;
671 oom_adjust = task->oomkilladj;
672 put_task_struct(task);
673
674 len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust);
675
676 return simple_read_from_buffer(buf, count, ppos, buffer, len);
677 }
678
679 static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
680 size_t count, loff_t *ppos)
681 {
682 struct task_struct *task;
683 char buffer[PROC_NUMBUF], *end;
684 int oom_adjust;
685
686 memset(buffer, 0, sizeof(buffer));
687 if (count > sizeof(buffer) - 1)
688 count = sizeof(buffer) - 1;
689 if (copy_from_user(buffer, buf, count))
690 return -EFAULT;
691 oom_adjust = simple_strtol(buffer, &end, 0);
692 if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) &&
693 oom_adjust != OOM_DISABLE)
694 return -EINVAL;
695 if (*end == '\n')
696 end++;
697 task = get_proc_task(file->f_path.dentry->d_inode);
698 if (!task)
699 return -ESRCH;
700 if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) {
701 put_task_struct(task);
702 return -EACCES;
703 }
704 task->oomkilladj = oom_adjust;
705 put_task_struct(task);
706 if (end - buffer == 0)
707 return -EIO;
708 return end - buffer;
709 }
710
711 static const struct file_operations proc_oom_adjust_operations = {
712 .read = oom_adjust_read,
713 .write = oom_adjust_write,
714 };
715
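/*
 * A user-space sketch of adjusting this value (illustrative only; per the
 * check above, raising the value needs no capability, while lowering it
 * below the current setting requires CAP_SYS_RESOURCE; 10 is assumed to
 * lie within the OOM_ADJUST_MIN..OOM_ADJUST_MAX range):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/self/oom_adj", "w");
 *
 *		if (!f)
 *			return 1;
 *		fprintf(f, "10\n");
 *		return fclose(f) != 0;
 *	}
 */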
716 #ifdef CONFIG_MMU
717 static ssize_t clear_refs_write(struct file *file, const char __user *buf,
718 size_t count, loff_t *ppos)
719 {
720 struct task_struct *task;
721 char buffer[PROC_NUMBUF], *end;
722 struct mm_struct *mm;
723
724 memset(buffer, 0, sizeof(buffer));
725 if (count > sizeof(buffer) - 1)
726 count = sizeof(buffer) - 1;
727 if (copy_from_user(buffer, buf, count))
728 return -EFAULT;
729 if (!simple_strtol(buffer, &end, 0))
730 return -EINVAL;
731 if (*end == '\n')
732 end++;
733 task = get_proc_task(file->f_path.dentry->d_inode);
734 if (!task)
735 return -ESRCH;
736 mm = get_task_mm(task);
737 if (mm) {
738 clear_refs_smap(mm);
739 mmput(mm);
740 }
741 put_task_struct(task);
742 if (end - buffer == 0)
743 return -EIO;
744 return end - buffer;
745 }
746
747 static struct file_operations proc_clear_refs_operations = {
748 .write = clear_refs_write,
749 };
750 #endif
751
752 #ifdef CONFIG_AUDITSYSCALL
753 #define TMPBUFLEN 21
754 static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
755 size_t count, loff_t *ppos)
756 {
757 struct inode * inode = file->f_path.dentry->d_inode;
758 struct task_struct *task = get_proc_task(inode);
759 ssize_t length;
760 char tmpbuf[TMPBUFLEN];
761
762 if (!task)
763 return -ESRCH;
764 length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
765 audit_get_loginuid(task->audit_context));
766 put_task_struct(task);
767 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
768 }
769
770 static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
771 size_t count, loff_t *ppos)
772 {
773 struct inode * inode = file->f_path.dentry->d_inode;
774 char *page, *tmp;
775 ssize_t length;
776 uid_t loginuid;
777
778 if (!capable(CAP_AUDIT_CONTROL))
779 return -EPERM;
780
781 if (current != pid_task(proc_pid(inode), PIDTYPE_PID))
782 return -EPERM;
783
784 if (count >= PAGE_SIZE)
785 count = PAGE_SIZE - 1;
786
787 if (*ppos != 0) {
788 /* No partial writes. */
789 return -EINVAL;
790 }
791 page = (char*)__get_free_page(GFP_USER);
792 if (!page)
793 return -ENOMEM;
794 length = -EFAULT;
795 if (copy_from_user(page, buf, count))
796 goto out_free_page;
797
798 page[count] = '\0';
799 loginuid = simple_strtoul(page, &tmp, 10);
800 if (tmp == page) {
801 length = -EINVAL;
802 goto out_free_page;
803
804 }
805 length = audit_set_loginuid(current, loginuid);
806 if (likely(length == 0))
807 length = count;
808
809 out_free_page:
810 free_page((unsigned long) page);
811 return length;
812 }
813
814 static const struct file_operations proc_loginuid_operations = {
815 .read = proc_loginuid_read,
816 .write = proc_loginuid_write,
817 };
818 #endif
819
820 #ifdef CONFIG_SECCOMP
821 static ssize_t seccomp_read(struct file *file, char __user *buf,
822 size_t count, loff_t *ppos)
823 {
824 struct task_struct *tsk = get_proc_task(file->f_dentry->d_inode);
825 char __buf[20];
826 size_t len;
827
828 if (!tsk)
829 return -ESRCH;
830 /* no need to print the trailing zero, so use only len */
831 len = sprintf(__buf, "%u\n", tsk->seccomp.mode);
832 put_task_struct(tsk);
833
834 return simple_read_from_buffer(buf, count, ppos, __buf, len);
835 }
836
837 static ssize_t seccomp_write(struct file *file, const char __user *buf,
838 size_t count, loff_t *ppos)
839 {
840 struct task_struct *tsk = get_proc_task(file->f_dentry->d_inode);
841 char __buf[20], *end;
842 unsigned int seccomp_mode;
843 ssize_t result;
844
845 result = -ESRCH;
846 if (!tsk)
847 goto out_no_task;
848
849 /* can set it only once to be even more secure */
850 result = -EPERM;
851 if (unlikely(tsk->seccomp.mode))
852 goto out;
853
854 result = -EFAULT;
855 memset(__buf, 0, sizeof(__buf));
856 count = min(count, sizeof(__buf) - 1);
857 if (copy_from_user(__buf, buf, count))
858 goto out;
859
860 seccomp_mode = simple_strtoul(__buf, &end, 0);
861 if (*end == '\n')
862 end++;
863 result = -EINVAL;
864 if (seccomp_mode && seccomp_mode <= NR_SECCOMP_MODES) {
865 tsk->seccomp.mode = seccomp_mode;
866 set_tsk_thread_flag(tsk, TIF_SECCOMP);
867 } else
868 goto out;
869 result = -EIO;
870 if (unlikely(!(end - __buf)))
871 goto out;
872 result = end - __buf;
873 out:
874 put_task_struct(tsk);
875 out_no_task:
876 return result;
877 }
878
879 static const struct file_operations proc_seccomp_operations = {
880 .read = seccomp_read,
881 .write = seccomp_write,
882 };
883 #endif /* CONFIG_SECCOMP */
884
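/*
 * A user-space sketch of enabling this (illustrative only; the file exists
 * only with CONFIG_SECCOMP, the write path above refuses any change once a
 * mode is set, and the set of syscalls still usable in mode 1, essentially
 * read/write/exit/sigreturn, is defined by the seccomp core, not here):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/proc/self/seccomp", O_WRONLY);
 *
 *		if (fd < 0 || write(fd, "1", 1) != 1)
 *			return 1;
 *		// From here on, stick to the syscalls mode 1 still allows.
 *		write(1, "seccomp mode 1 enabled\n", 23);
 *		_exit(0);
 *	}
 */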
885 #ifdef CONFIG_FAULT_INJECTION
886 static ssize_t proc_fault_inject_read(struct file * file, char __user * buf,
887 size_t count, loff_t *ppos)
888 {
889 struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
890 char buffer[PROC_NUMBUF];
891 size_t len;
892 int make_it_fail;
893
894 if (!task)
895 return -ESRCH;
896 make_it_fail = task->make_it_fail;
897 put_task_struct(task);
898
899 len = snprintf(buffer, sizeof(buffer), "%i\n", make_it_fail);
900
901 return simple_read_from_buffer(buf, count, ppos, buffer, len);
902 }
903
904 static ssize_t proc_fault_inject_write(struct file * file,
905 const char __user * buf, size_t count, loff_t *ppos)
906 {
907 struct task_struct *task;
908 char buffer[PROC_NUMBUF], *end;
909 int make_it_fail;
910
911 if (!capable(CAP_SYS_RESOURCE))
912 return -EPERM;
913 memset(buffer, 0, sizeof(buffer));
914 if (count > sizeof(buffer) - 1)
915 count = sizeof(buffer) - 1;
916 if (copy_from_user(buffer, buf, count))
917 return -EFAULT;
918 make_it_fail = simple_strtol(buffer, &end, 0);
919 if (*end == '\n')
920 end++;
921 task = get_proc_task(file->f_dentry->d_inode);
922 if (!task)
923 return -ESRCH;
924 task->make_it_fail = make_it_fail;
925 put_task_struct(task);
926 if (end - buffer == 0)
927 return -EIO;
928 return end - buffer;
929 }
930
931 static const struct file_operations proc_fault_inject_operations = {
932 .read = proc_fault_inject_read,
933 .write = proc_fault_inject_write,
934 };
935 #endif
936
937 #ifdef CONFIG_SCHED_DEBUG
938 /*
939 * Print out various scheduling related per-task fields:
940 */
941 static int sched_show(struct seq_file *m, void *v)
942 {
943 struct inode *inode = m->private;
944 struct task_struct *p;
945
946 WARN_ON(!inode);
947
948 p = get_proc_task(inode);
949 if (!p)
950 return -ESRCH;
951 proc_sched_show_task(p, m);
952
953 put_task_struct(p);
954
955 return 0;
956 }
957
958 static ssize_t
959 sched_write(struct file *file, const char __user *buf,
960 size_t count, loff_t *offset)
961 {
962 struct inode *inode = file->f_path.dentry->d_inode;
963 struct task_struct *p;
964
965 WARN_ON(!inode);
966
967 p = get_proc_task(inode);
968 if (!p)
969 return -ESRCH;
970 proc_sched_set_task(p);
971
972 put_task_struct(p);
973
974 return count;
975 }
976
977 static int sched_open(struct inode *inode, struct file *filp)
978 {
979 int ret;
980
981 ret = single_open(filp, sched_show, NULL);
982 if (!ret) {
983 struct seq_file *m = filp->private_data;
984
985 m->private = inode;
986 }
987 return ret;
988 }
989
990 static const struct file_operations proc_pid_sched_operations = {
991 .open = sched_open,
992 .read = seq_read,
993 .write = sched_write,
994 .llseek = seq_lseek,
995 .release = seq_release,
996 };
997
998 #endif
999
1000 static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
1001 {
1002 struct inode *inode = dentry->d_inode;
1003 int error = -EACCES;
1004
1005 /* We don't need a base pointer in the /proc filesystem */
1006 path_release(nd);
1007
1008 /* Are we allowed to snoop on the task's file descriptors? */
1009 if (!proc_fd_access_allowed(inode))
1010 goto out;
1011
1012 error = PROC_I(inode)->op.proc_get_link(inode, &nd->dentry, &nd->mnt);
1013 nd->last_type = LAST_BIND;
1014 out:
1015 return ERR_PTR(error);
1016 }
1017
1018 static int do_proc_readlink(struct dentry *dentry, struct vfsmount *mnt,
1019 char __user *buffer, int buflen)
1020 {
1021 struct inode * inode;
1022 char *tmp = (char*)__get_free_page(GFP_KERNEL), *path;
1023 int len;
1024
1025 if (!tmp)
1026 return -ENOMEM;
1027
1028 inode = dentry->d_inode;
1029 path = d_path(dentry, mnt, tmp, PAGE_SIZE);
1030 len = PTR_ERR(path);
1031 if (IS_ERR(path))
1032 goto out;
1033 len = tmp + PAGE_SIZE - 1 - path;
1034
1035 if (len > buflen)
1036 len = buflen;
1037 if (copy_to_user(buffer, path, len))
1038 len = -EFAULT;
1039 out:
1040 free_page((unsigned long)tmp);
1041 return len;
1042 }
1043
1044 static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen)
1045 {
1046 int error = -EACCES;
1047 struct inode *inode = dentry->d_inode;
1048 struct dentry *de;
1049 struct vfsmount *mnt = NULL;
1050
1051 /* Are we allowed to snoop on the task's file descriptors? */
1052 if (!proc_fd_access_allowed(inode))
1053 goto out;
1054
1055 error = PROC_I(inode)->op.proc_get_link(inode, &de, &mnt);
1056 if (error)
1057 goto out;
1058
1059 error = do_proc_readlink(de, mnt, buffer, buflen);
1060 dput(de);
1061 mntput(mnt);
1062 out:
1063 return error;
1064 }
1065
1066 static const struct inode_operations proc_pid_link_inode_operations = {
1067 .readlink = proc_pid_readlink,
1068 .follow_link = proc_pid_follow_link,
1069 .setattr = proc_setattr,
1070 };
1071
1072
1073 /* building an inode */
1074
1075 static int task_dumpable(struct task_struct *task)
1076 {
1077 int dumpable = 0;
1078 struct mm_struct *mm;
1079
1080 task_lock(task);
1081 mm = task->mm;
1082 if (mm)
1083 dumpable = mm->dumpable;
1084 task_unlock(task);
1085 if(dumpable == 1)
1086 return 1;
1087 return 0;
1088 }
1089
1090
1091 static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task)
1092 {
1093 struct inode * inode;
1094 struct proc_inode *ei;
1095
1096 /* We need a new inode */
1097
1098 inode = new_inode(sb);
1099 if (!inode)
1100 goto out;
1101
1102 /* Common stuff */
1103 ei = PROC_I(inode);
1104 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1105 inode->i_op = &proc_def_inode_operations;
1106
1107 /*
1108 * grab the reference to task.
1109 */
1110 ei->pid = get_task_pid(task, PIDTYPE_PID);
1111 if (!ei->pid)
1112 goto out_unlock;
1113
1114 inode->i_uid = 0;
1115 inode->i_gid = 0;
1116 if (task_dumpable(task)) {
1117 inode->i_uid = task->euid;
1118 inode->i_gid = task->egid;
1119 }
1120 security_task_to_inode(task, inode);
1121
1122 out:
1123 return inode;
1124
1125 out_unlock:
1126 iput(inode);
1127 return NULL;
1128 }
1129
1130 static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
1131 {
1132 struct inode *inode = dentry->d_inode;
1133 struct task_struct *task;
1134 generic_fillattr(inode, stat);
1135
1136 rcu_read_lock();
1137 stat->uid = 0;
1138 stat->gid = 0;
1139 task = pid_task(proc_pid(inode), PIDTYPE_PID);
1140 if (task) {
1141 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
1142 task_dumpable(task)) {
1143 stat->uid = task->euid;
1144 stat->gid = task->egid;
1145 }
1146 }
1147 rcu_read_unlock();
1148 return 0;
1149 }
1150
1151 /* dentry stuff */
1152
1153 /*
1154 * Exceptional case: normally we are not allowed to unhash a busy
1155 * directory. In this case, however, we can do it - no aliasing problems
1156 * due to the way we treat inodes.
1157 *
1158 * Rewrite the inode's ownerships here because the owning task may have
1159 * performed a setuid(), etc.
1160 *
1161 * Before the /proc/pid/status file was created, the only way to read
1162 * the effective uid of a process was to stat /proc/pid. Reading
1163 * /proc/pid/status is slow enough that procps and other packages
1164 * kept stating /proc/pid. To keep the rules in /proc simple I have
1165 * made this apply to all per process world readable and executable
1166 * directories.
1167 */
1168 static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
1169 {
1170 struct inode *inode = dentry->d_inode;
1171 struct task_struct *task = get_proc_task(inode);
1172 if (task) {
1173 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
1174 task_dumpable(task)) {
1175 inode->i_uid = task->euid;
1176 inode->i_gid = task->egid;
1177 } else {
1178 inode->i_uid = 0;
1179 inode->i_gid = 0;
1180 }
1181 inode->i_mode &= ~(S_ISUID | S_ISGID);
1182 security_task_to_inode(task, inode);
1183 put_task_struct(task);
1184 return 1;
1185 }
1186 d_drop(dentry);
1187 return 0;
1188 }
1189
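/*
 * The effect of the ownership rewrite above is visible from user space;
 * a sketch (illustrative only, the argv handling is hypothetical):
 *
 *	#include <stdio.h>
 *	#include <sys/stat.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		char path[64];
 *		struct stat st;
 *
 *		snprintf(path, sizeof(path), "/proc/%s",
 *			 argc > 1 ? argv[1] : "self");
 *		if (stat(path, &st))
 *			return 1;
 *		// For a dumpable task this tracks the current euid/egid, even
 *		// after a setuid(); a non-dumpable task shows up as root-owned.
 *		printf("%s: uid %d gid %d\n", path,
 *		       (int)st.st_uid, (int)st.st_gid);
 *		return 0;
 *	}
 */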
1190 static int pid_delete_dentry(struct dentry * dentry)
1191 {
1192 /* Is the task we represent dead?
1193 * If so, then don't put the dentry on the lru list,
1194 * kill it immediately.
1195 */
1196 return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
1197 }
1198
1199 static struct dentry_operations pid_dentry_operations =
1200 {
1201 .d_revalidate = pid_revalidate,
1202 .d_delete = pid_delete_dentry,
1203 };
1204
1205 /* Lookups */
1206
1207 typedef struct dentry *instantiate_t(struct inode *, struct dentry *,
1208 struct task_struct *, const void *);
1209
1210 /*
1211 * Fill a directory entry.
1212 *
1213 * If possible create the dcache entry and derive our inode number and
1214 * file type from dcache entry.
1215 *
1216 * Since all of the proc inode numbers are dynamically generated, the inode
1217 * numbers do not exist until the inode is cached. This means creating
1218 * the dcache entry in readdir is necessary to keep the inode numbers
1219 * reported by readdir in sync with the inode numbers reported
1220 * by stat.
1221 */
1222 static int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
1223 char *name, int len,
1224 instantiate_t instantiate, struct task_struct *task, const void *ptr)
1225 {
1226 struct dentry *child, *dir = filp->f_path.dentry;
1227 struct inode *inode;
1228 struct qstr qname;
1229 ino_t ino = 0;
1230 unsigned type = DT_UNKNOWN;
1231
1232 qname.name = name;
1233 qname.len = len;
1234 qname.hash = full_name_hash(name, len);
1235
1236 child = d_lookup(dir, &qname);
1237 if (!child) {
1238 struct dentry *new;
1239 new = d_alloc(dir, &qname);
1240 if (new) {
1241 child = instantiate(dir->d_inode, new, task, ptr);
1242 if (child)
1243 dput(new);
1244 else
1245 child = new;
1246 }
1247 }
1248 if (!child || IS_ERR(child) || !child->d_inode)
1249 goto end_instantiate;
1250 inode = child->d_inode;
1251 if (inode) {
1252 ino = inode->i_ino;
1253 type = inode->i_mode >> 12;
1254 }
1255 dput(child);
1256 end_instantiate:
1257 if (!ino)
1258 ino = find_inode_number(dir, &qname);
1259 if (!ino)
1260 ino = 1;
1261 return filldir(dirent, name, len, filp->f_pos, ino, type);
1262 }
1263
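/*
 * A user-space sketch of the property the comment above is after, namely
 * that inode numbers returned by readdir() agree with those returned by
 * stat() (illustrative only; lstat() is used so symlinks such as "exe" are
 * examined as themselves, and the fallback paths above mean an occasional
 * mismatch is still possible):
 *
 *	#include <dirent.h>
 *	#include <stdio.h>
 *	#include <sys/stat.h>
 *
 *	int main(void)
 *	{
 *		DIR *d = opendir("/proc/self");
 *		struct dirent *de;
 *		struct stat st;
 *		char path[512];
 *
 *		if (!d)
 *			return 1;
 *		while ((de = readdir(d)) != NULL) {
 *			snprintf(path, sizeof(path), "/proc/self/%s",
 *				 de->d_name);
 *			if (lstat(path, &st) == 0 && st.st_ino != de->d_ino)
 *				printf("mismatch on %s\n", de->d_name);
 *		}
 *		closedir(d);
 *		return 0;
 *	}
 */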
1264 static unsigned name_to_int(struct dentry *dentry)
1265 {
1266 const char *name = dentry->d_name.name;
1267 int len = dentry->d_name.len;
1268 unsigned n = 0;
1269
1270 if (len > 1 && *name == '0')
1271 goto out;
1272 while (len-- > 0) {
1273 unsigned c = *name++ - '0';
1274 if (c > 9)
1275 goto out;
1276 if (n >= (~0U-9)/10)
1277 goto out;
1278 n *= 10;
1279 n += c;
1280 }
1281 return n;
1282 out:
1283 return ~0U;
1284 }
1285
1286 #define PROC_FDINFO_MAX 64
1287
1288 static int proc_fd_info(struct inode *inode, struct dentry **dentry,
1289 struct vfsmount **mnt, char *info)
1290 {
1291 struct task_struct *task = get_proc_task(inode);
1292 struct files_struct *files = NULL;
1293 struct file *file;
1294 int fd = proc_fd(inode);
1295
1296 if (task) {
1297 files = get_files_struct(task);
1298 put_task_struct(task);
1299 }
1300 if (files) {
1301 /*
1302 * We are not taking a ref to the file structure, so we must
1303 * hold ->file_lock.
1304 */
1305 spin_lock(&files->file_lock);
1306 file = fcheck_files(files, fd);
1307 if (file) {
1308 if (mnt)
1309 *mnt = mntget(file->f_path.mnt);
1310 if (dentry)
1311 *dentry = dget(file->f_path.dentry);
1312 if (info)
1313 snprintf(info, PROC_FDINFO_MAX,
1314 "pos:\t%lli\n"
1315 "flags:\t0%o\n",
1316 (long long) file->f_pos,
1317 file->f_flags);
1318 spin_unlock(&files->file_lock);
1319 put_files_struct(files);
1320 return 0;
1321 }
1322 spin_unlock(&files->file_lock);
1323 put_files_struct(files);
1324 }
1325 return -ENOENT;
1326 }
1327
1328 static int proc_fd_link(struct inode *inode, struct dentry **dentry,
1329 struct vfsmount **mnt)
1330 {
1331 return proc_fd_info(inode, dentry, mnt, NULL);
1332 }
1333
1334 static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
1335 {
1336 struct inode *inode = dentry->d_inode;
1337 struct task_struct *task = get_proc_task(inode);
1338 int fd = proc_fd(inode);
1339 struct files_struct *files;
1340
1341 if (task) {
1342 files = get_files_struct(task);
1343 if (files) {
1344 rcu_read_lock();
1345 if (fcheck_files(files, fd)) {
1346 rcu_read_unlock();
1347 put_files_struct(files);
1348 if (task_dumpable(task)) {
1349 inode->i_uid = task->euid;
1350 inode->i_gid = task->egid;
1351 } else {
1352 inode->i_uid = 0;
1353 inode->i_gid = 0;
1354 }
1355 inode->i_mode &= ~(S_ISUID | S_ISGID);
1356 security_task_to_inode(task, inode);
1357 put_task_struct(task);
1358 return 1;
1359 }
1360 rcu_read_unlock();
1361 put_files_struct(files);
1362 }
1363 put_task_struct(task);
1364 }
1365 d_drop(dentry);
1366 return 0;
1367 }
1368
1369 static struct dentry_operations tid_fd_dentry_operations =
1370 {
1371 .d_revalidate = tid_fd_revalidate,
1372 .d_delete = pid_delete_dentry,
1373 };
1374
1375 static struct dentry *proc_fd_instantiate(struct inode *dir,
1376 struct dentry *dentry, struct task_struct *task, const void *ptr)
1377 {
1378 unsigned fd = *(const unsigned *)ptr;
1379 struct file *file;
1380 struct files_struct *files;
1381 struct inode *inode;
1382 struct proc_inode *ei;
1383 struct dentry *error = ERR_PTR(-ENOENT);
1384
1385 inode = proc_pid_make_inode(dir->i_sb, task);
1386 if (!inode)
1387 goto out;
1388 ei = PROC_I(inode);
1389 ei->fd = fd;
1390 files = get_files_struct(task);
1391 if (!files)
1392 goto out_iput;
1393 inode->i_mode = S_IFLNK;
1394
1395 /*
1396 * We are not taking a ref to the file structure, so we must
1397 * hold ->file_lock.
1398 */
1399 spin_lock(&files->file_lock);
1400 file = fcheck_files(files, fd);
1401 if (!file)
1402 goto out_unlock;
1403 if (file->f_mode & 1)
1404 inode->i_mode |= S_IRUSR | S_IXUSR;
1405 if (file->f_mode & 2)
1406 inode->i_mode |= S_IWUSR | S_IXUSR;
1407 spin_unlock(&files->file_lock);
1408 put_files_struct(files);
1409
1410 inode->i_op = &proc_pid_link_inode_operations;
1411 inode->i_size = 64;
1412 ei->op.proc_get_link = proc_fd_link;
1413 dentry->d_op = &tid_fd_dentry_operations;
1414 d_add(dentry, inode);
1415 /* Close the race of the process dying before we return the dentry */
1416 if (tid_fd_revalidate(dentry, NULL))
1417 error = NULL;
1418
1419 out:
1420 return error;
1421 out_unlock:
1422 spin_unlock(&files->file_lock);
1423 put_files_struct(files);
1424 out_iput:
1425 iput(inode);
1426 goto out;
1427 }
1428
1429 static struct dentry *proc_lookupfd_common(struct inode *dir,
1430 struct dentry *dentry,
1431 instantiate_t instantiate)
1432 {
1433 struct task_struct *task = get_proc_task(dir);
1434 unsigned fd = name_to_int(dentry);
1435 struct dentry *result = ERR_PTR(-ENOENT);
1436
1437 if (!task)
1438 goto out_no_task;
1439 if (fd == ~0U)
1440 goto out;
1441
1442 result = instantiate(dir, dentry, task, &fd);
1443 out:
1444 put_task_struct(task);
1445 out_no_task:
1446 return result;
1447 }
1448
1449 static int proc_readfd_common(struct file * filp, void * dirent,
1450 filldir_t filldir, instantiate_t instantiate)
1451 {
1452 struct dentry *dentry = filp->f_path.dentry;
1453 struct inode *inode = dentry->d_inode;
1454 struct task_struct *p = get_proc_task(inode);
1455 unsigned int fd, tid, ino;
1456 int retval;
1457 struct files_struct * files;
1458 struct fdtable *fdt;
1459
1460 retval = -ENOENT;
1461 if (!p)
1462 goto out_no_task;
1463 retval = 0;
1464 tid = p->pid;
1465
1466 fd = filp->f_pos;
1467 switch (fd) {
1468 case 0:
1469 if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
1470 goto out;
1471 filp->f_pos++;
1472 case 1:
1473 ino = parent_ino(dentry);
1474 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
1475 goto out;
1476 filp->f_pos++;
1477 default:
1478 files = get_files_struct(p);
1479 if (!files)
1480 goto out;
1481 rcu_read_lock();
1482 fdt = files_fdtable(files);
1483 for (fd = filp->f_pos-2;
1484 fd < fdt->max_fds;
1485 fd++, filp->f_pos++) {
1486 char name[PROC_NUMBUF];
1487 int len;
1488
1489 if (!fcheck_files(files, fd))
1490 continue;
1491 rcu_read_unlock();
1492
1493 len = snprintf(name, sizeof(name), "%d", fd);
1494 if (proc_fill_cache(filp, dirent, filldir,
1495 name, len, instantiate,
1496 p, &fd) < 0) {
1497 rcu_read_lock();
1498 break;
1499 }
1500 rcu_read_lock();
1501 }
1502 rcu_read_unlock();
1503 put_files_struct(files);
1504 }
1505 out:
1506 put_task_struct(p);
1507 out_no_task:
1508 return retval;
1509 }
1510
1511 static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
1512 struct nameidata *nd)
1513 {
1514 return proc_lookupfd_common(dir, dentry, proc_fd_instantiate);
1515 }
1516
1517 static int proc_readfd(struct file *filp, void *dirent, filldir_t filldir)
1518 {
1519 return proc_readfd_common(filp, dirent, filldir, proc_fd_instantiate);
1520 }
1521
1522 static ssize_t proc_fdinfo_read(struct file *file, char __user *buf,
1523 size_t len, loff_t *ppos)
1524 {
1525 char tmp[PROC_FDINFO_MAX];
1526 int err = proc_fd_info(file->f_path.dentry->d_inode, NULL, NULL, tmp);
1527 if (!err)
1528 err = simple_read_from_buffer(buf, len, ppos, tmp, strlen(tmp));
1529 return err;
1530 }
1531
1532 static const struct file_operations proc_fdinfo_file_operations = {
1533 .open = nonseekable_open,
1534 .read = proc_fdinfo_read,
1535 };
1536
1537 static const struct file_operations proc_fd_operations = {
1538 .read = generic_read_dir,
1539 .readdir = proc_readfd,
1540 };
1541
1542 /*
1543 * /proc/pid/fd needs a special permission handler so that a process can still
1544 * access /proc/self/fd after it has executed a setuid().
1545 */
1546 static int proc_fd_permission(struct inode *inode, int mask,
1547 struct nameidata *nd)
1548 {
1549 int rv;
1550
1551 rv = generic_permission(inode, mask, NULL);
1552 if (rv == 0)
1553 return 0;
1554 if (task_pid(current) == proc_pid(inode))
1555 rv = 0;
1556 return rv;
1557 }
1558
1559 /*
1560 * proc directories can do almost nothing..
1561 */
1562 static const struct inode_operations proc_fd_inode_operations = {
1563 .lookup = proc_lookupfd,
1564 .permission = proc_fd_permission,
1565 .setattr = proc_setattr,
1566 };
1567
1568 static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
1569 struct dentry *dentry, struct task_struct *task, const void *ptr)
1570 {
1571 unsigned fd = *(unsigned *)ptr;
1572 struct inode *inode;
1573 struct proc_inode *ei;
1574 struct dentry *error = ERR_PTR(-ENOENT);
1575
1576 inode = proc_pid_make_inode(dir->i_sb, task);
1577 if (!inode)
1578 goto out;
1579 ei = PROC_I(inode);
1580 ei->fd = fd;
1581 inode->i_mode = S_IFREG | S_IRUSR;
1582 inode->i_fop = &proc_fdinfo_file_operations;
1583 dentry->d_op = &tid_fd_dentry_operations;
1584 d_add(dentry, inode);
1585 /* Close the race of the process dying before we return the dentry */
1586 if (tid_fd_revalidate(dentry, NULL))
1587 error = NULL;
1588
1589 out:
1590 return error;
1591 }
1592
1593 static struct dentry *proc_lookupfdinfo(struct inode *dir,
1594 struct dentry *dentry,
1595 struct nameidata *nd)
1596 {
1597 return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate);
1598 }
1599
1600 static int proc_readfdinfo(struct file *filp, void *dirent, filldir_t filldir)
1601 {
1602 return proc_readfd_common(filp, dirent, filldir,
1603 proc_fdinfo_instantiate);
1604 }
1605
1606 static const struct file_operations proc_fdinfo_operations = {
1607 .read = generic_read_dir,
1608 .readdir = proc_readfdinfo,
1609 };
1610
1611 /*
1612 * proc directories can do almost nothing..
1613 */
1614 static const struct inode_operations proc_fdinfo_inode_operations = {
1615 .lookup = proc_lookupfdinfo,
1616 .setattr = proc_setattr,
1617 };
1618
1619
1620 static struct dentry *proc_pident_instantiate(struct inode *dir,
1621 struct dentry *dentry, struct task_struct *task, const void *ptr)
1622 {
1623 const struct pid_entry *p = ptr;
1624 struct inode *inode;
1625 struct proc_inode *ei;
1626 struct dentry *error = ERR_PTR(-EINVAL);
1627
1628 inode = proc_pid_make_inode(dir->i_sb, task);
1629 if (!inode)
1630 goto out;
1631
1632 ei = PROC_I(inode);
1633 inode->i_mode = p->mode;
1634 if (S_ISDIR(inode->i_mode))
1635 inode->i_nlink = 2; /* Use getattr to fix if necessary */
1636 if (p->iop)
1637 inode->i_op = p->iop;
1638 if (p->fop)
1639 inode->i_fop = p->fop;
1640 ei->op = p->op;
1641 dentry->d_op = &pid_dentry_operations;
1642 d_add(dentry, inode);
1643 /* Close the race of the process dying before we return the dentry */
1644 if (pid_revalidate(dentry, NULL))
1645 error = NULL;
1646 out:
1647 return error;
1648 }
1649
1650 static struct dentry *proc_pident_lookup(struct inode *dir,
1651 struct dentry *dentry,
1652 const struct pid_entry *ents,
1653 unsigned int nents)
1654 {
1655 struct inode *inode;
1656 struct dentry *error;
1657 struct task_struct *task = get_proc_task(dir);
1658 const struct pid_entry *p, *last;
1659
1660 error = ERR_PTR(-ENOENT);
1661 inode = NULL;
1662
1663 if (!task)
1664 goto out_no_task;
1665
1666 /*
1667 * Yes, it does not scale. And it should not. Don't add
1668 * new entries into /proc/<tgid>/ without very good reasons.
1669 */
1670 last = &ents[nents - 1];
1671 for (p = ents; p <= last; p++) {
1672 if (p->len != dentry->d_name.len)
1673 continue;
1674 if (!memcmp(dentry->d_name.name, p->name, p->len))
1675 break;
1676 }
1677 if (p > last)
1678 goto out;
1679
1680 error = proc_pident_instantiate(dir, dentry, task, p);
1681 out:
1682 put_task_struct(task);
1683 out_no_task:
1684 return error;
1685 }
1686
1687 static int proc_pident_fill_cache(struct file *filp, void *dirent,
1688 filldir_t filldir, struct task_struct *task, const struct pid_entry *p)
1689 {
1690 return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
1691 proc_pident_instantiate, task, p);
1692 }
1693
1694 static int proc_pident_readdir(struct file *filp,
1695 void *dirent, filldir_t filldir,
1696 const struct pid_entry *ents, unsigned int nents)
1697 {
1698 int i;
1699 int pid;
1700 struct dentry *dentry = filp->f_path.dentry;
1701 struct inode *inode = dentry->d_inode;
1702 struct task_struct *task = get_proc_task(inode);
1703 const struct pid_entry *p, *last;
1704 ino_t ino;
1705 int ret;
1706
1707 ret = -ENOENT;
1708 if (!task)
1709 goto out_no_task;
1710
1711 ret = 0;
1712 pid = task->pid;
1713 i = filp->f_pos;
1714 switch (i) {
1715 case 0:
1716 ino = inode->i_ino;
1717 if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
1718 goto out;
1719 i++;
1720 filp->f_pos++;
1721 /* fall through */
1722 case 1:
1723 ino = parent_ino(dentry);
1724 if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
1725 goto out;
1726 i++;
1727 filp->f_pos++;
1728 /* fall through */
1729 default:
1730 i -= 2;
1731 if (i >= nents) {
1732 ret = 1;
1733 goto out;
1734 }
1735 p = ents + i;
1736 last = &ents[nents - 1];
1737 while (p <= last) {
1738 if (proc_pident_fill_cache(filp, dirent, filldir, task, p) < 0)
1739 goto out;
1740 filp->f_pos++;
1741 p++;
1742 }
1743 }
1744
1745 ret = 1;
1746 out:
1747 put_task_struct(task);
1748 out_no_task:
1749 return ret;
1750 }
1751
1752 #ifdef CONFIG_SECURITY
1753 static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
1754 size_t count, loff_t *ppos)
1755 {
1756 struct inode * inode = file->f_path.dentry->d_inode;
1757 char *p = NULL;
1758 ssize_t length;
1759 struct task_struct *task = get_proc_task(inode);
1760
1761 if (!task)
1762 return -ESRCH;
1763
1764 length = security_getprocattr(task,
1765 (char*)file->f_path.dentry->d_name.name,
1766 &p);
1767 put_task_struct(task);
1768 if (length > 0)
1769 length = simple_read_from_buffer(buf, count, ppos, p, length);
1770 kfree(p);
1771 return length;
1772 }
1773
1774 static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
1775 size_t count, loff_t *ppos)
1776 {
1777 struct inode * inode = file->f_path.dentry->d_inode;
1778 char *page;
1779 ssize_t length;
1780 struct task_struct *task = get_proc_task(inode);
1781
1782 length = -ESRCH;
1783 if (!task)
1784 goto out_no_task;
1785 if (count > PAGE_SIZE)
1786 count = PAGE_SIZE;
1787
1788 /* No partial writes. */
1789 length = -EINVAL;
1790 if (*ppos != 0)
1791 goto out;
1792
1793 length = -ENOMEM;
1794 page = (char*)__get_free_page(GFP_USER);
1795 if (!page)
1796 goto out;
1797
1798 length = -EFAULT;
1799 if (copy_from_user(page, buf, count))
1800 goto out_free;
1801
1802 length = security_setprocattr(task,
1803 (char*)file->f_path.dentry->d_name.name,
1804 (void*)page, count);
1805 out_free:
1806 free_page((unsigned long) page);
1807 out:
1808 put_task_struct(task);
1809 out_no_task:
1810 return length;
1811 }
1812
1813 static const struct file_operations proc_pid_attr_operations = {
1814 .read = proc_pid_attr_read,
1815 .write = proc_pid_attr_write,
1816 };
1817
1818 static const struct pid_entry attr_dir_stuff[] = {
1819 REG("current", S_IRUGO|S_IWUGO, pid_attr),
1820 REG("prev", S_IRUGO, pid_attr),
1821 REG("exec", S_IRUGO|S_IWUGO, pid_attr),
1822 REG("fscreate", S_IRUGO|S_IWUGO, pid_attr),
1823 REG("keycreate", S_IRUGO|S_IWUGO, pid_attr),
1824 REG("sockcreate", S_IRUGO|S_IWUGO, pid_attr),
1825 };
1826
1827 static int proc_attr_dir_readdir(struct file * filp,
1828 void * dirent, filldir_t filldir)
1829 {
1830 return proc_pident_readdir(filp,dirent,filldir,
1831 attr_dir_stuff,ARRAY_SIZE(attr_dir_stuff));
1832 }
1833
1834 static const struct file_operations proc_attr_dir_operations = {
1835 .read = generic_read_dir,
1836 .readdir = proc_attr_dir_readdir,
1837 };
1838
1839 static struct dentry *proc_attr_dir_lookup(struct inode *dir,
1840 struct dentry *dentry, struct nameidata *nd)
1841 {
1842 return proc_pident_lookup(dir, dentry,
1843 attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
1844 }
1845
1846 static const struct inode_operations proc_attr_dir_inode_operations = {
1847 .lookup = proc_attr_dir_lookup,
1848 .getattr = pid_getattr,
1849 .setattr = proc_setattr,
1850 };
1851
1852 #endif
1853
1854 /*
1855 * /proc/self:
1856 */
1857 static int proc_self_readlink(struct dentry *dentry, char __user *buffer,
1858 int buflen)
1859 {
1860 char tmp[PROC_NUMBUF];
1861 sprintf(tmp, "%d", current->tgid);
1862 return vfs_readlink(dentry,buffer,buflen,tmp);
1863 }
1864
1865 static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
1866 {
1867 char tmp[PROC_NUMBUF];
1868 sprintf(tmp, "%d", current->tgid);
1869 return ERR_PTR(vfs_follow_link(nd,tmp));
1870 }
1871
1872 static const struct inode_operations proc_self_inode_operations = {
1873 .readlink = proc_self_readlink,
1874 .follow_link = proc_self_follow_link,
1875 };
1876
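/*
 * From user space the link simply resolves to the reader's own tgid; a
 * small sketch (illustrative only):
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[64];
 *		ssize_t n = readlink("/proc/self", buf, sizeof(buf) - 1);
 *
 *		if (n < 0)
 *			return 1;
 *		buf[n] = '\0';
 *		// Prints e.g. "/proc/self -> 1234", matching getpid().
 *		printf("/proc/self -> %s (getpid() = %d)\n", buf, (int)getpid());
 *		return 0;
 *	}
 */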
1877 /*
1878 * proc base
1879 *
1880 * These are the directory entries in the root directory of /proc
1881 * that properly belong to the /proc filesystem, as they describe
1882 * describe something that is process related.
1883 * something that is process related.
1884 static const struct pid_entry proc_base_stuff[] = {
1885 NOD("self", S_IFLNK|S_IRWXUGO,
1886 &proc_self_inode_operations, NULL, {}),
1887 };
1888
1889 /*
1890 * Exceptional case: normally we are not allowed to unhash a busy
1891 * directory. In this case, however, we can do it - no aliasing problems
1892 * due to the way we treat inodes.
1893 */
1894 static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
1895 {
1896 struct inode *inode = dentry->d_inode;
1897 struct task_struct *task = get_proc_task(inode);
1898 if (task) {
1899 put_task_struct(task);
1900 return 1;
1901 }
1902 d_drop(dentry);
1903 return 0;
1904 }
1905
1906 static struct dentry_operations proc_base_dentry_operations =
1907 {
1908 .d_revalidate = proc_base_revalidate,
1909 .d_delete = pid_delete_dentry,
1910 };
1911
1912 static struct dentry *proc_base_instantiate(struct inode *dir,
1913 struct dentry *dentry, struct task_struct *task, const void *ptr)
1914 {
1915 const struct pid_entry *p = ptr;
1916 struct inode *inode;
1917 struct proc_inode *ei;
1918 struct dentry *error = ERR_PTR(-EINVAL);
1919
1920 /* Allocate the inode */
1921 error = ERR_PTR(-ENOMEM);
1922 inode = new_inode(dir->i_sb);
1923 if (!inode)
1924 goto out;
1925
1926 /* Initialize the inode */
1927 ei = PROC_I(inode);
1928 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1929
1930 /*
1931 * grab the reference to the task.
1932 */
1933 ei->pid = get_task_pid(task, PIDTYPE_PID);
1934 if (!ei->pid)
1935 goto out_iput;
1936
1937 inode->i_uid = 0;
1938 inode->i_gid = 0;
1939 inode->i_mode = p->mode;
1940 if (S_ISDIR(inode->i_mode))
1941 inode->i_nlink = 2;
1942 if (S_ISLNK(inode->i_mode))
1943 inode->i_size = 64;
1944 if (p->iop)
1945 inode->i_op = p->iop;
1946 if (p->fop)
1947 inode->i_fop = p->fop;
1948 ei->op = p->op;
1949 dentry->d_op = &proc_base_dentry_operations;
1950 d_add(dentry, inode);
1951 error = NULL;
1952 out:
1953 return error;
1954 out_iput:
1955 iput(inode);
1956 goto out;
1957 }
1958
1959 static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry)
1960 {
1961 struct dentry *error;
1962 struct task_struct *task = get_proc_task(dir);
1963 const struct pid_entry *p, *last;
1964
1965 error = ERR_PTR(-ENOENT);
1966
1967 if (!task)
1968 goto out_no_task;
1969
1970 /* Lookup the directory entry */
1971 last = &proc_base_stuff[ARRAY_SIZE(proc_base_stuff) - 1];
1972 for (p = proc_base_stuff; p <= last; p++) {
1973 if (p->len != dentry->d_name.len)
1974 continue;
1975 if (!memcmp(dentry->d_name.name, p->name, p->len))
1976 break;
1977 }
1978 if (p > last)
1979 goto out;
1980
1981 error = proc_base_instantiate(dir, dentry, task, p);
1982
1983 out:
1984 put_task_struct(task);
1985 out_no_task:
1986 return error;
1987 }
1988
1989 static int proc_base_fill_cache(struct file *filp, void *dirent,
1990 filldir_t filldir, struct task_struct *task, const struct pid_entry *p)
1991 {
1992 return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
1993 proc_base_instantiate, task, p);
1994 }
1995
1996 #ifdef CONFIG_TASK_IO_ACCOUNTING
1997 static int proc_pid_io_accounting(struct task_struct *task, char *buffer)
1998 {
1999 return sprintf(buffer,
2000 #ifdef CONFIG_TASK_XACCT
2001 "rchar: %llu\n"
2002 "wchar: %llu\n"
2003 "syscr: %llu\n"
2004 "syscw: %llu\n"
2005 #endif
2006 "read_bytes: %llu\n"
2007 "write_bytes: %llu\n"
2008 "cancelled_write_bytes: %llu\n",
2009 #ifdef CONFIG_TASK_XACCT
2010 (unsigned long long)task->rchar,
2011 (unsigned long long)task->wchar,
2012 (unsigned long long)task->syscr,
2013 (unsigned long long)task->syscw,
2014 #endif
2015 (unsigned long long)task->ioac.read_bytes,
2016 (unsigned long long)task->ioac.write_bytes,
2017 (unsigned long long)task->ioac.cancelled_write_bytes);
2018 }
2019 #endif
2020
2021 /*
2022 * Thread groups
2023 */
2024 static const struct file_operations proc_task_operations;
2025 static const struct inode_operations proc_task_inode_operations;
2026
2027 static const struct pid_entry tgid_base_stuff[] = {
2028 DIR("task", S_IRUGO|S_IXUGO, task),
2029 DIR("fd", S_IRUSR|S_IXUSR, fd),
2030 DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo),
2031 INF("environ", S_IRUSR, pid_environ),
2032 INF("auxv", S_IRUSR, pid_auxv),
2033 INF("status", S_IRUGO, pid_status),
2034 #ifdef CONFIG_SCHED_DEBUG
2035 REG("sched", S_IRUGO|S_IWUSR, pid_sched),
2036 #endif
2037 INF("cmdline", S_IRUGO, pid_cmdline),
2038 INF("stat", S_IRUGO, tgid_stat),
2039 INF("statm", S_IRUGO, pid_statm),
2040 REG("maps", S_IRUGO, maps),
2041 #ifdef CONFIG_NUMA
2042 REG("numa_maps", S_IRUGO, numa_maps),
2043 #endif
2044 REG("mem", S_IRUSR|S_IWUSR, mem),
2045 #ifdef CONFIG_SECCOMP
2046 REG("seccomp", S_IRUSR|S_IWUSR, seccomp),
2047 #endif
2048 LNK("cwd", cwd),
2049 LNK("root", root),
2050 LNK("exe", exe),
2051 REG("mounts", S_IRUGO, mounts),
2052 REG("mountstats", S_IRUSR, mountstats),
2053 #ifdef CONFIG_MMU
2054 REG("clear_refs", S_IWUSR, clear_refs),
2055 REG("smaps", S_IRUGO, smaps),
2056 #endif
2057 #ifdef CONFIG_SECURITY
2058 DIR("attr", S_IRUGO|S_IXUGO, attr_dir),
2059 #endif
2060 #ifdef CONFIG_KALLSYMS
2061 INF("wchan", S_IRUGO, pid_wchan),
2062 #endif
2063 #ifdef CONFIG_SCHEDSTATS
2064 INF("schedstat", S_IRUGO, pid_schedstat),
2065 #endif
2066 #ifdef CONFIG_CPUSETS
2067 REG("cpuset", S_IRUGO, cpuset),
2068 #endif
2069 INF("oom_score", S_IRUGO, oom_score),
2070 REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
2071 #ifdef CONFIG_AUDITSYSCALL
2072 REG("loginuid", S_IWUSR|S_IRUGO, loginuid),
2073 #endif
2074 #ifdef CONFIG_FAULT_INJECTION
2075 REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
2076 #endif
2077 #ifdef CONFIG_TASK_IO_ACCOUNTING
2078 INF("io", S_IRUGO, pid_io_accounting),
2079 #endif
2080 };
2081
2082 static int proc_tgid_base_readdir(struct file * filp,
2083 void * dirent, filldir_t filldir)
2084 {
2085 return proc_pident_readdir(filp,dirent,filldir,
2086 tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff));
2087 }
2088
2089 static const struct file_operations proc_tgid_base_operations = {
2090 .read = generic_read_dir,
2091 .readdir = proc_tgid_base_readdir,
2092 };
2093
2094 static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
2095 return proc_pident_lookup(dir, dentry,
2096 tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
2097 }
2098
2099 static const struct inode_operations proc_tgid_base_inode_operations = {
2100 .lookup = proc_tgid_base_lookup,
2101 .getattr = pid_getattr,
2102 .setattr = proc_setattr,
2103 };
2104
2105 /**
2106 * proc_flush_task - Remove dcache entries for @task from the /proc dcache.
2107 *
2108 * @task: task that should be flushed.
2109 *
2110 * Looks in the dcache for
2111 * /proc/@pid
2112 * /proc/@tgid/task/@pid
2113 * if either directory is present, flushes it and all of its children
2114 * from the dcache.
2115 *
2116 * It is safe and reasonable to cache /proc entries for a task until
2117 * that task exits. After that they just clog up the dcache with
2118 * useless entries, possibly causing useful dcache entries to be
2119 * flushed instead. This routine is provided to flush those useless
2120 * dcache entries at process exit time.
2121 *
2122 * NOTE: This routine is just an optimization, so it does not guarantee
2123 * that no dcache entries will exist at process exit time; it
2124 * just makes it very unlikely that any will persist.
2125 */
2126 void proc_flush_task(struct task_struct *task)
2127 {
2128 struct dentry *dentry, *leader, *dir;
2129 char buf[PROC_NUMBUF];
2130 struct qstr name;
2131
2132 name.name = buf;
2133 name.len = snprintf(buf, sizeof(buf), "%d", task->pid);
2134 dentry = d_hash_and_lookup(proc_mnt->mnt_root, &name);
2135 if (dentry) {
2136 shrink_dcache_parent(dentry);
2137 d_drop(dentry);
2138 dput(dentry);
2139 }
2140
2141 if (thread_group_leader(task))
2142 goto out;
2143
2144 name.name = buf;
2145 name.len = snprintf(buf, sizeof(buf), "%d", task->tgid);
2146 leader = d_hash_and_lookup(proc_mnt->mnt_root, &name);
2147 if (!leader)
2148 goto out;
2149
2150 name.name = "task";
2151 name.len = strlen(name.name);
2152 dir = d_hash_and_lookup(leader, &name);
2153 if (!dir)
2154 goto out_put_leader;
2155
2156 name.name = buf;
2157 name.len = snprintf(buf, sizeof(buf), "%d", task->pid);
2158 dentry = d_hash_and_lookup(dir, &name);
2159 if (dentry) {
2160 shrink_dcache_parent(dentry);
2161 d_drop(dentry);
2162 dput(dentry);
2163 }
2164
2165 dput(dir);
2166 out_put_leader:
2167 dput(leader);
2168 out:
2169 return;
2170 }
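/*
 * proc_flush_task() is normally called from the exit path (release_task())
 * once the task can no longer be looked up.  For a thread with pid 1234 in
 * thread group 1200 it drops, if cached,
 *
 *   /proc/1234                 (the per-pid directory)
 *   /proc/1200/task/1234       (the per-thread directory)
 *
 * For a group leader only the first lookup is performed; the function
 * returns early via the thread_group_leader() test.
 */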
2171
2172 static struct dentry *proc_pid_instantiate(struct inode *dir,
2173 struct dentry * dentry,
2174 struct task_struct *task, const void *ptr)
2175 {
2176 struct dentry *error = ERR_PTR(-ENOENT);
2177 struct inode *inode;
2178
2179 inode = proc_pid_make_inode(dir->i_sb, task);
2180 if (!inode)
2181 goto out;
2182
2183 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
2184 inode->i_op = &proc_tgid_base_inode_operations;
2185 inode->i_fop = &proc_tgid_base_operations;
2186 inode->i_flags|=S_IMMUTABLE;
2187 inode->i_nlink = 5;
2188 #ifdef CONFIG_SECURITY
2189 inode->i_nlink += 1;
2190 #endif
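/*
 * For a directory, i_nlink is conventionally 2 ("." plus the link from
 * the parent) plus one per subdirectory.  /proc/<tgid> has the task, fd
 * and fdinfo subdirectories, hence 5; the attr subdirectory adds one
 * more when CONFIG_SECURITY is enabled.
 */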
2191
2192 dentry->d_op = &pid_dentry_operations;
2193
2194 d_add(dentry, inode);
2195 /* Close the race of the process dying before we return the dentry */
2196 if (pid_revalidate(dentry, NULL))
2197 error = NULL;
2198 out:
2199 return error;
2200 }
2201
2202 struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
2203 {
2204 struct dentry *result = ERR_PTR(-ENOENT);
2205 struct task_struct *task;
2206 unsigned tgid;
2207
2208 result = proc_base_lookup(dir, dentry);
2209 if (!IS_ERR(result) || PTR_ERR(result) != -ENOENT)
2210 goto out;
2211
2212 tgid = name_to_int(dentry);
2213 if (tgid == ~0U)
2214 goto out;
2215
2216 rcu_read_lock();
2217 task = find_task_by_pid(tgid);
2218 if (task)
2219 get_task_struct(task);
2220 rcu_read_unlock();
2221 if (!task)
2222 goto out;
2223
2224 result = proc_pid_instantiate(dir, dentry, task, NULL);
2225 put_task_struct(task);
2226 out:
2227 return result;
2228 }
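/*
 * Non-numeric names under /proc (such as "self", assuming proc_base_stuff
 * is set up as elsewhere in this file) are handled by proc_base_lookup()
 * above.  name_to_int() returns ~0U for anything that is not a plain
 * decimal number, so such names simply fall through to -ENOENT here,
 * while a numeric name like "1234" is resolved with find_task_by_pid()
 * under rcu_read_lock().
 */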
2229
2230 /*
2231 * Find the first task whose tgid is >= the requested tgid.
2232 */
2234 static struct task_struct *next_tgid(unsigned int tgid)
2235 {
2236 struct task_struct *task;
2237 struct pid *pid;
2238
2239 rcu_read_lock();
2240 retry:
2241 task = NULL;
2242 pid = find_ge_pid(tgid);
2243 if (pid) {
2244 tgid = pid->nr + 1;
2245 task = pid_task(pid, PIDTYPE_PID);
2246 /* What we want to know is whether the pid we have found is the
2247 * pid of a thread_group_leader. Testing for task
2248 * being a thread_group_leader is the obvious thing
2249 * to do, but there is a window when it fails, due to
2250 * the pid transfer logic in de_thread.
2251 *
2252 * So we perform the straightforward test of seeing
2253 * if the pid we have found is the pid of a thread
2254 * group leader, and don't worry if the task we have
2255 * found doesn't happen to be a thread group leader,
2256 * as we don't care in the case of readdir.
2257 */
2258 if (!task || !has_group_leader_pid(task))
2259 goto retry;
2260 get_task_struct(task);
2261 }
2262 rcu_read_unlock();
2263 return task;
2264 }
2265
2266 #define TGID_OFFSET (FIRST_PROCESS_ENTRY + ARRAY_SIZE(proc_base_stuff))
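/*
 * Directory offsets for /proc itself are laid out as:
 *
 *   [0, FIRST_PROCESS_ENTRY)             non-process entries, handled
 *                                        before proc_pid_readdir()
 *   [FIRST_PROCESS_ENTRY, TGID_OFFSET)   the proc_base_stuff[] entries
 *   tgid + TGID_OFFSET                   one slot per thread group
 *
 * Purely as an example, if FIRST_PROCESS_ENTRY were 256 and
 * proc_base_stuff[] held a single entry, pid 1 would be returned at
 * f_pos 258.
 */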
2267
2268 static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
2269 struct task_struct *task, int tgid)
2270 {
2271 char name[PROC_NUMBUF];
2272 int len = snprintf(name, sizeof(name), "%d", tgid);
2273 return proc_fill_cache(filp, dirent, filldir, name, len,
2274 proc_pid_instantiate, task, NULL);
2275 }
2276
2277 /* for the /proc/ directory itself, after non-process stuff has been done */
2278 int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
2279 {
2280 unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
2281 struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
2282 struct task_struct *task;
2283 int tgid;
2284
2285 if (!reaper)
2286 goto out_no_task;
2287
2288 for (; nr < ARRAY_SIZE(proc_base_stuff); filp->f_pos++, nr++) {
2289 const struct pid_entry *p = &proc_base_stuff[nr];
2290 if (proc_base_fill_cache(filp, dirent, filldir, reaper, p) < 0)
2291 goto out;
2292 }
2293
2294 tgid = filp->f_pos - TGID_OFFSET;
2295 for (task = next_tgid(tgid);
2296 task;
2297 put_task_struct(task), task = next_tgid(tgid + 1)) {
2298 tgid = task->pid;
2299 filp->f_pos = tgid + TGID_OFFSET;
2300 if (proc_pid_fill_cache(filp, dirent, filldir, task, tgid) < 0) {
2301 put_task_struct(task);
2302 goto out;
2303 }
2304 }
2305 filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET;
2306 out:
2307 put_task_struct(reaper);
2308 out_no_task:
2309 return 0;
2310 }
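/*
 * The resume logic above relies on f_pos alone: when proc_pid_fill_cache()
 * fails because the user buffer is full, f_pos is left at
 * tgid + TGID_OFFSET for the entry that did not fit, so the next
 * getdents() call re-enters with that tgid and next_tgid() retries the
 * same process first.  Once the scan completes, f_pos is parked at
 * PID_MAX_LIMIT + TGID_OFFSET and further calls find nothing to return.
 */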
2311
2312 /*
2313 * Tasks
2314 */
2315 static const struct pid_entry tid_base_stuff[] = {
2316 DIR("fd", S_IRUSR|S_IXUSR, fd),
2317 DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo),
2318 INF("environ", S_IRUSR, pid_environ),
2319 INF("auxv", S_IRUSR, pid_auxv),
2320 INF("status", S_IRUGO, pid_status),
2321 #ifdef CONFIG_SCHED_DEBUG
2322 REG("sched", S_IRUGO|S_IWUSR, pid_sched),
2323 #endif
2324 INF("cmdline", S_IRUGO, pid_cmdline),
2325 INF("stat", S_IRUGO, tid_stat),
2326 INF("statm", S_IRUGO, pid_statm),
2327 REG("maps", S_IRUGO, maps),
2328 #ifdef CONFIG_NUMA
2329 REG("numa_maps", S_IRUGO, numa_maps),
2330 #endif
2331 REG("mem", S_IRUSR|S_IWUSR, mem),
2332 #ifdef CONFIG_SECCOMP
2333 REG("seccomp", S_IRUSR|S_IWUSR, seccomp),
2334 #endif
2335 LNK("cwd", cwd),
2336 LNK("root", root),
2337 LNK("exe", exe),
2338 REG("mounts", S_IRUGO, mounts),
2339 #ifdef CONFIG_MMU
2340 REG("clear_refs", S_IWUSR, clear_refs),
2341 REG("smaps", S_IRUGO, smaps),
2342 #endif
2343 #ifdef CONFIG_SECURITY
2344 DIR("attr", S_IRUGO|S_IXUGO, attr_dir),
2345 #endif
2346 #ifdef CONFIG_KALLSYMS
2347 INF("wchan", S_IRUGO, pid_wchan),
2348 #endif
2349 #ifdef CONFIG_SCHEDSTATS
2350 INF("schedstat", S_IRUGO, pid_schedstat),
2351 #endif
2352 #ifdef CONFIG_CPUSETS
2353 REG("cpuset", S_IRUGO, cpuset),
2354 #endif
2355 INF("oom_score", S_IRUGO, oom_score),
2356 REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
2357 #ifdef CONFIG_AUDITSYSCALL
2358 REG("loginuid", S_IWUSR|S_IRUGO, loginuid),
2359 #endif
2360 #ifdef CONFIG_FAULT_INJECTION
2361 REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
2362 #endif
2363 };
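/*
 * tid_base_stuff[] mirrors tgid_base_stuff[] with a few per-thread
 * differences visible in the two tables: no nested "task" directory,
 * no "mountstats", no per-thread "io", and "stat" is backed by tid_stat
 * rather than tgid_stat.
 */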
2364
2365 static int proc_tid_base_readdir(struct file * filp,
2366 void * dirent, filldir_t filldir)
2367 {
2368 return proc_pident_readdir(filp,dirent,filldir,
2369 tid_base_stuff,ARRAY_SIZE(tid_base_stuff));
2370 }
2371
2372 static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
2373 return proc_pident_lookup(dir, dentry,
2374 tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
2375 }
2376
2377 static const struct file_operations proc_tid_base_operations = {
2378 .read = generic_read_dir,
2379 .readdir = proc_tid_base_readdir,
2380 };
2381
2382 static const struct inode_operations proc_tid_base_inode_operations = {
2383 .lookup = proc_tid_base_lookup,
2384 .getattr = pid_getattr,
2385 .setattr = proc_setattr,
2386 };
2387
2388 static struct dentry *proc_task_instantiate(struct inode *dir,
2389 struct dentry *dentry, struct task_struct *task, const void *ptr)
2390 {
2391 struct dentry *error = ERR_PTR(-ENOENT);
2392 struct inode *inode;
2393 inode = proc_pid_make_inode(dir->i_sb, task);
2394
2395 if (!inode)
2396 goto out;
2397 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
2398 inode->i_op = &proc_tid_base_inode_operations;
2399 inode->i_fop = &proc_tid_base_operations;
2400 inode->i_flags|=S_IMMUTABLE;
2401 inode->i_nlink = 4;
2402 #ifdef CONFIG_SECURITY
2403 inode->i_nlink += 1;
2404 #endif
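/*
 * As with /proc/<tgid>, the link count follows the "2 plus one per
 * subdirectory" convention: /proc/<tgid>/task/<tid> has only the fd and
 * fdinfo subdirectories, hence 4, plus attr when CONFIG_SECURITY is
 * enabled.
 */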
2405
2406 dentry->d_op = &pid_dentry_operations;
2407
2408 d_add(dentry, inode);
2409 /* Close the race of the process dying before we return the dentry */
2410 if (pid_revalidate(dentry, NULL))
2411 error = NULL;
2412 out:
2413 return error;
2414 }
2415
2416 static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
2417 {
2418 struct dentry *result = ERR_PTR(-ENOENT);
2419 struct task_struct *task;
2420 struct task_struct *leader = get_proc_task(dir);
2421 unsigned tid;
2422
2423 if (!leader)
2424 goto out_no_task;
2425
2426 tid = name_to_int(dentry);
2427 if (tid == ~0U)
2428 goto out;
2429
2430 rcu_read_lock();
2431 task = find_task_by_pid(tid);
2432 if (task)
2433 get_task_struct(task);
2434 rcu_read_unlock();
2435 if (!task)
2436 goto out;
2437 if (leader->tgid != task->tgid)
2438 goto out_drop_task;
2439
2440 result = proc_task_instantiate(dir, dentry, task, NULL);
2441 out_drop_task:
2442 put_task_struct(task);
2443 out:
2444 put_task_struct(leader);
2445 out_no_task:
2446 return result;
2447 }
2448
2449 /*
2450 * Find the first tid of a thread group to return to user space.
2451 *
2452 * Usually this is just the thread group leader, but if the user's
2453 * buffer was too small or there was a seek into the middle of the
2454 * directory, we have more work to do.
2455 *
2456 * In the case of a short read, we start with find_task_by_pid.
2457 *
2458 * In the case of a seek, we start with the leader and walk nr
2459 * threads past it.
2460 */
2461 static struct task_struct *first_tid(struct task_struct *leader,
2462 int tid, int nr)
2463 {
2464 struct task_struct *pos;
2465
2466 rcu_read_lock();
2467 /* Attempt to start with the pid of a thread */
2468 if (tid && (nr > 0)) {
2469 pos = find_task_by_pid(tid);
2470 if (pos && (pos->group_leader == leader))
2471 goto found;
2472 }
2473
2474 /* If nr exceeds the number of threads, there is nothing to do */
2475 pos = NULL;
2476 if (nr && nr >= get_nr_threads(leader))
2477 goto out;
2478
2479 /* If we haven't found our starting place yet, start
2480 * with the leader and walk nr threads forward.
2481 */
2482 for (pos = leader; nr > 0; --nr) {
2483 pos = next_thread(pos);
2484 if (pos == leader) {
2485 pos = NULL;
2486 goto out;
2487 }
2488 }
2489 found:
2490 get_task_struct(pos);
2491 out:
2492 rcu_read_unlock();
2493 return pos;
2494 }
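/*
 * proc_task_readdir() below calls first_tid(leader, tid, pos - 2)
 * because positions 0 and 1 are "." and "..".  So a seek to pos 5 with
 * no cached tid means nr == 3 and the loop above walks three threads
 * past the group leader, while a short read lets the cached tid from
 * f_version short-circuit the walk (provided that thread still belongs
 * to the same group).
 */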
2495
2496 /*
2497 * Find the next thread in the thread list.
2498 * Return NULL if there is an error or no next thread.
2499 *
2500 * The reference to the input task_struct is released.
2501 */
2502 static struct task_struct *next_tid(struct task_struct *start)
2503 {
2504 struct task_struct *pos = NULL;
2505 rcu_read_lock();
2506 if (pid_alive(start)) {
2507 pos = next_thread(start);
2508 if (thread_group_leader(pos))
2509 pos = NULL;
2510 else
2511 get_task_struct(pos);
2512 }
2513 rcu_read_unlock();
2514 put_task_struct(start);
2515 return pos;
2516 }
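/*
 * The thread list is circular with the group leader as its head, so
 * next_thread() returning a group leader means the whole group has been
 * walked; that is why thread_group_leader(pos) is the termination test
 * above.
 */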
2517
2518 static int proc_task_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
2519 struct task_struct *task, int tid)
2520 {
2521 char name[PROC_NUMBUF];
2522 int len = snprintf(name, sizeof(name), "%d", tid);
2523 return proc_fill_cache(filp, dirent, filldir, name, len,
2524 proc_task_instantiate, task, NULL);
2525 }
2526
2527 /* for the /proc/TGID/task/ directories */
2528 static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir)
2529 {
2530 struct dentry *dentry = filp->f_path.dentry;
2531 struct inode *inode = dentry->d_inode;
2532 struct task_struct *leader = NULL;
2533 struct task_struct *task;
2534 int retval = -ENOENT;
2535 ino_t ino;
2536 int tid;
2537 unsigned long pos = filp->f_pos; /* avoiding "long long" filp->f_pos */
2538
2539 task = get_proc_task(inode);
2540 if (!task)
2541 goto out_no_task;
2542 rcu_read_lock();
2543 if (pid_alive(task)) {
2544 leader = task->group_leader;
2545 get_task_struct(leader);
2546 }
2547 rcu_read_unlock();
2548 put_task_struct(task);
2549 if (!leader)
2550 goto out_no_task;
2551 retval = 0;
2552
2553 switch (pos) {
2554 case 0:
2555 ino = inode->i_ino;
2556 if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
2557 goto out;
2558 pos++;
2559 /* fall through */
2560 case 1:
2561 ino = parent_ino(dentry);
2562 if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
2563 goto out;
2564 pos++;
2565 /* fall through */
2566 }
2567
2568 /* f_version caches the tid value that the last readdir call couldn't
2569 * return. lseek (aka telldir) automagically resets f_version to 0.
2570 */
2571 tid = filp->f_version;
2572 filp->f_version = 0;
2573 for (task = first_tid(leader, tid, pos - 2);
2574 task;
2575 task = next_tid(task), pos++) {
2576 tid = task->pid;
2577 if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) {
2578 /* returning this tid failed, save it as the first
2579 * tid for the next readdir call */
2580 filp->f_version = tid;
2581 put_task_struct(task);
2582 break;
2583 }
2584 }
2585 out:
2586 filp->f_pos = pos;
2587 put_task_struct(leader);
2588 out_no_task:
2589 return retval;
2590 }
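/*
 * Example of the f_version trick: suppose the getdents() buffer fills
 * while returning tid 4711.  proc_task_fill_cache() fails, f_version is
 * set to 4711 and f_pos stays at that entry's position, so the next
 * getdents() call passes tid 4711 back into first_tid(), which restarts
 * the walk at that thread (if it is still in the group) instead of
 * counting forward from the group leader again.
 */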
2591
2592 static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
2593 {
2594 struct inode *inode = dentry->d_inode;
2595 struct task_struct *p = get_proc_task(inode);
2596 generic_fillattr(inode, stat);
2597
2598 if (p) {
2599 rcu_read_lock();
2600 stat->nlink += get_nr_threads(p);
2601 rcu_read_unlock();
2602 put_task_struct(p);
2603 }
2604
2605 return 0;
2606 }
2607
2608 static const struct inode_operations proc_task_inode_operations = {
2609 .lookup = proc_task_lookup,
2610 .getattr = proc_task_getattr,
2611 .setattr = proc_setattr,
2612 };
2613
2614 static const struct file_operations proc_task_operations = {
2615 .read = generic_read_dir,
2616 .readdir = proc_task_readdir,
2617 };