[PATCH] sysfs: change uses of f_{dentry, vfsmnt} to use f_path
fs/proc/task_mmu.c

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

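/*
 * task_mem - format the memory summary lines of /proc/<pid>/status
 * (VmPeak, VmSize, VmRSS, ...) into the caller-supplied buffer.
 *
 * The "<< (PAGE_SHIFT-10)" idiom converts a page count to kB: with
 * 4 kB pages (PAGE_SHIFT == 12) the left shift by 2 multiplies the
 * count by 4, i.e. by PAGE_SIZE / 1024.
 */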
char *task_mem(struct mm_struct *mm, char *buffer)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	buffer += sprintf(buffer,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
	return buffer;
}

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

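/*
 * task_statm - fill in the counters backing /proc/<pid>/statm.  Unlike
 * task_mem() above, every value here is in pages, not kB.
 */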
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}

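/*
 * proc_exe_link - back /proc/<pid>/exe: scan the task's vma list for
 * the first executable, file-backed mapping (VM_EXECUTABLE) and hand
 * back its vfsmount/dentry pair with references taken.
 */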
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_area_struct * vma;
	int result = -ENOENT;
	struct task_struct *task = get_proc_task(inode);
	struct mm_struct * mm = NULL;

	if (task) {
		mm = get_task_mm(task);
		put_task_struct(task);
	}
	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	vma = mm->mmap;
	while (vma) {
		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
			break;
		vma = vma->vm_next;
	}

	if (vma) {
		*mnt = mntget(vma->vm_file->f_vfsmnt);
		*dentry = dget(vma->vm_file->f_dentry);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}

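/*
 * pad_len_spaces - pad the maps line so mapping names start at a fixed
 * column; len is the number of characters already emitted, captured
 * via the %n conversion in show_map_internal() below.
 */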
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

struct mem_size_stats
{
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
};

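/*
 * show_map_internal - emit one /proc/<pid>/maps line for the vma, e.g.
 * (illustrative values only):
 *
 *	08048000-0804c000 r-xp 00000000 03:01 12345      /bin/cat
 *
 * When mss is non-NULL (the /proc/<pid>/smaps path), the Size/Rss/
 * Shared_*/Private_* breakdown gathered by the smaps walkers is
 * appended after the map line.
 */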
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			vma->vm_pgoff << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');

	if (mss)
		seq_printf(m,
			   "Size:          %8lu kB\n"
			   "Rss:           %8lu kB\n"
			   "Shared_Clean:  %8lu kB\n"
			   "Shared_Dirty:  %8lu kB\n"
			   "Private_Clean: %8lu kB\n"
			   "Private_Dirty: %8lu kB\n",
			   (vma->vm_end - vma->vm_start) >> 10,
			   mss->resident >> 10,
			   mss->shared_clean >> 10,
			   mss->shared_dirty >> 10,
			   mss->private_clean >> 10,
			   mss->private_dirty >> 10);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}

static int show_map(struct seq_file *m, void *v)
{
	return show_map_internal(m, v, NULL);
}

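/*
 * smaps_pte_range - accumulate statistics for one pmd's worth of ptes.
 * Every present page counts toward resident; a page is classed as
 * shared when it is mapped more than once (page_mapcount() >= 2) and
 * private otherwise, split by the pte dirty bit in either case.
 */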
static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		if (page_mapcount(page) >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
}

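/*
 * The three walkers below are the usual pgd -> pud -> pmd descent:
 * each level skips empty or bad entries and hands populated ranges to
 * the next level down, bottoming out in smaps_pte_range() above.
 */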
static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		smaps_pte_range(vma, pmd, addr, next, mss);
	} while (pmd++, addr = next, addr != end);
}

static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		smaps_pmd_range(vma, pud, addr, next, mss);
	} while (pud++, addr = next, addr != end);
}

static inline void smaps_pgd_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		smaps_pud_range(vma, pgd, addr, next, mss);
	} while (pgd++, addr = next, addr != end);
}

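/*
 * show_smap - the /proc/<pid>/smaps variant of show_map(): walk the
 * vma's page tables first (hugetlb vmas are skipped), then print the
 * map line together with the collected statistics.
 */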
static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;

	memset(&mss, 0, sizeof mss);
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
	return show_map_internal(m, v, &mss);
}

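/*
 * seq_file iterator for maps/smaps/numa_maps.  m_start() pins the task
 * and its mm and takes mmap_sem for reading; vma_stop() drops them
 * once the walk moves off a real vma.  m->version remembers the last
 * address shown, so a subsequent read can restart via find_vma()
 * rather than rescanning the vma list from the head.
 */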
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = get_task_mm(priv->task);
	if (!mm)
		return NULL;

	priv->tail_vma = tail_vma = get_gate_vma(priv->task);
	down_read(&mm->mmap_sem);

	/* Start with last addr hint */
	if (last_addr && (vma = find_vma(mm, last_addr))) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

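/*
 * vma_stop - drop mmap_sem and the mm reference taken in m_start().
 * The tail (gate) vma is exempt: by the time m_start() returns it,
 * the lock and reference have already been released.
 */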
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

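/*
 * do_maps_open - common open path: allocate the per-open
 * proc_maps_private, record which pid is being inspected, and attach
 * the allocation as the seq_file's private data so that
 * seq_release_private() frees it on close.
 */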
static int do_maps_open(struct inode *inode, struct file *file,
			struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
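
For context, a minimal userspace sketch (not part of the kernel file above) that consumes the interface these seq_operations implement; it is plain stdio, nothing here is kernel API:

#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f) {
		perror("fopen /proc/self/maps");
		return 1;
	}
	/* Each line is one vma, formatted by show_map_internal() above. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}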