#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap, ptes, pmds;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
	pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmPMD:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		ptes >> 10,
		pmds >> 10,
		swap << (PAGE_SHIFT-10));
}
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}
#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}

static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}

static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif
static void vma_stop(struct proc_maps_private *priv)
{
	struct mm_struct *mm = priv->mm;

	release_task_mempolicy(priv);
	up_read(&mm->mmap_sem);
	mmput(mm);
}

static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma == priv->tail_vma)
		return NULL;
	return vma->vm_next ?: priv->tail_vma;
}

static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	if (m->count < m->size) /* vma is copied successfully */
		m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL;
}
static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned int pos = *ppos;

	/* See m_cache_vma(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return NULL;

	down_read(&mm->mmap_sem);
	hold_task_mempolicy(priv);
	priv->tail_vma = get_gate_vma(mm);

	if (last_addr) {
		vma = find_vma(mm, last_addr);
		if (vma && (vma = m_next_vma(priv, vma)))
			return vma;
	}

	m->version = 0;
	if (pos < mm->map_count) {
		for (vma = mm->mmap; pos; pos--) {
			m->version = vma->vm_start;
			vma = vma->vm_next;
		}
		return vma;
	}

	/* we do not bother to update m->version in this case */
	if (pos == mm->map_count && priv->tail_vma)
		return priv->tail_vma;

	vma_stop(priv);
	return NULL;
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *next;

	(*pos)++;
	next = m_next_vma(priv, v);
	if (!next)
		vma_stop(priv);
	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(v))
		vma_stop(priv);
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}
static int proc_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}
static pid_t pid_of_stack(struct proc_maps_private *priv,
				struct vm_area_struct *vma, bool is_pid)
{
	struct inode *inode = priv->inode;
	struct task_struct *task;
	pid_t ret = 0;

	rcu_read_lock();
	task = pid_task(proc_pid(inode), PIDTYPE_PID);
	if (task) {
		task = task_of_stack(task, vma, is_pid);
		if (task)
			ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
	}
	rcu_read_unlock();

	return ret;
}
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "\n");
		goto done;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = pid_of_stack(priv, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				seq_pad(m, ' ');
				seq_printf(m, "[stack:%d]", tid);
			}
		}
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}
static int show_map(struct seq_file *m, void *v, int is_pid)
{
	show_map_vma(m, v, is_pid);
	m_cache_vma(m, v);
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};
/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *	- 1M 3-user-pages add up to 8KB errors;
 *	- supports mapcount up to 2^24, or 16M;
 *	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
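/*
 * Worked example of the fixed-point scheme above (illustrative, assuming
 * a 4K page): a page shared by three processes contributes
 * (4096 << PSS_SHIFT) / 3 = 5592405 to pss, which reads back as
 * 5592405 >> PSS_SHIFT = 1365 bytes, so the rounding error per shared
 * page stays below one byte of the reported value.
 */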
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	u64 pss;
	u64 swap_pss;
};
static void smaps_account(struct mem_size_stats *mss, struct page *page,
		unsigned long size, bool young, bool dirty)
{
	int mapcount;

	if (PageAnon(page))
		mss->anonymous += size;

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || page_is_young(page) || PageReferenced(page))
		mss->referenced += size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		u64 pss_delta;

		if (dirty || PageDirty(page))
			mss->shared_dirty += size;
		else
			mss->shared_clean += size;
		pss_delta = (u64)size << PSS_SHIFT;
		do_div(pss_delta, mapcount);
		mss->pss += pss_delta;
	} else {
		if (dirty || PageDirty(page))
			mss->private_dirty += size;
		else
			mss->private_clean += size;
		mss->pss += (u64)size << PSS_SHIFT;
	}
}
static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (!non_swap_entry(swpent)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(swpent);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	}

	if (!page)
		return;
	smaps_account(mss, page, PAGE_SIZE, pte_young(*pte), pte_dirty(*pte));
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page;

	/* FOLL_DUMP will return -EFAULT on huge zero page */
	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
	if (IS_ERR_OR_NULL(page))
		return;
	mss->anonymous_thp += HPAGE_PMD_SIZE;
	smaps_account(mss, page, HPAGE_PMD_SIZE,
			pmd_young(*pmd), pmd_dirty(*pmd));
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
}
#endif
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		smaps_pmd_entry(pmd, addr, walk);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}
static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case if we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
#ifdef CONFIG_X86_INTEL_MPX
		[ilog2(VM_MPX)]		= "mp",
#endif
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
		[ilog2(VM_UFFD_MISSING)]= "um",
		[ilog2(VM_UFFD_WP)]	= "uw",
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}
static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	/* mmap_sem is held in m_start */
	walk_page_vma(vma, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "SwapPss:        %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	show_smap_vma_flags(m, vma);
	m_cache_vma(m, vma);
	return 0;
}
static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};
enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_MM_HIWATER_RSS,
	CLEAR_REFS_LAST,
};

struct clear_refs_private {
	enum clear_refs_types type;
};
#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
	/*
	 * The soft-dirty tracker uses #PF-s to catch writes
	 * to pages, so write-protect the pte as well. See the
	 * Documentation/vm/soft-dirty.txt for full description
	 * of how soft-dirty works.
	 */
	pte_t ptent = *pte;

	if (pte_present(ptent)) {
		ptent = pte_wrprotect(ptent);
		ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
	}

	set_pte_at(vma->vm_mm, addr, pte, ptent);
}

static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmd_wrprotect(pmd);
	pmd = pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);

	if (vma->vm_flags & VM_SOFTDIRTY)
		vma->vm_flags &= ~VM_SOFTDIRTY;

	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}

#else

static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
}

static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
}
#endif
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty_pmd(vma, addr, pmd);
			goto out;
		}

		page = pmd_page(*pmd);

		/* Clear accessed and referenced bits. */
		pmdp_test_and_clear_young(vma, addr, pmd);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
out:
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
static int clear_refs_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	/*
	 * Writing 1 to /proc/pid/clear_refs affects all pages.
	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
	 * Writing 4 to /proc/pid/clear_refs affects all pages.
	 */
	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
		return 1;
	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
		return 1;
	return 0;
}
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	int itype;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct clear_refs_private cp = {
			.type = type,
		};
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.test_walk = clear_refs_test_walk,
			.mm = mm,
			.private = &cp,
		};

		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			/*
			 * Writing 5 to /proc/pid/clear_refs resets the peak
			 * resident set size to this mm's current rss value.
			 */
			down_write(&mm->mmap_sem);
			reset_mm_hiwater_rss(mm);
			up_write(&mm->mmap_sem);
			goto out_mm;
		}

		down_read(&mm->mmap_sem);
		if (type == CLEAR_REFS_SOFT_DIRTY) {
			for (vma = mm->mmap; vma; vma = vma->vm_next) {
				if (!(vma->vm_flags & VM_SOFTDIRTY))
					continue;
				up_read(&mm->mmap_sem);
				down_write(&mm->mmap_sem);
				for (vma = mm->mmap; vma; vma = vma->vm_next) {
					vma->vm_flags &= ~VM_SOFTDIRTY;
					vma_set_page_prot(vma);
				}
				downgrade_write(&mm->mmap_sem);
				break;
			}
			mmu_notifier_invalidate_range_start(mm, 0, -1);
		}
		walk_page_range(0, ~0UL, &clear_refs_walk);
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_end(mm, 0, -1);
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
out_mm:
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};
typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
	bool show_pfn;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
#define PM_PFRAME_BITS		55
#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY		BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
#define PM_FILE			BIT_ULL(61)
#define PM_SWAP			BIT_ULL(62)
#define PM_PRESENT		BIT_ULL(63)

#define PM_END_OF_BUFFER	1
static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
{
	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}
static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr = start;
	int err = 0;

	while (addr < end) {
		struct vm_area_struct *vma = find_vma(walk->mm, addr);
		pagemap_entry_t pme = make_pme(0, 0);
		/* End of address space hole, which we mark as non-present. */
		unsigned long hole_end;

		if (vma)
			hole_end = min(end, vma->vm_start);
		else
			hole_end = end;

		for (; addr < hole_end; addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}

		if (!vma)
			break;

		/* Addresses in the VMA. */
		if (vma->vm_flags & VM_SOFTDIRTY)
			pme = make_pme(0, PM_SOFT_DIRTY);
		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}
	}
out:
	return err;
}
static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame = 0, flags = 0;
	struct page *page = NULL;

	if (pte_present(pte)) {
		if (pm->show_pfn)
			frame = pte_pfn(pte);
		flags |= PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
		if (pte_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry;
		if (pte_swp_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
		entry = pte_to_swp_entry(pte);
		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags |= PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;
	if (page && page_mapcount(page) == 1)
		flags |= PM_MMAP_EXCLUSIVE;
	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	return make_pme(frame, flags);
}
static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct pagemapread *pm = walk->private;
	spinlock_t *ptl;
	pte_t *pte, *orig_pte;
	int err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge_lock(pmdp, vma, &ptl) == 1) {
		u64 flags = 0, frame = 0;
		pmd_t pmd = *pmdp;

		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
			flags |= PM_SOFT_DIRTY;

		/*
		 * Currently pmd for thp is always present because thp
		 * can not be swapped-out, migrated, or HWPOISONed
		 * (split in such cases instead.)
		 * This if-check is just to prepare for future implementation.
		 */
		if (pmd_present(pmd)) {
			struct page *page = pmd_page(pmd);

			if (page_mapcount(page) == 1)
				flags |= PM_MMAP_EXCLUSIVE;

			flags |= PM_PRESENT;
			if (pm->show_pfn)
				frame = pmd_pfn(pmd) +
					((addr & ~PMD_MASK) >> PAGE_SHIFT);
		}

		for (; addr != end; addr += PAGE_SIZE) {
			pagemap_entry_t pme = make_pme(frame, flags);

			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
			if (pm->show_pfn && (flags & PM_PRESENT))
				frame++;
		}
		spin_unlock(ptl);
		return err;
	}

	if (pmd_trans_unstable(pmdp))
		return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/*
	 * We can assume that @vma always points to a valid one and @end never
	 * goes beyond vma->vm_end.
	 */
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		pagemap_entry_t pme;

		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	pte_unmap_unlock(orig_pte, ptl);

	cond_resched();

	return err;
}
#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	struct vm_area_struct *vma = walk->vma;
	u64 flags = 0, frame = 0;
	int err = 0;
	pte_t pte;

	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	pte = huge_ptep_get(ptep);
	if (pte_present(pte)) {
		struct page *page = pte_page(pte);

		if (!PageAnon(page))
			flags |= PM_FILE;

		if (page_mapcount(page) == 1)
			flags |= PM_MMAP_EXCLUSIVE;

		flags |= PM_PRESENT;
		if (pm->show_pfn)
			frame = pte_pfn(pte) +
				((addr & ~hmask) >> PAGE_SHIFT);
	}

	for (; addr != end; addr += PAGE_SIZE) {
		pagemap_entry_t pme = make_pme(frame, flags);

		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
		if (pm->show_pfn && (flags & PM_PRESENT))
			frame++;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */
/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
 * Bit  56    page exclusively mapped
 * Bits 57-60 zero
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap.  Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
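/*
 * Illustrative sketch of a user-space reader, assuming only the layout
 * documented above (variable names here are hypothetical):
 *
 *	uint64_t entry;
 *	off_t off = (vaddr / page_size) * 8;	// one 8-byte entry per page
 *	pread(pagemap_fd, &entry, sizeof(entry), off);
 *	int present   = (entry >> 63) & 1;
 *	int swapped   = (entry >> 62) & 1;
 *	int softdirty = (entry >> 55) & 1;
 *	uint64_t pfn  = entry & ((1ULL << 55) - 1);	// meaningful only if present
 */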
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct mm_struct *mm = file->private_data;
	struct pagemapread pm;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int ret = 0, copied = 0;

	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_mm;

	ret = 0;
	if (!count)
		goto out_mm;

	/* do not disclose physical addresses: attack vector */
	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);

	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	pagemap_walk.pmd_entry = pagemap_pmd_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = mm->task_size;

	/* watch out for wraparound */
	if (svpfn > mm->task_size >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out:
	return ret;
}
static int pagemap_open(struct inode *inode, struct file *file)
{
	struct mm_struct *mm;

	mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(mm))
		return PTR_ERR(mm);
	file->private_data = mm;
	return 0;
}

static int pagemap_release(struct inode *inode, struct file *file)
{
	struct mm_struct *mm = file->private_data;

	if (mm)
		mmdrop(mm);
	return 0;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
	.release	= pagemap_release,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
#ifdef CONFIG_NUMA

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};
static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}
static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}
static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md = walk->private;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		pte_t huge_pte = *(pte_t *)pmd;
		struct page *page;

		page = can_gather_numa_stats(huge_pte, vma, addr);
		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (!pte_present(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}
#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif
/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {
		.hugetlb_entry = gather_hugetlb_stats,
		.pmd_entry = gather_pte_stats,
		.private = md,
		.mm = mm,
	};
	struct mempolicy *pol;
	char buffer[64];
	int nid;

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	pol = __get_vma_policy(vma, vma->vm_start);
	if (pol) {
		mpol_to_str(buffer, sizeof(buffer), pol);
		mpol_cond_put(pol);
	} else {
		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
	}

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_puts(m, " file=");
		seq_file_path(m, file, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_puts(m, " heap");
	} else {
		pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_puts(m, " stack");
			else
				seq_printf(m, " stack:%d", tid);
		}
	}

	if (is_vm_hugetlb_page(vma))
		seq_puts(m, " huge");

	/* mmap_sem is held by m_start */
	walk_page_vma(vma, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(nid, N_MEMORY)
		if (md->node[nid])
			seq_printf(m, " N%d=%lu", nid, md->node[nid]);

	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
	seq_putc(m, '\n');
	m_cache_vma(m, vma);
	return 0;
}
static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start   = m_start,
	.next    = m_next,
	.stop    = m_stop,
	.show    = show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start   = m_start,
	.next    = m_next,
	.stop    = m_stop,
	.show    = show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct numa_maps_private));
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};
#endif /* CONFIG_NUMA */