#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE * sizeof(pte_t) *
		 atomic_long_read(&mm->nr_ptes)) >> 10,
		swap << (PAGE_SHIFT-10));
}

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

/*
 * These functions are for numa_maps but called in generic **maps seq_file
 * ->start(), ->stop() ops.
 *
 * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
 * Each mempolicy object is controlled by reference counting. The problem here
 * is how to avoid accessing a dead mempolicy object.
 *
 * Because we're holding mmap_sem while reading the seq_file, it's safe to
 * access each vma's mempolicy: no vma will drop its reference to the
 * mempolicy while we hold the lock.
 *
 * A task's mempolicy (task->mempolicy) has different behavior. task->mempolicy
 * is set and replaced under mmap_sem but unrefed and cleared under task_lock().
 * So, without task_lock(), we cannot trust get_vma_policy() because we cannot
 * guarantee the task never exits under us. But taking task_lock() around
 * get_vma_policy() causes a lock order problem.
 *
 * To access task->mempolicy without a lock, we hold a reference count on the
 * object pointed to by task->mempolicy and remember it. This guarantees
 * that task->mempolicy points to an alive object or NULL in numa_maps accesses.
 */

#ifdef CONFIG_NUMA
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = task->mempolicy;
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}

static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}

static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		release_task_mempolicy(priv);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * vmacache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_access(priv->task, PTRACE_MODE_READ);
	if (!mm || IS_ERR(mm))
		return mm;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task->mm);
	priv->tail_vma = tail_vma;
	hold_task_mempolicy(priv);
	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	release_task_mempolicy(priv);
	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma : NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	if (!IS_ERR(vma))
		vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		seq_path(m, &file->f_path, "\n");
		goto done;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = vm_is_stack(task, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				seq_pad(m, ' ');
				seq_printf(m, "[stack:%d]", tid);
			}
		}
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma, is_pid);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
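
/*
 * A worked example of the accounting described above, assuming
 * PSS_SHIFT == 12 and a 4K page size: a page mapped only by this
 * process contributes a full page to pss, a page shared with one other
 * process contributes half a page.
 *
 *	u64 pss = 0;
 *	pss += (4096ULL << PSS_SHIFT);		mapcount == 1, worth 4096 bytes
 *	pss += (4096ULL << PSS_SHIFT) / 2;	mapcount == 2, worth 2048 bytes
 *	pss >> PSS_SHIFT			yields 6144 bytes of PSS
 */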
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	unsigned long nonlinear;
	u64 pss;
};

static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pgoff_t pgoff = linear_page_index(vma, addr);
	struct page *page = NULL;
	int mapcount;

	if (pte_present(ptent)) {
		page = vm_normal_page(vma, addr, ptent);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (!non_swap_entry(swpent))
			mss->swap += ptent_size;
		else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	} else if (pte_file(ptent)) {
		if (pte_to_pgoff(ptent) != pgoff)
			mss->nonlinear += ptent_size;
	}

	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	if (page->index != pgoff)
		mss->nonlinear += ptent_size;

	mss->resident += ptent_size;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
		spin_unlock(ptl);
		mss->anonymous_thp += HPAGE_PMD_SIZE;
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * for us.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_NONLINEAR)]	= "nl",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (vma->vm_flags & VM_NONLINEAR)
		seq_printf(m, "Nonlinear:      %8lu kB\n",
			   mss.nonlinear >> 10);

	show_smap_vma_flags(m, vma);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * We do not want to have constant page-shift bits sitting in
 * pagemap entries and are about to reuse them some time soon.
 *
 * Here's the "migration strategy":
 * 1. when the system boots these bits remain what they are,
 *    but a warning about future change is printed in the log;
 * 2. once anyone clears soft-dirty bits via the clear_refs file,
 *    this flag is set to denote that the user is aware of the
 *    new API and those page-shift bits change their meaning.
 *    The respective warning is printed in dmesg;
 * 3. in a couple of releases we will remove all the mentions
 *    of page-shift in pagemap entries.
 */

static bool soft_dirty_cleared __read_mostly;
enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_LAST,
};

struct clear_refs_private {
	struct vm_area_struct *vma;
	enum clear_refs_types type;
};

static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
	/*
	 * The soft-dirty tracker uses #PF-s to catch writes
	 * to pages, so write-protect the pte as well. See the
	 * Documentation/vm/soft-dirty.txt for a full description
	 * of how soft-dirty works.
	 */
	pte_t ptent = *pte;

	if (pte_present(ptent)) {
		ptent = pte_wrprotect(ptent);
		ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
	} else if (pte_file(ptent)) {
		ptent = pte_file_clear_soft_dirty(ptent);
	}

	set_pte_at(vma->vm_mm, addr, pte, ptent);
#endif
}

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = cp->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(vma, addr, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	int itype;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	if (type == CLEAR_REFS_SOFT_DIRTY) {
		soft_dirty_cleared = true;
		pr_warn_once("The pagemap bits 55-60 have changed their meaning!"
			     " See the linux/Documentation/vm/pagemap.txt for "
			     "details.\n");
	}

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct clear_refs_private cp = {
			.type = type,
		};
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
			.private = &cp,
		};
		down_read(&mm->mmap_sem);
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_start(mm, 0, -1);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			cp.vma = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 *
			 * Writing 4 to /proc/pid/clear_refs affects all pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			if (type == CLEAR_REFS_SOFT_DIRTY) {
				if (vma->vm_flags & VM_SOFTDIRTY)
					vma->vm_flags &= ~VM_SOFTDIRTY;
			}
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_end(mm, 0, -1);
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
	bool v2;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define __PM_PSHIFT(x)      (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)
/* in "new" pagemap pshift bits are occupied with more status bits */
#define PM_STATUS2(v2, x)   (__PM_PSHIFT(v2 ? x : PAGE_SHIFT))

#define __PM_SOFT_DIRTY     (1LL)
#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_FILE             PM_STATUS(1LL)
#define PM_NOT_PRESENT(v2)  PM_STATUS2(v2, 0)
#define PM_END_OF_BUFFER    1

static inline pagemap_entry_t make_pme(u64 val)
{
	return (pagemap_entry_t) { .pme = val };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr = start;
	int err = 0;

	while (addr < end) {
		struct vm_area_struct *vma = find_vma(walk->mm, addr);
		pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
		unsigned long vm_end;

		if (!vma) {
			vm_end = end;
		} else {
			vm_end = min(end, vma->vm_end);
			if (vma->vm_flags & VM_SOFTDIRTY)
				pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY);
		}

		for (; addr < vm_end; addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}
	}

out:
	return err;
}

static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame, flags;
	struct page *page = NULL;
	int flags2 = 0;

	if (pte_present(pte)) {
		frame = pte_pfn(pte);
		flags = PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
		if (pte_soft_dirty(pte))
			flags2 |= __PM_SOFT_DIRTY;
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry;
		if (pte_swp_soft_dirty(pte))
			flags2 |= __PM_SOFT_DIRTY;
		entry = pte_to_swp_entry(pte);
		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags = PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	} else {
		if (vma->vm_flags & VM_SOFTDIRTY)
			flags2 |= __PM_SOFT_DIRTY;
		*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
		return;
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;
	if ((vma->vm_flags & VM_SOFTDIRTY))
		flags2 |= __PM_SOFT_DIRTY;

	*pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
		pmd_t pmd, int offset, int pmd_flags2)
{
	/*
	 * Currently pmd for thp is always present because thp can not be
	 * swapped-out, migrated, or HWPOISONed (split in such cases instead.)
	 * This if-check is just to prepare for future implementation.
	 */
	if (pmd_present(pmd))
		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
				| PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2));
}
#else
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
		pmd_t pmd, int offset, int pmd_flags2)
{
}
#endif

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	spinlock_t *ptl;
	pte_t *pte;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		int pmd_flags2;

		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
			pmd_flags2 = __PM_SOFT_DIRTY;
		else
			pmd_flags2 = 0;

		for (; addr != end; addr += PAGE_SIZE) {
			unsigned long offset;

			offset = (addr & ~PAGEMAP_WALK_MASK) >>
					PAGE_SHIFT;
			thp_pmd_to_pagemap_entry(&pme, pm, *pmd, offset, pmd_flags2);
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
		}
		spin_unlock(ptl);
		return err;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	for (; addr != end; addr += PAGE_SIZE) {
		int flags2;

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end)) {
			vma = find_vma(walk->mm, addr);
			if (vma && (vma->vm_flags & VM_SOFTDIRTY))
				flags2 = __PM_SOFT_DIRTY;
			else
				flags2 = 0;
			pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
		}

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

1085 static void huge_pte_to_pagemap_entry(pagemap_entry_t
*pme
, struct pagemapread
*pm
,
1086 pte_t pte
, int offset
, int flags2
)
1088 if (pte_present(pte
))
1089 *pme
= make_pme(PM_PFRAME(pte_pfn(pte
) + offset
) |
1090 PM_STATUS2(pm
->v2
, flags2
) |
1093 *pme
= make_pme(PM_NOT_PRESENT(pm
->v2
) |
1094 PM_STATUS2(pm
->v2
, flags2
));
1097 /* This function walks within one hugetlb entry in the single call */
1098 static int pagemap_hugetlb_range(pte_t
*pte
, unsigned long hmask
,
1099 unsigned long addr
, unsigned long end
,
1100 struct mm_walk
*walk
)
1102 struct pagemapread
*pm
= walk
->private;
1103 struct vm_area_struct
*vma
;
1106 pagemap_entry_t pme
;
1108 vma
= find_vma(walk
->mm
, addr
);
1111 if (vma
&& (vma
->vm_flags
& VM_SOFTDIRTY
))
1112 flags2
= __PM_SOFT_DIRTY
;
1116 for (; addr
!= end
; addr
+= PAGE_SIZE
) {
1117 int offset
= (addr
& ~hmask
) >> PAGE_SHIFT
;
1118 huge_pte_to_pagemap_entry(&pme
, pm
, *pte
, offset
, flags2
);
1119 err
= add_to_pagemap(addr
, &pme
, pm
);
1128 #endif /* HUGETLB_PAGE */
/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
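
/*
 * A minimal sketch of decoding one entry on the reader side, using the
 * bit layout documented above (each 64-bit entry sits at offset
 * virtual_pfn * 8 in /proc/pid/pagemap):
 *
 *	uint64_t ent;				one entry read from the file
 *	int present = (ent >> 63) & 1;
 *	int swapped = (ent >> 62) & 1;
 *	uint64_t pfn = present ? (ent & ((1ULL << 55) - 1)) : 0;
 *	if (swapped) {
 *		int swap_type      =  ent       & 0x1f;
 *		uint64_t swap_off  = (ent >> 5) & ((1ULL << 50) - 1);
 *	}
 */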
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	ssize_t copied = 0;

	if (!task)
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;
	if (!count)
		goto out_task;

	pm.v2 = soft_dirty_cleared;
	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_task;

	mm = mm_access(task, PTRACE_MODE_READ);
	ret = PTR_ERR(mm);
	if (!mm || IS_ERR(mm))
		goto out_free;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_mm;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_mm:
	mmput(mm);
out_free:
	kfree(pm.buffer);
out_task:
	put_task_struct(task);
out:
	return ret;
}

static int pagemap_open(struct inode *inode, struct file *file)
{
	pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
			"to stop being page-shift some time soon. See the "
			"linux/Documentation/vm/pagemap.txt for details.\n");
	return 0;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	struct vm_area_struct *vma;
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	md = walk->private;

	if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) {
		pte_t huge_pte = *(pte_t *)pmd;
		struct page *page;

		page = can_gather_numa_stats(huge_pte, md->vma, addr);
		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (!pte_present(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct task_struct *task = proc_priv->task;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {};
	struct mempolicy *pol;
	char buffer[64];
	int nid;

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	md->vma = vma;

	walk.hugetlb_entry = gather_hugetbl_stats;
	walk.pmd_entry = gather_pte_stats;
	walk.private = md;
	walk.mm = mm;

	pol = get_vma_policy(task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol);
	mpol_cond_put(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_puts(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_puts(m, " heap");
	} else {
		pid_t tid = vm_is_stack(task, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_puts(m, " stack");
			else
				seq_printf(m, " stack:%d", tid);
		}
	}

	if (is_vm_hugetlb_page(vma))
		seq_puts(m, " huge");

	walk_page_range(vma->vm_start, vma->vm_end, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(nid, N_MEMORY)
		if (md->node[nid])
			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
out:
	seq_putc(m, '\n');

	if (m->count < m->size)
		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}

*m
, void *v
)
1497 return show_numa_map(m
, v
, 1);
1500 static int show_tid_numa_map(struct seq_file
*m
, void *v
)
1502 return show_numa_map(m
, v
, 0);
static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	struct numa_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->proc_maps.pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif /* CONFIG_NUMA */