mm, shmem: add internal shmem resident memory accounting
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 187b3b5f242ef946658751d15580a6c7e6ac85d6..45eb24145978c661ef8214481c8376be6d13186d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -14,6 +14,7 @@
 #include <linux/swapops.h>
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
+#include <linux/shmem_fs.h>
 
 #include <asm/elf.h>
 #include <asm/uaccess.h>
@@ -82,7 +83,8 @@ unsigned long task_statm(struct mm_struct *mm,
                         unsigned long *shared, unsigned long *text,
                         unsigned long *data, unsigned long *resident)
 {
-       *shared = get_mm_counter(mm, MM_FILEPAGES);
+       *shared = get_mm_counter(mm, MM_FILEPAGES) +
+                       get_mm_counter(mm, MM_SHMEMPAGES);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
        *data = mm->total_vm - mm->shared_vm;
@@ -451,6 +453,7 @@ struct mem_size_stats {
        unsigned long private_hugetlb;
        u64 pss;
        u64 swap_pss;
+       bool check_shmem_swap;
 };
 
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
@@ -485,6 +488,19 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
        }
 }
 
+#ifdef CONFIG_SHMEM
+static int smaps_pte_hole(unsigned long addr, unsigned long end,
+               struct mm_walk *walk)
+{
+       struct mem_size_stats *mss = walk->private;
+
+       mss->swap += shmem_partial_swap_usage(
+                       walk->vma->vm_file->f_mapping, addr, end);
+
+       return 0;
+}
+#endif
+
 static void smaps_pte_entry(pte_t *pte, unsigned long addr,
                struct mm_walk *walk)
 {
@@ -512,6 +528,19 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
                        }
                } else if (is_migration_entry(swpent))
                        page = migration_entry_to_page(swpent);
+       } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
+                                                       && pte_none(*pte))) {
+               page = find_get_entry(vma->vm_file->f_mapping,
+                                               linear_page_index(vma, addr));
+               if (!page)
+                       return;
+
+               if (radix_tree_exceptional_entry(page))
+                       mss->swap += PAGE_SIZE;
+               else
+                       page_cache_release(page);
+
+               return;
        }
 
        if (!page)
@@ -671,6 +700,31 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
        };
 
        memset(&mss, 0, sizeof mss);
+
+#ifdef CONFIG_SHMEM
+       if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
+               /*
+                * For shared or readonly shmem mappings we know that all
+                * swapped out pages belong to the shmem object, and we can
+                * obtain the swap value much more efficiently. For private
+                * writable mappings, we might have COW pages that are
+                * not affected by the parent swapped out pages of the shmem
+                * object, so we have to distinguish them during the page walk.
+                * Unless we know that the shmem object (or the part mapped by
+                * our VMA) has no swapped out pages at all.
+                */
+               unsigned long shmem_swapped = shmem_swap_usage(vma);
+
+               if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
+                                       !(vma->vm_flags & VM_WRITE)) {
+                       mss.swap = shmem_swapped;
+               } else {
+                       mss.check_shmem_swap = true;
+                       smaps_walk.pte_hole = smaps_pte_hole;
+               }
+       }
+#endif
+
        /* mmap_sem is held in m_start */
        walk_page_vma(vma, &smaps_walk);
 