#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/page_ext.h>
#include <linux/page_idle.h>

#define BITMAP_CHUNK_SIZE	sizeof(u64)
#define BITMAP_CHUNK_BITS	(BITMAP_CHUNK_SIZE * BITS_PER_BYTE)
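
/*
 * The idle bitmap is exposed to userspace in BITMAP_CHUNK_SIZE (8 byte)
 * chunks, each chunk covering BITMAP_CHUNK_BITS (64) consecutive page
 * frames, so reads and writes must be 8-byte aligned multiples of 8 bytes.
 */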
/*
 * Idle page tracking only considers user memory pages, for other types of
 * pages the idle flag is always unset and an attempt to set it is silently
 * ignored.
 *
 * We treat a page as a user memory page if it is on an LRU list, because it is
 * always safe to pass such a page to rmap_walk(), which is essential for idle
 * page tracking. With such an indicator of user pages we can skip isolated
 * pages, but since there are not usually many of them, it will hardly affect
 * the overall result.
 *
 * This function tries to get a user memory page by pfn as described above.
 */
static struct page *page_idle_get_page(unsigned long pfn)
{
	struct page *page;
	struct zone *zone;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (!page || !PageLRU(page) ||
	    !get_page_unless_zero(page))
		return NULL;

	zone = page_zone(page);
	spin_lock_irq(&zone->lru_lock);
	if (unlikely(!PageLRU(page))) {
		put_page(page);
		page = NULL;
	}
	spin_unlock_irq(&zone->lru_lock);
	return page;
}
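
/*
 * rmap_one callback: clear the accessed bit in the pte (or pmd, for a huge
 * page) mapping @page in @vma, and transfer any reference found into the
 * page's young/idle flags.
 */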
static int page_idle_clear_pte_refs_one(struct page *page,
					struct vm_area_struct *vma,
					unsigned long addr, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	bool referenced = false;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return SWAP_AGAIN;
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return SWAP_AGAIN;
	pmd = pmd_offset(pud, addr);

	if (pmd_trans_huge(*pmd)) {
		ptl = pmd_lock(mm, pmd);
		if (!pmd_present(*pmd))
			goto unlock_pmd;
		if (unlikely(!pmd_trans_huge(*pmd))) {
			spin_unlock(ptl);
			goto map_pte;
		}

		if (pmd_page(*pmd) != page)
			goto unlock_pmd;

		referenced = pmdp_clear_young_notify(vma, addr, pmd);
		spin_unlock(ptl);
		goto found;
unlock_pmd:
		spin_unlock(ptl);
		return SWAP_AGAIN;
	} else {
		pmd_t pmde = *pmd;

		barrier();
		if (!pmd_present(pmde) || pmd_trans_huge(pmde))
			return SWAP_AGAIN;
	}
map_pte:
	pte = pte_offset_map(pmd, addr);
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return SWAP_AGAIN;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);

	if (!pte_present(*pte)) {
		pte_unmap_unlock(pte, ptl);
		return SWAP_AGAIN;
	}

	/* THP can be referenced by any subpage */
	if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) {
		pte_unmap_unlock(pte, ptl);
		return SWAP_AGAIN;
	}

	referenced = ptep_clear_young_notify(vma, addr, pte);
	pte_unmap_unlock(pte, ptl);
found:
	if (referenced) {
		clear_page_idle(page);
		/*
		 * We cleared the referenced bit in a mapping to this page. To
		 * avoid interference with page reclaim, mark it young so that
		 * page_referenced() will return > 0.
		 */
		set_page_young(page);
	}
	return SWAP_AGAIN;
}
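
/*
 * Clear the accessed bit in all mappings of @page via rmap_walk(), so that
 * stale pte/pmd references are folded into the page's young/idle flags
 * before the idle flag is tested or set.
 */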
static void page_idle_clear_pte_refs(struct page *page)
{
	/*
	 * Since rwc.arg is unused, rwc is effectively immutable, so we
	 * can make it static const to save some cycles and stack.
	 */
	static const struct rmap_walk_control rwc = {
		.rmap_one = page_idle_clear_pte_refs_one,
		.anon_lock = page_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page_mapped(page) ||
	    !page_rmapping(page))
		return;

	need_lock = !PageAnon(page) || PageKsm(page);
	if (need_lock && !trylock_page(page))
		return;

	rmap_walk(page, (struct rmap_walk_control *)&rwc);

	if (need_lock)
		unlock_page(page);
}
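
/*
 * Read handler for /sys/kernel/mm/page_idle/bitmap: for each pfn in the
 * requested range, report the bit as set if the corresponding page is a
 * user memory page that is still idle after folding in any pte references,
 * and as clear otherwise.
 */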
static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t pos, size_t count)
{
	u64 *out = (u64 *)buf;
	struct page *page;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return 0;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if (!bit)
			*out = 0ULL;
		page = page_idle_get_page(pfn);
		if (page) {
			if (page_is_idle(page)) {
				/*
				 * The page might have been referenced via a
				 * pte, in which case it is not idle. Clear
				 * refs and recheck.
				 */
				page_idle_clear_pte_refs(page);
				if (page_is_idle(page))
					*out |= 1ULL << bit;
			}
			put_page(page);
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			out++;
		cond_resched();
	}
	return (char *)out - buf;
}
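
/*
 * Write handler: for each bit set in the input chunks, clear the accessed
 * bits in all mappings of the corresponding page and mark it idle. Bits
 * for pfns that are not user memory pages are silently ignored.
 */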
static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
				      struct bin_attribute *attr, char *buf,
				      loff_t pos, size_t count)
{
	const u64 *in = (u64 *)buf;
	struct page *page;
	unsigned long pfn, end_pfn;
	int bit;

	if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE)
		return -EINVAL;

	pfn = pos * BITS_PER_BYTE;
	if (pfn >= max_pfn)
		return -ENXIO;

	end_pfn = pfn + count * BITS_PER_BYTE;
	if (end_pfn > max_pfn)
		end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);

	for (; pfn < end_pfn; pfn++) {
		bit = pfn % BITMAP_CHUNK_BITS;
		if ((*in >> bit) & 1) {
			page = page_idle_get_page(pfn);
			if (page) {
				page_idle_clear_pte_refs(page);
				set_page_idle(page);
				put_page(page);
			}
		}
		if (bit == BITMAP_CHUNK_BITS - 1)
			in++;
		cond_resched();
	}
	return (char *)in - buf;
}
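
/* Expose the bitmap as /sys/kernel/mm/page_idle/bitmap, root read/write. */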
static struct bin_attribute page_idle_bitmap_attr =
		__BIN_ATTR(bitmap, S_IRUSR | S_IWUSR,
			   page_idle_bitmap_read, page_idle_bitmap_write, 0);

static struct bin_attribute *page_idle_bin_attrs[] = {
	&page_idle_bitmap_attr,
	NULL,
};

static struct attribute_group page_idle_attr_group = {
	.bin_attrs = page_idle_bin_attrs,
	.name = "page_idle",
};
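
/*
 * On 32-bit there are not enough spare page flags for the idle and young
 * bits, so they live in page_ext instead; register the page_ext client
 * here.
 */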
#ifndef CONFIG_64BIT
static bool need_page_idle(void)
{
	return true;
}
struct page_ext_operations page_idle_ops = {
	.need = need_page_idle,
};
#endif
static int __init page_idle_init(void)
{
	int err;

	err = sysfs_create_group(mm_kobj, &page_idle_attr_group);
	if (err) {
		pr_err("page_idle: register sysfs failed\n");
		return err;
	}
	return 0;
}
subsys_initcall(page_idle_init);
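
/*
 * Userspace usage (sketch): the idle bit of pfn N is bit (N % 64) of the
 * u64 chunk read at offset (N / 64) * 8 in the bitmap file. Writing a
 * chunk with bits set marks the corresponding pages idle; reading the same
 * chunk later shows which of them have not been referenced in the
 * meantime.
 */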