Commit | Line | Data |
---|---|---|
f8af4da3 HD |
1 | #ifndef __LINUX_KSM_H |
2 | #define __LINUX_KSM_H | |
3 | /* | |
4 | * Memory merging support. | |
5 | * | |
6 | * This code enables dynamic sharing of identical pages found in different | |
7 | * memory areas, even if they are not shared by fork(). | |
8 | */ | |
9 | ||
10 | #include <linux/bitops.h> | |
11 | #include <linux/mm.h> | |
12 | #include <linux/sched.h> | |
9a840895 | 13 | #include <linux/vmstat.h> |
f8af4da3 HD |
14 | |
15 | #ifdef CONFIG_KSM | |
16 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, | |
17 | unsigned long end, int advice, unsigned long *vm_flags); | |
18 | int __ksm_enter(struct mm_struct *mm); | |
1c2fb7a4 | 19 | void __ksm_exit(struct mm_struct *mm); |
f8af4da3 HD |
20 | |
21 | static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) | |
22 | { | |
23 | if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) | |
24 | return __ksm_enter(mm); | |
25 | return 0; | |
26 | } | |
27 | ||
1c2fb7a4 | 28 | static inline void ksm_exit(struct mm_struct *mm) |
f8af4da3 HD |
29 | { |
30 | if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) | |
1c2fb7a4 | 31 | __ksm_exit(mm); |
f8af4da3 | 32 | } |
9a840895 HD |
33 | |
34 | /* | |
35 | * A KSM page is one of those write-protected "shared pages" or "merged pages" | |
36 | * which KSM maps into multiple mms, wherever identical anonymous page content | |
37 | * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma. | |
38 | */ | |
39 | static inline int PageKsm(struct page *page) | |
40 | { | |
41 | return ((unsigned long)page->mapping == PAGE_MAPPING_ANON); | |
42 | } | |
43 | ||
/*
 * Raise the rmap count on a KSM page, as page_add_anon_rmap() would —
 * but we have to avoid the checking which page_add_anon_rmap() performs.
 */
static inline void page_add_ksm_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount)) {
		/*
		 * atomic_inc_and_test() is true when the increment lands on 0,
		 * i.e. this is the first mapping of the page.
		 * NOTE(review): relies on _mapcount's "-1 means unmapped"
		 * baseline — confirm against the page_mapcount() convention.
		 * Mark the page anon with no anon_vma, and account it.
		 */
		page->mapping = (void *) PAGE_MAPPING_ANON;
		__inc_zone_page_state(page, NR_ANON_PAGES);
	}
}
f8af4da3 HD |
54 | #else /* !CONFIG_KSM */ |
55 | ||
/* CONFIG_KSM disabled: madvise() merge/unmerge hints are no-ops, report success. */
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}
61 | ||
/* CONFIG_KSM disabled: nothing for fork() to propagate, always succeed. */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
66 | ||
/* CONFIG_KSM disabled: no KSM state to tear down at mm exit. */
static inline void ksm_exit(struct mm_struct *mm)
{
}
9a840895 HD |
70 | |
/* CONFIG_KSM disabled: no page can ever be a KSM page. */
static inline int PageKsm(struct page *page)
{
	return 0;
}
75 | ||
76 | /* No stub required for page_add_ksm_rmap(page) */ | |
f8af4da3 HD |
77 | #endif /* !CONFIG_KSM */ |
78 | ||
79 | #endif |