1 #ifndef _LINUX_PAGE_REF_H
2 #define _LINUX_PAGE_REF_H
4 #include <linux/atomic.h>
5 #include <linux/mm_types.h>
6 #include <linux/page-flags.h>
7 #include <linux/tracepoint-defs.h>
/* Tracepoints for page reference count changes; defined in mm/debug_page_ref.c */
extern struct tracepoint __tracepoint_page_ref_set;
extern struct tracepoint __tracepoint_page_ref_mod;
extern struct tracepoint __tracepoint_page_ref_mod_and_test;
extern struct tracepoint __tracepoint_page_ref_mod_and_return;
extern struct tracepoint __tracepoint_page_ref_mod_unless;
extern struct tracepoint __tracepoint_page_ref_freeze;
extern struct tracepoint __tracepoint_page_ref_unfreeze;
17 #ifdef CONFIG_DEBUG_PAGE_REF
20 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
21 * functions. But due to include header file issues, that is not
22 * feasible. Instead we have to open code the static key functions.
24 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
26 #define page_ref_tracepoint_active(t) static_key_false(&(t).key)
28 extern void __page_ref_set(struct page
*page
, int v
);
29 extern void __page_ref_mod(struct page
*page
, int v
);
30 extern void __page_ref_mod_and_test(struct page
*page
, int v
, int ret
);
31 extern void __page_ref_mod_and_return(struct page
*page
, int v
, int ret
);
32 extern void __page_ref_mod_unless(struct page
*page
, int v
, int u
);
33 extern void __page_ref_freeze(struct page
*page
, int v
, int ret
);
34 extern void __page_ref_unfreeze(struct page
*page
, int v
);
38 #define page_ref_tracepoint_active(t) false
40 static inline void __page_ref_set(struct page
*page
, int v
)
43 static inline void __page_ref_mod(struct page
*page
, int v
)
46 static inline void __page_ref_mod_and_test(struct page
*page
, int v
, int ret
)
49 static inline void __page_ref_mod_and_return(struct page
*page
, int v
, int ret
)
52 static inline void __page_ref_mod_unless(struct page
*page
, int v
, int u
)
55 static inline void __page_ref_freeze(struct page
*page
, int v
, int ret
)
58 static inline void __page_ref_unfreeze(struct page
*page
, int v
)
64 static inline int page_ref_count(struct page
*page
)
66 return atomic_read(&page
->_count
);
69 static inline int page_count(struct page
*page
)
71 return atomic_read(&compound_head(page
)->_count
);
74 static inline void set_page_count(struct page
*page
, int v
)
76 atomic_set(&page
->_count
, v
);
77 if (page_ref_tracepoint_active(__tracepoint_page_ref_set
))
78 __page_ref_set(page
, v
);
/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}
90 static inline void page_ref_add(struct page
*page
, int nr
)
92 atomic_add(nr
, &page
->_count
);
93 if (page_ref_tracepoint_active(__tracepoint_page_ref_mod
))
94 __page_ref_mod(page
, nr
);
97 static inline void page_ref_sub(struct page
*page
, int nr
)
99 atomic_sub(nr
, &page
->_count
);
100 if (page_ref_tracepoint_active(__tracepoint_page_ref_mod
))
101 __page_ref_mod(page
, -nr
);
104 static inline void page_ref_inc(struct page
*page
)
106 atomic_inc(&page
->_count
);
107 if (page_ref_tracepoint_active(__tracepoint_page_ref_mod
))
108 __page_ref_mod(page
, 1);
111 static inline void page_ref_dec(struct page
*page
)
113 atomic_dec(&page
->_count
);
114 if (page_ref_tracepoint_active(__tracepoint_page_ref_mod
))
115 __page_ref_mod(page
, -1);
118 static inline int page_ref_sub_and_test(struct page
*page
, int nr
)
120 int ret
= atomic_sub_and_test(nr
, &page
->_count
);
122 if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test
))
123 __page_ref_mod_and_test(page
, -nr
, ret
);
127 static inline int page_ref_dec_and_test(struct page
*page
)
129 int ret
= atomic_dec_and_test(&page
->_count
);
131 if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test
))
132 __page_ref_mod_and_test(page
, -1, ret
);
136 static inline int page_ref_dec_return(struct page
*page
)
138 int ret
= atomic_dec_return(&page
->_count
);
140 if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return
))
141 __page_ref_mod_and_return(page
, -1, ret
);
145 static inline int page_ref_add_unless(struct page
*page
, int nr
, int u
)
147 int ret
= atomic_add_unless(&page
->_count
, nr
, u
);
149 if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless
))
150 __page_ref_mod_unless(page
, nr
, ret
);
154 static inline int page_ref_freeze(struct page
*page
, int count
)
156 int ret
= likely(atomic_cmpxchg(&page
->_count
, count
, 0) == count
);
158 if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze
))
159 __page_ref_freeze(page
, count
, ret
);
163 static inline void page_ref_unfreeze(struct page
*page
, int count
)
165 VM_BUG_ON_PAGE(page_count(page
) != 0, page
);
166 VM_BUG_ON(count
== 0);
168 atomic_set(&page
->_count
, count
);
169 if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze
))
170 __page_ref_unfreeze(page
, count
);