Commit | Line | Data |
---|---|---|
d0b6e04a LZ |
1 | #undef TRACE_SYSTEM |
2 | #define TRACE_SYSTEM kmem | |
3 | ||
ea20d929 | 4 | #if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ) |
02af61bb | 5 | #define _TRACE_KMEM_H |
b9ce08c0 EGM |
6 | |
7 | #include <linux/types.h> | |
fc182a43 | 8 | #include <linux/tracepoint.h> |
b9ce08c0 | 9 | |
62ba180e SR |
/*
 * The order of these masks is important. Matching masks will be seen
 * first and the left over flags will end up showing by themselves.
 *
 * For example, if we have GFP_KERNEL before GFP_USER we will get:
 *
 * GFP_KERNEL|GFP_HARDWALL
 *
 * Thus most bits set go first.
 *
 * GFP_NOWAIT is the zero-flags case (no bits set), so it can never be
 * matched by __print_flags() and is reported via the fallback branch.
 *
 * The whole expansion is parenthesized so the ternary cannot rebind
 * when the macro is used inside a larger expression.
 */
#define show_gfp_flags(flags)						\
	((flags) ? __print_flags(flags, "|",				\
	{(unsigned long)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"}, \
	{(unsigned long)GFP_HIGHUSER,		"GFP_HIGHUSER"},	\
	{(unsigned long)GFP_USER,		"GFP_USER"},		\
	{(unsigned long)GFP_TEMPORARY,		"GFP_TEMPORARY"},	\
	{(unsigned long)GFP_KERNEL,		"GFP_KERNEL"},		\
	{(unsigned long)GFP_NOFS,		"GFP_NOFS"},		\
	{(unsigned long)GFP_ATOMIC,		"GFP_ATOMIC"},		\
	{(unsigned long)GFP_NOIO,		"GFP_NOIO"},		\
	{(unsigned long)__GFP_HIGH,		"GFP_HIGH"},		\
	{(unsigned long)__GFP_WAIT,		"GFP_WAIT"},		\
	{(unsigned long)__GFP_IO,		"GFP_IO"},		\
	{(unsigned long)__GFP_COLD,		"GFP_COLD"},		\
	{(unsigned long)__GFP_NOWARN,		"GFP_NOWARN"},		\
	{(unsigned long)__GFP_REPEAT,		"GFP_REPEAT"},		\
	{(unsigned long)__GFP_NOFAIL,		"GFP_NOFAIL"},		\
	{(unsigned long)__GFP_NORETRY,		"GFP_NORETRY"},		\
	{(unsigned long)__GFP_COMP,		"GFP_COMP"},		\
	{(unsigned long)__GFP_ZERO,		"GFP_ZERO"},		\
	{(unsigned long)__GFP_NOMEMALLOC,	"GFP_NOMEMALLOC"},	\
	{(unsigned long)__GFP_HARDWALL,		"GFP_HARDWALL"},	\
	{(unsigned long)__GFP_THISNODE,		"GFP_THISNODE"},	\
	{(unsigned long)__GFP_RECLAIMABLE,	"GFP_RECLAIMABLE"},	\
	{(unsigned long)__GFP_MOVABLE,		"GFP_MOVABLE"}		\
	) : "GFP_NOWAIT")
46 | ||
ea20d929 SR |
/*
 * Tracepoint fired for each kmalloc() allocation.
 *
 * Records the caller's address, the returned object pointer, the size
 * the caller asked for and the size actually allocated (the two may
 * differ), and the GFP flags, pretty-printed via show_gfp_flags().
 */
TRACE_EVENT(kmalloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags))
);
80 | ||
/*
 * Tracepoint fired for each kmem_cache_alloc() allocation.
 *
 * Same payload as the kmalloc event: caller address, returned object
 * pointer, requested vs. actually allocated sizes, and GFP flags.
 */
TRACE_EVENT(kmem_cache_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags))
);
114 | ||
/*
 * Tracepoint fired for each kmalloc_node() allocation.
 *
 * Identical to the kmalloc event plus the NUMA node the caller
 * requested the allocation from.
 */
TRACE_EVENT(kmalloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		node		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node)
);
152 | ||
/*
 * Tracepoint fired for each kmem_cache_alloc_node() allocation.
 *
 * Identical to the kmem_cache_alloc event plus the NUMA node the
 * caller requested the allocation from.
 */
TRACE_EVENT(kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		node		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node)
);
190 | ||
/*
 * Tracepoint fired for each kfree() call.
 *
 * Records only the caller's address and the pointer being freed; no
 * size information is available at free time.
 */
TRACE_EVENT(kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
	),

	TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);
209 | ||
/*
 * Tracepoint fired for each kmem_cache_free() call.
 *
 * Same payload as the kfree event: caller address and freed pointer.
 */
TRACE_EVENT(kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
	),

	TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);
4b4f278c MG |
228 | |
/*
 * Tracepoint for a page freed directly to the page allocator.
 *
 * Stores the struct page pointer and the allocation order; the pfn in
 * the output is derived from the stored page pointer at print time
 * (page_to_pfn() in TP_printk), not captured when the event fires.
 */
TRACE_EVENT(mm_page_free_direct,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	unsigned int,	order		)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->order		= order;
	),

	TP_printk("page=%p pfn=%lu order=%d",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->order)
);
250 | ||
/*
 * Tracepoint for a page freed via the pagevec batching path.
 *
 * Records the page and whether it was released as a cold page.  The
 * format string hard-codes "order=0": this path only handles
 * single (order-0) pages, so no order field is stored.  The pfn is
 * computed from the page pointer at print time.
 */
TRACE_EVENT(mm_pagevec_free,

	TP_PROTO(struct page *page, int cold),

	TP_ARGS(page, cold),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	int,		cold		)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->cold		= cold;
	),

	TP_printk("page=%p pfn=%lu order=0 cold=%d",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->cold)
);
272 | ||
/*
 * Tracepoint fired when the page allocator hands out pages.
 *
 * Records the first allocated page, the order, the GFP flags
 * (pretty-printed via show_gfp_flags()), and the migratetype of the
 * freelist the pages came from.  The pfn is computed from the page
 * pointer at print time.
 */
TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
			gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	unsigned int,	order		)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->order		= order;
		__entry->gfp_flags	= gfp_flags;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);
301 | ||
e0fff1bd MG |
/*
 * Tracepoint fired when an allocation falls back to a freelist of a
 * different migratetype, a potential source of external fragmentation.
 *
 * Two values in the output are derived at print time rather than
 * stored:
 *   fragmenting       = fallback_order < pageblock_order (the stolen
 *                       block is smaller than a pageblock, so mixed
 *                       migratetypes now share one pageblock)
 *   change_ownership  = alloc_migratetype == fallback_migratetype
 *
 * NOTE(review): change_ownership prints 1 when the two migratetypes
 * are EQUAL, which reads inverted relative to its name (ownership
 * would change when they differ) -- verify the intended semantics
 * against the callers before relying on this field.
 */
TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
			int alloc_order, int fallback_order,
			int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field(	struct page *,	page			)
		__field(	int,		alloc_order		)
		__field(	int,		fallback_order		)
		__field(	int,		alloc_migratetype	)
		__field(	int,		fallback_migratetype	)
	),

	TP_fast_assign(
		__entry->page			= page;
		__entry->alloc_order		= alloc_order;
		__entry->fallback_order		= fallback_order;
		__entry->alloc_migratetype	= alloc_migratetype;
		__entry->fallback_migratetype	= fallback_migratetype;
	),

	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->alloc_migratetype == __entry->fallback_migratetype)
);
339 | ||
a8d154b0 | 340 | #endif /* _TRACE_KMEM_H */ |
ea20d929 | 341 | |
a8d154b0 SR |
342 | /* This part must be outside protection */ |
343 | #include <trace/define_trace.h> |