/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLL_IN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However, since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}
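
/*
 * Illustration only: the nesting scenario the comment above guards
 * against. Suppose an IRQ writes to the same buffer between an outer
 * get/put pair (a hypothetical interleaving, not code in this file):
 *
 *	perf_output_get_handle();	// rb->nest: 0 -> 1
 *	  <IRQ>
 *	    perf_output_get_handle();	// rb->nest: 1 -> 2
 *	    ... write inner record ...
 *	    perf_output_put_handle();	// rb->nest: 2 -> 1, no publish
 *	  </IRQ>
 *	... finish outer record ...
 *	perf_output_put_handle();	// rb->nest: 1 -> 0, publishes head
 *
 * Only the outer-most put sees rb->nest drop to zero, so a single head
 * update covers both records.
 */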

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load
	 * of ->data_tail from the stores of $data: if ->data_tail indicates
	 * there is no room in the buffer to store $data, we do not store it.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update -- rely on previous implied
	 * compiler barriers to force a re-read.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
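
/*
 * Illustration only: a minimal sketch of the userspace consumer side of
 * the pairing documented above. Nothing here is part of this file;
 * process(), data, data_size, record_size and the rmb()/mb() wrappers
 * stand in for whatever the tool (e.g. perf) actually provides:
 *
 *	struct perf_event_mmap_page *pg = mmapped_base;
 *	u64 tail = pg->data_tail;
 *	u64 head = pg->data_head;	// LOAD ->data_head
 *
 *	rmb();				// (C), matches B
 *	while (tail < head) {
 *		process(data + (tail & (data_size - 1)));   // LOAD $data
 *		tail += record_size;	// each record carries its size
 *	}
 *	mb();				// (D), matches A
 *	pg->data_tail = tail;		// STORE ->data_tail
 */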

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64 id;
		u64 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(!rb->nr_pages))
		goto out;

	handle->rb    = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = ACCESS_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite &&
		    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
			goto fail;

		/*
		 * The above forms a control dependency barrier separating
		 * the @tail load above from the data stores below, since
		 * the @tail load is required to compute the branch to the
		 * fail label.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD
		 * issue after reading the data and before storing the new
		 * tail position.
		 *
		 * See perf_output_put_handle().
		 */

		head += size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
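
/*
 * Illustration only: how the begin/copy/end triple above is meant to be
 * paired by a caller. The record layout below is made up for the example;
 * only the function calls reflect this file's API:
 *
 *	struct perf_output_handle handle;
 *	u64 payload[4];			// hypothetical sample payload
 *	struct perf_event_header header = {
 *		.type = PERF_RECORD_SAMPLE,
 *		.misc = 0,
 *		.size = sizeof(header) + sizeof(payload),
 *	};
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;			// -ENOSPC: rb->lost was bumped
 *
 *	perf_output_put(&handle, header);
 *	perf_output_copy(&handle, payload, sizeof(payload));
 *	perf_output_end(&handle);	// publishes head, maybe wakes readers
 */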

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);
}
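
/*
 * Worked example (illustrative numbers): with eight order-0 data pages and
 * PAGE_SIZE == 4096, perf_data_size() is 32768 bytes, so a zero @watermark
 * defaults to 16384. perf_output_begin() then advances rb->wakeup in 16 KiB
 * steps, i.e. perf_output_put_handle() queues a wakeup roughly every half
 * buffer of published data.
 */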

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL order-0 pages.
 */

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	rb->page_order = ilog2(nr_pages);
	rb->nr_pages = !!nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}
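
/*
 * Illustration only: the layout trick used above, with nr_pages == 8 as an
 * example. vmalloc_user() gives one virtually contiguous 9-page area (the
 * user page followed by the data), and rb_alloc() records:
 *
 *	rb->page_order = ilog2(8) = 3;	// 8 real pages per "data page"
 *	rb->nr_pages   = 1;		// a single huge data page
 *
 * perf_output_begin() then computes page_shift = PAGE_SHIFT + 3, always
 * resolves handle->page to 0, and addresses the whole 32 KiB data area
 * through rb->data_pages[0]; perf_mmap_to_page() can use plain pointer
 * arithmetic because the area is contiguous.
 */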

#endif