Linux 3.17-rc7
kernel/trace/ring_buffer.c
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ftrace_event.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include <asm/local.h>

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_puts(s, "# compressed entry header\n");
	ret = trace_seq_puts(s, "\ttype_len    :    5 bits\n");
	ret = trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
	ret = trace_seq_puts(s, "\tarray       :   32 bits\n");
	ret = trace_seq_putc(s, '\n');
	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

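/*
 * A minimal usage sketch (illustrative, not part of the original file):
 * a producer reserves an event, fills it in, and commits it; a consumer
 * pulls events off one cpu buffer. Error handling is omitted. All of
 * these helpers are declared in <linux/ring_buffer.h>.
 *
 *	struct ring_buffer *rb = ring_buffer_alloc(4 * PAGE_SIZE, RB_FL_OVERWRITE);
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_lock_reserve(rb, sizeof(int));
 *	if (event) {
 *		*(int *)ring_buffer_event_data(event) = 42;
 *		ring_buffer_unlock_commit(rb, event);
 *	}
 *
 *	event = ring_buffer_consume(rb, raw_smp_processor_id(), NULL, NULL);
 *	if (event)
 *		pr_info("read %d\n", *(int *)ring_buffer_event_data(event));
 *
 *	ring_buffer_free(rb);
 */
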
/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on, makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

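/*
 * Illustrative sketch (not in the original source): the three layers
 * map onto the checks done at reserve time, roughly in this order:
 *
 *	if (ring_buffer_flags != RB_BUFFERS_ON)
 *		return NULL;			layer 1: global flag
 *	if (atomic_read(&buffer->record_disabled))
 *		goto out;			layer 2: whole buffer
 *	if (atomic_read(&cpu_buffer->record_disabled))
 *		goto out;			layer 3: this cpu buffer
 */
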
static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

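/*
 * Illustrative example (not in the original source): a 12-byte payload
 * is encoded with type_len == 3 (3 * RB_ALIGNMENT == 12) and its data
 * starts at array[0]. A payload larger than RB_MAX_SMALL_DATA (112
 * bytes with RB_ALIGNMENT == 4) is encoded with type_len == 0, the
 * length stored in array[0], and the data starting at array[1].
 */
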
/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

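/*
 * Illustrative note (not in the original source): with a nanosecond
 * clock, a 27-bit delta covers 2^27 ns, roughly 134 ms. Two events
 * spaced further apart than that need a TIME_EXTEND event between
 * them, which is what test_time_stamp() below detects.
 */
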
/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)

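/*
 * Illustrative example (not in the original source): if page->write
 * reads 0x00300040, the low 20 bits (0x00040) are the write index and
 * the bits above RB_WRITE_MASK count three in-progress updaters;
 * rb_page_write() below masks with RB_WRITE_MASK to recover the index.
 */
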
static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

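/*
 * Illustrative numbers (not in the original source; they assume 4K
 * pages and a 64-bit local_t): BUF_PAGE_HDR_SIZE is 16 bytes, so
 * BUF_PAGE_SIZE leaves 4080 bytes of event space per page, and
 * BUF_MAX_DATA_SIZE caps a single event payload at 4072 bytes.
 */
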
int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)sizeof(field.time_stamp),
			       (unsigned int)is_signed_type(u64));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit),
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       1,
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE,
			       (unsigned int)is_signed_type(char));

	return ret;
}

struct rb_irq_work {
	struct irq_work			work;
	wait_queue_head_t		waiters;
	bool				waiters_pending;
};

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	struct ring_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	unsigned int			nr_pages;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	unsigned long			read;
	unsigned long			read_bytes;
	u64				write_stamp;
	u64				read_stamp;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	int				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct ring_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
};

/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	DEFINE_WAIT(wait);
	struct rb_irq_work *work;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS)
		work = &buffer->irq_work;
	else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}


	prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

	/*
	 * The events can happen in critical sections where
	 * checking a work queue can cause deadlocks.
	 * After adding a task to the queue, this flag is set
	 * only to notify events to try to wake up the queue
	 * using irq_work.
	 *
	 * We don't clear it even if the buffer is no longer
	 * empty. The flag only causes the next event to run
	 * irq_work to do the work queue wake up. The worst
	 * that can happen if we race with !trace_empty() is that
	 * an event will cause an irq_work to try to wake up
	 * an empty queue.
	 *
	 * There's no reason to protect this flag either, as
	 * the work queue and irq_work logic will do the necessary
	 * synchronization for the wake ups. The only thing
	 * that is necessary is that the wake up happens after
	 * a task has been queued. It's OK for spurious wake ups.
	 */
	work->waiters_pending = true;

	if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
		schedule();

	finish_wait(&work->waiters, &wait);
	return 0;
}

/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns POLLIN | POLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
			  struct file *filp, poll_table *poll_table)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *work;

	if (cpu == RING_BUFFER_ALL_CPUS)
		work = &buffer->irq_work;
	else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -EINVAL;

		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	poll_wait(filp, &work->waiters, poll_table);
	work->waiters_pending = true;
	/*
	 * There's a tight race between setting the waiters_pending and
	 * checking if the ring buffer is empty. Once the waiters_pending bit
	 * is set, the next event will wake the task up, but we can get stuck
	 * if there's only a single event in.
	 *
	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
	 * but adding a memory barrier to all events will cause too much of a
	 * performance hit in the fast path. We only need a memory barrier when
	 * the buffer goes from empty to having content. But as this race is
	 * extremely small, and it's not a problem if another event comes in, we
	 * will fix it later.
	 */
	smp_mb();

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return POLLIN | POLLRDNORM;
	return 0;
}

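/*
 * Illustrative usage (not in the original source; my_poll, my_buffer
 * and my_cpu are hypothetical): a file_operations ->poll() handler
 * backed by one cpu buffer can simply forward to this function:
 *
 *	static unsigned int my_poll(struct file *filp, poll_table *pt)
 *	{
 *		return ring_buffer_poll_wait(my_buffer, my_cpu, filp, pt);
 *	}
 */
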
/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, and
 * they only need to worry about interrupts. Reads can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

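/*
 * Illustrative example (not in the original source; the address is
 * made up): if a buffer_page lives at 0xffff880012345600, then while
 * it is the head page the previous page's next pointer is stored as
 * 0xffff880012345601 (address | RB_PAGE_HEAD). rb_list_head() below
 * strips the low flag bits before the pointer is dereferenced.
 */
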
#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static int rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(cpu_buffer, &page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}

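/*
 * Illustrative note (not in the original source): rb_head_page_replace()
 * above is the reader-side half of the protocol described earlier. The
 * cmpxchg succeeds only while prev->next still reads (old | RB_PAGE_HEAD),
 * i.e. only if no writer moved the head page between the reader choosing
 * the head page and attempting the swap.
 */
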
/*
 * rb_tail_page_update - move the tail page forward
 *
 * Returns 1 if moved tail page, 0 if someone else did.
 */
static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page *tail_page,
			       struct buffer_page *next_page)
{
	struct buffer_page *old_tail;
	unsigned long old_entries;
	unsigned long old_write;
	int ret = 0;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == cpu_buffer->tail_page) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * it only can increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		old_tail = cmpxchg(&cpu_buffer->tail_page,
				   tail_page, next_page);

		if (old_tail == tail_page)
			ret = 1;
	}

	return ret;
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	/* Reset the head page if it exists */
	if (cpu_buffer->head_page)
		rb_set_head_page(cpu_buffer);

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}

static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
{
	int i;
	struct buffer_page *bpage, *tmp;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		/*
		 * __GFP_NORETRY flag makes sure that the allocation fails
		 * gracefully without invoking oom-killer and the system is
		 * not destabilized.
		 */
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL | __GFP_NORETRY,
				     cpu_to_node(cpu));
		if (!bpage)
			goto free_pages;

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);
	}

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}

	return -ENOMEM;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);
	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.waiters);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif

/**
 * __ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu, nr_pages;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&buffer->irq_work.waiters);

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
	 * In that off case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	cpu_notifier_register_begin();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	__register_cpu_notifier(&buffer->cpu_notify);
	cpu_notifier_register_done();
#endif

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
#ifdef CONFIG_HOTPLUG_CPU
	cpu_notifier_register_done();
#endif

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

#ifdef CONFIG_HOTPLUG_CPU
	cpu_notifier_register_begin();
	__unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

#ifdef CONFIG_HOTPLUG_CPU
	cpu_notifier_register_done();
#endif

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}

static int
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
{
	struct list_head *tail_page, *to_remove, *next_page;
	struct buffer_page *to_remove_page, *tmp_iter_page;
	struct buffer_page *last_page, *first_page;
	unsigned int nr_removed;
	unsigned long head_bit;
	int page_entries;

	head_bit = 0;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	atomic_inc(&cpu_buffer->record_disabled);
	/*
	 * We don't race with the readers since we have acquired the reader
	 * lock. We also don't race with writers after disabling recording.
	 * This makes it easy to figure out the first and the last page to be
	 * removed from the list. We unlink all the pages in between including
	 * the first and last pages. This is done in a busy loop so that we
	 * lose the least number of traces.
	 * The pages are freed after we restart recording and unlock readers.
	 */
	tail_page = &cpu_buffer->tail_page->list;

	/*
	 * tail page might be on reader page, we remove the next page
	 * from the ring buffer
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		tail_page = rb_list_head(tail_page->next);
	to_remove = tail_page;

	/* start of pages to remove */
	first_page = list_entry(rb_list_head(to_remove->next),
				struct buffer_page, list);

	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
		to_remove = rb_list_head(to_remove)->next;
		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
	}

	next_page = rb_list_head(to_remove)->next;

	/*
	 * Now we remove all pages between tail_page and next_page.
	 * Make sure that we have head_bit value preserved for the
	 * next page
	 */
	tail_page->next = (struct list_head *)((unsigned long)next_page |
					       head_bit);
	next_page = rb_list_head(next_page);
	next_page->prev = tail_page;

	/* make sure pages points to a valid page in the ring buffer */
	cpu_buffer->pages = next_page;

	/* update head page */
	if (head_bit)
		cpu_buffer->head_page = list_entry(next_page,
						   struct buffer_page, list);

	/*
	 * change read pointer to make sure any read iterators reset
	 * themselves
	 */
	cpu_buffer->read = 0;

	/* pages are removed, resume tracing and then free the pages */
	atomic_dec(&cpu_buffer->record_disabled);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));

	/* last buffer page to remove */
	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
			       list);
	tmp_iter_page = first_page;

	do {
		to_remove_page = tmp_iter_page;
		rb_inc_page(cpu_buffer, &tmp_iter_page);

		/* update the counters */
		page_entries = rb_page_entries(to_remove_page);
		if (page_entries) {
			/*
			 * If something was added to this page, it was full
			 * since it is not the tail page. So we deduct the
			 * bytes consumed in ring buffer from here.
			 * Increment overrun to account for the lost events.
			 */
			local_add(page_entries, &cpu_buffer->overrun);
			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
		}

		/*
		 * We have already removed references to this list item, just
		 * free up the buffer_page and its page
		 */
		free_buffer_page(to_remove_page);
		nr_removed--;

	} while (to_remove_page != last_page);

	RB_WARN_ON(cpu_buffer, nr_removed);

	return nr_removed == 0;
}

static int
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *pages = &cpu_buffer->new_pages;
	int retries, success;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	/*
	 * We are holding the reader lock, so the reader page won't be swapped
	 * in the ring buffer. Now we are racing with the writer trying to
	 * move head page and the tail page.
	 * We are going to adapt the reader page update process where:
	 * 1. We first splice the start and end of list of new pages between
	 *    the head page and its previous page.
	 * 2. We cmpxchg the prev_page->next to point from head page to the
	 *    start of new pages list.
	 * 3. Finally, we update the head->prev to the end of new list.
	 *
	 * We will try this process 10 times, to make sure that we don't keep
	 * spinning.
	 */
	retries = 10;
	success = 0;
	while (retries--) {
		struct list_head *head_page, *prev_page, *r;
		struct list_head *last_page, *first_page;
		struct list_head *head_page_with_bit;

		head_page = &rb_set_head_page(cpu_buffer)->list;
		if (!head_page)
			break;
		prev_page = head_page->prev;

		first_page = pages->next;
		last_page  = pages->prev;

		head_page_with_bit = (struct list_head *)
				     ((unsigned long)head_page | RB_PAGE_HEAD);

		last_page->next = head_page_with_bit;
		first_page->prev = prev_page;

		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);

		if (r == head_page_with_bit) {
			/*
			 * yay, we replaced the page pointer to our new list,
			 * now, we just have to update to head page's prev
			 * pointer to point to end of list
			 */
			head_page->prev = last_page;
			success = 1;
			break;
		}
	}

	if (success)
		INIT_LIST_HEAD(pages);
	/*
	 * If we weren't successful in adding in new pages, warn and stop
	 * tracing
	 */
	RB_WARN_ON(cpu_buffer, !success);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	/* free pages if they weren't inserted */
	if (!success) {
		struct buffer_page *bpage, *tmp;
		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
					 list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
	}
	return success;
}

static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	int success;

	if (cpu_buffer->nr_pages_to_update > 0)
		success = rb_insert_pages(cpu_buffer);
	else
		success = rb_remove_pages(cpu_buffer,
					  -cpu_buffer->nr_pages_to_update);

	if (success)
		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
}

static void update_pages_handler(struct work_struct *work)
{
	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
			struct ring_buffer_per_cpu, update_pages_work);
	rb_update_pages(cpu_buffer);
	complete(&cpu_buffer->update_done);
}

7a8e76a3
SR
1622/**
1623 * ring_buffer_resize - resize the ring buffer
1624 * @buffer: the buffer to resize.
1625 * @size: the new size.
d611851b 1626 * @cpu_id: the cpu buffer to resize
7a8e76a3 1627 *
7a8e76a3
SR
1628 * Minimum size is 2 * BUF_PAGE_SIZE.
1629 *
83f40318 1630 * Returns 0 on success and < 0 on failure.
7a8e76a3 1631 */
438ced17
VN
1632int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1633 int cpu_id)
7a8e76a3
SR
1634{
1635 struct ring_buffer_per_cpu *cpu_buffer;
438ced17 1636 unsigned nr_pages;
83f40318 1637 int cpu, err = 0;
7a8e76a3 1638
ee51a1de
IM
1639 /*
1640 * Always succeed at resizing a non-existent buffer:
1641 */
1642 if (!buffer)
1643 return size;
1644
6a31e1f1
SR
1645 /* Make sure the requested buffer exists */
1646 if (cpu_id != RING_BUFFER_ALL_CPUS &&
1647 !cpumask_test_cpu(cpu_id, buffer->cpumask))
1648 return size;
1649
7a8e76a3
SR
1650 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1651 size *= BUF_PAGE_SIZE;
7a8e76a3
SR
1652
1653 /* we need a minimum of two pages */
1654 if (size < BUF_PAGE_SIZE * 2)
1655 size = BUF_PAGE_SIZE * 2;
1656
83f40318 1657 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
18421015 1658
83f40318
VN
1659 /*
1660 * Don't succeed if resizing is disabled, as a reader might be
1661 * manipulating the ring buffer and is expecting a sane state while
1662 * this is true.
1663 */
1664 if (atomic_read(&buffer->resize_disabled))
1665 return -EBUSY;
18421015 1666
83f40318 1667 /* prevent another thread from changing buffer sizes */
7a8e76a3 1668 mutex_lock(&buffer->mutex);
7a8e76a3 1669
438ced17
VN
1670 if (cpu_id == RING_BUFFER_ALL_CPUS) {
1671 /* calculate the pages to update */
7a8e76a3
SR
1672 for_each_buffer_cpu(buffer, cpu) {
1673 cpu_buffer = buffer->buffers[cpu];
7a8e76a3 1674
438ced17
VN
1675 cpu_buffer->nr_pages_to_update = nr_pages -
1676 cpu_buffer->nr_pages;
438ced17
VN
1677 /*
1678 * nothing more to do for removing pages or no update
1679 */
1680 if (cpu_buffer->nr_pages_to_update <= 0)
1681 continue;
d7ec4bfe 1682 /*
438ced17
VN
1683 * to add pages, make sure all new pages can be
1684 * allocated without receiving ENOMEM
d7ec4bfe 1685 */
438ced17
VN
1686 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1687 if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
83f40318 1688 &cpu_buffer->new_pages, cpu)) {
438ced17 1689 /* not enough memory for new pages */
83f40318
VN
1690 err = -ENOMEM;
1691 goto out_err;
1692 }
1693 }
1694
1695 get_online_cpus();
1696 /*
1697 * Fire off all the required work handlers
05fdd70d 1698 * We can't schedule on offline CPUs, but it's not necessary
83f40318
VN
1699 * since we can change their buffer sizes without any race.
1700 */
1701 for_each_buffer_cpu(buffer, cpu) {
1702 cpu_buffer = buffer->buffers[cpu];
05fdd70d 1703 if (!cpu_buffer->nr_pages_to_update)
83f40318
VN
1704 continue;
1705
021c5b34
CM
1706 /* Can't run something on an offline CPU. */
1707 if (!cpu_online(cpu)) {
f5eb5588
SRRH
1708 rb_update_pages(cpu_buffer);
1709 cpu_buffer->nr_pages_to_update = 0;
1710 } else {
05fdd70d
VN
1711 schedule_work_on(cpu,
1712 &cpu_buffer->update_pages_work);
f5eb5588 1713 }
7a8e76a3 1714 }
7a8e76a3 1715
438ced17
VN
1716 /* wait for all the updates to complete */
1717 for_each_buffer_cpu(buffer, cpu) {
1718 cpu_buffer = buffer->buffers[cpu];
05fdd70d 1719 if (!cpu_buffer->nr_pages_to_update)
83f40318
VN
1720 continue;
1721
05fdd70d
VN
1722 if (cpu_online(cpu))
1723 wait_for_completion(&cpu_buffer->update_done);
83f40318 1724 cpu_buffer->nr_pages_to_update = 0;
438ced17 1725 }
83f40318
VN
1726
1727 put_online_cpus();
	} else {
		/* Make sure this CPU has been initialized */
		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
			goto out;

		cpu_buffer = buffer->buffers[cpu_id];

		if (nr_pages == cpu_buffer->nr_pages)
			goto out;

		cpu_buffer->nr_pages_to_update = nr_pages -
						cpu_buffer->nr_pages;

		INIT_LIST_HEAD(&cpu_buffer->new_pages);
		if (cpu_buffer->nr_pages_to_update > 0 &&
		    __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
					&cpu_buffer->new_pages, cpu_id)) {
			err = -ENOMEM;
			goto out_err;
		}

		get_online_cpus();

		/* Can't run something on an offline CPU. */
		if (!cpu_online(cpu_id))
			rb_update_pages(cpu_buffer);
		else {
			schedule_work_on(cpu_id,
					 &cpu_buffer->update_pages_work);
			wait_for_completion(&cpu_buffer->update_done);
		}

		cpu_buffer->nr_pages_to_update = 0;
		put_online_cpus();
	}

 out:
	/*
	 * The ring buffer resize can happen with the ring buffer
	 * enabled, so that the update disturbs the tracing as little
	 * as possible. But if the buffer is disabled, we do not need
	 * to worry about that, and we can take the time to verify
	 * that the buffer is not corrupt.
	 */
	if (atomic_read(&buffer->record_disabled)) {
		atomic_inc(&buffer->record_disabled);
		/*
		 * Even though the buffer was disabled, we must make sure
		 * that it is truly disabled before calling rb_check_pages.
		 * There could have been a race between checking
		 * record_disable and incrementing it.
		 */
		synchronize_sched();
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_check_pages(cpu_buffer);
		}
		atomic_dec(&buffer->record_disabled);
	}

	mutex_unlock(&buffer->mutex);
	return size;

 out_err:
	for_each_buffer_cpu(buffer, cpu) {
		struct buffer_page *bpage, *tmp;

		cpu_buffer = buffer->buffers[cpu];
		cpu_buffer->nr_pages_to_update = 0;

		if (list_empty(&cpu_buffer->new_pages))
			continue;

		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
					list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
	}
	mutex_unlock(&buffer->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);

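/*
 * Illustrative sketch, added for exposition (not in the original
 * source): a caller growing every per-CPU buffer to 1 MB would do
 *
 *	ret = ring_buffer_resize(buffer, 1024 * 1024, RING_BUFFER_ALL_CPUS);
 *	if (ret < 0)
 *		pr_warn("ring buffer resize failed: %d\n", ret);
 *
 * The requested size is rounded up to a multiple of BUF_PAGE_SIZE;
 * on success the rounded size is returned, otherwise a negative errno.
 */
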
void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
{
	mutex_lock(&buffer->mutex);
	if (val)
		buffer->flags |= RB_FL_OVERWRITE;
	else
		buffer->flags &= ~RB_FL_OVERWRITE;
	mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);

static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
}

static inline int
rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

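/*
 * Illustrative sketch, added for exposition (not in the original
 * source): assuming 4K pages (PAGE_MASK == ~0xfffUL), an event at
 * address 0xffff880012345678 yields
 *
 *	addr & ~PAGE_MASK == 0x678	    (offset within the page)
 *	index = 0x678 - BUF_PAGE_HDR_SIZE   (offset within page->data)
 *
 * and rb_event_is_commit() is true only when the event's page is the
 * commit page and this index equals the page's commit counter.
 */
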
static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long max_count;

	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	max_count = cpu_buffer->nr_pages * 100;

	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
			return;
		if (RB_WARN_ON(cpu_buffer,
			       rb_is_reader_page(cpu_buffer->tail_page)))
			return;
		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {

		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		RB_WARN_ON(cpu_buffer,
			   local_read(&cpu_buffer->commit_page->page->commit) &
			   ~RB_WRITE_MASK);
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = rb_set_head_page(cpu_buffer);
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}

/* Slow path, do not inline */
static noinline struct ring_buffer_event *
rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
{
	event->type_len = RINGBUF_TYPE_TIME_EXTEND;

	/* Not the first event on the page? */
	if (rb_event_index(event)) {
		event->time_delta = delta & TS_MASK;
		event->array[0] = delta >> TS_SHIFT;
	} else {
		/* nope, just zero it */
		event->time_delta = 0;
		event->array[0] = 0;
	}

	return skip_time_extend(event);
}

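/*
 * Illustrative sketch, added for exposition (not in the original
 * source): TS_SHIFT is 27 in this file, so a delta that does not fit
 * in the 27-bit time_delta field is split as
 *
 *	event->time_delta = delta & TS_MASK;	(low 27 bits)
 *	event->array[0]   = delta >> TS_SHIFT;	(upper bits)
 *
 * and a reader reassembles it with
 *
 *	delta = ((u64)event->array[0] << TS_SHIFT) + event->time_delta;
 */
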
/**
 * rb_update_event - update event type and data
 * @cpu_buffer: the per CPU buffer the event belongs to
 * @event: the event to update
 * @length: the size of the event field in the ring buffer
 * @add_timestamp: non-zero if a TIME_EXTEND event precedes the data
 * @delta: the time delta to record in the event
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
		struct ring_buffer_event *event, unsigned length,
		int add_timestamp, u64 delta)
{
	/* Only a commit updates the timestamp */
	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
		delta = 0;

	/*
	 * If we need to add a timestamp, then we
	 * add it to the start of the reserved space.
	 */
	if (unlikely(add_timestamp)) {
		event = rb_add_time_stamp(event, delta);
		length -= RB_LEN_TIME_EXTEND;
		delta = 0;
	}

	event->time_delta = delta;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
		event->type_len = 0;
		event->array[0] = length;
	} else
		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
}

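/*
 * Illustrative sketch, added for exposition (not in the original
 * source): assuming RB_ALIGNMENT == 4 and no forced 8-byte alignment,
 * a 12-byte payload is encoded in place as
 *
 *	event->type_len = DIV_ROUND_UP(12, 4) = 3
 *
 * and a reader recovers the length as 3 * RB_ALIGNMENT = 12. Payloads
 * larger than RB_MAX_SMALL_DATA instead get type_len = 0 with the
 * byte count stored in array[0].
 */
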
/*
 * rb_handle_head_page - writer hit the head page
 *
 * Returns: +1 to retry page
 *           0 to continue
 *          -1 on error
 */
static int
rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		    struct buffer_page *tail_page,
		    struct buffer_page *next_page)
{
	struct buffer_page *new_head;
	int entries;
	int type;
	int ret;

	entries = rb_page_entries(next_page);

	/*
	 * The hard part is here. We need to move the head
	 * forward, and protect against both readers on
	 * other CPUs and writers coming in via interrupts.
	 */
	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
				       RB_PAGE_HEAD);

	/*
	 * type can be one of four:
	 *  NORMAL - an interrupt already moved it for us
	 *  HEAD   - we are the first to get here.
	 *  UPDATE - we are the interrupt interrupting
	 *           a current move.
	 *  MOVED  - a reader on another CPU moved the next
	 *           pointer to its reader page. Give up
	 *           and try again.
	 */

	switch (type) {
	case RB_PAGE_HEAD:
		/*
		 * We changed the head to UPDATE, thus
		 * it is our responsibility to update
		 * the counters.
		 */
		local_add(entries, &cpu_buffer->overrun);
		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);

		/*
		 * The entries will be zeroed out when we move the
		 * tail page.
		 */

		/* still more to do */
		break;

	case RB_PAGE_UPDATE:
		/*
		 * This is an interrupt that interrupted the
		 * previous update. Still more to do.
		 */
		break;
	case RB_PAGE_NORMAL:
		/*
		 * An interrupt came in before the update
		 * and processed this for us.
		 * Nothing left to do.
		 */
		return 1;
	case RB_PAGE_MOVED:
		/*
		 * The reader is on another CPU and just did
		 * a swap with our next_page.
		 * Try again.
		 */
		return 1;
	default:
		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
		return -1;
	}

	/*
	 * Now that we are here, the old head pointer is
	 * set to UPDATE. This will keep the reader from
	 * swapping the head page with the reader page.
	 * The reader (on another CPU) will spin till
	 * we are finished.
	 *
	 * We just need to protect against interrupts
	 * doing the job. We will set the next pointer
	 * to HEAD. After that, we set the old pointer
	 * to NORMAL, but only if it was HEAD before.
	 * Otherwise we are an interrupt, and only
	 * want the outer most commit to reset it.
	 */
	new_head = next_page;
	rb_inc_page(cpu_buffer, &new_head);

	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
				    RB_PAGE_NORMAL);

	/*
	 * Valid returns are:
	 *  HEAD   - an interrupt came in and already set it.
	 *  NORMAL - One of two things:
	 *            1) We really set it.
	 *            2) A bunch of interrupts came in and moved
	 *               the page forward again.
	 */
	switch (ret) {
	case RB_PAGE_HEAD:
	case RB_PAGE_NORMAL:
		/* OK */
		break;
	default:
		RB_WARN_ON(cpu_buffer, 1);
		return -1;
	}

	/*
	 * It is possible that an interrupt came in,
	 * set the head up, then more interrupts came in
	 * and moved it again. When we get back here,
	 * the page would have been set to NORMAL but we
	 * just set it back to HEAD.
	 *
	 * How do you detect this? Well, if that happened
	 * the tail page would have moved.
	 */
	if (ret == RB_PAGE_NORMAL) {
		/*
		 * If the tail had moved past next, then we need
		 * to reset the pointer.
		 */
		if (cpu_buffer->tail_page != tail_page &&
		    cpu_buffer->tail_page != next_page)
			rb_head_page_set_normal(cpu_buffer, new_head,
						next_page,
						RB_PAGE_HEAD);
	}

	/*
	 * If this was the outer most commit (the one that
	 * changed the original pointer from HEAD to UPDATE),
	 * then it is up to us to reset it to NORMAL.
	 */
	if (type == RB_PAGE_HEAD) {
		ret = rb_head_page_set_normal(cpu_buffer, next_page,
					      tail_page,
					      RB_PAGE_UPDATE);
		if (RB_WARN_ON(cpu_buffer,
			       ret != RB_PAGE_UPDATE))
			return -1;
	}

	return 0;
}

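/*
 * Illustrative sketch, added for exposition (not in the original
 * source): the writer's successful path above is a stepped flag
 * transition on the pointer leading to the head page:
 *
 *	HEAD -> UPDATE	 claim the move (nested interrupts see UPDATE)
 *	next -> HEAD	 publish the page after next_page as new head
 *	UPDATE -> NORMAL release the old head (outermost commit only)
 *
 * A reader that wins a swap in between produces the MOVED case and
 * the caller simply retries.
 */
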
static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusion */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ARCH_ALIGNMENT);

	return length;
}

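/*
 * Illustrative sketch, added for exposition (not in the original
 * source): assuming RB_EVNT_HDR_SIZE == 4 and RB_ARCH_ALIGNMENT == 4,
 * a request for 10 bytes of data becomes
 *
 *	ALIGN(10 + 4, 4) = 16 bytes
 *
 * reserved in the ring buffer.
 */
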
static inline void
rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
	      struct buffer_page *tail_page,
	      unsigned long tail, unsigned long length)
{
	struct ring_buffer_event *event;

	/*
	 * Only the event that crossed the page boundary
	 * must fill the old tail_page with padding.
	 */
	if (tail >= BUF_PAGE_SIZE) {
		/*
		 * If the page was filled, then we still need
		 * to update the real_end. Reset it to zero
		 * and the reader will ignore it.
		 */
		if (tail == BUF_PAGE_SIZE)
			tail_page->real_end = 0;

		local_sub(length, &tail_page->write);
		return;
	}

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);

	/* account for padding bytes */
	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);

	/*
	 * Save the original length to the meta data.
	 * This will be used by the reader to add lost event
	 * counter.
	 */
	tail_page->real_end = tail;

	/*
	 * If this event is bigger than the minimum size, then
	 * we need to be careful that we don't subtract the
	 * write counter enough to allow another writer to slip
	 * in on this page.
	 * We put in a discarded commit instead, to make sure
	 * that this space is not used again.
	 *
	 * If we are less than the minimum size, we don't need to
	 * worry about it.
	 */
	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
		/* No room for any events */

		/* Mark the rest of the page with padding */
		rb_event_set_padding(event);

		/* Set the write back to the previous setting */
		local_sub(length, &tail_page->write);
		return;
	}

	/* Put in a discarded event */
	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	event->time_delta = 1;

	/* Set write to end of buffer */
	length = (tail + length) - BUF_PAGE_SIZE;
	local_sub(length, &tail_page->write);
}

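/*
 * Illustrative sketch, added for exposition (not in the original
 * source): if an event would start at tail and spill past the page,
 * the (BUF_PAGE_SIZE - tail) bytes left on the old page are consumed
 * by one padding event whose array[0] holds the padded payload size:
 *
 *	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
 *
 * so a reader skips straight over the unused space to the next page.
 */
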
/*
 * This is the slow path, force gcc not to inline it.
 */
static noinline struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
	     unsigned long length, unsigned long tail,
	     struct buffer_page *tail_page, u64 ts)
{
	struct buffer_page *commit_page = cpu_buffer->commit_page;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct buffer_page *next_page;
	int ret;

	next_page = tail_page;

	rb_inc_page(cpu_buffer, &next_page);

	/*
	 * If for some reason, we had an interrupt storm that made
	 * it all the way around the buffer, bail, and warn
	 * about it.
	 */
	if (unlikely(next_page == commit_page)) {
		local_inc(&cpu_buffer->commit_overrun);
		goto out_reset;
	}

	/*
	 * This is where the fun begins!
	 *
	 * We are fighting against races between a reader that
	 * could be on another CPU trying to swap its reader
	 * page with the buffer head.
	 *
	 * We are also fighting against interrupts coming in and
	 * moving the head or tail on us as well.
	 *
	 * If the next page is the head page then we have filled
	 * the buffer, unless the commit page is still on the
	 * reader page.
	 */
	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {

		/*
		 * If the commit is not on the reader page, then
		 * move the header page.
		 */
		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
			/*
			 * If we are not in overwrite mode,
			 * this is easy, just stop here.
			 */
			if (!(buffer->flags & RB_FL_OVERWRITE)) {
				local_inc(&cpu_buffer->dropped_events);
				goto out_reset;
			}

			ret = rb_handle_head_page(cpu_buffer,
						  tail_page,
						  next_page);
			if (ret < 0)
				goto out_reset;
			if (ret)
				goto out_again;
		} else {
			/*
			 * We need to be careful here too. The
			 * commit page could still be on the reader
			 * page. We could have a small buffer, and
			 * have filled up the buffer with events
			 * from interrupts and such, and wrapped.
			 *
			 * Note, if the tail page is also on the
			 * reader_page, we let it move out.
			 */
			if (unlikely((cpu_buffer->commit_page !=
				      cpu_buffer->tail_page) &&
				     (cpu_buffer->commit_page ==
				      cpu_buffer->reader_page))) {
				local_inc(&cpu_buffer->commit_overrun);
				goto out_reset;
			}
		}
	}

	ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
	if (ret) {
		/*
		 * Nested commits always have zero deltas, so
		 * just reread the time stamp
		 */
		ts = rb_time_stamp(buffer);
		next_page->page->time_stamp = ts;
	}

 out_again:

	rb_reset_tail(cpu_buffer, tail_page, tail, length);

	/* fail and let the caller try again */
	return ERR_PTR(-EAGAIN);

 out_reset:
	/* reset write */
	rb_reset_tail(cpu_buffer, tail_page, tail, length);

	return NULL;
}

static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned long length, u64 ts,
		  u64 delta, int add_timestamp)
{
	struct buffer_page *tail_page;
	struct ring_buffer_event *event;
	unsigned long tail, write;

	/*
	 * If the time delta since the last event is too big to
	 * hold in the time field of the event, then we append a
	 * TIME EXTEND event ahead of the data event.
	 */
	if (unlikely(add_timestamp))
		length += RB_LEN_TIME_EXTEND;

	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);

	/* set write to only the index of the write */
	write &= RB_WRITE_MASK;
	tail = write - length;

	/*
	 * If this is the first commit on the page, then it has the same
	 * timestamp as the page itself.
	 */
	if (!tail)
		delta = 0;

	/* See if we shot past the end of this buffer page */
	if (unlikely(write > BUF_PAGE_SIZE))
		return rb_move_tail(cpu_buffer, length, tail,
				    tail_page, ts);

	/* We reserved something on the buffer */

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);
	rb_update_event(cpu_buffer, event, length, add_timestamp, delta);

	local_inc(&tail_page->entries);

	/*
	 * If this is the first commit on the page, then update
	 * its timestamp.
	 */
	if (!tail)
		tail_page->page->time_stamp = ts;

	/* account for these added bytes */
	local_add(length, &cpu_buffer->entries_bytes);

	return event;
}

static inline int
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
		  struct ring_buffer_event *event)
{
	unsigned long new_index, old_index;
	struct buffer_page *bpage;
	unsigned long index;
	unsigned long addr;

	new_index = rb_event_index(event);
	old_index = new_index + rb_event_ts_length(event);
	addr = (unsigned long)event;
	addr &= PAGE_MASK;

	bpage = cpu_buffer->tail_page;

	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
		unsigned long write_mask =
			local_read(&bpage->write) & ~RB_WRITE_MASK;
		unsigned long event_length = rb_event_length(event);
		/*
		 * This is on the tail page. It is possible that
		 * a write could come in and move the tail page
		 * and write to the next page. That is fine
		 * because we just shorten what is on this page.
		 */
		old_index += write_mask;
		new_index += write_mask;
		index = local_cmpxchg(&bpage->write, old_index, new_index);
		if (index == old_index) {
			/* update counters */
			local_sub(event_length, &cpu_buffer->entries_bytes);
			return 1;
		}
	}

	/* could not discard */
	return 0;
}

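/*
 * Illustrative sketch, added for exposition (not in the original
 * source): discarding the most recent event is an attempt to move the
 * write index back over it:
 *
 *	new_index = rb_event_index(event);
 *	old_index = new_index + rb_event_ts_length(event);
 *	if (local_cmpxchg(&bpage->write, old_index, new_index) == old_index)
 *		-> nothing was written after us; space is reclaimed
 *
 * If another event slipped in behind ours, the cmpxchg fails and the
 * event stays in place, already marked as padding.
 */
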
static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	local_inc(&cpu_buffer->committing);
	local_inc(&cpu_buffer->commits);
}

static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long commits;

	if (RB_WARN_ON(cpu_buffer,
		       !local_read(&cpu_buffer->committing)))
		return;

 again:
	commits = local_read(&cpu_buffer->commits);
	/* synchronize with interrupts */
	barrier();
	if (local_read(&cpu_buffer->committing) == 1)
		rb_set_commit_to_write(cpu_buffer);

	local_dec(&cpu_buffer->committing);

	/* synchronize with interrupts */
	barrier();

	/*
	 * Need to account for interrupts coming in between the
	 * updating of the commit page and the clearing of the
	 * committing counter.
	 */
	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
	    !local_read(&cpu_buffer->committing)) {
		local_inc(&cpu_buffer->committing);
		goto again;
	}
}

static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer *buffer,
		      struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta;
	int nr_loops = 0;
	int add_timestamp;
	u64 diff;

	rb_start_commit(cpu_buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	/*
	 * Due to the ability to swap a cpu buffer from a buffer
	 * it is possible it was swapped before we committed.
	 * (committing stops a swap). We check for it here and
	 * if it happened, we have to fail the write.
	 */
	barrier();
	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
		local_dec(&cpu_buffer->committing);
		local_dec(&cpu_buffer->commits);
		return NULL;
	}
#endif

	length = rb_calculate_event_length(length);
 again:
	add_timestamp = 0;
	delta = 0;

	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 * Bail!
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
		goto out_fail;

	ts = rb_time_stamp(cpu_buffer->buffer);
	diff = ts - cpu_buffer->write_stamp;

	/* make sure this diff is calculated here */
	barrier();

	/* Did the write stamp get updated already? */
	if (likely(ts >= cpu_buffer->write_stamp)) {
		delta = diff;
		if (unlikely(test_time_stamp(delta))) {
			int local_clock_stable = 1;
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
			local_clock_stable = sched_clock_stable();
#endif
			WARN_ONCE(delta > (1ULL << 59),
				  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
				  (unsigned long long)delta,
				  (unsigned long long)ts,
				  (unsigned long long)cpu_buffer->write_stamp,
				  local_clock_stable ? "" :
				  "If you just came from a suspend/resume,\n"
				  "please switch to the trace global clock:\n"
				  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
			add_timestamp = 1;
		}
	}

	event = __rb_reserve_next(cpu_buffer, length, ts,
				  delta, add_timestamp);
	if (unlikely(PTR_ERR(event) == -EAGAIN))
		goto again;

	if (!event)
		goto out_fail;

	return event;

 out_fail:
	rb_end_commit(cpu_buffer);
	return NULL;
}

#ifdef CONFIG_TRACING

/*
 * The lock and unlock are done within a preempt disable section.
 * The current_context per_cpu variable can only be modified
 * by the current task between lock and unlock. But it can
 * be modified more than once via an interrupt. To pass this
 * information from the lock to the unlock without having to
 * access the 'in_interrupt()' functions again (which do show
 * a bit of overhead in something as critical as function tracing),
 * we use a bitmask trick.
 *
 *  bit 0 = NMI context
 *  bit 1 = IRQ context
 *  bit 2 = SoftIRQ context
 *  bit 3 = normal context.
 *
 * This works because this is the order of contexts that can
 * preempt other contexts. A SoftIRQ never preempts an IRQ
 * context.
 *
 * When the context is determined, the corresponding bit is
 * checked and set (if it was set, then a recursion of that context
 * happened).
 *
 * On unlock, we need to clear this bit. To do so, just subtract
 * 1 from the current_context and AND it to itself.
 *
 * (binary)
 *  101 - 1 = 100
 *  101 & 100 = 100 (clearing bit zero)
 *
 *  1010 - 1 = 1001
 *  1010 & 1001 = 1000 (clearing bit 1)
 *
 * The least significant bit can be cleared this way, and it
 * just so happens that it is the same bit corresponding to
 * the current context.
 */
static DEFINE_PER_CPU(unsigned int, current_context);

static __always_inline int trace_recursive_lock(void)
{
	unsigned int val = this_cpu_read(current_context);
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	if (unlikely(val & (1 << bit)))
		return 1;

	val |= (1 << bit);
	this_cpu_write(current_context, val);

	return 0;
}

static __always_inline void trace_recursive_unlock(void)
{
	unsigned int val = this_cpu_read(current_context);

	val--;
	val &= this_cpu_read(current_context);
	this_cpu_write(current_context, val);
}

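/*
 * Illustrative sketch, added for exposition (not in the original
 * source): suppose normal context (bit 3) holds the lock and an IRQ
 * (bit 1) fires and traces:
 *
 *	lock:   val = 1000b, IRQ sets bit 1  ->  val = 1010b
 *	unlock: 1010b - 1 = 1001b, 1010b & 1001b = 1000b
 *
 * The AND with (val - 1) clears exactly the lowest set bit, which is
 * always the bit of the innermost (current) context.
 */
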
#else

#define trace_recursive_lock()		(0)
#define trace_recursive_unlock()	do { } while (0)

#endif

/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	preempt_disable_notrace();

	if (atomic_read(&buffer->record_disabled))
		goto out_nocheck;

	if (trace_recursive_lock())
		goto out_nocheck;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(buffer, cpu_buffer, length);
	if (!event)
		goto out;

	return event;

 out:
	trace_recursive_unlock();

 out_nocheck:
	preempt_enable_notrace();
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);

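/*
 * Illustrative sketch, added for exposition (not in the original
 * source; struct my_entry is a hypothetical payload): the canonical
 * reserve/commit pattern is
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event);
 */
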
static void
rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	u64 delta;

	/*
	 * The event first in the commit queue updates the
	 * time stamp.
	 */
	if (rb_event_is_commit(cpu_buffer, event)) {
		/*
		 * A commit event that is first on a page
		 * updates the write timestamp with the page stamp
		 */
		if (!rb_event_index(event))
			cpu_buffer->write_stamp =
				cpu_buffer->commit_page->page->time_stamp;
		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
			delta = event->array[0];
			delta <<= TS_SHIFT;
			delta += event->time_delta;
			cpu_buffer->write_stamp += delta;
		} else
			cpu_buffer->write_stamp += event->time_delta;
	}
}

static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	local_inc(&cpu_buffer->entries);
	rb_update_write_stamp(cpu_buffer, event);
	rb_end_commit(cpu_buffer);
}

static __always_inline void
rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
{
	if (buffer->irq_work.waiters_pending) {
		buffer->irq_work.waiters_pending = false;
		/* irq_work_queue() supplies its own memory barriers */
		irq_work_queue(&buffer->irq_work.work);
	}

	if (cpu_buffer->irq_work.waiters_pending) {
		cpu_buffer->irq_work.waiters_pending = false;
		/* irq_work_queue() supplies its own memory barriers */
		irq_work_queue(&cpu_buffer->irq_work.work);
	}
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	rb_wakeups(buffer, cpu_buffer);

	trace_recursive_unlock();

	preempt_enable_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);

static inline void rb_event_discard(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);

	/* array[0] holds the actual length for the discarded event */
	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	if (!event->time_delta)
		event->time_delta = 1;
}

/*
 * Decrement the entries to the page that an event is on.
 * The event does not even need to exist, only the pointer
 * to the page it is on. This may only be called before the commit
 * takes place.
 */
static inline void
rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	struct buffer_page *bpage = cpu_buffer->commit_page;
	struct buffer_page *start;

	addr &= PAGE_MASK;

	/* Do the likely case first */
	if (likely(bpage->page == (void *)addr)) {
		local_dec(&bpage->entries);
		return;
	}

	/*
	 * Because the commit page may be on the reader page we
	 * start with the next page and check the end loop there.
	 */
	rb_inc_page(cpu_buffer, &bpage);
	start = bpage;
	do {
		if (bpage->page == (void *)addr) {
			local_dec(&bpage->entries);
			return;
		}
		rb_inc_page(cpu_buffer, &bpage);
	} while (bpage != start);

	/* commit not part of this buffer?? */
	RB_WARN_ON(cpu_buffer, 1);
}

/**
 * ring_buffer_discard_commit - discard an event that has not been committed
 * @buffer: the ring buffer
 * @event: non committed event to discard
 *
 * Sometimes an event that is in the ring buffer needs to be ignored.
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
 * This function only works if it is called before the item has been
 * committed. It will try to free the event from the ring buffer
 * if another event has not been added behind it.
 *
 * If another event has been added behind it, it will set the event
 * up as discarded, and perform the commit.
 *
 * If this function is called, do not call ring_buffer_unlock_commit on
 * the event.
 */
void ring_buffer_discard_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* The event is discarded regardless */
	rb_event_discard(event);

	cpu = smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];

	/*
	 * This must only be called if the event has not been
	 * committed yet. Thus we can assume that preemption
	 * is still disabled.
	 */
	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));

	rb_decrement_entry(cpu_buffer, event);
	if (rb_try_to_discard(cpu_buffer, event))
		goto out;

	/*
	 * The commit is still visible by the reader, so we
	 * must still update the timestamp.
	 */
	rb_update_write_stamp(cpu_buffer, event);
 out:
	rb_end_commit(cpu_buffer);

	trace_recursive_unlock();

	preempt_enable_notrace();
}
EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);

/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	void *body;
	int ret = -EBUSY;
	int cpu;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return -EBUSY;

	preempt_disable_notrace();

	if (atomic_read(&buffer->record_disabled))
		goto out;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(buffer, cpu_buffer, length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	rb_wakeups(buffer, cpu_buffer);

	ret = 0;
 out:
	preempt_enable_notrace();

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_write);

static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = rb_set_head_page(cpu_buffer);
	struct buffer_page *commit = cpu_buffer->commit_page;

	/* In case of error, head will be NULL */
	if (unlikely(!head))
		return 1;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}

/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable);

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable);

/**
 * ring_buffer_record_off - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * This is different than ring_buffer_record_disable() as
 * it works like an on/off switch, whereas the disable() version
 * must be paired with an enable().
 */
void ring_buffer_record_off(struct ring_buffer *buffer)
{
	unsigned int rd;
	unsigned int new_rd;

	do {
		rd = atomic_read(&buffer->record_disabled);
		new_rd = rd | RB_BUFFER_OFF;
	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_off);

/**
 * ring_buffer_record_on - restart writes into the buffer
 * @buffer: The ring buffer to start writes to.
 *
 * This enables all writes to the buffer that was disabled by
 * ring_buffer_record_off().
 *
 * This is different than ring_buffer_record_enable() as
 * it works like an on/off switch, whereas the enable() version
 * must be paired with a disable().
 */
void ring_buffer_record_on(struct ring_buffer *buffer)
{
	unsigned int rd;
	unsigned int new_rd;

	do {
		rd = atomic_read(&buffer->record_disabled);
		new_rd = rd & ~RB_BUFFER_OFF;
	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_on);

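/*
 * Illustrative sketch, added for exposition (not in the original
 * source): the cmpxchg loops above toggle only the RB_BUFFER_OFF bit,
 * leaving the nested-disable count in the low bits untouched, so
 *
 *	ring_buffer_record_disable(buffer);	count = 1
 *	ring_buffer_record_off(buffer);		count = 1, OFF set
 *	ring_buffer_record_enable(buffer);	count = 0, OFF still set
 *
 * keeps writes blocked until ring_buffer_record_on() is called.
 */
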
/**
 * ring_buffer_record_is_on - return true if the ring buffer can write
 * @buffer: The ring buffer to see if write is enabled
 *
 * Returns true if the ring buffer is in a state that it accepts writes.
 */
int ring_buffer_record_is_on(struct ring_buffer *buffer)
{
	return !atomic_read(&buffer->record_disabled);
}

/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);

/*
 * The total entries in the ring buffer is the running counter
 * of entries entered into the ring buffer, minus the sum of
 * the entries read from the ring buffer and the number of
 * entries that were overwritten.
 */
static inline unsigned long
rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
{
	return local_read(&cpu_buffer->entries) -
		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
}

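/*
 * Illustrative sketch, added for exposition (not in the original
 * source): if 1000 events were written, the writer overwrote 200
 * while wrapping, and the reader has consumed 300, then
 *
 *	1000 - (200 + 300) = 500
 *
 * entries remain to be read.
 */
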
/**
 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to read from.
 */
u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
{
	unsigned long flags;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	u64 ret = 0;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	/*
	 * if the tail is on reader_page, oldest time stamp is on the reader
	 * page
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		bpage = cpu_buffer->reader_page;
	else
		bpage = rb_set_head_page(cpu_buffer);
	if (bpage)
		ret = bpage->page->time_stamp;
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);

/**
 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to read from.
 */
unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);

/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];

	return rb_num_of_entries(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);

/**
 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->overrun);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);

/**
 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
 * commits failing due to the buffer wrapping around while there are uncommitted
 * events, such as during an interrupt storm.
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long
ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->commit_overrun);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);

/**
 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long
ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->dropped_events);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);

/**
 * ring_buffer_read_events_cpu - get the number of events successfully read
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of events read
 */
unsigned long
ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	return cpu_buffer->read;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);

/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += rb_num_of_entries(cpu_buffer);
	}

	return entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries);

/**
 * ring_buffer_overruns - get the number of overruns in buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		overruns += local_read(&cpu_buffer->overrun);
	}

	return overruns;
}
EXPORT_SYMBOL_GPL(ring_buffer_overruns);

static void rb_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Iterator usage is expected to have record disabled */
	iter->head_page = cpu_buffer->reader_page;
	iter->head = cpu_buffer->reader_page->read;

	iter->cache_reader_page = iter->head_page;
	iter->cache_read = iter->head;

	if (iter->head)
		iter->read_stamp = cpu_buffer->read_stamp;
	else
		iter->read_stamp = iter->head_page->page->time_stamp;
}

/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_iter_reset(iter);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);

/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->commit_page &&
		iter->head == rb_commit_index(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);

static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		     struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		cpu_buffer->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		cpu_buffer->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

d769041f
SR
3470static struct buffer_page *
3471rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
7a8e76a3 3472{
d769041f 3473 struct buffer_page *reader = NULL;
66a8cb95 3474 unsigned long overwrite;
d769041f 3475 unsigned long flags;
818e3dd3 3476 int nr_loops = 0;
77ae365e 3477 int ret;
d769041f 3478
3e03fb7f 3479 local_irq_save(flags);
0199c4e6 3480 arch_spin_lock(&cpu_buffer->lock);
d769041f
SR
3481
3482 again:
818e3dd3
SR
3483 /*
3484 * This should normally only loop twice. But because the
3485 * start of the reader inserts an empty page, it causes
3486 * a case where we will loop three times. There should be no
3487 * reason to loop four times (that I know of).
3488 */
3e89c7bb 3489 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
818e3dd3
SR
3490 reader = NULL;
3491 goto out;
3492 }
3493
d769041f
SR
3494 reader = cpu_buffer->reader_page;
3495
3496 /* If there's more to read, return this page */
bf41a158 3497 if (cpu_buffer->reader_page->read < rb_page_size(reader))
d769041f
SR
3498 goto out;
3499
3500 /* Never should we have an index greater than the size */
3e89c7bb
SR
3501 if (RB_WARN_ON(cpu_buffer,
3502 cpu_buffer->reader_page->read > rb_page_size(reader)))
3503 goto out;
d769041f
SR
3504
3505 /* check if we caught up to the tail */
3506 reader = NULL;
bf41a158 3507 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
d769041f 3508 goto out;
7a8e76a3 3509
a5fb8331
SR
3510 /* Don't bother swapping if the ring buffer is empty */
3511 if (rb_num_of_entries(cpu_buffer) == 0)
3512 goto out;
3513
7a8e76a3 3514 /*
d769041f 3515 * Reset the reader page to size zero.
7a8e76a3 3516 */
77ae365e
SR
3517 local_set(&cpu_buffer->reader_page->write, 0);
3518 local_set(&cpu_buffer->reader_page->entries, 0);
3519 local_set(&cpu_buffer->reader_page->page->commit, 0);
ff0ff84a 3520 cpu_buffer->reader_page->real_end = 0;
7a8e76a3 3521
77ae365e
SR
3522 spin:
3523 /*
3524 * Splice the empty reader page into the list around the head.
3525 */
3526 reader = rb_set_head_page(cpu_buffer);
54f7be5b
SR
3527 if (!reader)
3528 goto out;
0e1ff5d7 3529 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
d769041f 3530 cpu_buffer->reader_page->list.prev = reader->list.prev;
bf41a158 3531
3adc54fa
SR
3532 /*
3533 * cpu_buffer->pages just needs to point to the buffer, it
3534 * has no specific buffer page to point to. Lets move it out
25985edc 3535 * of our way so we don't accidentally swap it.
3adc54fa
SR
3536 */
3537 cpu_buffer->pages = reader->list.prev;
3538
77ae365e
SR
3539 /* The reader page will be pointing to the new head */
3540 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
7a8e76a3 3541
66a8cb95
SR
3542 /*
3543 * We want to make sure we read the overruns after we set up our
3544 * pointers to the next object. The writer side does a
3545 * cmpxchg to cross pages which acts as the mb on the writer
3546 * side. Note, the reader will constantly fail the swap
3547 * while the writer is updating the pointers, so this
3548 * guarantees that the overwrite recorded here is the one we
3549 * want to compare with the last_overrun.
3550 */
3551 smp_mb();
3552 overwrite = local_read(&(cpu_buffer->overrun));
3553
77ae365e
SR
3554 /*
3555 * Here's the tricky part.
3556 *
3557 * We need to move the pointer past the header page.
3558 * But we can only do that if a writer is not currently
3559 * moving it. The page before the header page has the
3560 * flag bit '1' set if it is pointing to the page we want.
3561 * but if the writer is in the process of moving it
3562 * than it will be '2' or already moved '0'.
3563 */
3564
3565 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
7a8e76a3
SR
3566
3567 /*
77ae365e 3568 * If we did not convert it, then we must try again.
7a8e76a3 3569 */
77ae365e
SR
3570 if (!ret)
3571 goto spin;
7a8e76a3 3572
77ae365e
SR
3573 /*
3574 * Yeah! We succeeded in replacing the page.
3575 *
3576 * Now make the new head point back to the reader page.
3577 */
5ded3dc6 3578 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
77ae365e 3579 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
d769041f
SR
3580
3581 /* Finally update the reader page to the new head */
3582 cpu_buffer->reader_page = reader;
3583 rb_reset_reader_page(cpu_buffer);
3584
66a8cb95
SR
3585 if (overwrite != cpu_buffer->last_overrun) {
3586 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3587 cpu_buffer->last_overrun = overwrite;
3588 }
3589
d769041f
SR
3590 goto again;
3591
3592 out:
0199c4e6 3593 arch_spin_unlock(&cpu_buffer->lock);
3e03fb7f 3594 local_irq_restore(flags);
d769041f
SR
3595
3596 return reader;
3597}
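
/*
 * Editorial sketch (not in the original file): the "flag bit" dance in
 * rb_get_reader_page() refers to the state encoded in the low bits of
 * the list.next pointer of the page before the head:
 *
 *	RB_PAGE_NORMAL (0) - not pointing at the head page
 *	RB_PAGE_HEAD   (1) - pointing at the current head page
 *	RB_PAGE_UPDATE (2) - a writer is moving the head right now
 *
 * rb_head_page_replace() is a cmpxchg that only succeeds while the flag
 * is still HEAD; if a writer has flipped it to UPDATE (or finished the
 * move, back to NORMAL), the reader takes the "goto spin" path and
 * retries, so the reader and writer can never both own the head page.
 */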

static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	unsigned length;

	reader = rb_get_reader_page(cpu_buffer);

	/* This function should not be called when buffer is empty */
	if (RB_WARN_ON(cpu_buffer, !reader))
		return;

	event = rb_reader_event(cpu_buffer);

	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		cpu_buffer->read++;

	rb_update_read_stamp(cpu_buffer, event);

	length = rb_event_length(event);
	cpu_buffer->reader_page->read += length;
}

static void rb_advance_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned length;

	cpu_buffer = iter->cpu_buffer;

	/*
	 * Check if we are at the end of the buffer.
	 */
	if (iter->head >= rb_page_size(iter->head_page)) {
		/* discarded commits can make the page empty */
		if (iter->head_page == cpu_buffer->commit_page)
			return;
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the header if we are
	 * at the tail of the buffer.
	 */
	if (RB_WARN_ON(cpu_buffer,
		       (iter->head_page == cpu_buffer->commit_page) &&
		       (iter->head + length > rb_commit_index(cpu_buffer))))
		return;

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= rb_page_size(iter->head_page)) &&
	    (iter->head_page != cpu_buffer->commit_page))
		rb_inc_iter(iter);
}

static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
{
	return cpu_buffer->lost_events;
}

static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
	       unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	int nr_loops = 0;

 again:
	/*
	 * We repeat when a time extend is encountered.
	 * Since the time extend is always attached to a data event,
	 * we should never loop more than once.
	 * (We never hit the following condition more than twice).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
		return NULL;

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			RB_WARN_ON(cpu_buffer, 1);
		/*
		 * Because the writer could be discarding every
		 * event it creates (which would probably be bad)
		 * if we were to go back to "again" then we may never
		 * catch up, and will trigger the warn on, or lock
		 * the box. Return the padding, and we will release
		 * the current locks, and try again.
		 */
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
							 cpu_buffer->cpu, ts);
		}
		if (lost_events)
			*lost_events = rb_lost_events(cpu_buffer);
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_peek);

static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int nr_loops = 0;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if someone performed a consuming read to
	 * the buffer. A consuming read invalidates the iterator
	 * and we need to reset the iterator in this case.
	 */
	if (unlikely(iter->cache_read != cpu_buffer->read ||
		     iter->cache_reader_page != cpu_buffer->reader_page))
		rb_iter_reset(iter);

 again:
	if (ring_buffer_iter_empty(iter))
		return NULL;

	/*
	 * We repeat when a time extend is encountered or we hit
	 * the end of the page. Since the time extend is always attached
	 * to a data event, we should never loop more than three times.
	 * Once for going to the next page, once on a time extend, and
	 * finally once to get the event.
	 * (We never hit the following condition more than thrice).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
		return NULL;

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	if (iter->head >= rb_page_size(iter->head_page)) {
		rb_inc_iter(iter);
		goto again;
	}

	event = rb_iter_head_event(iter);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event)) {
			rb_inc_iter(iter);
			goto again;
		}
		rb_advance_iter(iter);
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

static inline int rb_ok_to_lock(void)
{
	/*
	 * If an NMI die dumps out the content of the ring buffer
	 * do not grab locks. We also permanently disable the ring
	 * buffer. A one time deal is all you get from reading
	 * the ring buffer from an NMI.
	 */
	if (likely(!in_nmi()))
		return 1;

	tracing_off_permanent();
	return 0;
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;
	int dolock;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	dolock = rb_ok_to_lock();
 again:
	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&cpu_buffer->reader_lock);
	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		rb_advance_reader(cpu_buffer);
	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

 again:
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the timestamp (may be NULL)
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event = NULL;
	unsigned long flags;
	int dolock;

	dolock = rb_ok_to_lock();

 again:
	/* might be called in atomic */
	preempt_disable();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&cpu_buffer->reader_lock);

	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
	if (event) {
		cpu_buffer->lost_events = 0;
		rb_advance_reader(cpu_buffer);
	}

	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

 out:
	preempt_enable();

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
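
/*
 * Editorial sketch (not in the original file): a minimal consuming read
 * loop built on the API above; handle_record() is a hypothetical
 * caller-side helper:
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *		if (lost)
 *			pr_warn("lost %lu events on cpu %d\n", lost, cpu);
 *		handle_record(ring_buffer_event_data(event),
 *			      ring_buffer_event_length(event), ts);
 *	}
 *
 * Every successful call consumes one event, so a reader that keeps up
 * with the writer eventually drains the per-CPU buffer.
 */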

/**
 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This performs the initial preparations necessary to iterate
 * through the buffer. Memory is allocated, buffer recording
 * is disabled, and the iterator pointer is returned to the caller.
 *
 * Disabling buffer recording prevents the reading from being
 * corrupted. This is not a consuming read, so a producer is not
 * expected.
 *
 * After a sequence of ring_buffer_read_prepare calls, the user is
 * expected to make at least one call to ring_buffer_read_prepare_sync.
 * Afterwards, ring_buffer_read_start is invoked to get things going
 * for real.
 *
 * This overall must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&buffer->resize_disabled);
	atomic_inc(&cpu_buffer->record_disabled);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);

/**
 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
 *
 * All previously invoked ring_buffer_read_prepare calls to prepare
 * iterators will be synchronized. Afterwards, ring_buffer_read_start
 * calls on those iterators are allowed.
 */
void
ring_buffer_read_prepare_sync(void)
{
	synchronize_sched();
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);

/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @iter: The iterator returned by ring_buffer_read_prepare
 *
 * This finalizes the startup of an iteration through the buffer.
 * The iterator comes from a call to ring_buffer_read_prepare and
 * an intervening ring_buffer_read_prepare_sync must have been
 * performed.
 *
 * Must be paired with ring_buffer_read_finish.
 */
void
ring_buffer_read_start(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	arch_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	arch_spin_unlock(&cpu_buffer->lock);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_prepare
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	/*
	 * Ring buffer is disabled from recording, here's a good place
	 * to check the integrity of the ring buffer.
	 * Must prevent readers from trying to read, as the check
	 * clears the HEAD page and readers require it.
	 */
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_check_pages(cpu_buffer);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
	atomic_dec(&cpu_buffer->buffer->resize_disabled);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 again:
	event = rb_iter_peek(iter, ts);
	if (!event)
		goto out;

	if (event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	rb_advance_iter(iter);
 out:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
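
/*
 * Editorial sketch (not in the original file): the full non-consuming
 * read sequence strings the calls above together; process_event() is a
 * hypothetical caller-side helper:
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu);
 *	if (!iter)
 *		return -ENOMEM;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		process_event(event, ts);
 *
 *	ring_buffer_read_finish(iter);
 *
 * Recording on that CPU buffer stays disabled from prepare to finish,
 * so the iteration sees a stable view without consuming any events.
 */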

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 * @cpu: The CPU to get the ring buffer size of.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
{
	/*
	 * Earlier, this method returned
	 *	BUF_PAGE_SIZE * buffer->nr_pages
	 * Since the nr_pages field is now removed, we have converted this to
	 * return the per cpu buffer value.
	 */
	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);
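
/*
 * Editorial note (not in the original file): because the size is now a
 * per-CPU property, a caller that wants the old whole-buffer figure has
 * to sum the per-CPU values itself, e.g.:
 *
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		total += ring_buffer_size(buffer, cpu);
 *
 * CPUs that are not in the buffer's cpumask contribute 0 to the sum.
 */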

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	rb_head_page_deactivate(cpu_buffer);

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->entries, 0);
	local_set(&cpu_buffer->head_page->page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	local_set(&cpu_buffer->entries_bytes, 0);
	local_set(&cpu_buffer->overrun, 0);
	local_set(&cpu_buffer->commit_overrun, 0);
	local_set(&cpu_buffer->dropped_events, 0);
	local_set(&cpu_buffer->entries, 0);
	local_set(&cpu_buffer->committing, 0);
	local_set(&cpu_buffer->commits, 0);
	cpu_buffer->read = 0;
	cpu_buffer->read_bytes = 0;

	cpu_buffer->write_stamp = 0;
	cpu_buffer->read_stamp = 0;

	cpu_buffer->lost_events = 0;
	cpu_buffer->last_overrun = 0;

	rb_head_page_activate(cpu_buffer);
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	atomic_inc(&buffer->resize_disabled);
	atomic_inc(&cpu_buffer->record_disabled);

	/* Make sure all commits have finished */
	synchronize_sched();

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
		goto out;

	arch_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	arch_spin_unlock(&cpu_buffer->lock);

 out:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
	atomic_dec(&buffer->resize_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int cpu;
	int ret;

	dolock = rb_ok_to_lock();

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		local_irq_save(flags);
		if (dolock)
			raw_spin_lock(&cpu_buffer->reader_lock);
		ret = rb_per_cpu_empty(cpu_buffer);
		if (dolock)
			raw_spin_unlock(&cpu_buffer->reader_lock);
		local_irq_restore(flags);

		if (!ret)
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	dolock = rb_ok_to_lock();

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&cpu_buffer->reader_lock);
	ret = rb_per_cpu_empty(cpu_buffer);
	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU buffer to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	/* At least make sure the two buffers are somewhat the same */
	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
		goto out;

	ret = -EAGAIN;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		goto out;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	ret = -EBUSY;
	if (local_read(&cpu_buffer_a->committing))
		goto out_dec;
	if (local_read(&cpu_buffer_b->committing))
		goto out_dec;

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	ret = 0;

 out_dec:
	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);
 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
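
/*
 * Editorial sketch (not in the original file): the typical user of
 * ring_buffer_swap_cpu() is a snapshot-style tracer that keeps a spare
 * buffer of the same size; main_buffer, snapshot_buffer and
 * read_snapshot() are hypothetical caller-side names:
 *
 *	int err;
 *
 *	err = ring_buffer_swap_cpu(main_buffer, snapshot_buffer, cpu);
 *	if (!err)
 *		read_snapshot(snapshot_buffer, cpu);
 *
 * The error codes matter to such a caller: -EINVAL means the buffers
 * are not the same size, -EAGAIN means recording was disabled and the
 * swap may be retried, and -EBUSY means a writer was mid-commit on
 * that CPU.
 */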

/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 * @cpu: the cpu buffer to allocate.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
{
	struct buffer_data_page *bpage;
	struct page *page;

	page = alloc_pages_node(cpu_to_node(cpu),
				GFP_KERNEL | __GFP_NORETRY, 0);
	if (!page)
		return NULL;

	bpage = page_address(page);

	rb_init_page(bpage);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
	free_page((unsigned long)data);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);

/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * When @full is set, the function will not succeed unless
 * the writer is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long missed_events;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we can not copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page)
		goto out;

	bpage = *data_page;
	if (!bpage)
		goto out;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_commit(reader);

	/* Check if any events were dropped */
	missed_events = cpu_buffer->lost_events;

	/*
	 * If this page has been partially read or
	 * if len is not big enough to read the rest of the page or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		if (full)
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		/* Always keep the time extend and data together */
		size = rb_event_ts_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			/*
			 * We need the size of one event, because
			 * rb_advance_reader only advances by one event,
			 * whereas rb_event_ts_length may include the size of
			 * one or two events.
			 * We have already ensured there's enough space if this
			 * is a time extend.
			 */
			size = rb_event_length(event);
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			if (rpos >= commit)
				break;

			event = rb_reader_event(cpu_buffer);
			/* Always keep the time extend and data together */
			size = rb_event_ts_length(event);
		} while (len >= size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += rb_page_entries(reader);
		cpu_buffer->read_bytes += BUF_PAGE_SIZE;

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;

		/*
		 * Use the real_end for the data size.
		 * This gives us a chance to store the lost events
		 * on the page.
		 */
		if (reader->real_end)
			local_set(&bpage->commit, reader->real_end);
	}
	ret = read;

	cpu_buffer->lost_events = 0;

	commit = local_read(&bpage->commit);
	/*
	 * Set a flag in the commit field if we lost events
	 */
	if (missed_events) {
		/*
		 * If there is room at the end of the page to save the
		 * missed events, then record it there.
		 */
		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
			memcpy(&bpage->data[commit], &missed_events,
			       sizeof(missed_events));
			local_add(RB_MISSED_STORED, &bpage->commit);
			commit += sizeof(missed_events);
		}
		local_add(RB_MISSED_EVENTS, &bpage->commit);
	}

	/*
	 * This page may be off to user land. Zero it out here.
	 */
	if (commit < BUF_PAGE_SIZE)
		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);

 out_unlock:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
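
/*
 * Editorial note (not in the original file): a consumer of
 * ring_buffer_read_page() can detect the lost-event encoding performed
 * above. RB_MISSED_EVENTS and RB_MISSED_STORED are flag bits folded
 * into the high bits of the returned page's commit field:
 *
 *	unsigned long commit = local_read(&bpage->commit);
 *	bool missed = commit & RB_MISSED_EVENTS;
 *	bool stored = commit & RB_MISSED_STORED;
 *
 * When RB_MISSED_STORED is also set, the unsigned long count of missed
 * events was appended directly after the event data, exactly as the
 * memcpy() above stores it; the in-kernel consumer of this layout is
 * the tracing code that splices buffer pages to user space.
 */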

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;
	int cpu_i, nr_pages_same;
	unsigned int nr_pages;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpumask_test_cpu(cpu, buffer->cpumask))
			return NOTIFY_OK;

		nr_pages = 0;
		nr_pages_same = 1;
		/* check if all cpu sizes are same */
		for_each_buffer_cpu(buffer, cpu_i) {
			/* fill in the size from first enabled cpu */
			if (nr_pages == 0)
				nr_pages = buffer->buffers[cpu_i]->nr_pages;
			if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
				nr_pages_same = 0;
				break;
			}
		}
		/* allocate minimum pages, user can later expand it */
		if (!nr_pages_same)
			nr_pages = 2;
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpumask_set_cpu(cpu, buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 * If we were to free the buffer, then the user would
		 * lose any trace that was in the buffer.
		 */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif

#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
/*
 * This is a basic integrity check of the ring buffer.
 * Late in the boot cycle this test will run when configured in.
 * It will kick off a thread per CPU that will go into a loop
 * writing to the per cpu ring buffer various sizes of data.
 * Some of the data will be large items, some small.
 *
 * Another thread is created that goes into a spin, sending out
 * IPIs to the other CPUs to also write into the ring buffer.
 * This is to test the nesting ability of the buffer.
 *
 * Basic stats are recorded and reported. If something in the
 * ring buffer should happen that's not expected, a big warning
 * is displayed and all ring buffers are disabled.
 */
static struct task_struct *rb_threads[NR_CPUS] __initdata;

struct rb_test_data {
	struct ring_buffer *buffer;
	unsigned long events;
	unsigned long bytes_written;
	unsigned long bytes_alloc;
	unsigned long bytes_dropped;
	unsigned long events_nested;
	unsigned long bytes_written_nested;
	unsigned long bytes_alloc_nested;
	unsigned long bytes_dropped_nested;
	int min_size_nested;
	int max_size_nested;
	int max_size;
	int min_size;
	int cpu;
	int cnt;
};

static struct rb_test_data rb_data[NR_CPUS] __initdata;

/* 1 meg per cpu */
#define RB_TEST_BUFFER_SIZE	1048576

static char rb_string[] __initdata =
	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";

static bool rb_test_started __initdata;

struct rb_item {
	int size;
	char str[];
};

static __init int rb_write_something(struct rb_test_data *data, bool nested)
{
	struct ring_buffer_event *event;
	struct rb_item *item;
	bool started;
	int event_len;
	int size;
	int len;
	int cnt;

	/* Have nested writes different from what is written */
	cnt = data->cnt + (nested ? 27 : 0);

	/* Multiply cnt by ~e, to make some unique increment */
	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);

	len = size + sizeof(struct rb_item);

	started = rb_test_started;
	/* read rb_test_started before checking buffer enabled */
	smp_rmb();

	event = ring_buffer_lock_reserve(data->buffer, len);
	if (!event) {
		/* Ignore dropped events before test starts. */
		if (started) {
			if (nested)
				data->bytes_dropped_nested += len;
			else
				data->bytes_dropped += len;
		}
		return len;
	}

	event_len = ring_buffer_event_length(event);

	if (RB_WARN_ON(data->buffer, event_len < len))
		goto out;

	item = ring_buffer_event_data(event);
	item->size = size;
	memcpy(item->str, rb_string, size);

	if (nested) {
		data->bytes_alloc_nested += event_len;
		data->bytes_written_nested += len;
		data->events_nested++;
		if (!data->min_size_nested || len < data->min_size_nested)
			data->min_size_nested = len;
		if (len > data->max_size_nested)
			data->max_size_nested = len;
	} else {
		data->bytes_alloc += event_len;
		data->bytes_written += len;
		data->events++;
		if (!data->min_size || len < data->min_size)
			data->min_size = len;
		if (len > data->max_size)
			data->max_size = len;
	}

 out:
	ring_buffer_unlock_commit(data->buffer, event);

	return 0;
}
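
/*
 * Editorial note (not in the original file): 68/25 = 2.72 is a cheap
 * integer approximation of e ~= 2.71828, which is what "~e" in the
 * comment above means. For instance, cnt == 10 gives
 * 10 * 68 / 25 = 27 bytes of payload before the modulus, and the
 * irrational-looking multiplier makes successive sizes walk the
 * 0..sizeof(rb_string)-2 range without an obvious short period. The
 * nested offset of 27 serves the same purpose: it desynchronizes the
 * sizes the IPI (nested) writers pick from those of the looping writer.
 */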

static __init int rb_test(void *arg)
{
	struct rb_test_data *data = arg;

	while (!kthread_should_stop()) {
		rb_write_something(data, false);
		data->cnt++;

		set_current_state(TASK_INTERRUPTIBLE);
		/* Now sleep between a min of 100-300us and a max of 1ms */
		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
	}

	return 0;
}

static __init void rb_ipi(void *ignore)
{
	struct rb_test_data *data;
	int cpu = smp_processor_id();

	data = &rb_data[cpu];
	rb_write_something(data, true);
}

static __init int rb_hammer_test(void *arg)
{
	while (!kthread_should_stop()) {

		/* Send an IPI to all cpus to write data! */
		smp_call_function(rb_ipi, NULL, 1);
		/* No sleep, but for non preempt, let others run */
		schedule();
	}

	return 0;
}

static __init int test_ringbuffer(void)
{
	struct task_struct *rb_hammer;
	struct ring_buffer *buffer;
	int cpu;
	int ret = 0;

	pr_info("Running ring buffer tests...\n");

	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
	if (WARN_ON(!buffer))
		return 0;

	/* Disable buffer so that threads can't write to it yet */
	ring_buffer_record_off(buffer);

	for_each_online_cpu(cpu) {
		rb_data[cpu].buffer = buffer;
		rb_data[cpu].cpu = cpu;
		rb_data[cpu].cnt = cpu;
		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
						 "rbtester/%d", cpu);
		if (WARN_ON(!rb_threads[cpu])) {
			pr_cont("FAILED\n");
			ret = -1;
			goto out_free;
		}

		kthread_bind(rb_threads[cpu], cpu);
		wake_up_process(rb_threads[cpu]);
	}

	/* Now create the rb hammer! */
	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
	if (WARN_ON(!rb_hammer)) {
		pr_cont("FAILED\n");
		ret = -1;
		goto out_free;
	}

	ring_buffer_record_on(buffer);
	/*
	 * Show the buffer is enabled before setting rb_test_started.
	 * Yes there's a small race window where events could be
	 * dropped and the thread won't catch it. But when a ring
	 * buffer gets enabled, there will always be some kind of
	 * delay before other CPUs see it. Thus, we don't care about
	 * those dropped events. We care about events dropped after
	 * the threads see that the buffer is active.
	 */
	smp_wmb();
	rb_test_started = true;

	set_current_state(TASK_INTERRUPTIBLE);
	/* Just run for 10 seconds */
	schedule_timeout(10 * HZ);

	kthread_stop(rb_hammer);

 out_free:
	for_each_online_cpu(cpu) {
		if (!rb_threads[cpu])
			break;
		kthread_stop(rb_threads[cpu]);
	}
	if (ret) {
		ring_buffer_free(buffer);
		return ret;
	}

	/* Report! */
	pr_info("finished\n");
	for_each_online_cpu(cpu) {
		struct ring_buffer_event *event;
		struct rb_test_data *data = &rb_data[cpu];
		struct rb_item *item;
		unsigned long total_events;
		unsigned long total_dropped;
		unsigned long total_written;
		unsigned long total_alloc;
		unsigned long total_read = 0;
		unsigned long total_size = 0;
		unsigned long total_len = 0;
		unsigned long total_lost = 0;
		unsigned long lost;
		int big_event_size;
		int small_event_size;

		ret = -1;

		total_events = data->events + data->events_nested;
		total_written = data->bytes_written + data->bytes_written_nested;
		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;

		big_event_size = data->max_size + data->max_size_nested;
		small_event_size = data->min_size + data->min_size_nested;

		pr_info("CPU %d:\n", cpu);
		pr_info("  events:         %ld\n", total_events);
		pr_info("  dropped bytes:  %ld\n", total_dropped);
		pr_info("  alloced bytes:  %ld\n", total_alloc);
		pr_info("  written bytes:  %ld\n", total_written);
		pr_info("  biggest event:  %d\n", big_event_size);
		pr_info("  smallest event: %d\n", small_event_size);

		if (RB_WARN_ON(buffer, total_dropped))
			break;

		ret = 0;

		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
			total_lost += lost;
			item = ring_buffer_event_data(event);
			total_len += ring_buffer_event_length(event);
			total_size += item->size + sizeof(struct rb_item);
			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
				pr_info("FAILED!\n");
				pr_info("buffer had: %.*s\n", item->size, item->str);
				pr_info("expected:   %.*s\n", item->size, rb_string);
				RB_WARN_ON(buffer, 1);
				ret = -1;
				break;
			}
			total_read++;
		}
		if (ret)
			break;

		ret = -1;

		pr_info("  read events:         %ld\n", total_read);
		pr_info("  lost events:         %ld\n", total_lost);
		pr_info("  total events:        %ld\n", total_lost + total_read);
		pr_info("  recorded len bytes:  %ld\n", total_len);
		pr_info("  recorded size bytes: %ld\n", total_size);
		if (total_lost)
			pr_info("  With dropped events, record len and size may not match\n"
				"  alloced and written from above\n");
		if (!total_lost) {
			if (RB_WARN_ON(buffer, total_len != total_alloc ||
				       total_size != total_written))
				break;
		}
		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
			break;

		ret = 0;
	}
	if (!ret)
		pr_info("Ring buffer PASSED!\n");

	ring_buffer_free(buffer);
	return 0;
}

late_initcall(test_ringbuffer);
#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */