| 1 | /** |
| 2 | * @file cpu_buffer.h |
| 3 | * |
| 4 | * @remark Copyright 2002-2009 OProfile authors |
| 5 | * @remark Read the file COPYING |
| 6 | * |
| 7 | * @author John Levon <levon@movementarian.org> |
| 8 | * @author Robert Richter <robert.richter@amd.com> |
| 9 | */ |
| 10 | |
| 11 | #ifndef OPROFILE_CPU_BUFFER_H |
| 12 | #define OPROFILE_CPU_BUFFER_H |
| 13 | |
| 14 | #include <linux/types.h> |
| 15 | #include <linux/spinlock.h> |
| 16 | #include <linux/workqueue.h> |
| 17 | #include <linux/cache.h> |
| 18 | #include <linux/sched.h> |
| 19 | #include <linux/ring_buffer.h> |
| 20 | |
| 21 | struct task_struct; |
| 22 | |
/* Allocate / free the per-CPU sample buffers for a profiling session.
 * NOTE(review): alloc_cpu_buffers() presumably returns 0 on success and
 * a negative errno on failure (kernel convention) -- confirm in
 * cpu_buffer.c. */
int alloc_cpu_buffers(void);
void free_cpu_buffers(void);

/* Start / stop / flush the per-CPU delayed work items ('work' member of
 * struct oprofile_cpu_buffer below) that periodically drain the buffers. */
void start_cpu_work(void);
void end_cpu_work(void);
void flush_cpu_work(void);
| 29 | |
/* CPU buffer is composed of such entries (which are
 * also used for context switch notes).
 *
 * A variable number of extra data words may follow the fixed fields.
 * 'data' is a C99 flexible array member (rather than the old GNU
 * zero-length array 'data[0]'), so sizeof(struct op_sample) covers
 * exactly the fixed part; allocate with
 *   sizeof(struct op_sample) + n * sizeof(unsigned long).
 */
struct op_sample {
	unsigned long eip;	/* sampled instruction pointer */
	unsigned long event;	/* event value, or flag word for notes */
	unsigned long data[];	/* optional trailing data words */
};
| 38 | |
| 39 | struct op_entry; |
| 40 | |
/* Per-CPU sample buffer state; one instance per CPU (see the
 * DECLARE_PER_CPU below). */
struct oprofile_cpu_buffer {
	unsigned long buffer_size;	/* capacity of this CPU's buffer */
	struct task_struct *last_task;	/* task of last sample; NULL = invalid
					 * (set by op_cpu_buffer_reset()) */
	int last_is_kernel;		/* kernel/user mode of last sample;
					 * -1 = invalid (see reset below) */
	int tracing;			/* NOTE(review): presumably non-zero
					 * while a backtrace is recorded --
					 * confirm in cpu_buffer.c */
	unsigned long sample_received;		/* statistics counters -- */
	unsigned long sample_lost_overflow;	/* exact accounting lives in */
	unsigned long backtrace_aborted;	/* cpu_buffer.c */
	unsigned long sample_invalid_eip;
	int cpu;			/* CPU this buffer belongs to */
	struct delayed_work work;	/* periodic work draining this buffer */
};
| 53 | |
| 54 | DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer); |
| 55 | |
| 56 | /* |
| 57 | * Resets the cpu buffer to a sane state. |
| 58 | * |
| 59 | * reset these to invalid values; the next sample collected will |
| 60 | * populate the buffer with proper values to initialize the buffer |
| 61 | */ |
| 62 | static inline void op_cpu_buffer_reset(int cpu) |
| 63 | { |
| 64 | struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu); |
| 65 | |
| 66 | cpu_buf->last_is_kernel = -1; |
| 67 | cpu_buf->last_task = NULL; |
| 68 | } |
| 69 | |
| 70 | /* |
| 71 | * op_cpu_buffer_add_data() and op_cpu_buffer_write_commit() may be |
| 72 | * called only if op_cpu_buffer_write_reserve() did not return NULL or |
| 73 | * entry->event != NULL, otherwise entry->size or entry->event will be |
| 74 | * used uninitialized. |
| 75 | */ |
| 76 | |
| 77 | struct op_sample |
| 78 | *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size); |
| 79 | int op_cpu_buffer_write_commit(struct op_entry *entry); |
| 80 | struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu); |
| 81 | unsigned long op_cpu_buffer_entries(int cpu); |
| 82 | |
| 83 | /* returns the remaining free size of data in the entry */ |
| 84 | static inline |
| 85 | int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val) |
| 86 | { |
| 87 | if (!entry->size) |
| 88 | return 0; |
| 89 | *entry->data = val; |
| 90 | entry->size--; |
| 91 | entry->data++; |
| 92 | return entry->size; |
| 93 | } |
| 94 | |
| 95 | /* returns the size of data in the entry */ |
| 96 | static inline |
| 97 | int op_cpu_buffer_get_size(struct op_entry *entry) |
| 98 | { |
| 99 | return entry->size; |
| 100 | } |
| 101 | |
| 102 | /* returns 0 if empty or the size of data including the current value */ |
| 103 | static inline |
| 104 | int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val) |
| 105 | { |
| 106 | int size = entry->size; |
| 107 | if (!size) |
| 108 | return 0; |
| 109 | *val = *entry->data; |
| 110 | entry->size--; |
| 111 | entry->data++; |
| 112 | return size; |
| 113 | } |
| 114 | |
/* extra data flags -- stored in a sample's words to mark non-sample
 * records (context switches, trace boundaries).  NOTE(review): exact
 * producer/consumer semantics live in cpu_buffer.c / buffer_sync.c --
 * confirm there. */
#define KERNEL_CTX_SWITCH (1UL << 0)
#define IS_KERNEL (1UL << 1)
#define TRACE_BEGIN (1UL << 2)
#define USER_CTX_SWITCH (1UL << 3)
| 120 | |
| 121 | #endif /* OPROFILE_CPU_BUFFER_H */ |