#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	local_t				aux_head;
	local_t				aux_nest;
	local_t				aux_wakeup;
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

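/*
 * Layout sketch: user_page is the perf_event_mmap_page control page
 * that userspace mmap()s at page offset 0; the nr_pages data pages
 * follow it in that mapping, and the optional AUX area is mapped
 * separately starting at aux_pgoff.  data_pages[] is a zero-length
 * array, so the page pointers are allocated together with the struct.
 */
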
extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}

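/*
 * rb_free_rcu() is meant to be used as a call_rcu() callback, so the
 * buffer is only torn down once RCU readers that may still hold a
 * reference obtained under rcu_read_lock() have finished, e.g.:
 *
 *	call_rcu(&rb->rcu_head, rb_free_rcu);
 */
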
static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

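/*
 * Note that a buffer with no data pages can never be unpaused:
 * rb_toggle_paused(rb, false) only clears ->paused when rb->nr_pages
 * is non-zero, so writers always find such a buffer stopped.
 */
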
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

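/*
 * The AUX area has its own lifetime: rb_alloc_aux()/rb_free_aux()
 * manage it independently of the main buffer, with aux_refcount and
 * aux_mmap_count tracking users of that region separately from the
 * refcount/mmap_count pair above.
 */
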
void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

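/*
 * Worked example, assuming 4KiB pages (PAGE_SHIFT == 12) and a buffer
 * created with eight data pages: without CONFIG_PERF_USE_VMALLOC that
 * is 8 << 12 == 32KiB; with vmalloc backing the same buffer is one
 * virtually contiguous allocation (nr_pages == 1, page_order == 3),
 * giving 1 << (12 + 3) == 32KiB either way.
 */
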
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

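/*
 * The memcpy_func plugged into DEFINE_OUTPUT_COPY() returns the number
 * of bytes *not* copied, so "written = size - written" recovers how
 * far the output actually advanced.  The loop wraps from page to page
 * with "handle->page &= rb->nr_pages - 1", which relies on nr_pages
 * being a power of two, and stops early on a partial copy.  The
 * generated function returns the number of bytes left uncopied.
 */
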
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

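/*
 * __output_copy_user() runs with page faults disabled because event
 * output can happen from contexts that must not sleep (including NMI).
 * __copy_from_user_inatomic() returns the number of bytes it could not
 * copy, matching the convention DEFINE_OUTPUT_COPY() expects, so
 * unreadable user memory simply shows up as a short copy.
 */
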
/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

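/*
 * get_recursion_context() distinguishes four contexts - task, softirq,
 * hardirq and NMI - so an event cannot recurse into itself within one
 * context while it may still nest inside a different one.  A typical
 * caller pairs the helpers around the critical section (sketch, with
 * "recursion" pointing at per-CPU counters owned by the caller):
 *
 *	rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;
 *	... emit the event ...
 *	put_recursion_context(recursion, rctx);
 */
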
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */