bpf, trace: add BPF_F_CURRENT_CPU flag for bpf_perf_event_read
kernel/trace/bpf_trace.c
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include "trace.h"

/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);

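/*
 * Example (illustrative sketch, not part of this file): a minimal
 * kprobe-attached BPF program whose return value drives the logic in
 * trace_call_bpf() above. The SEC() convention and program name are
 * hypothetical user-side details:
 *
 *	SEC("kprobe/sys_write")
 *	int filter_write(struct pt_regs *ctx)
 *	{
 *		u32 pid = bpf_get_current_pid_tgid() >> 32;
 *
 *		if (pid == 1)
 *			return 0;	event is filtered out
 *		return 1;		event is stored into the ring buffer
 *	}
 */
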
static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	void *dst = (void *) (long) r1;
	int ret, size = (int) r2;
	void *unsafe_ptr = (void *) (long) r3;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func = bpf_probe_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_RAW_STACK,
	.arg2_type = ARG_CONST_STACK_SIZE,
	.arg3_type = ARG_ANYTHING,
};

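/*
 * Example (illustrative sketch): how a BPF program would call this
 * helper. dst and size must live on the program stack per the proto
 * above; the third argument may be any potentially unsafe kernel
 * pointer. PT_REGS_PARM1() and the task variable are hypothetical:
 *
 *	char comm[16] = {};
 *	struct task_struct *task = (void *) PT_REGS_PARM1(ctx);
 *
 *	bpf_probe_read(comm, sizeof(comm), &task->comm);
 *
 * on failure the helper returns a negative errno and zeroes the
 * destination buffer, so comm is always safe to use afterwards.
 */
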
/*
 * limited trace_printk()
 * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
 */
static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
{
	char *fmt = (char *) (long) r1;
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = r3;
					r3 = (long) buf;
					break;
				case 2:
					unsafe_addr = r4;
					r4 = (long) buf;
					break;
				case 3:
					unsafe_addr = r5;
					r5 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

	return __trace_printk(1/* fake ip will not be printed */, fmt,
			      mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
			      mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
			      mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func = bpf_trace_printk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_STACK,
	.arg2_type = ARG_CONST_STACK_SIZE,
};

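/*
 * Example (illustrative sketch): calling the restricted trace_printk
 * from a BPF program. The format string must sit on the BPF stack, at
 * most three conversion specifiers are accepted and only a single %s,
 * as enforced by the checks above:
 *
 *	char fmt[] = "pid %d wrote %lu bytes\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, count);
 *
 * the output is delivered through the regular trace_printk() path,
 * i.e. it shows up in the tracing trace_pipe.
 */
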
const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}

static u64 bpf_perf_event_read(u64 r1, u64 flags, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
		     event->attr.type != PERF_TYPE_RAW))
		return -EINVAL;

	/* make sure event is local and doesn't have pmu::count */
	if (unlikely(event->oncpu != cpu || event->pmu->count))
		return -EINVAL;

	/*
	 * the return value alone cannot tell the caller whether the read
	 * succeeded; that has to be judged elsewhere, e.g. by the eBPF
	 * program consuming the value.
	 */
	return perf_event_read_local(event);
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func = bpf_perf_event_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

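/*
 * Example (illustrative sketch): reading a counter with the new
 * BPF_F_CURRENT_CPU flag instead of passing an explicit cpu index.
 * cycles_map is a hypothetical BPF_MAP_TYPE_PERF_EVENT_ARRAY holding
 * one hardware counter per possible cpu:
 *
 *	s64 cnt = bpf_perf_event_read(&cycles_map, BPF_F_CURRENT_CPU);
 *
 *	if (cnt < 0)
 *		helper failed: -EINVAL, -E2BIG or -ENOENT as above
 *
 * before this flag existed, programs had to call
 * bpf_get_smp_processor_id() themselves to build the index.
 */
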
static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
{
	struct pt_regs *regs = (struct pt_regs *) (long) r1;
	struct bpf_map *map = (struct bpf_map *) (long) r2;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	void *data = (void *) (long) r4;
	struct perf_sample_data sample_data;
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct perf_raw_record raw = {
		.size = size,
		.data = data,
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_sample_data_init(&sample_data, 0, 0);
	sample_data.raw = &raw;
	perf_event_output(event, &sample_data, regs);
	return 0;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func = bpf_perf_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_STACK,
	.arg5_type = ARG_CONST_STACK_SIZE,
};

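/*
 * Example (illustrative sketch): streaming a sample to user space via
 * a perf event array, again letting BPF_F_CURRENT_CPU select the
 * current cpu's ring buffer. The struct and the events map are
 * hypothetical:
 *
 *	struct event { u32 pid; u64 ts; } e = {
 *		.pid = bpf_get_current_pid_tgid() >> 32,
 *		.ts  = bpf_ktime_get_ns(),
 *	};
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &e, sizeof(e));
 */
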
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

static u64 bpf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);

	perf_fetch_caller_regs(regs);

	return bpf_perf_event_output((long)regs, r2, flags, r4, size);
}

static const struct bpf_func_proto bpf_event_output_proto = {
	.func = bpf_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_STACK,
	.arg5_type = ARG_CONST_STACK_SIZE,
};

const struct bpf_func_proto *bpf_get_event_output_proto(void)
{
	return &bpf_event_output_proto;
}

static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	default:
		return tracing_func_proto(func_id);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					enum bpf_reg_type *reg_type)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

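/*
 * Example (illustrative sketch): the checks above permit only aligned,
 * read-only loads within struct pt_regs, so a kprobe program may do
 *
 *	u64 ip = ctx->ip;	aligned in-bounds read (x86-64 field name)
 *
 * while a store such as ctx->ip = 0, or a misaligned or out-of-range
 * load, is rejected by the verifier.
 */
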
static const struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

static struct bpf_prog_type_list kprobe_tl = {
	.ops = &kprobe_prog_ops,
	.type = BPF_PROG_TYPE_KPROBE,
};

static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
{
	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper
	 */
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_perf_event_output(ctx, r2, index, r4, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func = bpf_perf_event_output_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_STACK,
	.arg5_type = ARG_CONST_STACK_SIZE,
};

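/*
 * Example (illustrative sketch): a tracepoint program calls the helper
 * exactly like the kprobe variant does; the pt_regs indirection above
 * is invisible to it. Section and map names are hypothetical:
 *
 *	SEC("tracepoint/sched/sched_switch")
 *	int tp_prog(void *ctx)
 *	{
 *		u32 cpu = bpf_get_smp_processor_id();
 *
 *		return bpf_perf_event_output(ctx, &events,
 *					     BPF_F_CURRENT_CPU,
 *					     &cpu, sizeof(cpu));
 *	}
 */
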
static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	u64 ctx = *(long *)(uintptr_t)r1;

	return bpf_get_stackid(ctx, r2, r3, r4, r5);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func = bpf_get_stackid_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    enum bpf_reg_type *reg_type)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static const struct bpf_verifier_ops tracepoint_prog_ops = {
	.get_func_proto = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

static struct bpf_prog_type_list tracepoint_tl = {
	.ops = &tracepoint_prog_ops,
	.type = BPF_PROG_TYPE_TRACEPOINT,
};

static int __init register_kprobe_prog_ops(void)
{
	bpf_register_prog_type(&kprobe_tl);
	bpf_register_prog_type(&tracepoint_tl);
	return 0;
}
late_initcall(register_kprobe_prog_ops);