Commit | Line | Data |
---|---|---|
69d262a9 WN |
1 | /* |
2 | * bpf-loader.c | |
3 | * | |
4 | * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> | |
5 | * Copyright (C) 2015 Huawei Inc. | |
6 | */ | |
7 | ||
a08357d8 | 8 | #include <linux/bpf.h> |
69d262a9 | 9 | #include <bpf/libbpf.h> |
8690a2a7 | 10 | #include <bpf/bpf.h> |
69d262a9 | 11 | #include <linux/err.h> |
03e01f56 | 12 | #include <linux/string.h> |
69d262a9 WN |
13 | #include "perf.h" |
14 | #include "debug.h" | |
15 | #include "bpf-loader.h" | |
a08357d8 WN |
16 | #include "bpf-prologue.h" |
17 | #include "llvm-utils.h" | |
aa3abf30 WN |
18 | #include "probe-event.h" |
19 | #include "probe-finder.h" // for MAX_PROBES | |
2d055bf2 | 20 | #include "parse-events.h" |
d509db04 | 21 | #include "llvm-utils.h" |
69d262a9 WN |
22 | |
/*
 * Generate libbpf print callbacks (libbpf_warning/libbpf_info/libbpf_debug).
 * Each forwards the libbpf-formatted message to perf's veprintf() gated on
 * the given verbosity 'level', so libbpf output obeys perf's -v flag.
 */
#define DEFINE_PRINT_FN(name, level) \
static int libbpf_##name(const char *fmt, ...)	\
{						\
	va_list args;				\
	int ret;				\
						\
	va_start(args, fmt);			\
	ret = veprintf(level, verbose, pr_fmt(fmt), args);\
	va_end(args);				\
	return ret;				\
}

/* All three severities currently print only at verbosity >= 1. */
DEFINE_PRINT_FN(warning, 1)
DEFINE_PRINT_FN(info, 1)
DEFINE_PRINT_FN(debug, 1)
38 | ||
/*
 * Per-bpf_program private data, attached via bpf_program__set_priv() and
 * released by clear_prog_priv().
 */
struct bpf_prog_priv {
	struct perf_probe_event pev;	/* probe spec parsed from section name */
	bool need_prologue;		/* any tev has args -> generate prologue */
	struct bpf_insn *insns_buf;	/* scratch buffer for prologue + body */
	/*
	 * Prologue "types": tevs with identical argument lists share one
	 * generated prologue.  type_mapping[i] is the type of tevs[i];
	 * nr_types is the number of distinct types (see map_prologue()).
	 */
	int nr_types;
	int *type_mapping;
};
46 | ||
/* Set once the libbpf print callbacks have been installed. */
static bool libbpf_initialized;

/*
 * Open an eBPF object from an in-memory buffer (pre-compiled object code).
 * Returns a bpf_object pointer on success, ERR_PTR(-EINVAL) on failure.
 */
struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	/* Route libbpf messages through perf's printing on first use. */
	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_warning,
				 libbpf_info,
				 libbpf_debug);
		libbpf_initialized = true;
	}

	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
	if (IS_ERR(obj)) {
		pr_debug("bpf: failed to load buffer\n");
		return ERR_PTR(-EINVAL);
	}

	return obj;
}
69 | ||
d509db04 | 70 | struct bpf_object *bpf__prepare_load(const char *filename, bool source) |
69d262a9 WN |
71 | { |
72 | struct bpf_object *obj; | |
69d262a9 WN |
73 | |
74 | if (!libbpf_initialized) { | |
75 | libbpf_set_print(libbpf_warning, | |
76 | libbpf_info, | |
77 | libbpf_debug); | |
78 | libbpf_initialized = true; | |
79 | } | |
80 | ||
d509db04 WN |
81 | if (source) { |
82 | int err; | |
83 | void *obj_buf; | |
84 | size_t obj_buf_sz; | |
85 | ||
86 | err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz); | |
87 | if (err) | |
d3e0ce39 | 88 | return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE); |
d509db04 WN |
89 | obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename); |
90 | free(obj_buf); | |
91 | } else | |
92 | obj = bpf_object__open(filename); | |
93 | ||
6371ca3b | 94 | if (IS_ERR(obj)) { |
69d262a9 | 95 | pr_debug("bpf: failed to load %s\n", filename); |
6371ca3b | 96 | return obj; |
69d262a9 WN |
97 | } |
98 | ||
99 | return obj; | |
100 | } | |
101 | ||
/*
 * Unprobe and close every bpf_object libbpf knows about.  Uses the _safe
 * iterator because bpf_object__close() unlinks obj from the global list.
 */
void bpf__clear(void)
{
	struct bpf_object *obj, *tmp;

	bpf_object__for_each_safe(obj, tmp) {
		bpf__unprobe(obj);
		bpf_object__close(obj);
	}
}
111 | ||
112 | static void | |
80cdce76 WN |
113 | clear_prog_priv(struct bpf_program *prog __maybe_unused, |
114 | void *_priv) | |
aa3abf30 WN |
115 | { |
116 | struct bpf_prog_priv *priv = _priv; | |
117 | ||
118 | cleanup_perf_probe_events(&priv->pev, 1); | |
a08357d8 | 119 | zfree(&priv->insns_buf); |
d35b3289 | 120 | zfree(&priv->type_mapping); |
aa3abf30 WN |
121 | free(priv); |
122 | } | |
123 | ||
361f2b1d | 124 | static int |
0bb93490 | 125 | prog_config__exec(const char *value, struct perf_probe_event *pev) |
361f2b1d WN |
126 | { |
127 | pev->uprobes = true; | |
128 | pev->target = strdup(value); | |
129 | if (!pev->target) | |
130 | return -ENOMEM; | |
131 | return 0; | |
132 | } | |
133 | ||
5dbd16c0 | 134 | static int |
0bb93490 | 135 | prog_config__module(const char *value, struct perf_probe_event *pev) |
5dbd16c0 WN |
136 | { |
137 | pev->uprobes = false; | |
138 | pev->target = strdup(value); | |
139 | if (!pev->target) | |
140 | return -ENOMEM; | |
141 | return 0; | |
142 | } | |
143 | ||
03e01f56 | 144 | static int |
0bb93490 | 145 | prog_config__bool(const char *value, bool *pbool, bool invert) |
03e01f56 WN |
146 | { |
147 | int err; | |
148 | bool bool_value; | |
149 | ||
150 | if (!pbool) | |
151 | return -EINVAL; | |
152 | ||
153 | err = strtobool(value, &bool_value); | |
154 | if (err) | |
155 | return err; | |
156 | ||
157 | *pbool = invert ? !bool_value : bool_value; | |
158 | return 0; | |
159 | } | |
160 | ||
/* "inlines=yes" enables probing inlined symbols (inverts no_inlines). */
static int
prog_config__inlines(const char *value,
		     struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.no_inlines, true);
}
167 | ||
/* "force=yes" allows adding events whose name already exists. */
static int
prog_config__force(const char *value,
		   struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.force_add, false);
}
174 | ||
/*
 * Dispatch table for "key=value" terms found before the probe definition
 * in a BPF program's section name.  'usage' and 'desc' feed the hint text
 * printed by do_prog_config() when an unknown key is seen.
 */
static struct {
	const char *key;
	const char *usage;
	const char *desc;
	int (*func)(const char *, struct perf_probe_event *);
} bpf_prog_config_terms[] = {
	{
		.key	= "exec",
		.usage	= "exec=<full path of file>",
		.desc	= "Set uprobe target",
		.func	= prog_config__exec,
	},
	{
		.key	= "module",
		.usage	= "module=<module name> ",
		.desc	= "Set kprobe module",
		.func	= prog_config__module,
	},
	{
		.key	= "inlines",
		.usage	= "inlines=[yes|no] ",
		.desc	= "Probe at inline symbol",
		.func	= prog_config__inlines,
	},
	{
		.key	= "force",
		.usage	= "force=[yes|no] ",
		.desc	= "Forcibly add events with existing name",
		.func	= prog_config__force,
	},
};
206 | ||
/*
 * Apply one "key=value" config term to @pev via bpf_prog_config_terms[].
 * On an unknown key, print the list of valid options and return
 * -BPF_LOADER_ERRNO__PROGCONF_TERM.
 */
static int
do_prog_config(const char *key, const char *value,
	       struct perf_probe_event *pev)
{
	unsigned int i;

	pr_debug("config bpf program: %s=%s\n", key, value);
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
			return bpf_prog_config_terms[i].func(value, pev);

	pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
		 key, value);

	pr_debug("\nHint: Valid options are:\n");
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
			 bpf_prog_config_terms[i].desc);
	pr_debug("\n");

	return -BPF_LOADER_ERRNO__PROGCONF_TERM;
}
229 | ||
/*
 * Consume the leading "key=value;" terms of @config_str, applying each via
 * do_prog_config(), and return a pointer to the remaining probe definition
 * (a suffix of @config_str itself).  Terms without '=' are warned about and
 * skipped.  Returns an ERR_PTR() on allocation or config failure.
 */
static const char *
parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
{
	char *text = strdup(config_str);	/* scratch copy we can mutate */
	char *sep, *line;
	const char *main_str = NULL;
	int err = 0;

	if (!text) {
		pr_debug("No enough memory: dup config_str failed\n");
		return ERR_PTR(-ENOMEM);
	}

	line = text;
	while ((sep = strchr(line, ';'))) {
		char *equ;

		*sep = '\0';
		equ = strchr(line, '=');
		if (!equ) {
			pr_warning("WARNING: invalid config in BPF object: %s\n",
				   line);
			pr_warning("\tShould be 'key=value'.\n");
			goto nextline;
		}
		*equ = '\0';

		err = do_prog_config(line, equ + 1, pev);
		if (err)
			break;
nextline:
		line = sep + 1;
	}

	/*
	 * (line - text) is the offset of the first unconsumed character in
	 * the scratch copy; return the same offset into the original string,
	 * which stays valid after 'text' is freed.
	 */
	if (!err)
		main_str = config_str + (line - text);
	free(text);

	return err ? ERR_PTR(err) : main_str;
}
270 | ||
/*
 * Parse a full section-name config string: first the "key=value;" terms,
 * then the trailing perf-probe command.  Fills @pev; returns 0 or a
 * negative BPF_LOADER_ERRNO__* / errno code.
 */
static int
parse_prog_config(const char *config_str, struct perf_probe_event *pev)
{
	int err;
	const char *main_str = parse_prog_config_kvpair(config_str, pev);

	if (IS_ERR(main_str))
		return PTR_ERR(main_str);

	err = parse_perf_probe_command(main_str, pev);
	if (err < 0) {
		pr_debug("bpf: '%s' is not a valid config string\n",
			 config_str);
		/* parse failed, don't need clear pev. */
		return -BPF_LOADER_ERRNO__CONFIG;
	}
	return 0;
}
289 | ||
aa3abf30 WN |
290 | static int |
291 | config_bpf_program(struct bpf_program *prog) | |
292 | { | |
293 | struct perf_probe_event *pev = NULL; | |
294 | struct bpf_prog_priv *priv = NULL; | |
295 | const char *config_str; | |
296 | int err; | |
297 | ||
03e01f56 WN |
298 | /* Initialize per-program probing setting */ |
299 | probe_conf.no_inlines = false; | |
300 | probe_conf.force_add = false; | |
301 | ||
aa3abf30 | 302 | config_str = bpf_program__title(prog, false); |
6371ca3b | 303 | if (IS_ERR(config_str)) { |
aa3abf30 | 304 | pr_debug("bpf: unable to get title for program\n"); |
6371ca3b | 305 | return PTR_ERR(config_str); |
aa3abf30 WN |
306 | } |
307 | ||
308 | priv = calloc(sizeof(*priv), 1); | |
309 | if (!priv) { | |
310 | pr_debug("bpf: failed to alloc priv\n"); | |
311 | return -ENOMEM; | |
312 | } | |
313 | pev = &priv->pev; | |
314 | ||
315 | pr_debug("bpf: config program '%s'\n", config_str); | |
0bb93490 | 316 | err = parse_prog_config(config_str, pev); |
361f2b1d | 317 | if (err) |
aa3abf30 | 318 | goto errout; |
aa3abf30 WN |
319 | |
320 | if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) { | |
321 | pr_debug("bpf: '%s': group for event is set and not '%s'.\n", | |
322 | config_str, PERF_BPF_PROBE_GROUP); | |
d3e0ce39 | 323 | err = -BPF_LOADER_ERRNO__GROUP; |
aa3abf30 WN |
324 | goto errout; |
325 | } else if (!pev->group) | |
326 | pev->group = strdup(PERF_BPF_PROBE_GROUP); | |
327 | ||
328 | if (!pev->group) { | |
329 | pr_debug("bpf: strdup failed\n"); | |
330 | err = -ENOMEM; | |
331 | goto errout; | |
332 | } | |
333 | ||
334 | if (!pev->event) { | |
d3e0ce39 | 335 | pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n", |
aa3abf30 | 336 | config_str); |
d3e0ce39 | 337 | err = -BPF_LOADER_ERRNO__EVENTNAME; |
aa3abf30 WN |
338 | goto errout; |
339 | } | |
340 | pr_debug("bpf: config '%s' is ok\n", config_str); | |
341 | ||
edb13ed4 | 342 | err = bpf_program__set_priv(prog, priv, clear_prog_priv); |
aa3abf30 WN |
343 | if (err) { |
344 | pr_debug("Failed to set priv for program '%s'\n", config_str); | |
345 | goto errout; | |
346 | } | |
347 | ||
348 | return 0; | |
349 | ||
350 | errout: | |
351 | if (pev) | |
352 | clear_perf_probe_event(pev); | |
353 | free(priv); | |
354 | return err; | |
355 | } | |
356 | ||
/*
 * One-time initialization of the probe subsystem (symbol maps, max probe
 * count).  Idempotent: the first result is cached in a static so repeated
 * calls after a failure don't re-run init_probe_symbol_maps().
 */
static int bpf__prepare_probe(void)
{
	static int err = 0;
	static bool initialized = false;

	/*
	 * Make err static, so if init failed the first, bpf__prepare_probe()
	 * fails each time without calling init_probe_symbol_maps multiple
	 * times.
	 */
	if (initialized)
		return err;

	initialized = true;
	err = init_probe_symbol_maps(false);
	if (err < 0)
		pr_debug("Failed to init_probe_symbol_maps\n");
	probe_conf.max_probes = MAX_PROBES;
	return err;
}
377 | ||
/*
 * bpf_program__set_prep() callback: build instance @n of a program by
 * prepending a generated argument-fetching prologue to the original
 * instructions.  @n is a prologue "type" (see map_prologue()); any tev
 * with that type supplies an identical argument list.
 *
 * On success fills @res with the combined buffer (owned by priv, so no
 * fd is produced here: res->pfd = NULL).
 */
static int
preproc_gen_prologue(struct bpf_program *prog, int n,
		     struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_prog_prep_result *res)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct probe_trace_event *tev;
	struct perf_probe_event *pev;
	struct bpf_insn *buf;
	size_t prologue_cnt = 0;
	int i, err;

	if (IS_ERR(priv) || !priv)
		goto errout;

	pev = &priv->pev;

	if (n < 0 || n >= priv->nr_types)
		goto errout;

	/* Find a tev belongs to that type */
	for (i = 0; i < pev->ntevs; i++) {
		if (priv->type_mapping[i] == n)
			break;
	}

	if (i >= pev->ntevs) {
		pr_debug("Internal error: prologue type %d not found\n", n);
		return -BPF_LOADER_ERRNO__PROLOGUE;
	}

	tev = &pev->tevs[i];

	/* Prologue is written at the head of insns_buf, body copied after. */
	buf = priv->insns_buf;
	err = bpf__gen_prologue(tev->args, tev->nargs,
				buf, &prologue_cnt,
				BPF_MAXINSNS - orig_insns_cnt);
	if (err) {
		const char *title;

		title = bpf_program__title(prog, false);
		if (!title)
			title = "[unknown]";

		pr_debug("Failed to generate prologue for program %s\n",
			 title);
		return err;
	}

	memcpy(&buf[prologue_cnt], orig_insns,
	       sizeof(struct bpf_insn) * orig_insns_cnt);

	res->new_insn_ptr = buf;
	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
	res->pfd = NULL;
	return 0;

errout:
	pr_debug("Internal error in preproc_gen_prologue\n");
	return -BPF_LOADER_ERRNO__PROLOGUE;
}
439 | ||
/*
 * qsort() comparator over probe_trace_event pointers, ordering by argument
 * count (descending), then per-argument value string, then the chain of
 * dereference offsets.  Two tevs compare equal iff they would need the
 * same fetch prologue.
 *
 * compare_tev_args is reflexive, transitive and antisymmetric.
 * I can prove it but this margin is too narrow to contain.
 */
static int compare_tev_args(const void *ptev1, const void *ptev2)
{
	int i, ret;
	const struct probe_trace_event *tev1 =
		*(const struct probe_trace_event **)ptev1;
	const struct probe_trace_event *tev2 =
		*(const struct probe_trace_event **)ptev2;

	ret = tev2->nargs - tev1->nargs;
	if (ret)
		return ret;

	for (i = 0; i < tev1->nargs; i++) {
		struct probe_trace_arg *arg1, *arg2;
		struct probe_trace_arg_ref *ref1, *ref2;

		arg1 = &tev1->args[i];
		arg2 = &tev2->args[i];

		ret = strcmp(arg1->value, arg2->value);
		if (ret)
			return ret;

		ref1 = arg1->ref;
		ref2 = arg2->ref;

		/* Walk both ref chains in lockstep comparing offsets. */
		while (ref1 && ref2) {
			ret = ref2->offset - ref1->offset;
			if (ret)
				return ret;

			ref1 = ref1->next;
			ref2 = ref2->next;
		}

		/* Shorter ref chain sorts after the longer one. */
		if (ref1 || ref2)
			return ref2 ? 1 : -1;
	}

	return 0;
}
485 | ||
486 | /* | |
487 | * Assign a type number to each tevs in a pev. | |
488 | * mapping is an array with same slots as tevs in that pev. | |
489 | * nr_types will be set to number of types. | |
490 | */ | |
491 | static int map_prologue(struct perf_probe_event *pev, int *mapping, | |
492 | int *nr_types) | |
493 | { | |
494 | int i, type = 0; | |
495 | struct probe_trace_event **ptevs; | |
496 | ||
497 | size_t array_sz = sizeof(*ptevs) * pev->ntevs; | |
498 | ||
499 | ptevs = malloc(array_sz); | |
500 | if (!ptevs) { | |
501 | pr_debug("No ehough memory: alloc ptevs failed\n"); | |
502 | return -ENOMEM; | |
503 | } | |
504 | ||
505 | pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs); | |
506 | for (i = 0; i < pev->ntevs; i++) | |
507 | ptevs[i] = &pev->tevs[i]; | |
508 | ||
509 | qsort(ptevs, pev->ntevs, sizeof(*ptevs), | |
510 | compare_tev_args); | |
511 | ||
512 | for (i = 0; i < pev->ntevs; i++) { | |
513 | int n; | |
514 | ||
515 | n = ptevs[i] - pev->tevs; | |
516 | if (i == 0) { | |
517 | mapping[n] = type; | |
518 | pr_debug("mapping[%d]=%d\n", n, type); | |
519 | continue; | |
520 | } | |
521 | ||
522 | if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0) | |
523 | mapping[n] = type; | |
524 | else | |
525 | mapping[n] = ++type; | |
526 | ||
527 | pr_debug("mapping[%d]=%d\n", n, mapping[n]); | |
528 | } | |
529 | free(ptevs); | |
530 | *nr_types = type + 1; | |
531 | ||
532 | return 0; | |
533 | } | |
534 | ||
/*
 * Decide whether @prog needs a generated prologue (any tev has arguments)
 * and, if so, allocate the scratch buffers, compute the tev -> prologue
 * type mapping, and register preproc_gen_prologue() so each type is built
 * at load time.  Buffers allocated here are owned by priv and released by
 * clear_prog_priv() (including on the error paths below).
 */
static int hook_load_preprocessor(struct bpf_program *prog)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct perf_probe_event *pev;
	bool need_prologue = false;
	int err, i;

	if (IS_ERR(priv) || !priv) {
		pr_debug("Internal error when hook preprocessor\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	pev = &priv->pev;
	for (i = 0; i < pev->ntevs; i++) {
		struct probe_trace_event *tev = &pev->tevs[i];

		if (tev->nargs > 0) {
			need_prologue = true;
			break;
		}
	}

	/*
	 * Since all tevs don't have argument, we don't need generate
	 * prologue.
	 */
	if (!need_prologue) {
		priv->need_prologue = false;
		return 0;
	}

	priv->need_prologue = true;
	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
	if (!priv->insns_buf) {
		pr_debug("No enough memory: alloc insns_buf failed\n");
		return -ENOMEM;
	}

	/* -1 marks "no type assigned yet" until map_prologue() fills it. */
	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
	if (!priv->type_mapping) {
		pr_debug("No enough memory: alloc type_mapping failed\n");
		return -ENOMEM;
	}
	memset(priv->type_mapping, -1,
	       sizeof(int) * pev->ntevs);

	err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
	if (err)
		return err;

	err = bpf_program__set_prep(prog, priv->nr_types,
				    preproc_gen_prologue);
	return err;
}
589 | ||
aa3abf30 WN |
590 | int bpf__probe(struct bpf_object *obj) |
591 | { | |
592 | int err = 0; | |
593 | struct bpf_program *prog; | |
594 | struct bpf_prog_priv *priv; | |
595 | struct perf_probe_event *pev; | |
596 | ||
597 | err = bpf__prepare_probe(); | |
598 | if (err) { | |
599 | pr_debug("bpf__prepare_probe failed\n"); | |
600 | return err; | |
601 | } | |
602 | ||
603 | bpf_object__for_each_program(prog, obj) { | |
604 | err = config_bpf_program(prog); | |
605 | if (err) | |
606 | goto out; | |
607 | ||
be834ffb ACM |
608 | priv = bpf_program__priv(prog); |
609 | if (IS_ERR(priv) || !priv) { | |
610 | err = PTR_ERR(priv); | |
aa3abf30 | 611 | goto out; |
be834ffb | 612 | } |
aa3abf30 WN |
613 | pev = &priv->pev; |
614 | ||
615 | err = convert_perf_probe_events(pev, 1); | |
616 | if (err < 0) { | |
617 | pr_debug("bpf_probe: failed to convert perf probe events"); | |
618 | goto out; | |
619 | } | |
620 | ||
621 | err = apply_perf_probe_events(pev, 1); | |
622 | if (err < 0) { | |
623 | pr_debug("bpf_probe: failed to apply perf probe events"); | |
624 | goto out; | |
625 | } | |
a08357d8 WN |
626 | |
627 | /* | |
628 | * After probing, let's consider prologue, which | |
629 | * adds program fetcher to BPF programs. | |
630 | * | |
631 | * hook_load_preprocessorr() hooks pre-processor | |
632 | * to bpf_program, let it generate prologue | |
633 | * dynamically during loading. | |
634 | */ | |
635 | err = hook_load_preprocessor(prog); | |
636 | if (err) | |
637 | goto out; | |
aa3abf30 WN |
638 | } |
639 | out: | |
640 | return err < 0 ? err : 0; | |
641 | } | |
642 | ||
#define EVENTS_WRITE_BUFSIZE  4096
/*
 * Remove all kprobe/uprobe events created by bpf__probe() for @obj.
 * Best-effort: failures on individual events are remembered in 'ret'
 * but do not stop the remaining deletions.
 */
int bpf__unprobe(struct bpf_object *obj)
{
	int err, ret = 0;
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		int i;

		/* Programs that were never configured have nothing to undo. */
		if (IS_ERR(priv) || !priv)
			continue;

		for (i = 0; i < priv->pev.ntevs; i++) {
			struct probe_trace_event *tev = &priv->pev.tevs[i];
			char name_buf[EVENTS_WRITE_BUFSIZE];
			struct strfilter *delfilter;

			/* Delete by "group:event" filter. */
			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
				 "%s:%s", tev->group, tev->event);
			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';

			delfilter = strfilter__new(name_buf, NULL);
			if (!delfilter) {
				pr_debug("Failed to create filter for unprobing\n");
				ret = -ENOMEM;
				continue;
			}

			err = del_perf_probe_events(delfilter);
			strfilter__delete(delfilter);
			if (err) {
				pr_debug("Failed to delete %s\n", name_buf);
				ret = err;
				continue;
			}
		}
	}
	return ret;
}
683 | ||
/*
 * Load all programs of @obj into the kernel.  Returns 0 on success or
 * the (negative) libbpf error code.
 */
int bpf__load(struct bpf_object *obj)
{
	int err = bpf_object__load(obj);

	if (err)
		pr_debug("bpf: load objects failed\n");
	return err;
}
695 | ||
/*
 * Invoke @func(tev, fd, @arg) for every probe_trace_event of every program
 * in @obj.  For programs with a generated prologue the fd of the per-type
 * instance is used (bpf_program__nth_fd); otherwise the single program fd.
 * Stops and returns the first non-zero callback or internal error.
 */
int bpf__foreach_tev(struct bpf_object *obj,
		     bpf_prog_iter_callback_t func,
		     void *arg)
{
	struct bpf_program *prog;
	int err;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		struct probe_trace_event *tev;
		struct perf_probe_event *pev;
		int i, fd;

		if (IS_ERR(priv) || !priv) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			tev = &pev->tevs[i];

			if (priv->need_prologue) {
				/* Pick the instance built for this tev's type. */
				int type = priv->type_mapping[i];

				fd = bpf_program__nth_fd(prog, type);
			} else {
				fd = bpf_program__fd(prog);
			}

			if (fd < 0) {
				pr_debug("bpf: failed to get file descriptor\n");
				return fd;
			}

			err = (*func)(tev, fd, arg);
			if (err) {
				pr_debug("bpf: call back failed, stop iterate\n");
				return err;
			}
		}
	}
	return 0;
}
740 | ||
/* What a map op writes: a literal u64 value or an evsel's event fd. */
enum bpf_map_op_type {
	BPF_MAP_OP_SET_VALUE,
	BPF_MAP_OP_SET_EVSEL,
};

/* Which keys an op applies to: every index, or explicit index ranges. */
enum bpf_map_key_type {
	BPF_MAP_KEY_ALL,
	BPF_MAP_KEY_RANGES,
};

/*
 * One deferred map-configuration operation, queued on the owning map's
 * bpf_map_priv::ops_list.
 */
struct bpf_map_op {
	struct list_head list;
	enum bpf_map_op_type op_type;
	enum bpf_map_key_type key_type;
	union {
		/* valid when key_type == BPF_MAP_KEY_RANGES */
		struct parse_events_array array;
	} k;
	union {
		u64 value;			/* BPF_MAP_OP_SET_VALUE */
		struct perf_evsel *evsel;	/* BPF_MAP_OP_SET_EVSEL */
	} v;
};

/* Per-map private data: the list of pending bpf_map_op operations. */
struct bpf_map_priv {
	struct list_head ops_list;
};
767 | ||
/*
 * Free one map op: unlink it from its owner list (if linked) and release
 * the index-range storage for BPF_MAP_KEY_RANGES keys.
 */
static void
bpf_map_op__delete(struct bpf_map_op *op)
{
	if (!list_empty(&op->list))
		list_del(&op->list);
	if (op->key_type == BPF_MAP_KEY_RANGES)
		parse_events__clear_array(&op->k.array);
	free(op);
}
777 | ||
/* Delete every queued op on @priv's ops_list (list itself stays valid). */
static void
bpf_map_priv__purge(struct bpf_map_priv *priv)
{
	struct bpf_map_op *pos, *n;

	list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
		list_del_init(&pos->list);
		bpf_map_op__delete(pos);
	}
}
788 | ||
/* Destructor installed via bpf_map__set_priv(): purge ops and free priv. */
static void
bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
		    void *_priv)
{
	struct bpf_map_priv *priv = _priv;

	bpf_map_priv__purge(priv);
	free(priv);
}
798 | ||
/*
 * Initialize @op's key from @term: BPF_MAP_KEY_ALL by default, or a
 * duplicated copy of the term's index ranges (BPF_MAP_KEY_RANGES) when
 * the term specifies any.  Returns 0 or -ENOMEM.
 */
static int
bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
{
	op->key_type = BPF_MAP_KEY_ALL;
	if (!term)
		return 0;

	if (term->array.nr_ranges) {
		size_t memsz = term->array.nr_ranges *
				sizeof(op->k.array.ranges[0]);

		/* Own a private copy; term's ranges belong to the parser. */
		op->k.array.ranges = memdup(term->array.ranges, memsz);
		if (!op->k.array.ranges) {
			pr_debug("No enough memory to alloc indices for map\n");
			return -ENOMEM;
		}
		op->key_type = BPF_MAP_KEY_RANGES;
		op->k.array.nr_ranges = term->array.nr_ranges;
	}
	return 0;
}
820 | ||
066dacbf | 821 | static struct bpf_map_op * |
2d055bf2 | 822 | bpf_map_op__new(struct parse_events_term *term) |
066dacbf WN |
823 | { |
824 | struct bpf_map_op *op; | |
2d055bf2 | 825 | int err; |
066dacbf WN |
826 | |
827 | op = zalloc(sizeof(*op)); | |
828 | if (!op) { | |
829 | pr_debug("Failed to alloc bpf_map_op\n"); | |
830 | return ERR_PTR(-ENOMEM); | |
831 | } | |
832 | INIT_LIST_HEAD(&op->list); | |
833 | ||
2d055bf2 WN |
834 | err = bpf_map_op_setkey(op, term); |
835 | if (err) { | |
836 | free(op); | |
837 | return ERR_PTR(err); | |
838 | } | |
066dacbf WN |
839 | return op; |
840 | } | |
841 | ||
/*
 * Deep-copy a map op: duplicate the struct, reset its list linkage, and
 * duplicate the index-range array for BPF_MAP_KEY_RANGES keys (the value
 * union is copied by memdup).  Returns NULL on allocation failure.
 */
static struct bpf_map_op *
bpf_map_op__clone(struct bpf_map_op *op)
{
	struct bpf_map_op *newop;

	newop = memdup(op, sizeof(*op));
	if (!newop) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return NULL;
	}

	/* The copy must not share list membership with the original. */
	INIT_LIST_HEAD(&newop->list);
	if (op->key_type == BPF_MAP_KEY_RANGES) {
		size_t memsz = op->k.array.nr_ranges *
			       sizeof(op->k.array.ranges[0]);

		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
		if (!newop->k.array.ranges) {
			pr_debug("Failed to alloc indices for map\n");
			free(newop);
			return NULL;
		}
	}

	return newop;
}
868 | ||
/*
 * Deep-copy a map priv: new ops_list with a clone of every queued op.
 * On any clone failure the partial copy is purged and NULL returned.
 */
static struct bpf_map_priv *
bpf_map_priv__clone(struct bpf_map_priv *priv)
{
	struct bpf_map_priv *newpriv;
	struct bpf_map_op *pos, *newop;

	newpriv = zalloc(sizeof(*newpriv));
	if (!newpriv) {
		pr_debug("No enough memory to alloc map private\n");
		return NULL;
	}
	INIT_LIST_HEAD(&newpriv->ops_list);

	list_for_each_entry(pos, &priv->ops_list, list) {
		newop = bpf_map_op__clone(pos);
		if (!newop) {
			bpf_map_priv__purge(newpriv);
			return NULL;
		}
		list_add_tail(&newop->list, &newpriv->ops_list);
	}

	return newpriv;
}
893 | ||
/*
 * Append @op to @map's pending-operation list, lazily allocating the
 * per-map priv (with bpf_map_priv__clear as destructor) on first use.
 * On success the list owns @op.  Returns 0 or a negative error.
 */
static int
bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
{
	const char *map_name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("Failed to get private from map %s\n", map_name);
		return PTR_ERR(priv);
	}

	/* NULL priv (not an error) means "not allocated yet". */
	if (!priv) {
		priv = zalloc(sizeof(*priv));
		if (!priv) {
			pr_debug("No enough memory to alloc map private\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&priv->ops_list);

		if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
			free(priv);
			return -BPF_LOADER_ERRNO__INTERNAL;
		}
	}

	list_add_tail(&op->list, &priv->ops_list);
	return 0;
}
922 | ||
/*
 * Create a new op keyed from @term and queue it on @map.  Returns the
 * queued op (owned by the map's ops_list) or an ERR_PTR(); a partially
 * constructed op is deleted on queueing failure.
 */
static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
	struct bpf_map_op *op = bpf_map_op__new(term);

	if (!IS_ERR(op)) {
		int err = bpf_map__add_op(map, op);

		if (err) {
			bpf_map_op__delete(op);
			op = ERR_PTR(err);
		}
	}
	return op;
}
940 | ||
/*
 * Queue a "value=<num>" operation on @map after validating that the map
 * is a BPF_MAP_TYPE_ARRAY with a sane key size and a 1/2/4/8-byte value.
 * Returns 0 or a negative BPF_LOADER_ERRNO__* code.
 */
static int
__bpf_map__config_value(struct bpf_map *map,
			struct parse_events_term *term)
{
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);
	const struct bpf_map_def *def = bpf_map__def(map);

	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (def->type != BPF_MAP_TYPE_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}
	if (def->key_size < sizeof(unsigned int)) {
		pr_debug("Map %s has incorrect key size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
	}
	/* Only power-of-two sizes up to u64 can be written as a number. */
	switch (def->value_size) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		pr_debug("Map %s has incorrect value size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_VALUE;
	op->v.value = term->val.num;
	return 0;
}
982 | ||
/*
 * "value" config entry point: check the term carries a numeric value,
 * then delegate to __bpf_map__config_value().
 */
static int
bpf_map__config_value(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct perf_evlist *evlist __maybe_unused)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
		pr_debug("ERROR: wrong value type for 'value'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_value(map, term);
}
1000 | ||
/*
 * Queue an "event=<name>" operation on @map: look the evsel up in
 * @evlist by string and require the map to be a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY.  Returns 0 or a negative error.
 */
static int
__bpf_map__config_event(struct bpf_map *map,
			struct parse_events_term *term,
			struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	const struct bpf_map_def *def;
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);

	evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
	if (!evsel) {
		pr_debug("Event (for '%s') '%s' doesn't exist\n",
			 map_name, term->val.str);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return PTR_ERR(def);
	}

	/*
	 * No need to check key_size and value_size:
	 * kernel has already checked them.
	 */
	if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_EVSEL;
	op->v.evsel = evsel;
	return 0;
}
1042 | ||
1043 | static int | |
1044 | bpf_map__config_event(struct bpf_map *map, | |
1045 | struct parse_events_term *term, | |
1046 | struct perf_evlist *evlist) | |
1047 | { | |
1048 | if (!term->err_val) { | |
1049 | pr_debug("Config value not set\n"); | |
1050 | return -BPF_LOADER_ERRNO__OBJCONF_CONF; | |
1051 | } | |
1052 | ||
1053 | if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) { | |
1054 | pr_debug("ERROR: wrong value type for 'event'\n"); | |
1055 | return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE; | |
1056 | } | |
1057 | ||
1058 | return __bpf_map__config_event(map, term, evlist); | |
1059 | } | |
1060 | ||
066dacbf WN |
/*
 * Dispatch entry mapping a map config option name (the part after the
 * '.' in "map:<mapname>.<opt>") to the handler applying it.
 */
struct bpf_obj_config__map_func {
	const char *config_opt;
	int (*config_func)(struct bpf_map *, struct parse_events_term *,
			   struct perf_evlist *);
};

/* Supported map config options; scanned linearly by bpf__obj_config_map(). */
struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
	{"value", bpf_map__config_value},
	{"event", bpf_map__config_event},
};
1071 | ||
2d055bf2 WN |
1072 | static int |
1073 | config_map_indices_range_check(struct parse_events_term *term, | |
1074 | struct bpf_map *map, | |
1075 | const char *map_name) | |
1076 | { | |
1077 | struct parse_events_array *array = &term->array; | |
53897a78 | 1078 | const struct bpf_map_def *def; |
2d055bf2 | 1079 | unsigned int i; |
2d055bf2 WN |
1080 | |
1081 | if (!array->nr_ranges) | |
1082 | return 0; | |
1083 | if (!array->ranges) { | |
1084 | pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n", | |
1085 | map_name, (int)array->nr_ranges); | |
1086 | return -BPF_LOADER_ERRNO__INTERNAL; | |
1087 | } | |
1088 | ||
53897a78 ACM |
1089 | def = bpf_map__def(map); |
1090 | if (IS_ERR(def)) { | |
2d055bf2 WN |
1091 | pr_debug("ERROR: Unable to get map definition from '%s'\n", |
1092 | map_name); | |
1093 | return -BPF_LOADER_ERRNO__INTERNAL; | |
1094 | } | |
1095 | ||
1096 | for (i = 0; i < array->nr_ranges; i++) { | |
1097 | unsigned int start = array->ranges[i].start; | |
1098 | size_t length = array->ranges[i].length; | |
1099 | unsigned int idx = start + length - 1; | |
1100 | ||
53897a78 | 1101 | if (idx >= def->max_entries) { |
2d055bf2 WN |
1102 | pr_debug("ERROR: index %d too large\n", idx); |
1103 | return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG; | |
1104 | } | |
1105 | } | |
1106 | return 0; | |
1107 | } | |
1108 | ||
066dacbf WN |
1109 | static int |
1110 | bpf__obj_config_map(struct bpf_object *obj, | |
1111 | struct parse_events_term *term, | |
1112 | struct perf_evlist *evlist, | |
1113 | int *key_scan_pos) | |
1114 | { | |
1115 | /* key is "map:<mapname>.<config opt>" */ | |
1116 | char *map_name = strdup(term->config + sizeof("map:") - 1); | |
1117 | struct bpf_map *map; | |
1118 | int err = -BPF_LOADER_ERRNO__OBJCONF_OPT; | |
1119 | char *map_opt; | |
1120 | size_t i; | |
1121 | ||
1122 | if (!map_name) | |
1123 | return -ENOMEM; | |
1124 | ||
1125 | map_opt = strchr(map_name, '.'); | |
1126 | if (!map_opt) { | |
1127 | pr_debug("ERROR: Invalid map config: %s\n", map_name); | |
1128 | goto out; | |
1129 | } | |
1130 | ||
1131 | *map_opt++ = '\0'; | |
1132 | if (*map_opt == '\0') { | |
1133 | pr_debug("ERROR: Invalid map option: %s\n", term->config); | |
1134 | goto out; | |
1135 | } | |
1136 | ||
a7fe0450 | 1137 | map = bpf_object__find_map_by_name(obj, map_name); |
066dacbf WN |
1138 | if (!map) { |
1139 | pr_debug("ERROR: Map %s doesn't exist\n", map_name); | |
1140 | err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST; | |
1141 | goto out; | |
1142 | } | |
1143 | ||
2d055bf2 WN |
1144 | *key_scan_pos += strlen(map_opt); |
1145 | err = config_map_indices_range_check(term, map, map_name); | |
1146 | if (err) | |
1147 | goto out; | |
1148 | *key_scan_pos -= strlen(map_opt); | |
1149 | ||
066dacbf WN |
1150 | for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) { |
1151 | struct bpf_obj_config__map_func *func = | |
1152 | &bpf_obj_config__map_funcs[i]; | |
1153 | ||
1154 | if (strcmp(map_opt, func->config_opt) == 0) { | |
1155 | err = func->config_func(map, term, evlist); | |
1156 | goto out; | |
1157 | } | |
1158 | } | |
1159 | ||
1160 | pr_debug("ERROR: Invalid map config option '%s'\n", map_opt); | |
1161 | err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT; | |
1162 | out: | |
1163 | free(map_name); | |
1164 | if (!err) | |
1165 | key_scan_pos += strlen(map_opt); | |
1166 | return err; | |
1167 | } | |
1168 | ||
1169 | int bpf__config_obj(struct bpf_object *obj, | |
1170 | struct parse_events_term *term, | |
1171 | struct perf_evlist *evlist, | |
1172 | int *error_pos) | |
1173 | { | |
1174 | int key_scan_pos = 0; | |
1175 | int err; | |
1176 | ||
1177 | if (!obj || !term || !term->config) | |
1178 | return -EINVAL; | |
1179 | ||
1180 | if (!prefixcmp(term->config, "map:")) { | |
1181 | key_scan_pos = sizeof("map:") - 1; | |
1182 | err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos); | |
1183 | goto out; | |
1184 | } | |
1185 | err = -BPF_LOADER_ERRNO__OBJCONF_OPT; | |
1186 | out: | |
1187 | if (error_pos) | |
1188 | *error_pos = key_scan_pos; | |
1189 | return err; | |
1190 | ||
1191 | } | |
1192 | ||
8690a2a7 | 1193 | typedef int (*map_config_func_t)(const char *name, int map_fd, |
53897a78 | 1194 | const struct bpf_map_def *pdef, |
8690a2a7 WN |
1195 | struct bpf_map_op *op, |
1196 | void *pkey, void *arg); | |
1197 | ||
1198 | static int | |
1199 | foreach_key_array_all(map_config_func_t func, | |
1200 | void *arg, const char *name, | |
53897a78 | 1201 | int map_fd, const struct bpf_map_def *pdef, |
8690a2a7 WN |
1202 | struct bpf_map_op *op) |
1203 | { | |
1204 | unsigned int i; | |
1205 | int err; | |
1206 | ||
1207 | for (i = 0; i < pdef->max_entries; i++) { | |
1208 | err = func(name, map_fd, pdef, op, &i, arg); | |
1209 | if (err) { | |
1210 | pr_debug("ERROR: failed to insert value to %s[%u]\n", | |
1211 | name, i); | |
1212 | return err; | |
1213 | } | |
1214 | } | |
1215 | return 0; | |
1216 | } | |
1217 | ||
2d055bf2 WN |
1218 | static int |
1219 | foreach_key_array_ranges(map_config_func_t func, void *arg, | |
1220 | const char *name, int map_fd, | |
53897a78 | 1221 | const struct bpf_map_def *pdef, |
2d055bf2 WN |
1222 | struct bpf_map_op *op) |
1223 | { | |
1224 | unsigned int i, j; | |
1225 | int err; | |
1226 | ||
1227 | for (i = 0; i < op->k.array.nr_ranges; i++) { | |
1228 | unsigned int start = op->k.array.ranges[i].start; | |
1229 | size_t length = op->k.array.ranges[i].length; | |
1230 | ||
1231 | for (j = 0; j < length; j++) { | |
1232 | unsigned int idx = start + j; | |
1233 | ||
1234 | err = func(name, map_fd, pdef, op, &idx, arg); | |
1235 | if (err) { | |
1236 | pr_debug("ERROR: failed to insert value to %s[%u]\n", | |
1237 | name, idx); | |
1238 | return err; | |
1239 | } | |
1240 | } | |
1241 | } | |
1242 | return 0; | |
1243 | } | |
1244 | ||
8690a2a7 WN |
/*
 * Walk every queued config op of 'map' and, for each, call 'func' once
 * per affected key (all keys, or only the listed ranges, depending on
 * the op's key_type).
 *
 * Returns 0 when there is nothing to do or all calls succeed; otherwise
 * a negative BPF_LOADER_ERRNO__* code or the fd error / callback error.
 */
static int
bpf_map_config_foreach_key(struct bpf_map *map,
			   map_config_func_t func,
			   void *arg)
{
	int err, map_fd;
	struct bpf_map_op *op;
	const struct bpf_map_def *def;
	const char *name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("ERROR: failed to get private from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	/* No private data or no queued ops: nothing to configure. */
	if (!priv || list_empty(&priv->ops_list)) {
		pr_debug("INFO: nothing to config for map %s\n", name);
		return 0;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: failed to get definition from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	map_fd = bpf_map__fd(map);
	if (map_fd < 0) {
		pr_debug("ERROR: failed to get fd from map %s\n", name);
		return map_fd;
	}

	list_for_each_entry(op, &priv->ops_list, list) {
		switch (def->type) {
		/* Only array-like maps support per-key configuration. */
		case BPF_MAP_TYPE_ARRAY:
		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
			switch (op->key_type) {
			case BPF_MAP_KEY_ALL:
				err = foreach_key_array_all(func, arg, name,
							    map_fd, def, op);
				break;
			case BPF_MAP_KEY_RANGES:
				err = foreach_key_array_ranges(func, arg, name,
							       map_fd, def,
							       op);
				break;
			default:
				pr_debug("ERROR: keytype for map '%s' invalid\n",
					 name);
				return -BPF_LOADER_ERRNO__INTERNAL;
			}
			if (err)
				return err;
			break;
		default:
			pr_debug("ERROR: type of '%s' incorrect\n", name);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
		}
	}

	return 0;
}
1306 | ||
1307 | static int | |
1308 | apply_config_value_for_key(int map_fd, void *pkey, | |
1309 | size_t val_size, u64 val) | |
1310 | { | |
1311 | int err = 0; | |
1312 | ||
1313 | switch (val_size) { | |
1314 | case 1: { | |
1315 | u8 _val = (u8)(val); | |
1316 | err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY); | |
1317 | break; | |
1318 | } | |
1319 | case 2: { | |
1320 | u16 _val = (u16)(val); | |
1321 | err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY); | |
1322 | break; | |
1323 | } | |
1324 | case 4: { | |
1325 | u32 _val = (u32)(val); | |
1326 | err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY); | |
1327 | break; | |
1328 | } | |
1329 | case 8: { | |
1330 | err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY); | |
1331 | break; | |
1332 | } | |
1333 | default: | |
1334 | pr_debug("ERROR: invalid value size\n"); | |
1335 | return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE; | |
1336 | } | |
1337 | if (err && errno) | |
1338 | err = -errno; | |
1339 | return err; | |
1340 | } | |
1341 | ||
7630b3e2 WN |
/*
 * Store the perf event fd of 'evsel' into the BPF_MAP_TYPE_PERF_EVENT_ARRAY
 * slot at 'pkey'.  Only single-dimension (one fd per cpu-or-thread axis),
 * non-inherit raw/hardware/bpf-output events are accepted.
 *
 * Returns 0 on success or a negative BPF_LOADER_ERRNO__* / errno code.
 */
static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
			   struct perf_evsel *evsel)
{
	struct xyarray *xy = evsel->fd;
	struct perf_event_attr *attr;
	unsigned int key, events;
	bool check_pass = false;
	int *evt_fd;
	int err;

	/* The evsel must already be opened so its fds exist. */
	if (!xy) {
		pr_debug("ERROR: evsel not ready for map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	/* Exactly one fd per row: multi-thread tracing is unsupported. */
	if (xy->row_size / xy->entry_size != 1) {
		pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
			 name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
	}

	attr = &evsel->attr;
	if (attr->inherit) {
		pr_debug("ERROR: Can't put inherit event into map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
	}

	/* Accept bpf-output, raw, and hardware events only. */
	if (perf_evsel__is_bpf_output(evsel))
		check_pass = true;
	if (attr->type == PERF_TYPE_RAW)
		check_pass = true;
	if (attr->type == PERF_TYPE_HARDWARE)
		check_pass = true;
	if (!check_pass) {
		pr_debug("ERROR: Event type is wrong for map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
	}

	/* Bounds-check the requested slot against the number of fds. */
	events = xy->entries / (xy->row_size / xy->entry_size);
	key = *((unsigned int *)pkey);
	if (key >= events) {
		pr_debug("ERROR: there is no event %d for map %s\n",
			 key, name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
	}
	evt_fd = xyarray__entry(xy, key, 0);
	err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
	if (err && errno)
		err = -errno;
	return err;
}
1394 | ||
8690a2a7 WN |
1395 | static int |
1396 | apply_obj_config_map_for_key(const char *name, int map_fd, | |
53897a78 | 1397 | const struct bpf_map_def *pdef, |
8690a2a7 WN |
1398 | struct bpf_map_op *op, |
1399 | void *pkey, void *arg __maybe_unused) | |
1400 | { | |
1401 | int err; | |
1402 | ||
1403 | switch (op->op_type) { | |
1404 | case BPF_MAP_OP_SET_VALUE: | |
1405 | err = apply_config_value_for_key(map_fd, pkey, | |
1406 | pdef->value_size, | |
1407 | op->v.value); | |
1408 | break; | |
7630b3e2 WN |
1409 | case BPF_MAP_OP_SET_EVSEL: |
1410 | err = apply_config_evsel_for_key(name, map_fd, pkey, | |
1411 | op->v.evsel); | |
1412 | break; | |
8690a2a7 WN |
1413 | default: |
1414 | pr_debug("ERROR: unknown value type for '%s'\n", name); | |
1415 | err = -BPF_LOADER_ERRNO__INTERNAL; | |
1416 | } | |
1417 | return err; | |
1418 | } | |
1419 | ||
/* Apply every queued config op of one map, key by key. */
static int
apply_obj_config_map(struct bpf_map *map)
{
	return bpf_map_config_foreach_key(map,
					  apply_obj_config_map_for_key,
					  NULL);
}
1427 | ||
1428 | static int | |
1429 | apply_obj_config_object(struct bpf_object *obj) | |
1430 | { | |
1431 | struct bpf_map *map; | |
1432 | int err; | |
1433 | ||
1434 | bpf_map__for_each(map, obj) { | |
1435 | err = apply_obj_config_map(map); | |
1436 | if (err) | |
1437 | return err; | |
1438 | } | |
1439 | return 0; | |
1440 | } | |
1441 | ||
1442 | int bpf__apply_obj_config(void) | |
1443 | { | |
1444 | struct bpf_object *obj, *tmp; | |
1445 | int err; | |
1446 | ||
1447 | bpf_object__for_each_safe(obj, tmp) { | |
1448 | err = apply_obj_config_object(obj); | |
1449 | if (err) | |
1450 | return err; | |
1451 | } | |
1452 | ||
1453 | return 0; | |
1454 | } | |
1455 | ||
d7888573 WN |
/*
 * Iterate over every map of every loaded BPF object.  The _safe object
 * walk tolerates removal of the current object during iteration.
 */
#define bpf__for_each_map(pos, obj, objtmp)	\
	bpf_object__for_each_safe(obj, objtmp)	\
		bpf_map__for_each(pos, obj)

/* As above, but visiting only maps named "__bpf_stdout__". */
#define bpf__for_each_stdout_map(pos, obj, objtmp)	\
	bpf__for_each_map(pos, obj, objtmp)		\
		if (bpf_map__name(pos) &&		\
			(strcmp("__bpf_stdout__",	\
				bpf_map__name(pos)) == 0))
d7888573 WN |
1465 | |
/*
 * Wire every "__bpf_stdout__" map to a bpf-output event so BPF programs
 * can stream data to perf.
 *
 * Two passes: the first discovers whether any stdout map still needs
 * setup and whether one already has private data usable as a template;
 * the second either clones that template or queues a SET_EVSEL op
 * pointing at a freshly parsed "bpf-output" event.
 *
 * Returns 0 on success or a negative error code.
 */
int bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
{
	struct bpf_map_priv *tmpl_priv = NULL;
	struct bpf_object *obj, *tmp;
	struct perf_evsel *evsel = NULL;
	struct bpf_map *map;
	int err;
	bool need_init = false;

	/* Pass 1: find unconfigured stdout maps and a template priv. */
	bpf__for_each_stdout_map(map, obj, tmp) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return -BPF_LOADER_ERRNO__INTERNAL;

		/*
		 * No need to check map type: type should have been
		 * verified by kernel.
		 */
		if (!need_init && !priv)
			need_init = !priv;
		if (!tmpl_priv && priv)
			tmpl_priv = priv;
	}

	if (!need_init)
		return 0;

	/* No template available: create a dedicated bpf-output event. */
	if (!tmpl_priv) {
		err = parse_events(evlist, "bpf-output/no-inherit=1,name=__bpf_stdout__/",
				   NULL);
		if (err) {
			pr_debug("ERROR: failed to create bpf-output event\n");
			return -err;
		}

		evsel = perf_evlist__last(evlist);
	}

	/* Pass 2: configure each stdout map still lacking private data. */
	bpf__for_each_stdout_map(map, obj, tmp) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return -BPF_LOADER_ERRNO__INTERNAL;
		if (priv)
			continue;

		if (tmpl_priv) {
			priv = bpf_map_priv__clone(tmpl_priv);
			if (!priv)
				return -ENOMEM;

			err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
			if (err) {
				bpf_map_priv__clear(map, priv);
				return err;
			}
		} else if (evsel) {
			struct bpf_map_op *op;

			op = bpf_map__add_newop(map, NULL);
			if (IS_ERR(op))
				return PTR_ERR(op);
			op->op_type = BPF_MAP_OP_SET_EVSEL;
			op->v.evsel = evsel;
		}
	}

	return 0;
}
1536 | ||
d3e0ce39 WN |
/*
 * Map a BPF_LOADER_ERRNO__* code to its slot in the message table
 * below.  Loader error codes live in a private range starting at
 * __BPF_LOADER_ERRNO__START so they cannot collide with plain errnos.
 */
#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)

/* Human-readable message for each loader error code. */
static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)]	= "Invalid config string",
	[ERRCODE_OFFSET(GROUP)]		= "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(PROGCONF_TERM)]	= "Invalid program config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)]	= "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)]	= "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bound for prologue",
	[ERRCODE_OFFSET(OBJCONF_OPT)]	= "Invalid object config option",
	[ERRCODE_OFFSET(OBJCONF_CONF)]	= "Config value not set (missing '=')",
	[ERRCODE_OFFSET(OBJCONF_MAP_OPT)]	= "Invalid object map config option",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]	= "Target map doesn't exist",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]	= "Incorrect value type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]	= "Incorrect map type",
	[ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]	= "Incorrect map key size",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)]	= "Incorrect map value size",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]	= "Event not found for map setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]	= "Invalid map size for event setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]	= "Event dimension too large",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]	= "Doesn't support inherit event",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]	= "Wrong event type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]	= "Index too large",
};
1566 | ||
6371ca3b WN |
/*
 * Format an error message for 'err' into 'buf'.  Dispatches by range:
 * libbpf codes go to libbpf_strerror(), loader codes use the table
 * above, anything else falls back to strerror_r().
 *
 * Returns 0 when a specific message was found, -1 otherwise (including
 * the plain-errno fallback path).
 */
static int
bpf_loader_strerror(int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	const char *msg;

	if (!buf || !size)
		return -1;

	/* Work with the positive form of the error code. */
	err = err > 0 ? err : -err;

	if (err >= __LIBBPF_ERRNO__START)
		return libbpf_strerror(err, buf, size);

	if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
		msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	if (err >= __BPF_LOADER_ERRNO__END)
		snprintf(buf, size, "Unknown bpf loader error %d", err);
	else
		/*
		 * NOTE(review): passing strerror_r()'s result to "%s"
		 * assumes the GNU char*-returning variant — confirm the
		 * build always selects it.
		 */
		snprintf(buf, size, "%s",
			 strerror_r(err, sbuf, sizeof(sbuf)));

	buf[size - 1] = '\0';
	return -1;
}
1597 | ||
aa3abf30 WN |
/*
 * Helper macros for the bpf__strerror_*() functions below.  _head opens
 * a switch on the (positive) error code with a default that copies the
 * generic message from bpf_loader_strerror(); _entry adds one case;
 * _end closes the switch and NUL-terminates the buffer.
 */
#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';
1620 | ||
d3e0ce39 WN |
/*
 * Build "Failed to load <file>[ from source]: <reason>" into 'buf'.
 * If the prefix alone fills the buffer, stop there and return 0;
 * otherwise append the detailed reason via bpf_loader_strerror() and
 * propagate its return value.
 */
int bpf__strerror_prepare_load(const char *filename, bool source,
			       int err, char *buf, size_t size)
{
	size_t n;
	int ret;

	n = snprintf(buf, size, "Failed to load %s%s: ",
		     filename, source ? " from source" : "");
	/* snprintf's would-be length >= size means the prefix truncated. */
	if (n >= size) {
		buf[size - 1] = '\0';
		return 0;
	}
	/* Append the reason after the prefix. */
	buf += n;
	size -= n;

	ret = bpf_loader_strerror(err, buf, size);
	buf[size - 1] = '\0';
	return ret;
}
1640 | ||
aa3abf30 WN |
/*
 * Human-readable message for errors hit while setting BPF probes.
 * Expands to a switch on 'err' via the bpf__strerror_*() macros.
 */
int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	/* Open-coded case: append a hint to rerun with -v for details. */
	case BPF_LOADER_ERRNO__PROGCONF_TERM: {
		scnprintf(buf, size, "%s (add -v to see detail)", emsg);
		break;
	}
	bpf__strerror_entry(EEXIST, "Probe point exist. Try 'perf probe -d \"*\"' and set 'force=yes'");
	bpf__strerror_entry(EACCES, "You need to be root");
	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
	bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
	bpf__strerror_end(buf, size);
	return 0;
}
1e5e3ee8 | 1656 | |
d3e0ce39 | 1657 | int bpf__strerror_load(struct bpf_object *obj, |
1e5e3ee8 WN |
1658 | int err, char *buf, size_t size) |
1659 | { | |
1660 | bpf__strerror_head(err, buf, size); | |
d3e0ce39 | 1661 | case LIBBPF_ERRNO__KVER: { |
a7fe0450 | 1662 | unsigned int obj_kver = bpf_object__kversion(obj); |
d3e0ce39 WN |
1663 | unsigned int real_kver; |
1664 | ||
1665 | if (fetch_kernel_version(&real_kver, NULL, 0)) { | |
1666 | scnprintf(buf, size, "Unable to fetch kernel version"); | |
1667 | break; | |
1668 | } | |
1669 | ||
1670 | if (obj_kver != real_kver) { | |
1671 | scnprintf(buf, size, | |
1672 | "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")", | |
1673 | KVER_PARAM(obj_kver), | |
1674 | KVER_PARAM(real_kver)); | |
1675 | break; | |
1676 | } | |
1677 | ||
1678 | scnprintf(buf, size, "Failed to load program for unknown reason"); | |
1679 | break; | |
1680 | } | |
1e5e3ee8 WN |
1681 | bpf__strerror_end(buf, size); |
1682 | return 0; | |
1683 | } | |
066dacbf WN |
1684 | |
/*
 * Human-readable message for errors from bpf__config_obj().  Only the
 * map-type mismatch gets a specialised message; everything else uses
 * the generic table text.
 */
int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
			     struct parse_events_term *term __maybe_unused,
			     struct perf_evlist *evlist __maybe_unused,
			     int *error_pos __maybe_unused, int err,
			     char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
			    "Can't use this config term with this map type");
	bpf__strerror_end(buf, size);
	return 0;
}
8690a2a7 WN |
1697 | |
/*
 * Human-readable message for errors from bpf__apply_obj_config(),
 * with specialised hints for the event-into-map failure modes.
 */
int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
			    "Cannot set event to BPF map in multi-thread tracing");
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
			    "%s (Hint: use -i to turn off inherit)", emsg);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
			    "Can only put raw, hardware and BPF output event into a BPF map");
	bpf__strerror_end(buf, size);
	return 0;
}
d7888573 WN |
1710 | |
/*
 * Human-readable message for errors from bpf__setup_stdout().  No
 * specialised cases yet; the generic table text is used for all codes.
 */
int bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
			       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_end(buf, size);
	return 0;
}