1 /*
2 * Common eBPF ELF object loading operations.
3 *
4 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
5 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
6 * Copyright (C) 2015 Huawei Inc.
7 */
8
9 #include <stdlib.h>
10 #include <stdio.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <string.h>
14 #include <unistd.h>
15 #include <fcntl.h>
16 #include <errno.h>
17 #include <asm/unistd.h>
18 #include <linux/kernel.h>
19 #include <linux/bpf.h>
20 #include <linux/list.h>
21 #include <libelf.h>
22 #include <gelf.h>
23
24 #include "libbpf.h"
25 #include "bpf.h"
26
27 #define __printf(a, b) __attribute__((format(printf, a, b)))
28
29 __printf(1, 2)
30 static int __base_pr(const char *format, ...)
31 {
32 va_list args;
33 int err;
34
35 va_start(args, format);
36 err = vfprintf(stderr, format, args);
37 va_end(args);
38 return err;
39 }
40
41 static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
42 static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
43 static __printf(1, 2) libbpf_print_fn_t __pr_debug;
44
45 #define __pr(func, fmt, ...) \
46 do { \
47 if ((func)) \
48 (func)("libbpf: " fmt, ##__VA_ARGS__); \
49 } while (0)
50
51 #define pr_warning(fmt, ...) __pr(__pr_warning, fmt, ##__VA_ARGS__)
52 #define pr_info(fmt, ...) __pr(__pr_info, fmt, ##__VA_ARGS__)
53 #define pr_debug(fmt, ...) __pr(__pr_debug, fmt, ##__VA_ARGS__)
54
55 void libbpf_set_print(libbpf_print_fn_t warn,
56 libbpf_print_fn_t info,
57 libbpf_print_fn_t debug)
58 {
59 __pr_warning = warn;
60 __pr_info = info;
61 __pr_debug = debug;
62 }
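
/*
 * Usage sketch: a caller can route libbpf's warning/info output through
 * its own logger with libbpf_set_print(). The example_* names below are
 * hypothetical.
 */
__printf(1, 2)
static inline int example_log(const char *fmt, ...)
{
	va_list args;
	int err;

	va_start(args, fmt);
	err = vfprintf(stderr, fmt, args);
	va_end(args);
	return err;
}

static inline void example_enable_libbpf_logging(void)
{
	/* warnings and info go to example_log, debug output stays off */
	libbpf_set_print(example_log, example_log, NULL);
}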
63
64 #define STRERR_BUFSIZE 128
65
66 #define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START)
67 #define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c)
68 #define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
69
70 static const char *libbpf_strerror_table[NR_ERRNO] = {
71 [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf",
72 [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid",
73 [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost",
74 [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch",
75 [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf",
76 [ERRCODE_OFFSET(RELOC)] = "Relocation failed",
77 [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading",
78 [ERRCODE_OFFSET(PROG2BIG)] = "Program too big",
79 [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version",
80 };
81
82 int libbpf_strerror(int err, char *buf, size_t size)
83 {
84 if (!buf || !size)
85 return -1;
86
87 err = err > 0 ? err : -err;
88
89 if (err < __LIBBPF_ERRNO__START) {
90 int ret;
91
92 ret = strerror_r(err, buf, size);
93 buf[size - 1] = '\0';
94 return ret;
95 }
96
97 if (err < __LIBBPF_ERRNO__END) {
98 const char *msg;
99
100 msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
101 snprintf(buf, size, "%s", msg);
102 buf[size - 1] = '\0';
103 return 0;
104 }
105
106 snprintf(buf, size, "Unknown libbpf error %d", err);
107 buf[size - 1] = '\0';
108 return -1;
109 }
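
/*
 * Usage sketch for libbpf_strerror(): it accepts both ordinary errno
 * values and the extended LIBBPF_ERRNO__* codes, positive or negative.
 * The example_* name is hypothetical.
 */
static inline void example_report_error(int err)
{
	char msg[STRERR_BUFSIZE];

	libbpf_strerror(err, msg, sizeof(msg));
	fprintf(stderr, "libbpf: operation failed: %s\n", msg);
}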
110
111 #define CHECK_ERR(action, err, out) do { \
112 err = action; \
113 if (err) \
114 goto out; \
115 } while(0)
116
117
118 /* Copied from tools/perf/util/util.h */
119 #ifndef zfree
120 # define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
121 #endif
122
123 #ifndef zclose
124 # define zclose(fd) ({ \
125 int ___err = 0; \
126 if ((fd) >= 0) \
127 ___err = close((fd)); \
128 fd = -1; \
129 ___err; })
130 #endif
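
/*
 * Sketch of the zclose()/zfree() contract: the resource is released at
 * most once and the variable is reset (to -1 or NULL), so a repeated
 * call on the same variable is harmless. example_fd is hypothetical:
 *
 *	int example_fd = open("/dev/null", O_RDONLY);
 *
 *	zclose(example_fd);	closes the fd, example_fd becomes -1
 *	zclose(example_fd);	no-op, close() is skipped
 */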
131
132 #ifdef HAVE_LIBELF_MMAP_SUPPORT
133 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
134 #else
135 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
136 #endif
137
138 /*
139 * bpf_prog would be a better name, but it is already used in
140 * linux/filter.h.
141 */
142 struct bpf_program {
143 /* Index in elf obj file, for relocation use. */
144 int idx;
145 char *section_name;
146 struct bpf_insn *insns;
147 size_t insns_cnt;
148
149 struct {
150 int insn_idx;
151 int map_idx;
152 } *reloc_desc;
153 int nr_reloc;
154
155 struct {
156 int nr;
157 int *fds;
158 } instances;
159 bpf_program_prep_t preprocessor;
160
161 struct bpf_object *obj;
162 void *priv;
163 bpf_program_clear_priv_t clear_priv;
164 };
165
166 struct bpf_map {
167 int fd;
168 char *name;
169 struct bpf_map_def def;
170 void *priv;
171 bpf_map_clear_priv_t clear_priv;
172 };
173
174 static LIST_HEAD(bpf_objects_list);
175
176 struct bpf_object {
177 char license[64];
178 u32 kern_version;
179
180 struct bpf_program *programs;
181 size_t nr_programs;
182 struct bpf_map *maps;
183 size_t nr_maps;
184
185 bool loaded;
186
187 /*
188 * Information used when doing ELF-related work. Only valid
189 * if fd is valid.
190 */
191 struct {
192 int fd;
193 void *obj_buf;
194 size_t obj_buf_sz;
195 Elf *elf;
196 GElf_Ehdr ehdr;
197 Elf_Data *symbols;
198 size_t strtabidx;
199 struct {
200 GElf_Shdr shdr;
201 Elf_Data *data;
202 } *reloc;
203 int nr_reloc;
204 int maps_shndx;
205 } efile;
206 /*
207 * All loaded bpf_object structures are linked in a list, which
208 * is hidden from the caller. bpf_objects__<func> handlers deal with
209 * all objects.
210 */
211 struct list_head list;
212 char path[];
213 };
214 #define obj_elf_valid(o) ((o)->efile.elf)
215
216 static void bpf_program__unload(struct bpf_program *prog)
217 {
218 int i;
219
220 if (!prog)
221 return;
222
223 /*
224 * If the object is opened but the program was never loaded,
225 * it is possible that prog->instances.nr == -1.
226 */
227 if (prog->instances.nr > 0) {
228 for (i = 0; i < prog->instances.nr; i++)
229 zclose(prog->instances.fds[i]);
230 } else if (prog->instances.nr != -1) {
231 pr_warning("Internal error: instances.nr is %d\n",
232 prog->instances.nr);
233 }
234
235 prog->instances.nr = -1;
236 zfree(&prog->instances.fds);
237 }
238
239 static void bpf_program__exit(struct bpf_program *prog)
240 {
241 if (!prog)
242 return;
243
244 if (prog->clear_priv)
245 prog->clear_priv(prog, prog->priv);
246
247 prog->priv = NULL;
248 prog->clear_priv = NULL;
249
250 bpf_program__unload(prog);
251 zfree(&prog->section_name);
252 zfree(&prog->insns);
253 zfree(&prog->reloc_desc);
254
255 prog->nr_reloc = 0;
256 prog->insns_cnt = 0;
257 prog->idx = -1;
258 }
259
260 static int
261 bpf_program__init(void *data, size_t size, char *name, int idx,
262 struct bpf_program *prog)
263 {
264 if (size < sizeof(struct bpf_insn)) {
265 pr_warning("corrupted section '%s'\n", name);
266 return -EINVAL;
267 }
268
269 bzero(prog, sizeof(*prog));
270
271 prog->section_name = strdup(name);
272 if (!prog->section_name) {
273 pr_warning("failed to alloc name for prog %s\n",
274 name);
275 goto errout;
276 }
277
278 prog->insns = malloc(size);
279 if (!prog->insns) {
280 pr_warning("failed to alloc insns for %s\n", name);
281 goto errout;
282 }
283 prog->insns_cnt = size / sizeof(struct bpf_insn);
284 memcpy(prog->insns, data,
285 prog->insns_cnt * sizeof(struct bpf_insn));
286 prog->idx = idx;
287 prog->instances.fds = NULL;
288 prog->instances.nr = -1;
289
290 return 0;
291 errout:
292 bpf_program__exit(prog);
293 return -ENOMEM;
294 }
295
296 static int
297 bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
298 char *name, int idx)
299 {
300 struct bpf_program prog, *progs;
301 int nr_progs, err;
302
303 err = bpf_program__init(data, size, name, idx, &prog);
304 if (err)
305 return err;
306
307 progs = obj->programs;
308 nr_progs = obj->nr_programs;
309
310 progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
311 if (!progs) {
312 /*
313 * In this case the original obj->programs
314 * is still valid, so no special handling is needed for
315 * bpf_close_object().
316 */
317 pr_warning("failed to alloc a new program '%s'\n",
318 name);
319 bpf_program__exit(&prog);
320 return -ENOMEM;
321 }
322
323 pr_debug("found program %s\n", prog.section_name);
324 obj->programs = progs;
325 obj->nr_programs = nr_progs + 1;
326 prog.obj = obj;
327 progs[nr_progs] = prog;
328 return 0;
329 }
330
331 static struct bpf_object *bpf_object__new(const char *path,
332 void *obj_buf,
333 size_t obj_buf_sz)
334 {
335 struct bpf_object *obj;
336
337 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
338 if (!obj) {
339 pr_warning("alloc memory failed for %s\n", path);
340 return ERR_PTR(-ENOMEM);
341 }
342
343 strcpy(obj->path, path);
344 obj->efile.fd = -1;
345
346 /*
347 * The caller of this function should also call
348 * bpf_object__elf_finish() after data collection to return
349 * obj_buf to the user. Otherwise, we would have to duplicate the
350 * buffer to avoid the user freeing it before the ELF work is done.
351 */
352 obj->efile.obj_buf = obj_buf;
353 obj->efile.obj_buf_sz = obj_buf_sz;
354 obj->efile.maps_shndx = -1;
355
356 obj->loaded = false;
357
358 INIT_LIST_HEAD(&obj->list);
359 list_add(&obj->list, &bpf_objects_list);
360 return obj;
361 }
362
363 static void bpf_object__elf_finish(struct bpf_object *obj)
364 {
365 if (!obj_elf_valid(obj))
366 return;
367
368 if (obj->efile.elf) {
369 elf_end(obj->efile.elf);
370 obj->efile.elf = NULL;
371 }
372 obj->efile.symbols = NULL;
373
374 zfree(&obj->efile.reloc);
375 obj->efile.nr_reloc = 0;
376 zclose(obj->efile.fd);
377 obj->efile.obj_buf = NULL;
378 obj->efile.obj_buf_sz = 0;
379 }
380
381 static int bpf_object__elf_init(struct bpf_object *obj)
382 {
383 int err = 0;
384 GElf_Ehdr *ep;
385
386 if (obj_elf_valid(obj)) {
387 pr_warning("elf init: internal error\n");
388 return -LIBBPF_ERRNO__LIBELF;
389 }
390
391 if (obj->efile.obj_buf_sz > 0) {
392 /*
393 * obj_buf should have been validated by
394 * bpf_object__open_buffer().
395 */
396 obj->efile.elf = elf_memory(obj->efile.obj_buf,
397 obj->efile.obj_buf_sz);
398 } else {
399 obj->efile.fd = open(obj->path, O_RDONLY);
400 if (obj->efile.fd < 0) {
401 pr_warning("failed to open %s: %s\n", obj->path,
402 strerror(errno));
403 return -errno;
404 }
405
406 obj->efile.elf = elf_begin(obj->efile.fd,
407 LIBBPF_ELF_C_READ_MMAP,
408 NULL);
409 }
410
411 if (!obj->efile.elf) {
412 pr_warning("failed to open %s as ELF file\n",
413 obj->path);
414 err = -LIBBPF_ERRNO__LIBELF;
415 goto errout;
416 }
417
418 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
419 pr_warning("failed to get EHDR from %s\n",
420 obj->path);
421 err = -LIBBPF_ERRNO__FORMAT;
422 goto errout;
423 }
424 ep = &obj->efile.ehdr;
425
426 if ((ep->e_type != ET_REL) || (ep->e_machine != 0)) {
427 pr_warning("%s is not an eBPF object file\n",
428 obj->path);
429 err = -LIBBPF_ERRNO__FORMAT;
430 goto errout;
431 }
432
433 return 0;
434 errout:
435 bpf_object__elf_finish(obj);
436 return err;
437 }
438
439 static int
440 bpf_object__check_endianness(struct bpf_object *obj)
441 {
442 static unsigned int const endian = 1;
443
444 switch (obj->efile.ehdr.e_ident[EI_DATA]) {
445 case ELFDATA2LSB:
446 /* We are big endian, BPF obj is little endian. */
447 if (*(unsigned char const *)&endian != 1)
448 goto mismatch;
449 break;
450
451 case ELFDATA2MSB:
452 /* We are little endian, BPF obj is big endian. */
453 if (*(unsigned char const *)&endian != 0)
454 goto mismatch;
455 break;
456 default:
457 return -LIBBPF_ERRNO__ENDIAN;
458 }
459
460 return 0;
461
462 mismatch:
463 pr_warning("Error: endianness mismatch.\n");
464 return -LIBBPF_ERRNO__ENDIAN;
465 }
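
/*
 * Restatement of the host-endianness probe used above: store 1 in an
 * unsigned int and read back its first byte; it is 1 only on a
 * little-endian host. The example_* name is hypothetical.
 */
static inline int example_host_is_little_endian(void)
{
	static const unsigned int one = 1;

	return *(const unsigned char *)&one == 1;
}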
466
467 static int
468 bpf_object__init_license(struct bpf_object *obj,
469 void *data, size_t size)
470 {
471 memcpy(obj->license, data,
472 min(size, sizeof(obj->license) - 1));
473 pr_debug("license of %s is %s\n", obj->path, obj->license);
474 return 0;
475 }
476
477 static int
478 bpf_object__init_kversion(struct bpf_object *obj,
479 void *data, size_t size)
480 {
481 u32 kver;
482
483 if (size != sizeof(kver)) {
484 pr_warning("invalid kver section in %s\n", obj->path);
485 return -LIBBPF_ERRNO__FORMAT;
486 }
487 memcpy(&kver, data, sizeof(kver));
488 obj->kern_version = kver;
489 pr_debug("kernel version of %s is %x\n", obj->path,
490 obj->kern_version);
491 return 0;
492 }
493
494 static int
495 bpf_object__init_maps(struct bpf_object *obj, void *data,
496 size_t size)
497 {
498 size_t nr_maps;
499 int i;
500
501 nr_maps = size / sizeof(struct bpf_map_def);
502 if (!data || !nr_maps) {
503 pr_debug("%s doesn't need map definition\n",
504 obj->path);
505 return 0;
506 }
507
508 pr_debug("maps in %s: %zd bytes\n", obj->path, size);
509
510 obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
511 if (!obj->maps) {
512 pr_warning("alloc maps for object failed\n");
513 return -ENOMEM;
514 }
515 obj->nr_maps = nr_maps;
516
517 for (i = 0; i < nr_maps; i++) {
518 struct bpf_map_def *def = &obj->maps[i].def;
519
520 /*
521 * Fill all fds with -1 so we won't close an incorrect
522 * fd (fd=0 is stdin) on failure (zclose won't close a
523 * negative fd).
524 */
525 obj->maps[i].fd = -1;
526
527 /* Save map definition into obj->maps */
528 *def = ((struct bpf_map_def *)data)[i];
529 }
530 return 0;
531 }
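
/*
 * Sketch of what bpf_object__init_maps() consumes: the "maps" ELF
 * section of a BPF object is expected to be an array of
 * struct bpf_map_def. In the BPF program source this is typically
 * declared roughly as below (the example_map name and the explicit
 * section attribute are illustrative):
 *
 *	struct bpf_map_def example_map
 *	__attribute__((section("maps"), used)) = {
 *		.type		= BPF_MAP_TYPE_HASH,
 *		.key_size	= sizeof(int),
 *		.value_size	= sizeof(long),
 *		.max_entries	= 1024,
 *	};
 *
 * Each definition is copied into obj->maps[i].def here, and
 * bpf_object__init_maps_name() below attaches the symbol name
 * ("example_map") found in the symbol table.
 */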
532
533 static int
534 bpf_object__init_maps_name(struct bpf_object *obj)
535 {
536 int i;
537 Elf_Data *symbols = obj->efile.symbols;
538
539 if (!symbols || obj->efile.maps_shndx < 0)
540 return -EINVAL;
541
542 for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
543 GElf_Sym sym;
544 size_t map_idx;
545 const char *map_name;
546
547 if (!gelf_getsym(symbols, i, &sym))
548 continue;
549 if (sym.st_shndx != obj->efile.maps_shndx)
550 continue;
551
552 map_name = elf_strptr(obj->efile.elf,
553 obj->efile.strtabidx,
554 sym.st_name);
555 map_idx = sym.st_value / sizeof(struct bpf_map_def);
556 if (map_idx >= obj->nr_maps) {
557 pr_warning("index of map \"%s\" is buggy: %zu > %zu\n",
558 map_name, map_idx, obj->nr_maps);
559 continue;
560 }
561 obj->maps[map_idx].name = strdup(map_name);
562 if (!obj->maps[map_idx].name) {
563 pr_warning("failed to alloc map name\n");
564 return -ENOMEM;
565 }
566 pr_debug("map %zu is \"%s\"\n", map_idx,
567 obj->maps[map_idx].name);
568 }
569 return 0;
570 }
571
572 static int bpf_object__elf_collect(struct bpf_object *obj)
573 {
574 Elf *elf = obj->efile.elf;
575 GElf_Ehdr *ep = &obj->efile.ehdr;
576 Elf_Scn *scn = NULL;
577 int idx = 0, err = 0;
578
579 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
580 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
581 pr_warning("failed to get e_shstrndx from %s\n",
582 obj->path);
583 return -LIBBPF_ERRNO__FORMAT;
584 }
585
586 while ((scn = elf_nextscn(elf, scn)) != NULL) {
587 char *name;
588 GElf_Shdr sh;
589 Elf_Data *data;
590
591 idx++;
592 if (gelf_getshdr(scn, &sh) != &sh) {
593 pr_warning("failed to get section header from %s\n",
594 obj->path);
595 err = -LIBBPF_ERRNO__FORMAT;
596 goto out;
597 }
598
599 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
600 if (!name) {
601 pr_warning("failed to get section name from %s\n",
602 obj->path);
603 err = -LIBBPF_ERRNO__FORMAT;
604 goto out;
605 }
606
607 data = elf_getdata(scn, 0);
608 if (!data) {
609 pr_warning("failed to get section data from %s(%s)\n",
610 name, obj->path);
611 err = -LIBBPF_ERRNO__FORMAT;
612 goto out;
613 }
614 pr_debug("section %s, size %ld, link %d, flags %lx, type=%d\n",
615 name, (unsigned long)data->d_size,
616 (int)sh.sh_link, (unsigned long)sh.sh_flags,
617 (int)sh.sh_type);
618
619 if (strcmp(name, "license") == 0)
620 err = bpf_object__init_license(obj,
621 data->d_buf,
622 data->d_size);
623 else if (strcmp(name, "version") == 0)
624 err = bpf_object__init_kversion(obj,
625 data->d_buf,
626 data->d_size);
627 else if (strcmp(name, "maps") == 0) {
628 err = bpf_object__init_maps(obj, data->d_buf,
629 data->d_size);
630 obj->efile.maps_shndx = idx;
631 } else if (sh.sh_type == SHT_SYMTAB) {
632 if (obj->efile.symbols) {
633 pr_warning("bpf: multiple SYMTAB in %s\n",
634 obj->path);
635 err = -LIBBPF_ERRNO__FORMAT;
636 } else {
637 obj->efile.symbols = data;
638 obj->efile.strtabidx = sh.sh_link;
639 }
640 } else if ((sh.sh_type == SHT_PROGBITS) &&
641 (sh.sh_flags & SHF_EXECINSTR) &&
642 (data->d_size > 0)) {
643 err = bpf_object__add_program(obj, data->d_buf,
644 data->d_size, name, idx);
645 if (err) {
646 char errmsg[STRERR_BUFSIZE];
647
648 strerror_r(-err, errmsg, sizeof(errmsg));
649 pr_warning("failed to alloc program %s (%s): %s",
650 name, obj->path, errmsg);
651 }
652 } else if (sh.sh_type == SHT_REL) {
653 void *reloc = obj->efile.reloc;
654 int nr_reloc = obj->efile.nr_reloc + 1;
655
656 reloc = realloc(reloc,
657 sizeof(*obj->efile.reloc) * nr_reloc);
658 if (!reloc) {
659 pr_warning("realloc failed\n");
660 err = -ENOMEM;
661 } else {
662 int n = nr_reloc - 1;
663
664 obj->efile.reloc = reloc;
665 obj->efile.nr_reloc = nr_reloc;
666
667 obj->efile.reloc[n].shdr = sh;
668 obj->efile.reloc[n].data = data;
669 }
670 }
671 if (err)
672 goto out;
673 }
674
675 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
676 pr_warning("Corrupted ELF file: index of strtab invalid\n");
677 return -LIBBPF_ERRNO__FORMAT;
678 }
679 if (obj->efile.maps_shndx >= 0)
680 err = bpf_object__init_maps_name(obj);
681 out:
682 return err;
683 }
684
685 static struct bpf_program *
686 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
687 {
688 struct bpf_program *prog;
689 size_t i;
690
691 for (i = 0; i < obj->nr_programs; i++) {
692 prog = &obj->programs[i];
693 if (prog->idx == idx)
694 return prog;
695 }
696 return NULL;
697 }
698
699 static int
700 bpf_program__collect_reloc(struct bpf_program *prog,
701 size_t nr_maps, GElf_Shdr *shdr,
702 Elf_Data *data, Elf_Data *symbols,
703 int maps_shndx)
704 {
705 int i, nrels;
706
707 pr_debug("collecting relocating info for: '%s'\n",
708 prog->section_name);
709 nrels = shdr->sh_size / shdr->sh_entsize;
710
711 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
712 if (!prog->reloc_desc) {
713 pr_warning("failed to alloc memory in relocation\n");
714 return -ENOMEM;
715 }
716 prog->nr_reloc = nrels;
717
718 for (i = 0; i < nrels; i++) {
719 GElf_Sym sym;
720 GElf_Rel rel;
721 unsigned int insn_idx;
722 struct bpf_insn *insns = prog->insns;
723 size_t map_idx;
724
725 if (!gelf_getrel(data, i, &rel)) {
726 pr_warning("relocation: failed to get %d reloc\n", i);
727 return -LIBBPF_ERRNO__FORMAT;
728 }
729
730 if (!gelf_getsym(symbols,
731 GELF_R_SYM(rel.r_info),
732 &sym)) {
733 pr_warning("relocation: symbol %"PRIx64" not found\n",
734 GELF_R_SYM(rel.r_info));
735 return -LIBBPF_ERRNO__FORMAT;
736 }
737
738 if (sym.st_shndx != maps_shndx) {
739 pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
740 prog->section_name, sym.st_shndx);
741 return -LIBBPF_ERRNO__RELOC;
742 }
743
744 insn_idx = rel.r_offset / sizeof(struct bpf_insn);
745 pr_debug("relocation: insn_idx=%u\n", insn_idx);
746
747 if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
748 pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
749 insn_idx, insns[insn_idx].code);
750 return -LIBBPF_ERRNO__RELOC;
751 }
752
753 map_idx = sym.st_value / sizeof(struct bpf_map_def);
754 if (map_idx >= nr_maps) {
755 pr_warning("bpf relocation: map_idx %d large than %d\n",
756 (int)map_idx, (int)nr_maps - 1);
757 return -LIBBPF_ERRNO__RELOC;
758 }
759
760 prog->reloc_desc[i].insn_idx = insn_idx;
761 prog->reloc_desc[i].map_idx = map_idx;
762 }
763 return 0;
764 }
765
766 static int
767 bpf_object__create_maps(struct bpf_object *obj)
768 {
769 unsigned int i;
770
771 for (i = 0; i < obj->nr_maps; i++) {
772 struct bpf_map_def *def = &obj->maps[i].def;
773 int *pfd = &obj->maps[i].fd;
774
775 *pfd = bpf_create_map(def->type,
776 def->key_size,
777 def->value_size,
778 def->max_entries);
779 if (*pfd < 0) {
780 size_t j;
781 int err = *pfd;
782
783 pr_warning("failed to create map: %s\n",
784 strerror(errno));
785 for (j = 0; j < i; j++)
786 zclose(obj->maps[j].fd);
787 return err;
788 }
789 pr_debug("create map: fd=%d\n", *pfd);
790 }
791
792 return 0;
793 }
794
795 static int
796 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
797 {
798 int i;
799
800 if (!prog || !prog->reloc_desc)
801 return 0;
802
803 for (i = 0; i < prog->nr_reloc; i++) {
804 int insn_idx, map_idx;
805 struct bpf_insn *insns = prog->insns;
806
807 insn_idx = prog->reloc_desc[i].insn_idx;
808 map_idx = prog->reloc_desc[i].map_idx;
809
810 if (insn_idx >= (int)prog->insns_cnt) {
811 pr_warning("relocation out of range: '%s'\n",
812 prog->section_name);
813 return -LIBBPF_ERRNO__RELOC;
814 }
815 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
816 insns[insn_idx].imm = obj->maps[map_idx].fd;
817 }
818
819 zfree(&prog->reloc_desc);
820 prog->nr_reloc = 0;
821 return 0;
822 }
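
/*
 * Restatement of the patch applied by bpf_program__relocate(): every
 * recorded relocation rewrites a BPF_LD | BPF_IMM | BPF_DW (ld_imm64)
 * instruction so its source register marks the immediate as a map fd
 * and the immediate carries the fd created in bpf_object__create_maps().
 * example_patch_map_ld() is a hypothetical standalone sketch.
 */
static inline void example_patch_map_ld(struct bpf_insn *insn, int map_fd)
{
	/* only ld_imm64 instructions may reference a map */
	if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
		return;

	insn->src_reg = BPF_PSEUDO_MAP_FD;
	insn->imm = map_fd;
}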
823
824
825 static int
826 bpf_object__relocate(struct bpf_object *obj)
827 {
828 struct bpf_program *prog;
829 size_t i;
830 int err;
831
832 for (i = 0; i < obj->nr_programs; i++) {
833 prog = &obj->programs[i];
834
835 err = bpf_program__relocate(prog, obj);
836 if (err) {
837 pr_warning("failed to relocate '%s'\n",
838 prog->section_name);
839 return err;
840 }
841 }
842 return 0;
843 }
844
845 static int bpf_object__collect_reloc(struct bpf_object *obj)
846 {
847 int i, err;
848
849 if (!obj_elf_valid(obj)) {
850 pr_warning("Internal error: elf object is closed\n");
851 return -LIBBPF_ERRNO__INTERNAL;
852 }
853
854 for (i = 0; i < obj->efile.nr_reloc; i++) {
855 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
856 Elf_Data *data = obj->efile.reloc[i].data;
857 int idx = shdr->sh_info;
858 struct bpf_program *prog;
859 size_t nr_maps = obj->nr_maps;
860
861 if (shdr->sh_type != SHT_REL) {
862 pr_warning("internal error at %d\n", __LINE__);
863 return -LIBBPF_ERRNO__INTERNAL;
864 }
865
866 prog = bpf_object__find_prog_by_idx(obj, idx);
867 if (!prog) {
868 pr_warning("relocation failed: no %d section\n",
869 idx);
870 return -LIBBPF_ERRNO__RELOC;
871 }
872
873 err = bpf_program__collect_reloc(prog, nr_maps,
874 shdr, data,
875 obj->efile.symbols,
876 obj->efile.maps_shndx);
877 if (err)
878 return err;
879 }
880 return 0;
881 }
882
883 static int
884 load_program(struct bpf_insn *insns, int insns_cnt,
885 char *license, u32 kern_version, int *pfd)
886 {
887 int ret;
888 char *log_buf;
889
890 if (!insns || !insns_cnt)
891 return -EINVAL;
892
893 log_buf = malloc(BPF_LOG_BUF_SIZE);
894 if (!log_buf)
895 pr_warning("Alloc log buffer for bpf loader error, continue without log\n");
896
897 ret = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
898 insns_cnt, license, kern_version,
899 log_buf, BPF_LOG_BUF_SIZE);
900
901 if (ret >= 0) {
902 *pfd = ret;
903 ret = 0;
904 goto out;
905 }
906
907 ret = -LIBBPF_ERRNO__LOAD;
908 pr_warning("load bpf program failed: %s\n", strerror(errno));
909
910 if (log_buf && log_buf[0] != '\0') {
911 ret = -LIBBPF_ERRNO__VERIFY;
912 pr_warning("-- BEGIN DUMP LOG ---\n");
913 pr_warning("\n%s\n", log_buf);
914 pr_warning("-- END LOG --\n");
915 } else {
916 if (insns_cnt >= BPF_MAXINSNS) {
917 pr_warning("Program too large (%d insns), at most %d insns\n",
918 insns_cnt, BPF_MAXINSNS);
919 ret = -LIBBPF_ERRNO__PROG2BIG;
920 } else if (log_buf) {
921 pr_warning("log buffer is empty\n");
922 ret = -LIBBPF_ERRNO__KVER;
923 }
924 }
925
926 out:
927 free(log_buf);
928 return ret;
929 }
930
931 static int
932 bpf_program__load(struct bpf_program *prog,
933 char *license, u32 kern_version)
934 {
935 int err = 0, fd, i;
936
937 if (prog->instances.nr < 0 || !prog->instances.fds) {
938 if (prog->preprocessor) {
939 pr_warning("Internal error: can't load program '%s'\n",
940 prog->section_name);
941 return -LIBBPF_ERRNO__INTERNAL;
942 }
943
944 prog->instances.fds = malloc(sizeof(int));
945 if (!prog->instances.fds) {
946 pr_warning("Not enough memory for BPF fds\n");
947 return -ENOMEM;
948 }
949 prog->instances.nr = 1;
950 prog->instances.fds[0] = -1;
951 }
952
953 if (!prog->preprocessor) {
954 if (prog->instances.nr != 1) {
955 pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
956 prog->section_name, prog->instances.nr);
957 }
958 err = load_program(prog->insns, prog->insns_cnt,
959 license, kern_version, &fd);
960 if (!err)
961 prog->instances.fds[0] = fd;
962 goto out;
963 }
964
965 for (i = 0; i < prog->instances.nr; i++) {
966 struct bpf_prog_prep_result result;
967 bpf_program_prep_t preprocessor = prog->preprocessor;
968
969 bzero(&result, sizeof(result));
970 err = preprocessor(prog, i, prog->insns,
971 prog->insns_cnt, &result);
972 if (err) {
973 pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
974 i, prog->section_name);
975 goto out;
976 }
977
978 if (!result.new_insn_ptr || !result.new_insn_cnt) {
979 pr_debug("Skip loading the %dth instance of program '%s'\n",
980 i, prog->section_name);
981 prog->instances.fds[i] = -1;
982 if (result.pfd)
983 *result.pfd = -1;
984 continue;
985 }
986
987 err = load_program(result.new_insn_ptr,
988 result.new_insn_cnt,
989 license, kern_version, &fd);
990
991 if (err) {
992 pr_warning("Loading the %dth instance of program '%s' failed\n",
993 i, prog->section_name);
994 goto out;
995 }
996
997 if (result.pfd)
998 *result.pfd = fd;
999 prog->instances.fds[i] = fd;
1000 }
1001 out:
1002 if (err)
1003 pr_warning("failed to load program '%s'\n",
1004 prog->section_name);
1005 zfree(&prog->insns);
1006 prog->insns_cnt = 0;
1007 return err;
1008 }
1009
1010 static int
1011 bpf_object__load_progs(struct bpf_object *obj)
1012 {
1013 size_t i;
1014 int err;
1015
1016 for (i = 0; i < obj->nr_programs; i++) {
1017 err = bpf_program__load(&obj->programs[i],
1018 obj->license,
1019 obj->kern_version);
1020 if (err)
1021 return err;
1022 }
1023 return 0;
1024 }
1025
1026 static int bpf_object__validate(struct bpf_object *obj)
1027 {
1028 if (obj->kern_version == 0) {
1029 pr_warning("%s doesn't provide kernel version\n",
1030 obj->path);
1031 return -LIBBPF_ERRNO__KVERSION;
1032 }
1033 return 0;
1034 }
1035
1036 static struct bpf_object *
1037 __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz)
1038 {
1039 struct bpf_object *obj;
1040 int err;
1041
1042 if (elf_version(EV_CURRENT) == EV_NONE) {
1043 pr_warning("failed to init libelf for %s\n", path);
1044 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
1045 }
1046
1047 obj = bpf_object__new(path, obj_buf, obj_buf_sz);
1048 if (IS_ERR(obj))
1049 return obj;
1050
1051 CHECK_ERR(bpf_object__elf_init(obj), err, out);
1052 CHECK_ERR(bpf_object__check_endianness(obj), err, out);
1053 CHECK_ERR(bpf_object__elf_collect(obj), err, out);
1054 CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
1055 CHECK_ERR(bpf_object__validate(obj), err, out);
1056
1057 bpf_object__elf_finish(obj);
1058 return obj;
1059 out:
1060 bpf_object__close(obj);
1061 return ERR_PTR(err);
1062 }
1063
1064 struct bpf_object *bpf_object__open(const char *path)
1065 {
1066 /* param validation */
1067 if (!path)
1068 return NULL;
1069
1070 pr_debug("loading %s\n", path);
1071
1072 return __bpf_object__open(path, NULL, 0);
1073 }
1074
1075 struct bpf_object *bpf_object__open_buffer(void *obj_buf,
1076 size_t obj_buf_sz,
1077 const char *name)
1078 {
1079 char tmp_name[64];
1080
1081 /* param validation */
1082 if (!obj_buf || obj_buf_sz <= 0)
1083 return NULL;
1084
1085 if (!name) {
1086 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
1087 (unsigned long)obj_buf,
1088 (unsigned long)obj_buf_sz);
1089 tmp_name[sizeof(tmp_name) - 1] = '\0';
1090 name = tmp_name;
1091 }
1092 pr_debug("loading object '%s' from buffer\n",
1093 name);
1094
1095 return __bpf_object__open(name, obj_buf, obj_buf_sz);
1096 }
1097
1098 int bpf_object__unload(struct bpf_object *obj)
1099 {
1100 size_t i;
1101
1102 if (!obj)
1103 return -EINVAL;
1104
1105 for (i = 0; i < obj->nr_maps; i++)
1106 zclose(obj->maps[i].fd);
1107
1108 for (i = 0; i < obj->nr_programs; i++)
1109 bpf_program__unload(&obj->programs[i]);
1110
1111 return 0;
1112 }
1113
1114 int bpf_object__load(struct bpf_object *obj)
1115 {
1116 int err;
1117
1118 if (!obj)
1119 return -EINVAL;
1120
1121 if (obj->loaded) {
1122 pr_warning("object should not be loaded twice\n");
1123 return -EINVAL;
1124 }
1125
1126 obj->loaded = true;
1127
1128 CHECK_ERR(bpf_object__create_maps(obj), err, out);
1129 CHECK_ERR(bpf_object__relocate(obj), err, out);
1130 CHECK_ERR(bpf_object__load_progs(obj), err, out);
1131
1132 return 0;
1133 out:
1134 bpf_object__unload(obj);
1135 pr_warning("failed to load object '%s'\n", obj->path);
1136 return err;
1137 }
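
/*
 * End-to-end usage sketch for the object API in this file: open an ELF
 * object, load its maps and programs into the kernel, then fetch a
 * program fd for attaching. Error handling is abbreviated; the
 * example_* name and "/path/to/prog.o" are hypothetical.
 */
static inline int example_load_first_prog(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int fd;

	obj = bpf_object__open("/path/to/prog.o");
	if (!obj || IS_ERR(obj))
		return -1;

	if (bpf_object__load(obj))
		goto err_close;

	prog = bpf_program__next(NULL, obj);
	if (!prog)
		goto err_close;

	/* fd of the first (and only) instance of the first program */
	fd = bpf_program__fd(prog);
	return fd;

err_close:
	bpf_object__close(obj);
	return -1;
}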
1138
1139 void bpf_object__close(struct bpf_object *obj)
1140 {
1141 size_t i;
1142
1143 if (!obj)
1144 return;
1145
1146 bpf_object__elf_finish(obj);
1147 bpf_object__unload(obj);
1148
1149 for (i = 0; i < obj->nr_maps; i++) {
1150 zfree(&obj->maps[i].name);
1151 if (obj->maps[i].clear_priv)
1152 obj->maps[i].clear_priv(&obj->maps[i],
1153 obj->maps[i].priv);
1154 obj->maps[i].priv = NULL;
1155 obj->maps[i].clear_priv = NULL;
1156 }
1157 zfree(&obj->maps);
1158 obj->nr_maps = 0;
1159
1160 if (obj->programs && obj->nr_programs) {
1161 for (i = 0; i < obj->nr_programs; i++)
1162 bpf_program__exit(&obj->programs[i]);
1163 }
1164 zfree(&obj->programs);
1165
1166 list_del(&obj->list);
1167 free(obj);
1168 }
1169
1170 struct bpf_object *
1171 bpf_object__next(struct bpf_object *prev)
1172 {
1173 struct bpf_object *next;
1174
1175 if (!prev)
1176 next = list_first_entry(&bpf_objects_list,
1177 struct bpf_object,
1178 list);
1179 else
1180 next = list_next_entry(prev, list);
1181
1182 /* An empty list is detected here, so no check is needed on entry. */
1183 if (&next->list == &bpf_objects_list)
1184 return NULL;
1185
1186 return next;
1187 }
1188
1189 const char *
1190 bpf_object__get_name(struct bpf_object *obj)
1191 {
1192 if (!obj)
1193 return ERR_PTR(-EINVAL);
1194 return obj->path;
1195 }
1196
1197 unsigned int
1198 bpf_object__get_kversion(struct bpf_object *obj)
1199 {
1200 if (!obj)
1201 return 0;
1202 return obj->kern_version;
1203 }
1204
1205 struct bpf_program *
1206 bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
1207 {
1208 size_t idx;
1209
1210 if (!obj->programs)
1211 return NULL;
1212 /* First handler */
1213 if (prev == NULL)
1214 return &obj->programs[0];
1215
1216 if (prev->obj != obj) {
1217 pr_warning("error: program handler doesn't match object\n");
1218 return NULL;
1219 }
1220
1221 idx = (prev - obj->programs) + 1;
1222 if (idx >= obj->nr_programs)
1223 return NULL;
1224 return &obj->programs[idx];
1225 }
1226
1227 int bpf_program__set_private(struct bpf_program *prog,
1228 void *priv,
1229 bpf_program_clear_priv_t clear_priv)
1230 {
1231 if (prog->priv && prog->clear_priv)
1232 prog->clear_priv(prog, prog->priv);
1233
1234 prog->priv = priv;
1235 prog->clear_priv = clear_priv;
1236 return 0;
1237 }
1238
1239 int bpf_program__get_private(struct bpf_program *prog, void **ppriv)
1240 {
1241 *ppriv = prog->priv;
1242 return 0;
1243 }
1244
1245 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
1246 {
1247 const char *title;
1248
1249 title = prog->section_name;
1250 if (needs_copy) {
1251 title = strdup(title);
1252 if (!title) {
1253 pr_warning("failed to strdup program title\n");
1254 return ERR_PTR(-ENOMEM);
1255 }
1256 }
1257
1258 return title;
1259 }
1260
1261 int bpf_program__fd(struct bpf_program *prog)
1262 {
1263 return bpf_program__nth_fd(prog, 0);
1264 }
1265
1266 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
1267 bpf_program_prep_t prep)
1268 {
1269 int *instances_fds;
1270
1271 if (nr_instances <= 0 || !prep)
1272 return -EINVAL;
1273
1274 if (prog->instances.nr > 0 || prog->instances.fds) {
1275 pr_warning("Can't set pre-processor after loading\n");
1276 return -EINVAL;
1277 }
1278
1279 instances_fds = malloc(sizeof(int) * nr_instances);
1280 if (!instances_fds) {
1281 pr_warning("alloc memory failed for fds\n");
1282 return -ENOMEM;
1283 }
1284
1285 /* fill all fds with -1 */
1286 memset(instances_fds, -1, sizeof(int) * nr_instances);
1287
1288 prog->instances.nr = nr_instances;
1289 prog->instances.fds = instances_fds;
1290 prog->preprocessor = prep;
1291 return 0;
1292 }
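
/*
 * Usage sketch for bpf_program__set_prep(): a preprocessor lets one ELF
 * program be loaded as several instances with rewritten instructions.
 * This minimal example_prep() just passes the original instructions
 * through unchanged for every instance; the example_* name is
 * hypothetical and the signature follows bpf_program_prep_t from
 * libbpf.h.
 */
static inline int example_prep(struct bpf_program *prog, int n,
			       struct bpf_insn *insns, int insns_cnt,
			       struct bpf_prog_prep_result *res)
{
	res->new_insn_ptr = insns;
	res->new_insn_cnt = insns_cnt;
	res->pfd = NULL;	/* fd is still recorded in prog->instances.fds[n] */
	return 0;
}

/*
 * Typically installed before bpf_object__load():
 *
 *	bpf_program__set_prep(prog, 4, example_prep);
 */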
1293
1294 int bpf_program__nth_fd(struct bpf_program *prog, int n)
1295 {
1296 int fd;
1297
1298 if (n >= prog->instances.nr || n < 0) {
1299 pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
1300 n, prog->section_name, prog->instances.nr);
1301 return -EINVAL;
1302 }
1303
1304 fd = prog->instances.fds[n];
1305 if (fd < 0) {
1306 pr_warning("%dth instance of program '%s' is invalid\n",
1307 n, prog->section_name);
1308 return -ENOENT;
1309 }
1310
1311 return fd;
1312 }
1313
1314 int bpf_map__get_fd(struct bpf_map *map)
1315 {
1316 if (!map)
1317 return -EINVAL;
1318
1319 return map->fd;
1320 }
1321
1322 int bpf_map__get_def(struct bpf_map *map, struct bpf_map_def *pdef)
1323 {
1324 if (!map || !pdef)
1325 return -EINVAL;
1326
1327 *pdef = map->def;
1328 return 0;
1329 }
1330
1331 const char *bpf_map__get_name(struct bpf_map *map)
1332 {
1333 if (!map)
1334 return NULL;
1335 return map->name;
1336 }
1337
1338 int bpf_map__set_private(struct bpf_map *map, void *priv,
1339 bpf_map_clear_priv_t clear_priv)
1340 {
1341 if (!map)
1342 return -EINVAL;
1343
1344 if (map->priv) {
1345 if (map->clear_priv)
1346 map->clear_priv(map, map->priv);
1347 }
1348
1349 map->priv = priv;
1350 map->clear_priv = clear_priv;
1351 return 0;
1352 }
1353
1354 int bpf_map__get_private(struct bpf_map *map, void **ppriv)
1355 {
1356 if (!map)
1357 return -EINVAL;
1358
1359 if (ppriv)
1360 *ppriv = map->priv;
1361 return 0;
1362 }
1363
1364 struct bpf_map *
1365 bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
1366 {
1367 size_t idx;
1368 struct bpf_map *s, *e;
1369
1370 if (!obj || !obj->maps)
1371 return NULL;
1372
1373 s = obj->maps;
1374 e = obj->maps + obj->nr_maps;
1375
1376 if (prev == NULL)
1377 return s;
1378
1379 if ((prev < s) || (prev >= e)) {
1380 pr_warning("error in %s: map handler doesn't belong to object\n",
1381 __func__);
1382 return NULL;
1383 }
1384
1385 idx = (prev - obj->maps) + 1;
1386 if (idx >= obj->nr_maps)
1387 return NULL;
1388 return &obj->maps[idx];
1389 }
1390
1391 struct bpf_map *
1392 bpf_object__get_map_by_name(struct bpf_object *obj, const char *name)
1393 {
1394 struct bpf_map *pos;
1395
1396 bpf_map__for_each(pos, obj) {
1397 if (pos->name && !strcmp(pos->name, name))
1398 return pos;
1399 }
1400 return NULL;
1401 }
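
/*
 * Usage sketch for the map accessors above: iterate over every map of an
 * object, or look one up by the name recorded from the symbol table.
 * The example_* name and the "events" map name are hypothetical.
 */
static inline int example_find_map_fd(struct bpf_object *obj)
{
	struct bpf_map *map;

	bpf_map__for_each(map, obj)
		pr_debug("map '%s' has fd %d\n",
			 bpf_map__get_name(map), bpf_map__get_fd(map));

	map = bpf_object__get_map_by_name(obj, "events");
	if (!map)
		return -ENOENT;

	return bpf_map__get_fd(map);
}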