tools/lib/bpf/libbpf.c
1 /*
2 * Common eBPF ELF object loading operations.
3 *
4 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
5 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
6 * Copyright (C) 2015 Huawei Inc.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation;
11 * version 2.1 of the License (not later!)
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this program; if not, see <http://www.gnu.org/licenses>
20 */
21
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <stdarg.h>
25 #include <inttypes.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <errno.h>
30 #include <asm/unistd.h>
31 #include <linux/kernel.h>
32 #include <linux/bpf.h>
33 #include <linux/list.h>
34 #include <libelf.h>
35 #include <gelf.h>
36
37 #include "libbpf.h"
38 #include "bpf.h"
39
40 #define __printf(a, b) __attribute__((format(printf, a, b)))
41
42 __printf(1, 2)
43 static int __base_pr(const char *format, ...)
44 {
45 va_list args;
46 int err;
47
48 va_start(args, format);
49 err = vfprintf(stderr, format, args);
50 va_end(args);
51 return err;
52 }
53
54 static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
55 static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
56 static __printf(1, 2) libbpf_print_fn_t __pr_debug;
57
58 #define __pr(func, fmt, ...) \
59 do { \
60 if ((func)) \
61 (func)("libbpf: " fmt, ##__VA_ARGS__); \
62 } while (0)
63
64 #define pr_warning(fmt, ...) __pr(__pr_warning, fmt, ##__VA_ARGS__)
65 #define pr_info(fmt, ...) __pr(__pr_info, fmt, ##__VA_ARGS__)
66 #define pr_debug(fmt, ...) __pr(__pr_debug, fmt, ##__VA_ARGS__)
67
68 void libbpf_set_print(libbpf_print_fn_t warn,
69 libbpf_print_fn_t info,
70 libbpf_print_fn_t debug)
71 {
72 __pr_warning = warn;
73 __pr_info = info;
74 __pr_debug = debug;
75 }
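/*
 * A minimal sketch of how a caller hooks these print callbacks,
 * assuming the printf-like libbpf_print_fn_t typedef declared in
 * libbpf.h. Warnings and info messages go to the caller's logger and
 * debug output stays disabled by passing NULL:
 *
 *	static int my_print(const char *fmt, ...)
 *	{
 *		va_list args;
 *		int ret;
 *
 *		va_start(args, fmt);
 *		ret = vfprintf(stderr, fmt, args);	// or any other logger
 *		va_end(args);
 *		return ret;
 *	}
 *
 *	libbpf_set_print(my_print, my_print, NULL);
 */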
76
77 #define STRERR_BUFSIZE 128
78
79 #define ERRNO_OFFSET(e) ((e) - __LIBBPF_ERRNO__START)
80 #define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c)
81 #define NR_ERRNO (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
82
83 static const char *libbpf_strerror_table[NR_ERRNO] = {
84 [ERRCODE_OFFSET(LIBELF)] = "Something wrong in libelf",
85 [ERRCODE_OFFSET(FORMAT)] = "BPF object format invalid",
86 [ERRCODE_OFFSET(KVERSION)] = "'version' section incorrect or lost",
87 [ERRCODE_OFFSET(ENDIAN)] = "Endian mismatch",
88 [ERRCODE_OFFSET(INTERNAL)] = "Internal error in libbpf",
89 [ERRCODE_OFFSET(RELOC)] = "Relocation failed",
90 [ERRCODE_OFFSET(VERIFY)] = "Kernel verifier blocks program loading",
91 [ERRCODE_OFFSET(PROG2BIG)] = "Program too big",
92 [ERRCODE_OFFSET(KVER)] = "Incorrect kernel version",
93 [ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type",
94 };
95
96 int libbpf_strerror(int err, char *buf, size_t size)
97 {
98 if (!buf || !size)
99 return -1;
100
101 err = err > 0 ? err : -err;
102
103 if (err < __LIBBPF_ERRNO__START) {
104 int ret;
105
106 ret = strerror_r(err, buf, size);
107 buf[size - 1] = '\0';
108 return ret;
109 }
110
111 if (err < __LIBBPF_ERRNO__END) {
112 const char *msg;
113
114 msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
115 snprintf(buf, size, "%s", msg);
116 buf[size - 1] = '\0';
117 return 0;
118 }
119
120 snprintf(buf, size, "Unknown libbpf error %d", err);
121 buf[size - 1] = '\0';
122 return -1;
123 }
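/*
 * Typical use of libbpf_strerror(), as a sketch (buffer sizing is the
 * caller's choice; STRERR_BUFSIZE is merely a convenient default):
 *
 *	char errmsg[STRERR_BUFSIZE];
 *	int err = bpf_object__load(obj);
 *
 *	if (err) {
 *		libbpf_strerror(err, errmsg, sizeof(errmsg));
 *		fprintf(stderr, "failed to load object: %s\n", errmsg);
 *	}
 *
 * Both positive and negative error codes are accepted; codes below
 * __LIBBPF_ERRNO__START fall back to strerror_r().
 */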
124
125 #define CHECK_ERR(action, err, out) do { \
126 err = action; \
127 if (err) \
128 goto out; \
129 } while(0)
130
131
132 /* Copied from tools/perf/util/util.h */
133 #ifndef zfree
134 # define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
135 #endif
136
137 #ifndef zclose
138 # define zclose(fd) ({ \
139 int ___err = 0; \
140 if ((fd) >= 0) \
141 ___err = close((fd)); \
142 fd = -1; \
143 ___err; })
144 #endif
145
146 #ifdef HAVE_LIBELF_MMAP_SUPPORT
147 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
148 #else
149 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
150 #endif
151
152 /*
153 * bpf_prog would be a better name, but it is already used in
154 * linux/filter.h.
155 */
156 struct bpf_program {
157 /* Index in elf obj file, for relocation use. */
158 int idx;
159 char *section_name;
160 struct bpf_insn *insns;
161 size_t insns_cnt;
162 enum bpf_prog_type type;
163
164 struct {
165 int insn_idx;
166 int map_idx;
167 } *reloc_desc;
168 int nr_reloc;
169
170 struct {
171 int nr;
172 int *fds;
173 } instances;
174 bpf_program_prep_t preprocessor;
175
176 struct bpf_object *obj;
177 void *priv;
178 bpf_program_clear_priv_t clear_priv;
179 };
180
181 struct bpf_map {
182 int fd;
183 char *name;
184 struct bpf_map_def def;
185 void *priv;
186 bpf_map_clear_priv_t clear_priv;
187 };
188
189 static LIST_HEAD(bpf_objects_list);
190
191 struct bpf_object {
192 char license[64];
193 u32 kern_version;
194
195 struct bpf_program *programs;
196 size_t nr_programs;
197 struct bpf_map *maps;
198 size_t nr_maps;
199
200 bool loaded;
201
202 /*
203 * Information used when doing ELF-related work. Only valid
204 * while the ELF object is open (see obj_elf_valid()).
205 */
206 struct {
207 int fd;
208 void *obj_buf;
209 size_t obj_buf_sz;
210 Elf *elf;
211 GElf_Ehdr ehdr;
212 Elf_Data *symbols;
213 size_t strtabidx;
214 struct {
215 GElf_Shdr shdr;
216 Elf_Data *data;
217 } *reloc;
218 int nr_reloc;
219 int maps_shndx;
220 } efile;
221 /*
222 * All loaded bpf_object instances are linked in a list, which is
223 * hidden from the caller. bpf_object__<func> handlers deal with
224 * all objects.
225 */
226 struct list_head list;
227 char path[];
228 };
229 #define obj_elf_valid(o) ((o)->efile.elf)
230
231 static void bpf_program__unload(struct bpf_program *prog)
232 {
233 int i;
234
235 if (!prog)
236 return;
237
238 /*
239 * If the object is opened but the program was never loaded,
240 * it is possible that prog->instances.nr == -1.
241 */
242 if (prog->instances.nr > 0) {
243 for (i = 0; i < prog->instances.nr; i++)
244 zclose(prog->instances.fds[i]);
245 } else if (prog->instances.nr != -1) {
246 pr_warning("Internal error: instances.nr is %d\n",
247 prog->instances.nr);
248 }
249
250 prog->instances.nr = -1;
251 zfree(&prog->instances.fds);
252 }
253
254 static void bpf_program__exit(struct bpf_program *prog)
255 {
256 if (!prog)
257 return;
258
259 if (prog->clear_priv)
260 prog->clear_priv(prog, prog->priv);
261
262 prog->priv = NULL;
263 prog->clear_priv = NULL;
264
265 bpf_program__unload(prog);
266 zfree(&prog->section_name);
267 zfree(&prog->insns);
268 zfree(&prog->reloc_desc);
269
270 prog->nr_reloc = 0;
271 prog->insns_cnt = 0;
272 prog->idx = -1;
273 }
274
275 static int
276 bpf_program__init(void *data, size_t size, char *name, int idx,
277 struct bpf_program *prog)
278 {
279 if (size < sizeof(struct bpf_insn)) {
280 pr_warning("corrupted section '%s'\n", name);
281 return -EINVAL;
282 }
283
284 bzero(prog, sizeof(*prog));
285
286 prog->section_name = strdup(name);
287 if (!prog->section_name) {
288 pr_warning("failed to alloc name for prog %s\n",
289 name);
290 goto errout;
291 }
292
293 prog->insns = malloc(size);
294 if (!prog->insns) {
295 pr_warning("failed to alloc insns for %s\n", name);
296 goto errout;
297 }
298 prog->insns_cnt = size / sizeof(struct bpf_insn);
299 memcpy(prog->insns, data,
300 prog->insns_cnt * sizeof(struct bpf_insn));
301 prog->idx = idx;
302 prog->instances.fds = NULL;
303 prog->instances.nr = -1;
304 prog->type = BPF_PROG_TYPE_KPROBE;
305
306 return 0;
307 errout:
308 bpf_program__exit(prog);
309 return -ENOMEM;
310 }
311
312 static int
313 bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
314 char *name, int idx)
315 {
316 struct bpf_program prog, *progs;
317 int nr_progs, err;
318
319 err = bpf_program__init(data, size, name, idx, &prog);
320 if (err)
321 return err;
322
323 progs = obj->programs;
324 nr_progs = obj->nr_programs;
325
326 progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
327 if (!progs) {
328 /*
329 * In this case the original obj->programs
330 * is still valid, so no special handling is needed
331 * for bpf_object__close().
332 */
333 pr_warning("failed to alloc a new program '%s'\n",
334 name);
335 bpf_program__exit(&prog);
336 return -ENOMEM;
337 }
338
339 pr_debug("found program %s\n", prog.section_name);
340 obj->programs = progs;
341 obj->nr_programs = nr_progs + 1;
342 prog.obj = obj;
343 progs[nr_progs] = prog;
344 return 0;
345 }
346
347 static struct bpf_object *bpf_object__new(const char *path,
348 void *obj_buf,
349 size_t obj_buf_sz)
350 {
351 struct bpf_object *obj;
352
353 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
354 if (!obj) {
355 pr_warning("alloc memory failed for %s\n", path);
356 return ERR_PTR(-ENOMEM);
357 }
358
359 strcpy(obj->path, path);
360 obj->efile.fd = -1;
361
362 /*
363 * The caller of this function should also call
364 * bpf_object__elf_finish() after data collection to return
365 * obj_buf to the user. Otherwise we would have to duplicate the
366 * buffer to avoid the user freeing it before ELF processing finishes.
367 */
368 obj->efile.obj_buf = obj_buf;
369 obj->efile.obj_buf_sz = obj_buf_sz;
370 obj->efile.maps_shndx = -1;
371
372 obj->loaded = false;
373
374 INIT_LIST_HEAD(&obj->list);
375 list_add(&obj->list, &bpf_objects_list);
376 return obj;
377 }
378
379 static void bpf_object__elf_finish(struct bpf_object *obj)
380 {
381 if (!obj_elf_valid(obj))
382 return;
383
384 if (obj->efile.elf) {
385 elf_end(obj->efile.elf);
386 obj->efile.elf = NULL;
387 }
388 obj->efile.symbols = NULL;
389
390 zfree(&obj->efile.reloc);
391 obj->efile.nr_reloc = 0;
392 zclose(obj->efile.fd);
393 obj->efile.obj_buf = NULL;
394 obj->efile.obj_buf_sz = 0;
395 }
396
397 static int bpf_object__elf_init(struct bpf_object *obj)
398 {
399 int err = 0;
400 GElf_Ehdr *ep;
401
402 if (obj_elf_valid(obj)) {
403 pr_warning("elf init: internal error\n");
404 return -LIBBPF_ERRNO__LIBELF;
405 }
406
407 if (obj->efile.obj_buf_sz > 0) {
408 /*
409 * obj_buf should have been validated by
410 * bpf_object__open_buffer().
411 */
412 obj->efile.elf = elf_memory(obj->efile.obj_buf,
413 obj->efile.obj_buf_sz);
414 } else {
415 obj->efile.fd = open(obj->path, O_RDONLY);
416 if (obj->efile.fd < 0) {
417 pr_warning("failed to open %s: %s\n", obj->path,
418 strerror(errno));
419 return -errno;
420 }
421
422 obj->efile.elf = elf_begin(obj->efile.fd,
423 LIBBPF_ELF_C_READ_MMAP,
424 NULL);
425 }
426
427 if (!obj->efile.elf) {
428 pr_warning("failed to open %s as ELF file\n",
429 obj->path);
430 err = -LIBBPF_ERRNO__LIBELF;
431 goto errout;
432 }
433
434 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
435 pr_warning("failed to get EHDR from %s\n",
436 obj->path);
437 err = -LIBBPF_ERRNO__FORMAT;
438 goto errout;
439 }
440 ep = &obj->efile.ehdr;
441
442 if ((ep->e_type != ET_REL) || (ep->e_machine != 0)) {
443 pr_warning("%s is not an eBPF object file\n",
444 obj->path);
445 err = -LIBBPF_ERRNO__FORMAT;
446 goto errout;
447 }
448
449 return 0;
450 errout:
451 bpf_object__elf_finish(obj);
452 return err;
453 }
454
455 static int
456 bpf_object__check_endianness(struct bpf_object *obj)
457 {
458 static unsigned int const endian = 1;
459
460 switch (obj->efile.ehdr.e_ident[EI_DATA]) {
461 case ELFDATA2LSB:
462 /* We are big endian, BPF obj is little endian. */
463 if (*(unsigned char const *)&endian != 1)
464 goto mismatch;
465 break;
466
467 case ELFDATA2MSB:
468 /* We are little endian, BPF obj is big endian. */
469 if (*(unsigned char const *)&endian != 0)
470 goto mismatch;
471 break;
472 default:
473 return -LIBBPF_ERRNO__ENDIAN;
474 }
475
476 return 0;
477
478 mismatch:
479 pr_warning("Error: endianness mismatch.\n");
480 return -LIBBPF_ERRNO__ENDIAN;
481 }
482
483 static int
484 bpf_object__init_license(struct bpf_object *obj,
485 void *data, size_t size)
486 {
487 memcpy(obj->license, data,
488 min(size, sizeof(obj->license) - 1));
489 pr_debug("license of %s is %s\n", obj->path, obj->license);
490 return 0;
491 }
492
493 static int
494 bpf_object__init_kversion(struct bpf_object *obj,
495 void *data, size_t size)
496 {
497 u32 kver;
498
499 if (size != sizeof(kver)) {
500 pr_warning("invalid kver section in %s\n", obj->path);
501 return -LIBBPF_ERRNO__FORMAT;
502 }
503 memcpy(&kver, data, sizeof(kver));
504 obj->kern_version = kver;
505 pr_debug("kernel version of %s is %x\n", obj->path,
506 obj->kern_version);
507 return 0;
508 }
509
510 static int
511 bpf_object__init_maps(struct bpf_object *obj, void *data,
512 size_t size)
513 {
514 size_t nr_maps;
515 int i;
516
517 nr_maps = size / sizeof(struct bpf_map_def);
518 if (!data || !nr_maps) {
519 pr_debug("%s doesn't need map definition\n",
520 obj->path);
521 return 0;
522 }
523
524 pr_debug("maps in %s: %zd bytes\n", obj->path, size);
525
526 obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
527 if (!obj->maps) {
528 pr_warning("alloc maps for object failed\n");
529 return -ENOMEM;
530 }
531 obj->nr_maps = nr_maps;
532
533 for (i = 0; i < nr_maps; i++) {
534 struct bpf_map_def *def = &obj->maps[i].def;
535
536 /*
537 * Fill all fds with -1 so we won't close an incorrect
538 * fd (fd=0 is stdin) on failure (zclose won't close
539 * a negative fd).
540 */
541 obj->maps[i].fd = -1;
542
543 /* Save map definition into obj->maps */
544 *def = ((struct bpf_map_def *)data)[i];
545 }
546 return 0;
547 }
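/*
 * The "maps" section parsed above is expected to be a flat array of
 * struct bpf_map_def. In eBPF C source it would typically be written
 * as in the sketch below, where SEC() is the usual section-placement
 * helper macro from the sample/selftest headers (not part of libbpf)
 * and the field layout follows the bpf_map_def definition in libbpf.h:
 *
 *	struct bpf_map_def SEC("maps") my_map = {
 *		.type		= BPF_MAP_TYPE_HASH,
 *		.key_size	= sizeof(int),
 *		.value_size	= sizeof(long),
 *		.max_entries	= 1024,
 *	};
 *
 * Each symbol defined in that section is matched back to its slot by
 * bpf_object__init_maps_name() using st_value / sizeof(struct bpf_map_def).
 */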
548
549 static int
550 bpf_object__init_maps_name(struct bpf_object *obj)
551 {
552 int i;
553 Elf_Data *symbols = obj->efile.symbols;
554
555 if (!symbols || obj->efile.maps_shndx < 0)
556 return -EINVAL;
557
558 for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
559 GElf_Sym sym;
560 size_t map_idx;
561 const char *map_name;
562
563 if (!gelf_getsym(symbols, i, &sym))
564 continue;
565 if (sym.st_shndx != obj->efile.maps_shndx)
566 continue;
567
568 map_name = elf_strptr(obj->efile.elf,
569 obj->efile.strtabidx,
570 sym.st_name);
571 map_idx = sym.st_value / sizeof(struct bpf_map_def);
572 if (map_idx >= obj->nr_maps) {
573 pr_warning("index of map \"%s\" is buggy: %zu > %zu\n",
574 map_name, map_idx, obj->nr_maps);
575 continue;
576 }
577 obj->maps[map_idx].name = strdup(map_name);
578 if (!obj->maps[map_idx].name) {
579 pr_warning("failed to alloc map name\n");
580 return -ENOMEM;
581 }
582 pr_debug("map %zu is \"%s\"\n", map_idx,
583 obj->maps[map_idx].name);
584 }
585 return 0;
586 }
587
588 static int bpf_object__elf_collect(struct bpf_object *obj)
589 {
590 Elf *elf = obj->efile.elf;
591 GElf_Ehdr *ep = &obj->efile.ehdr;
592 Elf_Scn *scn = NULL;
593 int idx = 0, err = 0;
594
595 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
596 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
597 pr_warning("failed to get e_shstrndx from %s\n",
598 obj->path);
599 return -LIBBPF_ERRNO__FORMAT;
600 }
601
602 while ((scn = elf_nextscn(elf, scn)) != NULL) {
603 char *name;
604 GElf_Shdr sh;
605 Elf_Data *data;
606
607 idx++;
608 if (gelf_getshdr(scn, &sh) != &sh) {
609 pr_warning("failed to get section header from %s\n",
610 obj->path);
611 err = -LIBBPF_ERRNO__FORMAT;
612 goto out;
613 }
614
615 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
616 if (!name) {
617 pr_warning("failed to get section name from %s\n",
618 obj->path);
619 err = -LIBBPF_ERRNO__FORMAT;
620 goto out;
621 }
622
623 data = elf_getdata(scn, 0);
624 if (!data) {
625 pr_warning("failed to get section data from %s(%s)\n",
626 name, obj->path);
627 err = -LIBBPF_ERRNO__FORMAT;
628 goto out;
629 }
630 pr_debug("section %s, size %ld, link %d, flags %lx, type=%d\n",
631 name, (unsigned long)data->d_size,
632 (int)sh.sh_link, (unsigned long)sh.sh_flags,
633 (int)sh.sh_type);
634
635 if (strcmp(name, "license") == 0)
636 err = bpf_object__init_license(obj,
637 data->d_buf,
638 data->d_size);
639 else if (strcmp(name, "version") == 0)
640 err = bpf_object__init_kversion(obj,
641 data->d_buf,
642 data->d_size);
643 else if (strcmp(name, "maps") == 0) {
644 err = bpf_object__init_maps(obj, data->d_buf,
645 data->d_size);
646 obj->efile.maps_shndx = idx;
647 } else if (sh.sh_type == SHT_SYMTAB) {
648 if (obj->efile.symbols) {
649 pr_warning("bpf: multiple SYMTAB in %s\n",
650 obj->path);
651 err = -LIBBPF_ERRNO__FORMAT;
652 } else {
653 obj->efile.symbols = data;
654 obj->efile.strtabidx = sh.sh_link;
655 }
656 } else if ((sh.sh_type == SHT_PROGBITS) &&
657 (sh.sh_flags & SHF_EXECINSTR) &&
658 (data->d_size > 0)) {
659 err = bpf_object__add_program(obj, data->d_buf,
660 data->d_size, name, idx);
661 if (err) {
662 char errmsg[STRERR_BUFSIZE];
663
664 strerror_r(-err, errmsg, sizeof(errmsg));
665 pr_warning("failed to alloc program %s (%s): %s",
666 name, obj->path, errmsg);
667 }
668 } else if (sh.sh_type == SHT_REL) {
669 void *reloc = obj->efile.reloc;
670 int nr_reloc = obj->efile.nr_reloc + 1;
671
672 reloc = realloc(reloc,
673 sizeof(*obj->efile.reloc) * nr_reloc);
674 if (!reloc) {
675 pr_warning("realloc failed\n");
676 err = -ENOMEM;
677 } else {
678 int n = nr_reloc - 1;
679
680 obj->efile.reloc = reloc;
681 obj->efile.nr_reloc = nr_reloc;
682
683 obj->efile.reloc[n].shdr = sh;
684 obj->efile.reloc[n].data = data;
685 }
686 }
687 if (err)
688 goto out;
689 }
690
691 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
692 pr_warning("Corrupted ELF file: index of strtab invalid\n");
693 return -LIBBPF_ERRNO__FORMAT;
694 }
695 if (obj->efile.maps_shndx >= 0)
696 err = bpf_object__init_maps_name(obj);
697 out:
698 return err;
699 }
700
701 static struct bpf_program *
702 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
703 {
704 struct bpf_program *prog;
705 size_t i;
706
707 for (i = 0; i < obj->nr_programs; i++) {
708 prog = &obj->programs[i];
709 if (prog->idx == idx)
710 return prog;
711 }
712 return NULL;
713 }
714
715 static int
716 bpf_program__collect_reloc(struct bpf_program *prog,
717 size_t nr_maps, GElf_Shdr *shdr,
718 Elf_Data *data, Elf_Data *symbols,
719 int maps_shndx)
720 {
721 int i, nrels;
722
723 pr_debug("collecting relocation info for: '%s'\n",
724 prog->section_name);
725 nrels = shdr->sh_size / shdr->sh_entsize;
726
727 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
728 if (!prog->reloc_desc) {
729 pr_warning("failed to alloc memory in relocation\n");
730 return -ENOMEM;
731 }
732 prog->nr_reloc = nrels;
733
734 for (i = 0; i < nrels; i++) {
735 GElf_Sym sym;
736 GElf_Rel rel;
737 unsigned int insn_idx;
738 struct bpf_insn *insns = prog->insns;
739 size_t map_idx;
740
741 if (!gelf_getrel(data, i, &rel)) {
742 pr_warning("relocation: failed to get %d reloc\n", i);
743 return -LIBBPF_ERRNO__FORMAT;
744 }
745
746 if (!gelf_getsym(symbols,
747 GELF_R_SYM(rel.r_info),
748 &sym)) {
749 pr_warning("relocation: symbol %"PRIx64" not found\n",
750 GELF_R_SYM(rel.r_info));
751 return -LIBBPF_ERRNO__FORMAT;
752 }
753
754 if (sym.st_shndx != maps_shndx) {
755 pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
756 prog->section_name, sym.st_shndx);
757 return -LIBBPF_ERRNO__RELOC;
758 }
759
760 insn_idx = rel.r_offset / sizeof(struct bpf_insn);
761 pr_debug("relocation: insn_idx=%u\n", insn_idx);
762
763 if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
764 pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
765 insn_idx, insns[insn_idx].code);
766 return -LIBBPF_ERRNO__RELOC;
767 }
768
769 map_idx = sym.st_value / sizeof(struct bpf_map_def);
770 if (map_idx >= nr_maps) {
771 pr_warning("bpf relocation: map_idx %d large than %d\n",
772 (int)map_idx, (int)nr_maps - 1);
773 return -LIBBPF_ERRNO__RELOC;
774 }
775
776 prog->reloc_desc[i].insn_idx = insn_idx;
777 prog->reloc_desc[i].map_idx = map_idx;
778 }
779 return 0;
780 }
781
782 static int
783 bpf_object__create_maps(struct bpf_object *obj)
784 {
785 unsigned int i;
786
787 for (i = 0; i < obj->nr_maps; i++) {
788 struct bpf_map_def *def = &obj->maps[i].def;
789 int *pfd = &obj->maps[i].fd;
790
791 *pfd = bpf_create_map(def->type,
792 def->key_size,
793 def->value_size,
794 def->max_entries);
795 if (*pfd < 0) {
796 size_t j;
797 int err = *pfd;
798
799 pr_warning("failed to create map: %s\n",
800 strerror(errno));
801 for (j = 0; j < i; j++)
802 zclose(obj->maps[j].fd);
803 return err;
804 }
805 pr_debug("create map: fd=%d\n", *pfd);
806 }
807
808 return 0;
809 }
810
811 static int
812 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
813 {
814 int i;
815
816 if (!prog || !prog->reloc_desc)
817 return 0;
818
819 for (i = 0; i < prog->nr_reloc; i++) {
820 int insn_idx, map_idx;
821 struct bpf_insn *insns = prog->insns;
822
823 insn_idx = prog->reloc_desc[i].insn_idx;
824 map_idx = prog->reloc_desc[i].map_idx;
825
826 if (insn_idx >= (int)prog->insns_cnt) {
827 pr_warning("relocation out of range: '%s'\n",
828 prog->section_name);
829 return -LIBBPF_ERRNO__RELOC;
830 }
831 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
832 insns[insn_idx].imm = obj->maps[map_idx].fd;
833 }
834
835 zfree(&prog->reloc_desc);
836 prog->nr_reloc = 0;
837 return 0;
838 }
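/*
 * The patching above relies on the kernel's convention for map
 * references: an ld_imm64 instruction (BPF_LD | BPF_IMM | BPF_DW) whose
 * src_reg is BPF_PSEUDO_MAP_FD is interpreted by the verifier as "load
 * the address of the map whose fd is in the imm field". Conceptually:
 *
 *	before relocation:	r1 = <map symbol>	(src_reg = 0, imm = 0)
 *	after relocation:	r1 = map		(src_reg = BPF_PSEUDO_MAP_FD, imm = map fd)
 *
 * Only the first half of the 16-byte ld_imm64 pair is patched here;
 * the second half keeps a zero immediate.
 */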
839
840
841 static int
842 bpf_object__relocate(struct bpf_object *obj)
843 {
844 struct bpf_program *prog;
845 size_t i;
846 int err;
847
848 for (i = 0; i < obj->nr_programs; i++) {
849 prog = &obj->programs[i];
850
851 err = bpf_program__relocate(prog, obj);
852 if (err) {
853 pr_warning("failed to relocate '%s'\n",
854 prog->section_name);
855 return err;
856 }
857 }
858 return 0;
859 }
860
861 static int bpf_object__collect_reloc(struct bpf_object *obj)
862 {
863 int i, err;
864
865 if (!obj_elf_valid(obj)) {
866 pr_warning("Internal error: elf object is closed\n");
867 return -LIBBPF_ERRNO__INTERNAL;
868 }
869
870 for (i = 0; i < obj->efile.nr_reloc; i++) {
871 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
872 Elf_Data *data = obj->efile.reloc[i].data;
873 int idx = shdr->sh_info;
874 struct bpf_program *prog;
875 size_t nr_maps = obj->nr_maps;
876
877 if (shdr->sh_type != SHT_REL) {
878 pr_warning("internal error at %d\n", __LINE__);
879 return -LIBBPF_ERRNO__INTERNAL;
880 }
881
882 prog = bpf_object__find_prog_by_idx(obj, idx);
883 if (!prog) {
884 pr_warning("relocation failed: no %d section\n",
885 idx);
886 return -LIBBPF_ERRNO__RELOC;
887 }
888
889 err = bpf_program__collect_reloc(prog, nr_maps,
890 shdr, data,
891 obj->efile.symbols,
892 obj->efile.maps_shndx);
893 if (err)
894 return err;
895 }
896 return 0;
897 }
898
899 static int
900 load_program(enum bpf_prog_type type, struct bpf_insn *insns,
901 int insns_cnt, char *license, u32 kern_version, int *pfd)
902 {
903 int ret;
904 char *log_buf;
905
906 if (!insns || !insns_cnt)
907 return -EINVAL;
908
909 log_buf = malloc(BPF_LOG_BUF_SIZE);
910 if (!log_buf)
911 pr_warning("Alloc log buffer for bpf loader error, continue without log\n");
912
913 ret = bpf_load_program(type, insns, insns_cnt, license,
914 kern_version, log_buf, BPF_LOG_BUF_SIZE);
915
916 if (ret >= 0) {
917 *pfd = ret;
918 ret = 0;
919 goto out;
920 }
921
922 ret = -LIBBPF_ERRNO__LOAD;
923 pr_warning("load bpf program failed: %s\n", strerror(errno));
924
925 if (log_buf && log_buf[0] != '\0') {
926 ret = -LIBBPF_ERRNO__VERIFY;
927 pr_warning("-- BEGIN DUMP LOG ---\n");
928 pr_warning("\n%s\n", log_buf);
929 pr_warning("-- END LOG --\n");
930 } else if (insns_cnt >= BPF_MAXINSNS) {
931 pr_warning("Program too large (%d insns), at most %d insns\n",
932 insns_cnt, BPF_MAXINSNS);
933 ret = -LIBBPF_ERRNO__PROG2BIG;
934 } else {
935 /* Wrong program type? */
936 if (type != BPF_PROG_TYPE_KPROBE) {
937 int fd;
938
939 fd = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
940 insns_cnt, license, kern_version,
941 NULL, 0);
942 if (fd >= 0) {
943 close(fd);
944 ret = -LIBBPF_ERRNO__PROGTYPE;
945 goto out;
946 }
947 }
948
949 if (log_buf)
950 ret = -LIBBPF_ERRNO__KVER;
951 }
952
953 out:
954 free(log_buf);
955 return ret;
956 }
957
958 static int
959 bpf_program__load(struct bpf_program *prog,
960 char *license, u32 kern_version)
961 {
962 int err = 0, fd, i;
963
964 if (prog->instances.nr < 0 || !prog->instances.fds) {
965 if (prog->preprocessor) {
966 pr_warning("Internal error: can't load program '%s'\n",
967 prog->section_name);
968 return -LIBBPF_ERRNO__INTERNAL;
969 }
970
971 prog->instances.fds = malloc(sizeof(int));
972 if (!prog->instances.fds) {
973 pr_warning("Not enough memory for BPF fds\n");
974 return -ENOMEM;
975 }
976 prog->instances.nr = 1;
977 prog->instances.fds[0] = -1;
978 }
979
980 if (!prog->preprocessor) {
981 if (prog->instances.nr != 1) {
982 pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
983 prog->section_name, prog->instances.nr);
984 }
985 err = load_program(prog->type, prog->insns, prog->insns_cnt,
986 license, kern_version, &fd);
987 if (!err)
988 prog->instances.fds[0] = fd;
989 goto out;
990 }
991
992 for (i = 0; i < prog->instances.nr; i++) {
993 struct bpf_prog_prep_result result;
994 bpf_program_prep_t preprocessor = prog->preprocessor;
995
996 bzero(&result, sizeof(result));
997 err = preprocessor(prog, i, prog->insns,
998 prog->insns_cnt, &result);
999 if (err) {
1000 pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
1001 i, prog->section_name);
1002 goto out;
1003 }
1004
1005 if (!result.new_insn_ptr || !result.new_insn_cnt) {
1006 pr_debug("Skip loading the %dth instance of program '%s'\n",
1007 i, prog->section_name);
1008 prog->instances.fds[i] = -1;
1009 if (result.pfd)
1010 *result.pfd = -1;
1011 continue;
1012 }
1013
1014 err = load_program(prog->type, result.new_insn_ptr,
1015 result.new_insn_cnt,
1016 license, kern_version, &fd);
1017
1018 if (err) {
1019 pr_warning("Loading the %dth instance of program '%s' failed\n",
1020 i, prog->section_name);
1021 goto out;
1022 }
1023
1024 if (result.pfd)
1025 *result.pfd = fd;
1026 prog->instances.fds[i] = fd;
1027 }
1028 out:
1029 if (err)
1030 pr_warning("failed to load program '%s'\n",
1031 prog->section_name);
1032 zfree(&prog->insns);
1033 prog->insns_cnt = 0;
1034 return err;
1035 }
1036
1037 static int
1038 bpf_object__load_progs(struct bpf_object *obj)
1039 {
1040 size_t i;
1041 int err;
1042
1043 for (i = 0; i < obj->nr_programs; i++) {
1044 err = bpf_program__load(&obj->programs[i],
1045 obj->license,
1046 obj->kern_version);
1047 if (err)
1048 return err;
1049 }
1050 return 0;
1051 }
1052
1053 static int bpf_object__validate(struct bpf_object *obj)
1054 {
1055 if (obj->kern_version == 0) {
1056 pr_warning("%s doesn't provide kernel version\n",
1057 obj->path);
1058 return -LIBBPF_ERRNO__KVERSION;
1059 }
1060 return 0;
1061 }
1062
1063 static struct bpf_object *
1064 __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz)
1065 {
1066 struct bpf_object *obj;
1067 int err;
1068
1069 if (elf_version(EV_CURRENT) == EV_NONE) {
1070 pr_warning("failed to init libelf for %s\n", path);
1071 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
1072 }
1073
1074 obj = bpf_object__new(path, obj_buf, obj_buf_sz);
1075 if (IS_ERR(obj))
1076 return obj;
1077
1078 CHECK_ERR(bpf_object__elf_init(obj), err, out);
1079 CHECK_ERR(bpf_object__check_endianness(obj), err, out);
1080 CHECK_ERR(bpf_object__elf_collect(obj), err, out);
1081 CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
1082 CHECK_ERR(bpf_object__validate(obj), err, out);
1083
1084 bpf_object__elf_finish(obj);
1085 return obj;
1086 out:
1087 bpf_object__close(obj);
1088 return ERR_PTR(err);
1089 }
1090
1091 struct bpf_object *bpf_object__open(const char *path)
1092 {
1093 /* param validation */
1094 if (!path)
1095 return NULL;
1096
1097 pr_debug("loading %s\n", path);
1098
1099 return __bpf_object__open(path, NULL, 0);
1100 }
1101
1102 struct bpf_object *bpf_object__open_buffer(void *obj_buf,
1103 size_t obj_buf_sz,
1104 const char *name)
1105 {
1106 char tmp_name[64];
1107
1108 /* param validation */
1109 if (!obj_buf || obj_buf_sz <= 0)
1110 return NULL;
1111
1112 if (!name) {
1113 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
1114 (unsigned long)obj_buf,
1115 (unsigned long)obj_buf_sz);
1116 tmp_name[sizeof(tmp_name) - 1] = '\0';
1117 name = tmp_name;
1118 }
1119 pr_debug("loading object '%s' from buffer\n",
1120 name);
1121
1122 return __bpf_object__open(name, obj_buf, obj_buf_sz);
1123 }
1124
1125 int bpf_object__unload(struct bpf_object *obj)
1126 {
1127 size_t i;
1128
1129 if (!obj)
1130 return -EINVAL;
1131
1132 for (i = 0; i < obj->nr_maps; i++)
1133 zclose(obj->maps[i].fd);
1134
1135 for (i = 0; i < obj->nr_programs; i++)
1136 bpf_program__unload(&obj->programs[i]);
1137
1138 return 0;
1139 }
1140
1141 int bpf_object__load(struct bpf_object *obj)
1142 {
1143 int err;
1144
1145 if (!obj)
1146 return -EINVAL;
1147
1148 if (obj->loaded) {
1149 pr_warning("object should not be loaded twice\n");
1150 return -EINVAL;
1151 }
1152
1153 obj->loaded = true;
1154
1155 CHECK_ERR(bpf_object__create_maps(obj), err, out);
1156 CHECK_ERR(bpf_object__relocate(obj), err, out);
1157 CHECK_ERR(bpf_object__load_progs(obj), err, out);
1158
1159 return 0;
1160 out:
1161 bpf_object__unload(obj);
1162 pr_warning("failed to load object '%s'\n", obj->path);
1163 return err;
1164 }
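/*
 * The expected call sequence for users of this file is open, then
 * load, then fetch program fds. A minimal sketch, assuming the IS_ERR()
 * helper from the tools headers, with "bpf_prog.o" as a placeholder
 * object path and libbpf_strerror() based reporting omitted:
 *
 *	struct bpf_object *obj;
 *	struct bpf_program *prog;
 *	int fd;
 *
 *	obj = bpf_object__open("bpf_prog.o");
 *	if (!obj || IS_ERR(obj))
 *		return -1;
 *	if (bpf_object__load(obj))
 *		return -1;
 *	prog = bpf_program__next(NULL, obj);
 *	fd = bpf_program__fd(prog);
 *	...
 *	bpf_object__close(obj);
 */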
1165
1166 void bpf_object__close(struct bpf_object *obj)
1167 {
1168 size_t i;
1169
1170 if (!obj)
1171 return;
1172
1173 bpf_object__elf_finish(obj);
1174 bpf_object__unload(obj);
1175
1176 for (i = 0; i < obj->nr_maps; i++) {
1177 zfree(&obj->maps[i].name);
1178 if (obj->maps[i].clear_priv)
1179 obj->maps[i].clear_priv(&obj->maps[i],
1180 obj->maps[i].priv);
1181 obj->maps[i].priv = NULL;
1182 obj->maps[i].clear_priv = NULL;
1183 }
1184 zfree(&obj->maps);
1185 obj->nr_maps = 0;
1186
1187 if (obj->programs && obj->nr_programs) {
1188 for (i = 0; i < obj->nr_programs; i++)
1189 bpf_program__exit(&obj->programs[i]);
1190 }
1191 zfree(&obj->programs);
1192
1193 list_del(&obj->list);
1194 free(obj);
1195 }
1196
1197 struct bpf_object *
1198 bpf_object__next(struct bpf_object *prev)
1199 {
1200 struct bpf_object *next;
1201
1202 if (!prev)
1203 next = list_first_entry(&bpf_objects_list,
1204 struct bpf_object,
1205 list);
1206 else
1207 next = list_next_entry(prev, list);
1208
1209 /* An empty list is detected here, so there is no need to check on entry. */
1210 if (&next->list == &bpf_objects_list)
1211 return NULL;
1212
1213 return next;
1214 }
1215
1216 const char *bpf_object__name(struct bpf_object *obj)
1217 {
1218 return obj ? obj->path : ERR_PTR(-EINVAL);
1219 }
1220
1221 unsigned int bpf_object__kversion(struct bpf_object *obj)
1222 {
1223 return obj ? obj->kern_version : 0;
1224 }
1225
1226 struct bpf_program *
1227 bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
1228 {
1229 size_t idx;
1230
1231 if (!obj->programs)
1232 return NULL;
1233 /* First handler */
1234 if (prev == NULL)
1235 return &obj->programs[0];
1236
1237 if (prev->obj != obj) {
1238 pr_warning("error: program handler doesn't match object\n");
1239 return NULL;
1240 }
1241
1242 idx = (prev - obj->programs) + 1;
1243 if (idx >= obj->nr_programs)
1244 return NULL;
1245 return &obj->programs[idx];
1246 }
1247
1248 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
1249 bpf_program_clear_priv_t clear_priv)
1250 {
1251 if (prog->priv && prog->clear_priv)
1252 prog->clear_priv(prog, prog->priv);
1253
1254 prog->priv = priv;
1255 prog->clear_priv = clear_priv;
1256 return 0;
1257 }
1258
1259 void *bpf_program__priv(struct bpf_program *prog)
1260 {
1261 return prog ? prog->priv : ERR_PTR(-EINVAL);
1262 }
1263
1264 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
1265 {
1266 const char *title;
1267
1268 title = prog->section_name;
1269 if (needs_copy) {
1270 title = strdup(title);
1271 if (!title) {
1272 pr_warning("failed to strdup program title\n");
1273 return ERR_PTR(-ENOMEM);
1274 }
1275 }
1276
1277 return title;
1278 }
1279
1280 int bpf_program__fd(struct bpf_program *prog)
1281 {
1282 return bpf_program__nth_fd(prog, 0);
1283 }
1284
1285 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
1286 bpf_program_prep_t prep)
1287 {
1288 int *instances_fds;
1289
1290 if (nr_instances <= 0 || !prep)
1291 return -EINVAL;
1292
1293 if (prog->instances.nr > 0 || prog->instances.fds) {
1294 pr_warning("Can't set pre-processor after loading\n");
1295 return -EINVAL;
1296 }
1297
1298 instances_fds = malloc(sizeof(int) * nr_instances);
1299 if (!instances_fds) {
1300 pr_warning("alloc memory failed for fds\n");
1301 return -ENOMEM;
1302 }
1303
1304 /* fill all fds with -1 */
1305 memset(instances_fds, -1, sizeof(int) * nr_instances);
1306
1307 prog->instances.nr = nr_instances;
1308 prog->instances.fds = instances_fds;
1309 prog->preprocessor = prep;
1310 return 0;
1311 }
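/*
 * Sketch of how a caller uses the pre-processor hook above. The
 * callback signature matches the way bpf_program__load() invokes it:
 * it receives the original instructions of instance n and fills a
 * struct bpf_prog_prep_result. Here two instances of one program are
 * requested, each loading its own patched copy of the insns
 * (patch_for_instance() is a hypothetical helper standing in for
 * whatever instance-specific rewriting the caller needs):
 *
 *	static int my_prep(struct bpf_program *prog, int n,
 *			   struct bpf_insn *insns, int insns_cnt,
 *			   struct bpf_prog_prep_result *res)
 *	{
 *		res->new_insn_ptr = patch_for_instance(insns, insns_cnt, n);
 *		res->new_insn_cnt = insns_cnt;
 *		res->pfd = NULL;
 *		return 0;
 *	}
 *
 *	bpf_program__set_prep(prog, 2, my_prep);
 */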
1312
1313 int bpf_program__nth_fd(struct bpf_program *prog, int n)
1314 {
1315 int fd;
1316
1317 if (n >= prog->instances.nr || n < 0) {
1318 pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
1319 n, prog->section_name, prog->instances.nr);
1320 return -EINVAL;
1321 }
1322
1323 fd = prog->instances.fds[n];
1324 if (fd < 0) {
1325 pr_warning("%dth instance of program '%s' is invalid\n",
1326 n, prog->section_name);
1327 return -ENOENT;
1328 }
1329
1330 return fd;
1331 }
1332
1333 static void bpf_program__set_type(struct bpf_program *prog,
1334 enum bpf_prog_type type)
1335 {
1336 prog->type = type;
1337 }
1338
1339 int bpf_program__set_tracepoint(struct bpf_program *prog)
1340 {
1341 if (!prog)
1342 return -EINVAL;
1343 bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
1344 return 0;
1345 }
1346
1347 int bpf_program__set_kprobe(struct bpf_program *prog)
1348 {
1349 if (!prog)
1350 return -EINVAL;
1351 bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
1352 return 0;
1353 }
1354
1355 static bool bpf_program__is_type(struct bpf_program *prog,
1356 enum bpf_prog_type type)
1357 {
1358 return prog ? (prog->type == type) : false;
1359 }
1360
1361 bool bpf_program__is_tracepoint(struct bpf_program *prog)
1362 {
1363 return bpf_program__is_type(prog, BPF_PROG_TYPE_TRACEPOINT);
1364 }
1365
1366 bool bpf_program__is_kprobe(struct bpf_program *prog)
1367 {
1368 return bpf_program__is_type(prog, BPF_PROG_TYPE_KPROBE);
1369 }
1370
1371 int bpf_map__fd(struct bpf_map *map)
1372 {
1373 return map ? map->fd : -EINVAL;
1374 }
1375
1376 const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
1377 {
1378 return map ? &map->def : ERR_PTR(-EINVAL);
1379 }
1380
1381 const char *bpf_map__name(struct bpf_map *map)
1382 {
1383 return map ? map->name : NULL;
1384 }
1385
1386 int bpf_map__set_priv(struct bpf_map *map, void *priv,
1387 bpf_map_clear_priv_t clear_priv)
1388 {
1389 if (!map)
1390 return -EINVAL;
1391
1392 if (map->priv) {
1393 if (map->clear_priv)
1394 map->clear_priv(map, map->priv);
1395 }
1396
1397 map->priv = priv;
1398 map->clear_priv = clear_priv;
1399 return 0;
1400 }
1401
1402 void *bpf_map__priv(struct bpf_map *map)
1403 {
1404 return map ? map->priv : ERR_PTR(-EINVAL);
1405 }
1406
1407 struct bpf_map *
1408 bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
1409 {
1410 size_t idx;
1411 struct bpf_map *s, *e;
1412
1413 if (!obj || !obj->maps)
1414 return NULL;
1415
1416 s = obj->maps;
1417 e = obj->maps + obj->nr_maps;
1418
1419 if (prev == NULL)
1420 return s;
1421
1422 if ((prev < s) || (prev >= e)) {
1423 pr_warning("error in %s: map handler doesn't belong to object\n",
1424 __func__);
1425 return NULL;
1426 }
1427
1428 idx = (prev - obj->maps) + 1;
1429 if (idx >= obj->nr_maps)
1430 return NULL;
1431 return &obj->maps[idx];
1432 }
1433
1434 struct bpf_map *
1435 bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
1436 {
1437 struct bpf_map *pos;
1438
1439 bpf_map__for_each(pos, obj) {
1440 if (pos->name && !strcmp(pos->name, name))
1441 return pos;
1442 }
1443 return NULL;
1444 }
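/*
 * A lookup sketch using the iterator and accessors above; the object
 * is assumed to be loaded already, so each map has a valid fd, and
 * "my_map" is just a placeholder map name:
 *
 *	struct bpf_map *map;
 *	int fd;
 *
 *	bpf_map__for_each(map, obj) {
 *		printf("map '%s' has fd %d\n",
 *		       bpf_map__name(map), bpf_map__fd(map));
 *	}
 *
 *	map = bpf_object__find_map_by_name(obj, "my_map");
 *	if (map)
 *		fd = bpf_map__fd(map);
 */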