/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>

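/*
 * Per-cpu recursion guard: incremented around the map update/delete paths
 * below so that a kprobe-attached BPF program cannot re-enter the same map
 * operation on this cpu (see the comment in map_update_elem()).
 */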
DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}

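/*
 * Check up front that 'pages' more locked pages would still fit within the
 * caller's RLIMIT_MEMLOCK, without actually charging them.  Map
 * implementations that know their full footprint before allocating can use
 * this to fail early.
 */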
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

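/*
 * usercnt counts user-visible references (file descriptors and pinned
 * paths).  When the last one goes away, a prog_array map is cleared so
 * that the programs stashed in it stop pinning the map (and, transitively,
 * each other), even though kernel-internal references may still exist.
 */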
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	bpf_map_put_with_uref(filp->private_data);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = bpf_map_show_fdinfo,
#endif
	.release = bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
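
/*
 * memchr_inv() returns non-NULL if any byte in the checked region is
 * non-zero.  E.g. with BPF_MAP_CREATE_LAST_FIELD defined as map_flags
 * below, CHECK_ATTR(BPF_MAP_CREATE) evaluates to true (and the command
 * fails with -EINVAL) whenever userspace set any attr byte past map_flags.
 */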

#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	map->ops->map_free(map);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map, bool uref)
{
	atomic_inc(&map->refcnt);
	if (uref)
		atomic_inc(&map->usercnt);
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}

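/*
 * Weak stub, overridden by the real implementation in kernel/bpf/stackmap.c
 * when stack trace maps are built in; otherwise lookups on a
 * BPF_MAP_TYPE_STACK_TRACE map report -ENOTSUPP.
 */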
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside a bpf map update or delete, otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

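/*
 * Userspace iterates a map by repeatedly calling BPF_MAP_GET_NEXT_KEY with
 * the previously returned key; the map implementation returns -ENOENT once
 * no key follows the given one, which terminates the walk.
 */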
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

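/* boot time registration of program types, mirroring bpf_map_types above */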
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}

/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after the eBPF program has passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when the program has bpf_call
			 * instructions and it passed bpf_check(), which means
			 * ops->get_func_proto must have been supplied; check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as different opcode
				 * to avoid conditional branch in
				 * interpreter for every normal call
				 * and to prevent accidental JITing by
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have a prototype and that the
			 * verifier allowed programs to call must be real
			 * in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(prog->pages, &user->locked_vm);
	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(prog->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	atomic_long_sub(prog->pages, &user->locked_vm);
	free_uid(user);
}

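/*
 * Common teardown for the last program reference: drop the references on
 * all maps the program uses, return the memlock charge and free the
 * program itself.  Runs either directly (bpf_prog_put) or as an RCU
 * callback (bpf_prog_put_rcu).
 */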
static void __prog_put_common(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

/* version of bpf_prog_put() that is called after a grace period */
void bpf_prog_put_rcu(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, __prog_put_common);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		__prog_put_common(&prog->aux->rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put_rcu(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release = bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *__bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* called by sockets/tracing/seccomp before attaching program to an event
 * pairs with bpf_prog_put()
 */
struct bpf_prog *bpf_prog_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = __bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;

	atomic_inc(&prog->aux->refcnt);
	fdput(f);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

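/*
 * Program load pipeline: validate attr, copy the license and instructions
 * from userspace, bind the program type, run the verifier, patch helper
 * call offsets (fixup_bpf_calls) and pick the runtime (JIT or interpreter)
 * before handing back an fd.
 */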
static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	err = bpf_prog_select_runtime(prog);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
}

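/*
 * Single multiplexed entry point for all BPF commands.  A minimal,
 * illustrative userspace call (not part of this file) looks like:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 1024,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */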
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}