/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);
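
/* Map implementations (e.g. arraymap.c, hashtab.c) register themselves on
 * bpf_map_types at boot via bpf_register_map_type(); find_and_alloc_map()
 * below walks the list to match attr->map_type against a set of ops.
 */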
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}
/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}
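
/* A typical registration, sketched after the pattern used by the in-tree
 * map implementations (names illustrative):
 *
 *	static struct bpf_map_type_list htab_type __read_mostly = {
 *		.ops = &htab_ops,
 *		.type = BPF_MAP_TYPE_HASH,
 *	};
 *
 *	static int __init register_htab_map(void)
 *	{
 *		bpf_register_map_type(&htab_type);
 *		return 0;
 *	}
 *	late_initcall(register_htab_map);
 */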
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}
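
/* All map and program memory is accounted against the creating user's
 * RLIMIT_MEMLOCK in units of pages, which is why the byte limit returned
 * by rlimit() is shifted right by PAGE_SHIFT before any comparison.
 */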
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}
static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}
/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}
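
/* A map carries two counts: refcnt covers every reference, including those
 * held by programs that use the map, while usercnt covers only references
 * reachable from user space (fds and pinned inodes). Clearing prog_array
 * maps on the last usercnt drop breaks the reference cycle between a
 * prog_array and the programs stored inside it.
 */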
void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}
static int bpf_map_release(struct inode *inode, struct file *filp)
{
	bpf_map_put_with_uref(filp->private_data);

	return 0;
}
#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries);
}
#endif
static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};
int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}
/* helper macro to check that unused fields 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

#define BPF_MAP_CREATE_LAST_FIELD map_flags
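
/* For example, CHECK_ATTR(BPF_MAP_CREATE) verifies that every byte of
 * 'attr' past map_flags (the last field this command understands) is zero,
 * so fields added by newer ABIs cannot be silently misinterpreted.
 */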
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
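
/* User-space sketch (illustrative, not part of this file): creating a hash
 * map through the raw syscall:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 1024,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */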
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
void bpf_map_inc(struct bpf_map *map, bool uref)
{
	atomic_inc(&map->refcnt);
	if (uref)
		atomic_inc(&map->usercnt);
}
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map, true);
	fdput(f);

	return map;
}
/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}
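
/* The double cast via unsigned long keeps 32-bit builds warning-free:
 * casting a 64-bit integer straight to a 32-bit pointer would trigger a
 * pointer-from-integer-of-different-size warning.
 */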
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
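
/* For per-cpu maps, lookup above and update below transfer one
 * round_up(value_size, 8) chunk per possible CPU, concatenated; plain maps
 * transfer a single value_size chunk under rcu_read_lock().
 */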
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
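
/* Concrete example of the deadlock avoided via bpf_prog_active: a kprobe
 * program firing inside the hash map's bucket-lock region could itself
 * call bpf_map_update_elem() on the same map and spin forever on the lock
 * it already holds. trace_call_bpf() skips program execution whenever this
 * per-cpu counter is already elevated.
 */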
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
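
/* User-space iteration sketch (illustrative; ptr_to_u64() is a hypothetical
 * helper wrapping the cast): pass any non-existent key to fetch the first
 * key, then feed each result back in until the call fails with -ENOENT:
 *
 *	char key[KEY_SIZE], next_key[KEY_SIZE];
 *	attr.map_fd   = fd;
 *	attr.key      = ptr_to_u64(key);
 *	attr.next_key = ptr_to_u64(next_key);
 *	while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)) == 0)
 *		memcpy(key, next_key, sizeof(key));
 */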
static LIST_HEAD(bpf_prog_types);
static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}
void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}
/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when program has bpf_call instructions
			 * and it passed bpf_check(), means that
			 * ops->get_func_proto must have been supplied, check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as different opcode
				 * to avoid conditional branch in
				 * interpreter for every normal call
				 * and to prevent accidental JITing by
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have prototype and verifier allowed
			 * programs to call them, must be real in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}
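
/* After this pass the interpreter resolves a helper call simply as
 * __bpf_call_base + insn->imm (see JMP_CALL in kernel/bpf/core.c), so imm
 * holds a relative offset rather than an absolute kernel address.
 */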
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}
static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(prog->pages, &user->locked_vm);
	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(prog->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	prog->aux->user = user;
	return 0;
}
static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	atomic_long_sub(prog->pages, &user->locked_vm);
	free_uid(user);
}
static void __prog_put_common(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}
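
/* bpf_prog_put_rcu() below defers freeing past an RCU grace period for
 * callers whose program may still be executing on another CPU (e.g. when
 * detached from a perf event); plain bpf_prog_put() frees synchronously and
 * is only safe when no such concurrent execution is possible.
 */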
/* version of bpf_prog_put() that is called after a grace period */
void bpf_prog_put_rcu(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, __prog_put_common);
}
void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		__prog_put_common(&prog->aux->rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);
static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put_rcu(prog);
	return 0;
}
static const struct file_operations bpf_prog_fops = {
	.release = bpf_prog_release,
};
int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}
static struct bpf_prog *__bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
/* called by sockets/tracing/seccomp before attaching program to an event
 * pairs with bpf_prog_put()
 */
struct bpf_prog *bpf_prog_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = __bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;

	atomic_inc(&prog->aux->refcnt);
	fdput(f);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get);
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	err = bpf_prog_select_runtime(prog);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
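
/* User-space sketch (illustrative): loading a minimal socket filter that
 * immediately returns 0, using the insn macros from <linux/filter.h>:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insn_cnt  = 2,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */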
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
}
static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
}
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}