2 * Linux Socket Filter - Kernel level socket filtering
5 * Jay Schulist <jschlst@samba.org>
7 * Based on the design of:
8 * - The Berkeley Packet Filter
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
15 * Andi Kleen - Fix a few bad bugs and races.
16 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
19 #include <linux/module.h>
20 #include <linux/types.h>
22 #include <linux/fcntl.h>
23 #include <linux/socket.h>
25 #include <linux/inet.h>
26 #include <linux/netdevice.h>
27 #include <linux/if_packet.h>
28 #include <linux/gfp.h>
30 #include <net/protocol.h>
31 #include <net/netlink.h>
32 #include <linux/skbuff.h>
34 #include <linux/errno.h>
35 #include <linux/timer.h>
36 #include <asm/uaccess.h>
37 #include <asm/unaligned.h>
38 #include <linux/filter.h>
39 #include <linux/reciprocal_div.h>
40 #include <linux/ratelimit.h>
41 #include <linux/seccomp.h>
43 /* No hurry in this branch
45 * Exported for the bpf jit load helper.
47 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff
*skb
, int k
, unsigned int size
)
52 ptr
= skb_network_header(skb
) + k
- SKF_NET_OFF
;
53 else if (k
>= SKF_LL_OFF
)
54 ptr
= skb_mac_header(skb
) + k
- SKF_LL_OFF
;
56 if (ptr
>= skb
->head
&& ptr
+ size
<= skb_tail_pointer(skb
))
61 static inline void *load_pointer(const struct sk_buff
*skb
, int k
,
62 unsigned int size
, void *buffer
)
65 return skb_header_pointer(skb
, k
, size
, buffer
);
66 return bpf_internal_load_pointer_neg_helper(skb
, k
, size
);
70 * sk_filter - run a packet through a socket filter
71 * @sk: sock associated with &sk_buff
72 * @skb: buffer to filter
74 * Run the filter code and then cut skb->data to correct size returned by
75 * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
76 * than pkt_len we keep whole skb->data. This is the socket level
77 * wrapper to sk_run_filter. It returns 0 if the packet should
78 * be accepted or -EPERM if the packet should be tossed.
81 int sk_filter(struct sock
*sk
, struct sk_buff
*skb
)
84 struct sk_filter
*filter
;
86 err
= security_sock_rcv_skb(sk
, skb
);
91 filter
= rcu_dereference(sk
->sk_filter
);
93 unsigned int pkt_len
= SK_RUN_FILTER(filter
, skb
);
95 err
= pkt_len
? pskb_trim(skb
, pkt_len
) : -EPERM
;
101 EXPORT_SYMBOL(sk_filter
);
104 * sk_run_filter - run a filter on a socket
105 * @skb: buffer to run the filter on
106 * @fentry: filter to apply
108 * Decode and apply filter instructions to the skb->data.
109 * Return length to keep, 0 for none. @skb is the data we are
110 * filtering, @filter is the array of filter instructions.
111 * Because all jumps are guaranteed to be before last instruction,
112 * and last instruction guaranteed to be a RET, we dont need to check
113 * flen. (We used to pass to this function the length of filter)
115 unsigned int sk_run_filter(const struct sk_buff
*skb
,
116 const struct sock_filter
*fentry
)
119 u32 A
= 0; /* Accumulator */
120 u32 X
= 0; /* Index Register */
121 u32 mem
[BPF_MEMWORDS
]; /* Scratch Memory Store */
126 * Process array of filter instructions.
129 #if defined(CONFIG_X86_32)
130 #define K (fentry->k)
132 const u32 K
= fentry
->k
;
135 switch (fentry
->code
) {
136 case BPF_S_ALU_ADD_X
:
139 case BPF_S_ALU_ADD_K
:
142 case BPF_S_ALU_SUB_X
:
145 case BPF_S_ALU_SUB_K
:
148 case BPF_S_ALU_MUL_X
:
151 case BPF_S_ALU_MUL_K
:
154 case BPF_S_ALU_DIV_X
:
159 case BPF_S_ALU_DIV_K
:
160 A
= reciprocal_divide(A
, K
);
162 case BPF_S_ALU_AND_X
:
165 case BPF_S_ALU_AND_K
:
174 case BPF_S_ALU_LSH_X
:
177 case BPF_S_ALU_LSH_K
:
180 case BPF_S_ALU_RSH_X
:
183 case BPF_S_ALU_RSH_K
:
192 case BPF_S_JMP_JGT_K
:
193 fentry
+= (A
> K
) ? fentry
->jt
: fentry
->jf
;
195 case BPF_S_JMP_JGE_K
:
196 fentry
+= (A
>= K
) ? fentry
->jt
: fentry
->jf
;
198 case BPF_S_JMP_JEQ_K
:
199 fentry
+= (A
== K
) ? fentry
->jt
: fentry
->jf
;
201 case BPF_S_JMP_JSET_K
:
202 fentry
+= (A
& K
) ? fentry
->jt
: fentry
->jf
;
204 case BPF_S_JMP_JGT_X
:
205 fentry
+= (A
> X
) ? fentry
->jt
: fentry
->jf
;
207 case BPF_S_JMP_JGE_X
:
208 fentry
+= (A
>= X
) ? fentry
->jt
: fentry
->jf
;
210 case BPF_S_JMP_JEQ_X
:
211 fentry
+= (A
== X
) ? fentry
->jt
: fentry
->jf
;
213 case BPF_S_JMP_JSET_X
:
214 fentry
+= (A
& X
) ? fentry
->jt
: fentry
->jf
;
219 ptr
= load_pointer(skb
, k
, 4, &tmp
);
221 A
= get_unaligned_be32(ptr
);
228 ptr
= load_pointer(skb
, k
, 2, &tmp
);
230 A
= get_unaligned_be16(ptr
);
237 ptr
= load_pointer(skb
, k
, 1, &tmp
);
246 case BPF_S_LDX_W_LEN
:
258 case BPF_S_LDX_B_MSH
:
259 ptr
= load_pointer(skb
, K
, 1, &tmp
);
261 X
= (*(u8
*)ptr
& 0xf) << 2;
293 case BPF_S_ANC_PROTOCOL
:
294 A
= ntohs(skb
->protocol
);
296 case BPF_S_ANC_PKTTYPE
:
299 case BPF_S_ANC_IFINDEX
:
302 A
= skb
->dev
->ifindex
;
307 case BPF_S_ANC_QUEUE
:
308 A
= skb
->queue_mapping
;
310 case BPF_S_ANC_HATYPE
:
315 case BPF_S_ANC_RXHASH
:
319 A
= raw_smp_processor_id();
321 case BPF_S_ANC_ALU_XOR_X
:
324 case BPF_S_ANC_NLATTR
: {
327 if (skb_is_nonlinear(skb
))
329 if (A
> skb
->len
- sizeof(struct nlattr
))
332 nla
= nla_find((struct nlattr
*)&skb
->data
[A
],
335 A
= (void *)nla
- (void *)skb
->data
;
340 case BPF_S_ANC_NLATTR_NEST
: {
343 if (skb_is_nonlinear(skb
))
345 if (A
> skb
->len
- sizeof(struct nlattr
))
348 nla
= (struct nlattr
*)&skb
->data
[A
];
349 if (nla
->nla_len
> A
- skb
->len
)
352 nla
= nla_find_nested(nla
, X
);
354 A
= (void *)nla
- (void *)skb
->data
;
359 #ifdef CONFIG_SECCOMP_FILTER
360 case BPF_S_ANC_SECCOMP_LD_W
:
361 A
= seccomp_bpf_load(fentry
->k
);
365 WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
366 fentry
->code
, fentry
->jt
,
367 fentry
->jf
, fentry
->k
);
374 EXPORT_SYMBOL(sk_run_filter
);
378 * A BPF program is able to use 16 cells of memory to store intermediate
379 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter())
380 * As we dont want to clear mem[] array for each packet going through
381 * sk_run_filter(), we check that filter loaded by user never try to read
382 * a cell if not previously written, and we check all branches to be sure
383 * a malicious user doesn't try to abuse us.
385 static int check_load_and_stores(struct sock_filter
*filter
, int flen
)
387 u16
*masks
, memvalid
= 0; /* one bit per cell, 16 cells */
390 BUILD_BUG_ON(BPF_MEMWORDS
> 16);
391 masks
= kmalloc(flen
* sizeof(*masks
), GFP_KERNEL
);
394 memset(masks
, 0xff, flen
* sizeof(*masks
));
396 for (pc
= 0; pc
< flen
; pc
++) {
397 memvalid
&= masks
[pc
];
399 switch (filter
[pc
].code
) {
402 memvalid
|= (1 << filter
[pc
].k
);
406 if (!(memvalid
& (1 << filter
[pc
].k
))) {
412 /* a jump must set masks on target */
413 masks
[pc
+ 1 + filter
[pc
].k
] &= memvalid
;
416 case BPF_S_JMP_JEQ_K
:
417 case BPF_S_JMP_JEQ_X
:
418 case BPF_S_JMP_JGE_K
:
419 case BPF_S_JMP_JGE_X
:
420 case BPF_S_JMP_JGT_K
:
421 case BPF_S_JMP_JGT_X
:
422 case BPF_S_JMP_JSET_X
:
423 case BPF_S_JMP_JSET_K
:
424 /* a jump must set masks on targets */
425 masks
[pc
+ 1 + filter
[pc
].jt
] &= memvalid
;
426 masks
[pc
+ 1 + filter
[pc
].jf
] &= memvalid
;
437 * sk_chk_filter - verify socket filter code
438 * @filter: filter to verify
439 * @flen: length of filter
441 * Check the user's filter code. If we let some ugly
442 * filter code slip through kaboom! The filter must contain
443 * no references or jumps that are out of range, no illegal
444 * instructions, and must end with a RET instruction.
446 * All jumps are forward as they are not signed.
448 * Returns 0 if the rule set is legal or -EINVAL if not.
450 int sk_chk_filter(struct sock_filter
*filter
, unsigned int flen
)
453 * Valid instructions are initialized to non-0.
454 * Invalid instructions are initialized to 0.
456 static const u8 codes
[] = {
457 [BPF_ALU
|BPF_ADD
|BPF_K
] = BPF_S_ALU_ADD_K
,
458 [BPF_ALU
|BPF_ADD
|BPF_X
] = BPF_S_ALU_ADD_X
,
459 [BPF_ALU
|BPF_SUB
|BPF_K
] = BPF_S_ALU_SUB_K
,
460 [BPF_ALU
|BPF_SUB
|BPF_X
] = BPF_S_ALU_SUB_X
,
461 [BPF_ALU
|BPF_MUL
|BPF_K
] = BPF_S_ALU_MUL_K
,
462 [BPF_ALU
|BPF_MUL
|BPF_X
] = BPF_S_ALU_MUL_X
,
463 [BPF_ALU
|BPF_DIV
|BPF_X
] = BPF_S_ALU_DIV_X
,
464 [BPF_ALU
|BPF_AND
|BPF_K
] = BPF_S_ALU_AND_K
,
465 [BPF_ALU
|BPF_AND
|BPF_X
] = BPF_S_ALU_AND_X
,
466 [BPF_ALU
|BPF_OR
|BPF_K
] = BPF_S_ALU_OR_K
,
467 [BPF_ALU
|BPF_OR
|BPF_X
] = BPF_S_ALU_OR_X
,
468 [BPF_ALU
|BPF_LSH
|BPF_K
] = BPF_S_ALU_LSH_K
,
469 [BPF_ALU
|BPF_LSH
|BPF_X
] = BPF_S_ALU_LSH_X
,
470 [BPF_ALU
|BPF_RSH
|BPF_K
] = BPF_S_ALU_RSH_K
,
471 [BPF_ALU
|BPF_RSH
|BPF_X
] = BPF_S_ALU_RSH_X
,
472 [BPF_ALU
|BPF_NEG
] = BPF_S_ALU_NEG
,
473 [BPF_LD
|BPF_W
|BPF_ABS
] = BPF_S_LD_W_ABS
,
474 [BPF_LD
|BPF_H
|BPF_ABS
] = BPF_S_LD_H_ABS
,
475 [BPF_LD
|BPF_B
|BPF_ABS
] = BPF_S_LD_B_ABS
,
476 [BPF_LD
|BPF_W
|BPF_LEN
] = BPF_S_LD_W_LEN
,
477 [BPF_LD
|BPF_W
|BPF_IND
] = BPF_S_LD_W_IND
,
478 [BPF_LD
|BPF_H
|BPF_IND
] = BPF_S_LD_H_IND
,
479 [BPF_LD
|BPF_B
|BPF_IND
] = BPF_S_LD_B_IND
,
480 [BPF_LD
|BPF_IMM
] = BPF_S_LD_IMM
,
481 [BPF_LDX
|BPF_W
|BPF_LEN
] = BPF_S_LDX_W_LEN
,
482 [BPF_LDX
|BPF_B
|BPF_MSH
] = BPF_S_LDX_B_MSH
,
483 [BPF_LDX
|BPF_IMM
] = BPF_S_LDX_IMM
,
484 [BPF_MISC
|BPF_TAX
] = BPF_S_MISC_TAX
,
485 [BPF_MISC
|BPF_TXA
] = BPF_S_MISC_TXA
,
486 [BPF_RET
|BPF_K
] = BPF_S_RET_K
,
487 [BPF_RET
|BPF_A
] = BPF_S_RET_A
,
488 [BPF_ALU
|BPF_DIV
|BPF_K
] = BPF_S_ALU_DIV_K
,
489 [BPF_LD
|BPF_MEM
] = BPF_S_LD_MEM
,
490 [BPF_LDX
|BPF_MEM
] = BPF_S_LDX_MEM
,
492 [BPF_STX
] = BPF_S_STX
,
493 [BPF_JMP
|BPF_JA
] = BPF_S_JMP_JA
,
494 [BPF_JMP
|BPF_JEQ
|BPF_K
] = BPF_S_JMP_JEQ_K
,
495 [BPF_JMP
|BPF_JEQ
|BPF_X
] = BPF_S_JMP_JEQ_X
,
496 [BPF_JMP
|BPF_JGE
|BPF_K
] = BPF_S_JMP_JGE_K
,
497 [BPF_JMP
|BPF_JGE
|BPF_X
] = BPF_S_JMP_JGE_X
,
498 [BPF_JMP
|BPF_JGT
|BPF_K
] = BPF_S_JMP_JGT_K
,
499 [BPF_JMP
|BPF_JGT
|BPF_X
] = BPF_S_JMP_JGT_X
,
500 [BPF_JMP
|BPF_JSET
|BPF_K
] = BPF_S_JMP_JSET_K
,
501 [BPF_JMP
|BPF_JSET
|BPF_X
] = BPF_S_JMP_JSET_X
,
505 if (flen
== 0 || flen
> BPF_MAXINSNS
)
508 /* check the filter code now */
509 for (pc
= 0; pc
< flen
; pc
++) {
510 struct sock_filter
*ftest
= &filter
[pc
];
511 u16 code
= ftest
->code
;
513 if (code
>= ARRAY_SIZE(codes
))
518 /* Some instructions need special checks */
520 case BPF_S_ALU_DIV_K
:
521 /* check for division by zero */
524 ftest
->k
= reciprocal_value(ftest
->k
);
530 /* check for invalid memory addresses */
531 if (ftest
->k
>= BPF_MEMWORDS
)
536 * Note, the large ftest->k might cause loops.
537 * Compare this with conditional jumps below,
538 * where offsets are limited. --ANK (981016)
540 if (ftest
->k
>= (unsigned int)(flen
-pc
-1))
543 case BPF_S_JMP_JEQ_K
:
544 case BPF_S_JMP_JEQ_X
:
545 case BPF_S_JMP_JGE_K
:
546 case BPF_S_JMP_JGE_X
:
547 case BPF_S_JMP_JGT_K
:
548 case BPF_S_JMP_JGT_X
:
549 case BPF_S_JMP_JSET_X
:
550 case BPF_S_JMP_JSET_K
:
551 /* for conditionals both must be safe */
552 if (pc
+ ftest
->jt
+ 1 >= flen
||
553 pc
+ ftest
->jf
+ 1 >= flen
)
559 #define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
560 code = BPF_S_ANC_##CODE; \
567 ANCILLARY(NLATTR_NEST
);
573 ANCILLARY(ALU_XOR_X
);
579 /* last instruction must be a RET code */
580 switch (filter
[flen
- 1].code
) {
583 return check_load_and_stores(filter
, flen
);
587 EXPORT_SYMBOL(sk_chk_filter
);
590 * sk_filter_release_rcu - Release a socket filter by rcu_head
591 * @rcu: rcu_head that contains the sk_filter to free
593 void sk_filter_release_rcu(struct rcu_head
*rcu
)
595 struct sk_filter
*fp
= container_of(rcu
, struct sk_filter
, rcu
);
600 EXPORT_SYMBOL(sk_filter_release_rcu
);
602 static int __sk_prepare_filter(struct sk_filter
*fp
)
606 fp
->bpf_func
= sk_run_filter
;
608 err
= sk_chk_filter(fp
->insns
, fp
->len
);
617 * sk_unattached_filter_create - create an unattached filter
618 * @fprog: the filter program
619 * @sk: the socket to use
621 * Create a filter independent ofr any socket. We first run some
622 * sanity checks on it to make sure it does not explode on us later.
623 * If an error occurs or there is insufficient memory for the filter
624 * a negative errno code is returned. On success the return is zero.
626 int sk_unattached_filter_create(struct sk_filter
**pfp
,
627 struct sock_fprog
*fprog
)
629 struct sk_filter
*fp
;
630 unsigned int fsize
= sizeof(struct sock_filter
) * fprog
->len
;
633 /* Make sure new filter is there and in the right amounts. */
634 if (fprog
->filter
== NULL
)
637 fp
= kmalloc(fsize
+ sizeof(*fp
), GFP_KERNEL
);
640 memcpy(fp
->insns
, fprog
->filter
, fsize
);
642 atomic_set(&fp
->refcnt
, 1);
643 fp
->len
= fprog
->len
;
645 err
= __sk_prepare_filter(fp
);
655 EXPORT_SYMBOL_GPL(sk_unattached_filter_create
);
657 void sk_unattached_filter_destroy(struct sk_filter
*fp
)
659 sk_filter_release(fp
);
661 EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy
);
664 * sk_attach_filter - attach a socket filter
665 * @fprog: the filter program
666 * @sk: the socket to use
668 * Attach the user's filter code. We first run some sanity checks on
669 * it to make sure it does not explode on us later. If an error
670 * occurs or there is insufficient memory for the filter a negative
671 * errno code is returned. On success the return is zero.
673 int sk_attach_filter(struct sock_fprog
*fprog
, struct sock
*sk
)
675 struct sk_filter
*fp
, *old_fp
;
676 unsigned int fsize
= sizeof(struct sock_filter
) * fprog
->len
;
679 /* Make sure new filter is there and in the right amounts. */
680 if (fprog
->filter
== NULL
)
683 fp
= sock_kmalloc(sk
, fsize
+sizeof(*fp
), GFP_KERNEL
);
686 if (copy_from_user(fp
->insns
, fprog
->filter
, fsize
)) {
687 sock_kfree_s(sk
, fp
, fsize
+sizeof(*fp
));
691 atomic_set(&fp
->refcnt
, 1);
692 fp
->len
= fprog
->len
;
694 err
= __sk_prepare_filter(fp
);
696 sk_filter_uncharge(sk
, fp
);
700 old_fp
= rcu_dereference_protected(sk
->sk_filter
,
701 sock_owned_by_user(sk
));
702 rcu_assign_pointer(sk
->sk_filter
, fp
);
705 sk_filter_uncharge(sk
, old_fp
);
708 EXPORT_SYMBOL_GPL(sk_attach_filter
);
710 int sk_detach_filter(struct sock
*sk
)
713 struct sk_filter
*filter
;
715 filter
= rcu_dereference_protected(sk
->sk_filter
,
716 sock_owned_by_user(sk
));
718 RCU_INIT_POINTER(sk
->sk_filter
, NULL
);
719 sk_filter_uncharge(sk
, filter
);
724 EXPORT_SYMBOL_GPL(sk_detach_filter
);