/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <asm/unaligned.h>
/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm
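
/* Illustrative decode (sketch, not part of the original file): for an
 * insn with code = BPF_ALU64 | BPF_ADD | BPF_X, dst_reg = BPF_REG_1 and
 * src_reg = BPF_REG_2, the interpreter's "DST = DST + SRC" below reads
 * as regs[1] = regs[1] + regs[2], while IMM would pick up insn->imm.
 */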
/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
					   int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}
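
/* Example (illustrative, not from this file): a classic BPF load with
 * k = SKF_NET_OFF + 9 asks for the IP protocol byte relative to the
 * network header; SKF_NET_OFF is negative, so it reaches this helper
 * and resolves to skb_network_header(skb) + 9.
 */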
/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
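
/* Offset scheme (illustrative sketch): helper addresses are stored as
 * deltas against this anchor, in effect
 *
 *	insn->imm = (long) helper_func - (long) __bpf_call_base;
 *
 * which is why JMP_CALL below can rebuild the function pointer with a
 * plain addition to __bpf_call_base.
 */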
/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@ctx: the data we are operating on
 *	@insn: the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
	};
	void *ptr;
	int off;
#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })
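
/* Dispatch note (sketch): both macros advance to the next instruction
 * and re-enter the computed goto at select_insn below, so there is no
 * central switch () and no per-instruction function call. CONT_JMP is
 * presumably kept separate so taken branches could later get their own
 * handling without touching every fall-through path.
 */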
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

	/* Registers used in classic BPF programs need to be reset first. */
	regs[BPF_REG_A] = 0;
	regs[BPF_REG_X] = 0;

select_insn:
	goto *jumptable[insn->code];
	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
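
	/* Expansion sketch (comment only): ALU(ADD, +) above generates
	 * four handlers; the 64-bit register/register case becomes
	 *
	 *	ALU64_ADD_X:
	 *		DST = DST + SRC;
	 *		CONT;
	 *
	 * and the 32-bit variants truncate both operands to u32 first.
	 */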
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = DST;
		DST = do_div(tmp, SRC);
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		tmp = DST;
		DST = do_div(tmp, IMM);
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		do_div(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		do_div(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
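
	/* Note on the pattern above: do_div(n, base) divides n in place,
	 * leaving the quotient in n and returning the remainder, which is
	 * why MOD assigns the return value to DST while DIV keeps the
	 * updated dividend.
	 */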
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;
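
	/* Example (comment only): a BPF_ALU | BPF_END | BPF_TO_BE insn
	 * with imm == 16 is how a program converts a just-loaded 16-bit
	 * port number to network byte order; on big-endian hosts
	 * cpu_to_be16() compiles away to nothing.
	 */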
	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
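
	/* Taken branches above are relative: insn->off is a signed 16-bit
	 * instruction count applied on top of the implicit insn++ done by
	 * CONT_JMP, so off == 0 simply falls through to the next insn.
	 */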
	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
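
	/* Expansion sketch (comment only): LDST(W, u32) above generates,
	 * among others,
	 *
	 *	LDX_MEM_W:
	 *		DST = *(u32 *)(unsigned long) (SRC + insn->off);
	 *		CONT;
	 *
	 * i.e. a 32-bit load from the address in SRC plus a 16-bit offset.
	 */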
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
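
	/* Note (comment only): XADD here is a plain atomic add; the old
	 * value is not fetched back into src_reg, and only the W and DW
	 * sizes wired up in the jumptable are supported.
	 */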
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are
		 * only appearing in the programs where ctx ==
		 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
		 * == BPF_R6, bpf_convert_filter() saves it in BPF_R6,
		 * internal BPF verifier will check that BPF_R6 ==
		 * ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;
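
	/* Example (illustrative): the classic filter "ip" compiles to a
	 * half-word load of the ethertype at offset 12 followed by a JEQ
	 * against ETH_P_IP (0x0800); after conversion that load runs
	 * through the load_half path above.
	 */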
	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}
void __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
}
/**
 *	bpf_prog_select_runtime - select execution runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *
 * Try to JIT the internal BPF program; if no JIT is available, fall back
 * to the interpreter. Either way, the program is afterwards executed via
 * the BPF_PROG_RUN() macro.
 */
void bpf_prog_select_runtime(struct bpf_prog *fp)
{
	fp->bpf_func = (void *) __bpf_prog_run;

	/* Probe if internal BPF can be JITed */
	bpf_int_jit_compile(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
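
/* Usage sketch (illustrative; simplified, error handling omitted, and
 * allocation details vary by kernel version):
 *
 *	struct bpf_prog *fp = kmalloc(bpf_prog_size(len), GFP_KERNEL);
 *
 *	memcpy(fp->insnsi, insns, len * sizeof(struct bpf_insn));
 *	fp->len = len;
 *	bpf_prog_select_runtime(fp);
 *	ret = BPF_PROG_RUN(fp, skb);
 */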
/* free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);