kernel/bpf/core.c

/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

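/* Usage note (editorial sketch, not part of the original source): classic
 * BPF addresses data before skb->data through these special "negative"
 * offsets from uapi <linux/filter.h>. For example, a filter reading the
 * IPv4 protocol field regardless of how far the packet has been pulled
 * would arrive here with
 *
 *	k = SKF_NET_OFF + offsetof(struct iphdr, protocol);
 *
 * which the helper above resolves against skb_network_header(), while
 * SKF_LL_OFF-relative offsets resolve against skb_mac_header().
 */
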
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	kmemcheck_annotate_bitfield(fp, meta);

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

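/* Allocation sketch (editorial, field names as defined in <linux/filter.h>):
 * a typical in-kernel caller sizes the allocation from the instruction
 * count and then fills in the image, roughly
 *
 *	struct bpf_prog *fp;
 *
 *	fp = bpf_prog_alloc(bpf_prog_size(len), 0);
 *	if (!fp)
 *		return -ENOMEM;
 *	fp->len = len;
 *	memcpy(fp->insnsi, insns, len * sizeof(struct bpf_insn));
 */
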
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog *fp;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	if (size <= fp_old->pages * PAGE_SIZE)
		return fp_old;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		kmemcheck_annotate_bitfield(fp, meta);

		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = size / PAGE_SIZE;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_realloc);

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}
EXPORT_SYMBOL_GPL(__bpf_prog_free);

#ifdef CONFIG_BPF_JIT
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	unsigned int size, hole, start;

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	hdr = module_alloc(size);
	if (hdr == NULL)
		return NULL;

	/* Fill space with illegal/arch-dependent instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (prandom_u32() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	module_memfree(hdr);
}
#endif /* CONFIG_BPF_JIT */

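/* Fill-hole sketch (editorial, hypothetical arch code): a JIT passes a
 * callback that paints the whole allocation with trapping instructions,
 * e.g. the single-byte breakpoint opcode 0xCC on x86, so a stray jump
 * into the padding faults instead of executing garbage:
 *
 *	static void jit_fill_hole(void *area, unsigned int size)
 *	{
 *		memset(area, 0xCC, size);
 *	}
 *
 *	hdr = bpf_jit_binary_alloc(proglen, &image, 4, jit_fill_hole);
 */
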

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

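/* Offset scheme sketch (editorial): a BPF_CALL instruction carries its
 * target as a 32-bit delta from __bpf_call_base rather than an absolute
 * address, so the load-time fixup is conceptually
 *
 *	insn->imm = helper_func - __bpf_call_base;
 *
 * and the interpreter's JMP_CALL below simply reverses it.
 */
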
/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@ctx: is the data we are operating on
 *	@insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		[BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
	};
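	/* Dispatch note (editorial): the table above maps each opcode byte
	 * to a GCC computed-goto label, so select_insn below costs one
	 * indirect jump per instruction instead of a switch; any opcode
	 * the verifier failed to reject lands on default_label.
	 */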
	u32 tail_call_cnt = 0;
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
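	/* Encoding note (editorial): BPF_LD | BPF_IMM | BPF_DW is the only
	 * double-length eBPF instruction; the 64-bit constant is split over
	 * the imm fields of two consecutive insns, which is why the extra
	 * insn++ above skips the second half before CONT.
	 */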
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u64 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;

		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (unlikely(!prog))
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
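	/* Tail-call sketch (editorial): from a program's point of view this
	 * is bpf_tail_call(ctx, &prog_array, index); on success control
	 * transfers into the target program and never returns here, while
	 * any failed check above falls through to the next instruction.
	 */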
	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are
		 * only appearing in the programs where ctx ==
		 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
		 * == BPF_R6, bpf_convert_filter() saves it in BPF_R6,
		 * internal BPF verifier will check that BPF_R6 ==
		 * ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

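/* Compatibility note (editorial): a prog array remembers the type and
 * JITed-ness of the first program stored in it; mixing, say, a JITed
 * and a non-JITed program in one array would break tail calls, since
 * tail-call dispatch assumes all targets share the same runtime.
 */
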
static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

/**
 *	bpf_prog_select_runtime - select exec runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *
 * Try to JIT eBPF program, if JIT is not available, use interpreter.
 * The BPF program will be executed via BPF_PROG_RUN() macro.
 */
int bpf_prog_select_runtime(struct bpf_prog *fp)
{
	fp->bpf_func = (void *) __bpf_prog_run;

	bpf_int_jit_compile(fp);
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	return bpf_check_tail_call(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

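/* Lifecycle sketch (editorial): once a program's insnsi/len are
 * populated (see the allocation sketch above), a caller finalizes and
 * runs it roughly as
 *
 *	err = bpf_prog_select_runtime(fp);
 *	if (!err)
 *		ret = BPF_PROG_RUN(fp, ctx);
 *
 * where BPF_PROG_RUN() dispatches through fp->bpf_func, i.e. either
 * the JIT image or __bpf_prog_run() set above.
 */
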
static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_jit_free(aux->prog);
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(state);

	return res;
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
void __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}