arch/arm/net/bpf_jit_32.c
/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>

#include "bpf_jit_32.h"

/*
 * ABI:
 *
 * r0	scratch register
 * r4	BPF register A
 * r5	BPF register X
 * r6	pointer to the skb
 * r7	skb->data
 * r8	skb_headlen(skb)
 */
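/*
 * Note: r4-r8 are callee-saved in the AAPCS, so the BPF state kept in
 * them survives the helper calls made by the JITed code; the prologue
 * pushes exactly the subset that saved_regs() below reports as used.
 */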
36
37 #define r_scratch ARM_R0
38 /* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
39 #define r_off ARM_R1
40 #define r_A ARM_R4
41 #define r_X ARM_R5
42 #define r_skb ARM_R6
43 #define r_skb_data ARM_R7
44 #define r_skb_hl ARM_R8
45
46 #define SCRATCH_SP_OFFSET 0
47 #define SCRATCH_OFF(k) (SCRATCH_SP_OFFSET + 4 * (k))
48
49 #define SEEN_MEM ((1 << BPF_MEMWORDS) - 1)
50 #define SEEN_MEM_WORD(k) (1 << (k))
51 #define SEEN_X (1 << BPF_MEMWORDS)
52 #define SEEN_CALL (1 << (BPF_MEMWORDS + 1))
53 #define SEEN_SKB (1 << (BPF_MEMWORDS + 2))
54 #define SEEN_DATA (1 << (BPF_MEMWORDS + 3))
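/*
 * ctx->seen is a usage bitmap filled in by the counting pass: bits
 * 0..BPF_MEMWORDS-1 track which scratch words M[k] the filter touches,
 * and the bits above them record whether X, helper calls, the skb
 * pointer or packet data are needed.  For example, a filter doing only
 * "ldx mem[2]" ends up with SEEN_MEM_WORD(2) | SEEN_X set and nothing
 * else, so the prologue can skip all the other setup.
 */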

#define FLAG_NEED_X_RESET	(1 << 0)
#define FLAG_IMM_OVERFLOW	(1 << 1)

struct jit_ctx {
	const struct bpf_prog *skf;
	unsigned idx;
	unsigned prologue_bytes;
	int ret0_fp_idx;
	u32 seen;
	u32 flags;
	u32 *offsets;
	u32 *target;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};

int bpf_jit_enable __read_mostly;

static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
				  unsigned int size)
{
	void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);

	if (!ptr)
		return -EFAULT;
	memcpy(ret, ptr, size);
	return 0;
}
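/*
 * The jit_get_skb_* slowpath helpers below pack their result as
 * (err << 32) | value.  On little-endian EABI a u64 is returned in
 * r0 (low word) and r1 (high word), so the JITed caller can test r1
 * for a non-zero error code and take r0 as the loaded value.
 */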

static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
{
	u8 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 1);
	else
		err = skb_copy_bits(skb, offset, &ret, 1);

	return (u64)err << 32 | ret;
}

static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
{
	u16 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 2);
	else
		err = skb_copy_bits(skb, offset, &ret, 2);

	return (u64)err << 32 | ntohs(ret);
}

static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
{
	u32 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 4);
	else
		err = skb_copy_bits(skb, offset, &ret, 4);

	return (u64)err << 32 | ntohl(ret);
}

/*
 * Wrapper that handles both OABI and EABI and ensures Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	inst |= (cond << 28);
	inst = __opcode_to_mem_arm(inst);

	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst;

	ctx->idx++;
}
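/*
 * The compiler runs build_body() twice.  In the first ("fake") pass
 * ctx->target is NULL, so _emit() only advances ctx->idx: this sizes
 * the image, fills ctx->offsets[] with the body-relative byte offset
 * of every BPF instruction and collects ctx->seen.  The second pass
 * then writes the very same instruction stream into the real buffer.
 */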

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}

static u16 saved_regs(struct jit_ctx *ctx)
{
	u16 ret = 0;

	if ((ctx->skf->len > 1) ||
	    (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
		ret |= 1 << r_A;

#ifdef CONFIG_FRAME_POINTER
	ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
#else
	if (ctx->seen & SEEN_CALL)
		ret |= 1 << ARM_LR;
#endif
	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		ret |= 1 << r_skb;
	if (ctx->seen & SEEN_DATA)
		ret |= (1 << r_skb_data) | (1 << r_skb_hl);
	if (ctx->seen & SEEN_X)
		ret |= 1 << r_X;

	return ret;
}

static inline int mem_words_used(struct jit_ctx *ctx)
{
	/* yes, we do waste some stack space IF there are "holes" in the set */
	return fls(ctx->seen & SEEN_MEM);
}
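/*
 * Example of the waste: a filter touching only M[0] and M[2] leaves
 * SEEN_MEM bits 0b101 set, so fls() returns 3 and three words are
 * reserved even though M[1] is never used - that is the "hole"
 * mentioned above.
 */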

static inline bool is_load_to_a(u16 inst)
{
	switch (inst) {
	case BPF_LD | BPF_W | BPF_LEN:
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		return true;
	default:
		return false;
	}
}

static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}

static void build_prologue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);
	u16 first_inst = ctx->skf->insns[0].code;
	u16 off;

#ifdef CONFIG_FRAME_POINTER
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	if (reg_set)
		emit(ARM_PUSH(reg_set), ctx);
#endif

	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		emit(ARM_MOV_R(r_skb, ARM_R0), ctx);

	if (ctx->seen & SEEN_DATA) {
		off = offsetof(struct sk_buff, data);
		emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
		/* headlen = len - data_len */
		off = offsetof(struct sk_buff, len);
		emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
		off = offsetof(struct sk_buff, data_len);
		emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
		emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
	}

	if (ctx->flags & FLAG_NEED_X_RESET)
		emit(ARM_MOV_I(r_X, 0), ctx);

	/* do not leak kernel data to userspace */
	if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
		emit(ARM_MOV_I(r_A, 0), ctx);

	/* stack space for the BPF_MEM words */
	if (ctx->seen & SEEN_MEM)
		emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);

	if (ctx->seen & SEEN_MEM)
		emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);

	reg_set &= ~(1 << ARM_LR);

#ifdef CONFIG_FRAME_POINTER
	/* the first instruction of the prologue was: mov ip, sp */
	reg_set &= ~(1 << ARM_IP);
	reg_set |= (1 << ARM_SP);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	if (reg_set) {
		if (ctx->seen & SEEN_CALL)
			reg_set |= 1 << ARM_PC;
		emit(ARM_POP(reg_set), ctx);
	}

	if (!(ctx->seen & SEEN_CALL))
		emit(ARM_BX(ARM_LR), ctx);
#endif
}

static int16_t imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);

	return -1;
}
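/*
 * imm8m() computes the ARM "operand 2" immediate encoding: an 8-bit
 * value rotated right by an even amount, returned as (rot << 8) | imm8,
 * or -1 if x cannot be encoded.  Worked example: 0x00ff0000 is 0xff
 * rotated right by 16, so imm8m(0x00ff0000) == (8 << 8) | 0xff == 0x8ff,
 * the constant used by the pre-ARMv6 emit_swap16() below.
 */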

#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset = ctx->offsets[ctx->skf->len];
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	if (imm & ~0xfff) {
		/*
		 * literal pool is too far, signal it into flags. we
		 * can only detect it on the second pass unfortunately.
		 */
		ctx->flags |= FLAG_IMM_OVERFLOW;
		return 0;
	}

	return imm;
}
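/*
 * Worked example: with the pool's first slot at byte offset 0x100 and
 * the loading instruction at idx 8, the displacement is
 * 0x100 - (8 * 4 + 8) = 216.  An ldr can only carry a 12-bit (up to
 * 4095 byte) positive displacement, hence the overflow check above.
 */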

#endif /* __LINUX_ARM_ARCH__ */

/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}

#if __LINUX_ARM_ARCH__ < 6

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
	_emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
	_emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
	_emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
}

static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
{
	/* r_dst = (r_src << 8) | (r_src >> 8) */
	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);

	/*
	 * we need to mask out the bits set in r_dst[23:16] due to
	 * the first shift instruction.
	 *
	 * note that 0x8ff is the encoded immediate 0x00ff0000.
	 */
	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
}

#else /* ARMv6+ */

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV(r_res, r_res), ctx);
#endif
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV16(r_res, r_res), ctx);
#endif
}

static inline void emit_swap16(u8 r_dst __maybe_unused,
			       u8 r_src __maybe_unused,
			       struct jit_ctx *ctx __maybe_unused)
{
#ifdef __LITTLE_ENDIAN
	emit(ARM_REV16(r_dst, r_src), ctx);
#endif
}

#endif /* __LINUX_ARM_ARCH__ < 6 */


/* Compute the immediate value for a PC-relative branch. */
static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
{
	u32 imm;

	if (ctx->target == NULL)
		return 0;
	/*
	 * BPF allows only forward jumps and the offset of the target is
	 * still the one computed during the first pass.
	 */
	imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);

	return imm >> 2;
}
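/*
 * The ARM B instruction encodes a signed word offset relative to PC
 * (the branch's address + 8), which is why 8 is subtracted and the
 * byte distance shifted right by 2.  E.g. branching from idx 10
 * (byte 40) to a target at body offset 60 with a 12-byte prologue
 * gives (60 + 12 - (40 + 8)) >> 2 = 6 words.
 */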

#define OP_IMM3(op, r1, r2, imm_val, ctx)				\
	do {								\
		imm12 = imm8m(imm_val);					\
		if (imm12 < 0) {					\
			emit_mov_i_no8m(r_scratch, imm_val, ctx);	\
			emit(op ## _R((r1), (r2), r_scratch), ctx);	\
		} else {						\
			emit(op ## _I((r1), (r2), imm12), ctx);		\
		}							\
	} while (0)

static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
{
	if (ctx->ret0_fp_idx >= 0) {
		_emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
		/* NOP to keep the size constant between passes */
		emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
	} else {
		_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
		_emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
	}
}
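/*
 * Error paths return 0 ("drop the packet").  When the filter contains
 * a "ret #0" instruction, its index was remembered in ret0_fp_idx and
 * the error branch reuses that instruction's code; otherwise R0 is
 * zeroed conditionally and control jumps past the last instruction,
 * straight into the epilogue.  Both shapes are two words long, which
 * keeps the first-pass offsets valid.
 */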

static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);

	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}
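/*
 * Pre-ARMv5 cores have no BLX, so the link register is set up by hand:
 * "mov lr, pc" reads PC as the current instruction + 8, i.e. exactly
 * the instruction after the following bx/mov, giving a correct return
 * address.  BX is still preferred when the CPU has Thumb support so
 * that interworking keeps working for Thumb helpers.
 */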

static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		emit(ARM_UDIV(rd, rm, rn), ctx);
		return;
	}
#endif

	/*
	 * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
	 * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
	 * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
	 * before using it as a source for ARM_R1.
	 *
	 * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
	 * ARM_R5 (r_X), so there are no particular register overlap
	 * issues.
	 */
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, rn), ctx);
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, rm), ctx);

	ctx->seen |= SEEN_CALL;
	emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
	emit_blx_r(ARM_R3, ctx);

	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);
}

static inline void update_on_xread(struct jit_ctx *ctx)
{
	if (!(ctx->seen & SEEN_X))
		ctx->flags |= FLAG_NEED_X_RESET;

	ctx->seen |= SEEN_X;
}
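/*
 * If the first access to X in program order is a read, the prologue
 * must zero r_X (FLAG_NEED_X_RESET): r5 would otherwise still hold
 * whatever the caller left there, leaking kernel register contents
 * into the filter's result.
 */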

static int build_body(struct jit_ctx *ctx)
{
	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned i, load_order, off, condt;
	int imm12;
	u32 k;

	for (i = 0; i < prog->len; i++) {
		u16 code;

		inst = &(prog->insns[i]);
		/* K as an immediate value operand */
		k = inst->k;
		code = bpf_anc_helper(inst);

		/* compute offsets only in the fake pass */
		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (code) {
		case BPF_LD | BPF_IMM:
			emit_mov_i(r_A, k, ctx);
			break;
		case BPF_LD | BPF_W | BPF_LEN:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			emit(ARM_LDR_I(r_A, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_LD | BPF_MEM:
			/* A = scratch[k] */
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_LD | BPF_W | BPF_ABS:
			load_order = 2;
			goto load;
		case BPF_LD | BPF_H | BPF_ABS:
			load_order = 1;
			goto load;
		case BPF_LD | BPF_B | BPF_ABS:
			load_order = 0;
load:
			emit_mov_i(r_off, k, ctx);
load_common:
			ctx->seen |= SEEN_DATA | SEEN_CALL;

			if (load_order > 0) {
				emit(ARM_SUB_I(r_scratch, r_skb_hl,
					       1 << load_order), ctx);
				emit(ARM_CMP_R(r_scratch, r_off), ctx);
				condt = ARM_COND_GE;
			} else {
				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
				condt = ARM_COND_HI;
			}

			/*
			 * test for negative offset, only if we are
			 * currently scheduled to take the fast
			 * path. this will update the flags so that
			 * the fast path instructions are ignored if
			 * the offset is negative, falling through to
			 * the slow path below.
			 *
			 * for load_order == 0 the HI condition will
			 * make loads at offset 0 take the slow path too.
			 */
			_emit(condt, ARM_CMP_I(r_off, 0), ctx);

			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
			      ctx);

			if (load_order == 0)
				_emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
				      ctx);
			else if (load_order == 1)
				emit_load_be16(condt, r_A, r_scratch, ctx);
			else if (load_order == 2)
				emit_load_be32(condt, r_A, r_scratch, ctx);

			_emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);

			/* the slowpath */
			emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* the offset is already in R1 */
			emit_blx_r(ARM_R3, ctx);
			/* check the result of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
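		/*
		 * Illustrative shape of the code emitted above for
		 * BPF_LD | BPF_W | BPF_ABS with a small K (a sketch,
		 * not byte-exact; assumes ARMv7 little-endian):
		 *
		 *	mov	r1, #K			@ r_off
		 *	sub	r0, r8, #4		@ headlen - sizeof(u32)
		 *	cmp	r0, r1			@ in bounds?
		 *	cmpge	r1, #0			@ and non-negative?
		 *	addge	r0, r1, r7		@ data + off
		 *	ldrge	r4, [r0]
		 *	revge	r4, r4			@ to host order
		 *	bge	<next BPF insn>
		 *	@ slow path: call jit_get_skb_w(skb, off),
		 *	@ error code lands in r1, value in r0
		 */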
		case BPF_LD | BPF_W | BPF_IND:
			load_order = 2;
			goto load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			load_order = 1;
			goto load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			load_order = 0;
load_ind:
			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
			goto load_common;
		case BPF_LDX | BPF_IMM:
			ctx->seen |= SEEN_X;
			emit_mov_i(r_X, k, ctx);
			break;
		case BPF_LDX | BPF_W | BPF_LEN:
			ctx->seen |= SEEN_X | SEEN_SKB;
			emit(ARM_LDR_I(r_X, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_LDX | BPF_MEM:
			ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			/* x = ((*(frame + k)) & 0xf) << 2; */
			ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
			/* the interpreter should deal with the negative K */
			if ((int)k < 0)
				return -1;
			/* offset in r1: we might have to take the slow path */
			emit_mov_i(r_off, k, ctx);
			emit(ARM_CMP_R(r_skb_hl, r_off), ctx);

			/* load in r0: common with the slowpath */
			_emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
						      ARM_R1), ctx);
			/*
			 * emit_mov_i() might generate one or two instructions,
			 * the same holds for emit_blx_r()
			 */
			_emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);

			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* r_off is r1 */
			emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
			emit_blx_r(ARM_R3, ctx);
			/* check the return value of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);

			emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
			emit(ARM_LSL_I(r_X, r_X, 2), ctx);
			break;
		case BPF_ST:
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_STX:
			update_on_xread(ctx);
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
			/* A += K */
			OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_X:
			update_on_xread(ctx);
			emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_K:
			/* A -= K */
			OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_X:
			update_on_xread(ctx);
			emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_K:
			/* A *= K */
			emit_mov_i(r_scratch, k, ctx);
			emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_X:
			update_on_xread(ctx);
			emit(ARM_MUL(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_K:
			if (k == 1)
				break;
			emit_mov_i(r_scratch, k, ctx);
			emit_udiv(r_A, r_A, r_scratch, ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_X:
			update_on_xread(ctx);
			emit(ARM_CMP_I(r_X, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);
			emit_udiv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			/* A |= K */
			OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			update_on_xread(ctx);
			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_XOR | BPF_K:
			/* A ^= K; */
			OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X:
			/* A ^= X */
			update_on_xread(ctx);
			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			/* A &= K */
			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			update_on_xread(ctx);
			emit(ARM_AND_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSL_I(r_A, r_A, k), ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_X:
			update_on_xread(ctx);
			emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSR_I(r_A, r_A, k), ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_X:
			update_on_xread(ctx);
			emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_NEG:
			/* A = -A */
			emit(ARM_RSB_I(r_A, r_A, 0), ctx);
			break;
		case BPF_JMP | BPF_JA:
			/* pc += K */
			emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
			/* pc += (A == K) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_imm;
		case BPF_JMP | BPF_JGT | BPF_K:
			/* pc += (A > K) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_imm;
		case BPF_JMP | BPF_JGE | BPF_K:
			/* pc += (A >= K) ? pc->jt : pc->jf */
			condt = ARM_COND_HS;
cmp_imm:
			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_CMP_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_CMP_I(r_A, imm12), ctx);
			}
cond_jump:
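			/*
			 * ARM condition codes come in inverted pairs
			 * that differ only in bit 0 (EQ/NE, HS/LO,
			 * HI/LS, ...), so "condt ^ 1" below selects
			 * the opposite condition for the jf branch.
			 */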
			if (inst->jt)
				_emit(condt, ARM_B(b_imm(i + inst->jt + 1,
						   ctx)), ctx);
			if (inst->jf)
				_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
						       ctx)), ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_X:
			/* pc += (A == X) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_x;
		case BPF_JMP | BPF_JGT | BPF_X:
			/* pc += (A > X) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_x;
		case BPF_JMP | BPF_JGE | BPF_X:
			/* pc += (A >= X) ? pc->jt : pc->jf */
			condt = ARM_COND_CS;
cmp_x:
			update_on_xread(ctx);
			emit(ARM_CMP_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_JMP | BPF_JSET | BPF_K:
			/* pc += (A & K) ? pc->jt : pc->jf */
			condt = ARM_COND_NE;
			/* not set iff all zeroes iff Z==1 iff EQ */

			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_TST_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_TST_I(r_A, imm12), ctx);
			}
			goto cond_jump;
		case BPF_JMP | BPF_JSET | BPF_X:
			/* pc += (A & X) ? pc->jt : pc->jf */
			update_on_xread(ctx);
			condt = ARM_COND_NE;
			emit(ARM_TST_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_RET | BPF_A:
			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
			goto b_epilogue;
		case BPF_RET | BPF_K:
			if ((k == 0) && (ctx->ret0_fp_idx < 0))
				ctx->ret0_fp_idx = i;
			emit_mov_i(ARM_R0, k, ctx);
b_epilogue:
			if (i != ctx->skf->len - 1)
				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
			break;
		case BPF_MISC | BPF_TAX:
			/* X = A */
			ctx->seen |= SEEN_X;
			emit(ARM_MOV_R(r_X, r_A), ctx);
			break;
		case BPF_MISC | BPF_TXA:
			/* A = X */
			update_on_xread(ctx);
			emit(ARM_MOV_R(r_A, r_X), ctx);
			break;
		case BPF_ANC | SKF_AD_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
			emit_swap16(r_A, r_scratch, ctx);
			break;
		case BPF_ANC | SKF_AD_CPU:
			/* r_scratch = current_thread_info() */
			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			/* A = skb->dev->ifindex */
			/* A = skb->dev->type */
			ctx->seen |= SEEN_SKB;
			off = offsetof(struct sk_buff, dev);
			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);

			emit(ARM_CMP_I(r_scratch, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);

			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  type) != 2);

			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				off = offsetof(struct net_device, ifindex);
				emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			} else {
				/*
				 * offset of field "type" in "struct
				 * net_device" is above what can be
				 * used in the ldrh rd, [rn, #imm]
				 * instruction, so load the offset in
				 * a register and use ldrh rd, [rn, rm]
				 */
				off = offsetof(struct net_device, type);
				emit_mov_i(ARM_R3, off, ctx);
				emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
			}
			break;
		case BPF_ANC | SKF_AD_MARK:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			off = offsetof(struct sk_buff, hash);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
				OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
			else {
				OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
				OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
			}
			break;
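		/*
		 * vlan_tci keeps VLAN_TAG_PRESENT in bit 12 (0x1000,
		 * reusing the CFI/DEI position of the on-wire TCI), so
		 * the TAG variant above masks that bit off while the
		 * PRESENT variant shifts it down to a 0/1 boolean.
		 */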
		case BPF_ANC | SKF_AD_PKTTYPE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  __pkt_type_offset[0]) != 1);
			off = PKT_TYPE_OFFSET();
			emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
			emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
#ifdef __BIG_ENDIAN_BITFIELD
			emit(ARM_LSR_I(r_A, r_A, 5), ctx);
#endif
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_PAY_OFFSET:
			ctx->seen |= SEEN_SKB | SEEN_CALL;

			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
			emit_blx_r(ARM_R3, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
		case BPF_LDX | BPF_W | BPF_ABS:
			/*
			 * load a 32bit word from struct seccomp_data.
			 * seccomp_check_filter() will already have checked
			 * that k is 32bit aligned and lies within the
			 * struct seccomp_data.
			 */
			ctx->seen |= SEEN_SKB;
			emit(ARM_LDR_I(r_A, r_skb, k), ctx);
			break;
		default:
			return -1;
		}

		if (ctx->flags & FLAG_IMM_OVERFLOW)
			/*
			 * this instruction generated an overflow when
			 * trying to access the literal pool, so
			 * delegate this filter to the kernel interpreter.
			 */
			return -1;
	}

	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}


void bpf_jit_compile(struct bpf_prog *fp)
{
	struct bpf_binary_header *header;
	struct jit_ctx ctx;
	unsigned tmp_idx;
	unsigned alloc_size;
	u8 *target_ptr;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.skf = fp;
	ctx.ret0_fp_idx = -1;

	ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	/* fake pass to fill in the ctx->seen */
	if (unlikely(build_body(&ctx)))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
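	/*
	 * The prologue (and, on pre-v7, the epilogue) is generated here
	 * only to learn its size: b_imm() and imm_offset() fold these
	 * byte counts into every branch and literal-pool displacement,
	 * so the second pass must emit exactly the same sequence.
	 */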

#if __LINUX_ARM_ARCH__ < 7
	tmp_idx = ctx.idx;
	build_epilogue(&ctx);
	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.idx += ctx.imm_count;
	if (ctx.imm_count) {
		ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
		if (ctx.imms == NULL)
			goto out;
	}
#else
	/* there's nothing after the epilogue on ARMv7 */
	build_epilogue(&ctx);
#endif
	alloc_size = 4 * ctx.idx;
	header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
				      4, jit_fill_hole);
	if (header == NULL)
		goto out;

	ctx.target = (u32 *) target_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);
	if (build_body(&ctx) < 0) {
#if __LINUX_ARM_ARCH__ < 7
		if (ctx.imm_count)
			kfree(ctx.imms);
#endif
		bpf_jit_binary_free(header);
		goto out;
	}
	build_epilogue(&ctx);

	flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));

#if __LINUX_ARM_ARCH__ < 7
	if (ctx.imm_count)
		kfree(ctx.imms);
#endif

	if (bpf_jit_enable > 1)
		/* there are 2 passes here */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	set_memory_ro((unsigned long)header, header->pages);
	fp->bpf_func = (void *)ctx.target;
	fp->jited = true;
out:
	kfree(ctx.offsets);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}