/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>

#include "bpf_jit_32.h"
/*
 * ABI:
 *
 * r0	scratch register
 * r4	BPF register A
 * r5	BPF register X
 * r6	pointer to the skb
 * r7	skb->data
 * r8	skb_headlen(skb)
 */
#define r_scratch	ARM_R0
/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
#define r_off		ARM_R1
#define r_A		ARM_R4
#define r_X		ARM_R5
#define r_skb		ARM_R6
#define r_skb_data	ARM_R7
#define r_skb_hl	ARM_R8
#define SCRATCH_SP_OFFSET	0
#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + 4 * (k))
#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
#define SEEN_MEM_WORD(k)	(1 << (k))
#define SEEN_X			(1 << BPF_MEMWORDS)
#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))
#define FLAG_NEED_X_RESET	(1 << 0)
#define FLAG_IMM_OVERFLOW	(1 << 1)
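/*
 * The SEEN_* bits are collected during the first (sizing) pass and tell the
 * prologue/epilogue which resources the filter actually uses: scratch words
 * on the stack, the X register, a helper call, the skb pointer and the
 * packet data.  FLAG_NEED_X_RESET asks the prologue to zero X before its
 * first read; FLAG_IMM_OVERFLOW marks a literal-pool offset that did not
 * fit, which makes the JIT fall back to the interpreter.
 */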
struct jit_ctx {
	const struct bpf_prog *skf;
	unsigned idx;
	unsigned prologue_bytes;
	int ret0_fp_idx;
	u32 seen;
	u32 flags;
	u32 *offsets;
	u32 *target;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};
int bpf_jit_enable __read_mostly;
static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
				  unsigned int size)
{
	void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);

	if (!ptr)
		return -EFAULT;
	memcpy(ret, ptr, size);
	return 0;
}
static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
{
	u8 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 1);
	else
		err = skb_copy_bits(skb, offset, &ret, 1);

	return (u64)err << 32 | ret;
}

static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
{
	u16 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 2);
	else
		err = skb_copy_bits(skb, offset, &ret, 2);

	return (u64)err << 32 | ntohs(ret);
}

static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
{
	u32 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 4);
	else
		err = skb_copy_bits(skb, offset, &ret, 4);

	return (u64)err << 32 | ntohl(ret);
}
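/*
 * The three helpers above pack the error code and the loaded value into one
 * u64: the error lands in the upper half (returned in r1 per the EABI), the
 * byte-swapped payload in the lower half (r0).  The generated slow path
 * compares r1 against zero and branches to the error return if the load
 * failed.
 */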
/*
 * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}

static u32 jit_mod(u32 dividend, u32 divisor)
{
	return dividend % divisor;
}
static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	inst |= (cond << 28);
	inst = __opcode_to_mem_arm(inst);

	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst;

	ctx->idx++;
}
/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}
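/*
 * Note the two-pass scheme: _emit() always advances ctx->idx but only writes
 * the instruction when ctx->target is set.  The first pass therefore just
 * measures the image and collects ctx->seen; the second pass emits into the
 * allocated buffer at exactly the offsets computed before.
 */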
static u16 saved_regs(struct jit_ctx *ctx)
{
	u16 ret = 0;

	if ((ctx->skf->len > 1) ||
	    (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
		ret |= 1 << r_A;

#ifdef CONFIG_FRAME_POINTER
	ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
#else
	if (ctx->seen & SEEN_CALL)
		ret |= 1 << ARM_LR;
#endif

	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		ret |= 1 << r_skb;
	if (ctx->seen & SEEN_DATA)
		ret |= (1 << r_skb_data) | (1 << r_skb_hl);
	if (ctx->seen & SEEN_X)
		ret |= 1 << r_X;

	return ret;
}
static inline int mem_words_used(struct jit_ctx *ctx)
{
	/* yes, we do waste some stack space IF there are "holes" in the set */
	return fls(ctx->seen & SEEN_MEM);
}
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;

	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}
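/*
 * Filling allocation holes with UDF (permanently undefined) encodings means
 * that a stray jump into the padding traps immediately instead of executing
 * whatever bytes happened to be there.
 */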
static void build_prologue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);
	u16 off;

#ifdef CONFIG_FRAME_POINTER
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	if (reg_set)
		emit(ARM_PUSH(reg_set), ctx);
#endif

	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		emit(ARM_MOV_R(r_skb, ARM_R0), ctx);

	if (ctx->seen & SEEN_DATA) {
		off = offsetof(struct sk_buff, data);
		emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
		/* headlen = len - data_len */
		off = offsetof(struct sk_buff, len);
		emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
		off = offsetof(struct sk_buff, data_len);
		emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
		emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
	}

	if (ctx->flags & FLAG_NEED_X_RESET)
		emit(ARM_MOV_I(r_X, 0), ctx);

	/* do not leak kernel data to userspace */
	if (bpf_needs_clear_a(&ctx->skf->insns[0]))
		emit(ARM_MOV_I(r_A, 0), ctx);

	/* stack space for the BPF_MEM words */
	if (ctx->seen & SEEN_MEM)
		emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
}
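/*
 * Since build_prologue() runs once to size the image and once to emit it,
 * it must generate the same instruction count both times; everything it
 * emits depends only on ctx->seen and ctx->flags, which are frozen after
 * the first build_body() pass.
 */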
static void build_epilogue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);

	if (ctx->seen & SEEN_MEM)
		emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);

	reg_set &= ~(1 << ARM_LR);

#ifdef CONFIG_FRAME_POINTER
	/* the first instruction of the prologue was: mov ip, sp */
	reg_set &= ~(1 << ARM_IP);
	reg_set |= (1 << ARM_SP);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	if (reg_set) {
		if (ctx->seen & SEEN_CALL)
			reg_set |= 1 << ARM_PC;
		emit(ARM_POP(reg_set), ctx);
	}

	if (!(ctx->seen & SEEN_CALL))
		emit(ARM_BX(ARM_LR), ctx);
#endif
}
static int16_t imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);

	return -1;
}
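/*
 * imm8m() mirrors the ARM "modified immediate" encoding: an 8-bit value
 * rotated right by an even amount.  On success the low byte holds the value
 * and bits 11:8 hold the rotation, i.e. exactly the 12-bit operand-2 field;
 * -1 means the constant cannot be encoded directly.
 */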
#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset  = ctx->offsets[ctx->skf->len];
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	if (imm & ~0xfff) {
		/*
		 * literal pool is too far, signal it into flags. we
		 * can only detect it on the second pass unfortunately.
		 */
		ctx->flags |= FLAG_IMM_OVERFLOW;
		return 0;
	}

	return imm;
}

#endif /* __LINUX_ARM_ARCH__ */
/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}
static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}
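/*
 * Cost of a constant load: one MOV for an imm8m, a MOVW/MOVT pair on ARMv7,
 * or a PC-relative ldr from the literal pool (built by imm_offset() after
 * the epilogue) on older cores.
 */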
#if __LINUX_ARM_ARCH__ < 6

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
	_emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
	_emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
	_emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
}

static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
{
	/* r_dst = (r_src << 8) | (r_src >> 8) */
	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);

	/*
	 * we need to mask out the bits set in r_dst[23:16] due to
	 * the first shift instruction.
	 *
	 * note that 0x8ff is the encoded immediate 0x00ff0000.
	 */
	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
}
#else  /* ARMv6+ */

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV(r_res, r_res), ctx);
#endif
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV16(r_res, r_res), ctx);
#endif
}

static inline void emit_swap16(u8 r_dst __maybe_unused,
			       u8 r_src __maybe_unused,
			       struct jit_ctx *ctx __maybe_unused)
{
#ifdef __LITTLE_ENDIAN
	emit(ARM_REV16(r_dst, r_src), ctx);
#endif
}

#endif /* __LINUX_ARM_ARCH__ < 6 */
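/*
 * ARMv6 introduced REV/REV16, so the endian swaps above collapse to a single
 * instruction each, compared with the shift/orr/bic sequences needed on
 * earlier cores.
 */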
/* Compute the immediate value for a PC-relative branch. */
static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
{
	u32 imm;

	if (ctx->target == NULL)
		return 0;
	/*
	 * BPF allows only forward jumps and the offset of the target is
	 * still the one computed during the first pass.
	 */
	imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);

	return imm >> 2;
}
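/*
 * ARM's B instruction encodes a signed word offset relative to PC, and the
 * PC reads as the branch address + 8 in ARM state; hence the subtraction of
 * (ctx->idx * 4 + 8) and the final shift by two.
 */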
#define OP_IMM3(op, r1, r2, imm_val, ctx)				\
	do {								\
		imm12 = imm8m(imm_val);					\
		if (imm12 < 0) {					\
			emit_mov_i_no8m(r_scratch, imm_val, ctx);	\
			emit(op ## _R((r1), (r2), r_scratch), ctx);	\
		} else {						\
			emit(op ## _I((r1), (r2), imm12), ctx);		\
		}							\
	} while (0)
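/*
 * OP_IMM3 assigns to an 'imm12' variable that must exist in the calling
 * scope (build_body() declares it) and may clobber r_scratch when the
 * constant is not encodable as an imm8m.
 */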
static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
{
	if (ctx->ret0_fp_idx >= 0) {
		_emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
		/* NOP to keep the size constant between passes */
		emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
	} else {
		_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
		_emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
	}
}
static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);

	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}
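/*
 * Pre-ARMv5 cores have no BLX, so the return address is loaded manually:
 * "mov lr, pc" reads PC+8, i.e. the instruction following the jump, before
 * control transfers through bx/mov pc.
 */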
static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx,
				int bpf_op)
{
#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		if (bpf_op == BPF_DIV)
			emit(ARM_UDIV(rd, rm, rn), ctx);
		else {
			emit(ARM_UDIV(ARM_R3, rm, rn), ctx);
			emit(ARM_MLS(rd, rn, ARM_R3, rm), ctx);
		}
		return;
	}
#endif

	/*
	 * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
	 * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
	 * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
	 * before using it as a source for ARM_R1.
	 *
	 * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
	 * ARM_R5 (r_X) so there are no particular register overlap
	 * issues.
	 */
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, rn), ctx);
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, rm), ctx);

	ctx->seen |= SEEN_CALL;
	emit_mov_i(ARM_R3, bpf_op == BPF_DIV ? (u32)jit_udiv : (u32)jit_mod,
		   ctx);
	emit_blx_r(ARM_R3, ctx);

	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);
}
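/*
 * On an IDIVA-capable ARMv7 core no call is needed: udiv yields the
 * quotient and "mls rd, rn, r3, rm" computes rd = rm - rn * r3, i.e. the
 * remainder.  Everything else goes through the C helpers above, which is
 * why SEEN_CALL is recorded.
 */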
static inline void update_on_xread(struct jit_ctx *ctx)
{
	if (!(ctx->seen & SEEN_X))
		ctx->flags |= FLAG_NEED_X_RESET;

	ctx->seen |= SEEN_X;
}
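/*
 * Classic BPF allows reading X before it is ever written; flagging the
 * first read makes the prologue zero the register so no stale kernel data
 * can leak into the filter.
 */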
static int build_body(struct jit_ctx *ctx)
{
	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned i, load_order, off, condt;
	int imm12;
	u32 k;
	for (i = 0; i < prog->len; i++) {
		u16 code;

		inst = &(prog->insns[i]);
		/* K as an immediate value operand */
		k = inst->k;
		code = bpf_anc_helper(inst);

		/* compute offsets only in the fake pass */
		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (code) {
		case BPF_LD | BPF_IMM:
			emit_mov_i(r_A, k, ctx);
			break;
		case BPF_LD | BPF_W | BPF_LEN:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			emit(ARM_LDR_I(r_A, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_LD | BPF_MEM:
			/* A = scratch[k] */
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_LD | BPF_W | BPF_ABS:
			load_order = 2;
			goto load;
		case BPF_LD | BPF_H | BPF_ABS:
			load_order = 1;
			goto load;
		case BPF_LD | BPF_B | BPF_ABS:
			load_order = 0;
load:
			emit_mov_i(r_off, k, ctx);
load_common:
			ctx->seen |= SEEN_DATA | SEEN_CALL;

			if (load_order > 0) {
				emit(ARM_SUB_I(r_scratch, r_skb_hl,
					       1 << load_order), ctx);
				emit(ARM_CMP_R(r_scratch, r_off), ctx);
				condt = ARM_COND_GE;
			} else {
				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
				condt = ARM_COND_HI;
			}

			/*
			 * test for negative offset, only if we are
			 * currently scheduled to take the fast
			 * path. this will update the flags so that
			 * the slowpath instructions are ignored if the
			 * offset is negative.
			 *
			 * for load_order == 0 the HI condition will
			 * make loads at offset 0 take the slow path too.
			 */
			_emit(condt, ARM_CMP_I(r_off, 0), ctx);

			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
			      ctx);

			if (load_order == 0)
				_emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
				      ctx);
			else if (load_order == 1)
				emit_load_be16(condt, r_A, r_scratch, ctx);
			else if (load_order == 2)
				emit_load_be32(condt, r_A, r_scratch, ctx);

			_emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);

			/* the slowpath */
			emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* the offset is already in R1 */
			emit_blx_r(ARM_R3, ctx);
			/* check the result of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
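		/*
		 * The fast path above executes entirely under 'condt': when
		 * the length check fails (or the offset is negative) the
		 * conditional instructions are, in effect, skipped and the
		 * out-of-line helper performs the checked load instead.
		 */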
		case BPF_LD | BPF_W | BPF_IND:
			load_order = 2;
			goto load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			load_order = 1;
			goto load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			load_order = 0;
load_ind:
			update_on_xread(ctx);
			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
			goto load_common;
		case BPF_LDX | BPF_IMM:
			ctx->seen |= SEEN_X;
			emit_mov_i(r_X, k, ctx);
			break;
		case BPF_LDX | BPF_W | BPF_LEN:
			/* X = skb->len */
			ctx->seen |= SEEN_X | SEEN_SKB;
			emit(ARM_LDR_I(r_X, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_LDX | BPF_MEM:
			ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			/* x = ((*(frame + k)) & 0xf) << 2; */
			ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
			/* the interpreter should deal with the negative K */
			if ((int)k < 0)
				return -1;
			/* offset in r1: we might have to take the slow path */
			emit_mov_i(r_off, k, ctx);
			emit(ARM_CMP_R(r_skb_hl, r_off), ctx);

			/* load in r0: common with the slowpath */
			_emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
						      ARM_R1), ctx);
			/*
			 * emit_mov_i() might generate one or two instructions,
			 * the same holds for emit_blx_r()
			 */
			_emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);

			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* r_off is r1 */
			emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
			emit_blx_r(ARM_R3, ctx);
			/* check the return value of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);

			emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
			emit(ARM_LSL_I(r_X, r_X, 2), ctx);
			break;
		case BPF_ST:
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_STX:
			update_on_xread(ctx);
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
			/* A += K */
			OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_X:
			update_on_xread(ctx);
			emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_K:
			/* A -= K */
			OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_X:
			update_on_xread(ctx);
			emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_K:
			/* A *= K */
			emit_mov_i(r_scratch, k, ctx);
			emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_X:
			update_on_xread(ctx);
			emit(ARM_MUL(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_K:
			if (k == 1)
				break;
			emit_mov_i(r_scratch, k, ctx);
			emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_DIV);
			break;
		case BPF_ALU | BPF_DIV | BPF_X:
			update_on_xread(ctx);
			emit(ARM_CMP_I(r_X, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);
			emit_udivmod(r_A, r_A, r_X, ctx, BPF_DIV);
			break;
		case BPF_ALU | BPF_MOD | BPF_K:
			if (k == 1) {
				emit_mov_i(r_A, 0, ctx);
				break;
			}
			emit_mov_i(r_scratch, k, ctx);
			emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_MOD);
			break;
		case BPF_ALU | BPF_MOD | BPF_X:
			update_on_xread(ctx);
			emit(ARM_CMP_I(r_X, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);
			emit_udivmod(r_A, r_A, r_X, ctx, BPF_MOD);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			/* A |= K */
			OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			update_on_xread(ctx);
			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_XOR | BPF_K:
			/* A ^= K */
			OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X:
			/* A ^= X */
			update_on_xread(ctx);
			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			/* A &= K */
			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			update_on_xread(ctx);
			emit(ARM_AND_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSL_I(r_A, r_A, k), ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_X:
			update_on_xread(ctx);
			emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSR_I(r_A, r_A, k), ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_X:
			update_on_xread(ctx);
			emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_NEG:
			/* A = -A */
			emit(ARM_RSB_I(r_A, r_A, 0), ctx);
			break;
		case BPF_JMP | BPF_JA:
			/* pc += K */
			emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
			/* pc += (A == K) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_imm;
		case BPF_JMP | BPF_JGT | BPF_K:
			/* pc += (A > K) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_imm;
		case BPF_JMP | BPF_JGE | BPF_K:
			/* pc += (A >= K) ? pc->jt : pc->jf */
			condt = ARM_COND_HS;
cmp_imm:
			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_CMP_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_CMP_I(r_A, imm12), ctx);
			}
cond_jump:
			if (inst->jt)
				_emit(condt, ARM_B(b_imm(i + inst->jt + 1,
						   ctx)), ctx);
			if (inst->jf)
				_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
							     ctx)), ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_X:
			/* pc += (A == X) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_x;
		case BPF_JMP | BPF_JGT | BPF_X:
			/* pc += (A > X) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_x;
		case BPF_JMP | BPF_JGE | BPF_X:
			/* pc += (A >= X) ? pc->jt : pc->jf */
			condt = ARM_COND_CS;
cmp_x:
			update_on_xread(ctx);
			emit(ARM_CMP_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_JMP | BPF_JSET | BPF_K:
			/* pc += (A & K) ? pc->jt : pc->jf */
			condt = ARM_COND_NE;
			/* not set iff all zeroes iff Z==1 iff EQ */

			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_TST_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_TST_I(r_A, imm12), ctx);
			}
			goto cond_jump;
		case BPF_JMP | BPF_JSET | BPF_X:
			/* pc += (A & X) ? pc->jt : pc->jf */
			update_on_xread(ctx);
			condt = ARM_COND_NE;
			emit(ARM_TST_R(r_A, r_X), ctx);
			goto cond_jump;
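		/*
		 * 'condt ^ 1' flips the condition to its complement (EQ/NE,
		 * HI/LS, HS/LO, ...), since ARM condition codes pair up in
		 * adjacent encodings; this emits the jf branch as the
		 * inverse of the jt branch.
		 */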
		case BPF_RET | BPF_A:
			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
			goto b_epilogue;
		case BPF_RET | BPF_K:
			if ((k == 0) && (ctx->ret0_fp_idx < 0))
				ctx->ret0_fp_idx = i;
			emit_mov_i(ARM_R0, k, ctx);
b_epilogue:
			if (i != ctx->skf->len - 1)
				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
			break;
		case BPF_MISC | BPF_TAX:
			/* X = A */
			ctx->seen |= SEEN_X;
			emit(ARM_MOV_R(r_X, r_A), ctx);
			break;
		case BPF_MISC | BPF_TXA:
			/* A = X */
			update_on_xread(ctx);
			emit(ARM_MOV_R(r_A, r_X), ctx);
			break;
		case BPF_ANC | SKF_AD_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
			emit_swap16(r_A, r_scratch, ctx);
			break;
		case BPF_ANC | SKF_AD_CPU:
			/* r_scratch = current_thread_info() */
			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			break;
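		/*
		 * The BIC trick works because the kernel stack is
		 * THREAD_SIZE-aligned and thread_info sits at its base, so
		 * clearing the low bits of SP yields the thread_info
		 * pointer (true for the kernels this JIT targets, before
		 * THREAD_INFO_IN_TASK).
		 */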
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			/* A = skb->dev->ifindex */
			/* A = skb->dev->type */
			ctx->seen |= SEEN_SKB;
			off = offsetof(struct sk_buff, dev);
			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);

			emit(ARM_CMP_I(r_scratch, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);

			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  type) != 2);

			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				off = offsetof(struct net_device, ifindex);
				emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			} else {
				/*
				 * offset of field "type" in "struct
				 * net_device" is above what can be
				 * used in the ldrh rd, [rn, #imm]
				 * instruction, so load the offset in
				 * a register and use ldrh rd, [rn, rm]
				 */
				off = offsetof(struct net_device, type);
				emit_mov_i(ARM_R3, off, ctx);
				emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
			}
			break;
		case BPF_ANC | SKF_AD_MARK:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			off = offsetof(struct sk_buff, hash);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
				OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
			else {
				OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
				OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
			}
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  __pkt_type_offset[0]) != 1);
			off = PKT_TYPE_OFFSET();
			emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
			emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
#ifdef __BIG_ENDIAN_BITFIELD
			emit(ARM_LSR_I(r_A, r_A, 5), ctx);
#endif
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_PAY_OFFSET:
			ctx->seen |= SEEN_SKB | SEEN_CALL;

			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
			emit_blx_r(ARM_R3, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
		case BPF_LDX | BPF_W | BPF_ABS:
			/*
			 * load a 32bit word from struct seccomp_data.
			 * seccomp_check_filter() will already have checked
			 * that k is 32bit aligned and lies within the
			 * struct seccomp_data.
			 */
			ctx->seen |= SEEN_SKB;
			emit(ARM_LDR_I(r_A, r_skb, k), ctx);
			break;
		default:
			return -1;
		}

		if (ctx->flags & FLAG_IMM_OVERFLOW)
			/*
			 * this instruction generated an overflow when
			 * trying to access the literal pool, so
			 * delegate this filter to the kernel interpreter.
			 */
			return -1;
	}

	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}
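/*
 * Note that ctx->offsets has prog->len + 1 entries: the extra slot records
 * the end of the body, so b_imm(prog->len, ctx) can branch straight to the
 * epilogue (used by the return paths above).
 */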
void bpf_jit_compile(struct bpf_prog *fp)
{
	struct bpf_binary_header *header;
	struct jit_ctx ctx;
	unsigned tmp_idx;
	unsigned alloc_size;
	u8 *target_ptr;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.skf		= fp;
	ctx.ret0_fp_idx = -1;

	ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	/* fake pass to fill in the ctx->seen */
	if (unlikely(build_body(&ctx)))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

#if __LINUX_ARM_ARCH__ < 7
	tmp_idx = ctx.idx;
	build_epilogue(&ctx);
	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.idx += ctx.imm_count;
	if (ctx.imm_count) {
		ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
		if (ctx.imms == NULL)
			goto out;
	}
#else
	/* there's nothing after the epilogue on ARMv7 */
	build_epilogue(&ctx);
#endif

	alloc_size = 4 * ctx.idx;
	header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
				      4, jit_fill_hole);
	if (header == NULL)
		goto out;

	ctx.target = (u32 *) target_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);
	if (build_body(&ctx) < 0) {
#if __LINUX_ARM_ARCH__ < 7
		if (ctx.imm_count)
			kfree(ctx.imms);
#endif
		bpf_jit_binary_free(header);
		goto out;
	}
	build_epilogue(&ctx);

	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));

#if __LINUX_ARM_ARCH__ < 7
	if (ctx.imm_count)
		kfree(ctx.imms);
#endif

	if (bpf_jit_enable > 1)
		/* there are 2 passes here */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	set_memory_ro((unsigned long)header, header->pages);
	fp->bpf_func = (void *)ctx.target;
	fp->jited = 1;
out:
	kfree(ctx.offsets);
}
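/*
 * The image is made read-only once it is finalized, so the executable code
 * cannot be silently rewritten; bpf_jit_free() below flips it back to
 * read-write before handing the pages back.
 */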
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}