/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>

#include "bpf_jit.h"
int bpf_jit_enable __read_mostly;

#define TMP_REG_1 (MAX_BPF_REG + 0)
#define TMP_REG_2 (MAX_BPF_REG + 1)
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_R(25),
	/* temporary registers for internal BPF JIT */
	[TMP_REG_1] = A64_R(23),
	[TMP_REG_2] = A64_R(24),
};
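/*
 * Note: BPF argument registers r1-r5 map straight onto the AAPCS64
 * argument registers x0-x4, so no shuffling is needed before calling
 * in-kernel helpers; r6-r9 and the BPF frame pointer live in
 * callee-saved registers (x19-x25) and therefore survive those calls.
 */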
struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;
	int tmp_used;
	int epilogue_offset;
	int *offset;
	u32 *image;
};
static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}
static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
	tmp >>= 16;
	shift += 16;
	while (tmp) {
		if (tmp & 0xffff)
			emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
		tmp >>= 16;
		shift += 16;
	}
}
static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}
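/*
 * For negative 32-bit immediates the helper above prefers MOVN, which
 * writes the bitwise NOT of its operand, so e.g. -1 becomes a single
 * "movn reg, #0" instead of a MOVZ/MOVK pair.
 */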
static inline int bpf2a64_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx)
{
	int to = ctx->offset[bpf_to];
	/* -1 to account for the Branch instruction */
	int from = ctx->offset[bpf_from] - 1;

	return to - from;
}
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;

	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}
/* Stack must be a multiple of 16 bytes */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)

#define _STACK_SIZE \
	(MAX_BPF_STACK \
	 + 4 /* extra for skb_copy_bits buffer */)

#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
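/*
 * AAPCS64 requires SP to stay 16-byte aligned, hence the rounding
 * above: with MAX_BPF_STACK = 512 the skb_copy_bits scratch area makes
 * _STACK_SIZE = 516, and STACK_ALIGN() rounds that up to 528.
 */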
static void build_prologue(struct jit_ctx *ctx)
{
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];

	/*
	 * BPF prog stack layout
	 *
	 * original A64_SP =>   0:+-----+ BPF prologue
	 *                        |FP/LR|
	 * current A64_FP =>  -16:+-----+
	 *                        | ... | callee saved registers
	 *                        +-----+
	 *                        |     | x25/x26
	 * BPF fp register => -80:+-----+ <= (BPF_FP)
	 *                        |     |
	 *                        | ... | BPF prog stack
	 *                        |     |
	 *                        +-----+ <= (BPF_FP - MAX_BPF_STACK)
	 *                        |RSVD | JIT scratchpad
	 * current A64_SP =>      +-----+ <= (BPF_FP - STACK_SIZE)
	 *                        |     |
	 *                        | ... | Function call stack
	 *                        |     |
	 *                        +-----+
	 *                          low
	 */

	/* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);

	/* Save fp (x25) and x26. SP requires 16-byte alignment */
	emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);

	/* Set up BPF prog stack base register (x25) */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Set up function call stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
}
static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);

	/* Restore fp (x25) and x26 */
	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);

	/* Restore callee-saved registers */
	emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore FP/LR registers */
	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}
/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	u8 jmp_cond;
	s32 jmp_offset;

#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)
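/*
 * check_imm19() and check_imm26() mirror the range of the A64 branch
 * encodings: conditional branches and CBZ/CBNZ carry a 19-bit signed
 * word offset (about +/-1MiB), an unconditional B a 26-bit one
 * (about +/-128MiB).
 */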
	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];

		/* if (src == 0) return 0 */
		jmp_offset = 3; /* skip ahead to else path */
		check_imm19(jmp_offset);
		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
		emit(A64_MOVZ(1, r0, 0, 0), ctx);
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		/* else */
		switch (BPF_OP(code)) {
		case BPF_DIV:
			emit(A64_UDIV(is64, dst, dst, src), ctx);
			break;
		case BPF_MOD:
			emit(A64_UDIV(is64, tmp, dst, src), ctx);
			emit(A64_MUL(is64, tmp, tmp, src), ctx);
			emit(A64_SUB(is64, dst, dst, tmp), ctx);
			break;
		}
		break;
	}
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			/* upper 32 bits already cleared */
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(A64_UXTW(is64, dst, dst), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;
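	/*
	 * On a little-endian kernel BPF_FROM_BE is the real byte swap,
	 * while BPF_FROM_LE only needs to zero-extend the low 16 or 32
	 * bits (the emit_bswap_uxt path); on a big-endian kernel the two
	 * roles are swapped.
	 */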
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ADD(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_AND(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ORR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_EOR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;
	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
		emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JSET:
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
		emit(A64_TST(1, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_CMP(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_TST(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		const u64 func = (u64)__bpf_call_base + imm;

		emit_a64_mov_i64(tmp, func, ctx);
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
		break;
	}
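	/*
	 * The FP/LR push/pop around the BLR above keeps a valid AAPCS
	 * frame record while the in-kernel helper runs; the helper's
	 * return value comes back in x0 and is copied into BPF r0.
	 */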
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT,
		   simply fallthrough to epilogue. */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		if (insn1.code != 0 || insn1.src_reg != 0 ||
		    insn1.dst_reg != 0 || insn1.off != 0) {
			/* Note: verifier in BPF core must catch invalid
			 * instructions.
			 */
			pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
			return -EINVAL;
		}

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}
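	/*
	 * BPF_LD | BPF_IMM | BPF_DW occupies two 8-byte BPF instructions
	 * (the second one only carries the upper 32 bits of the
	 * immediate), which is why the case above returns 1 instead of 0.
	 */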
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}
		break;
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		/* Load imm to a register then store it */
		emit_a64_mov_i(1, tmp2, off, ctx);
		emit_a64_mov_i(1, tmp, imm, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(tmp, dst, tmp2), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(tmp, dst, tmp2), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(tmp, dst, tmp2), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(tmp, dst, tmp2), ctx);
			break;
		}
		break;
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		goto notyet;
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
	{
		const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
		const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
		const u8 fp = bpf2a64[BPF_REG_FP];
		const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
		const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
		const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
		const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
		const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
		int size;

		emit(A64_MOV(1, r1, r6), ctx);
		emit_a64_mov_i(0, r2, imm, ctx);
		if (BPF_MODE(code) == BPF_IND)
			emit(A64_ADD(0, r2, r2, src), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			size = 4;
			break;
		case BPF_H:
			size = 2;
			break;
		case BPF_B:
			size = 1;
			break;
		default:
			return -EINVAL;
		}
		emit_a64_mov_i64(r3, size, ctx);
		emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(r5), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
		jmp_offset = epilogue_offset(ctx);
		check_imm19(jmp_offset);
		emit(A64_CBZ(1, r0, jmp_offset), ctx);
		emit(A64_MOV(1, r5, r0), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV32(0, r0, r0), ctx);
#endif
			break;
		case BPF_H:
			emit(A64_LDRH(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV16(0, r0, r0), ctx);
#endif
			break;
		case BPF_B:
			emit(A64_LDRB(r0, r5, A64_ZR), ctx);
			break;
		}
		break;
	}
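	/*
	 * bpf_load_pointer() returns NULL when the requested bytes cannot
	 * be fetched from the skb; the CBZ above then branches to the
	 * epilogue so the program returns 0, matching the classic BPF
	 * LD_ABS/LD_IND error semantics.
	 */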
notyet:
		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
		return -EFAULT;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}
static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = build_insn(insn, ctx);

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;
		if (ret > 0) {
			i++;
			continue;
		}
		if (ret)
			return ret;
	}

	return 0;
}
static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		u32 a64_insn = le32_to_cpu(ctx->image[i]);

		if (a64_insn == AARCH64_BREAK_FAULT)
			return -1;
	}

	return 0;
}
static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
void bpf_jit_compile(struct bpf_prog *prog)
{
	/* Nothing to do here. We support Internal BPF. */
}
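/*
 * JITing runs in two passes: the first pass is executed with
 * ctx.image == NULL purely to count instructions and record
 * per-instruction offsets, the second pass emits the final code into
 * the allocated image, and a last scan rejects any image that still
 * contains break-point fillers.
 */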
void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header;
	struct jit_ctx ctx;
	int image_size;
	u8 *image_ptr;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL)
		return;

	/* 1. Initial fake pass to compute ctx->idx. */

	/* Fake pass to fill in ctx->offset and ctx->tmp_used. */
	if (build_body(&ctx))
		goto out;

	build_prologue(&ctx);

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	/* Now we know the actual image size. */
	image_size = sizeof(u32) * ctx.idx;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL)
		goto out;

	/* 2. Now, the actual pass. */

	ctx.image = (u32 *)image_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);

	if (build_body(&ctx)) {
		bpf_jit_binary_free(header);
		goto out;
	}

	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code. */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		goto out;
	}

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

	bpf_flush_icache(header, ctx.image + ctx.idx);

	set_memory_ro((unsigned long)header, header->pages);
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;
out:
	kfree(ctx.offset);
}
void bpf_jit_free(struct bpf_prog *prog)
{
	unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!prog->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(prog);
}