/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)

/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_R(25),
	/* temporary registers for internal BPF JIT */
	[TMP_REG_1] = A64_R(10),
	[TMP_REG_2] = A64_R(11),
	/* temporary register for blinding constants */
	[BPF_REG_AX] = A64_R(9),
};

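/*
 * Background on this mapping: AAPCS64 passes the first integer arguments
 * in x0-x7 and treats x19-x28 as callee-saved. Putting BPF_REG_1..5 in
 * x0-x4 lets BPF_CALL reach kernel helpers without shuffling arguments,
 * and keeping BPF_REG_6..9 in x19-x22 means any helper call preserves
 * them for free.
 */
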
struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;
	int epilogue_offset;
	int *offset;
	u32 *image;
};

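/*
 * The JIT makes two passes over the program (see bpf_int_jit_compile()):
 * a sizing pass with image == NULL, where emit() only advances idx, and
 * an emission pass where the same calls also write the instructions.
 */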
static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}

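/*
 * Load a 64-bit immediate with a MOVZ/MOVK sequence, 16 bits at a time,
 * skipping all-zero chunks. For example, val = 0x12345678 emits just:
 *	movz reg, #0x5678
 *	movk reg, #0x1234, lsl #16
 */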
static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
	tmp >>= 16;
	shift += 16;
	while (tmp) {
		if (tmp & 0xffff)
			emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
		tmp >>= 16;
		shift += 16;
	}
}

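/*
 * Load a 32-bit immediate, using MOVN for negative values so that small
 * negative constants need a single instruction. For example, val = -5
 * (lo = 0xfffb, hi = 0xffff) emits just "movn reg, #0x4", since MOVN
 * writes the bitwise NOT of its operand.
 */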
static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}

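/*
 * Branch offsets are in units of A64 instructions. ctx->offset[i] holds
 * the A64 index just past BPF instruction i (i.e. where instruction i+1
 * starts), as recorded by the sizing pass; A64 branch immediates are
 * relative to the branch instruction itself, hence the -1 below.
 */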
static inline int bpf2a64_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx)
{
	int to = ctx->offset[bpf_to];
	/* -1 to account for the Branch instruction */
	int from = ctx->offset[bpf_from] - 1;

	return to - from;
}

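/*
 * Holes in the binary image are filled with AARCH64_BREAK_FAULT (a BRK
 * encoding), so straying into padding traps instead of executing stray
 * bytes; validate_code() also relies on this value to detect failed
 * instruction encodings in the final image.
 */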
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}

/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)

#define _STACK_SIZE \
	(MAX_BPF_STACK \
	 + 4 /* extra for skb_copy_bits buffer */)

#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)

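/*
 * Worked example, assuming MAX_BPF_STACK is 512: _STACK_SIZE is 516, and
 * STACK_SIZE rounds it up to (516 + 15) & ~15 = 528, preserving the
 * 16-byte SP alignment the architecture requires.
 */
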
static void build_prologue(struct jit_ctx *ctx)
{
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];

	/*
	 * BPF prog stack layout
	 *
	 *                         high
	 * original A64_SP =>   0:+-----+ BPF prologue
	 *                        |FP/LR|
	 * current A64_FP =>  -16:+-----+
	 *                        | ... | callee saved registers
	 *                        +-----+
	 *                        |     | x25/x26
	 * BPF fp register => -64:+-----+ <= (BPF_FP)
	 *                        |     |
	 *                        | ... | BPF prog stack
	 *                        |     |
	 *                        +-----+ <= (BPF_FP - MAX_BPF_STACK)
	 *                        |RSVD | JIT scratchpad
	 * current A64_SP =>      +-----+ <= (BPF_FP - STACK_SIZE)
	 *                        |     |
	 *                        | ... | Function call stack
	 *                        |     |
	 *                        +-----+
	 *                          low
	 */

	/* Save FP and LR registers to stay aligned with ARM64 AAPCS */
	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);

	/* Save fp (x25) and x26. SP requires 16 bytes alignment */
	emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);

	/* Set up BPF prog stack base register (x25) */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Set up function call stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);

	/* Restore fp (x25) and x26 */
	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);

	/* Restore callee-saved registers */
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore FP/LR registers */
	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}

/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	u8 jmp_cond;
	s32 jmp_offset;

#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)
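/*
 * The bit widths match the A64 branch encodings used below: B.cond, CBZ
 * and CBNZ take a 19-bit signed instruction offset, unconditional B a
 * 26-bit one.
 */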
	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];

		/* if (src == 0) return 0 */
		jmp_offset = 3; /* skip ahead to else path */
		check_imm19(jmp_offset);
		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
		emit(A64_MOVZ(1, r0, 0, 0), ctx);
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		/* else */
		switch (BPF_OP(code)) {
		case BPF_DIV:
			emit(A64_UDIV(is64, dst, dst, src), ctx);
			break;
		case BPF_MOD:
			emit(A64_UDIV(is64, tmp, dst, src), ctx);
			emit(A64_MUL(is64, tmp, tmp, src), ctx);
			emit(A64_SUB(is64, dst, dst, tmp), ctx);
			break;
		}
		break;
	}
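	/*
	 * Note: AArch64 has no integer modulo instruction, so BPF_MOD is
	 * lowered above as dst -= (dst / src) * src via udiv+mul+sub, and
	 * the shared divide-by-zero guard makes both DIV and MOD return 0
	 * through the epilogue when src == 0.
	 */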
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			/* upper 32 bits already cleared */
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(A64_UXTW(is64, dst, dst), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ADD(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_AND(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ORR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_EOR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;
	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
		emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JSET:
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
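	/*
	 * Condition-code choice above: BPF compares are unsigned unless
	 * marked signed, so JGT/JGE map to the unsigned HI/CS conditions
	 * while JSGT/JSGE map to the signed GT/GE ones. JSET reuses the
	 * NE condition because TST sets Z exactly when (dst & src) == 0.
	 */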
	case BPF_JMP | BPF_JSET | BPF_X:
		emit(A64_TST(1, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_CMP(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_TST(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		const u64 func = (u64)__bpf_call_base + imm;

		emit_a64_mov_i64(tmp, func, ctx);
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
		break;
	}
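	/*
	 * The PUSH/MOV/.../POP around BLR above keeps a valid AAPCS frame
	 * record (an FP/LR pair) while the helper runs, and copies the
	 * helper's return value from x0 into the register backing
	 * BPF_REG_0 (x7).
	 */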
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT,
		   simply fallthrough to epilogue. */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		if (insn1.code != 0 || insn1.src_reg != 0 ||
		    insn1.dst_reg != 0 || insn1.off != 0) {
			/* Note: verifier in BPF core must catch invalid
			 * instructions.
			 */
			pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
			return -EINVAL;
		}

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}
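	/*
	 * BPF_LD_IMM64 is the one 16-byte eBPF instruction: the second
	 * 8-byte slot only carries the upper 32 immediate bits. Returning
	 * 1 (rather than falling through to "return 0") tells build_body()
	 * to skip that second slot.
	 */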
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}
		break;
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		/* Load imm to a register then store it */
		emit_a64_mov_i(1, tmp2, off, ctx);
		emit_a64_mov_i(1, tmp, imm, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(tmp, dst, tmp2), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(tmp, dst, tmp2), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(tmp, dst, tmp2), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(tmp, dst, tmp2), ctx);
			break;
		}
		break;
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		goto notyet;
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
	{
		const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
		const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
		const u8 fp = bpf2a64[BPF_REG_FP];
		const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
		const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
		const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
		const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
		const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
		int size;

		emit(A64_MOV(1, r1, r6), ctx);
		emit_a64_mov_i(0, r2, imm, ctx);
		if (BPF_MODE(code) == BPF_IND)
			emit(A64_ADD(0, r2, r2, src), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			size = 4;
			break;
		case BPF_H:
			size = 2;
			break;
		case BPF_B:
			size = 1;
			break;
		default:
			return -EINVAL;
		}
		emit_a64_mov_i64(r3, size, ctx);
		emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(r5), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

		jmp_offset = epilogue_offset(ctx);
		check_imm19(jmp_offset);
		emit(A64_CBZ(1, r0, jmp_offset), ctx);
		emit(A64_MOV(1, r5, r0), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV32(0, r0, r0), ctx);
#endif
			break;
		case BPF_H:
			emit(A64_LDRH(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV16(0, r0, r0), ctx);
#endif
			break;
		case BPF_B:
			emit(A64_LDRB(r0, r5, A64_ZR), ctx);
			break;
		}
		break;
	}
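	/*
	 * The sequence above calls bpf_load_pointer(skb, k, size, buffer):
	 * it returns a pointer either into the packet or into the on-stack
	 * buffer (carved out of the JIT scratchpad below BPF_FP), or NULL
	 * on an out-of-bounds access; the CBZ on r0 then exits the program
	 * with return value 0. The REV fixups convert the loaded word from
	 * network byte order on little-endian kernels.
	 */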
notyet:
	pr_info_once("*** NOT YET: opcode %02x ***\n", code);
	return -EFAULT;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = build_insn(insn, ctx);

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;
		if (ret > 0) {
			i++;
			continue;
		}
		if (ret)
			return ret;
	}

	return 0;
}

static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		u32 a64_insn = le32_to_cpu(ctx->image[i]);

		if (a64_insn == AARCH64_BREAK_FAULT)
			return -1;
	}

	return 0;
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

void bpf_jit_compile(struct bpf_prog *prog)
{
	/* Nothing to do here. We support Internal BPF. */
}

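/*
 * JIT entry point. The flow is: optionally blind constants, run a sizing
 * pass (image == NULL) to learn per-instruction offsets and the total
 * image size, allocate the binary image, run the emission pass, then
 * validate that no AARCH64_BREAK_FAULT words were emitted before
 * publishing the image read-only.
 */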
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	bool tmp_blinded = false;
	struct jit_ctx ctx;
	int image_size;
	u8 *image_ptr;

	if (!bpf_jit_enable)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out;
	}

	/* 1. Initial fake pass to compute ctx->idx. */

	/* Fake pass to fill in ctx->offset. */
	if (build_body(&ctx)) {
		prog = orig_prog;
		goto out_off;
	}

	build_prologue(&ctx);

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	/* Now we know the actual image size. */
	image_size = sizeof(u32) * ctx.idx;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/* 2. Now, the actual pass. */

	ctx.image = (u32 *)image_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);

	if (build_body(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code. */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

	bpf_flush_icache(header, ctx.image + ctx.idx);

	set_memory_ro((unsigned long)header, header->pages);
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;

out_off:
	kfree(ctx.offset);
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}

void bpf_jit_free(struct bpf_prog *prog)
{
	unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!prog->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(prog);
}