/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>

#include "bpf_jit.h"
int bpf_jit_enable __read_mostly;
#define TMP_REG_1 (MAX_BPF_REG + 0)
#define TMP_REG_2 (MAX_BPF_REG + 1)
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_FP,
	/* temporary register for internal BPF JIT */
	[TMP_REG_1] = A64_R(23),
	[TMP_REG_2] = A64_R(24),
};
struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;
	int tmp_used;
	int epilogue_offset;
	int *offset;
	u32 *image;
};
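/*
 * emit() serves both JIT passes: during the first (sizing) pass
 * ctx->image is still NULL, so only ctx->idx advances; during the
 * second pass the instruction is also written into the image buffer.
 */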
static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}
static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
	tmp >>= 16;
	shift += 16;
	while (tmp) {
		if (tmp & 0xffff)
			emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
		tmp >>= 16;
		shift += 16;
	}
}
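/*
 * 32-bit immediates are built in at most two instructions: MOVN (+MOVK)
 * when the upper half-word looks sign-extended, otherwise MOVZ followed
 * by MOVK only if the upper half-word is non-zero.
 */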
static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}
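/*
 * Branch offsets are expressed in A64 instructions, not bytes.
 * ctx->offset[] holds the A64 instruction index recorded for each BPF
 * instruction during the first pass (see build_body()).
 */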
static inline int bpf2a64_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx)
{
	int to = ctx->offset[bpf_to + 1];
	/* -1 to account for the Branch instruction */
	int from = ctx->offset[bpf_from + 1] - 1;

	return to - from;
}
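/*
 * Unused space in the JIT image is filled with AARCH64_BREAK_FAULT so
 * that a stray jump into a hole traps instead of executing garbage.
 */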
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;

	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}
/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
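/*
 * Worked example (assuming MAX_BPF_STACK is 512): the prologue below
 * reserves STACK_ALIGN(512 + 4) = 528 bytes, keeping SP 16-byte aligned
 * as the AArch64 ABI requires.
 */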
static void build_prologue(struct jit_ctx *ctx)
{
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 ra = bpf2a64[BPF_REG_A];
	const u8 rx = bpf2a64[BPF_REG_X];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	int stack_size = MAX_BPF_STACK;

	stack_size += 4; /* extra for skb_copy_bits buffer */
	stack_size = STACK_ALIGN(stack_size);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	if (ctx->tmp_used)
		emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);

	/* Set up BPF stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);

	/* Set up frame pointer */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Clear registers A and X */
	emit_a64_mov_i64(ra, 0, ctx);
	emit_a64_mov_i64(rx, 0, ctx);
}
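/*
 * build_epilogue() must mirror build_prologue(): release the same
 * aligned stack_size and pop the callee-saved pairs in the reverse
 * order of the pushes above, otherwise SP and the callee-saved
 * registers would be corrupted on return.
 */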
static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	int stack_size = MAX_BPF_STACK;

	stack_size += 4; /* extra for skb_copy_bits buffer */
	stack_size = STACK_ALIGN(stack_size);

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);

	/* Restore callee-saved registers */
	if (ctx->tmp_used)
		emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore frame pointer */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}
/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	u8 jmp_cond;
	s32 jmp_offset;

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit(A64_UDIV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		ctx->tmp_used = 1;
		emit(A64_UDIV(is64, tmp, dst, src), ctx);
		emit(A64_MUL(is64, tmp, tmp, src), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			break;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			break;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ADD(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_AND(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ORR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_EOR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;
#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)
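/*
 * A64 conditional branches encode a signed 19-bit instruction offset and
 * unconditional B a signed 26-bit offset; check_imm19()/check_imm26()
 * reject jumps that the fixed-width encodings cannot reach.
 */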
	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
		emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JNE:
		case BPF_JSET:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
		emit(A64_TST(1, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_CMP(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_TST(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		const u64 func = (u64)__bpf_call_base + imm;

		ctx->tmp_used = 1;
		emit_a64_mov_i64(tmp, func, ctx);
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
		break;
	}
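	/*
	 * Note: eBPF call targets are encoded as an offset from
	 * __bpf_call_base, so "func" above resolves to the in-kernel
	 * helper. FP/LR are saved around the BLR so the JITed frame
	 * survives the call, and the helper's return value is moved
	 * from x0 into the register mapped to BPF_REG_0.
	 */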
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT,
		   simply fallthrough to epilogue. */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		if (insn1.code != 0 || insn1.src_reg != 0 ||
		    insn1.dst_reg != 0 || insn1.off != 0) {
			/* Note: verifier in BPF core must catch invalid
			 * instructions.
			 */
			pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
			return -EINVAL;
		}

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}
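	/*
	 * BPF_LD_IMM64 is the only 16-byte eBPF instruction: the upper
	 * 32 bits of the constant travel in the following struct
	 * bpf_insn, so this case validates insn[1] and returns 1 to let
	 * build_body() skip it.
	 */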
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}
		break;
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		goto notyet;
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		goto notyet;
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
	{
		const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
		const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
		const u8 fp = bpf2a64[BPF_REG_FP];
		const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
		const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
		const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
		const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
		const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
		int size;

		emit(A64_MOV(1, r1, r6), ctx);
		emit_a64_mov_i(0, r2, imm, ctx);
		if (BPF_MODE(code) == BPF_IND)
			emit(A64_ADD(0, r2, r2, src), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			size = 4;
			break;
		case BPF_H:
			size = 2;
			break;
		case BPF_B:
			size = 1;
			break;
		default:
			return -EINVAL;
		}
		emit_a64_mov_i64(r3, size, ctx);
		emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx);
		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(r5), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
		jmp_offset = epilogue_offset(ctx);
		check_imm19(jmp_offset);
		emit(A64_CBZ(1, r0, jmp_offset), ctx);
		emit(A64_MOV(1, r5, r0), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV32(0, r0, r0), ctx);
#endif
			break;
		case BPF_H:
			emit(A64_LDRH(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV16(0, r0, r0), ctx);
#endif
			break;
		case BPF_B:
			emit(A64_LDRB(r0, r5, A64_ZR), ctx);
			break;
		}
		break;
	}

notyet:
		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
		return -EFAULT;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}
static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;

		ret = build_insn(insn, ctx);
		if (ret > 0) {
			i++;
			continue;
		}
		if (ret)
			return ret;
	}

	return 0;
}
static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
void bpf_jit_compile(struct bpf_prog *prog)
{
	/* Nothing to do here. We support Internal BPF. */
}
void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header;
	struct jit_ctx ctx;
	int image_size;
	u8 *image_ptr;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL)
		return;

	/* 1. Initial fake pass to compute ctx->idx. */

	/* Fake pass to fill in ctx->offset and ctx->tmp_used. */
	if (build_body(&ctx))
		goto out;

	build_prologue(&ctx);

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	/* Now we know the actual image size. */
	image_size = sizeof(u32) * ctx.idx;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL)
		goto out;

	/* 2. Now, the actual pass. */

	ctx.image = (u32 *)image_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);

	if (build_body(&ctx)) {
		bpf_jit_binary_free(header);
		goto out;
	}

	build_epilogue(&ctx);

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

	bpf_flush_icache(ctx.image, ctx.image + ctx.idx);

	set_memory_ro((unsigned long)header, header->pages);
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;

out:
	kfree(ctx.offset);
}
void bpf_jit_free(struct bpf_prog *prog)
{
	unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!prog->jited)
		goto free;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free:
	bpf_prog_unlock_free(prog);
}