/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

#define TMP_REG_1 (MAX_BPF_REG + 0)
#define TMP_REG_2 (MAX_BPF_REG + 1)

/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_FP,
	/* temporary registers for internal BPF JIT */
	[TMP_REG_1] = A64_R(23),
	[TMP_REG_2] = A64_R(24),
};
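
/*
 * Note on the mapping above (it follows the AAPCS64 calling convention):
 * eBPF arguments R1-R5 land directly in the A64 argument registers
 * x0-x4, so in-kernel helpers can be invoked without shuffling
 * arguments. R6-R9 live in x19-x22, which AAPCS64 makes callee-saved,
 * so they survive helper calls. BPF_REG_0 lives in x7; call sites
 * copy the AAPCS64 return register x0 into x7 after each call.
 */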

struct jit_ctx {
	const struct bpf_prog *prog;	/* program being JITed */
	int idx;			/* index of the next A64 instruction slot */
	int tmp_used;			/* set when TMP_REG_1/TMP_REG_2 are needed */
	int epilogue_offset;		/* A64 offset of the epilogue */
	int *offset;			/* A64 start offset of each BPF instruction */
	u32 *image;			/* NULL during the initial sizing pass */
};

static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}

static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
	tmp >>= 16;
	shift += 16;
	while (tmp) {
		if (tmp & 0xffff)
			emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
		tmp >>= 16;
		shift += 16;
	}
}
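/*
 * Illustrative example of the MOVZ/MOVK sequence above: loading
 * val == 0x0000123400005678 emits
 *	movz	reg, #0x5678
 *	movk	reg, #0x1234, lsl #32
 * The all-zero 16-bit chunk at bits 16-31 is skipped entirely.
 */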

/* Emit a 32-bit immediate move, using MOVN for negative values so
 * that at most two instructions are needed.
 */
static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}
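/*
 * Illustrative: emit_a64_mov_i(0, reg, -5, ctx) emits a single
 *	movn	wreg, #0x0004
 * since MOVN writes the bitwise NOT of its immediate, giving
 * 0xfffffffb == -5.
 */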

static inline int bpf2a64_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx)
{
	/* BPF JMP offsets are relative to the next instruction,
	 * hence the +1: the real target is BPF insn (bpf_to + 1).
	 */
	int to = ctx->offset[bpf_to + 1];
	/* -1 to account for the branch instruction: it is the last
	 * A64 insn emitted for the jump, i.e. the one just before
	 * the start of BPF insn (bpf_from + 1), and A64 branches
	 * encode offsets relative to their own address.
	 */
	int from = ctx->offset[bpf_from + 1] - 1;

	return to - from;
}
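/*
 * Illustrative sanity check: a BPF_JA with off == 0 jumps to the next
 * instruction. Here to == ctx->offset[i + 1] and
 * from == ctx->offset[i + 1] - 1, so the emitted A64 branch offset is
 * 1 instruction, as expected.
 */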

/* Fill image padding with BRK instructions, so that anything that
 * strays into a hole traps instead of executing stale bytes.
 */
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}

/* The stack must be kept 16-byte aligned */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
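/* Illustrative: with MAX_BPF_STACK == 512, the prologue below asks
 * for 512 + 4 == 516 bytes, and STACK_ALIGN(516) rounds up to 528.
 */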

static void build_prologue(struct jit_ctx *ctx)
{
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 ra = bpf2a64[BPF_REG_A];
	const u8 rx = bpf2a64[BPF_REG_X];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	int stack_size = MAX_BPF_STACK;

	stack_size += 4; /* extra for skb_copy_bits buffer */
	stack_size = STACK_ALIGN(stack_size);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	if (ctx->tmp_used)
		emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);

	/* Set up BPF stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);

	/* Set up frame pointer */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Clear registers A and X: converted classic BPF programs
	 * expect them to start out as zero.
	 */
	emit_a64_mov_i64(ra, 0, ctx);
	emit_a64_mov_i64(rx, 0, ctx);
}
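
/*
 * Resulting stack layout (sketch; the tmp1/tmp2 pair is only pushed
 * when the program needed the JIT's scratch registers):
 *
 *	high
 *	 +--------------------+
 *	 | saved r6, r7       |  x19, x20
 *	 | saved r8, r9       |  x21, x22
 *	 | (saved tmp1, tmp2) |  x23, x24, optional
 *	 +--------------------+
 *	 |     BPF stack      |  stack_size bytes
 *	 +--------------------+ <-- SP, BPF_REG_FP
 *	low
 */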

static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	int stack_size = MAX_BPF_STACK;

	stack_size += 4; /* extra for skb_copy_bits buffer */
	stack_size = STACK_ALIGN(stack_size);

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);

	/* Restore callee-saved registers */
	if (ctx->tmp_used)
		emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore frame pointer */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}

/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	u8 jmp_cond;
	s32 jmp_offset;

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit(A64_UDIV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		/* A64 has no remainder instruction, so compute
		 * dst % src as dst - (dst / src) * src.
		 */
		ctx->tmp_used = 1;
		emit(A64_UDIV(is64, tmp, dst, src), ctx);
		emit(A64_MUL(is64, tmp, tmp, src), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			break;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			break;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		/* A64 ALU immediates have limited encodings, so the
		 * immediate is always materialized into a scratch
		 * register first. Likewise for the other OP=K cases.
		 */
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ADD(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_AND(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ORR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_EOR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;

/* Reject PC-relative offsets that will not fit in the branch
 * instruction's immediate field.
 */
#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)

	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
		emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
		emit(A64_TST(1, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_CMP(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_TST(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		const u64 func = (u64)__bpf_call_base + imm;

		ctx->tmp_used = 1;
		emit_a64_mov_i64(tmp, func, ctx);
		/* Save FP and LR: BLR clobbers LR */
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(tmp), ctx);
		/* Copy the AAPCS64 return register into BPF_REG_0 */
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
		break;
	}
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when the last instruction is EXIT,
		 * simply fall through to the epilogue.
		 */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		if (insn1.code != 0 || insn1.src_reg != 0 ||
		    insn1.dst_reg != 0 || insn1.off != 0) {
			/* Note: verifier in BPF core must catch invalid
			 * instructions.
			 */
			pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
			return -EINVAL;
		}

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		/* BPF_LD_IMM64 spans two 8-byte slots; returning 1
		 * tells build_body() to skip the second half.
		 */
		return 1;
	}
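
	/* Illustrative: for the pair insn[0].imm == 0x11223344 and
	 * insn[1].imm == 0x55667788, the fold above yields
	 * imm64 == 0x5566778811223344.
	 */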

	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}
		break;

	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		goto notyet;

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		goto notyet;

	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
	{
		const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
		const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
		const u8 fp = bpf2a64[BPF_REG_FP];
		const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
		const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
		const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
		const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
		const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
		int size;

		emit(A64_MOV(1, r1, r6), ctx);
		emit_a64_mov_i(0, r2, imm, ctx);
		if (BPF_MODE(code) == BPF_IND)
			emit(A64_ADD(0, r2, r2, src), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			size = 4;
			break;
		case BPF_H:
			size = 2;
			break;
		case BPF_B:
			size = 1;
			break;
		default:
			return -EINVAL;
		}
		emit_a64_mov_i64(r3, size, ctx);
		emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx);
		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(r5), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

		/* bpf_load_pointer() returns NULL when the load would
		 * run past the end of the packet; in that case bail
		 * out through the epilogue with R0 == 0.
		 */
		jmp_offset = epilogue_offset(ctx);
		check_imm19(jmp_offset);
		emit(A64_CBZ(1, r0, jmp_offset), ctx);
		emit(A64_MOV(1, r5, r0), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV32(0, r0, r0), ctx);
#endif
			break;
		case BPF_H:
			emit(A64_LDRH(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV16(0, r0, r0), ctx);
#endif
			break;
		case BPF_B:
			emit(A64_LDRB(r0, r5, A64_ZR), ctx);
			break;
		}
		break;
	}
notyet:
		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
		return -EFAULT;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;

		ret = build_insn(insn, ctx);
		if (ret > 0) {
			/* A 16-byte instruction (BPF_LD_IMM64) was
			 * JITed; skip over its second half.
			 */
			i++;
			continue;
		}
		if (ret)
			return ret;
	}

	return 0;
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

void bpf_jit_compile(struct bpf_prog *prog)
{
	/* Nothing to do here. We support internal BPF (eBPF), which
	 * is JITed via bpf_int_jit_compile() below.
	 */
}
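
/*
 * JIT entry point for eBPF. A sketch of the flow below: the JIT runs
 * in two passes. The first "fake" pass, with ctx.image == NULL, only
 * counts instructions, filling in ctx.offset[] and ctx.tmp_used and
 * sizing the image; the second pass then emits instructions for real
 * into the allocated buffer.
 */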
void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header;
	struct jit_ctx ctx;
	int image_size;
	u8 *image_ptr;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL)
		return;

	/* 1. Initial fake pass: compute ctx->idx, and fill in
	 * ctx->offset and ctx->tmp_used.
	 */
	if (build_body(&ctx))
		goto out;

	build_prologue(&ctx);

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	/* Now we know the actual image size. */
	image_size = sizeof(u32) * ctx.idx;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL)
		goto out;

	/* 2. Now, the actual pass. */
	ctx.image = (u32 *)image_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);

	if (build_body(&ctx)) {
		bpf_jit_binary_free(header);
		goto out;
	}

	build_epilogue(&ctx);

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

	bpf_flush_icache(ctx.image, ctx.image + ctx.idx);

	set_memory_ro((unsigned long)header, header->pages);
	prog->bpf_func = (void *)ctx.image;
	prog->jited = true;
out:
	kfree(ctx.offset);
}

void bpf_jit_free(struct bpf_prog *prog)
{
	/* bpf_jit_binary_alloc() places the header at the start of
	 * the page-aligned allocation, so masking bpf_func down to a
	 * page boundary recovers it.
	 */
	unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!prog->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(prog);
}