/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef _BPF_JIT_H
#define _BPF_JIT_H

#include <asm/insn.h>

/* 5-bit Register Operand */
#define A64_R(x)	AARCH64_INSN_REG_##x
#define A64_FP		AARCH64_INSN_REG_FP
#define A64_LR		AARCH64_INSN_REG_LR
#define A64_ZR		AARCH64_INSN_REG_ZR
#define A64_SP		AARCH64_INSN_REG_SP
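/* For illustration: A64_R() pastes its argument onto AARCH64_INSN_REG_,
 * so A64_R(7) names x7/w7. A JIT register map might read
 * bpf2a64[BPF_REG_0] = A64_R(7) (hypothetical mapping; any such table
 * is defined by the JIT proper, not by this header).
 */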
29 | ||
30 | #define A64_VARIANT(sf) \ | |
31 | ((sf) ? AARCH64_INSN_VARIANT_64BIT : AARCH64_INSN_VARIANT_32BIT) | |
32 | ||
33 | /* Compare & branch (immediate) */ | |
34 | #define A64_COMP_BRANCH(sf, Rt, offset, type) \ | |
35 | aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \ | |
36 | AARCH64_INSN_BRANCH_COMP_##type) | |
37 | #define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO) | |
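/* Branch offsets are given in instructions, not bytes; the "<< 2"
 * scales them to bytes, since every A64 instruction is 4 bytes wide.
 * For illustration, A64_CBZ(1, Rt, 2) branches two instructions
 * forward (a byte offset of 8) when Rt is zero.
 */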
38 | ||
39 | /* Conditional branch (immediate) */ | |
40 | #define A64_COND_BRANCH(cond, offset) \ | |
41 | aarch64_insn_gen_cond_branch_imm(0, offset, cond) | |
42 | #define A64_COND_EQ AARCH64_INSN_COND_EQ /* == */ | |
43 | #define A64_COND_NE AARCH64_INSN_COND_NE /* != */ | |
44 | #define A64_COND_CS AARCH64_INSN_COND_CS /* unsigned >= */ | |
45 | #define A64_COND_HI AARCH64_INSN_COND_HI /* unsigned > */ | |
46 | #define A64_COND_GE AARCH64_INSN_COND_GE /* signed >= */ | |
47 | #define A64_COND_GT AARCH64_INSN_COND_GT /* signed > */ | |
48 | #define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2) | |
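/* Only the condition codes the BPF JIT needs are aliased here. For
 * illustration, A64_B_(A64_COND_EQ, imm19) encodes "b.eq" to a target
 * imm19 instructions away, typically paired with A64_CMP or A64_TST
 * below to consume the flags they set.
 */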
49 | ||
50 | /* Unconditional branch (immediate) */ | |
51 | #define A64_BRANCH(offset, type) aarch64_insn_gen_branch_imm(0, offset, \ | |
52 | AARCH64_INSN_BRANCH_##type) | |
53 | #define A64_B(imm26) A64_BRANCH((imm26) << 2, NOLINK) | |
54 | #define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK) | |
55 | ||
56 | /* Unconditional branch (register) */ | |
57 | #define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK) | |
58 | #define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN) | |
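/* Sketch of a helper-function call (assumed usage, not defined here):
 * materialize the target address into a temporary register with the
 * move-wide macros below, branch-and-link through it, then return:
 *	A64_MOVZ/A64_MOVK ...; A64_BLR(tmp); ...; A64_RET(A64_LR)
 * where "tmp" is whatever scratch register the JIT reserves.
 */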
59 | ||
60 | /* Load/store register (register offset) */ | |
61 | #define A64_LS_REG(Rt, Rn, Rm, size, type) \ | |
62 | aarch64_insn_gen_load_store_reg(Rt, Rn, Rm, \ | |
63 | AARCH64_INSN_SIZE_##size, \ | |
64 | AARCH64_INSN_LDST_##type##_REG_OFFSET) | |
65 | #define A64_STRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, STORE) | |
66 | #define A64_LDRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, LOAD) | |
67 | #define A64_STRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, STORE) | |
68 | #define A64_LDRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, LOAD) | |
69 | #define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE) | |
70 | #define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD) | |
71 | #define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE) | |
72 | #define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD) | |
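/* For illustration, A64_LDRH(Wt, Xn, Xm) encodes "ldrh Wt, [Xn, Xm]",
 * a zero-extending 16-bit load from the address Xn + Xm; the store
 * forms mirror the loads.
 */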
73 | ||
74 | /* Load/store register pair */ | |
75 | #define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \ | |
76 | aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \ | |
77 | AARCH64_INSN_VARIANT_64BIT, \ | |
78 | AARCH64_INSN_LDST_##ls##_PAIR_##type) | |
79 | /* Rn -= 16; Rn[0] = Rt; Rn[8] = Rt2; */ | |
80 | #define A64_PUSH(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, -16, STORE, PRE_INDEX) | |
81 | /* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */ | |
82 | #define A64_POP(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX) | |
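/* Illustrative prologue/epilogue use:
 *	A64_PUSH(A64_FP, A64_LR, A64_SP)  "stp x29, x30, [sp, #-16]!"
 *	A64_POP(A64_FP, A64_LR, A64_SP)   "ldp x29, x30, [sp], #16"
 * The fixed +/-16 offset keeps the stack 16-byte aligned, as AAPCS64
 * requires.
 */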
83 | ||
84 | /* Add/subtract (immediate) */ | |
85 | #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \ | |
86 | aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \ | |
87 | A64_VARIANT(sf), AARCH64_INSN_ADSB_##type) | |
88 | /* Rd = Rn OP imm12 */ | |
89 | #define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD) | |
90 | #define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB) | |
91 | /* Rd = Rn */ | |
92 | #define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0) | |
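/* A64_MOV is the "add Rd, Rn, #0" alias of MOV. Unlike the ORR-based
 * alias, it also accepts the stack pointer, e.g.
 * A64_MOV(1, A64_FP, A64_SP) for "mov x29, sp".
 */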
93 | ||
94 | /* Bitfield move */ | |
95 | #define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \ | |
96 | aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \ | |
97 | A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type) | |
98 | /* Signed, with sign replication to left and zeros to right */ | |
99 | #define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED) | |
100 | /* Unsigned, with zeros to left and right */ | |
101 | #define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED) | |
102 | ||
103 | /* Rd = Rn << shift */ | |
104 | #define A64_LSL(sf, Rd, Rn, shift) ({ \ | |
105 | int sz = (sf) ? 64 : 32; \ | |
106 | A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \ | |
107 | }) | |
108 | /* Rd = Rn >> shift */ | |
109 | #define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31) | |
110 | /* Rd = Rn >> shift; signed */ | |
111 | #define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31) | |
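/* Worked example of the A64_LSL encoding: with sf=1 and shift=8,
 * immr = (unsigned)-8 % 64 = 56 and imms = 64 - 1 - 8 = 55, giving
 * "ubfm Rd, Rn, #56, #55", which is the canonical UBFM form of
 * "lsl Rd, Rn, #8".
 */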
112 | ||
113 | /* Move wide (immediate) */ | |
114 | #define A64_MOVEW(sf, Rd, imm16, shift, type) \ | |
115 | aarch64_insn_gen_movewide(Rd, imm16, shift, \ | |
116 | A64_VARIANT(sf), AARCH64_INSN_MOVEWIDE_##type) | |
117 | /* Rd = Zeros (for MOVZ); | |
118 | * Rd |= imm16 << shift (where shift is {0, 16, 32, 48}); | |
119 | * Rd = ~Rd; (for MOVN); */ | |
120 | #define A64_MOVN(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, INVERSE) | |
121 | #define A64_MOVZ(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, ZERO) | |
122 | #define A64_MOVK(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, KEEP) | |
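/* Sketch of building a full 64-bit immediate from these (the usual
 * move-wide sequence; when to emit it is the JIT proper's decision,
 * not this header's):
 *	A64_MOVZ(1, Rd, imm & 0xffff, 0);
 *	A64_MOVK(1, Rd, (imm >> 16) & 0xffff, 16);
 *	A64_MOVK(1, Rd, (imm >> 32) & 0xffff, 32);
 *	A64_MOVK(1, Rd, (imm >> 48) & 0xffff, 48);
 */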
123 | ||
124 | /* Add/subtract (shifted register) */ | |
125 | #define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \ | |
126 | aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \ | |
127 | A64_VARIANT(sf), AARCH64_INSN_ADSB_##type) | |
128 | /* Rd = Rn OP Rm */ | |
129 | #define A64_ADD(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD) | |
130 | #define A64_SUB(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB) | |
131 | #define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS) | |
132 | /* Rd = -Rm */ | |
133 | #define A64_NEG(sf, Rd, Rm) A64_SUB(sf, Rd, A64_ZR, Rm) | |
134 | /* Rn - Rm; set condition flags */ | |
135 | #define A64_CMP(sf, Rn, Rm) A64_SUBS(sf, A64_ZR, Rn, Rm) | |
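/* For illustration, A64_CMP(0, Wn, Wm) encodes "subs wzr, Wn, Wm",
 * i.e. "cmp Wn, Wm": discarding the result into the zero register
 * leaves only the condition flags for the A64_B_ conditions above.
 */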
136 | ||
137 | /* Data-processing (1 source) */ | |
138 | #define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \ | |
139 | A64_VARIANT(sf), AARCH64_INSN_DATA1_##type) | |
140 | /* Rd = BSWAPx(Rn) */ | |
141 | #define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16) | |
142 | #define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32) | |
143 | #define A64_REV64(Rd, Rn) A64_DATA1(1, Rd, Rn, REVERSE_64) | |
144 | ||
145 | /* Data-processing (2 source) */ | |
146 | /* Rd = Rn OP Rm */ | |
d65a634a ZSL |
147 | #define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \ |
148 | A64_VARIANT(sf), AARCH64_INSN_DATA2_##type) | |
149 | #define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV) | |
150 | #define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV) | |
151 | #define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV) | |
152 | #define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV) | |
e54bcde3 ZSL |
153 | |
154 | /* Data-processing (3 source) */ | |
155 | /* Rd = Ra + Rn * Rm */ | |
156 | #define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \ | |
157 | A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD) | |
158 | /* Rd = Rn * Rm */ | |
159 | #define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm) | |
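/* A64 has no dedicated MUL encoding: "mul Rd, Rn, Rm" is the alias
 * "madd Rd, Rn, Rm, zr", a multiply-add whose accumulator is tied to
 * the zero register, which is exactly what A64_MUL expands to.
 */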
160 | ||
161 | /* Logical (shifted register) */ | |
162 | #define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \ | |
163 | aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \ | |
164 | A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type) | |
165 | /* Rd = Rn OP Rm */ | |
166 | #define A64_AND(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND) | |
167 | #define A64_ORR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR) | |
168 | #define A64_EOR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR) | |
169 | #define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS) | |
170 | /* Rn & Rm; set condition flags */ | |
171 | #define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm) | |
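/* As with A64_CMP, A64_TST discards its result into the zero register
 * and keeps only the flags: A64_TST(1, Xn, Xm) encodes
 * "ands xzr, Xn, Xm", i.e. "tst Xn, Xm".
 */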
172 | ||
173 | #endif /* _BPF_JIT_H */ |