/* arch/mips/net/bpf_jit_asm.S */
1 /*
2 * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
3 * compiler.
4 *
5 * Copyright (C) 2015 Imagination Technologies Ltd.
6 * Author: Markos Chandras <markos.chandras@imgtec.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; version 2 of the License.
11 */
12
13 #include <asm/asm.h>
14 #include <asm/regdef.h>
15 #include "bpf_jit.h"
16
17 /* ABI
18 *
19 * r_skb_hl skb header length
20 * r_skb_data skb data
21 * r_off(a1) offset register
22 * r_A BPF register A
23 * r_X BPF register X
24 * r_skb(a0) *skb
25 * r_M *scratch memory
26 * r_skb_len skb length
27 * r_s0 Scratch register 0
28 * r_s1 Scratch register 1
29 *
30 * On entry:
31 * a0: *skb
32 * a1: offset (imm or imm + X)
33 *
34 * All non-BPF-ABI registers are free for use. On return, we only
35 * care about r_ret. The BPF-ABI registers are assumed to remain
36 * unmodified during the entire filter operation.
37 */
38
/* Shorthand for the o32/n64 argument registers used throughout */
39 #define skb a0
40 #define offset a1
41 #define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */
42
43 /* We know better :) so prevent assembler reordering etc */
44 .set noreorder
45
/*
 * is_offset_negative(TYPE): branch to bpf_slow_path_TYPE_neg when the
 * offset argument is negative.  Clobbers t0.  The instruction that
 * follows the macro expansion executes in the branch delay slot.
 */
46 #define is_offset_negative(TYPE) \
47 /* If offset is negative we have more work to do */ \
48 slti t0, offset, 0; \
49 bgtz t0, bpf_slow_path_##TYPE##_neg; \
50 /* Be careful what follows in DS. */
51
/*
 * is_offset_in_header(SIZE, TYPE): branch to bpf_slow_path_TYPE unless
 * SIZE bytes starting at 'offset' fit entirely within the skb header
 * length ($r_skb_hl).  Clobbers $r_s0 and t0.  The instruction that
 * follows the macro expansion executes in the branch delay slot.
 */
52 #define is_offset_in_header(SIZE, TYPE) \
53 /* Reading from header? */ \
54 addiu $r_s0, $r_skb_hl, -SIZE; \
55 slt t0, $r_s0, offset; \
56 bgtz t0, bpf_slow_path_##TYPE; \
57
/*
 * sk_load_word(skb (a0), offset (a1)): load a 32-bit word from the
 * packet into $r_A, converting network to host byte order.  Negative
 * or out-of-header offsets divert to the slow/negative paths; on the
 * fast path $r_ret is cleared (success) in the return delay slot.
 */
58 LEAF(sk_load_word)
59 is_offset_negative(word)
60 FEXPORT(sk_load_word_positive)
61 is_offset_in_header(4, word)
62 /* Offset within header boundaries */
63 PTR_ADDU t1, $r_skb_data, offset
64 .set reorder
65 lw $r_A, 0(t1)
66 .set noreorder
67 #ifdef CONFIG_CPU_LITTLE_ENDIAN
68 # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
/* R2+: byte-swap within halfwords, then rotate = full 32-bit swap */
69 wsbh t0, $r_A
70 rotr $r_A, t0, 16
71 # else
/* Pre-R2: open-coded 32-bit byte swap using shifts and masks */
72 sll t0, $r_A, 24
73 srl t1, $r_A, 24
74 srl t2, $r_A, 8
75 or t0, t0, t1
76 andi t2, t2, 0xff00
77 andi t1, $r_A, 0xff00
78 or t0, t0, t2
79 sll t1, t1, 8
80 or $r_A, t0, t1
81 # endif
82 #endif
/* Success: $r_ret = 0 is set in the return delay slot */
83 jr $r_ra
84 move $r_ret, zero
85 END(sk_load_word)
86
/*
 * sk_load_half(skb (a0), offset (a1)): load a 16-bit halfword from the
 * packet into $r_A, converting network to host byte order.  Classic
 * BPF half loads are unsigned, so the result must be zero-extended —
 * matching the negative path, which uses lhu.  The previous
 * lh (+ wsbh/seh on LE) sequence sign-extended values with bit 15 set.
 * Negative or out-of-header offsets divert to the slow/negative paths;
 * on success $r_ret is cleared in the return delay slot.
 */
LEAF(sk_load_half)
	is_offset_negative(half)
FEXPORT(sk_load_half_positive)
	is_offset_in_header(2, half)
	/* Offset within header boundaries */
	PTR_ADDU	t1, $r_skb_data, offset
	.set	reorder
	lhu	$r_A, 0(t1)
	.set	noreorder
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
	/* Swap the two data bytes; the upper 16 bits are already zero */
	wsbh	$r_A, $r_A
# else
	/* Open-coded byte swap of the zero-extended halfword */
	sll	t0, $r_A, 8
	srl	t1, $r_A, 8
	andi	t0, t0, 0xff00
	or	$r_A, t0, t1
# endif
#endif
	/* Success: $r_ret = 0 is set in the return delay slot */
	jr	$r_ra
	move	$r_ret, zero
	END(sk_load_half)
111
/*
 * sk_load_byte(skb (a0), offset (a1)): load one byte from the packet
 * into $r_A.  Classic BPF byte loads are unsigned, so use lbu
 * (zero-extend), matching the negative path — the previous lb
 * sign-extended values >= 0x80.  Negative or out-of-header offsets
 * divert to the slow/negative paths; on success $r_ret is cleared in
 * the return delay slot.
 */
LEAF(sk_load_byte)
	is_offset_negative(byte)
FEXPORT(sk_load_byte_positive)
	is_offset_in_header(1, byte)
	/* Offset within header boundaries */
	PTR_ADDU	t1, $r_skb_data, offset
	lbu	$r_A, 0(t1)
	/* Success: $r_ret = 0 is set in the return delay slot */
	jr	$r_ra
	move	$r_ret, zero
	END(sk_load_byte)
122
123 /*
124 * call skb_copy_bits:
125 * (prototype in linux/skbuff.h)
126 *
127 * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
128 *
129 * o32 mandates we leave 4 spaces for argument registers in case
130 * the callee needs to use them. Even though we don't care about
131 * the argument registers ourselves, we need to allocate that space
132 * to remain ABI compliant since the callee may want to use that space.
133 * We also allocate 2 more spaces for $r_ra and our return register (*to).
134 *
135 * n64 is a bit different. The *caller* will allocate the space to preserve
136 * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no
137 * good reason but it does not matter that much really.
138 *
139 * (void *to) is returned in r_s0
140 *
141 */
/*
 * bpf_slow_path_common(SIZE): bounds-check 'offset' against the full
 * skb length, then call skb_copy_bits() to pull SIZE bytes from the
 * (possibly non-linear) skb into a stack slot, read back into $r_s0.
 * Frame is 6 * SZREG: per the block comment above, slots 0-3 are the
 * o32 argument save area for the callee, slot 4 holds the copied
 * data, slot 5 saves $r_ra.  Branches to 'fault' on a bad offset or a
 * negative return from skb_copy_bits; otherwise clears $r_ret.
 *
 * NOTE(review): a2 (the 'to' argument of skb_copy_bits) is set to
 * $r_sp, yet the slot that is zeroed and read back into $r_s0 is
 * (4 * SZREG)($r_sp).  These look inconsistent with each other and
 * with the frame-layout comment above — verify against the upstream
 * tree before relying on this text.
 */
142 #define bpf_slow_path_common(SIZE) \
143 /* Quick check. Are we within reasonable boundaries? */ \
144 LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \
145 sltu $r_s0, offset, $r_s1; \
146 beqz $r_s0, fault; \
147 /* Load 4th argument in DS */ \
148 LONG_ADDIU a3, zero, SIZE; \
149 PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
150 PTR_LA t0, skb_copy_bits; \
151 PTR_S $r_ra, (5 * SZREG)($r_sp); \
152 /* Assign low slot to a2 */ \
153 move a2, $r_sp; \
154 jalr t0; \
155 /* Reset our destination slot (DS but it's ok) */ \
156 INT_S zero, (4 * SZREG)($r_sp); \
157 /* \
158 * skb_copy_bits returns 0 on success and -EFAULT \
159 * on error. Our data live in a2. Do not bother with \
160 * our data if an error has been returned. \
161 */ \
162 /* Restore our frame */ \
163 PTR_L $r_ra, (5 * SZREG)($r_sp); \
164 INT_L $r_s0, (4 * SZREG)($r_sp); \
165 bltz v0, fault; \
166 PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
167 move $r_ret, zero; \
168
/*
 * Slow path for 32-bit loads: pull 4 bytes via skb_copy_bits() into
 * $r_s0, then byte-swap into $r_A on little-endian kernels.
 */
169 NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
170 bpf_slow_path_common(4)
171 #ifdef CONFIG_CPU_LITTLE_ENDIAN
172 # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
/* R2+: wsbh + rotr = full 32-bit swap; rotr runs in the delay slot */
173 wsbh t0, $r_s0
174 jr $r_ra
175 rotr $r_A, t0, 16
176 # else
/* Pre-R2: open-coded byte swap; the final 'or' runs in the delay slot */
177 sll t0, $r_s0, 24
178 srl t1, $r_s0, 24
179 srl t2, $r_s0, 8
180 or t0, t0, t1
181 andi t2, t2, 0xff00
182 andi t1, $r_s0, 0xff00
183 or t0, t0, t2
184 sll t1, t1, 8
185 jr $r_ra
186 or $r_A, t0, t1
187 # endif
188 #else
/* Big-endian: the copied word is already in network byte order */
189 jr $r_ra
190 move $r_A, $r_s0
191 #endif
192
193 END(bpf_slow_path_word)
194
/*
 * Slow path for 16-bit loads: pull 2 bytes via skb_copy_bits() into
 * $r_s0, then byte-swap the halfword into $r_A on little-endian
 * kernels.
 */
195 NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
196 bpf_slow_path_common(2)
197 #ifdef CONFIG_CPU_LITTLE_ENDIAN
198 # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
/* R2+: wsbh swaps the two data bytes; runs in the delay slot */
199 jr $r_ra
200 wsbh $r_A, $r_s0
201 # else
/* Pre-R2: open-coded halfword swap; final 'or' runs in the delay slot */
202 sll t0, $r_s0, 8
203 andi t1, $r_s0, 0xff00
204 andi t0, t0, 0xff00
205 srl t1, t1, 8
206 jr $r_ra
207 or $r_A, t0, t1
208 # endif
209 #else
/* Big-endian: no swap needed */
210 jr $r_ra
211 move $r_A, $r_s0
212 #endif
213
214 END(bpf_slow_path_half)
215
/*
 * Slow path for 8-bit loads: pull 1 byte via skb_copy_bits() into
 * $r_s0; a single byte needs no byte-swapping.
 */
216 NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
217 bpf_slow_path_common(1)
218 jr $r_ra
219 move $r_A, $r_s0
220
221 END(bpf_slow_path_byte)
222
223 /*
224 * Negative entry points
225 */
/*
 * Fault if the (negative) offset lies below even the link-layer
 * range, i.e. offset < SKF_LL_OFF.  Clobbers t0 and t1; the
 * instruction after the macro executes in the branch delay slot.
 */
226 .macro bpf_is_end_of_data
227 li t0, SKF_LL_OFF
228 /* Reading link layer data? */
229 slt t1, offset, t0
230 bgtz t1, fault
231 /* Be careful what follows in DS. */
232 .endm
233 /*
234 * call bpf_internal_load_pointer_neg_helper:
235 * (prototype in linux/filter.h)
236 *
237 * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
238 * int k, unsigned int size)
239 *
240 * see above (bpf_slow_path_common) for ABI restrictions
241 */
/*
 * bpf_negative_common(SIZE): call
 * bpf_internal_load_pointer_neg_helper(skb (a0), offset (a1), SIZE)
 * to resolve a negative SKF_* offset.  Uses the same 6 * SZREG frame
 * as bpf_slow_path_common, saving only $r_ra in slot 5; a2 is loaded
 * in the jalr delay slot.  Branches to 'fault' on a NULL return
 * (stack already restored in the branch delay slot); otherwise leaves
 * the resolved data pointer in $r_s0 and clears $r_ret.
 */
242 #define bpf_negative_common(SIZE) \
243 PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
244 PTR_LA t0, bpf_internal_load_pointer_neg_helper; \
245 PTR_S $r_ra, (5 * SZREG)($r_sp); \
246 jalr t0; \
247 li a2, SIZE; \
248 PTR_L $r_ra, (5 * SZREG)($r_sp); \
249 /* Check return pointer */ \
250 beqz v0, fault; \
251 PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
252 /* Preserve our pointer */ \
253 move $r_s0, v0; \
254 /* Set return value */ \
255 move $r_ret, zero; \
256
/*
 * Negative-offset entry for word loads: fault when the offset is below
 * SKF_LL_OFF, otherwise resolve the pointer via the negative-offset
 * helper and load the word in the return delay slot (no byte swap is
 * performed on this path).
 */
257 bpf_slow_path_word_neg:
258 bpf_is_end_of_data
259 NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
260 bpf_negative_common(4)
261 jr $r_ra
262 lw $r_A, 0($r_s0)
263 END(sk_load_word_negative)
264
/*
 * Negative-offset entry for halfword loads: fault when the offset is
 * below SKF_LL_OFF, otherwise resolve the pointer via the
 * negative-offset helper and load the halfword zero-extended (lhu) in
 * the return delay slot (no byte swap is performed on this path).
 */
265 bpf_slow_path_half_neg:
266 bpf_is_end_of_data
267 NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
268 bpf_negative_common(2)
269 jr $r_ra
270 lhu $r_A, 0($r_s0)
271 END(sk_load_half_negative)
272
/*
 * Negative-offset entry for byte loads: fault when the offset is below
 * SKF_LL_OFF, otherwise resolve the pointer via the negative-offset
 * helper and load the byte zero-extended (lbu) in the return delay
 * slot.
 */
273 bpf_slow_path_byte_neg:
274 bpf_is_end_of_data
275 NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
276 bpf_negative_common(1)
277 jr $r_ra
278 lbu $r_A, 0($r_s0)
279 END(sk_load_byte_negative)
280
/*
 * Common failure exit for all load helpers: return non-zero in $r_ret
 * (set in the return delay slot) so the JIT'ed caller aborts the
 * filter.
 */
281 fault:
282 jr $r_ra
283 addiu $r_ret, zero, 1