/* arch/sparc/net/bpf_jit_asm.S
 * (Linux kernel source; imported via a git web export of
 * tag 'modules-next-for-linus')
 */
/* Out-of-line packet-load helpers for the sparc BPF JIT; the
 * bpf_jit_load_* entry points below are called from JIT-generated
 * code (register names r_OFF, r_A, etc. come from bpf_jit.h).
 */
#include <asm/ptrace.h>

#include "bpf_jit.h"

#ifdef CONFIG_SPARC64
/* 64-bit: larger minimum stack frame, the stack pointer is biased,
 * and pointer tests must branch on the 64-bit %xcc condition codes.
 */
#define SAVE_SZ		176
#define SCRATCH_OFF	STACK_BIAS + 128
#define BE_PTR(label)	be,pn %xcc, label
#else
/* 32-bit: 96-byte minimum frame, no stack bias, plain %icc branch. */
#define SAVE_SZ		96
#define SCRATCH_OFF	72
#define BE_PTR(label)	be label
#endif

#define SKF_MAX_NEG_OFF	(-0x200000)	/* SKF_LL_OFF from filter.h */
16
	.text
	.globl	bpf_jit_load_word
bpf_jit_load_word:
	/* Load a 32-bit word from the packet at offset r_OFF into r_A.
	 * Register roles (defined in bpf_jit.h): r_OFF = requested
	 * offset, r_SKB_DATA = start of skb linear data, r_HEADLEN =
	 * linear data length, r_A = BPF accumulator (result).
	 * Negative offsets divert to the negative-offset slow path.
	 */
	cmp	r_OFF, 0
	bl	bpf_slow_path_word_neg
	 nop				/* branch delay slot */
	.globl	bpf_jit_load_word_positive_offset
bpf_jit_load_word_positive_offset:
	/* Take the slow path unless at least 4 bytes are available in
	 * the linear area, i.e. r_HEADLEN - r_OFF > 3.
	 */
	sub	r_HEADLEN, r_OFF, r_TMP
	cmp	r_TMP, 3
	ble	bpf_slow_path_word
	 add	r_SKB_DATA, r_OFF, r_TMP	/* delay slot: load address */
	/* A single ld is only legal on a 4-byte-aligned address. */
	andcc	r_TMP, 3, %g0
	bne	load_word_unaligned
	 nop
	retl
	 ld	[r_TMP], r_A			/* delay slot: aligned load */
load_word_unaligned:
	/* Assemble the word byte-by-byte, most-significant byte first
	 * (network/big-endian order).  r_OFF is dead here and serves
	 * as the accumulator for the partial result.
	 */
	ldub	[r_TMP + 0x0], r_OFF
	ldub	[r_TMP + 0x1], r_TMP2
	sll	r_OFF, 8, r_OFF
	or	r_OFF, r_TMP2, r_OFF
	ldub	[r_TMP + 0x2], r_TMP2
	sll	r_OFF, 8, r_OFF
	or	r_OFF, r_TMP2, r_OFF
	ldub	[r_TMP + 0x3], r_TMP2
	sll	r_OFF, 8, r_OFF
	retl
	 or	r_OFF, r_TMP2, r_A		/* delay slot: final byte merged */
46
	.globl	bpf_jit_load_half
bpf_jit_load_half:
	/* Load a 16-bit halfword from the packet at offset r_OFF into
	 * r_A.  Same structure as bpf_jit_load_word, with a 2-byte
	 * length/alignment requirement.
	 */
	cmp	r_OFF, 0
	bl	bpf_slow_path_half_neg
	 nop				/* branch delay slot */
	.globl	bpf_jit_load_half_positive_offset
bpf_jit_load_half_positive_offset:
	/* Need at least 2 linear bytes: r_HEADLEN - r_OFF > 1. */
	sub	r_HEADLEN, r_OFF, r_TMP
	cmp	r_TMP, 1
	ble	bpf_slow_path_half
	 add	r_SKB_DATA, r_OFF, r_TMP	/* delay slot: load address */
	/* lduh requires 2-byte alignment. */
	andcc	r_TMP, 1, %g0
	bne	load_half_unaligned
	 nop
	retl
	 lduh	[r_TMP], r_A
load_half_unaligned:
	/* Big-endian assembly: high byte first. */
	ldub	[r_TMP + 0x0], r_OFF
	ldub	[r_TMP + 0x1], r_TMP2
	sll	r_OFF, 8, r_OFF
	retl
	 or	r_OFF, r_TMP2, r_A	/* delay slot: merge low byte */
69
	.globl	bpf_jit_load_byte
bpf_jit_load_byte:
	/* Load one byte from the packet at offset r_OFF into r_A.
	 * No alignment concerns; only the bounds check is needed.
	 */
	cmp	r_OFF, 0
	bl	bpf_slow_path_byte_neg
	 nop				/* branch delay slot */
	.globl	bpf_jit_load_byte_positive_offset
bpf_jit_load_byte_positive_offset:
	/* Offset must lie inside the linear area. */
	cmp	r_OFF, r_HEADLEN
	bge	bpf_slow_path_byte
	 nop
	retl
	 ldub	[r_SKB_DATA + r_OFF], r_A	/* delay slot: the load */
82
	.globl	bpf_jit_load_byte_msh
bpf_jit_load_byte_msh:
	/* BPF "msh" load: r_X = (pkt[r_OFF] & 0xf) << 2 — the classic
	 * IP-header-length idiom.  Note the result lands in r_X, not
	 * the accumulator r_A.
	 */
	cmp	r_OFF, 0
	bl	bpf_slow_path_byte_msh_neg
	 nop				/* branch delay slot */
	.globl	bpf_jit_load_byte_msh_positive_offset
bpf_jit_load_byte_msh_positive_offset:
	/* Bounds check against the linear data length. */
	cmp	r_OFF, r_HEADLEN
	bge	bpf_slow_path_byte_msh
	 nop
	/* Fetch the byte into r_OFF (now dead), mask the low nibble,
	 * scale by 4.
	 */
	ldub	[r_SKB_DATA + r_OFF], r_OFF
	and	r_OFF, 0xf, r_OFF
	retl
	 sll	r_OFF, 2, r_X		/* delay slot: final shift */
97
/* Call skb_copy_bits(skb, r_OFF, scratch, LEN) to copy LEN packet
 * bytes that are not (contiguously) in the linear area.  A fresh
 * register window is opened for the C call; %i0 in the new window
 * is the caller's %o0, which holds the skb pointer.  The scratch
 * buffer sits in this frame at %fp + SCRATCH_OFF; after 'restore'
 * the invoking code addresses the same memory as %sp + SCRATCH_OFF
 * (the new window's %fp is the old %sp).  'cmp %o0, 0' tests the
 * skb_copy_bits() return value before the restore — condition
 * codes survive 'restore', so the code following the macro can
 * branch (bl => negative => error) on the call's result.
 * 'mov (LEN), %o3' sits in the call's delay slot.
 */
#define bpf_slow_path_common(LEN) \
	save	%sp, -SAVE_SZ, %sp; \
	mov	%i0, %o0; \
	mov	r_OFF, %o1; \
	add	%fp, SCRATCH_OFF, %o2; \
	call	skb_copy_bits; \
	mov	(LEN), %o3; \
	cmp	%o0, 0; \
	restore;
107
bpf_slow_path_word:
	/* Fetch 4 bytes into the scratch slot via skb_copy_bits();
	 * a negative return aborts through bpf_error.  The delay-slot
	 * load of r_A executes on both paths but is harmless when the
	 * error branch is taken.
	 */
	bpf_slow_path_common(4)
	bl	bpf_error
	 ld	[%sp + SCRATCH_OFF], r_A	/* delay slot */
	retl
	 nop
bpf_slow_path_half:
	/* Fetch 2 bytes via skb_copy_bits(); negative return =>
	 * bpf_error.  Delay-slot load is harmless on the error path.
	 */
	bpf_slow_path_common(2)
	bl	bpf_error
	 lduh	[%sp + SCRATCH_OFF], r_A	/* delay slot */
	retl
	 nop
bpf_slow_path_byte:
	/* Fetch 1 byte via skb_copy_bits(); negative return =>
	 * bpf_error.  Delay-slot load is harmless on the error path.
	 */
	bpf_slow_path_common(1)
	bl	bpf_error
	 ldub	[%sp + SCRATCH_OFF], r_A	/* delay slot */
	retl
	 nop
bpf_slow_path_byte_msh:
	/* Slow path for the "msh" load: fetch one byte via
	 * skb_copy_bits(), then compute r_X = (byte & 0xf) << 2.
	 * A negative skb_copy_bits() return aborts via bpf_error.
	 *
	 * Bug fix: the fetched byte must be loaded into r_OFF — as
	 * the fast path (bpf_jit_load_byte_msh_positive_offset) and
	 * the negative-offset path both do — not into r_A.  Loading
	 * it into r_A clobbered the accumulator and left the
	 * following 'and r_OFF, 0xf' masking the stale packet offset
	 * instead of the fetched byte, producing a bogus r_X.
	 */
	bpf_slow_path_common(1)
	bl	bpf_error
	 ldub	[%sp + SCRATCH_OFF], r_OFF	/* delay slot */
	and	r_OFF, 0xf, r_OFF
	retl
	 sll	r_OFF, 2, r_X			/* delay slot: final shift */
133
/* Call bpf_internal_load_pointer_neg_helper(skb, r_OFF, LEN) to
 * resolve a negative (SKF_*-style) offset.  The returned pointer
 * in %o0 is copied to r_TMP before the window is torn down;
 * r_TMP is presumably a global register (see bpf_jit.h) so the
 * value survives 'restore' — TODO confirm against bpf_jit.h.
 * A NULL return aborts through bpf_error; BE_PTR uses %xcc on
 * sparc64 so the full 64-bit pointer is tested.  Note 'restore'
 * sits in the branch delay slot, so it executes whether or not
 * the branch to bpf_error is taken.  'mov (LEN), %o2' is in the
 * call's delay slot.
 */
#define bpf_negative_common(LEN) \
	save	%sp, -SAVE_SZ, %sp; \
	mov	%i0, %o0; \
	mov	r_OFF, %o1; \
	call	bpf_internal_load_pointer_neg_helper; \
	mov	(LEN), %o2; \
	mov	%o0, r_TMP; \
	cmp	%o0, 0; \
	BE_PTR(bpf_error); \
	restore;
144
bpf_slow_path_word_neg:
	/* Offsets below SKF_MAX_NEG_OFF are invalid — fail.  sethi
	 * alone forms the constant: -0x200000 has zero low 10 bits.
	 */
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop				/* branch delay slot */
	.globl	bpf_jit_load_word_negative_offset
bpf_jit_load_word_negative_offset:
	/* r_TMP now holds the resolved pointer; same alignment
	 * handling as the positive-offset fast path.
	 */
	bpf_negative_common(4)
	andcc	r_TMP, 3, %g0
	bne	load_word_unaligned
	 nop
	retl
	 ld	[r_TMP], r_A		/* delay slot: aligned load */
158
bpf_slow_path_half_neg:
	/* Reject offsets below SKF_MAX_NEG_OFF (sethi alone forms
	 * the constant; its low 10 bits are zero).
	 */
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop				/* branch delay slot */
	.globl	bpf_jit_load_half_negative_offset
bpf_jit_load_half_negative_offset:
	/* r_TMP = resolved pointer; 2-byte alignment check as in
	 * the fast path.
	 */
	bpf_negative_common(2)
	andcc	r_TMP, 1, %g0
	bne	load_half_unaligned
	 nop
	retl
	 lduh	[r_TMP], r_A		/* delay slot: aligned load */
172
bpf_slow_path_byte_neg:
	/* Reject offsets below SKF_MAX_NEG_OFF. */
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop				/* branch delay slot */
	.globl	bpf_jit_load_byte_negative_offset
bpf_jit_load_byte_negative_offset:
	/* Single byte: no alignment check needed. */
	bpf_negative_common(1)
	retl
	 ldub	[r_TMP], r_A		/* delay slot: the load */
183
bpf_slow_path_byte_msh_neg:
	/* Reject offsets below SKF_MAX_NEG_OFF. */
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop				/* branch delay slot */
	.globl	bpf_jit_load_byte_msh_negative_offset
bpf_jit_load_byte_msh_negative_offset:
	/* "msh" computation on the resolved pointer:
	 * r_X = (byte & 0xf) << 2, byte staged in the dead r_OFF.
	 */
	bpf_negative_common(1)
	ldub	[r_TMP], r_OFF
	and	r_OFF, 0xf, r_OFF
	retl
	 sll	r_OFF, 2, r_X		/* delay slot: final shift */
196
bpf_error:
	/* Make the JIT program return zero.  The JIT epilogue
	 * stores away the original %o7 into r_saved_O7.  The
	 * normal leaf function return is to use "retl" which
	 * would evaluate to "jmpl %o7 + 8, %g0" but we want to
	 * use the saved value thus the sequence you see here.
	 */
	jmpl	r_saved_O7 + 8, %g0
	 clr	%o0			/* delay slot: return value = 0 */
/* end of bpf_jit_asm.S (git web-export footer removed) */