/*
 * rseq-x86.h
 *
 * (C) Copyright 2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24 | ||
25 | #ifdef __x86_64__ | |
26 | ||
27 | #define smp_mb() __asm__ __volatile__ ("mfence" : : : "memory") | |
28 | #define smp_rmb() barrier() | |
29 | #define smp_wmb() barrier() | |
30 | ||
31 | #define smp_load_acquire(p) \ | |
32 | __extension__ ({ \ | |
33 | __typeof(*p) ____p1 = READ_ONCE(*p); \ | |
34 | barrier(); \ | |
35 | ____p1; \ | |
36 | }) | |
37 | ||
38 | #define smp_acquire__after_ctrl_dep() smp_rmb() | |
39 | ||
40 | #define smp_store_release(p, v) \ | |
41 | do { \ | |
42 | barrier(); \ | |
43 | WRITE_ONCE(*p, v); \ | |
44 | } while (0) | |
45 | ||
46 | #define has_fast_acquire_release() 1 | |
47 | #define has_single_copy_load_64() 1 | |
48 | ||
/*
 * The __rseq_table section can be used by debuggers to better handle
 * single-stepping through the restartable critical sections.
 */
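/*
 * Layout note (editor's reading of the descriptor emitted below, not an
 * official ABI statement): each 32-byte __rseq_table entry appears to hold,
 * in order, the start address of the critical section (label 1), the
 * post-commit address (label 2), the abort address (label 4), and a
 * zero-filled fourth word. Roughly, on x86-64:
 *
 *	struct rseq_cs_entry {			// hypothetical name
 *		uint64_t start_ip;		// "1:" below
 *		uint64_t post_commit_ip;	// "2:" below
 *		uint64_t abort_ip;		// "4:" below
 *		uint64_t padding;		// 0x0 below
 *	} __attribute__((aligned(32)));
 */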
#define RSEQ_FINISH_ASM(_target_final, _to_write_final, _start_value, \
		_failure, _spec_store, _spec_input, \
		_final_store, _final_input, _extra_clobber, \
		_setup, _teardown, _scratch) \
do { \
	_scratch \
	__asm__ __volatile__ goto ( \
		".pushsection __rseq_table, \"aw\"\n\t" \
		".balign 32\n\t" \
		"3:\n\t" \
		".quad 1f, 2f, 4f, 0x0\n\t" \
		".popsection\n\t" \
		"1:\n\t" \
		_setup \
		RSEQ_INJECT_ASM(1) \
		"movq $3b, %[rseq_cs]\n\t" \
		RSEQ_INJECT_ASM(2) \
		"cmpl %[start_event_counter], %[current_event_counter]\n\t" \
		"jnz 4f\n\t" \
		RSEQ_INJECT_ASM(3) \
		_spec_store \
		_final_store \
		"2:\n\t" \
		RSEQ_INJECT_ASM(5) \
		"movq $0, %[rseq_cs]\n\t" \
		_teardown \
79 | ".pushsection __rseq_failure, \"a\"\n\t" \ | |
80 | "4:\n\t" \ | |
81 | "movq $0, %[rseq_cs]\n\t" \ | |
82 | _teardown \ | |
83 | "jmp %l[failure]\n\t" \ | |
84 | ".popsection\n\t" \ | |
85 | : /* gcc asm goto does not allow outputs */ \ | |
86 | : [start_event_counter]"r"((_start_value).event_counter), \ | |
87 | [current_event_counter]"m"((_start_value).rseqp->u.e.event_counter), \ | |
88 | [rseq_cs]"m"((_start_value).rseqp->rseq_cs) \ | |
89 | _spec_input \ | |
90 | _final_input \ | |
91 | RSEQ_INJECT_INPUT \ | |
92 | : "memory", "cc" \ | |
93 | _extra_clobber \ | |
94 | RSEQ_INJECT_CLOBBER \ | |
95 | : _failure \ | |
96 | ); \ | |
97 | } while (0) | |
98 | ||
#define RSEQ_FINISH_FINAL_STORE_ASM() \
		"movq %[to_write_final], %[target_final]\n\t"

/* x86-64 is TSO: a plain store already provides release semantics. */
#define RSEQ_FINISH_FINAL_STORE_RELEASE_ASM() \
		RSEQ_FINISH_FINAL_STORE_ASM()

#define RSEQ_FINISH_FINAL_STORE_INPUT(_target_final, _to_write_final) \
		, [to_write_final]"r"(_to_write_final), \
		  [target_final]"m"(*(_target_final))

#define RSEQ_FINISH_SPECULATIVE_STORE_ASM() \
		"movq %[to_write_spec], %[target_spec]\n\t" \
		RSEQ_INJECT_ASM(4)

#define RSEQ_FINISH_SPECULATIVE_STORE_INPUT(_target_spec, _to_write_spec) \
		, [to_write_spec]"r"(_to_write_spec), \
		  [target_spec]"m"(*(_target_spec))

/* TODO: implement a faster memcpy. */
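/*
 * The loop below is a plain, byte-at-a-time copy. In C it would read roughly
 * as follows (editor's sketch, not code from this file):
 *
 *	for (; len_memcpy != 0; len_memcpy--)
 *		*target_memcpy++ = *to_write_memcpy++;	// char pointers
 *
 * It increments/decrements its register operands in place, which is why
 * RSEQ_FINISH_MEMCPY_SETUP()/TEARDOWN() below save and restore them through
 * the rseq_scratch[] slots.
 */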
#define RSEQ_FINISH_MEMCPY_STORE_ASM() \
		"test %[len_memcpy], %[len_memcpy]\n\t" \
		"jz 333f\n\t" \
		"222:\n\t" \
		"movb (%[to_write_memcpy]), %%al\n\t" \
		"movb %%al, (%[target_memcpy])\n\t" \
		"inc %[to_write_memcpy]\n\t" \
		"inc %[target_memcpy]\n\t" \
		"dec %[len_memcpy]\n\t" \
		"jnz 222b\n\t" \
		"333:\n\t" \
		RSEQ_INJECT_ASM(4)

#define RSEQ_FINISH_MEMCPY_STORE_INPUT(_target_memcpy, _to_write_memcpy, _len_memcpy) \
		, [to_write_memcpy]"r"(_to_write_memcpy), \
		  [target_memcpy]"r"(_target_memcpy), \
		  [len_memcpy]"r"(_len_memcpy), \
		  [rseq_scratch0]"m"(rseq_scratch[0]), \
		  [rseq_scratch1]"m"(rseq_scratch[1]), \
		  [rseq_scratch2]"m"(rseq_scratch[2])

#define RSEQ_FINISH_MEMCPY_CLOBBER() \
		, "rax"

#define RSEQ_FINISH_MEMCPY_SCRATCH() \
		uint64_t rseq_scratch[3];

/*
 * We need to save and restore those input registers so they can be
 * modified within the assembly.
 */
#define RSEQ_FINISH_MEMCPY_SETUP() \
		"movq %[to_write_memcpy], %[rseq_scratch0]\n\t" \
		"movq %[target_memcpy], %[rseq_scratch1]\n\t" \
		"movq %[len_memcpy], %[rseq_scratch2]\n\t"

#define RSEQ_FINISH_MEMCPY_TEARDOWN() \
		"movq %[rseq_scratch2], %[len_memcpy]\n\t" \
		"movq %[rseq_scratch1], %[target_memcpy]\n\t" \
		"movq %[rseq_scratch0], %[to_write_memcpy]\n\t"

#elif defined(__i386__)

/*
 * Support older 32-bit processors that do not implement the
 * mfence/lfence/sfence instructions: a locked operation on the stack is
 * used as a full memory barrier instead.
 */
#define smp_mb() \
	__asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory")
#define smp_rmb() \
	__asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory")
#define smp_wmb() \
	__asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory")

#define smp_load_acquire(p) \
__extension__ ({ \
	__typeof(*p) ____p1 = READ_ONCE(*p); \
	smp_mb(); \
	____p1; \
})

#define smp_acquire__after_ctrl_dep() smp_rmb()

#define smp_store_release(p, v) \
do { \
	smp_mb(); \
	WRITE_ONCE(*p, v); \
} while (0)

#define has_fast_acquire_release() 0
#define has_single_copy_load_64() 0

/*
 * Use eax as scratch register and take memory operands as input to
 * lessen register pressure. This is especially needed when compiling
 * do_rseq_memcpy() at -O0.
 */
#define RSEQ_FINISH_ASM(_target_final, _to_write_final, _start_value, \
		_failure, _spec_store, _spec_input, \
		_final_store, _final_input, _extra_clobber, \
		_setup, _teardown, _scratch) \
do { \
	_scratch \
	__asm__ __volatile__ goto ( \
		".pushsection __rseq_table, \"aw\"\n\t" \
		".balign 32\n\t" \
		"3:\n\t" \
		".long 1f, 0x0, 2f, 0x0, 4f, 0x0, 0x0, 0x0\n\t" \
		".popsection\n\t" \
		"1:\n\t" \
		_setup \
		RSEQ_INJECT_ASM(1) \
		"movl $3b, %[rseq_cs]\n\t" \
		RSEQ_INJECT_ASM(2) \
		"movl %[start_event_counter], %%eax\n\t" \
		"cmpl %%eax, %[current_event_counter]\n\t" \
		"jnz 4f\n\t" \
		RSEQ_INJECT_ASM(3) \
		_spec_store \
		_final_store \
		"2:\n\t" \
		RSEQ_INJECT_ASM(5) \
		"movl $0, %[rseq_cs]\n\t" \
		_teardown \
223 | ".pushsection __rseq_failure, \"a\"\n\t" \ | |
224 | "4:\n\t" \ | |
225 | "movl $0, %[rseq_cs]\n\t" \ | |
226 | _teardown \ | |
227 | "jmp %l[failure]\n\t" \ | |
228 | ".popsection\n\t" \ | |
229 | : /* gcc asm goto does not allow outputs */ \ | |
230 | : [start_event_counter]"m"((_start_value).event_counter), \ | |
231 | [current_event_counter]"m"((_start_value).rseqp->u.e.event_counter), \ | |
232 | [rseq_cs]"m"((_start_value).rseqp->rseq_cs) \ | |
233 | _spec_input \ | |
234 | _final_input \ | |
235 | RSEQ_INJECT_INPUT \ | |
236 | : "memory", "cc", "eax" \ | |
237 | _extra_clobber \ | |
238 | RSEQ_INJECT_CLOBBER \ | |
239 | : _failure \ | |
240 | ); \ | |
241 | } while (0) | |
242 | ||
243 | #define RSEQ_FINISH_FINAL_STORE_ASM() \ | |
244 | "movl %[to_write_final], %%eax\n\t" \ | |
245 | "movl %%eax, %[target_final]\n\t" | |
246 | ||
247 | #define RSEQ_FINISH_FINAL_STORE_RELEASE_ASM() \ | |
248 | "lock; addl $0,0(%%esp)\n\t" \ | |
249 | RSEQ_FINISH_FINAL_STORE_ASM() | |
250 | ||
251 | #define RSEQ_FINISH_FINAL_STORE_INPUT(_target_final, _to_write_final) \ | |
252 | , [to_write_final]"m"(_to_write_final), \ | |
253 | [target_final]"m"(*(_target_final)) | |
254 | ||
255 | #define RSEQ_FINISH_SPECULATIVE_STORE_ASM() \ | |
256 | "movl %[to_write_spec], %%eax\n\t" \ | |
257 | "movl %%eax, %[target_spec]\n\t" \ | |
258 | RSEQ_INJECT_ASM(4) | |
259 | ||
260 | #define RSEQ_FINISH_SPECULATIVE_STORE_INPUT(_target_spec, _to_write_spec) \ | |
261 | , [to_write_spec]"m"(_to_write_spec), \ | |
262 | [target_spec]"m"(*(_target_spec)) | |
263 | ||
/* TODO: implement a faster memcpy. */
#define RSEQ_FINISH_MEMCPY_STORE_ASM() \
		"movl %[len_memcpy], %%eax\n\t" \
		"test %%eax, %%eax\n\t" \
		"jz 333f\n\t" \
		"222:\n\t" \
		"movb (%[to_write_memcpy]), %%al\n\t" \
		"movb %%al, (%[target_memcpy])\n\t" \
		"inc %[to_write_memcpy]\n\t" \
		"inc %[target_memcpy]\n\t" \
		"decl %[rseq_scratch2]\n\t" \
		"jnz 222b\n\t" \
		"333:\n\t" \
		RSEQ_INJECT_ASM(4)

#define RSEQ_FINISH_MEMCPY_STORE_INPUT(_target_memcpy, _to_write_memcpy, _len_memcpy) \
		, [to_write_memcpy]"r"(_to_write_memcpy), \
		  [target_memcpy]"r"(_target_memcpy), \
		  [len_memcpy]"m"(_len_memcpy), \
		  [rseq_scratch0]"m"(rseq_scratch[0]), \
		  [rseq_scratch1]"m"(rseq_scratch[1]), \
		  [rseq_scratch2]"m"(rseq_scratch[2])

#define RSEQ_FINISH_MEMCPY_CLOBBER()

#define RSEQ_FINISH_MEMCPY_SCRATCH() \
		uint32_t rseq_scratch[3];

/*
 * We need to save and restore those input registers so they can be
 * modified within the assembly.
 */
#define RSEQ_FINISH_MEMCPY_SETUP() \
		"movl %[to_write_memcpy], %[rseq_scratch0]\n\t" \
		"movl %[target_memcpy], %[rseq_scratch1]\n\t" \
		"movl %[len_memcpy], %%eax\n\t" \
		"movl %%eax, %[rseq_scratch2]\n\t"

#define RSEQ_FINISH_MEMCPY_TEARDOWN() \
		"movl %[rseq_scratch1], %[target_memcpy]\n\t" \
		"movl %[rseq_scratch0], %[to_write_memcpy]\n\t"

#endif