641b2782368e03f07d60f3cd9c1260876ea518f8
[librseq.git] / include / rseq / arch / mips.h
1 /* SPDX-License-Identifier: MIT */
2 /* SPDX-FileCopyrightText: 2018 MIPS Tech LLC */
3 /* SPDX-FileCopyrightText: 2016-2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */
4
5 /*
6 * Author: Paul Burton <paul.burton@mips.com>
7 */
8
9 #ifndef _RSEQ_RSEQ_H
10 #error "Never use <rseq/arch/mips.h> directly; include <rseq/rseq.h> instead."
11 #endif
12
13 #include <asm/byteorder.h>
14
15 /*
16 * RSEQ_ASM_*() macro helpers are internal to the librseq headers. Those
17 * are not part of the public API.
18 */
19
20 #if (RSEQ_BITS_PER_LONG != 64) && (RSEQ_BITS_PER_LONG != 32)
21 # error unsupported RSEQ_BITS_PER_LONG
22 #endif
23
24 /*
25 * RSEQ_SIG uses the break instruction. The instruction pattern is:
26 *
27 * On MIPS:
28 * 0350000d break 0x350
29 *
30 * On nanoMIPS:
31 * 00100350 break 0x350
32 *
33 * On microMIPS:
34 * 0000d407 break 0x350
35 *
36 * For nanoMIPS32 and microMIPS, the instruction stream is encoded as 16-bit
37 * halfwords, so the signature halfwords need to be swapped accordingly for
38 * little-endian.
39 */
40 #if defined(__nanomips__)
41 # ifdef __MIPSEL__
42 # define RSEQ_SIG 0x03500010
43 # else
44 # define RSEQ_SIG 0x00100350
45 # endif
46 #elif defined(__mips_micromips)
47 # ifdef __MIPSEL__
48 # define RSEQ_SIG 0xd4070000
49 # else
50 # define RSEQ_SIG 0x0000d407
51 # endif
52 #elif defined(__mips__)
53 # define RSEQ_SIG 0x0350000d
54 #else
55 /* Unknown MIPS architecture. */
56 #endif
57
58 /*
59 * Refer to the Linux kernel memory model (LKMM) for documentation of
60 * the memory barriers.
61 */
62
/* CPU memory barrier: full "sync" orders all prior memory accesses before all later ones. */
#define rseq_smp_mb()	__asm__ __volatile__ ("sync" ::: "memory")
/* CPU read memory barrier: implemented as a full barrier on MIPS. */
#define rseq_smp_rmb()	rseq_smp_mb()
/* CPU write memory barrier: implemented as a full barrier on MIPS. */
#define rseq_smp_wmb()	rseq_smp_mb()
69
/*
 * Acquire: One-way permeable barrier.
 *
 * Implemented as a plain load followed by a full memory barrier: the
 * barrier prevents later memory accesses from being reordered before
 * the load, which provides acquire semantics.
 */
#define rseq_smp_load_acquire(p)					\
__extension__ ({							\
	rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p));	\
	rseq_smp_mb();							\
	____p1;								\
})
77
/*
 * Acquire barrier after control dependency: per the LKMM, a read
 * barrier suffices to upgrade a control dependency to acquire ordering.
 */
#define rseq_smp_acquire__after_ctrl_dep()	rseq_smp_rmb()
80
/*
 * Release: One-way permeable barrier.
 *
 * A full memory barrier before the store prevents earlier memory
 * accesses from being reordered after it, which provides release
 * semantics.
 */
#define rseq_smp_store_release(p, v)					\
do {									\
	rseq_smp_mb();							\
	RSEQ_WRITE_ONCE(*(p), v);					\
} while (0)
87
/*
 * Helper macros to define and access a variable of long integer type.
 * Only used internally in rseq headers.
 *
 * On 64-bit, use the doubleword data directive and the 64-bit
 * (d-prefixed) load/store/add-immediate mnemonics; on 32-bit, use the
 * word directive and the 32-bit mnemonics.
 */
#if RSEQ_BITS_PER_LONG == 64
# define RSEQ_ASM_LONG		".dword"	/* Emit a long (64-bit). */
# define RSEQ_ASM_LONG_LA	"dla"		/* Load address into a register. */
# define RSEQ_ASM_LONG_L	"ld"		/* Load a long from memory. */
# define RSEQ_ASM_LONG_S	"sd"		/* Store a long to memory. */
# define RSEQ_ASM_LONG_ADDI	"daddiu"	/* Add immediate to a long. */
#else
# define RSEQ_ASM_LONG		".word"		/* Emit a long (32-bit). */
# define RSEQ_ASM_LONG_LA	"la"
# define RSEQ_ASM_LONG_L	"lw"
# define RSEQ_ASM_LONG_S	"sw"
# define RSEQ_ASM_LONG_ADDI	"addiu"
#endif
105
/*
 * Helper macros to define a variable of pointer type stored in a 64-bit
 * integer. Only used internally in rseq headers.
 */
#if RSEQ_BITS_PER_LONG == 64
# define RSEQ_ASM_U64_PTR(x)	".dword " x
#else
/*
 * On 32-bit, pad the 32-bit pointer out to 64 bits: the zero padding
 * word is emitted first on big-endian and last on little-endian.
 */
# if defined(__BYTE_ORDER) ? (__BYTE_ORDER == __BIG_ENDIAN) : defined(__BIG_ENDIAN)
#  define RSEQ_ASM_U64_PTR(x)	".word 0x0, " x
# else
#  define RSEQ_ASM_U64_PTR(x)	".word " x ", 0x0"
# endif
#endif

/* Emit a 32-bit value. */
#define RSEQ_ASM_U32(x)		".word " x
121
/* Only used in RSEQ_ASM_DEFINE_TABLE. */
/*
 * Emit a critical section descriptor into the __rseq_cs section,
 * 32-byte aligned, with fields (version, flags, start_ip,
 * post_commit_offset, abort_ip), followed by a pointer to that
 * descriptor in the __rseq_cs_ptr_array section.
 */
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip,	\
				post_commit_offset, abort_ip)		\
		".pushsection __rseq_cs, \"aw\"\n\t"			\
		".balign 32\n\t"					\
		__rseq_str(label) ":\n\t"				\
		RSEQ_ASM_U32(__rseq_str(version)) "\n\t"		\
		RSEQ_ASM_U32(__rseq_str(flags)) "\n\t"			\
		RSEQ_ASM_U64_PTR(__rseq_str(start_ip)) "\n\t"		\
		RSEQ_ASM_U64_PTR(__rseq_str(post_commit_offset)) "\n\t"	\
		RSEQ_ASM_U64_PTR(__rseq_str(abort_ip)) "\n\t"		\
		".popsection\n\t"					\
		".pushsection __rseq_cs_ptr_array, \"aw\"\n\t"		\
		RSEQ_ASM_U64_PTR(__rseq_str(label) "b") "\n\t"		\
		".popsection\n\t"
137
/*
 * Define an rseq critical section structure of version 0 with no flags.
 *
 * @label:
 *   Local label for the beginning of the critical section descriptor
 *   structure.
 * @start_ip:
 *   Pointer to the first instruction of the sequence of consecutive assembly
 *   instructions.
 * @post_commit_ip:
 *   Pointer to the instruction after the last instruction of the sequence of
 *   consecutive assembly instructions.
 * @abort_ip:
 *   Pointer to the instruction where to move the execution flow in case of
 *   abort of the sequence of consecutive assembly instructions.
 *
 * The post-commit offset stored in the descriptor is computed as
 * (post_commit_ip - start_ip).
 */
#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
	__RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip,		\
				(post_commit_ip) - (start_ip), abort_ip)
157
/*
 * Define the @exit_ip pointer as an exit point for the sequence of consecutive
 * assembly instructions at @start_ip.
 *
 * @start_ip:
 *   Pointer to the first instruction of the sequence of consecutive assembly
 *   instructions.
 * @exit_ip:
 *   Pointer to an exit point instruction.
 *
 * Exit points of a rseq critical section consist of all instructions outside
 * of the critical section where a critical section can either branch to or
 * reach through the normal course of its execution. The abort IP and the
 * post-commit IP are already part of the __rseq_cs section and should not be
 * explicitly defined as additional exit points. Knowing all exit points is
 * useful to assist debuggers stepping over the critical section.
 *
 * Each entry in __rseq_exit_point_array is a (start_ip, exit_ip) pair of
 * 64-bit pointers.
 */
#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip)			\
		".pushsection __rseq_exit_point_array, \"aw\"\n\t"	\
		RSEQ_ASM_U64_PTR(__rseq_str(start_ip)) "\n\t"		\
		RSEQ_ASM_U64_PTR(__rseq_str(exit_ip)) "\n\t"		\
		".popsection\n\t"
180
/* Only used in RSEQ_ASM_DEFINE_ABORT. */
/*
 * Emit an in-text copy of the critical section descriptor at
 * @table_label, then the RSEQ_SIG signature word immediately before the
 * abort handler label @label (the kernel checks for this signature
 * preceding the abort IP before branching there), then the abort
 * handler itself: the @teardown instructions followed by a branch to
 * the C label @abort_label.
 */
#define __RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label,		\
				table_label, version, flags,		\
				start_ip, post_commit_offset, abort_ip)	\
		".balign 32\n\t"					\
		__rseq_str(table_label) ":\n\t"				\
		RSEQ_ASM_U32(__rseq_str(version)) "\n\t"		\
		RSEQ_ASM_U32(__rseq_str(flags)) "\n\t"			\
		RSEQ_ASM_U64_PTR(__rseq_str(start_ip)) "\n\t"		\
		RSEQ_ASM_U64_PTR(__rseq_str(post_commit_offset)) "\n\t"	\
		RSEQ_ASM_U64_PTR(__rseq_str(abort_ip)) "\n\t"		\
		RSEQ_ASM_U32(__rseq_str(RSEQ_SIG)) "\n\t"		\
		__rseq_str(label) ":\n\t"				\
		teardown						\
		"b %l[" __rseq_str(abort_label) "]\n\t"
196
/*
 * Define a critical section abort handler.
 *
 * @label:
 *   Local label to the abort handler.
 * @teardown:
 *   Sequence of instructions to run on abort.
 * @abort_label:
 *   C label to jump to at the end of the sequence.
 * @table_label:
 *   Local label to the critical section descriptor copy placed near
 *   the program counter. This is done for performance reasons because
 *   computing this address is faster than accessing the program data.
 *
 * The purpose of @start_ip, @post_commit_ip, and @abort_ip are
 * documented in RSEQ_ASM_DEFINE_TABLE.
 *
 * The descriptor copy uses version 0 and no flags, matching
 * RSEQ_ASM_DEFINE_TABLE.
 */
#define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label,		\
			      table_label, start_ip, post_commit_ip, abort_ip) \
	__RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label,		\
				table_label, 0x0, 0x0, start_ip,	\
				(post_commit_ip) - (start_ip), abort_ip)
219
/*
 * Define a critical section teardown handler.
 *
 * @label:
 *   Local label to the teardown handler.
 * @teardown:
 *   Sequence of instructions to run on teardown.
 * @target_label:
 *   C label to jump to at the end of the sequence.
 *
 * Emits @label, the @teardown instructions, then a branch to the C
 * label @target_label.
 */
#define RSEQ_ASM_DEFINE_TEARDOWN(label, teardown, target_label)		\
		__rseq_str(label) ":\n\t"				\
		teardown						\
		"b %l[" __rseq_str(target_label) "]\n\t"
234
/*
 * Store the address of the critical section descriptor structure at
 * @cs_label into the @rseq_cs pointer and emit the label @label, which
 * is the beginning of the sequence of consecutive assembly instructions.
 *
 * @label:
 *   Local label to the beginning of the sequence of consecutive assembly
 *   instructions.
 * @cs_label:
 *   Source local label to the critical section descriptor structure.
 * @rseq_cs:
 *   Destination pointer where to store the address of the critical
 *   section descriptor structure.
 *
 * Uses register $4 (a0) as a scratch register. RSEQ_INJECT_ASM() is an
 * instrumentation hook defined elsewhere in the rseq headers.
 */
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs)		\
		RSEQ_INJECT_ASM(1)					\
		RSEQ_ASM_LONG_LA " $4, " __rseq_str(cs_label) "\n\t"	\
		RSEQ_ASM_LONG_S  " $4, %[" __rseq_str(rseq_cs) "]\n\t"	\
		__rseq_str(label) ":\n\t"
254
/*
 * Jump to local label @label when @cpu_id != @current_cpu_id.
 * Loads @current_cpu_id into scratch register $4 and compares it
 * against the @cpu_id operand.
 */
#define RSEQ_ASM_CBNE_CPU_ID(cpu_id, current_cpu_id, label)		\
		RSEQ_INJECT_ASM(2)					\
		"lw  $4, %[" __rseq_str(current_cpu_id) "]\n\t"		\
		"bne $4, %[" __rseq_str(cpu_id) "], " __rseq_str(label) "\n\t"
260
/*
 * Per-cpu-id indexing.
 *
 * The bits.h template below is expanded once for each combination of
 * indexing mode (cpu_id, mm_cid, none) and memory ordering (relaxed,
 * release) selected by the RSEQ_TEMPLATE_* defines, generating the
 * corresponding family of rseq critical section functions.
 */

#define RSEQ_TEMPLATE_INDEX_CPU_ID
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq/arch/mips/bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED

#define RSEQ_TEMPLATE_MO_RELEASE
#include "rseq/arch/mips/bits.h"
#undef RSEQ_TEMPLATE_MO_RELEASE
#undef RSEQ_TEMPLATE_INDEX_CPU_ID

/* Per-mm-cid indexing. */

#define RSEQ_TEMPLATE_INDEX_MM_CID
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq/arch/mips/bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED

#define RSEQ_TEMPLATE_MO_RELEASE
#include "rseq/arch/mips/bits.h"
#undef RSEQ_TEMPLATE_MO_RELEASE
#undef RSEQ_TEMPLATE_INDEX_MM_CID

/* APIs which are not indexed. */

#define RSEQ_TEMPLATE_INDEX_NONE
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq/arch/mips/bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED
#undef RSEQ_TEMPLATE_INDEX_NONE
This page took 0.08294 seconds and 3 git commands to generate.