1 /* SPDX-License-Identifier: MIT */
2 /* SPDX-FileCopyrightText: 2016-2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */
23 #include <rseq/compiler.h>
24 #include <rseq/inject.h>
25 #include <rseq/thread-pointer.h>
26 #include <rseq/utils.h>
/*
 * Memory ordering requested for the per-cpu operation helpers below.
 * Only RSEQ_MO_RELAXED and RSEQ_MO_RELEASE are dispatched by the
 * helpers in this header; the others are rejected with -1.
 */
enum rseq_mo {
	RSEQ_MO_RELAXED = 0,
	RSEQ_MO_CONSUME = 1,	/* Unused */
	RSEQ_MO_ACQUIRE = 2,	/* Unused */
	RSEQ_MO_RELEASE = 3,
	RSEQ_MO_ACQ_REL = 4,	/* Unused */
	RSEQ_MO_SEQ_CST = 5,	/* Unused */
};
/*
 * Per-cpu indexing mode: index either by CPU number (cpu_id field) or
 * by memory-map concurrency ID (mm_cid field).
 */
enum rseq_percpu_mode {
	RSEQ_PERCPU_CPU_ID = 0,
	RSEQ_PERCPU_MM_CID = 1,
};
/*
 * Argument to rseq_available(): query whether rseq is available from
 * the kernel, or whether registration is handled by the C library.
 */
enum rseq_available_query {
	RSEQ_AVAILABLE_QUERY_KERNEL = 0,
	RSEQ_AVAILABLE_QUERY_LIBC = 1,
};
/*
 * User code can define RSEQ_GET_ABI_OVERRIDE to override the
 * rseq_get_abi() implementation, for instance to use glibc's symbols
 * directly.
 */
#ifndef RSEQ_GET_ABI_OVERRIDE

/* Offset from the thread pointer to the rseq area. */
extern ptrdiff_t rseq_offset;

/*
 * Size of the registered rseq area. 0 if the registration was
 * unsuccessful.
 */
extern unsigned int rseq_size;

/* Flags used during rseq registration. */
extern unsigned int rseq_flags;

/*
 * rseq feature size supported by the kernel. 0 if the registration was
 * unsuccessful.
 */
extern unsigned int rseq_feature_size;

/*
 * Returns a pointer to the rseq area.
 */
static inline __attribute__((always_inline))
struct rseq_abi *rseq_get_abi(void)
{
	/* The rseq area lives at a fixed offset from the thread pointer. */
	return (struct rseq_abi *) ((uintptr_t) rseq_thread_pointer() + rseq_offset);
}

#endif /* RSEQ_GET_ABI_OVERRIDE */
/*
 * Architecture specific.
 */
95 #include <rseq/arch.h>
/*
 * Register rseq for the current thread. This needs to be called once
 * by any thread which uses restartable sequences, before they start
 * using restartable sequences, to ensure restartable sequences
 * succeed. A restartable sequence executed from a non-registered
 * thread will always fail.
 */
int rseq_register_current_thread(void);

/*
 * Unregister rseq for current thread.
 */
int rseq_unregister_current_thread(void);

/*
 * Restartable sequence fallback for reading the current CPU number.
 */
int32_t rseq_fallback_current_cpu(void);

/*
 * Restartable sequence fallback for reading the current node number.
 */
int32_t rseq_fallback_current_node(void);

/*
 * Returns true if rseq is supported.
 *
 * @query is one of enum rseq_available_query.
 */
bool rseq_available(unsigned int query);
132 * Values returned can be either the current CPU number, -1 (rseq is
133 * uninitialized), or -2 (rseq initialization has failed).
135 static inline __attribute__((always_inline
))
136 int32_t rseq_current_cpu_raw(void)
138 return RSEQ_READ_ONCE(rseq_get_abi()->cpu_id
);
142 * Returns a possible CPU number, which is typically the current CPU.
143 * The returned CPU number can be used to prepare for an rseq critical
144 * section, which will confirm whether the cpu number is indeed the
145 * current one, and whether rseq is initialized.
147 * The CPU number returned by rseq_cpu_start should always be validated
148 * by passing it to a rseq asm sequence, or by comparing it to the
149 * return value of rseq_current_cpu_raw() if the rseq asm sequence
150 * does not need to be invoked.
152 static inline __attribute__((always_inline
))
153 uint32_t rseq_cpu_start(void)
155 return RSEQ_READ_ONCE(rseq_get_abi()->cpu_id_start
);
/*
 * Returns the current CPU number. Falls back to
 * rseq_fallback_current_cpu() when rseq_current_cpu_raw() reports that
 * rseq is uninitialized or failed to initialize (negative value).
 */
static inline __attribute__((always_inline))
uint32_t rseq_current_cpu(void)
{
	int cpu;

	cpu = rseq_current_cpu_raw();
	if (rseq_unlikely(cpu < 0))
		cpu = rseq_fallback_current_cpu();
	return cpu;
}
169 static inline __attribute__((always_inline
))
170 bool rseq_node_id_available(void)
172 return (int) rseq_feature_size
>= (int) rseq_offsetofend(struct rseq_abi
, node_id
);
176 * Current NUMA node number.
178 static inline __attribute__((always_inline
))
179 uint32_t rseq_current_node_id(void)
181 assert(rseq_node_id_available());
182 return RSEQ_READ_ONCE(rseq_get_abi()->node_id
);
185 static inline __attribute__((always_inline
))
186 bool rseq_mm_cid_available(void)
188 return (int) rseq_feature_size
>= (int) rseq_offsetofend(struct rseq_abi
, mm_cid
);
191 static inline __attribute__((always_inline
))
192 uint32_t rseq_current_mm_cid(void)
194 return RSEQ_READ_ONCE(rseq_get_abi()->mm_cid
);
197 static inline __attribute__((always_inline
))
198 void rseq_clear_rseq_cs(void)
200 RSEQ_WRITE_ONCE(rseq_get_abi()->rseq_cs
.arch
.ptr
, 0);
/*
 * rseq_prepare_unload() should be invoked by each thread executing a rseq
 * critical section at least once between their last critical section and
 * library unload of the library defining the rseq critical section (struct
 * rseq_cs) or the code referred to by the struct rseq_cs start_ip and
 * post_commit_offset fields. This also applies to use of rseq in code
 * generated by JIT: rseq_prepare_unload() should be invoked at least once by
 * each thread executing a rseq critical section before reclaim of the memory
 * holding the struct rseq_cs or reclaim of the code pointed to by struct
 * rseq_cs start_ip and post_commit_offset fields.
 */
static inline __attribute__((always_inline))
void rseq_prepare_unload(void)
{
	rseq_clear_rseq_cs();
}
/*
 * Refer to rseq/pseudocode.h for documentation and pseudo-code of the
 * rseq critical section helpers.
 */
224 #include "rseq/pseudocode.h"
226 static inline __attribute__((always_inline
))
227 int rseq_load_cbne_store__ptr(enum rseq_mo rseq_mo
, enum rseq_percpu_mode percpu_mode
,
228 intptr_t *v
, intptr_t expect
,
229 intptr_t newv
, int cpu
)
231 if (rseq_mo
!= RSEQ_MO_RELAXED
)
233 switch (percpu_mode
) {
234 case RSEQ_PERCPU_CPU_ID
:
235 return rseq_load_cbne_store__ptr_relaxed_cpu_id(v
, expect
, newv
, cpu
);
236 case RSEQ_PERCPU_MM_CID
:
237 return rseq_load_cbne_store__ptr_relaxed_mm_cid(v
, expect
, newv
, cpu
);
243 static inline __attribute__((always_inline
))
244 int rseq_load_cbeq_store_add_load_store__ptr(enum rseq_mo rseq_mo
, enum rseq_percpu_mode percpu_mode
,
245 intptr_t *v
, intptr_t expectnot
, long voffp
, intptr_t *load
,
248 if (rseq_mo
!= RSEQ_MO_RELAXED
)
250 switch (percpu_mode
) {
251 case RSEQ_PERCPU_CPU_ID
:
252 return rseq_load_cbeq_store_add_load_store__ptr_relaxed_cpu_id(v
, expectnot
, voffp
, load
, cpu
);
253 case RSEQ_PERCPU_MM_CID
:
254 return rseq_load_cbeq_store_add_load_store__ptr_relaxed_mm_cid(v
, expectnot
, voffp
, load
, cpu
);
260 static inline __attribute__((always_inline
))
261 int rseq_load_add_store__ptr(enum rseq_mo rseq_mo
, enum rseq_percpu_mode percpu_mode
,
262 intptr_t *v
, intptr_t count
, int cpu
)
264 if (rseq_mo
!= RSEQ_MO_RELAXED
)
266 switch (percpu_mode
) {
267 case RSEQ_PERCPU_CPU_ID
:
268 return rseq_load_add_store__ptr_relaxed_cpu_id(v
, count
, cpu
);
269 case RSEQ_PERCPU_MM_CID
:
270 return rseq_load_add_store__ptr_relaxed_mm_cid(v
, count
, cpu
);
#ifdef RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV
/*
 * Dispatch the offset-dereference-add operation:
 *
 *   pval = *(ptr + off);
 *   *pval += inc;
 *
 * to the implementation matching the requested memory ordering and
 * per-cpu indexing mode. Only RSEQ_MO_RELAXED is implemented; any
 * other ordering or unknown mode returns -1.
 */
static inline __attribute__((always_inline))
int rseq_load_add_load_load_add_store__ptr(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
					   intptr_t *ptr, long off, intptr_t inc, int cpu)
{
	if (rseq_mo != RSEQ_MO_RELAXED)
		return -1;
	switch (percpu_mode) {
	case RSEQ_PERCPU_CPU_ID:
		return rseq_load_add_load_load_add_store__ptr_relaxed_cpu_id(ptr, off, inc, cpu);
	case RSEQ_PERCPU_MM_CID:
		return rseq_load_add_load_load_add_store__ptr_relaxed_mm_cid(ptr, off, inc, cpu);
	default:
		return -1;
	}
}
#endif /* RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV */
294 static inline __attribute__((always_inline
))
295 int rseq_load_cbne_store_store__ptr(enum rseq_mo rseq_mo
, enum rseq_percpu_mode percpu_mode
,
296 intptr_t *v
, intptr_t expect
,
297 intptr_t *v2
, intptr_t newv2
,
298 intptr_t newv
, int cpu
)
301 case RSEQ_MO_RELAXED
:
302 switch (percpu_mode
) {
303 case RSEQ_PERCPU_CPU_ID
:
304 return rseq_load_cbne_store_store__ptr_relaxed_cpu_id(v
, expect
, v2
, newv2
, newv
, cpu
);
305 case RSEQ_PERCPU_MM_CID
:
306 return rseq_load_cbne_store_store__ptr_relaxed_mm_cid(v
, expect
, v2
, newv2
, newv
, cpu
);
310 case RSEQ_MO_RELEASE
:
311 switch (percpu_mode
) {
312 case RSEQ_PERCPU_CPU_ID
:
313 return rseq_load_cbne_store_store__ptr_release_cpu_id(v
, expect
, v2
, newv2
, newv
, cpu
);
314 case RSEQ_PERCPU_MM_CID
:
315 return rseq_load_cbne_store_store__ptr_release_mm_cid(v
, expect
, v2
, newv2
, newv
, cpu
);
319 case RSEQ_MO_ACQUIRE
: /* Fallthrough */
320 case RSEQ_MO_ACQ_REL
: /* Fallthrough */
321 case RSEQ_MO_CONSUME
: /* Fallthrough */
322 case RSEQ_MO_SEQ_CST
: /* Fallthrough */
328 static inline __attribute__((always_inline
))
329 int rseq_load_cbne_load_cbne_store__ptr(enum rseq_mo rseq_mo
, enum rseq_percpu_mode percpu_mode
,
330 intptr_t *v
, intptr_t expect
,
331 intptr_t *v2
, intptr_t expect2
,
332 intptr_t newv
, int cpu
)
334 if (rseq_mo
!= RSEQ_MO_RELAXED
)
336 switch (percpu_mode
) {
337 case RSEQ_PERCPU_CPU_ID
:
338 return rseq_load_cbne_load_cbne_store__ptr_relaxed_cpu_id(v
, expect
, v2
, expect2
, newv
, cpu
);
339 case RSEQ_PERCPU_MM_CID
:
340 return rseq_load_cbne_load_cbne_store__ptr_relaxed_mm_cid(v
, expect
, v2
, expect2
, newv
, cpu
);
346 static inline __attribute__((always_inline
))
347 int rseq_load_cbne_memcpy_store__ptr(enum rseq_mo rseq_mo
, enum rseq_percpu_mode percpu_mode
,
348 intptr_t *v
, intptr_t expect
,
349 void *dst
, void *src
, size_t len
,
350 intptr_t newv
, int cpu
)
353 case RSEQ_MO_RELAXED
:
354 switch (percpu_mode
) {
355 case RSEQ_PERCPU_CPU_ID
:
356 return rseq_load_cbne_memcpy_store__ptr_relaxed_cpu_id(v
, expect
, dst
, src
, len
, newv
, cpu
);
357 case RSEQ_PERCPU_MM_CID
:
358 return rseq_load_cbne_memcpy_store__ptr_relaxed_mm_cid(v
, expect
, dst
, src
, len
, newv
, cpu
);
362 case RSEQ_MO_RELEASE
:
363 switch (percpu_mode
) {
364 case RSEQ_PERCPU_CPU_ID
:
365 return rseq_load_cbne_memcpy_store__ptr_release_cpu_id(v
, expect
, dst
, src
, len
, newv
, cpu
);
366 case RSEQ_PERCPU_MM_CID
:
367 return rseq_load_cbne_memcpy_store__ptr_release_mm_cid(v
, expect
, dst
, src
, len
, newv
, cpu
);
371 case RSEQ_MO_ACQUIRE
: /* Fallthrough */
372 case RSEQ_MO_ACQ_REL
: /* Fallthrough */
373 case RSEQ_MO_CONSUME
: /* Fallthrough */
374 case RSEQ_MO_SEQ_CST
: /* Fallthrough */
384 #endif /* _RSEQ_RSEQ_H */