/* SPDX-License-Identifier: MIT */
/* SPDX-FileCopyrightText: 2016-2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */

/*
 * rseq.h
 */

#ifndef RSEQ_H
#define RSEQ_H

#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>
#include <signal.h>
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <assert.h>
#include <rseq/rseq-abi.h>
#include <rseq/compiler.h>

#ifndef rseq_sizeof_field
#define rseq_sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#endif

#ifndef rseq_offsetofend
#define rseq_offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + rseq_sizeof_field(TYPE, MEMBER))
#endif
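
/*
 * Worked example (for illustration): rseq_offsetofend(struct rseq_abi,
 * node_id) evaluates to the offset of the first byte past the node_id
 * field. The availability checks below compare such offsets against
 * rseq_feature_size to tell whether the registered rseq area includes
 * a given field.
 */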

/*
 * Empty code injection macros, override when testing.
 * It is important to consider that the ASM injection macros need to be
 * fully reentrant (e.g. do not modify the stack).
 */
#ifndef RSEQ_INJECT_ASM
#define RSEQ_INJECT_ASM(n)
#endif

#ifndef RSEQ_INJECT_C
#define RSEQ_INJECT_C(n)
#endif

#ifndef RSEQ_INJECT_INPUT
#define RSEQ_INJECT_INPUT
#endif

#ifndef RSEQ_INJECT_CLOBBER
#define RSEQ_INJECT_CLOBBER
#endif

#ifndef RSEQ_INJECT_FAILED
#define RSEQ_INJECT_FAILED
#endif
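
/*
 * A test harness might, for instance, count C-level injection points
 * hit by defining the macro before including this header (a sketch;
 * "rseq_inject_hits" is a hypothetical counter, not part of librseq):
 *
 *	static __thread unsigned long rseq_inject_hits;
 *	#define RSEQ_INJECT_C(n)	rseq_inject_hits++;
 *	#include <rseq/rseq.h>
 */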

/*
 * User code can define RSEQ_GET_ABI_OVERRIDE to override the
 * rseq_get_abi() implementation, for instance to use glibc's symbols
 * directly.
 */
#ifndef RSEQ_GET_ABI_OVERRIDE

# include <rseq/rseq-thread-pointer.h>

# ifdef __cplusplus
extern "C" {
# endif

/* Offset from the thread pointer to the rseq area. */
extern ptrdiff_t rseq_offset;

/*
 * Size of the registered rseq area. 0 if the registration was
 * unsuccessful.
 */
extern unsigned int rseq_size;

/* Flags used during rseq registration. */
extern unsigned int rseq_flags;

/*
 * rseq feature size supported by the kernel. 0 if the registration was
 * unsuccessful.
 */
extern unsigned int rseq_feature_size;

static inline struct rseq_abi *rseq_get_abi(void)
{
	return (struct rseq_abi *) ((uintptr_t) rseq_thread_pointer() + rseq_offset);
}

# ifdef __cplusplus
}
# endif

#endif /* RSEQ_GET_ABI_OVERRIDE */
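
/*
 * A minimal override sketch using the glibc 2.35+ __rseq_offset symbol
 * (from <sys/rseq.h>) and the compiler's thread pointer builtin; an
 * illustration under those assumptions, adapt to your libc:
 *
 *	#define RSEQ_GET_ABI_OVERRIDE
 *	#include <sys/rseq.h>
 *	static inline struct rseq_abi *rseq_get_abi(void)
 *	{
 *		return (struct rseq_abi *) ((uintptr_t) __builtin_thread_pointer()
 *					    + __rseq_offset);
 *	}
 *	#include <rseq/rseq.h>
 */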

enum rseq_mo {
	RSEQ_MO_RELAXED = 0,
	RSEQ_MO_CONSUME = 1,	/* Unused */
	RSEQ_MO_ACQUIRE = 2,	/* Unused */
	RSEQ_MO_RELEASE = 3,
	RSEQ_MO_ACQ_REL = 4,	/* Unused */
	RSEQ_MO_SEQ_CST = 5,	/* Unused */
};

enum rseq_percpu_mode {
	RSEQ_PERCPU_CPU_ID = 0,
	RSEQ_PERCPU_MM_CID = 1,
};

#define rseq_likely(x)		__builtin_expect(!!(x), 1)
#define rseq_unlikely(x)	__builtin_expect(!!(x), 0)
#define rseq_barrier()		__asm__ __volatile__("" : : : "memory")

#define RSEQ_ACCESS_ONCE(x)	(*(__volatile__ __typeof__(x) *)&(x))
#define RSEQ_WRITE_ONCE(x, v)	__extension__ ({ RSEQ_ACCESS_ONCE(x) = (v); })
#define RSEQ_READ_ONCE(x)	RSEQ_ACCESS_ONCE(x)

#define __rseq_str_1(x)	#x
#define __rseq_str(x)		__rseq_str_1(x)

#define rseq_log(fmt, ...)						       \
	fprintf(stderr, fmt " (in %s() at " __FILE__ ":" __rseq_str(__LINE__) ")\n", \
		## __VA_ARGS__, __func__)

#define rseq_bug(fmt, ...)			\
	do {					\
		rseq_log(fmt, ## __VA_ARGS__);	\
		abort();			\
	} while (0)

#if defined(__x86_64__) || defined(__i386__)
#include <rseq/rseq-x86.h>
#elif defined(__ARMEL__) || defined(__ARMEB__)
#include <rseq/rseq-arm.h>
#elif defined(__AARCH64EL__)
#include <rseq/rseq-arm64.h>
#elif defined(__PPC__)
#include <rseq/rseq-ppc.h>
#elif defined(__mips__)
#include <rseq/rseq-mips.h>
#elif defined(__s390__)
#include <rseq/rseq-s390.h>
#elif defined(__riscv)
#include <rseq/rseq-riscv.h>
#else
#error unsupported target
#endif

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Register rseq for the current thread. Each thread which uses
 * restartable sequences must call this once before executing any rseq
 * critical section; a restartable sequence executed from a
 * non-registered thread will always fail.
 */
int rseq_register_current_thread(void);

/*
 * Unregister rseq for the current thread.
 */
int rseq_unregister_current_thread(void);
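
/*
 * Typical thread setup (sketch): register at thread start and check the
 * return value; a nonzero return indicates that registration failed,
 * for instance when the kernel lacks rseq support:
 *
 *	static void *worker(void *arg)
 *	{
 *		if (rseq_register_current_thread())
 *			abort();
 *		... use rseq critical sections ...
 *		if (rseq_unregister_current_thread())
 *			abort();
 *		return NULL;
 *	}
 */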

/*
 * Restartable sequence fallback for reading the current CPU number.
 */
int32_t rseq_fallback_current_cpu(void);

/*
 * Restartable sequence fallback for reading the current node number.
 */
int32_t rseq_fallback_current_node(void);

enum rseq_available_query {
	RSEQ_AVAILABLE_QUERY_KERNEL = 0,
	RSEQ_AVAILABLE_QUERY_LIBC = 1,
};

/*
 * Returns true if rseq is supported for the given query target (the
 * kernel or the libc).
 */
bool rseq_available(unsigned int query);

/*
 * Values returned can be either the current CPU number, -1 (rseq is
 * uninitialized), or -2 (rseq initialization has failed).
 */
static inline int32_t rseq_current_cpu_raw(void)
{
	return RSEQ_READ_ONCE(rseq_get_abi()->cpu_id);
}

/*
 * Returns a possible CPU number, which is typically the current CPU.
 * The returned CPU number can be used to prepare for an rseq critical
 * section, which will confirm whether the cpu number is indeed the
 * current one, and whether rseq is initialized.
 *
 * The CPU number returned by rseq_cpu_start should always be validated
 * by passing it to a rseq asm sequence, or by comparing it to the
 * return value of rseq_current_cpu_raw() if the rseq asm sequence
 * does not need to be invoked.
 */
static inline uint32_t rseq_cpu_start(void)
{
	return RSEQ_READ_ONCE(rseq_get_abi()->cpu_id_start);
}

static inline uint32_t rseq_current_cpu(void)
{
	int32_t cpu;

	cpu = rseq_current_cpu_raw();
	if (rseq_unlikely(cpu < 0))
		cpu = rseq_fallback_current_cpu();
	return cpu;
}

static inline bool rseq_node_id_available(void)
{
	return (int) rseq_feature_size >= (int) rseq_offsetofend(struct rseq_abi, node_id);
}

/*
 * Current NUMA node number.
 */
static inline uint32_t rseq_current_node_id(void)
{
	assert(rseq_node_id_available());
	return RSEQ_READ_ONCE(rseq_get_abi()->node_id);
}

static inline bool rseq_mm_cid_available(void)
{
	return (int) rseq_feature_size >= (int) rseq_offsetofend(struct rseq_abi, mm_cid);
}

static inline uint32_t rseq_current_mm_cid(void)
{
	return RSEQ_READ_ONCE(rseq_get_abi()->mm_cid);
}
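
/*
 * Callers should guard use of the newer fields on the availability
 * checks above, e.g. (sketch):
 *
 *	if (rseq_mm_cid_available())
 *		index = rseq_current_mm_cid();
 *	else
 *		index = rseq_current_cpu();
 */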

static inline void rseq_clear_rseq_cs(void)
{
	RSEQ_WRITE_ONCE(rseq_get_abi()->rseq_cs, 0);
}

/*
 * rseq_prepare_unload() should be invoked by each thread executing a rseq
 * critical section at least once between their last critical section and
 * library unload of the library defining the rseq critical section (struct
 * rseq_cs) or the code referred to by the struct rseq_cs start_ip and
 * post_commit_offset fields. This also applies to use of rseq in code
 * generated by JIT: rseq_prepare_unload() should be invoked at least once by
 * each thread executing a rseq critical section before reclaim of the memory
 * holding the struct rseq_cs or reclaim of the code pointed to by struct
 * rseq_cs start_ip and post_commit_offset fields.
 */
static inline void rseq_prepare_unload(void)
{
	rseq_clear_rseq_cs();
}
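
/*
 * For instance, before dlclose() of a shared object containing rseq
 * critical sections, each thread which ran those critical sections
 * would call rseq_prepare_unload() (sketch):
 *
 *	rseq_prepare_unload();
 *	dlclose(handle);
 */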

static inline __attribute__((always_inline))
int rseq_cmpeqv_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
		       intptr_t *v, intptr_t expect,
		       intptr_t newv, int cpu)
{
	if (rseq_mo != RSEQ_MO_RELAXED)
		return -1;
	switch (percpu_mode) {
	case RSEQ_PERCPU_CPU_ID:
		return rseq_cmpeqv_storev_relaxed_cpu_id(v, expect, newv, cpu);
	case RSEQ_PERCPU_MM_CID:
		return rseq_cmpeqv_storev_relaxed_mm_cid(v, expect, newv, cpu);
	}
	return -1;
}
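
/*
 * Sketch of a per-CPU linked-list push built on rseq_cmpeqv_storev()
 * ("head" is a hypothetical per-cpu array of list heads; the thread
 * must be registered, see rseq_register_current_thread(); retry while
 * the critical section aborts or observes a concurrent change):
 *
 *	struct node { struct node *next; };
 *	extern struct node *head[];	// one list head per CPU
 *
 *	static void percpu_push(struct node *node)
 *	{
 *		intptr_t expect;
 *		int cpu, ret;
 *
 *		do {
 *			cpu = rseq_cpu_start();
 *			expect = (intptr_t) RSEQ_READ_ONCE(head[cpu]);
 *			node->next = (struct node *) expect;
 *			ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED,
 *					RSEQ_PERCPU_CPU_ID,
 *					(intptr_t *) &head[cpu], expect,
 *					(intptr_t) node, cpu);
 *		} while (ret);
 *	}
 */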

/*
 * Compare @v against @expectnot. When it does _not_ match, load @v
 * into @load, and store into @v the value loaded from the address
 * *@v + @voffp (a typical linked-list pop).
 */
static inline __attribute__((always_inline))
int rseq_cmpnev_storeoffp_load(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
			       intptr_t *v, intptr_t expectnot, long voffp, intptr_t *load,
			       int cpu)
{
	if (rseq_mo != RSEQ_MO_RELAXED)
		return -1;
	switch (percpu_mode) {
	case RSEQ_PERCPU_CPU_ID:
		return rseq_cmpnev_storeoffp_load_relaxed_cpu_id(v, expectnot, voffp, load, cpu);
	case RSEQ_PERCPU_MM_CID:
		return rseq_cmpnev_storeoffp_load_relaxed_mm_cid(v, expectnot, voffp, load, cpu);
	}
	return -1;
}
306
307static inline __attribute__((always_inline))
308int rseq_addv(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
309 intptr_t *v, intptr_t count, int cpu)
310{
311 if (rseq_mo != RSEQ_MO_RELAXED)
312 return -1;
313 switch (percpu_mode) {
314 case RSEQ_PERCPU_CPU_ID:
315 return rseq_addv_relaxed_cpu_id(v, count, cpu);
316 case RSEQ_PERCPU_MM_CID:
317 return rseq_addv_relaxed_mm_cid(v, count, cpu);
318 }
319 return -1;
320}
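
/*
 * Sketch of a per-CPU counter increment using rseq_addv()
 * (hypothetical "counters" array sized to the CPU count; the thread
 * must be registered, see rseq_register_current_thread()):
 *
 *	extern intptr_t counters[];
 *
 *	static void inc_percpu_counter(void)
 *	{
 *		int cpu;
 *
 *		do {
 *			cpu = rseq_cpu_start();
 *		} while (rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
 *				   &counters[cpu], 1, cpu));
 *	}
 */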

#ifdef RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV
/*
 * pval = *(ptr+off)
 * *pval += inc;
 */
static inline __attribute__((always_inline))
int rseq_offset_deref_addv(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
			   intptr_t *ptr, long off, intptr_t inc, int cpu)
{
	if (rseq_mo != RSEQ_MO_RELAXED)
		return -1;
	switch (percpu_mode) {
	case RSEQ_PERCPU_CPU_ID:
		return rseq_offset_deref_addv_relaxed_cpu_id(ptr, off, inc, cpu);
	case RSEQ_PERCPU_MM_CID:
		return rseq_offset_deref_addv_relaxed_mm_cid(ptr, off, inc, cpu);
	}
	return -1;
}
#endif

static inline __attribute__((always_inline))
int rseq_cmpeqv_trystorev_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
				 intptr_t *v, intptr_t expect,
				 intptr_t *v2, intptr_t newv2,
				 intptr_t newv, int cpu)
{
	switch (rseq_mo) {
	case RSEQ_MO_RELAXED:
		switch (percpu_mode) {
		case RSEQ_PERCPU_CPU_ID:
			return rseq_cmpeqv_trystorev_storev_relaxed_cpu_id(v, expect, v2, newv2, newv, cpu);
		case RSEQ_PERCPU_MM_CID:
			return rseq_cmpeqv_trystorev_storev_relaxed_mm_cid(v, expect, v2, newv2, newv, cpu);
		}
		return -1;
	case RSEQ_MO_RELEASE:
		switch (percpu_mode) {
		case RSEQ_PERCPU_CPU_ID:
			return rseq_cmpeqv_trystorev_storev_release_cpu_id(v, expect, v2, newv2, newv, cpu);
		case RSEQ_PERCPU_MM_CID:
			return rseq_cmpeqv_trystorev_storev_release_mm_cid(v, expect, v2, newv2, newv, cpu);
		}
		return -1;
	default:
		return -1;
	}
}
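
/*
 * RSEQ_MO_RELEASE makes the final store to @v a release store, so a
 * consumer which acquire-loads @v observes the prior store to @v2.
 * Sketch (hypothetical per-cpu buffer publishing an item at "offset",
 * then incrementing the offset with release semantics):
 *
 *	ret = rseq_cmpeqv_trystorev_storev(RSEQ_MO_RELEASE,
 *			RSEQ_PERCPU_CPU_ID,
 *			(intptr_t *) &buf->offset[cpu], offset,
 *			(intptr_t *) &buf->slot[cpu][offset], (intptr_t) item,
 *			offset + 1, cpu);
 */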

static inline __attribute__((always_inline))
int rseq_cmpeqv_cmpeqv_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
			      intptr_t *v, intptr_t expect,
			      intptr_t *v2, intptr_t expect2,
			      intptr_t newv, int cpu)
{
	if (rseq_mo != RSEQ_MO_RELAXED)
		return -1;
	switch (percpu_mode) {
	case RSEQ_PERCPU_CPU_ID:
		return rseq_cmpeqv_cmpeqv_storev_relaxed_cpu_id(v, expect, v2, expect2, newv, cpu);
	case RSEQ_PERCPU_MM_CID:
		return rseq_cmpeqv_cmpeqv_storev_relaxed_mm_cid(v, expect, v2, expect2, newv, cpu);
	}
	return -1;
}

static inline __attribute__((always_inline))
int rseq_cmpeqv_trymemcpy_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
				 intptr_t *v, intptr_t expect,
				 void *dst, void *src, size_t len,
				 intptr_t newv, int cpu)
{
	switch (rseq_mo) {
	case RSEQ_MO_RELAXED:
		switch (percpu_mode) {
		case RSEQ_PERCPU_CPU_ID:
			return rseq_cmpeqv_trymemcpy_storev_relaxed_cpu_id(v, expect, dst, src, len, newv, cpu);
		case RSEQ_PERCPU_MM_CID:
			return rseq_cmpeqv_trymemcpy_storev_relaxed_mm_cid(v, expect, dst, src, len, newv, cpu);
		}
		return -1;
	case RSEQ_MO_RELEASE:
		switch (percpu_mode) {
		case RSEQ_PERCPU_CPU_ID:
			return rseq_cmpeqv_trymemcpy_storev_release_cpu_id(v, expect, dst, src, len, newv, cpu);
		case RSEQ_PERCPU_MM_CID:
			return rseq_cmpeqv_trymemcpy_storev_release_mm_cid(v, expect, dst, src, len, newv, cpu);
		}
		return -1;
	default:
		return -1;
	}
}

#ifdef __cplusplus
}
#endif

#endif /* RSEQ_H */