rseq: Introduce rseq_get_max_nr_cpus
[librseq.git] / include / rseq / rseq.h
CommitLineData
90702366 1/* SPDX-License-Identifier: MIT */
f2d7b530
MJ
2/* SPDX-FileCopyrightText: 2016-2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */
3
784b0012 4/*
44ec21eb 5 * rseq/rseq.h
784b0012
MD
6 */
7
44ec21eb
MJ
8#ifndef _RSEQ_RSEQ_H
9#define _RSEQ_RSEQ_H
784b0012
MD
10
11#include <stdint.h>
12#include <stdbool.h>
13#include <pthread.h>
14#include <signal.h>
15#include <sched.h>
16#include <errno.h>
17#include <stdio.h>
18#include <stdlib.h>
170f840b 19#include <stddef.h>
d2fa6d30 20#include <assert.h>
784b0012 21
44ec21eb
MJ
22#include <rseq/abi.h>
23#include <rseq/compiler.h>
24#include <rseq/inject.h>
25#include <rseq/thread-pointer.h>
26#include <rseq/utils.h>
784b0012 27
44ec21eb
MJ
28enum rseq_mo {
29 RSEQ_MO_RELAXED = 0,
30 RSEQ_MO_CONSUME = 1, /* Unused */
31 RSEQ_MO_ACQUIRE = 2, /* Unused */
32 RSEQ_MO_RELEASE = 3,
33 RSEQ_MO_ACQ_REL = 4, /* Unused */
34 RSEQ_MO_SEQ_CST = 5, /* Unused */
35};
784b0012 36
44ec21eb
MJ
37enum rseq_percpu_mode {
38 RSEQ_PERCPU_CPU_ID = 0,
39 RSEQ_PERCPU_MM_CID = 1,
40};
784b0012 41
44ec21eb
MJ
42enum rseq_available_query {
43 RSEQ_AVAILABLE_QUERY_KERNEL = 0,
44 RSEQ_AVAILABLE_QUERY_LIBC = 1,
45};
784b0012 46
96b6ce39
MD
47/*
48 * User code can define RSEQ_GET_ABI_OVERRIDE to override the
49 * rseq_get_abi() implementation, for instance to use glibc's symbols
50 * directly.
51 */
52#ifndef RSEQ_GET_ABI_OVERRIDE
53
96b6ce39 54# ifdef __cplusplus
60a27517 55extern "C" {
96b6ce39 56# endif
60a27517 57
baa98a34 58/* Offset from the thread pointer to the rseq area. */
170f840b 59extern ptrdiff_t rseq_offset;
baa98a34
MD
60
61/*
62 * Size of the registered rseq area. 0 if the registration was
63 * unsuccessful.
64 */
9698c399 65extern unsigned int rseq_size;
baa98a34
MD
66
67/* Flags used during rseq registration. */
9698c399 68extern unsigned int rseq_flags;
784b0012 69
baa98a34
MD
70/*
71 * rseq feature size supported by the kernel. 0 if the registration was
72 * unsuccessful.
73 */
74extern unsigned int rseq_feature_size;
75
44ec21eb
MJ
76/*
77 * Returns a pointer to the rseq area.
78 */
79static inline __attribute__((always_inline))
80struct rseq_abi *rseq_get_abi(void)
96b6ce39 81{
2d533093 82 return (struct rseq_abi *) ((uintptr_t) rseq_thread_pointer() + rseq_offset);
60a27517 83}
96b6ce39
MD
84
85# ifdef __cplusplus
86}
87# endif
88
89#endif /* RSEQ_GET_ABI_OVERRIDE */
60a27517 90
809f5ee3 91
44ec21eb
MJ
92/*
93 * Architecture specific.
94 */
95#include <rseq/arch.h>
809f5ee3 96
784b0012 97
15260018 98#ifdef __cplusplus
60a27517
MG
99extern "C" {
100#endif
101
784b0012
MD
102/*
103 * Register rseq for the current thread. This needs to be called once
104 * by any thread which uses restartable sequences, before they start
105 * using restartable sequences, to ensure restartable sequences
106 * succeed. A restartable sequence executed from a non-registered
107 * thread will always fail.
108 */
109int rseq_register_current_thread(void);
110
111/*
112 * Unregister rseq for current thread.
113 */
114int rseq_unregister_current_thread(void);
115
116/*
117 * Restartable sequence fallback for reading the current CPU number.
118 */
119int32_t rseq_fallback_current_cpu(void);
120
baa98a34
MD
121/*
122 * Restartable sequence fallback for reading the current node number.
123 */
124int32_t rseq_fallback_current_node(void);
125
8b34114a
MD
126/*
127 * Returns true if rseq is supported.
128 */
129bool rseq_available(unsigned int query);
52e82b87 130
47c725dd
MD
/*
 * rseq_get_max_nr_cpus: Get the max_nr_cpus auto-detected value.
 *
 * Returns the maximum number of CPUs auto-detected by the library.
 * This is the value used internally when an API taking a max_nr_cpus
 * argument (e.g. pool creation) is invoked with max_nr_cpus=0.
 */
137int rseq_get_max_nr_cpus(void);
138
784b0012
MD
139/*
140 * Values returned can be either the current CPU number, -1 (rseq is
141 * uninitialized), or -2 (rseq initialization has failed).
142 */
44ec21eb
MJ
143static inline __attribute__((always_inline))
144int32_t rseq_current_cpu_raw(void)
784b0012 145{
9698c399 146 return RSEQ_READ_ONCE(rseq_get_abi()->cpu_id);
784b0012
MD
147}
148
149/*
150 * Returns a possible CPU number, which is typically the current CPU.
151 * The returned CPU number can be used to prepare for an rseq critical
152 * section, which will confirm whether the cpu number is indeed the
153 * current one, and whether rseq is initialized.
154 *
155 * The CPU number returned by rseq_cpu_start should always be validated
156 * by passing it to a rseq asm sequence, or by comparing it to the
157 * return value of rseq_current_cpu_raw() if the rseq asm sequence
158 * does not need to be invoked.
159 */
44ec21eb
MJ
160static inline __attribute__((always_inline))
161uint32_t rseq_cpu_start(void)
784b0012 162{
9698c399 163 return RSEQ_READ_ONCE(rseq_get_abi()->cpu_id_start);
784b0012
MD
164}
165
44ec21eb
MJ
/*
 * Returns the current CPU number, using rseq_fallback_current_cpu()
 * when the rseq area reports an uninitialized or failed state
 * (negative cpu_id).
 */
static inline __attribute__((always_inline))
uint32_t rseq_current_cpu(void)
{
	int32_t cpu_num = rseq_current_cpu_raw();

	if (rseq_unlikely(cpu_num < 0))
		return rseq_fallback_current_cpu();
	return cpu_num;
}
176
44ec21eb
MJ
177static inline __attribute__((always_inline))
178bool rseq_node_id_available(void)
d2fa6d30
MD
179{
180 return (int) rseq_feature_size >= (int) rseq_offsetofend(struct rseq_abi, node_id);
181}
182
183/*
184 * Current NUMA node number.
185 */
44ec21eb
MJ
186static inline __attribute__((always_inline))
187uint32_t rseq_current_node_id(void)
d2fa6d30
MD
188{
189 assert(rseq_node_id_available());
190 return RSEQ_READ_ONCE(rseq_get_abi()->node_id);
191}
192
44ec21eb
MJ
193static inline __attribute__((always_inline))
194bool rseq_mm_cid_available(void)
d2fa6d30
MD
195{
196 return (int) rseq_feature_size >= (int) rseq_offsetofend(struct rseq_abi, mm_cid);
197}
198
44ec21eb
MJ
199static inline __attribute__((always_inline))
200uint32_t rseq_current_mm_cid(void)
d2fa6d30
MD
201{
202 return RSEQ_READ_ONCE(rseq_get_abi()->mm_cid);
203}
204
44ec21eb
MJ
205static inline __attribute__((always_inline))
206void rseq_clear_rseq_cs(void)
784b0012 207{
5b40603c 208 RSEQ_WRITE_ONCE(rseq_get_abi()->rseq_cs.arch.ptr, 0);
784b0012
MD
209}
210
/*
 * rseq_prepare_unload() should be invoked by each thread executing a rseq
 * critical section at least once between their last critical section and
 * library unload of the library defining the rseq critical section (struct
 * rseq_cs) or the code referred to by the struct rseq_cs start_ip and
 * post_commit_offset fields. This also applies to use of rseq in code
 * generated by JIT: rseq_prepare_unload() should be invoked at least once by
 * each thread executing a rseq critical section before reclaim of the memory
 * holding the struct rseq_cs or reclaim of the code pointed to by struct
 * rseq_cs start_ip and post_commit_offset fields.
 *
 * Implemented by clearing the thread's rseq_cs pointer.
 */
static inline __attribute__((always_inline))
void rseq_prepare_unload(void)
{
	rseq_clear_rseq_cs();
}
227
201c1a2a 228/*
44ec21eb 229 * Refer to rseq/pseudocode.h for documentation and pseudo-code of the
201c1a2a
MD
230 * rseq critical section helpers.
231 */
44ec21eb 232#include "rseq/pseudocode.h"
201c1a2a 233
809f5ee3 234static inline __attribute__((always_inline))
41149e28 235int rseq_load_cbne_store__ptr(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
809f5ee3
MD
236 intptr_t *v, intptr_t expect,
237 intptr_t newv, int cpu)
238{
239 if (rseq_mo != RSEQ_MO_RELAXED)
240 return -1;
241 switch (percpu_mode) {
242 case RSEQ_PERCPU_CPU_ID:
41149e28 243 return rseq_load_cbne_store__ptr_relaxed_cpu_id(v, expect, newv, cpu);
809f5ee3 244 case RSEQ_PERCPU_MM_CID:
41149e28 245 return rseq_load_cbne_store__ptr_relaxed_mm_cid(v, expect, newv, cpu);
95dbaeba
GK
246 default:
247 return -1;
809f5ee3 248 }
809f5ee3
MD
249}
250
809f5ee3 251static inline __attribute__((always_inline))
41149e28 252int rseq_load_cbeq_store_add_load_store__ptr(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
809f5ee3
MD
253 intptr_t *v, intptr_t expectnot, long voffp, intptr_t *load,
254 int cpu)
255{
256 if (rseq_mo != RSEQ_MO_RELAXED)
257 return -1;
258 switch (percpu_mode) {
259 case RSEQ_PERCPU_CPU_ID:
41149e28 260 return rseq_load_cbeq_store_add_load_store__ptr_relaxed_cpu_id(v, expectnot, voffp, load, cpu);
809f5ee3 261 case RSEQ_PERCPU_MM_CID:
41149e28 262 return rseq_load_cbeq_store_add_load_store__ptr_relaxed_mm_cid(v, expectnot, voffp, load, cpu);
95dbaeba
GK
263 default:
264 return -1;
809f5ee3 265 }
809f5ee3
MD
266}
267
268static inline __attribute__((always_inline))
41149e28 269int rseq_load_add_store__ptr(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
809f5ee3
MD
270 intptr_t *v, intptr_t count, int cpu)
271{
272 if (rseq_mo != RSEQ_MO_RELAXED)
273 return -1;
274 switch (percpu_mode) {
275 case RSEQ_PERCPU_CPU_ID:
41149e28 276 return rseq_load_add_store__ptr_relaxed_cpu_id(v, count, cpu);
809f5ee3 277 case RSEQ_PERCPU_MM_CID:
41149e28 278 return rseq_load_add_store__ptr_relaxed_mm_cid(v, count, cpu);
95dbaeba
GK
279 default:
280 return -1;
809f5ee3 281 }
809f5ee3
MD
282}
283
#ifdef rseq_arch_has_load_add_load_load_add_store
/*
 * Dispatch the load-add-load-load-add-store operation on a
 * pointer-sized word to the arch-specific implementation selected by
 * percpu_mode. Only available when the architecture provides it (see
 * the guarding #ifdef). Only RSEQ_MO_RELAXED is supported; -1 is
 * returned for any other memory ordering or unknown per-cpu mode.
 */
static inline __attribute__((always_inline))
int rseq_load_add_load_load_add_store__ptr(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
					   intptr_t *ptr, long off, intptr_t inc, int cpu)
{
	if (rseq_mo != RSEQ_MO_RELAXED)
		return -1;
	if (percpu_mode == RSEQ_PERCPU_CPU_ID)
		return rseq_load_add_load_load_add_store__ptr_relaxed_cpu_id(ptr, off, inc, cpu);
	if (percpu_mode == RSEQ_PERCPU_MM_CID)
		return rseq_load_add_load_load_add_store__ptr_relaxed_mm_cid(ptr, off, inc, cpu);
	return -1;
}
#endif
301
302static inline __attribute__((always_inline))
41149e28 303int rseq_load_cbne_store_store__ptr(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
809f5ee3
MD
304 intptr_t *v, intptr_t expect,
305 intptr_t *v2, intptr_t newv2,
306 intptr_t newv, int cpu)
307{
308 switch (rseq_mo) {
309 case RSEQ_MO_RELAXED:
310 switch (percpu_mode) {
311 case RSEQ_PERCPU_CPU_ID:
41149e28 312 return rseq_load_cbne_store_store__ptr_relaxed_cpu_id(v, expect, v2, newv2, newv, cpu);
809f5ee3 313 case RSEQ_PERCPU_MM_CID:
41149e28 314 return rseq_load_cbne_store_store__ptr_relaxed_mm_cid(v, expect, v2, newv2, newv, cpu);
95dbaeba
GK
315 default:
316 return -1;
809f5ee3 317 }
809f5ee3
MD
318 case RSEQ_MO_RELEASE:
319 switch (percpu_mode) {
320 case RSEQ_PERCPU_CPU_ID:
41149e28 321 return rseq_load_cbne_store_store__ptr_release_cpu_id(v, expect, v2, newv2, newv, cpu);
809f5ee3 322 case RSEQ_PERCPU_MM_CID:
41149e28 323 return rseq_load_cbne_store_store__ptr_release_mm_cid(v, expect, v2, newv2, newv, cpu);
95dbaeba
GK
324 default:
325 return -1;
809f5ee3 326 }
8dd73cf9
GK
327 case RSEQ_MO_ACQUIRE: /* Fallthrough */
328 case RSEQ_MO_ACQ_REL: /* Fallthrough */
329 case RSEQ_MO_CONSUME: /* Fallthrough */
330 case RSEQ_MO_SEQ_CST: /* Fallthrough */
809f5ee3
MD
331 default:
332 return -1;
333 }
334}
335
336static inline __attribute__((always_inline))
41149e28 337int rseq_load_cbne_load_cbne_store__ptr(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
809f5ee3
MD
338 intptr_t *v, intptr_t expect,
339 intptr_t *v2, intptr_t expect2,
340 intptr_t newv, int cpu)
341{
342 if (rseq_mo != RSEQ_MO_RELAXED)
343 return -1;
344 switch (percpu_mode) {
345 case RSEQ_PERCPU_CPU_ID:
41149e28 346 return rseq_load_cbne_load_cbne_store__ptr_relaxed_cpu_id(v, expect, v2, expect2, newv, cpu);
809f5ee3 347 case RSEQ_PERCPU_MM_CID:
41149e28 348 return rseq_load_cbne_load_cbne_store__ptr_relaxed_mm_cid(v, expect, v2, expect2, newv, cpu);
95dbaeba
GK
349 default:
350 return -1;
809f5ee3 351 }
809f5ee3
MD
352}
353
354static inline __attribute__((always_inline))
41149e28 355int rseq_load_cbne_memcpy_store__ptr(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
809f5ee3
MD
356 intptr_t *v, intptr_t expect,
357 void *dst, void *src, size_t len,
358 intptr_t newv, int cpu)
359{
360 switch (rseq_mo) {
361 case RSEQ_MO_RELAXED:
362 switch (percpu_mode) {
363 case RSEQ_PERCPU_CPU_ID:
41149e28 364 return rseq_load_cbne_memcpy_store__ptr_relaxed_cpu_id(v, expect, dst, src, len, newv, cpu);
809f5ee3 365 case RSEQ_PERCPU_MM_CID:
41149e28 366 return rseq_load_cbne_memcpy_store__ptr_relaxed_mm_cid(v, expect, dst, src, len, newv, cpu);
95dbaeba
GK
367 default:
368 return -1;
809f5ee3 369 }
809f5ee3
MD
370 case RSEQ_MO_RELEASE:
371 switch (percpu_mode) {
372 case RSEQ_PERCPU_CPU_ID:
41149e28 373 return rseq_load_cbne_memcpy_store__ptr_release_cpu_id(v, expect, dst, src, len, newv, cpu);
809f5ee3 374 case RSEQ_PERCPU_MM_CID:
41149e28 375 return rseq_load_cbne_memcpy_store__ptr_release_mm_cid(v, expect, dst, src, len, newv, cpu);
95dbaeba
GK
376 default:
377 return -1;
809f5ee3 378 }
8dd73cf9
GK
379 case RSEQ_MO_ACQUIRE: /* Fallthrough */
380 case RSEQ_MO_ACQ_REL: /* Fallthrough */
381 case RSEQ_MO_CONSUME: /* Fallthrough */
382 case RSEQ_MO_SEQ_CST: /* Fallthrough */
809f5ee3
MD
383 default:
384 return -1;
385 }
386}
387
15260018 388#ifdef __cplusplus
60a27517
MG
389}
390#endif
391
44ec21eb 392#endif /* _RSEQ_RSEQ_H */
This page took 0.043666 seconds and 4 git commands to generate.