fix: compiler warning `-Wswitch-enum`
diff --git a/include/rseq/rseq.h b/include/rseq/rseq.h
index 36483a7956f82b4b4295cbdcaffc7c94cb65959d..080508e2988de275367e101a8d571398fc72cfa1 100644
--- a/include/rseq/rseq.h
+++ b/include/rseq/rseq.h
@@ -1,8 +1,8 @@
-/* SPDX-License-Identifier: LGPL-2.1-only OR MIT */
+/* SPDX-License-Identifier: MIT */
+/* SPDX-FileCopyrightText: 2016-2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */
+
 /*
  * rseq.h
- *
- * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  */
 
 #ifndef RSEQ_H
 #include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <sched.h>
-#include <linux/rseq.h>
+#include <stddef.h>
+#include <assert.h>
+#include <rseq/rseq-abi.h>
+#include <rseq/compiler.h>
+
+#ifndef rseq_sizeof_field
+#define rseq_sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+#endif
+
+#ifndef rseq_offsetofend
+#define rseq_offsetofend(TYPE, MEMBER) \
+       (offsetof(TYPE, MEMBER) + rseq_sizeof_field(TYPE, MEMBER))
+#endif
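
(Annotation, illustration only.) rseq_offsetofend() yields the offset one past the end of a member; the feature checks introduced further down compare it against rseq_feature_size to tell whether the kernel-registered rseq area extends far enough to cover a given field:

	/* True iff the registered feature size reaches past node_id. */
	bool node_id_covered = rseq_feature_size >=
			       rseq_offsetofend(struct rseq_abi, node_id);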
 
 /*
  * Empty code injection macros, override when testing.
 #define RSEQ_INJECT_FAILED
 #endif
 
-extern __thread struct rseq __rseq_abi;
-extern int __rseq_handled;
+/*
+ * User code can define RSEQ_GET_ABI_OVERRIDE to override the
+ * rseq_get_abi() implementation, for instance to use glibc's symbols
+ * directly.
+ */
+#ifndef RSEQ_GET_ABI_OVERRIDE
+
+# include <rseq/rseq-thread-pointer.h>
+
+# ifdef __cplusplus
+extern "C" {
+# endif
+
+/* Offset from the thread pointer to the rseq area. */
+extern ptrdiff_t rseq_offset;
+
+/*
+ * Size of the registered rseq area. 0 if the registration was
+ * unsuccessful.
+ */
+extern unsigned int rseq_size;
+
+/* Flags used during rseq registration. */
+extern unsigned int rseq_flags;
+
+/*
+ * rseq feature size supported by the kernel. 0 if the registration was
+ * unsuccessful.
+ */
+extern unsigned int rseq_feature_size;
+
+static inline struct rseq_abi *rseq_get_abi(void)
+{
+       return (struct rseq_abi *) ((uintptr_t) rseq_thread_pointer() + rseq_offset);
+}
+
+# ifdef __cplusplus
+}
+# endif
+
+#endif /* RSEQ_GET_ABI_OVERRIDE */
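
As a sketch of the override hook (illustration only, not part of this patch): a consumer built against glibc 2.35 or newer could define RSEQ_GET_ABI_OVERRIDE and resolve the rseq area through the __rseq_offset symbol that glibc exports, providing its own rseq_get_abi() before including this header:

	#define RSEQ_GET_ABI_OVERRIDE
	#include <stdint.h>
	#include <sys/rseq.h>			/* glibc >= 2.35: __rseq_offset */
	#include <rseq/rseq-abi.h>
	#include <rseq/rseq-thread-pointer.h>

	static inline struct rseq_abi *rseq_get_abi(void)
	{
		/* glibc publishes the area's offset from the thread pointer. */
		return (struct rseq_abi *) ((uintptr_t) rseq_thread_pointer()
					    + __rseq_offset);
	}

	#include <rseq/rseq.h>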
+
+enum rseq_mo {
+       RSEQ_MO_RELAXED = 0,
+       RSEQ_MO_CONSUME = 1,    /* Unused */
+       RSEQ_MO_ACQUIRE = 2,    /* Unused */
+       RSEQ_MO_RELEASE = 3,
+       RSEQ_MO_ACQ_REL = 4,    /* Unused */
+       RSEQ_MO_SEQ_CST = 5,    /* Unused */
+};
+
+enum rseq_percpu_mode {
+       RSEQ_PERCPU_CPU_ID = 0,
+       RSEQ_PERCPU_MM_CID = 1,
+};
 
 #define rseq_likely(x)         __builtin_expect(!!(x), 1)
 #define rseq_unlikely(x)       __builtin_expect(!!(x), 0)
@@ -58,13 +122,13 @@ extern int __rseq_handled;
 #define __rseq_str_1(x)        #x
 #define __rseq_str(x)          __rseq_str_1(x)
 
-#define rseq_log(fmt, args...)                                                \
+#define rseq_log(fmt, ...)                                                    \
        fprintf(stderr, fmt "(in %s() at " __FILE__ ":" __rseq_str(__LINE__)"\n", \
-               ## args, __func__)
+               ## __VA_ARGS__, __func__)
 
-#define rseq_bug(fmt, args...)         \
+#define rseq_bug(fmt, ...)             \
        do {                            \
-               rseq_log(fmt, ##args);  \
+               rseq_log(fmt, ## __VA_ARGS__); \
                abort();                \
        } while (0)
 
@@ -80,10 +144,16 @@ extern int __rseq_handled;
 #include <rseq/rseq-mips.h>
 #elif defined(__s390__)
 #include <rseq/rseq-s390.h>
+#elif defined(__riscv)
+#include <rseq/rseq-riscv.h>
 #else
 #error unsupported target
 #endif
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /*
  * Register rseq for the current thread. This needs to be called once
  * by any thread which uses restartable sequences, before they start
@@ -103,7 +173,20 @@ int rseq_unregister_current_thread(void);
  */
 int32_t rseq_fallback_current_cpu(void);
 
-int rseq_available(void);
+/*
+ * Restartable sequence fallback for reading the current node number.
+ */
+int32_t rseq_fallback_current_node(void);
+
+enum rseq_available_query {
+       RSEQ_AVAILABLE_QUERY_KERNEL = 0,
+       RSEQ_AVAILABLE_QUERY_LIBC = 1,
+};
+
+/*
+ * Returns true if rseq is supported for the given @query
+ * (RSEQ_AVAILABLE_QUERY_KERNEL or RSEQ_AVAILABLE_QUERY_LIBC).
+ */
+bool rseq_available(unsigned int query);
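
Call-site sketch (illustration only):

	if (!rseq_available(RSEQ_AVAILABLE_QUERY_KERNEL))
		fprintf(stderr, "rseq is not supported by this kernel\n");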
 
 /*
  * Values returned can be either the current CPU number, -1 (rseq is
@@ -111,7 +194,7 @@ int rseq_available(void);
  */
 static inline int32_t rseq_current_cpu_raw(void)
 {
-       return RSEQ_READ_ONCE(__rseq_abi.cpu_id);
+       return RSEQ_READ_ONCE(rseq_get_abi()->cpu_id);
 }
 
 /*
@@ -127,7 +210,7 @@ static inline int32_t rseq_current_cpu_raw(void)
  */
 static inline uint32_t rseq_cpu_start(void)
 {
-       return RSEQ_READ_ONCE(__rseq_abi.cpu_id_start);
+       return RSEQ_READ_ONCE(rseq_get_abi()->cpu_id_start);
 }
 
 static inline uint32_t rseq_current_cpu(void)
@@ -140,27 +223,215 @@ static inline uint32_t rseq_current_cpu(void)
        return cpu;
 }
 
+static inline bool rseq_node_id_available(void)
+{
+       return (int) rseq_feature_size >= (int) rseq_offsetofend(struct rseq_abi, node_id);
+}
+
+/*
+ * Current NUMA node number.
+ */
+static inline uint32_t rseq_current_node_id(void)
+{
+       assert(rseq_node_id_available());
+       return RSEQ_READ_ONCE(rseq_get_abi()->node_id);
+}
+
+static inline bool rseq_mm_cid_available(void)
+{
+       return (int) rseq_feature_size >= (int) rseq_offsetofend(struct rseq_abi, mm_cid);
+}
+
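+/*
+ * Current memory map concurrency ID (mm_cid).
+ */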
+static inline uint32_t rseq_current_mm_cid(void)
+{
+       return RSEQ_READ_ONCE(rseq_get_abi()->mm_cid);
+}
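
Usage sketch (illustration only; index is a hypothetical variable): mm_cid values are compact, which makes them suitable for indexing per-cpu-style data, with the CPU number as a fallback on kernels without the mm_cid feature:

	if (rseq_mm_cid_available())
		index = rseq_current_mm_cid();
	else
		index = rseq_current_cpu();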
+
 static inline void rseq_clear_rseq_cs(void)
 {
-#ifdef __LP64__
-       RSEQ_WRITE_ONCE(__rseq_abi.rseq_cs.ptr, 0);
-#else
-       RSEQ_WRITE_ONCE(__rseq_abi.rseq_cs.ptr.ptr32, 0);
-#endif
+       RSEQ_WRITE_ONCE(rseq_get_abi()->rseq_cs.arch.ptr, 0);
 }
 
 /*
  * rseq_prepare_unload() should be invoked by each thread executing a rseq
  * critical section at least once between their last critical section and
- * library unload of the library defining the rseq critical section
- * (struct rseq_cs). This also applies to use of rseq in code generated by
- * JIT: rseq_prepare_unload() should be invoked at least once by each
- * thread executing a rseq critical section before reclaim of the memory
- * holding the struct rseq_cs.
+ * library unload of the library defining the rseq critical section (struct
+ * rseq_cs) or the code referred to by the struct rseq_cs start_ip and
+ * post_commit_offset fields. This also applies to use of rseq in code
+ * generated by JIT: rseq_prepare_unload() should be invoked at least once by
+ * each thread executing a rseq critical section before reclaim of the memory
+ * holding the struct rseq_cs or reclaim of the code pointed to by struct
+ * rseq_cs start_ip and post_commit_offset fields.
  */
 static inline void rseq_prepare_unload(void)
 {
        rseq_clear_rseq_cs();
 }
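
For example (illustration only; handle is a hypothetical dlopen() result), a thread whose last rseq critical section lives in a shared object about to be unloaded would do:

	rseq_prepare_unload();	/* clear this thread's rseq_cs pointer */
	dlclose(handle);	/* the struct rseq_cs and code may now be unmapped */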
 
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
+                      intptr_t *v, intptr_t expect,
+                      intptr_t newv, int cpu)
+{
+       if (rseq_mo != RSEQ_MO_RELAXED)
+               return -1;
+       switch (percpu_mode) {
+       case RSEQ_PERCPU_CPU_ID:
+               return rseq_cmpeqv_storev_relaxed_cpu_id(v, expect, newv, cpu);
+       case RSEQ_PERCPU_MM_CID:
+               return rseq_cmpeqv_storev_relaxed_mm_cid(v, expect, newv, cpu);
+       default:
+               return -1;
+       }
+}
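
Usage sketch (illustration only; percpu_counter_inc() and counters are hypothetical): the typical pattern reads the current per-cpu slot, then retries until the compare-and-store commits without being aborted by preemption, migration, or signal delivery:

	static void percpu_counter_inc(intptr_t *counters)	/* one slot per cpu */
	{
		int ret;

		do {
			int cpu = (int) rseq_current_cpu();	/* getcpu() fallback if unregistered */
			intptr_t old = RSEQ_READ_ONCE(counters[cpu]);

			ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
						 &counters[cpu], old, old + 1, cpu);
		} while (rseq_unlikely(ret));	/* aborted or value changed: retry */
	}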
+
+/*
+ * Compare @v against @expectnot. When it does _not_ match, load @v
+ * into @load, and store the content of *@v + voffp into @v.
+ */
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
+                              intptr_t *v, intptr_t expectnot, long voffp, intptr_t *load,
+                              int cpu)
+{
+       if (rseq_mo != RSEQ_MO_RELAXED)
+               return -1;
+       switch (percpu_mode) {
+       case RSEQ_PERCPU_CPU_ID:
+               return rseq_cmpnev_storeoffp_load_relaxed_cpu_id(v, expectnot, voffp, load, cpu);
+       case RSEQ_PERCPU_MM_CID:
+               return rseq_cmpnev_storeoffp_load_relaxed_mm_cid(v, expectnot, voffp, load, cpu);
+       default:
+               return -1;
+       }
+}
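
Sketch of a per-cpu list pop built on this primitive (illustration only; struct node, list and head are hypothetical): with expectnot = NULL the operation fails on an empty list; otherwise it loads the old head into @head and replaces the list head with head->next in one restartable step:

	struct node { struct node *next; };

	/* On success, head holds the popped element. */
	ret = rseq_cmpnev_storeoffp_load(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
					 (intptr_t *) &list->head, (intptr_t) NULL,
					 offsetof(struct node, next),
					 (intptr_t *) &head, cpu);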
+
+static inline __attribute__((always_inline))
+int rseq_addv(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
+             intptr_t *v, intptr_t count, int cpu)
+{
+       if (rseq_mo != RSEQ_MO_RELAXED)
+               return -1;
+       switch (percpu_mode) {
+       case RSEQ_PERCPU_CPU_ID:
+               return rseq_addv_relaxed_cpu_id(v, count, cpu);
+       case RSEQ_PERCPU_MM_CID:
+               return rseq_addv_relaxed_mm_cid(v, count, cpu);
+       default:
+               return -1;
+       }
+}
+
+#ifdef RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV
+/*
+ *   pval = *(ptr + off);
+ *   *pval += inc;
+ */
+static inline __attribute__((always_inline))
+int rseq_offset_deref_addv(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
+                          intptr_t *ptr, long off, intptr_t inc, int cpu)
+{
+       if (rseq_mo != RSEQ_MO_RELAXED)
+               return -1;
+       switch (percpu_mode) {
+       case RSEQ_PERCPU_CPU_ID:
+               return rseq_offset_deref_addv_relaxed_cpu_id(ptr, off, inc, cpu);
+       case RSEQ_PERCPU_MM_CID:
+               return rseq_offset_deref_addv_relaxed_mm_cid(ptr, off, inc, cpu);
+       default:
+               return -1;
+       }
+}
+#endif
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
+                                intptr_t *v, intptr_t expect,
+                                intptr_t *v2, intptr_t newv2,
+                                intptr_t newv, int cpu)
+{
+       switch (rseq_mo) {
+       case RSEQ_MO_RELAXED:
+               switch (percpu_mode) {
+               case RSEQ_PERCPU_CPU_ID:
+                       return rseq_cmpeqv_trystorev_storev_relaxed_cpu_id(v, expect, v2, newv2, newv, cpu);
+               case RSEQ_PERCPU_MM_CID:
+                       return rseq_cmpeqv_trystorev_storev_relaxed_mm_cid(v, expect, v2, newv2, newv, cpu);
+               default:
+                       return -1;
+               }
+       case RSEQ_MO_RELEASE:
+               switch (percpu_mode) {
+               case RSEQ_PERCPU_CPU_ID:
+                       return rseq_cmpeqv_trystorev_storev_release_cpu_id(v, expect, v2, newv2, newv, cpu);
+               case RSEQ_PERCPU_MM_CID:
+                       return rseq_cmpeqv_trystorev_storev_release_mm_cid(v, expect, v2, newv2, newv, cpu);
+               default:
+                       return -1;
+               }
+       case RSEQ_MO_ACQUIRE:   /* Fallthrough */
+       case RSEQ_MO_ACQ_REL:   /* Fallthrough */
+       case RSEQ_MO_CONSUME:   /* Fallthrough */
+       case RSEQ_MO_SEQ_CST:   /* Fallthrough */
+       default:
+               return -1;
+       }
+}
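
The RSEQ_MO_RELEASE variant suits publication patterns, e.g. a per-cpu list push where the node is linked before the new head becomes visible (illustration only; list, node and oldhead are hypothetical):

	/* v2/newv2 link the node; the final store publishes it with release. */
	ret = rseq_cmpeqv_trystorev_storev(RSEQ_MO_RELEASE, RSEQ_PERCPU_CPU_ID,
					   (intptr_t *) &list->head, (intptr_t) oldhead,
					   (intptr_t *) &node->next, (intptr_t) oldhead,
					   (intptr_t) node, cpu);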
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
+                             intptr_t *v, intptr_t expect,
+                             intptr_t *v2, intptr_t expect2,
+                             intptr_t newv, int cpu)
+{
+       if (rseq_mo != RSEQ_MO_RELAXED)
+               return -1;
+       switch (percpu_mode) {
+       case RSEQ_PERCPU_CPU_ID:
+               return rseq_cmpeqv_cmpeqv_storev_relaxed_cpu_id(v, expect, v2, expect2, newv, cpu);
+       case RSEQ_PERCPU_MM_CID:
+               return rseq_cmpeqv_cmpeqv_storev_relaxed_mm_cid(v, expect, v2, expect2, newv, cpu);
+       default:
+               return -1;
+       }
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
+                                intptr_t *v, intptr_t expect,
+                                void *dst, void *src, size_t len,
+                                intptr_t newv, int cpu)
+{
+       switch (rseq_mo) {
+       case RSEQ_MO_RELAXED:
+               switch (percpu_mode) {
+               case RSEQ_PERCPU_CPU_ID:
+                       return rseq_cmpeqv_trymemcpy_storev_relaxed_cpu_id(v, expect, dst, src, len, newv, cpu);
+               case RSEQ_PERCPU_MM_CID:
+                       return rseq_cmpeqv_trymemcpy_storev_relaxed_mm_cid(v, expect, dst, src, len, newv, cpu);
+               default:
+                       return -1;
+               }
+       case RSEQ_MO_RELEASE:
+               switch (percpu_mode) {
+               case RSEQ_PERCPU_CPU_ID:
+                       return rseq_cmpeqv_trymemcpy_storev_release_cpu_id(v, expect, dst, src, len, newv, cpu);
+               case RSEQ_PERCPU_MM_CID:
+                       return rseq_cmpeqv_trymemcpy_storev_release_mm_cid(v, expect, dst, src, len, newv, cpu);
+               default:
+                       return -1;
+               }
+       case RSEQ_MO_ACQUIRE:   /* Fallthrough */
+       case RSEQ_MO_ACQ_REL:   /* Fallthrough */
+       case RSEQ_MO_CONSUME:   /* Fallthrough */
+       case RSEQ_MO_SEQ_CST:   /* Fallthrough */
+       default:
+               return -1;
+       }
+}
+
+#ifdef __cplusplus
+}
+#endif
+
 #endif  /* RSEQ_H_ */