1 /* SPDX-License-Identifier: MIT */
2 /* SPDX-FileCopyrightText: 2016-2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */
9 #error "Never use <rseq/arch/x86.h> directly; include <rseq/rseq.h> instead."
/*
 * RSEQ_ASM_*() macro helpers are internal to the librseq headers. Those
 * are not part of the public API.
 */
/*
 * RSEQ_SIG is used with the following reserved undefined instructions, which
 * trap in user-space:
 *
 * x86-32: 0f b9 3d 53 30 05 53 ud1 0x53053053,%edi
 * x86-64: 0f b9 3d 53 30 05 53 ud1 0x53053053(%rip),%edi
 *
 * The last four opcode bytes (53 30 05 53) are the little-endian encoding
 * of the signature value below; the kernel validates this signature before
 * an abort handler is allowed to run.
 */
#define RSEQ_SIG 0x53053053
29 * Due to a compiler optimization bug in gcc-8 with asm goto and TLS asm input
30 * operands, we cannot use "m" input operands, and rather pass the __rseq_abi
31 * address through a "r" input operand.
/*
 * Offset of cpu_id, rseq_cs, and mm_cid fields in struct rseq. Those
 * are defined explicitly as macros to be used from assembly.
 */
#define RSEQ_ASM_CPU_ID_OFFSET 4	/* offsetof(struct rseq, cpu_id) */
#define RSEQ_ASM_CS_OFFSET 8		/* offsetof(struct rseq, rseq_cs) */
#define RSEQ_ASM_MM_CID_OFFSET 24	/* offsetof(struct rseq, mm_cid) */
/*
 * Refer to the Linux kernel memory model (LKMM) for documentation of
 * the memory barriers. Expect all x86 hardware to be x86-TSO (Total
 * Store Ordered).
 */

/*
 * CPU memory barrier: a lock-prefixed read-modify-write to the stack acts
 * as a full barrier. The -128 displacement targets the red zone below
 * %rsp — presumably chosen to avoid touching a live stack slot; TODO
 * confirm against the original header.
 * NOTE(review): %rsp implies a 64-bit build; an x86-32 variant of this
 * macro is not visible in this chunk — confirm it exists upstream.
 */
#define rseq_smp_mb() \
	__asm__ __volatile__ ("lock; addl $0,-128(%%rsp)" ::: "memory", "cc")
/*
 * CPU read memory barrier. On x86-TSO loads are not reordered with other
 * loads, so a compiler barrier suffices.
 */
#define rseq_smp_rmb() rseq_barrier()
/*
 * CPU write memory barrier. On x86-TSO stores are not reordered with other
 * stores, so a compiler barrier suffices.
 */
#define rseq_smp_wmb() rseq_barrier()
56 /* Acquire: One-way permeable barrier. */
57 #define rseq_smp_load_acquire(p) \
59 rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
/*
 * Acquire barrier after control dependency. On x86-TSO a read barrier
 * (compiler barrier, see rseq_smp_rmb above) is sufficient to upgrade a
 * control dependency to acquire ordering.
 */
#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb()
67 /* Release: One-way permeable barrier. */
68 #define rseq_smp_store_release(p, v) \
71 RSEQ_WRITE_ONCE(*(p), v); \
74 /* Segment selector for the thread pointer. */
75 #ifdef RSEQ_ARCH_AMD64
76 # define RSEQ_ASM_TP_SEGMENT %%fs
78 # define RSEQ_ASM_TP_SEGMENT %%gs
82 * Helper macro to define a variable of pointer type stored in a 64-bit
83 * integer. Only used internally in rseq headers.
85 #ifdef RSEQ_ARCH_AMD64
86 # define RSEQ_ASM_U64_PTR(x) ".quad " x
88 # define RSEQ_ASM_U64_PTR(x) ".long " x ", 0x0"
/*
 * Emit a 32-bit integer constant from assembly. @x is an already
 * stringified value, concatenated after the ".long" directive.
 */
#define RSEQ_ASM_U32(x) ".long " x

/* Common architecture support macros. */
#include "rseq/arch/generic/common.h"
/*
 * Define a critical section abort handler.
 *
 * @label:
 *   Local label to the abort handler.
 * @teardown:
 *   Sequence of instructions to run on abort.
 * @abort_label:
 *   C label to jump to at the end of the sequence.
 */
106 #define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label) \
107 ".pushsection __rseq_failure, \"ax\"\n\t" \
109 * Disassembler-friendly signature: \
110 * x86-32: ud1 <sig>,%edi \
111 * x86-64: ud1 <sig>(%rip),%edi \
113 ".byte 0x0f, 0xb9, 0x3d\n\t" \
114 ".long " __rseq_str(RSEQ_SIG) "\n\t" \
115 __rseq_str(label) ":\n\t" \
117 "jmp %l[" __rseq_str(abort_label) "]\n\t" \
/*
 * Define a critical section teardown handler.
 *
 * @label:
 *   Local label to the teardown handler.
 * @teardown:
 *   Sequence of instructions to run on teardown.
 * @target_label:
 *   C label to jump to at the end of the sequence.
 */
130 #define RSEQ_ASM_DEFINE_TEARDOWN(label, teardown, target_label) \
131 ".pushsection __rseq_failure, \"ax\"\n\t" \
132 __rseq_str(label) ":\n\t" \
134 "jmp %l[" __rseq_str(target_label) "]\n\t" \
/*
 * Store the address of the critical section descriptor structure at
 * @cs_label into the @rseq_cs pointer and emit the label @label, which
 * is the beginning of the sequence of consecutive assembly instructions.
 *
 * @label:
 *   Local label to the beginning of the sequence of consecutive assembly
 *   instructions.
 * @cs_label:
 *   Source local label to the critical section descriptor structure.
 * @rseq_cs:
 *   Destination pointer where to store the address of the critical
 *   section descriptor structure.
 */
151 #ifdef RSEQ_ARCH_AMD64
152 #define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
154 "leaq " __rseq_str(cs_label) "(%%rip), %%rax\n\t" \
155 "movq %%rax, " __rseq_str(rseq_cs) "\n\t" \
156 __rseq_str(label) ":\n\t"
158 # define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
160 "movl $" __rseq_str(cs_label) ", " __rseq_str(rseq_cs) "\n\t" \
161 __rseq_str(label) ":\n\t"
/*
 * Jump to local label @label when @cpu_id != @current_cpu_id.
 *
 * @cpu_id is the name of a 32-bit inline-asm input operand holding the
 * expected cpu id; @current_cpu_id is the stringified memory operand for
 * the current cpu id field (cmpl reads it and jnz branches on mismatch).
 * NOTE(review): the extraction dropped a line between the #define and
 * the cmpl (likely an RSEQ_INJECT_ASM() testing hook) — confirm against
 * the original header.
 */
#define RSEQ_ASM_CBNE_CPU_ID(cpu_id, current_cpu_id, label) \
	"cmpl %[" __rseq_str(cpu_id) "], " __rseq_str(current_cpu_id) "\n\t" \
	"jnz " __rseq_str(label) "\n\t"
/*
 * Per-cpu-id indexing.
 *
 * rseq/arch/x86/bits.h is a template header: it is included once per
 * (index kind, memory ordering) combination below, with the
 * RSEQ_TEMPLATE_* macros selecting which variant of the critical
 * section APIs each pass defines.
 */

#define RSEQ_TEMPLATE_INDEX_CPU_ID
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq/arch/x86/bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED

#define RSEQ_TEMPLATE_MO_RELEASE
#include "rseq/arch/x86/bits.h"
#undef RSEQ_TEMPLATE_MO_RELEASE
#undef RSEQ_TEMPLATE_INDEX_CPU_ID

/* Per-mm-cid indexing: same template, indexed by the mm_cid field. */

#define RSEQ_TEMPLATE_INDEX_MM_CID
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq/arch/x86/bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED

#define RSEQ_TEMPLATE_MO_RELEASE
#include "rseq/arch/x86/bits.h"
#undef RSEQ_TEMPLATE_MO_RELEASE
#undef RSEQ_TEMPLATE_INDEX_MM_CID

/* APIs which are not indexed. */

#define RSEQ_TEMPLATE_INDEX_NONE
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq/arch/x86/bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED
#undef RSEQ_TEMPLATE_INDEX_NONE