/* SPDX-License-Identifier: MIT */
/* SPDX-FileCopyrightText: 2016-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */
/* SPDX-FileCopyrightText: 2018 Will Deacon <will.deacon@arm.com> */

/*
 * rseq-arm64.h
 */

/*
 * aarch64 -mbig-endian generates mixed-endianness code vs data:
 * little-endian code and big-endian data. Ensure the RSEQ_SIG signature
 * matches code endianness.
 */
#define RSEQ_SIG_CODE	0xd428bc00	/* BRK #0x45E0. */

#ifdef __AARCH64EB__
#define RSEQ_SIG_DATA	0x00bc28d4	/* BRK #0x45E0, byte-reversed. */
#else
#define RSEQ_SIG_DATA	RSEQ_SIG_CODE
#endif

#define RSEQ_SIG	RSEQ_SIG_DATA
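
/*
 * Simplified sketch of the kernel-side check (illustrative only): on
 * abort, the kernel loads the 32-bit word immediately preceding the
 * abort IP as *data* and compares it against the registered signature,
 * delivering SIGSEGV on mismatch:
 *
 *	uint32_t sig;
 *
 *	copy_from_user(&sig, (void *)(abort_ip - sizeof(sig)), sizeof(sig));
 *	if (sig != RSEQ_SIG)		// data view, hence RSEQ_SIG_DATA
 *		force_sig(SIGSEGV);	// simplified
 *
 * This is why RSEQ_SIG_DATA must be the byte-reversed instruction word
 * on big-endian-data builds.
 */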

#define rseq_smp_mb()	__asm__ __volatile__ ("dmb ish" ::: "memory")
#define rseq_smp_rmb()	__asm__ __volatile__ ("dmb ishld" ::: "memory")
#define rseq_smp_wmb()	__asm__ __volatile__ ("dmb ishst" ::: "memory")
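
/*
 * "dmb ish" orders all memory accesses within the inner shareable domain;
 * "dmb ishld" orders prior loads against later loads and stores;
 * "dmb ishst" orders prior stores against later stores.
 */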

#define rseq_smp_load_acquire(p)					\
__extension__ ({							\
	union { rseq_unqual_scalar_typeof(*(p)) __val; char __c[sizeof(*(p))]; } __u; \
	switch (sizeof(*(p))) {						\
	case 1:								\
		__asm__ __volatile__ ("ldarb %w0, %1"			\
			: "=r" (*(__u8 *)__u.__c)			\
			: "Q" (*(p)) : "memory");			\
		break;							\
	case 2:								\
		__asm__ __volatile__ ("ldarh %w0, %1"			\
			: "=r" (*(__u16 *)__u.__c)			\
			: "Q" (*(p)) : "memory");			\
		break;							\
	case 4:								\
		__asm__ __volatile__ ("ldar %w0, %1"			\
			: "=r" (*(__u32 *)__u.__c)			\
			: "Q" (*(p)) : "memory");			\
		break;							\
	case 8:								\
		__asm__ __volatile__ ("ldar %0, %1"			\
			: "=r" (*(__u64 *)__u.__c)			\
			: "Q" (*(p)) : "memory");			\
		break;							\
	}								\
	(rseq_unqual_scalar_typeof(*(p)))__u.__val;			\
})

#define rseq_smp_acquire__after_ctrl_dep()	rseq_smp_rmb()

#define rseq_smp_store_release(p, v)					\
do {									\
	union { rseq_unqual_scalar_typeof(*(p)) __val; char __c[sizeof(*(p))]; } __u = \
		{ .__val = (rseq_unqual_scalar_typeof(*(p))) (v) };	\
	switch (sizeof(*(p))) {						\
	case 1:								\
		__asm__ __volatile__ ("stlrb %w1, %0"			\
				: "=Q" (*(p))				\
				: "r" (*(__u8 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 2:								\
		__asm__ __volatile__ ("stlrh %w1, %0"			\
				: "=Q" (*(p))				\
				: "r" (*(__u16 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 4:								\
		__asm__ __volatile__ ("stlr %w1, %0"			\
				: "=Q" (*(p))				\
				: "r" (*(__u32 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 8:								\
		__asm__ __volatile__ ("stlr %1, %0"			\
				: "=Q" (*(p))				\
				: "r" (*(__u64 *)__u.__c)		\
				: "memory");				\
		break;							\
	}								\
} while (0)
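
/*
 * Usage sketch (illustrative; names below are hypothetical): the
 * load-acquire/store-release pair implements one-way publication
 * between threads:
 *
 *	static uint64_t payload;
 *	static int ready;
 *
 *	// producer
 *	payload = compute();			// plain store
 *	rseq_smp_store_release(&ready, 1);	// publishes payload
 *
 *	// consumer
 *	if (rseq_smp_load_acquire(&ready))	// observes payload if ready
 *		consume(payload);
 */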

#define RSEQ_ASM_TMP_REG32	"w15"
#define RSEQ_ASM_TMP_REG	"x15"
#define RSEQ_ASM_TMP_REG_2	"x14"
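
/*
 * Note: w15 is the 32-bit view of the x15 scratch register; x14 is a
 * second scratch register, used by the memcpy loop below.
 */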

#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip,	\
				post_commit_offset, abort_ip)		\
	"	.pushsection __rseq_cs, \"aw\"\n"			\
	"	.balign 32\n"						\
	__rseq_str(label) ":\n"						\
	"	.long " __rseq_str(version) ", " __rseq_str(flags) "\n" \
	"	.quad " __rseq_str(start_ip) ", "			\
		__rseq_str(post_commit_offset) ", "			\
		__rseq_str(abort_ip) "\n"				\
	"	.popsection\n\t"					\
	"	.pushsection __rseq_cs_ptr_array, \"aw\"\n"		\
	"	.quad " __rseq_str(label) "b\n"				\
	"	.popsection\n"

#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
	__RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip,		\
				(post_commit_ip - start_ip), abort_ip)
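
/*
 * The entry emitted into __rseq_cs above matches the layout of the UAPI
 * struct rseq_cs from <linux/rseq.h>:
 *
 *	struct rseq_cs {
 *		__u32 version;
 *		__u32 flags;
 *		__u64 start_ip;
 *		__u64 post_commit_offset;
 *		__u64 abort_ip;
 *	};
 *
 * Note that the post-commit address is stored as an offset from start_ip,
 * and the 32-byte alignment satisfies the kernel's requirement for this
 * structure.
 */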

/*
 * Exit points of an rseq critical section consist of all instructions
 * outside of the critical section to which the critical section can
 * either branch, or which it can reach through the normal course of its
 * execution. The abort IP and the post-commit IP are already part of
 * the __rseq_cs section and should not be explicitly defined as
 * additional exit points. Knowing all exit points is useful to assist
 * debuggers stepping over the critical section.
 */
#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip)			\
	"	.pushsection __rseq_exit_point_array, \"aw\"\n"		\
	"	.quad " __rseq_str(start_ip) ", " __rseq_str(exit_ip) "\n" \
	"	.popsection\n"
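
/*
 * Example (sketch): a critical section whose compare step can branch to
 * a C label "ne" would declare that branch target as an exit point from
 * within its inline asm, along the lines of the uses in
 * rseq-arm64-bits.h:
 *
 *	RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[ne])
 */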

#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs)		\
	RSEQ_INJECT_ASM(1)						\
	"	adrp	" RSEQ_ASM_TMP_REG ", " __rseq_str(cs_label) "\n" \
	"	add	" RSEQ_ASM_TMP_REG ", " RSEQ_ASM_TMP_REG	\
	", :lo12:" __rseq_str(cs_label) "\n"				\
	"	str	" RSEQ_ASM_TMP_REG ", %[" __rseq_str(rseq_cs) "]\n" \
	__rseq_str(label) ":\n"
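
/*
 * RSEQ_ASM_STORE_RSEQ_CS materializes the address of the struct rseq_cs
 * descriptor (adrp + add :lo12: page/offset pair) and stores it into the
 * rseq_cs field of the thread's struct rseq, arming the critical
 * section. The label that follows marks its start IP.
 */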

#define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label)		\
	"	b	222f\n"						\
	"	.inst	" __rseq_str(RSEQ_SIG_CODE) "\n"		\
	__rseq_str(label) ":\n"						\
	teardown							\
	"	b	%l[" __rseq_str(abort_label) "]\n"		\
	"222:\n"
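
/*
 * Layout note: on the normal path, execution branches over the signature
 * word ("b 222f"). The ".inst" directive emits RSEQ_SIG_CODE immediately
 * before the abort label, so the kernel can validate the signature
 * before branching to the abort IP.
 */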

#define RSEQ_ASM_OP_STORE(value, var)					\
	"	str	%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n"

#define RSEQ_ASM_OP_STORE_RELEASE(value, var)				\
	"	stlr	%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n"

#define RSEQ_ASM_OP_FINAL_STORE(value, var, post_commit_label)		\
	RSEQ_ASM_OP_STORE(value, var)					\
	__rseq_str(post_commit_label) ":\n"

#define RSEQ_ASM_OP_FINAL_STORE_RELEASE(value, var, post_commit_label)	\
	RSEQ_ASM_OP_STORE_RELEASE(value, var)				\
	__rseq_str(post_commit_label) ":\n"

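/*
 * Compare-and-branch helpers: load @var into the scratch register,
 * subtract @expect, then branch to @label when the result is non-zero
 * (CBNE, CBNE32) or zero (CBEQ).
 */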
#define RSEQ_ASM_OP_CBNE(var, expect, label)				\
	"	ldr	" RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n"	\
	"	sub	" RSEQ_ASM_TMP_REG ", " RSEQ_ASM_TMP_REG	\
	", %[" __rseq_str(expect) "]\n"					\
	"	cbnz	" RSEQ_ASM_TMP_REG ", " __rseq_str(label) "\n"

#define RSEQ_ASM_OP_CBNE32(var, expect, label)				\
	"	ldr	" RSEQ_ASM_TMP_REG32 ", %[" __rseq_str(var) "]\n" \
	"	sub	" RSEQ_ASM_TMP_REG32 ", " RSEQ_ASM_TMP_REG32	\
	", %w[" __rseq_str(expect) "]\n"				\
	"	cbnz	" RSEQ_ASM_TMP_REG32 ", " __rseq_str(label) "\n"

#define RSEQ_ASM_OP_CBEQ(var, expect, label)				\
	"	ldr	" RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n"	\
	"	sub	" RSEQ_ASM_TMP_REG ", " RSEQ_ASM_TMP_REG	\
	", %[" __rseq_str(expect) "]\n"					\
	"	cbz	" RSEQ_ASM_TMP_REG ", " __rseq_str(label) "\n"

#define RSEQ_ASM_CBNE_CPU_ID(cpu_id, current_cpu_id, label)		\
	RSEQ_INJECT_ASM(2)						\
	RSEQ_ASM_OP_CBNE32(current_cpu_id, cpu_id, label)

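/*
 * The _R_ variants operate through the scratch register RSEQ_ASM_TMP_REG
 * rather than on a named C operand, allowing a loaded value to be
 * carried across several asm steps (load, offset, add, final store).
 */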
#define RSEQ_ASM_OP_R_LOAD(var)						\
	"	ldr	" RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n"

#define RSEQ_ASM_OP_R_STORE(var)					\
	"	str	" RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n"

#define RSEQ_ASM_OP_R_LOAD_OFF(offset)					\
	"	ldr	" RSEQ_ASM_TMP_REG ", [" RSEQ_ASM_TMP_REG	\
	", %[" __rseq_str(offset) "]]\n"

#define RSEQ_ASM_OP_R_ADD(count)					\
	"	add	" RSEQ_ASM_TMP_REG ", " RSEQ_ASM_TMP_REG	\
	", %[" __rseq_str(count) "]\n"

#define RSEQ_ASM_OP_R_FINAL_STORE(var, post_commit_label)		\
	"	str	" RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n"	\
	__rseq_str(post_commit_label) ":\n"

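/*
 * Byte-at-a-time copy loop: iterates from the last byte down to the
 * first, so the length register doubles as the loop counter, keeping the
 * whole copy contained within the critical section.
 */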
#define RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len)				\
	"	cbz	%[" __rseq_str(len) "], 333f\n"			\
	"	mov	" RSEQ_ASM_TMP_REG_2 ", %[" __rseq_str(len) "]\n" \
	"222:	sub	" RSEQ_ASM_TMP_REG_2 ", " RSEQ_ASM_TMP_REG_2 ", #1\n" \
	"	ldrb	" RSEQ_ASM_TMP_REG32 ", [%[" __rseq_str(src) "]" \
	", " RSEQ_ASM_TMP_REG_2 "]\n"					\
	"	strb	" RSEQ_ASM_TMP_REG32 ", [%[" __rseq_str(dst) "]" \
	", " RSEQ_ASM_TMP_REG_2 "]\n"					\
	"	cbnz	" RSEQ_ASM_TMP_REG_2 ", 222b\n"			\
	"333:\n"

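/*
 * rseq-arm64-bits.h is included once per (indexing, memory-ordering)
 * combination below. The RSEQ_TEMPLATE_* defines select which variant of
 * each critical-section helper gets expanded.
 */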
/* Per-cpu-id indexing. */

#define RSEQ_TEMPLATE_CPU_ID
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq-arm64-bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED

#define RSEQ_TEMPLATE_MO_RELEASE
#include "rseq-arm64-bits.h"
#undef RSEQ_TEMPLATE_MO_RELEASE
#undef RSEQ_TEMPLATE_CPU_ID

/* Per-mm-cid indexing. */

#define RSEQ_TEMPLATE_MM_CID
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq-arm64-bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED

#define RSEQ_TEMPLATE_MO_RELEASE
#include "rseq-arm64-bits.h"
#undef RSEQ_TEMPLATE_MO_RELEASE
#undef RSEQ_TEMPLATE_MM_CID

/* APIs which are not based on cpu ids. */

#define RSEQ_TEMPLATE_CPU_ID_NONE
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq-arm64-bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED
#undef RSEQ_TEMPLATE_CPU_ID_NONE
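
/*
 * Usage sketch (illustrative): the concrete helpers are generated by the
 * template expansions above; the names below assume the current librseq
 * naming scheme and hypothetical per-CPU data. A per-CPU compare-and-store
 * retry loop could look like:
 *
 *	intptr_t *slot, expect, newv;
 *	int cpu, ret;
 *
 *	do {
 *		cpu = rseq_current_cpu_raw();	// assumed librseq helper
 *		slot = &percpu_data[cpu].head;	// hypothetical per-cpu slot
 *		expect = *slot;
 *		newv = expect + 1;
 *		ret = rseq_load_cbne_store__ptr_relaxed_cpu_id(slot,
 *						expect, newv, cpu);
 *	} while (ret);	// non-zero: abort or value mismatch, retry
 */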