/* SPDX-License-Identifier: MIT */
/* SPDX-FileCopyrightText: 2022 Vincent Chen <vincent.chen@sifive.com> */
/* SPDX-FileCopyrightText: 2024 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */

/*
 * rseq/arch/riscv.h
 */

/*
 * RSEQ_ASM_*() macro helpers are internal to the librseq headers. They
 * are not part of the public API.
 */

#ifndef _RSEQ_RSEQ_H
#error "Never use <rseq/arch/riscv.h> directly; include <rseq/rseq.h> instead."
#endif

/*
 * Select the instruction "csrw mhartid, x0" as the RSEQ_SIG. Unlike
 * other architectures, the ebreak instruction has no immediate field
 * that could be used to distinguish signatures, so ebreak is not
 * suitable as RSEQ_SIG. "csrw mhartid, x0" satisfies the RSEQ
 * requirements because it is an uncommon instruction and raises an
 * illegal instruction exception when executed in all modes.
 */
#include <endian.h>

#if defined(__BYTE_ORDER) ? (__BYTE_ORDER == __LITTLE_ENDIAN) : defined(__LITTLE_ENDIAN)
#define RSEQ_SIG 0xf1401073 /* csrw mhartid, x0 */
#else
#error "Currently, rseq only supports little-endian."
#endif
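
/*
 * For reference: 0xf1401073 decodes as "csrrw x0, mhartid, x0"
 * (csr=0xf14 mhartid, rs1=x0, funct3=001 CSRRW, rd=x0, opcode=0x73),
 * which is the canonical encoding of the "csrw mhartid, x0"
 * pseudo-instruction.
 */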

/*
 * Instruction selection between 32-bit/64-bit. Used internally in the
 * rseq headers.
 */
#if __riscv_xlen == 64
#define __RSEQ_ASM_REG_SEL(a, b) a
#elif __riscv_xlen == 32
#define __RSEQ_ASM_REG_SEL(a, b) b
#endif

#define RSEQ_ASM_REG_L __RSEQ_ASM_REG_SEL("ld ", "lw ")
#define RSEQ_ASM_REG_S __RSEQ_ASM_REG_SEL("sd ", "sw ")
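
/*
 * For example, on RV64 (__riscv_xlen == 64) RSEQ_ASM_REG_L expands to
 * "ld " and RSEQ_ASM_REG_S to "sd "; on RV32 they select "lw "/"sw ",
 * matching the native register width.
 */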

/*
 * Refer to the Linux kernel memory model (LKMM) for documentation of
 * the memory barriers.
 */

/* Only used internally in rseq headers. */
#define RSEQ_ASM_RISCV_FENCE(p, s) \
        __asm__ __volatile__ ("fence " #p "," #s : : : "memory")
/* CPU memory barrier. */
#define rseq_smp_mb() RSEQ_ASM_RISCV_FENCE(rw, rw)
/* CPU read memory barrier. */
#define rseq_smp_rmb() RSEQ_ASM_RISCV_FENCE(r, r)
/* CPU write memory barrier. */
#define rseq_smp_wmb() RSEQ_ASM_RISCV_FENCE(w, w)
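
/*
 * For example, rseq_smp_mb() emits "fence rw,rw", which orders all
 * prior loads and stores before all subsequent loads and stores.
 */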

/* Acquire: One-way permeable barrier. */
#define rseq_smp_load_acquire(p) \
__extension__ ({ \
        rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
        RSEQ_ASM_RISCV_FENCE(r, rw); \
        ____p1; \
})
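
/*
 * The "fence r,rw" after the load prevents subsequent loads and stores
 * from being reordered before it, yielding acquire semantics analogous
 * to LKMM smp_load_acquire().
 */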

/* Acquire barrier after control dependency. */
#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb()

/* Release: One-way permeable barrier. */
#define rseq_smp_store_release(p, v) \
do { \
        RSEQ_ASM_RISCV_FENCE(rw, w); \
        RSEQ_WRITE_ONCE(*(p), v); \
} while (0)
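
/*
 * Pairing sketch (illustrative; "data", "flag" and "use()" are
 * hypothetical):
 *
 *      data = 42;                              (producer)
 *      rseq_smp_store_release(&flag, 1);
 *
 *      if (rseq_smp_load_acquire(&flag))       (consumer)
 *              use(data);                      (guaranteed to see 42)
 */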

#define RSEQ_ASM_U64_PTR(x) ".quad " x
#define RSEQ_ASM_U32(x)     ".long " x
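
/*
 * These emit 64-bit and 32-bit constants into the assembly stream.
 * Note that the pointer fields of struct rseq_cs are __u64 on all
 * architectures, so ".quad" is correct even when __riscv_xlen == 32.
 */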

/* Temporary registers. */
#define RSEQ_ASM_TMP_REG_1 "t6"
#define RSEQ_ASM_TMP_REG_2 "t5"
#define RSEQ_ASM_TMP_REG_3 "t4"
#define RSEQ_ASM_TMP_REG_4 "t3"

/* Common architecture support macros. */
#include "rseq/arch/generic/common.h"
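
/*
 * The generic header is expected to build the architecture-independent
 * critical section descriptor macros on top of the RSEQ_ASM_U64_PTR()
 * and RSEQ_ASM_U32() emitters defined above (an assumption based on
 * this include's placement).
 */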

/*
 * Define a critical section abort handler.
 *
 * @label:
 *        Local label to the abort handler.
 * @teardown:
 *        Sequence of instructions to run on abort.
 * @abort_label:
 *        C label to jump to at the end of the sequence.
 */
#define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label) \
        "j 222f\n" \
        ".balign 4\n" \
        RSEQ_ASM_U32(__rseq_str(RSEQ_SIG)) "\n" \
        __rseq_str(label) ":\n" \
        teardown \
        "j %l[" __rseq_str(abort_label) "]\n" \
        "222:\n"

/*
 * Store the address of the critical section descriptor @cs_label into
 * the rseq_cs pointer field @rseq_cs, and emit local label @label at
 * the start of the sequence.
 */
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
        RSEQ_INJECT_ASM(1) \
        "la " RSEQ_ASM_TMP_REG_1 ", " __rseq_str(cs_label) "\n" \
        RSEQ_ASM_REG_S RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(rseq_cs) "]\n" \
        __rseq_str(label) ":\n"
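
/*
 * This store is what arms the critical section: once the descriptor
 * address is published in the rseq_cs field, preemption or signal
 * delivery between the start IP and the post-commit IP causes the
 * kernel to branch to the abort IP.
 */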

/* Store @value to address @var. */
#define RSEQ_ASM_OP_STORE(value, var) \
        RSEQ_ASM_REG_S "%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n"

/* Jump to local label @label when @var != @expect. */
#define RSEQ_ASM_OP_CBNE(var, expect, label) \
        RSEQ_ASM_REG_L RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n" \
        "bne " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(expect) "], " \
        __rseq_str(label) "\n"
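
/*
 * For example, RSEQ_ASM_OP_CBNE(v, expectv, 5f) emits roughly the
 * following on RV64:
 *
 *      ld t6, %[v]
 *      bne t6, %[expectv], 5f
 */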

/*
 * Jump to local label @label when @var != @expect (32-bit register
 * comparison).
 */
#define RSEQ_ASM_OP_CBNE32(var, expect, label) \
        "lw " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n" \
        "bne " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(expect) "], " \
        __rseq_str(label) "\n"

/* Jump to local label @label when @var == @expect. */
#define RSEQ_ASM_OP_CBEQ(var, expect, label) \
        RSEQ_ASM_REG_L RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n" \
        "beq " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(expect) "], " \
        __rseq_str(label) "\n"

/* Jump to local label @label when @cpu_id != @current_cpu_id. */
#define RSEQ_ASM_CBNE_CPU_ID(cpu_id, current_cpu_id, label) \
        RSEQ_INJECT_ASM(2) \
        RSEQ_ASM_OP_CBNE32(current_cpu_id, cpu_id, label)
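
/*
 * The comparison above is 32-bit because the cpu_id and mm_cid fields
 * of struct rseq are 32-bit (__u32) values.
 */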

/* Load @var into temporary register. */
#define RSEQ_ASM_OP_R_LOAD(var) \
        RSEQ_ASM_REG_L RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n"

/* Store from temporary register into @var. */
#define RSEQ_ASM_OP_R_STORE(var) \
        RSEQ_ASM_REG_S RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n"

/* Load from address in temporary register + @offset into temporary register. */
#define RSEQ_ASM_OP_R_LOAD_OFF(offset) \
        "add " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(offset) "], " \
        RSEQ_ASM_TMP_REG_1 "\n" \
        RSEQ_ASM_REG_L RSEQ_ASM_TMP_REG_1 ", (" RSEQ_ASM_TMP_REG_1 ")\n"
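
/*
 * For example, with a base address already in the temporary register,
 * RSEQ_ASM_OP_R_LOAD_OFF(cpu_offset) emits roughly the following on
 * RV64 ("cpu_offset" is an illustrative operand name):
 *
 *      add t6, %[cpu_offset], t6
 *      ld t6, (t6)
 */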

/* Add @count to temporary register. */
#define RSEQ_ASM_OP_R_ADD(count) \
        "add " RSEQ_ASM_TMP_REG_1 ", " RSEQ_ASM_TMP_REG_1 \
        ", %[" __rseq_str(count) "]\n"

/*
 * End-of-sequence store of @value to address @var. Emit
 * @post_commit_label label after the store instruction.
 */
#define RSEQ_ASM_OP_FINAL_STORE(value, var, post_commit_label) \
        RSEQ_ASM_OP_STORE(value, var) \
        __rseq_str(post_commit_label) ":\n"
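
/*
 * The post-commit label matters because the kernel only aborts the
 * sequence while the instruction pointer is before it (within
 * post_commit_offset of the start IP); the final store is the commit
 * point.
 */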

/*
 * End-of-sequence store-release of @value to address @var. Emit
 * @post_commit_label label after the store instruction.
 */
#define RSEQ_ASM_OP_FINAL_STORE_RELEASE(value, var, post_commit_label) \
        "fence rw, w\n" \
        RSEQ_ASM_OP_STORE(value, var) \
        __rseq_str(post_commit_label) ":\n"
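
/*
 * The "fence rw, w" before the commit store provides release
 * semantics, mirroring rseq_smp_store_release() above.
 */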

/*
 * End-of-sequence store of temporary register to address @var. Emit
 * @post_commit_label label after the store instruction.
 */
#define RSEQ_ASM_OP_R_FINAL_STORE(var, post_commit_label) \
        RSEQ_ASM_REG_S RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n" \
        __rseq_str(post_commit_label) ":\n"

/*
 * Copy @len bytes from @src to @dst. This is an inefficient bytewise
 * copy and could be improved in the future.
 */
#define RSEQ_ASM_OP_R_BYTEWISE_MEMCPY(dst, src, len) \
        "beqz %[" __rseq_str(len) "], 333f\n" \
        "mv " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(len) "]\n" \
        "mv " RSEQ_ASM_TMP_REG_2 ", %[" __rseq_str(src) "]\n" \
        "mv " RSEQ_ASM_TMP_REG_3 ", %[" __rseq_str(dst) "]\n" \
        "222:\n" \
        "lb " RSEQ_ASM_TMP_REG_4 ", 0(" RSEQ_ASM_TMP_REG_2 ")\n" \
        "sb " RSEQ_ASM_TMP_REG_4 ", 0(" RSEQ_ASM_TMP_REG_3 ")\n" \
        "addi " RSEQ_ASM_TMP_REG_1 ", " RSEQ_ASM_TMP_REG_1 ", -1\n" \
        "addi " RSEQ_ASM_TMP_REG_2 ", " RSEQ_ASM_TMP_REG_2 ", 1\n" \
        "addi " RSEQ_ASM_TMP_REG_3 ", " RSEQ_ASM_TMP_REG_3 ", 1\n" \
        "bnez " RSEQ_ASM_TMP_REG_1 ", 222b\n" \
        "333:\n"

/*
 * Dereference @ptr + @off into the temporary register, then add @inc
 * to the loaded value as the end-of-sequence operation. Emit
 * @post_commit_label label after the last instruction.
 */
#define RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, inc, post_commit_label) \
        "mv " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(ptr) "]\n" \
        RSEQ_ASM_OP_R_ADD(off) \
        RSEQ_ASM_REG_L RSEQ_ASM_TMP_REG_1 ", 0(" RSEQ_ASM_TMP_REG_1 ")\n" \
        RSEQ_ASM_OP_R_ADD(inc) \
        __rseq_str(post_commit_label) ":\n"
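
/*
 * For example (RV64, illustrative operand names), this expands to
 * roughly:
 *
 *      mv t6, %[ptr]
 *      add t6, t6, %[off]
 *      ld t6, 0(t6)
 *      add t6, t6, %[inc]
 */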

/* Per-cpu-id indexing. */

#define RSEQ_TEMPLATE_INDEX_CPU_ID
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq/arch/riscv/bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED

#define RSEQ_TEMPLATE_MO_RELEASE
#include "rseq/arch/riscv/bits.h"
#undef RSEQ_TEMPLATE_MO_RELEASE
#undef RSEQ_TEMPLATE_INDEX_CPU_ID

/* Per-mm-cid indexing. */

#define RSEQ_TEMPLATE_INDEX_MM_CID
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq/arch/riscv/bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED

#define RSEQ_TEMPLATE_MO_RELEASE
#include "rseq/arch/riscv/bits.h"
#undef RSEQ_TEMPLATE_MO_RELEASE
#undef RSEQ_TEMPLATE_INDEX_MM_CID

/* APIs which are not indexed. */

#define RSEQ_TEMPLATE_INDEX_NONE
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq/arch/riscv/bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED
#undef RSEQ_TEMPLATE_INDEX_NONE
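
/*
 * Each inclusion of rseq/arch/riscv/bits.h above instantiates the
 * critical section templates for one combination of indexing mode
 * (per-cpu-id, per-mm-cid, or none) and memory ordering (relaxed or
 * release), as selected by the RSEQ_TEMPLATE_* defines around it.
 */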