Implement percpu_fence()
[librseq.git] / include/rseq/percpu-op.h
/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * percpu-op.h
 *
 * (C) Copyright 2017-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef RSEQ_PERCPU_OP_H
#define RSEQ_PERCPU_OP_H

#include <stdint.h>
#include <stdbool.h>
#include <errno.h>
#include <stdlib.h>
#include <rseq/rseq.h>
#include <rseq/cpu-op.h>

static inline uint32_t percpu_current_cpu(void)
{
	return rseq_current_cpu();
}

static inline __attribute__((always_inline))
int percpu_fence(int cpu)
{
	if (cpu < 0)
		return -1;
	/*
	 * The current CPU observes its own per-CPU operations in program
	 * order, so an explicit fence is only needed against remote CPUs.
	 */
	if ((uint32_t) cpu == percpu_current_cpu())
		return 0;
	return cpu_op_fence(cpu);
}

static inline __attribute__((always_inline))
int percpu_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv,
			 int cpu)
{
	int ret;

	ret = rseq_cmpeqv_storev(v, expect, newv, cpu);
	if (rseq_unlikely(ret)) {
		if (ret > 0)
			return ret;
		return cpu_op_cmpeqv_storev(v, expect, newv, cpu);
	}
	return 0;
}

static inline __attribute__((always_inline))
int percpu_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
				 off_t voffp, intptr_t *load, int cpu)
{
	int ret;

	ret = rseq_cmpnev_storeoffp_load(v, expectnot, voffp, load, cpu);
	if (rseq_unlikely(ret)) {
		if (ret > 0)
			return ret;
		return cpu_op_cmpnev_storeoffp_load(v, expectnot, voffp,
						    load, cpu);
	}
	return 0;
}

static inline __attribute__((always_inline))
int percpu_addv(intptr_t *v, intptr_t count, int cpu)
{
	if (rseq_unlikely(rseq_addv(v, count, cpu)))
		return cpu_op_addv(v, count, cpu);
	return 0;
}

static inline __attribute__((always_inline))
int percpu_cmpeqv_storev_storev(intptr_t *v, intptr_t expect,
				intptr_t *v2, intptr_t newv2,
				intptr_t newv, int cpu)
{
	int ret;

	ret = rseq_cmpeqv_trystorev_storev(v, expect, v2, newv2,
					   newv, cpu);
	if (rseq_unlikely(ret)) {
		if (ret > 0)
			return ret;
		return cpu_op_cmpeqv_storev_storev(v, expect, v2, newv2,
						   newv, cpu);
	}
	return 0;
}

static inline __attribute__((always_inline))
int percpu_cmpeqv_storev_storev_release(intptr_t *v, intptr_t expect,
					intptr_t *v2, intptr_t newv2,
					intptr_t newv, int cpu)
{
	int ret;

	ret = rseq_cmpeqv_trystorev_storev_release(v, expect, v2, newv2,
						   newv, cpu);
	if (rseq_unlikely(ret)) {
		if (ret > 0)
			return ret;
		return cpu_op_cmpeqv_storev_storev_release(v, expect, v2, newv2,
							   newv, cpu);
	}
	return 0;
}

static inline __attribute__((always_inline))
int percpu_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
				intptr_t *v2, intptr_t expect2,
				intptr_t newv, int cpu)
{
	int ret;

	ret = rseq_cmpeqv_cmpeqv_storev(v, expect, v2, expect2, newv, cpu);
	if (rseq_unlikely(ret)) {
		if (ret > 0)
			return ret;
		return cpu_op_cmpeqv_cmpeqv_storev(v, expect, v2, expect2,
						   newv, cpu);
	}
	return 0;
}

static inline __attribute__((always_inline))
int percpu_cmpeqv_memcpy_storev(intptr_t *v, intptr_t expect,
				void *dst, void *src, size_t len,
				intptr_t newv, int cpu)
{
	int ret;

	ret = rseq_cmpeqv_trymemcpy_storev(v, expect, dst, src, len,
					   newv, cpu);
	if (rseq_unlikely(ret)) {
		if (ret > 0)
			return ret;
		return cpu_op_cmpeqv_memcpy_storev(v, expect, dst, src, len,
						   newv, cpu);
	}
	return 0;
}

static inline __attribute__((always_inline))
int percpu_cmpeqv_memcpy_storev_release(intptr_t *v, intptr_t expect,
					void *dst, void *src, size_t len,
					intptr_t newv, int cpu)
{
	int ret;

	ret = rseq_cmpeqv_trymemcpy_storev_release(v, expect, dst, src, len,
						   newv, cpu);
	if (rseq_unlikely(ret)) {
		if (ret > 0)
			return ret;
		return cpu_op_cmpeqv_memcpy_storev_release(v, expect, dst, src,
							   len, newv, cpu);
	}
	return 0;
}

static inline __attribute__((always_inline))
int percpu_deref_loadoffp(intptr_t *p, off_t voffp, intptr_t *load, int cpu)
{
	int ret;

	ret = rseq_deref_loadoffp(p, voffp, load, cpu);
	if (rseq_unlikely(ret)) {
		if (ret > 0)
			return ret;
		return cpu_op_deref_loadoffp(p, voffp, load, cpu);
	}
	return 0;
}

#endif	/* RSEQ_PERCPU_OP_H */
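
A minimal usage sketch, not part of the header: a per-CPU counter built on
percpu_current_cpu(), percpu_addv() and the newly added percpu_fence(). The
NR_CPUS_MAX bound, the counts array, counter_inc(), counter_sum() and the
abort-on-error policy are illustrative assumptions, not librseq API.

#include <rseq/percpu-op.h>
#include <stdlib.h>

#define NR_CPUS_MAX	4096		/* assumed upper bound on CPU ids */

static intptr_t counts[NR_CPUS_MAX];	/* one counter slot per CPU */

static void counter_inc(void)
{
	int cpu = (int) percpu_current_cpu();

	/*
	 * If the thread migrates after reading the CPU number, the rseq
	 * fast path aborts and the cpu_opv slow path still applies the
	 * add to counts[cpu] on the requested CPU: no retry loop needed.
	 */
	if (percpu_addv(&counts[cpu], 1, cpu))
		abort();	/* unexpected slow-path failure */
}

static intptr_t counter_sum(int nr_cpus)
{
	intptr_t sum = 0;
	int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		/* Order the read against updates in flight on @cpu. */
		if (percpu_fence(cpu))
			abort();
		sum += __atomic_load_n(&counts[cpu], __ATOMIC_RELAXED);
	}
	return sum;
}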