/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * percpu-op.h
 *
 * (C) Copyright 2017-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef RSEQ_PERCPU_OP_H
#define RSEQ_PERCPU_OP_H

#include <stdint.h>
#include <stddef.h>
#include <sys/types.h>
#include <rseq/rseq.h>
#include <rseq/cpu-op.h>

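/*
 * Each percpu_*() helper below first attempts its operation through the
 * rseq fast path on the caller's CPU.  A positive return value from the
 * rseq primitive (comparison mismatch) is passed through to the caller;
 * a negative return value (e.g. rseq unavailable, abort, or the thread
 * moved off @cpu) falls back to the matching cpu_op_*() slow path, which
 * performs the same operation on the requested CPU via the cpu_opv
 * system call.
 */
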
static inline uint32_t percpu_current_cpu(void)
{
        return rseq_current_cpu();
}

static inline __attribute__((always_inline))
int percpu_fence(int cpu)
{
        /*
         * No explicit fence is needed against the current CPU: being
         * scheduled on @cpu already implies no rseq critical section
         * is concurrently executing there.
         */
        if ((uint32_t) cpu == percpu_current_cpu())
                return 0;
        return cpu_op_fence(cpu);
}

/* If *@v == @expect, store @newv into *@v, as seen from @cpu. */
static inline __attribute__((always_inline))
int percpu_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv,
                         int cpu)
{
        int ret;

        ret = rseq_cmpeqv_storev(v, expect, newv, cpu);
        if (rseq_unlikely(ret)) {
                if (ret > 0)
                        return ret;     /* comparison failed */
                return cpu_op_cmpeqv_storev(v, expect, newv, cpu);
        }
        return 0;
}

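/*
 * Typical use (sketch): lock-free push onto a per-CPU stack.  The
 * "struct node" and "stack" layouts below are illustrative, not part of
 * this header.
 *
 *      struct node { struct node *next; };
 *      struct { intptr_t head; } stack[NR_CPUS];
 *
 *      void push(struct node *node)
 *      {
 *              for (;;) {
 *                      int cpu = percpu_current_cpu();
 *                      intptr_t old = stack[cpu].head;
 *
 *                      node->next = (struct node *) old;
 *                      if (!percpu_cmpeqv_storev(&stack[cpu].head, old,
 *                                                (intptr_t) node, cpu))
 *                              return;  // success; retry on mismatch
 *              }
 *      }
 */
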
/*
 * If *@v != @expectnot, load the old *@v into *@load and store the word
 * at offset @voffp within the object *@v points to back into *@v
 * (linked-list pop), as seen from @cpu.
 */
static inline __attribute__((always_inline))
int percpu_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
                                 off_t voffp, intptr_t *load, int cpu)
{
        int ret;

        ret = rseq_cmpnev_storeoffp_load(v, expectnot, voffp, load, cpu);
        if (rseq_unlikely(ret)) {
                if (ret > 0)
                        return ret;     /* comparison failed */
                return cpu_op_cmpnev_storeoffp_load(v, expectnot, voffp,
                                                    load, cpu);
        }
        return 0;
}

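/*
 * Usage sketch: popping the head of the per-CPU stack from the push
 * example above, where @voffp is the offset of the "next" field.
 *
 *      intptr_t head;
 *      int cpu = percpu_current_cpu();
 *
 *      if (!percpu_cmpnev_storeoffp_load(&stack[cpu].head, (intptr_t) NULL,
 *                                        offsetof(struct node, next),
 *                                        &head, cpu))
 *              node = (struct node *) head;    // popped element
 */
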
/* Add @count to *@v, as seen from @cpu. */
static inline __attribute__((always_inline))
int percpu_addv(intptr_t *v, intptr_t count, int cpu)
{
        if (rseq_unlikely(rseq_addv(v, count, cpu)))
                return cpu_op_addv(v, count, cpu);
        return 0;
}

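/*
 * Usage sketch: a lockless per-CPU counter increment ("counters" is an
 * illustrative array; error handling elided).
 *
 *      int cpu = percpu_current_cpu();
 *
 *      (void) percpu_addv(&counters[cpu], 1, cpu);
 */
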
/* If *@v == @expect, store @newv2 into *@v2, then @newv into *@v. */
static inline __attribute__((always_inline))
int percpu_cmpeqv_storev_storev(intptr_t *v, intptr_t expect,
                                intptr_t *v2, intptr_t newv2,
                                intptr_t newv, int cpu)
{
        int ret;

        ret = rseq_cmpeqv_trystorev_storev(v, expect, v2, newv2,
                                           newv, cpu);
        if (rseq_unlikely(ret)) {
                if (ret > 0)
                        return ret;     /* comparison failed */
                return cpu_op_cmpeqv_storev_storev(v, expect, v2, newv2,
                                                   newv, cpu);
        }
        return 0;
}

/*
 * Same as percpu_cmpeqv_storev_storev(), with release semantics on the
 * final store.
 */
static inline __attribute__((always_inline))
int percpu_cmpeqv_storev_storev_release(intptr_t *v, intptr_t expect,
                                        intptr_t *v2, intptr_t newv2,
                                        intptr_t newv, int cpu)
{
        int ret;

        ret = rseq_cmpeqv_trystorev_storev_release(v, expect, v2, newv2,
                                                   newv, cpu);
        if (rseq_unlikely(ret)) {
                if (ret > 0)
                        return ret;     /* comparison failed */
                return cpu_op_cmpeqv_storev_storev_release(v, expect, v2, newv2,
                                                           newv, cpu);
        }
        return 0;
}

/* If *@v == @expect and *@v2 == @expect2, store @newv into *@v. */
static inline __attribute__((always_inline))
int percpu_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
                                intptr_t *v2, intptr_t expect2,
                                intptr_t newv, int cpu)
{
        int ret;

        ret = rseq_cmpeqv_cmpeqv_storev(v, expect, v2, expect2, newv, cpu);
        if (rseq_unlikely(ret)) {
                if (ret > 0)
                        return ret;     /* one of the comparisons failed */
                return cpu_op_cmpeqv_cmpeqv_storev(v, expect, v2, expect2,
                                                   newv, cpu);
        }
        return 0;
}

/*
 * If *@v == @expect, copy @len bytes from @src to @dst, then store @newv
 * into *@v.
 */
static inline __attribute__((always_inline))
int percpu_cmpeqv_memcpy_storev(intptr_t *v, intptr_t expect,
                                void *dst, void *src, size_t len,
                                intptr_t newv, int cpu)
{
        int ret;

        ret = rseq_cmpeqv_trymemcpy_storev(v, expect, dst, src, len,
                                           newv, cpu);
        if (rseq_unlikely(ret)) {
                if (ret > 0)
                        return ret;     /* comparison failed */
                return cpu_op_cmpeqv_memcpy_storev(v, expect, dst, src, len,
                                                   newv, cpu);
        }
        return 0;
}

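/*
 * The memcpy variants allow updating more than one machine word per
 * operation: copy an arbitrary-size payload, then publish it with the
 * final word-sized store to *@v (e.g. filling a per-CPU ring-buffer
 * entry before advancing its offset).
 */
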
/*
 * Same as percpu_cmpeqv_memcpy_storev(), with release semantics on the
 * final store.
 */
static inline __attribute__((always_inline))
int percpu_cmpeqv_memcpy_storev_release(intptr_t *v, intptr_t expect,
                                        void *dst, void *src, size_t len,
                                        intptr_t newv, int cpu)
{
        int ret;

        ret = rseq_cmpeqv_trymemcpy_storev_release(v, expect, dst, src, len,
                                                   newv, cpu);
        if (rseq_unlikely(ret)) {
                if (ret > 0)
                        return ret;     /* comparison failed */
                return cpu_op_cmpeqv_memcpy_storev_release(v, expect, dst, src,
                                                           len, newv, cpu);
        }
        return 0;
}

static inline __attribute__((always_inline))
int percpu_deref_loadoffp(intptr_t *p, off_t voffp, intptr_t *load, int cpu)
{
        int ret;

        ret = rseq_deref_loadoffp(p, voffp, load, cpu);
        if (rseq_unlikely(ret)) {
                if (ret > 0)
                        return ret;
                return cpu_op_deref_loadoffp(p, voffp, load, cpu);
        }
        return 0;
}

#endif /* RSEQ_PERCPU_OP_H */