/* librseq: include/rseq/percpu-op.h (commit "Move to do_on_cpu system call") */
/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * percpu-op.h
 *
 * (C) Copyright 2017-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef RSEQ_PERCPU_OP_H
#define RSEQ_PERCPU_OP_H

#include <stdint.h>
#include <stdbool.h>
#include <errno.h>
#include <stdlib.h>
#include <rseq/rseq.h>
#include <rseq/cpu-op.h>

0cfe92f9
MD
18static inline uint32_t percpu_current_cpu(void)
19{
20 return rseq_current_cpu();
21}
22
b32429a9
MD
23static inline __attribute__((always_inline))
24int percpu_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv,
25 int cpu)
26{
27 int ret;
28
29 ret = rseq_cmpeqv_storev(v, expect, newv, cpu);
30 if (rseq_unlikely(ret)) {
31 if (ret > 0)
32 return ret;
33 return cpu_op_cmpeqv_storev(v, expect, newv, cpu);
34 }
35 return 0;
36}
37
38static inline __attribute__((always_inline))
39int percpu_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
40 off_t voffp, intptr_t *load, int cpu)
41{
42 int ret;
43
44 ret = rseq_cmpnev_storeoffp_load(v, expectnot, voffp, load, cpu);
45 if (rseq_unlikely(ret)) {
46 if (ret > 0)
47 return ret;
48 return cpu_op_cmpnev_storeoffp_load(v, expectnot, voffp,
49 load, cpu);
50 }
51 return 0;
52}
53
54static inline __attribute__((always_inline))
55int percpu_addv(intptr_t *v, intptr_t count, int cpu)
56{
57 if (rseq_unlikely(rseq_addv(v, count, cpu)))
58 return cpu_op_addv(v, count, cpu);
59 return 0;
60}
61
62static inline __attribute__((always_inline))
63int percpu_cmpeqv_storev_storev(intptr_t *v, intptr_t expect,
64 intptr_t *v2, intptr_t newv2,
65 intptr_t newv, int cpu)
66{
67 int ret;
68
69 ret = rseq_cmpeqv_trystorev_storev(v, expect, v2, newv2,
70 newv, cpu);
71 if (rseq_unlikely(ret)) {
72 if (ret > 0)
73 return ret;
74 return cpu_op_cmpeqv_storev_storev(v, expect, v2, newv2,
75 newv, cpu);
76 }
77 return 0;
78}
79
80static inline __attribute__((always_inline))
81int percpu_cmpeqv_storev_storev_release(intptr_t *v, intptr_t expect,
82 intptr_t *v2, intptr_t newv2,
83 intptr_t newv, int cpu)
84{
85 int ret;
86
87 ret = rseq_cmpeqv_trystorev_storev_release(v, expect, v2, newv2,
88 newv, cpu);
89 if (rseq_unlikely(ret)) {
90 if (ret > 0)
91 return ret;
d15728d5
MD
92 return cpu_op_cmpeqv_storev_storev_release(v, expect, v2, newv2,
93 newv, cpu);
b32429a9
MD
94 }
95 return 0;
96}
97
98static inline __attribute__((always_inline))
99int percpu_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
100 intptr_t *v2, intptr_t expect2,
101 intptr_t newv, int cpu)
102{
103 int ret;
104
105 ret = rseq_cmpeqv_cmpeqv_storev(v, expect, v2, expect2, newv, cpu);
106 if (rseq_unlikely(ret)) {
107 if (ret > 0)
108 return ret;
109 return cpu_op_cmpeqv_cmpeqv_storev(v, expect, v2, expect2,
110 newv, cpu);
111 }
112 return 0;
113}
114
115static inline __attribute__((always_inline))
116int percpu_cmpeqv_memcpy_storev(intptr_t *v, intptr_t expect,
117 void *dst, void *src, size_t len,
118 intptr_t newv, int cpu)
119{
120 int ret;
121
122 ret = rseq_cmpeqv_trymemcpy_storev(v, expect, dst, src, len,
123 newv, cpu);
124 if (rseq_unlikely(ret)) {
125 if (ret > 0)
126 return ret;
127 return cpu_op_cmpeqv_memcpy_storev(v, expect, dst, src, len,
128 newv, cpu);
129 }
130 return 0;
131}
132
133static inline __attribute__((always_inline))
134int percpu_cmpeqv_memcpy_storev_release(intptr_t *v, intptr_t expect,
135 void *dst, void *src, size_t len,
136 intptr_t newv, int cpu)
137{
138 int ret;
139
140 ret = rseq_cmpeqv_trymemcpy_storev_release(v, expect, dst, src, len,
141 newv, cpu);
142 if (rseq_unlikely(ret)) {
143 if (ret > 0)
144 return ret;
d15728d5
MD
145 return cpu_op_cmpeqv_memcpy_storev_release(v, expect, dst, src,
146 len, newv, cpu);
b32429a9
MD
147 }
148 return 0;
149}
150
de28c254
MD
151static inline __attribute__((always_inline))
152int percpu_deref_loadoffp(intptr_t *p, off_t voffp, intptr_t *load, int cpu)
153{
154 int ret;
155
156 ret = rseq_deref_loadoffp(p, voffp, load, cpu);
157 if (rseq_unlikely(ret)) {
158 if (ret > 0)
159 return ret;
160 return cpu_op_deref_loadoffp(p, voffp, load, cpu);
161 }
162 return 0;
163}
164
#endif /* RSEQ_PERCPU_OP_H */