From b32429a9921115b0bd88c06480ca05b46f667a09 Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Wed, 3 Oct 2018 17:04:18 -0400
Subject: [PATCH] Add cpu-opv helpers

Signed-off-by: Mathieu Desnoyers
---
 Makefile                 |   7 +-
 include/rseq/cpu-op.h    |  42 +++++
 include/rseq/percpu-op.h | 146 ++++++++++++++++
 src/cpu-op.c             | 353 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 545 insertions(+), 3 deletions(-)
 create mode 100644 include/rseq/cpu-op.h
 create mode 100644 include/rseq/percpu-op.h
 create mode 100644 src/cpu-op.c

diff --git a/Makefile b/Makefile
index 876bd16..1a8e5b0 100644
--- a/Makefile
+++ b/Makefile
@@ -18,10 +18,11 @@ PREFIX = /usr/local
 
 all: librseq.so
 
-INCLUDES=$(wildcard remote/*.h)
+INCLUDES=$(wildcard include/rseq/*.h)
 
-librseq.so: src/rseq.c ${INCLUDES}
-	$(CC) $(CFLAGS) $(LDFLAGS) $(CPPFLAGS) -shared -fpic src/rseq.c -o $@
+librseq.so: src/rseq.c src/cpu-op.c ${INCLUDES}
+	$(CC) $(CFLAGS) $(LDFLAGS) $(CPPFLAGS) -shared -fpic \
+		src/rseq.c src/cpu-op.c -o $@
 
 .PHONY: clean install uninstall
 
diff --git a/include/rseq/cpu-op.h b/include/rseq/cpu-op.h
new file mode 100644
index 0000000..0d3c1bf
--- /dev/null
+++ b/include/rseq/cpu-op.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * cpu-op.h
+ *
+ * (C) Copyright 2017-2018 - Mathieu Desnoyers
+ */
+
+#ifndef RSEQ_CPU_OP_H
+#define RSEQ_CPU_OP_H
+
+#include
+#include
+#include
+
+int cpu_opv(struct cpu_op *cpuopv, int cpuopcnt, int cpu, int flags);
+int cpu_op_get_current_cpu(void);
+
+int cpu_op_cmpxchg(void *v, void *expect, void *old, void *_new, size_t len,
+		   int cpu);
+int cpu_op_add(void *v, int64_t count, size_t len, int cpu);
+
+int cpu_op_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu);
+int cpu_op_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+				 off_t voffp, intptr_t *load, int cpu);
+int cpu_op_cmpeqv_storev_storev(intptr_t *v, intptr_t expect,
+				intptr_t *v2, intptr_t newv2,
+				intptr_t newv, int cpu);
+int cpu_op_cmpeqv_storev_mb_storev(intptr_t *v, intptr_t expect,
+				   intptr_t *v2, intptr_t newv2,
+				   intptr_t newv, int cpu);
+int cpu_op_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+				intptr_t *v2, intptr_t expect2,
+				intptr_t newv, int cpu);
+int cpu_op_cmpeqv_memcpy_storev(intptr_t *v, intptr_t expect,
+				void *dst, void *src, size_t len,
+				intptr_t newv, int cpu);
+int cpu_op_cmpeqv_memcpy_mb_storev(intptr_t *v, intptr_t expect,
+				   void *dst, void *src, size_t len,
+				   intptr_t newv, int cpu);
+int cpu_op_addv(intptr_t *v, int64_t count, int cpu);
+
+#endif /* RSEQ_CPU_OP_H */
diff --git a/include/rseq/percpu-op.h b/include/rseq/percpu-op.h
new file mode 100644
index 0000000..61df793
--- /dev/null
+++ b/include/rseq/percpu-op.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * percpu-op.h
+ *
+ * (C) Copyright 2017-2018 - Mathieu Desnoyers
+ */
+
+#ifndef RSEQ_PERCPU_OP_H
+#define RSEQ_PERCPU_OP_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+static inline __attribute__((always_inline))
+int percpu_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv,
+			 int cpu)
+{
+	int ret;
+
+	ret = rseq_cmpeqv_storev(v, expect, newv, cpu);
+	if (rseq_unlikely(ret)) {
+		if (ret > 0)
+			return ret;
+		return cpu_op_cmpeqv_storev(v, expect, newv, cpu);
+	}
+	return 0;
+}
+
+static inline __attribute__((always_inline))
+int percpu_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+				 off_t voffp, intptr_t *load, int cpu)
+{
+	int ret;
+
+	ret = rseq_cmpnev_storeoffp_load(v, expectnot, voffp, load, cpu);
+	if (rseq_unlikely(ret)) {
+		if (ret > 0)
+			return ret;
+		return cpu_op_cmpnev_storeoffp_load(v, expectnot, voffp,
+						    load, cpu);
+	}
+	return 0;
+}
+
+static inline __attribute__((always_inline))
+int percpu_addv(intptr_t *v, intptr_t count, int cpu)
+{
+	if (rseq_unlikely(rseq_addv(v, count, cpu)))
+		return cpu_op_addv(v, count, cpu);
+	return 0;
+}
+
+static inline __attribute__((always_inline))
+int percpu_cmpeqv_storev_storev(intptr_t *v, intptr_t expect,
+				intptr_t *v2, intptr_t newv2,
+				intptr_t newv, int cpu)
+{
+	int ret;
+
+	ret = rseq_cmpeqv_trystorev_storev(v, expect, v2, newv2,
+					   newv, cpu);
+	if (rseq_unlikely(ret)) {
+		if (ret > 0)
+			return ret;
+		return cpu_op_cmpeqv_storev_storev(v, expect, v2, newv2,
+						   newv, cpu);
+	}
+	return 0;
+}
+
+static inline __attribute__((always_inline))
+int percpu_cmpeqv_storev_storev_release(intptr_t *v, intptr_t expect,
+					intptr_t *v2, intptr_t newv2,
+					intptr_t newv, int cpu)
+{
+	int ret;
+
+	ret = rseq_cmpeqv_trystorev_storev_release(v, expect, v2, newv2,
+						   newv, cpu);
+	if (rseq_unlikely(ret)) {
+		if (ret > 0)
+			return ret;
+		return cpu_op_cmpeqv_storev_mb_storev(v, expect, v2, newv2,
+						      newv, cpu);
+	}
+	return 0;
+}
+
+static inline __attribute__((always_inline))
+int percpu_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+				intptr_t *v2, intptr_t expect2,
+				intptr_t newv, int cpu)
+{
+	int ret;
+
+	ret = rseq_cmpeqv_cmpeqv_storev(v, expect, v2, expect2, newv, cpu);
+	if (rseq_unlikely(ret)) {
+		if (ret > 0)
+			return ret;
+		return cpu_op_cmpeqv_cmpeqv_storev(v, expect, v2, expect2,
+						   newv, cpu);
+	}
+	return 0;
+}
+
+static inline __attribute__((always_inline))
+int percpu_cmpeqv_memcpy_storev(intptr_t *v, intptr_t expect,
+				void *dst, void *src, size_t len,
+				intptr_t newv, int cpu)
+{
+	int ret;
+
+	ret = rseq_cmpeqv_trymemcpy_storev(v, expect, dst, src, len,
+					   newv, cpu);
+	if (rseq_unlikely(ret)) {
+		if (ret > 0)
+			return ret;
+		return cpu_op_cmpeqv_memcpy_storev(v, expect, dst, src, len,
+						   newv, cpu);
+	}
+	return 0;
+}
+
+static inline __attribute__((always_inline))
+int percpu_cmpeqv_memcpy_storev_release(intptr_t *v, intptr_t expect,
+					void *dst, void *src, size_t len,
+					intptr_t newv, int cpu)
+{
+	int ret;
+
+	ret = rseq_cmpeqv_trymemcpy_storev_release(v, expect, dst, src, len,
+						   newv, cpu);
+	if (rseq_unlikely(ret)) {
+		if (ret > 0)
+			return ret;
+		return cpu_op_cmpeqv_memcpy_mb_storev(v, expect, dst, src, len,
+						      newv, cpu);
+	}
+	return 0;
+}
+
+#endif /* RSEQ_PERCPU_OP_H */
diff --git a/src/cpu-op.c b/src/cpu-op.c
new file mode 100644
index 0000000..f02e0b9
--- /dev/null
+++ b/src/cpu-op.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: LGPL-2.1
+/*
+ * cpu-op.c
+ *
+ * Copyright (C) 2017 Mathieu Desnoyers
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ */
+
+#define _GNU_SOURCE
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))
+
+#define ACCESS_ONCE(x)		(*(__volatile__ __typeof__(x) *)&(x))
+#define WRITE_ONCE(x, v)	__extension__ ({ ACCESS_ONCE(x) = (v); })
+#define READ_ONCE(x)		ACCESS_ONCE(x)
+
+int cpu_opv(struct cpu_op *cpu_opv, int cpuopcnt, int cpu, int flags)
+{
+	return syscall(__NR_cpu_opv, cpu_opv, cpuopcnt, cpu, flags);
+}
+
+int cpu_op_get_current_cpu(void)
+{
+	int cpu;
+
+	cpu = sched_getcpu();
+	if (cpu < 0) {
+		perror("sched_getcpu()");
+		abort();
+	}
+	return cpu;
+}
+
+int cpu_op_cmpxchg(void *v, void *expect, void *old, void *n, size_t len,
+		   int cpu)
+{
+	struct cpu_op opvec[] = {
+		[0] = {
+			.op = CPU_MEMCPY_OP,
+			.len = len,
+			.u.memcpy_op.dst = (unsigned long)old,
+			.u.memcpy_op.src = (unsigned long)v,
+			.u.memcpy_op.expect_fault_dst = 0,
+			.u.memcpy_op.expect_fault_src = 0,
+		},
+		[1] = {
+			.op = CPU_COMPARE_EQ_OP,
+			.len = len,
+			.u.compare_op.a = (unsigned long)v,
+			.u.compare_op.b = (unsigned long)expect,
+			.u.compare_op.expect_fault_a = 0,
+			.u.compare_op.expect_fault_b = 0,
+		},
+		[2] = {
+			.op = CPU_MEMCPY_OP,
+			.len = len,
+			.u.memcpy_op.dst = (unsigned long)v,
+			.u.memcpy_op.src = (unsigned long)n,
+			.u.memcpy_op.expect_fault_dst = 0,
+			.u.memcpy_op.expect_fault_src = 0,
+		},
+	};
+
+	return cpu_opv(opvec, ARRAY_SIZE(opvec), cpu, 0);
+}
+
+int cpu_op_add(void *v, int64_t count, size_t len, int cpu)
+{
+	struct cpu_op opvec[] = {
+		[0] = {
+			.op = CPU_ADD_OP,
+			.len = len,
+			.u.arithmetic_op.p = (unsigned long)v,
+			.u.arithmetic_op.count = count,
+			.u.arithmetic_op.expect_fault_p = 0,
+		},
+	};
+
+	return cpu_opv(opvec, ARRAY_SIZE(opvec), cpu, 0);
+}
+
+int cpu_op_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv,
+			 int cpu)
+{
+	struct cpu_op opvec[] = {
+		[0] = {
+			.op = CPU_COMPARE_EQ_OP,
+			.len = sizeof(intptr_t),
+			.u.compare_op.a = (unsigned long)v,
+			.u.compare_op.b = (unsigned long)&expect,
+			.u.compare_op.expect_fault_a = 0,
+			.u.compare_op.expect_fault_b = 0,
+		},
+		[1] = {
+			.op = CPU_MEMCPY_OP,
+			.len = sizeof(intptr_t),
+			.u.memcpy_op.dst = (unsigned long)v,
+			.u.memcpy_op.src = (unsigned long)&newv,
+			.u.memcpy_op.expect_fault_dst = 0,
+			.u.memcpy_op.expect_fault_src = 0,
+		},
+	};
+
+	return cpu_opv(opvec, ARRAY_SIZE(opvec), cpu, 0);
+}
+
+static int cpu_op_cmpeqv_storep_expect_fault(intptr_t *v, intptr_t expect,
+					     intptr_t *newp, int cpu)
+{
+	struct cpu_op opvec[] = {
+		[0] = {
+			.op = CPU_COMPARE_EQ_OP,
+			.len = sizeof(intptr_t),
+			.u.compare_op.a = (unsigned long)v,
+			.u.compare_op.b = (unsigned long)&expect,
+			.u.compare_op.expect_fault_a = 0,
+			.u.compare_op.expect_fault_b = 0,
+		},
+		[1] = {
+			.op = CPU_MEMCPY_OP,
+			.len = sizeof(intptr_t),
+			.u.memcpy_op.dst = (unsigned long)v,
+			.u.memcpy_op.src = (unsigned long)newp,
+			.u.memcpy_op.expect_fault_dst = 0,
+			/* Return EAGAIN on src fault. */
+			.u.memcpy_op.expect_fault_src = 1,
+		},
+	};
+
+	return cpu_opv(opvec, ARRAY_SIZE(opvec), cpu, 0);
+}
+
+int cpu_op_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+				 off_t voffp, intptr_t *load, int cpu)
+{
+	int ret;
+
+	do {
+		intptr_t oldv = READ_ONCE(*v);
+		intptr_t *newp = (intptr_t *)(oldv + voffp);
+
+		if (oldv == expectnot)
+			return 1;
+		ret = cpu_op_cmpeqv_storep_expect_fault(v, oldv, newp, cpu);
+		if (!ret) {
+			*load = oldv;
+			return 0;
+		}
+	} while (ret > 0);
+
+	return -1;
+}
+
+int cpu_op_cmpeqv_storev_storev(intptr_t *v, intptr_t expect,
+				intptr_t *v2, intptr_t newv2,
+				intptr_t newv, int cpu)
+{
+	struct cpu_op opvec[] = {
+		[0] = {
+			.op = CPU_COMPARE_EQ_OP,
+			.len = sizeof(intptr_t),
+			.u.compare_op.a = (unsigned long)v,
+			.u.compare_op.b = (unsigned long)&expect,
+			.u.compare_op.expect_fault_a = 0,
+			.u.compare_op.expect_fault_b = 0,
+		},
+		[1] = {
+			.op = CPU_MEMCPY_OP,
+			.len = sizeof(intptr_t),
+			.u.memcpy_op.dst = (unsigned long)v2,
+			.u.memcpy_op.src = (unsigned long)&newv2,
+			.u.memcpy_op.expect_fault_dst = 0,
+			.u.memcpy_op.expect_fault_src = 0,
+		},
+		[2] = {
+			.op = CPU_MEMCPY_OP,
+			.len = sizeof(intptr_t),
+			.u.memcpy_op.dst = (unsigned long)v,
+			.u.memcpy_op.src = (unsigned long)&newv,
+			.u.memcpy_op.expect_fault_dst = 0,
+			.u.memcpy_op.expect_fault_src = 0,
+		},
+	};
+
+	return cpu_opv(opvec, ARRAY_SIZE(opvec), cpu, 0);
+}
+
+int cpu_op_cmpeqv_storev_mb_storev(intptr_t *v, intptr_t expect,
+				   intptr_t *v2, intptr_t newv2,
+				   intptr_t newv, int cpu)
+{
+	struct cpu_op opvec[] = {
+		[0] = {
+			.op = CPU_COMPARE_EQ_OP,
+			.len = sizeof(intptr_t),
+			.u.compare_op.a = (unsigned long)v,
+			.u.compare_op.b = (unsigned long)&expect,
+			.u.compare_op.expect_fault_a = 0,
+			.u.compare_op.expect_fault_b = 0,
+		},
+		[1] = {
+			.op = CPU_MEMCPY_OP,
+			.len = sizeof(intptr_t),
+			.u.memcpy_op.dst = (unsigned long)v2,
+			.u.memcpy_op.src = (unsigned long)&newv2,
+			.u.memcpy_op.expect_fault_dst = 0,
+			.u.memcpy_op.expect_fault_src = 0,
+		},
+		[2] = {
+			.op = CPU_MB_OP,
+		},
+		[3] = {
+			.op = CPU_MEMCPY_OP,
+			.len = sizeof(intptr_t),
+			.u.memcpy_op.dst = (unsigned long)v,
+			.u.memcpy_op.src = (unsigned long)&newv,
+			.u.memcpy_op.expect_fault_dst = 0,
+			.u.memcpy_op.expect_fault_src = 0,
+		},
+	};
+
+	return cpu_opv(opvec, ARRAY_SIZE(opvec), cpu, 0);
+}
+
+int cpu_op_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+				intptr_t *v2, intptr_t expect2,
+				intptr_t newv, int cpu)
+{
+	struct cpu_op opvec[] = {
+		[0] = {
+			.op = CPU_COMPARE_EQ_OP,
+			.len = sizeof(intptr_t),
+			.u.compare_op.a = (unsigned long)v,
+			.u.compare_op.b = (unsigned long)&expect,
+			.u.compare_op.expect_fault_a = 0,
+			.u.compare_op.expect_fault_b = 0,
+		},
+		[1] = {
+			.op = CPU_COMPARE_EQ_OP,
+			.len = sizeof(intptr_t),
+			.u.compare_op.a = (unsigned long)v2,
+			.u.compare_op.b = (unsigned long)&expect2,
+			.u.compare_op.expect_fault_a = 0,
+			.u.compare_op.expect_fault_b = 0,
+		},
+		[2] = {
+			.op = CPU_MEMCPY_OP,
+			.len = sizeof(intptr_t),
+			.u.memcpy_op.dst = (unsigned long)v,
+			.u.memcpy_op.src = (unsigned long)&newv,
+			.u.memcpy_op.expect_fault_dst = 0,
+			.u.memcpy_op.expect_fault_src = 0,
+		},
+	};
+
+	return cpu_opv(opvec, ARRAY_SIZE(opvec), cpu, 0);
+}
+
+int cpu_op_cmpeqv_memcpy_storev(intptr_t *v, intptr_t expect,
+				void *dst, void *src, size_t len,
+				intptr_t newv, int cpu)
+{
+	struct cpu_op opvec[] = {
+		[0] = {
+			.op = CPU_COMPARE_EQ_OP,
+			.len = sizeof(intptr_t),
+			.u.compare_op.a = (unsigned long)v,
+			.u.compare_op.b = (unsigned long)&expect,
+			.u.compare_op.expect_fault_a = 0,
+			.u.compare_op.expect_fault_b = 0,
+		},
+		[1] = {
+			.op = CPU_MEMCPY_OP,
+			.len = len,
+			.u.memcpy_op.dst = (unsigned long)dst,
+			.u.memcpy_op.src = (unsigned long)src,
+			.u.memcpy_op.expect_fault_dst = 0,
+			.u.memcpy_op.expect_fault_src = 0,
+		},
+		[2] = {
+			.op = CPU_MEMCPY_OP,
+			.len = sizeof(intptr_t),
+			.u.memcpy_op.dst = (unsigned long)v,
+			.u.memcpy_op.src = (unsigned long)&newv,
+			.u.memcpy_op.expect_fault_dst = 0,
+			.u.memcpy_op.expect_fault_src = 0,
+		},
+	};
+
+	return cpu_opv(opvec, ARRAY_SIZE(opvec), cpu, 0);
+}
+
+int cpu_op_cmpeqv_memcpy_mb_storev(intptr_t *v, intptr_t expect,
+				   void *dst, void *src, size_t len,
+				   intptr_t newv, int cpu)
+{
+	struct cpu_op opvec[] = {
+		[0] = {
+			.op = CPU_COMPARE_EQ_OP,
+			.len = sizeof(intptr_t),
+			.u.compare_op.a = (unsigned long)v,
+			.u.compare_op.b = (unsigned long)&expect,
+			.u.compare_op.expect_fault_a = 0,
+			.u.compare_op.expect_fault_b = 0,
+		},
+		[1] = {
+			.op = CPU_MEMCPY_OP,
+			.len = len,
+			.u.memcpy_op.dst = (unsigned long)dst,
+			.u.memcpy_op.src = (unsigned long)src,
+			.u.memcpy_op.expect_fault_dst = 0,
+			.u.memcpy_op.expect_fault_src = 0,
+		},
+		[2] = {
+			.op = CPU_MB_OP,
+		},
+		[3] = {
+			.op = CPU_MEMCPY_OP,
+			.len = sizeof(intptr_t),
+			.u.memcpy_op.dst = (unsigned long)v,
+			.u.memcpy_op.src = (unsigned long)&newv,
+			.u.memcpy_op.expect_fault_dst = 0,
+			.u.memcpy_op.expect_fault_src = 0,
+		},
+	};
+
+	return cpu_opv(opvec, ARRAY_SIZE(opvec), cpu, 0);
+}
+
+int cpu_op_addv(intptr_t *v, int64_t count, int cpu)
+{
+	return cpu_op_add(v, count, sizeof(intptr_t), cpu);
+}
-- 
2.34.1
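
Example usage (editor's illustrative sketch, not part of the patch): the program below shows the intended pattern for the helpers added above. percpu_addv() and percpu_cmpeqv_storev() try the rseq fast path first and fall back to the cpu_opv system call otherwise; following the convention visible in the helpers above, a positive return value is treated as a failed compare that the caller may retry. Only percpu_addv(), percpu_cmpeqv_storev() and cpu_op_get_current_cpu() come from this patch; NR_CPUS_MAX, the counters array, the list layout and main() are invented for illustration, and building it assumes a kernel providing both the rseq and cpu_opv system calls and a program linked against librseq.

/*
 * Illustrative sketch only, not part of this patch: a per-CPU counter
 * increment and a per-CPU list push built on the percpu-op helpers.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <rseq/percpu-op.h>

#define NR_CPUS_MAX	4096		/* invented bound for the example */

static intptr_t counters[NR_CPUS_MAX];

struct node {
	struct node *next;
};

static struct node *list_head[NR_CPUS_MAX];

/* Add 1 to the counter of the CPU the thread currently runs on. */
static int counter_inc(void)
{
	int cpu = cpu_op_get_current_cpu();

	/* 0 on success, negative with errno set on syscall error. */
	return percpu_addv(&counters[cpu], 1, cpu);
}

/* Push a node onto the list of the current CPU, retrying on conflict. */
static int list_push(struct node *node)
{
	int ret;

	do {
		int cpu = cpu_op_get_current_cpu();
		struct node *expect = list_head[cpu];

		node->next = expect;
		/* Store node as new head only if the head is still expect. */
		ret = percpu_cmpeqv_storev((intptr_t *)&list_head[cpu],
					   (intptr_t)expect, (intptr_t)node,
					   cpu);
	} while (ret > 0);	/* positive return: compare failed, retry */
	return ret;
}

int main(void)
{
	struct node node = { NULL };

	if (counter_inc() || list_push(&node))
		perror("per-cpu operation");
	printf("incremented one per-CPU counter, pushed one node\n");
	return 0;
}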