/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Common macros and functions for ring benchmarking.
 */
#ifndef MAIN_H
#define MAIN_H

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

extern bool do_exit;

#if defined(__x86_64__) || defined(__i386__)
#include <x86intrin.h>

/* Spin until the TSC has advanced by at least the requested cycle count. */
static inline void wait_cycles(unsigned long long cycles)
{
	unsigned long long t;

	t = __rdtsc();
	while (__rdtsc() - t < cycles) {}
}

#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500

#else
/* No cycle counter here: fail loudly rather than report bogus timings. */
static inline void wait_cycles(unsigned long long cycles)
{
	_Exit(5);
}
#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0
#endif

/* Simulate the cost of a VM exit/entry pair when signalling is enabled. */
static inline void vmexit(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMEXIT_CYCLES);
}

static inline void vmentry(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMENTRY_CYCLES);
}

/* implemented by ring */
void alloc_ring(void);
/* guest side */
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void disable_call(void);
bool enable_call(void);
void kick_available(void);
void poll_used(void);
/* host side */
void disable_kick(void);
bool enable_kick(void);
bool use_buf(unsigned *, void **);
void call_used(void);
void poll_avail(void);
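/*
 * Sketch only (assumption, not code from this harness): roughly how the
 * guest and host threads of the benchmark driver could exercise the ring
 * API declared above. "running", "buf" and "token" are hypothetical.
 *
 *	// guest thread: post buffers, then reap completions
 *	while (running) {
 *		unsigned len;
 *		void *data;
 *
 *		if (add_inbuf(0x10, buf, token) == 0)	// 0 == queued
 *			kick_available();		// let the host know
 *		while (get_buf(&len, &data))		// reap used buffers
 *			;
 *	}
 *
 *	// host thread: consume posted buffers, then signal the guest
 *	while (running) {
 *		unsigned len;
 *		void *data;
 *
 *		while (use_buf(&len, &data))		// drain avail buffers
 *			;
 *		call_used();				// completion signal
 *	}
 */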
/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

extern unsigned ring_size;

/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")

/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
#define cpu_relax() asm ("rep; nop" ::: "memory")
#else
#define cpu_relax() assert(0)
#endif

extern bool do_relax;

static inline void busy_wait(void)
{
	if (do_relax)
		cpu_relax();
	else
		/* prevent compiler from removing busy loops */
		barrier();
}
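/*
 * Sketch only (assumption; "data_ready" is a hypothetical flag): polling
 * sites are expected to spin through busy_wait() so the do_relax knob can
 * switch between "rep; nop" and a bare compiler barrier:
 *
 *	while (!data_ready)
 *		busy_wait();
 */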
/*
 * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
 * with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb() __sync_synchronize()

/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
	barrier(); \
	__atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

#define smp_acquire() do { \
	__atomic_thread_fence(__ATOMIC_ACQUIRE); \
	barrier(); \
} while (0)
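/*
 * Sketch only (assumption; "ring", "head", "tail" and "value" are
 * hypothetical, not this harness's actual layout): how the two fences
 * above pair up. The producer orders its payload store before publishing
 * the new index; the consumer orders the index load before reading the
 * payload:
 *
 *	// producer: fill the slot, then publish it
 *	ring[tail & (ring_size - 1)] = value;
 *	smp_release();
 *	tail++;
 *
 *	// consumer: wait for the slot, then read it
 *	while (head == tail)
 *		busy_wait();
 *	smp_acquire();
 *	value = ring[head & (ring_size - 1)];
 *	head++;
 */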
#endif