/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Common macros and functions for ring benchmarking.
 */
#ifndef MAIN_H
#define MAIN_H

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

extern bool do_exit;

#if defined(__x86_64__) || defined(__i386__)
#include <x86intrin.h>

/* Spin for the requested number of TSC cycles. */
static inline void wait_cycles(unsigned long long cycles)
{
	unsigned long long t;

	t = __rdtsc();
	while (__rdtsc() - t < cycles) {}
}

/* Approximate notification cost, in TSC cycles. */
#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500

#elif defined(__s390x__)
static inline void wait_cycles(unsigned long long cycles)
{
	/* brctg decrements the operand, so it must be input/output ("+d"). */
	asm volatile("0: brctg %0,0b" : "+d" (cycles));
}

/* tweak me */
#define VMEXIT_CYCLES 200
#define VMENTRY_CYCLES 200

#else
/* Unsupported architecture: bail out rather than report bogus results. */
static inline void wait_cycles(unsigned long long cycles)
{
	_Exit(5);
}
#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0
#endif

/* Simulate the cost of a VM exit when notifying the other side. */
static inline void vmexit(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMEXIT_CYCLES);
}

/* Simulate the cost of a VM entry when receiving a notification. */
static inline void vmentry(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMENTRY_CYCLES);
}

/* implemented by ring */
void alloc_ring(void);
/* guest side */
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void disable_call(void);
bool used_empty(void);
bool enable_call(void);
void kick_available(void);
/* host side */
void disable_kick(void);
bool avail_empty(void);
bool enable_kick(void);
bool use_buf(unsigned *, void **);
void call_used(void);
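
/*
 * Illustrative sketch only (not part of the benchmark): one plausible way
 * the two sides drive the API declared above.  The "len", "buf" and
 * "token" locals are hypothetical placeholders.
 *
 *	// guest: post a buffer, then notify the host
 *	if (add_inbuf(len, buf, token) == 0)
 *		kick_available();
 *
 *	// guest: sleep only if the used ring is still empty after re-arming
 *	if (used_empty() && enable_call())
 *		wait_for_call();
 *	buf = get_buf(&len, &token);
 *
 *	// host: sleep only if the avail ring is still empty after re-arming
 *	if (avail_empty() && enable_kick())
 *		wait_for_kick();
 *	if (use_buf(&len, &buf))
 *		call_used();
 */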

/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

extern unsigned ring_size;

/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")

/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
/* "rep; nop" is the PAUSE instruction: a hint that we are spin-waiting. */
#define cpu_relax() asm ("rep; nop" ::: "memory")
#elif defined(__s390x__)
#define cpu_relax() barrier()
#else
#define cpu_relax() assert(0)
#endif

extern bool do_relax;

static inline void busy_wait(void)
{
	if (do_relax)
		cpu_relax();
	else
		/* prevent compiler from removing busy loops */
		barrier();
}

/*
 * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
 * with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb() __sync_synchronize()
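
/*
 * Illustrative sketch only: a full barrier is needed where a store must be
 * ordered before a later load of a flag the other side writes (store-load
 * ordering, which the acquire/release fences below cannot provide).  The
 * "avail_idx" and "peer_sleeping" names are hypothetical.
 *
 *	avail_idx++;		// publish new work
 *	smp_mb();		// order the store before the load below
 *	if (peer_sleeping)	// flag written by the other side
 *		kick();
 */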

/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
	barrier(); \
	__atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

#define smp_acquire() do { \
	__atomic_thread_fence(__ATOMIC_ACQUIRE); \
	barrier(); \
} while (0)
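
/*
 * Illustrative sketch only: the intended pairing.  The producer issues
 * smp_release() between filling a descriptor and publishing its index, so
 * a consumer that observes the index and then issues smp_acquire() also
 * sees the descriptor contents.  "desc", "avail_idx", "i" and "last" are
 * hypothetical names.
 *
 *	// producer
 *	desc[i].addr = buf;
 *	smp_release();
 *	avail_idx = i + 1;
 *
 *	// consumer
 *	while (avail_idx == last)
 *		busy_wait();
 *	smp_acquire();
 *	process(desc[last].addr);
 */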

#endif /* MAIN_H */