// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
12 | ||
13 | #define SMP_CACHE_BYTES 64 | |
14 | #define cache_line_size() SMP_CACHE_BYTES | |
15 | #define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES))) | |
16 | #define unlikely(x) (__builtin_expect(!!(x), 0)) | |
52012619 | 17 | #define likely(x) (__builtin_expect(!!(x), 1)) |
9fb6bc5b MT |
18 | #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a)) |
19 | typedef pthread_spinlock_t spinlock_t; | |
20 | ||
21 | typedef int gfp_t; | |
59e6ae53 MT |
22 | static void *kmalloc(unsigned size, gfp_t gfp) |
23 | { | |
24 | return memalign(64, size); | |
25 | } | |
26 | ||
9fb6bc5b MT |
27 | static void *kzalloc(unsigned size, gfp_t gfp) |
28 | { | |
29 | void *p = memalign(64, size); | |
30 | if (!p) | |
31 | return p; | |
32 | memset(p, 0, size); | |
33 | ||
34 | return p; | |
35 | } | |
36 | ||
/*
 * kfree() lookalike.  free(NULL) is defined by the C standard to be a
 * no-op, so the previous NULL guard was redundant.
 */
static void kfree(void *p)
{
	free(p);
}
42 | ||
43 | static void spin_lock_init(spinlock_t *lock) | |
44 | { | |
45 | int r = pthread_spin_init(lock, 0); | |
46 | assert(!r); | |
47 | } | |
48 | ||
49 | static void spin_lock(spinlock_t *lock) | |
50 | { | |
51 | int ret = pthread_spin_lock(lock); | |
52 | assert(!ret); | |
53 | } | |
54 | ||
55 | static void spin_unlock(spinlock_t *lock) | |
56 | { | |
57 | int ret = pthread_spin_unlock(lock); | |
58 | assert(!ret); | |
59 | } | |
60 | ||
/* Bottom-half variant: no softirqs in userspace, so a plain lock suffices. */
static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}
65 | ||
/* Bottom-half variant: maps to a plain unlock in userspace. */
static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}
70 | ||
/* IRQ variant: no interrupts to disable in userspace; plain lock. */
static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}
75 | ||
/* IRQ variant: maps to a plain unlock in userspace. */
static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}
80 | ||
/* irqsave variant: the flags argument is accepted but unused here. */
static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}
85 | ||
/* irqrestore variant: the flags argument is accepted but unused here. */
static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}
90 | ||
91 | #include "../../../include/linux/ptr_ring.h" | |
92 | ||
/* Cumulative produce/consume counts used to fake get_buf() bookkeeping. */
static unsigned long long headcnt, tailcnt;
/* The ring under test, cache-line aligned. */
static struct ptr_ring array ____cacheline_aligned_in_smp;
95 | ||
96 | /* implemented by ring */ | |
97 | void alloc_ring(void) | |
98 | { | |
99 | int ret = ptr_ring_init(&array, ring_size, 0); | |
100 | assert(!ret); | |
3008a206 MT |
101 | /* Hacky way to poke at ring internals. Useful for testing though. */ |
102 | if (param) | |
103 | array.batch = param; | |
9fb6bc5b MT |
104 | } |
105 | ||
106 | /* guest side */ | |
107 | int add_inbuf(unsigned len, void *buf, void *datap) | |
108 | { | |
109 | int ret; | |
110 | ||
111 | ret = __ptr_ring_produce(&array, buf); | |
112 | if (ret >= 0) { | |
113 | ret = 0; | |
114 | headcnt++; | |
115 | } | |
116 | ||
117 | return ret; | |
118 | } | |
119 | ||
120 | /* | |
121 | * ptr_ring API provides no way for producer to find out whether a given | |
122 | * buffer was consumed. Our tests merely require that a successful get_buf | |
123 | * implies that add_inbuf succeed in the past, and that add_inbuf will succeed, | |
124 | * fake it accordingly. | |
125 | */ | |
126 | void *get_buf(unsigned *lenp, void **bufp) | |
127 | { | |
128 | void *datap; | |
129 | ||
130 | if (tailcnt == headcnt || __ptr_ring_full(&array)) | |
131 | datap = NULL; | |
132 | else { | |
133 | datap = "Buffer\n"; | |
134 | ++tailcnt; | |
135 | } | |
136 | ||
137 | return datap; | |
138 | } | |
139 | ||
d3c3589b | 140 | bool used_empty() |
9fb6bc5b | 141 | { |
d3c3589b | 142 | return (tailcnt == headcnt || __ptr_ring_full(&array)); |
9fb6bc5b MT |
143 | } |
144 | ||
/* Call notifications are not exercised by this harness; must not be reached. */
void disable_call()
{
	assert(0);
}
149 | ||
/* Call notifications are not exercised by this harness; must not be reached. */
bool enable_call()
{
	assert(0);
	/*
	 * Unreachable when asserts are on; keeps this value-returning
	 * function well-defined if built with NDEBUG.
	 */
	return false;
}
154 | ||
/* Kick notifications are not exercised by this harness; must not be reached. */
void kick_available(void)
{
	assert(0);
}
159 | ||
/* host side */
/* Kick notifications are not exercised by this harness; must not be reached. */
void disable_kick()
{
	assert(0);
}
165 | ||
/* Kick notifications are not exercised by this harness; must not be reached. */
bool enable_kick()
{
	assert(0);
	/*
	 * Unreachable when asserts are on; keeps this value-returning
	 * function well-defined if built with NDEBUG.
	 */
	return false;
}
170 | ||
/* True when the ring currently holds no buffer for the consumer. */
bool avail_empty()
{
	return !__ptr_ring_peek(&array);
}
175 | ||
176 | bool use_buf(unsigned *lenp, void **bufp) | |
177 | { | |
178 | void *ptr; | |
179 | ||
180 | ptr = __ptr_ring_consume(&array); | |
181 | ||
182 | return ptr; | |
183 | } | |
184 | ||
/* Call notifications are not exercised by this harness; must not be reached. */
void call_used(void)
{
	assert(0);
}