#ifndef LINUX_VIRTIO_H
#define LINUX_VIRTIO_H

#include <stdbool.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

#include <linux/types.h>
#include <errno.h>

typedef unsigned long long dma_addr_t;

struct scatterlist {
	unsigned long	page_link;
	unsigned int	offset;
	unsigned int	length;
	dma_addr_t	dma_address;
};

struct page {
	unsigned long long dummy;
};

#define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond))

/* Physical == Virtual */
#define virt_to_phys(p) ((unsigned long)(p))
#define phys_to_virt(a) ((void *)(unsigned long)(a))
/* Page address: Virtual / 4K */
#define virt_to_page(p) ((struct page *)((virt_to_phys(p) / 4096) * \
					 sizeof(struct page)))
#define offset_in_page(p) (((unsigned long)(p)) % 4096)
#define sg_phys(sg) (((sg)->page_link & ~0x3) / sizeof(struct page) * 4096 + \
		     (sg)->offset)
static inline void sg_mark_end(struct scatterlist *sg)
{
	/*
	 * Set termination bit, clear potential chain bit
	 */
	sg->page_link |= 0x02;
	sg->page_link &= ~0x01;
}

static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_mark_end(&sgl[nents - 1]);
}
static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
{
	unsigned long page_link = sg->page_link & 0x3;

	/*
	 * In order for the low bit stealing approach to work, pages
	 * must be aligned at a 32-bit boundary as a minimum.
	 */
	BUG_ON((unsigned long)page & 0x03);
	sg->page_link = page_link | (unsigned long)page;
}

static inline void sg_set_page(struct scatterlist *sg, struct page *page,
			       unsigned int len, unsigned int offset)
{
	sg_assign_page(sg, page);
	sg->offset = offset;
	sg->length = len;
}

static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
			      unsigned int buflen)
{
	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}

static inline void sg_init_one(struct scatterlist *sg, const void *buf,
			       unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
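/*
 * Example (illustrative only, not part of the original header): with the
 * identity virt/phys mapping above, one call describes a whole buffer:
 *
 *	char buf[64];
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, sizeof(buf));
 *
 * sg now covers buf with its termination bit set, ready to be handed to
 * virtqueue_add_buf() as a one-entry table.
 */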

typedef __u16 u16;

typedef enum {
	GFP_KERNEL,
	GFP_ATOMIC,
} gfp_t;

typedef enum {
	IRQ_NONE,
	IRQ_HANDLED
} irqreturn_t;

/* Stub allocators: the gfp flags are accepted for API compatibility but ignored. */
static inline void *kmalloc(size_t s, gfp_t gfp)
{
	return malloc(s);
}

static inline void kfree(void *p)
{
	free(p);
}

#define container_of(ptr, type, member) ({			\
	const typeof(((type *)0)->member) *__mptr = (ptr);	\
	(type *)((char *)__mptr - offsetof(type, member)); })
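/*
 * Example (illustrative only): container_of() recovers the enclosing
 * structure from a pointer to one of its members. For a hypothetical
 * wrapper around struct virtqueue:
 *
 *	struct my_vq {
 *		int id;
 *		struct virtqueue vq;
 *	};
 *
 *	static struct my_vq *to_my_vq(struct virtqueue *vq)
 *	{
 *		return container_of(vq, struct my_vq, vq);
 *	}
 */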

#define uninitialized_var(x) x = x

#ifndef likely
# define likely(x)	(__builtin_expect(!!(x), 1))
#endif
#ifndef unlikely
# define unlikely(x)	(__builtin_expect(!!(x), 0))
#endif

#define pr_err(format, ...) fprintf(stderr, format, ## __VA_ARGS__)
#ifdef DEBUG
#define pr_debug(format, ...) fprintf(stderr, format, ## __VA_ARGS__)
#else
#define pr_debug(format, ...) do {} while (0)
#endif
#define dev_err(dev, format, ...) fprintf(stderr, format, ## __VA_ARGS__)
#define dev_warn(dev, format, ...) fprintf(stderr, format, ## __VA_ARGS__)

/* TODO: empty stubs for now. Broken but enough for virtio_ring.c. */
#define list_add_tail(a, b) do {} while (0)
#define list_del(a) do {} while (0)

#define BITS_PER_BYTE		8
#define BITS_PER_LONG		(sizeof(long) * BITS_PER_BYTE)
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))

/* TODO: not atomic as it should be; we don't use this for anything important. */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p &= ~mask;
}

static inline int test_bit(int nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)));
}

/* The only feature we care to support */
#define virtio_has_feature(dev, feature) \
	test_bit((feature), (dev)->features)
/* end of stubs */

struct virtio_device {
	void *dev;
	unsigned long features[1];
};
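/*
 * Example (illustrative only): features is a plain bitmap here, so a test
 * program can set and query a feature bit directly. VIRTIO_RING_F_INDIRECT_DESC
 * from <linux/virtio_ring.h> is just one plausible choice of bit:
 *
 *	struct virtio_device vdev = { .features = { 0 } };
 *
 *	vdev.features[0] |= 1UL << VIRTIO_RING_F_INDIRECT_DESC;
 *	assert(virtio_has_feature(&vdev, VIRTIO_RING_F_INDIRECT_DESC));
 */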

struct virtqueue {
	/* TODO: commented out, as the list macros above are empty stubs
	 * for now. Broken but enough for virtio_ring.c.
	 * struct list_head list; */
	void (*callback)(struct virtqueue *vq);
	const char *name;
	struct virtio_device *vdev;
	void *priv;
};

#define EXPORT_SYMBOL_GPL(__EXPORT_SYMBOL_GPL_name) \
	void __EXPORT_SYMBOL_GPL##__EXPORT_SYMBOL_GPL_name() { \
	}
#define MODULE_LICENSE(__MODULE_LICENSE_value) \
	const char *__MODULE_LICENSE_name = __MODULE_LICENSE_value

#define CONFIG_SMP

#if defined(__i386__) || defined(__x86_64__)
#define barrier() asm volatile("" ::: "memory")
#define mb() __sync_synchronize()

#define smp_mb()	mb()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
/*
 * Only the weak (smp_*) barriers should ever be reached in this harness;
 * hitting a mandatory barrier is a bug, so trap it with abort().
 */
#define rmb()	abort()
#define wmb()	abort()
#else
#error Please fill in barrier macros
#endif
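/*
 * Sketch (illustrative only) of why the weak barriers above suffice: a
 * producer only needs to order filling a descriptor before publishing the
 * ring index, and a consumer the index load before the descriptor loads.
 * desc, avail and head are placeholder names, not vring internals:
 *
 *	desc[head] = value;	producer fills the descriptor
 *	smp_wmb();		order the fill before the publish
 *	avail->idx++;		publish to the other side
 *
 *	idx = avail->idx;	consumer reads the published index
 *	smp_rmb();		order the index load before data loads
 *	process(desc[head]);
 */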

/* Interfaces exported by virtio_ring. */
int virtqueue_add_buf(struct virtqueue *vq,
		      struct scatterlist sg[],
		      unsigned int out_num,
		      unsigned int in_num,
		      void *data,
		      gfp_t gfp);

void virtqueue_kick(struct virtqueue *vq);

void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);

void virtqueue_disable_cb(struct virtqueue *vq);

bool virtqueue_enable_cb(struct virtqueue *vq);

void *virtqueue_detach_unused_buf(struct virtqueue *vq);

struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);
void vring_del_virtqueue(struct virtqueue *vq);
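/*
 * Sketch (illustrative only) of the intended calling sequence. my_notify,
 * my_callback, ring and buf are placeholders; 256 and 4096 just mirror
 * common queue-size and alignment choices:
 *
 *	struct virtqueue *vq = vring_new_virtqueue(256, 4096, &vdev, true,
 *						   ring, my_notify,
 *						   my_callback, "test");
 *	struct scatterlist sg;
 *	unsigned int len;
 *
 *	sg_init_one(&sg, buf, sizeof(buf));
 *	if (virtqueue_add_buf(vq, &sg, 1, 0, buf, GFP_ATOMIC) >= 0)
 *		virtqueue_kick(vq);
 *	...
 *	while (virtqueue_get_buf(vq, &len))
 *		;			(reclaim completed buffers)
 *	vring_del_virtqueue(vq);
 */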

#endif /* LINUX_VIRTIO_H */