/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "virtio.h"
#include "sysemu.h"

/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN 4096

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 */
#define wmb() do { } while (0)

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    target_phys_addr_t desc;
    target_phys_addr_t avail;
    target_phys_addr_t used;
} VRing;
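
/* Split-ring roles: the guest driver fills the descriptor table and the
 * avail ring, while the device side (this code) writes only the used ring.
 * VRingAvail.ring[] holds head indices into the descriptor table, and
 * VRingUsed.ring[] hands those heads back along with the byte count
 * written into the guest's buffers.
 */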

struct VirtQueue
{
    VRing vring;
    target_phys_addr_t pa;
    uint16_t last_avail_idx;
    int inuse;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
};

#define VIRTIO_PCI_QUEUE_MAX 16

/* virt queue functions */
static void virtqueue_init(VirtQueue *vq)
{
    target_phys_addr_t pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}
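
/* Worked example (the queue size is illustrative): with num = 256, the
 * descriptor table occupies 256 * 16 = 4096 bytes, so avail = pa + 4096;
 * the avail ring ends at avail + 4 + 2 * 256 = pa + 4612, and the used
 * ring therefore starts at the next 4096-byte boundary, pa + 8192.
 */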

static inline uint64_t vring_desc_addr(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, vring_used_idx(vq) + val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable)
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    else
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
}
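
/* VRING_USED_F_NO_NOTIFY is only a hint: it tells the guest that it may
 * skip kicking the host after adding buffers, so buffers queued while the
 * flag is set must still be processed when notifications are re-enabled.
 */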

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Write this element into the next slot of the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}
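
/* virtqueue_fill() only stages a completed element in the used ring;
 * virtqueue_flush() publishes staged elements to the guest by advancing
 * the used index behind a write barrier.  virtqueue_push() below combines
 * the two for the common single-element case.
 */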

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    /* Make sure buffer is written before we update index. */
    wmb();
    vring_used_idx_increment(vq, count);
    vq->inuse -= count;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        fprintf(stderr, "Guest moved used index from %u to %u\n",
                idx, vring_avail_idx(vq));
        exit(1);
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        fprintf(stderr, "Guest says index %u is available\n", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    wmb();

    if (next >= max) {
        fprintf(stderr, "Desc next is %u\n", next);
        exit(1);
    }

    return next;
}
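
/* Chain walking terminates by returning max as a sentinel: callers loop
 * with while ((i = virtqueue_next_desc(desc_pa, i, max)) != max), so a
 * descriptor without VRING_DESC_F_NEXT ends the chain, and a next index
 * beyond the table is treated as a fatal guest error.
 */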

int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
{
    unsigned int idx;
    int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        target_phys_addr_t desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                fprintf(stderr, "Invalid size for indirect buffer table\n");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                fprintf(stderr, "Looped descriptor\n");
                exit(1);
            }

            /* loop over the indirect descriptor table; read the table
             * address from descriptor i before resetting i to 0 */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                fprintf(stderr, "Looped descriptor\n");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                if (in_bytes > 0 &&
                    (in_total += vring_desc_len(desc_pa, i)) >= in_bytes)
                    return 1;
            } else {
                if (out_bytes > 0 &&
                    (out_total += vring_desc_len(desc_pa, i)) >= out_bytes)
                    return 1;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect)
            total_bufs = num_bufs;
        else
            total_bufs++;
    }

    return 0;
}

int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    target_phys_addr_t desc_pa = vq->vring.desc;
    target_phys_addr_t len;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are neither input nor output buffers. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            fprintf(stderr, "Invalid size for indirect buffer table\n");
            exit(1);
        }

        /* loop over the indirect descriptor table */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    do {
        struct iovec *sg;
        int is_write = 0;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
            is_write = 1;
        } else
            sg = &elem->out_sg[elem->out_num++];

        /* Grab this descriptor, and check it's OK. */
        sg->iov_len = vring_desc_len(desc_pa, i);
        len = sg->iov_len;

        sg->iov_base = cpu_physical_memory_map(vring_desc_addr(desc_pa, i),
                                               &len, is_write);

        if (sg->iov_base == NULL || len != sg->iov_len) {
            fprintf(stderr, "virtio: trying to map MMIO memory\n");
            exit(1);
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            fprintf(stderr, "Looped descriptor\n");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    elem->index = head;

    vq->inuse++;

    return elem->in_num + elem->out_num;
}
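
/* Typical device-side use of the functions above, as a sketch (the handler
 * name and process() are illustrative, not part of this file):
 *
 *     static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         VirtQueueElement elem;
 *
 *         while (virtqueue_pop(vq, &elem)) {
 *             size_t written = process(elem.out_sg, elem.out_num,
 *                                      elem.in_sg, elem.in_num);
 *             virtqueue_push(vq, &elem, written);
 *         }
 *         virtio_notify(vdev, vq);
 *     }
 */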

/* virtio device */

void virtio_update_irq(VirtIODevice *vdev)
{
    if (vdev->binding->update_irq) {
        vdev->binding->update_irq(vdev->binding_opaque);
    }
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    virtio_update_irq(vdev);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint8_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint16_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    uint32_t val = data;

    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, target_phys_addr_t addr)
{
    if (addr == 0) {
        virtio_reset(vdev);
    } else {
        vdev->vq[n].pa = addr;
        virtqueue_init(&vdev->vq[n]);
    }
}

target_phys_addr_t virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    if (n < VIRTIO_PCI_QUEUE_MAX && vdev->vq[n].vring.desc) {
        vdev->vq[n].handle_output(vdev, &vdev->vq[n]);
    }
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}
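
/* A device registers its queues once at init time; for example (the
 * device name, sizes and handler are illustrative, not from this file):
 *
 *     vdev = virtio_common_init("virtio-foo", DEVICE_ID,
 *                               config_size, sizeof(VirtIOFoo));
 *     vq = virtio_add_queue(vdev, 128, my_handle_output);
 *
 * At most VIRTIO_PCI_QUEUE_MAX queues may be added, each no larger than
 * VIRTQUEUE_MAX_SIZE entries.
 */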

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Always notify when the queue is empty (if the guest acknowledged
     * the VIRTIO_F_NOTIFY_ON_EMPTY feature) */
    if ((vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT) &&
        (!(vdev->features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) ||
         (vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx)))
        return;

    vdev->isr |= 0x01;
    virtio_update_irq(vdev);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_update_irq(vdev);
}
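
/* In the ISR, bit 0 signals a queue interrupt and bit 1 a configuration
 * change; virtio_notify() therefore sets 0x01, while
 * virtio_notify_config() raises both bits at once with 0x03.
 */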

void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    /* FIXME: load/save binding. */
    //pci_device_save(&vdev->pci_dev, f);

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
    }
}

void virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i;

    /* FIXME: load/save binding. */
    //pci_device_load(&vdev->pci_dev, f);

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &vdev->features);
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);

        if (vdev->vq[i].pa) {
            virtqueue_init(&vdev->vq[i]);
        }
    }

    virtio_update_irq(vdev);
}

void virtio_cleanup(VirtIODevice *vdev)
{
    if (vdev->config)
        qemu_free(vdev->config);
    qemu_free(vdev->vq);
}

VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
                                 size_t config_size, size_t struct_size)
{
    VirtIODevice *vdev;

    vdev = qemu_mallocz(struct_size);

    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->vq = qemu_mallocz(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = qemu_mallocz(config_size);
    else
        vdev->config = NULL;

    qemu_register_reset(virtio_reset, 0, vdev);

    return vdev;
}

void virtio_bind_device(VirtIODevice *vdev, const VirtIOBindings *binding,
                        void *opaque)
{
    vdev->binding = binding;
    vdev->binding_opaque = opaque;
}