1 | /* |
2 | * Copyright (c) 2003-2008 Fabrice Bellard | |
3 | * Copyright (c) 2009 Red Hat, Inc. | |
4 | * | |
5 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
6 | * of this software and associated documentation files (the "Software"), to deal | |
7 | * in the Software without restriction, including without limitation the rights | |
8 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
9 | * copies of the Software, and to permit persons to whom the Software is | |
10 | * furnished to do so, subject to the following conditions: | |
11 | * | |
12 | * The above copyright notice and this permission notice shall be included in | |
13 | * all copies or substantial portions of the Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
21 | * THE SOFTWARE. | |
22 | */ | |
23 | ||
24 | #include "net-queue.h" | |
25 | #include "qemu-queue.h" | |
26 | ||
27 | /* The delivery handler may only return zero if it will call | |
28 | * qemu_net_queue_flush() when it determines that it is once again able | |
29 | * to deliver packets. It must also call qemu_net_queue_purge() in its | |
30 | * cleanup path. | |
31 | * | |
32 | * If a sent callback is provided to send(), the caller must handle a | |
33 | * zero return from the delivery handler by not sending any more packets | |
34 | * until we have invoked the callback. Only in that case will we queue | |
35 | * the packet. | |
36 | * | |
37 | * If a sent callback isn't provided, we just drop the packet to avoid | |
38 | * unbounded queueing. | |
39 | */ | |
40 | ||
41 | struct NetPacket { | |
42 | QTAILQ_ENTRY(NetPacket) entry; | |
43 | VLANClientState *sender; | |
44 | int size; | |
45 | NetPacketSent *sent_cb; | |
46 | uint8_t data[0]; | |
47 | }; | |
48 | ||
/*
 * A packet queue in front of a delivery handler pair.  Packets are
 * appended while a delivery is in progress (or while the receiver is
 * full) and drained by qemu_net_queue_flush().
 */
struct NetQueue {
    NetPacketDeliver *deliver;          /* flat-buffer delivery handler */
    NetPacketDeliverIOV *deliver_iov;   /* scatter/gather delivery handler */
    void *opaque;                       /* passed through to both handlers */

    QTAILQ_HEAD(packets, NetPacket) packets;  /* FIFO of pending packets */

    /* Set while a delivery handler is running, so that re-entrant sends
     * are queued instead of delivered recursively. */
    unsigned delivering : 1;
};
58 | ||
59 | NetQueue *qemu_new_net_queue(NetPacketDeliver *deliver, | |
60 | NetPacketDeliverIOV *deliver_iov, | |
61 | void *opaque) | |
62 | { | |
63 | NetQueue *queue; | |
64 | ||
65 | queue = qemu_mallocz(sizeof(NetQueue)); | |
66 | ||
67 | queue->deliver = deliver; | |
68 | queue->deliver_iov = deliver_iov; | |
69 | queue->opaque = opaque; | |
70 | ||
71 | QTAILQ_INIT(&queue->packets); | |
72 | ||
73 | queue->delivering = 0; | |
74 | ||
75 | return queue; | |
76 | } | |
77 | ||
78 | void qemu_del_net_queue(NetQueue *queue) | |
79 | { | |
80 | NetPacket *packet, *next; | |
81 | ||
82 | QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) { | |
83 | QTAILQ_REMOVE(&queue->packets, packet, entry); | |
84 | qemu_free(packet); | |
85 | } | |
86 | ||
87 | qemu_free(queue); | |
88 | } | |
89 | ||
90 | static ssize_t qemu_net_queue_append(NetQueue *queue, | |
91 | VLANClientState *sender, | |
92 | const uint8_t *buf, | |
93 | size_t size, | |
94 | NetPacketSent *sent_cb) | |
95 | { | |
96 | NetPacket *packet; | |
97 | ||
98 | packet = qemu_malloc(sizeof(NetPacket) + size); | |
99 | packet->sender = sender; | |
100 | packet->size = size; | |
101 | packet->sent_cb = sent_cb; | |
102 | memcpy(packet->data, buf, size); | |
103 | ||
104 | QTAILQ_INSERT_TAIL(&queue->packets, packet, entry); | |
105 | ||
106 | return size; | |
107 | } | |
108 | ||
109 | static ssize_t qemu_net_queue_append_iov(NetQueue *queue, | |
110 | VLANClientState *sender, | |
111 | const struct iovec *iov, | |
112 | int iovcnt, | |
113 | NetPacketSent *sent_cb) | |
114 | { | |
115 | NetPacket *packet; | |
116 | size_t max_len = 0; | |
117 | int i; | |
118 | ||
119 | for (i = 0; i < iovcnt; i++) { | |
120 | max_len += iov[i].iov_len; | |
121 | } | |
122 | ||
123 | packet = qemu_malloc(sizeof(NetPacket) + max_len); | |
124 | packet->sender = sender; | |
125 | packet->sent_cb = sent_cb; | |
126 | packet->size = 0; | |
127 | ||
128 | for (i = 0; i < iovcnt; i++) { | |
129 | size_t len = iov[i].iov_len; | |
130 | ||
131 | memcpy(packet->data + packet->size, iov[i].iov_base, len); | |
132 | packet->size += len; | |
133 | } | |
134 | ||
135 | QTAILQ_INSERT_TAIL(&queue->packets, packet, entry); | |
136 | ||
137 | return packet->size; | |
138 | } | |
139 | ||
140 | static ssize_t qemu_net_queue_deliver(NetQueue *queue, | |
141 | VLANClientState *sender, | |
142 | const uint8_t *data, | |
143 | size_t size) | |
144 | { | |
145 | ssize_t ret = -1; | |
146 | ||
147 | queue->delivering = 1; | |
148 | ret = queue->deliver(sender, data, size, queue->opaque); | |
149 | queue->delivering = 0; | |
150 | ||
151 | return ret; | |
152 | } | |
153 | ||
154 | static ssize_t qemu_net_queue_deliver_iov(NetQueue *queue, | |
155 | VLANClientState *sender, | |
156 | const struct iovec *iov, | |
157 | int iovcnt) | |
158 | { | |
159 | ssize_t ret = -1; | |
160 | ||
161 | queue->delivering = 1; | |
162 | ret = queue->deliver_iov(sender, iov, iovcnt, queue->opaque); | |
163 | queue->delivering = 0; | |
164 | ||
165 | return ret; | |
166 | } | |
167 | ||
168 | ssize_t qemu_net_queue_send(NetQueue *queue, | |
169 | VLANClientState *sender, | |
170 | const uint8_t *data, | |
171 | size_t size, | |
172 | NetPacketSent *sent_cb) | |
173 | { | |
174 | ssize_t ret; | |
175 | ||
176 | if (queue->delivering) { | |
177 | return qemu_net_queue_append(queue, sender, data, size, NULL); | |
178 | } | |
179 | ||
180 | ret = qemu_net_queue_deliver(queue, sender, data, size); | |
181 | if (ret == 0 && sent_cb != NULL) { | |
182 | qemu_net_queue_append(queue, sender, data, size, sent_cb); | |
183 | return 0; | |
184 | } | |
185 | ||
186 | qemu_net_queue_flush(queue); | |
187 | ||
188 | return ret; | |
189 | } | |
190 | ||
191 | ssize_t qemu_net_queue_send_iov(NetQueue *queue, | |
192 | VLANClientState *sender, | |
193 | const struct iovec *iov, | |
194 | int iovcnt, | |
195 | NetPacketSent *sent_cb) | |
196 | { | |
197 | ssize_t ret; | |
198 | ||
199 | if (queue->delivering) { | |
200 | return qemu_net_queue_append_iov(queue, sender, iov, iovcnt, NULL); | |
201 | } | |
202 | ||
203 | ret = qemu_net_queue_deliver_iov(queue, sender, iov, iovcnt); | |
204 | if (ret == 0 && sent_cb != NULL) { | |
205 | qemu_net_queue_append_iov(queue, sender, iov, iovcnt, sent_cb); | |
206 | return 0; | |
207 | } | |
208 | ||
209 | qemu_net_queue_flush(queue); | |
210 | ||
211 | return ret; | |
212 | } | |
213 | ||
214 | void qemu_net_queue_purge(NetQueue *queue, VLANClientState *from) | |
215 | { | |
216 | NetPacket *packet, *next; | |
217 | ||
218 | QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) { | |
219 | if (packet->sender == from) { | |
220 | QTAILQ_REMOVE(&queue->packets, packet, entry); | |
221 | qemu_free(packet); | |
222 | } | |
223 | } | |
224 | } | |
225 | ||
226 | void qemu_net_queue_flush(NetQueue *queue) | |
227 | { | |
228 | while (!QTAILQ_EMPTY(&queue->packets)) { | |
229 | NetPacket *packet; | |
230 | int ret; | |
231 | ||
232 | packet = QTAILQ_FIRST(&queue->packets); | |
233 | QTAILQ_REMOVE(&queue->packets, packet, entry); | |
234 | ||
235 | ret = qemu_net_queue_deliver(queue, | |
236 | packet->sender, | |
237 | packet->data, | |
238 | packet->size); | |
239 | if (ret == 0 && packet->sent_cb != NULL) { | |
240 | QTAILQ_INSERT_HEAD(&queue->packets, packet, entry); | |
241 | break; | |
242 | } | |
243 | ||
244 | if (packet->sent_cb) { | |
245 | packet->sent_cb(packet->sender, ret); | |
246 | } | |
247 | ||
248 | qemu_free(packet); | |
249 | } | |
250 | } |