]> git.proxmox.com Git - qemu.git/blame - net/queue.c
Open 2.0 development tree
[qemu.git] / net / queue.c
CommitLineData
f7105843
MM
1/*
2 * Copyright (c) 2003-2008 Fabrice Bellard
3 * Copyright (c) 2009 Red Hat, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a copy
6 * of this software and associated documentation files (the "Software"), to deal
7 * in the Software without restriction, including without limitation the rights
8 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 * copies of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 * THE SOFTWARE.
22 */
23
e1144d00 24#include "net/queue.h"
1de7afc9 25#include "qemu/queue.h"
1422e32d 26#include "net/net.h"
f7105843
MM
27
28/* The delivery handler may only return zero if it will call
29 * qemu_net_queue_flush() when it determines that it is once again able
30 * to deliver packets. It must also call qemu_net_queue_purge() in its
31 * cleanup path.
32 *
33 * If a sent callback is provided to send(), the caller must handle a
34 * zero return from the delivery handler by not sending any more packets
35 * until we have invoked the callback. Only in that case will we queue
36 * the packet.
37 *
38 * If a sent callback isn't provided, we just drop the packet to avoid
39 * unbounded queueing.
40 */
41
42struct NetPacket {
43 QTAILQ_ENTRY(NetPacket) entry;
4e68f7a0 44 NetClientState *sender;
c0b8e49c 45 unsigned flags;
f7105843
MM
46 int size;
47 NetPacketSent *sent_cb;
48 uint8_t data[0];
49};
50
/* A FIFO of packets pending delivery to one receiver. */
struct NetQueue {
    void *opaque;            /* passed through to qemu_deliver_packet{,_iov}() */
    uint32_t nq_maxlen;      /* cap on queued packets without a sent_cb */
    uint32_t nq_count;       /* number of packets currently queued */

    QTAILQ_HEAD(packets, NetPacket) packets;

    unsigned delivering : 1; /* set while inside the delivery handler so
                              * re-entrant sends get queued, not delivered
                              * recursively */
};
60
86a77c38 61NetQueue *qemu_new_net_queue(void *opaque)
f7105843
MM
62{
63 NetQueue *queue;
64
7267c094 65 queue = g_malloc0(sizeof(NetQueue));
f7105843 66
f7105843 67 queue->opaque = opaque;
7d91ddd2
LR
68 queue->nq_maxlen = 10000;
69 queue->nq_count = 0;
f7105843
MM
70
71 QTAILQ_INIT(&queue->packets);
72
73 queue->delivering = 0;
74
75 return queue;
76}
77
78void qemu_del_net_queue(NetQueue *queue)
79{
80 NetPacket *packet, *next;
81
82 QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) {
83 QTAILQ_REMOVE(&queue->packets, packet, entry);
7267c094 84 g_free(packet);
f7105843
MM
85 }
86
7267c094 87 g_free(queue);
f7105843
MM
88}
89
06b5f36d
SH
90static void qemu_net_queue_append(NetQueue *queue,
91 NetClientState *sender,
92 unsigned flags,
93 const uint8_t *buf,
94 size_t size,
95 NetPacketSent *sent_cb)
f7105843
MM
96{
97 NetPacket *packet;
98
7d91ddd2
LR
99 if (queue->nq_count >= queue->nq_maxlen && !sent_cb) {
100 return; /* drop if queue full and no callback */
101 }
7267c094 102 packet = g_malloc(sizeof(NetPacket) + size);
f7105843 103 packet->sender = sender;
c0b8e49c 104 packet->flags = flags;
f7105843
MM
105 packet->size = size;
106 packet->sent_cb = sent_cb;
107 memcpy(packet->data, buf, size);
108
7d91ddd2 109 queue->nq_count++;
f7105843 110 QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
f7105843
MM
111}
112
06b5f36d
SH
113static void qemu_net_queue_append_iov(NetQueue *queue,
114 NetClientState *sender,
115 unsigned flags,
116 const struct iovec *iov,
117 int iovcnt,
118 NetPacketSent *sent_cb)
f7105843
MM
119{
120 NetPacket *packet;
121 size_t max_len = 0;
122 int i;
123
7d91ddd2
LR
124 if (queue->nq_count >= queue->nq_maxlen && !sent_cb) {
125 return; /* drop if queue full and no callback */
126 }
f7105843
MM
127 for (i = 0; i < iovcnt; i++) {
128 max_len += iov[i].iov_len;
129 }
130
7267c094 131 packet = g_malloc(sizeof(NetPacket) + max_len);
f7105843
MM
132 packet->sender = sender;
133 packet->sent_cb = sent_cb;
c0b8e49c 134 packet->flags = flags;
f7105843
MM
135 packet->size = 0;
136
137 for (i = 0; i < iovcnt; i++) {
138 size_t len = iov[i].iov_len;
139
140 memcpy(packet->data + packet->size, iov[i].iov_base, len);
141 packet->size += len;
142 }
143
7d91ddd2 144 queue->nq_count++;
f7105843 145 QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
f7105843
MM
146}
147
148static ssize_t qemu_net_queue_deliver(NetQueue *queue,
4e68f7a0 149 NetClientState *sender,
c0b8e49c 150 unsigned flags,
f7105843
MM
151 const uint8_t *data,
152 size_t size)
153{
154 ssize_t ret = -1;
155
156 queue->delivering = 1;
86a77c38 157 ret = qemu_deliver_packet(sender, flags, data, size, queue->opaque);
f7105843
MM
158 queue->delivering = 0;
159
160 return ret;
161}
162
163static ssize_t qemu_net_queue_deliver_iov(NetQueue *queue,
4e68f7a0 164 NetClientState *sender,
c0b8e49c 165 unsigned flags,
f7105843
MM
166 const struct iovec *iov,
167 int iovcnt)
168{
169 ssize_t ret = -1;
170
171 queue->delivering = 1;
86a77c38 172 ret = qemu_deliver_packet_iov(sender, flags, iov, iovcnt, queue->opaque);
f7105843
MM
173 queue->delivering = 0;
174
175 return ret;
176}
177
178ssize_t qemu_net_queue_send(NetQueue *queue,
4e68f7a0 179 NetClientState *sender,
c0b8e49c 180 unsigned flags,
f7105843
MM
181 const uint8_t *data,
182 size_t size,
183 NetPacketSent *sent_cb)
184{
185 ssize_t ret;
186
691a4f3a 187 if (queue->delivering || !qemu_can_send_packet(sender)) {
06b5f36d
SH
188 qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
189 return 0;
f7105843
MM
190 }
191
c0b8e49c 192 ret = qemu_net_queue_deliver(queue, sender, flags, data, size);
839f368f 193 if (ret == 0) {
c0b8e49c 194 qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
f7105843
MM
195 return 0;
196 }
197
198 qemu_net_queue_flush(queue);
199
200 return ret;
201}
202
203ssize_t qemu_net_queue_send_iov(NetQueue *queue,
4e68f7a0 204 NetClientState *sender,
c0b8e49c 205 unsigned flags,
f7105843
MM
206 const struct iovec *iov,
207 int iovcnt,
208 NetPacketSent *sent_cb)
209{
210 ssize_t ret;
211
691a4f3a 212 if (queue->delivering || !qemu_can_send_packet(sender)) {
06b5f36d
SH
213 qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb);
214 return 0;
f7105843
MM
215 }
216
c0b8e49c 217 ret = qemu_net_queue_deliver_iov(queue, sender, flags, iov, iovcnt);
839f368f 218 if (ret == 0) {
c0b8e49c 219 qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb);
f7105843
MM
220 return 0;
221 }
222
223 qemu_net_queue_flush(queue);
224
225 return ret;
226}
227
4e68f7a0 228void qemu_net_queue_purge(NetQueue *queue, NetClientState *from)
f7105843
MM
229{
230 NetPacket *packet, *next;
231
232 QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) {
233 if (packet->sender == from) {
234 QTAILQ_REMOVE(&queue->packets, packet, entry);
7d91ddd2 235 queue->nq_count--;
7267c094 236 g_free(packet);
f7105843
MM
237 }
238 }
239}
240
987a9b48 241bool qemu_net_queue_flush(NetQueue *queue)
f7105843
MM
242{
243 while (!QTAILQ_EMPTY(&queue->packets)) {
244 NetPacket *packet;
245 int ret;
246
247 packet = QTAILQ_FIRST(&queue->packets);
248 QTAILQ_REMOVE(&queue->packets, packet, entry);
7d91ddd2 249 queue->nq_count--;
f7105843
MM
250
251 ret = qemu_net_queue_deliver(queue,
252 packet->sender,
c0b8e49c 253 packet->flags,
f7105843
MM
254 packet->data,
255 packet->size);
839f368f 256 if (ret == 0) {
7d91ddd2 257 queue->nq_count++;
f7105843 258 QTAILQ_INSERT_HEAD(&queue->packets, packet, entry);
987a9b48 259 return false;
f7105843
MM
260 }
261
262 if (packet->sent_cb) {
263 packet->sent_cb(packet->sender, ret);
264 }
265
7267c094 266 g_free(packet);
f7105843 267 }
987a9b48 268 return true;
f7105843 269}