git.proxmox.com Git - qemu.git - blob: slirp/if.c
slirp: Fix queue walking in if_start
[qemu.git] / slirp / if.c
1 /*
2 * Copyright (c) 1995 Danny Gasparovski.
3 *
4 * Please read the file COPYRIGHT for the
5 * terms and conditions of the copyright.
6 */
7
8 #include <slirp.h>
9 #include "qemu-timer.h"
10
11 static void
12 ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
13 {
14 ifm->ifs_next = ifmhead->ifs_next;
15 ifmhead->ifs_next = ifm;
16 ifm->ifs_prev = ifmhead;
17 ifm->ifs_next->ifs_prev = ifm;
18 }
19
20 static void
21 ifs_remque(struct mbuf *ifm)
22 {
23 ifm->ifs_prev->ifs_next = ifm->ifs_next;
24 ifm->ifs_next->ifs_prev = ifm->ifs_prev;
25 }
26
27 void
28 if_init(Slirp *slirp)
29 {
30 slirp->if_fastq.ifq_next = slirp->if_fastq.ifq_prev = &slirp->if_fastq;
31 slirp->if_batchq.ifq_next = slirp->if_batchq.ifq_prev = &slirp->if_batchq;
32 slirp->next_m = &slirp->if_batchq;
33 }
34
35 /*
36 * if_output: Queue packet into an output queue.
37 * There are 2 output queue's, if_fastq and if_batchq.
38 * Each output queue is a doubly linked list of double linked lists
39 * of mbufs, each list belonging to one "session" (socket). This
40 * way, we can output packets fairly by sending one packet from each
41 * session, instead of all the packets from one session, then all packets
42 * from the next session, etc. Packets on the if_fastq get absolute
43 * priority, but if one session hogs the link, it gets "downgraded"
44 * to the batchq until it runs out of packets, then it'll return
45 * to the fastq (eg. if the user does an ls -alR in a telnet session,
46 * it'll temporarily get downgraded to the batchq)
47 */
/*
 * Queue the packet 'ifm' for output on behalf of socket 'so'
 * ('so' may be NULL for packets with no owning session).
 *
 * The packet either joins an existing per-session ifs list on the
 * batchq, starts a new session list on the fastq (IPTOS_LOWDELAY)
 * or batchq, and may trigger a fastq->batchq downgrade for greedy
 * sessions.  Finally kicks if_start to drain the queues.
 */
void
if_output(struct socket *so, struct mbuf *ifm)
{
    Slirp *slirp = ifm->slirp;
    struct mbuf *ifq;
    int on_fastq = 1;

    DEBUG_CALL("if_output");
    DEBUG_ARG("so = %lx", (long)so);
    DEBUG_ARG("ifm = %lx", (long)ifm);

    /*
     * First remove the mbuf from m_usedlist,
     * since we're gonna use m_next and m_prev ourselves
     * XXX Shouldn't need this, gotta change dtom() etc.
     */
    if (ifm->m_flags & M_USEDLIST) {
        remque(ifm);
        ifm->m_flags &= ~M_USEDLIST;
    }

    /*
     * See if there's already a batchq list for this session.
     * This can include an interactive session, which should go on fastq,
     * but gets too greedy... hence it'll be downgraded from fastq to batchq.
     * We mustn't put this packet back on the fastq (or we'll send it out of order)
     * XXX add cache here?
     */
    for (ifq = slirp->if_batchq.ifq_prev; ifq != &slirp->if_batchq;
         ifq = ifq->ifq_prev) {
        if (so == ifq->ifq_so) {
            /* A match! Append to the tail of this session's ifs list. */
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    }

    /* No match, check which queue to put it on */
    if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
        ifq = slirp->if_fastq.ifq_prev;
        on_fastq = 1;
        /*
         * Check if this packet is a part of the last
         * packet's session
         */
        if (ifq->ifq_so == so) {
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    } else {
        ifq = slirp->if_batchq.ifq_prev;
        /* Set next_m if the queue was empty so far */
        if (slirp->next_m == &slirp->if_batchq) {
            slirp->next_m = ifm;
        }
        /* NOTE(review): on_fastq stays 1 on this batchq path, so the
         * downgrade below can also fire for batchq insertions — appears
         * harmless (it re-inserts into batchq) but worth confirming. */
    }

    /* Create a new doubly linked list for this session */
    ifm->ifq_so = so;
    ifs_init(ifm);
    insque(ifm, ifq);

diddit:
    slirp->if_queued++;

    if (so) {
        /* Update *_queued */
        so->so_queued++;
        so->so_nqueued++;
        /*
         * Check if the interactive session should be downgraded to
         * the batchq. A session is downgraded if it has queued 6
         * packets without pausing, and at least 3 of those packets
         * have been sent over the link
         * (XXX These are arbitrary numbers, probably not optimal..)
         */
        if (on_fastq && ((so->so_nqueued >= 6) &&
                         (so->so_nqueued - so->so_queued) >= 3)) {

            /* Remove from current queue... */
            remque(ifm->ifs_next);

            /* ...And insert in the new. That'll teach ya! */
            insque(ifm->ifs_next, &slirp->if_batchq);
        }
    }

#ifndef FULL_BOLT
    /*
     * This prevents us from malloc()ing too many mbufs
     */
    if_start(ifm->slirp);
#endif
}
144
145 /*
146 * Send a packet
147 * We choose a packet based on it's position in the output queues;
148 * If there are packets on the fastq, they are sent FIFO, before
149 * everything else. Otherwise we choose the first packet from the
150 * batchq and send it. the next packet chosen will be from the session
151 * after this one, then the session after that one, and so on.. So,
152 * for example, if there are 3 ftp session's fighting for bandwidth,
153 * one packet will be sent from the first session, then one packet
154 * from the second session, then one packet from the third, then back
155 * to the first, etc. etc.
156 */
/*
 * Send a packet
 * We choose a packet based on its position in the output queues;
 * If there are packets on the fastq, they are sent FIFO, before
 * everything else.  Otherwise we choose the first packet from the
 * batchq and send it. the next packet chosen will be from the session
 * after this one, then the session after that one, and so on..
 *
 * Packets that cannot be encapsulated yet (pending ARP resolution)
 * and have not expired are left queued and re-counted in if_queued.
 */
void if_start(Slirp *slirp)
{
    uint64_t now = qemu_get_clock_ns(rt_clock);
    int requeued = 0;
    bool from_batchq, next_from_batchq;
    struct mbuf *ifm, *ifm_next, *ifqt;

    DEBUG_CALL("if_start");

    /* Reentrancy guard: sending a packet can re-enter if_start. */
    if (slirp->if_start_busy) {
        return;
    }
    slirp->if_start_busy = true;

    /* Pick the first packet to send: fastq has absolute priority. */
    if (slirp->if_fastq.ifq_next != &slirp->if_fastq) {
        ifm_next = slirp->if_fastq.ifq_next;
        next_from_batchq = false;
    } else if (slirp->next_m != &slirp->if_batchq) {
        /* Nothing on fastq, pick up from batchq via next_m */
        ifm_next = slirp->next_m;
        next_from_batchq = true;
    } else {
        ifm_next = NULL;
    }

    while (ifm_next) {
        /* check if we can really output */
        if (!slirp_can_output(slirp->opaque)) {
            slirp->if_start_busy = false;
            return;
        }

        ifm = ifm_next;
        from_batchq = next_from_batchq;

        /* Compute the successor BEFORE ifm is possibly unlinked below. */
        ifm_next = ifm->ifq_next;
        if (ifm_next == &slirp->if_fastq) {
            /* No more packets in fastq, switch to batchq */
            ifm_next = slirp->next_m;
            next_from_batchq = true;
        }
        if (ifm_next == &slirp->if_batchq) {
            /* end of batchq */
            ifm_next = NULL;
        }

        slirp->if_queued--;

        /* Try to send packet unless it already expired */
        if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
            /* Packet is delayed due to pending ARP resolution;
             * leave it queued and account for it in 'requeued'. */
            requeued++;
            continue;
        }

        if (ifm == slirp->next_m) {
            /* Set which packet to send on next iteration */
            slirp->next_m = ifm->ifq_next;
        }

        /* Remove it from the queue */
        ifqt = ifm->ifq_prev;
        remque(ifm);

        /* If there are more packets for this session, re-queue them */
        if (ifm->ifs_next != ifm) {
            struct mbuf *next = ifm->ifs_next;

            /* Promote the session's next packet into ifm's queue slot. */
            insque(next, ifqt);
            ifs_remque(ifm);

            if (!from_batchq) {
                /* Next packet in fastq is from the same session */
                ifm_next = next;
                next_from_batchq = false;
            } else if (slirp->next_m == &slirp->if_batchq) {
                /* Set next_m and ifm_next if the session packet is now the
                 * only one on batchq */
                slirp->next_m = ifm_next = next;
            }
        }

        /* Update so_queued */
        if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) {
            /* If there's no more queued, reset nqueued */
            ifm->ifq_so->so_nqueued = 0;
        }

        m_free(ifm);
    }

    /* Only the ARP-delayed packets remain on the queues. */
    slirp->if_queued = requeued;

    slirp->if_start_busy = false;
}