/*
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#include <slirp.h>
#include "qemu/timer.h"

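/*
 * Insert ifm into the per-session ("ifs") list, right after ifmhead.
 */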
static void
ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
{
    ifm->ifs_next = ifmhead->ifs_next;
    ifmhead->ifs_next = ifm;
    ifm->ifs_prev = ifmhead;
    ifm->ifs_next->ifs_prev = ifm;
}

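/*
 * Unlink ifm from its per-session ("ifs") list; ifm's own pointers are
 * left untouched.
 */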
static void
ifs_remque(struct mbuf *ifm)
{
    ifm->ifs_prev->ifs_next = ifm->ifs_next;
    ifm->ifs_next->ifs_prev = ifm->ifs_prev;
}

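/*
 * Initialize both output queues as empty circular lists and point
 * next_m at the batchq head (i.e. "nothing queued yet").
 */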
void
if_init(Slirp *slirp)
{
    slirp->if_fastq.ifq_next = slirp->if_fastq.ifq_prev = &slirp->if_fastq;
    slirp->if_batchq.ifq_next = slirp->if_batchq.ifq_prev = &slirp->if_batchq;
    slirp->next_m = &slirp->if_batchq;
}

/*
 * if_output: Queue packet into an output queue.
 * There are 2 output queues, if_fastq and if_batchq.
 * Each output queue is a doubly linked list of doubly linked lists
 * of mbufs, each list belonging to one "session" (socket).  This
 * way, we can output packets fairly by sending one packet from each
 * session, instead of all the packets from one session, then all packets
 * from the next session, etc.  Packets on the if_fastq get absolute
 * priority, but if one session hogs the link, it gets "downgraded"
 * to the batchq until it runs out of packets, then it'll return
 * to the fastq (e.g. if the user does an ls -alR in a telnet session,
 * it'll temporarily get downgraded to the batchq).
 */
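/*
 * Rough sketch of the layout described above:
 *
 *   if_batchq <-> mbuf (session A) <-> mbuf (session B) <-> ...   (ifq_next/ifq_prev)
 *                      |                     |
 *                 more packets          more packets              (ifs_next/ifs_prev)
 *                 of session A          of session B
 *
 * if_fastq has the same shape; next_m marks where the batchq
 * round-robin will resume.
 */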
void
if_output(struct socket *so, struct mbuf *ifm)
{
    Slirp *slirp = ifm->slirp;
    struct mbuf *ifq;
    int on_fastq = 1;

    DEBUG_CALL("if_output");
    DEBUG_ARG("so = %lx", (long)so);
    DEBUG_ARG("ifm = %lx", (long)ifm);

    /*
     * First remove the mbuf from m_usedlist,
     * since we're gonna use m_next and m_prev ourselves
     * XXX Shouldn't need this, gotta change dtom() etc.
     */
    if (ifm->m_flags & M_USEDLIST) {
        remque(ifm);
        ifm->m_flags &= ~M_USEDLIST;
    }

    /*
     * See if there's already a batchq list for this session.
     * This can include an interactive session, which should go on fastq,
     * but gets too greedy... hence it'll be downgraded from fastq to batchq.
     * We mustn't put this packet back on the fastq (or we'll send it out of order)
     * XXX add cache here?
     */
    for (ifq = slirp->if_batchq.ifq_prev; ifq != &slirp->if_batchq;
         ifq = ifq->ifq_prev) {
        if (so == ifq->ifq_so) {
            /* A match! */
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    }

    /* No match, check which queue to put it on */
    if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
        ifq = slirp->if_fastq.ifq_prev;
        on_fastq = 1;
        /*
         * Check if this packet is a part of the last
         * packet's session
         */
        if (ifq->ifq_so == so) {
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    } else {
        ifq = slirp->if_batchq.ifq_prev;
        /* Set next_m if the queue was empty so far */
        if (slirp->next_m == &slirp->if_batchq) {
            slirp->next_m = ifm;
        }
    }

    /* Create a new doubly linked list for this session */
    ifm->ifq_so = so;
    ifs_init(ifm);
    insque(ifm, ifq);

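    /*
     * The packet is on one of the queues at this point; update the
     * session's counters and, if it is hogging the fastq, downgrade it.
     */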
diddit:
    if (so) {
        /* Update *_queued */
        so->so_queued++;
        so->so_nqueued++;
        /*
         * Check if the interactive session should be downgraded to
         * the batchq.  A session is downgraded if it has queued 6
         * packets without pausing, and at least 3 of those packets
         * have been sent over the link
         * (XXX These are arbitrary numbers, probably not optimal..)
         */
        if (on_fastq && ((so->so_nqueued >= 6) &&
                         (so->so_nqueued - so->so_queued) >= 3)) {

            /* Remove from current queue... */
            remque(ifm->ifs_next);

            /* ...And insert in the new.  That'll teach ya! */
            insque(ifm->ifs_next, &slirp->if_batchq);
        }
    }

#ifndef FULL_BOLT
    /*
     * This prevents us from malloc()ing too many mbufs
     */
    if_start(ifm->slirp);
#endif
}

/*
 * Send a packet
 * We choose a packet based on its position in the output queues;
 * If there are packets on the fastq, they are sent FIFO, before
 * everything else.  Otherwise we choose the first packet from the
 * batchq and send it.  The next packet chosen will be from the session
 * after this one, then the session after that one, and so on...  So,
 * for example, if there are 3 ftp sessions fighting for bandwidth,
 * one packet will be sent from the first session, then one packet
 * from the second session, then one packet from the third, then back
 * to the first, etc. etc.
 */
void if_start(Slirp *slirp)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    bool from_batchq, next_from_batchq;
    struct mbuf *ifm, *ifm_next, *ifqt;

    DEBUG_CALL("if_start");

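    /*
     * Guard against recursive calls: do nothing if a send loop is
     * already running further up the call stack.
     */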
    if (slirp->if_start_busy) {
        return;
    }
    slirp->if_start_busy = true;

    if (slirp->if_fastq.ifq_next != &slirp->if_fastq) {
        ifm_next = slirp->if_fastq.ifq_next;
        next_from_batchq = false;
    } else if (slirp->next_m != &slirp->if_batchq) {
        /* Nothing on fastq, pick up from batchq via next_m */
        ifm_next = slirp->next_m;
        next_from_batchq = true;
    } else {
        ifm_next = NULL;
    }

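    /*
     * Send one packet per iteration; ifm_next/next_from_batchq always
     * describe the candidate to try after the current one.
     */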
    while (ifm_next) {
        ifm = ifm_next;
        from_batchq = next_from_batchq;

        ifm_next = ifm->ifq_next;
        if (ifm_next == &slirp->if_fastq) {
            /* No more packets in fastq, switch to batchq */
            ifm_next = slirp->next_m;
            next_from_batchq = true;
        }
        if (ifm_next == &slirp->if_batchq) {
            /* end of batchq */
            ifm_next = NULL;
        }

        /* Try to send packet unless it already expired */
        if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
            /* Packet is delayed due to pending ARP resolution */
            continue;
        }

        if (ifm == slirp->next_m) {
            /* Set which packet to send on next iteration */
            slirp->next_m = ifm->ifq_next;
        }

        /* Remove it from the queue */
        ifqt = ifm->ifq_prev;
        remque(ifm);

        /* If there are more packets for this session, re-queue them */
        if (ifm->ifs_next != ifm) {
            struct mbuf *next = ifm->ifs_next;

            insque(next, ifqt);
            ifs_remque(ifm);

            if (!from_batchq) {
                /* Next packet in fastq is from the same session */
                ifm_next = next;
                next_from_batchq = false;
            } else if (slirp->next_m == &slirp->if_batchq) {
                /* Set next_m and ifm_next if the session packet is now the
                 * only one on batchq */
                slirp->next_m = ifm_next = next;
            }
        }

        /* Update so_queued */
        if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) {
            /* If there's no more queued, reset nqueued */
            ifm->ifq_so->so_nqueued = 0;
        }

        m_free(ifm);
    }

    slirp->if_start_busy = false;
}