]> git.proxmox.com Git - mirror_qemu.git/blob - slirp/if.c
Use #include "..." for our own headers, <...> for others
[mirror_qemu.git] / slirp / if.c
1 /*
2 * Copyright (c) 1995 Danny Gasparovski.
3 *
4 * Please read the file COPYRIGHT for the
5 * terms and conditions of the copyright.
6 */
7
8 #include "qemu/osdep.h"
9 #include "slirp.h"
10 #include "qemu/timer.h"
11
12 static void
13 ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
14 {
15 ifm->ifs_next = ifmhead->ifs_next;
16 ifmhead->ifs_next = ifm;
17 ifm->ifs_prev = ifmhead;
18 ifm->ifs_next->ifs_prev = ifm;
19 }
20
21 static void
22 ifs_remque(struct mbuf *ifm)
23 {
24 ifm->ifs_prev->ifs_next = ifm->ifs_next;
25 ifm->ifs_next->ifs_prev = ifm->ifs_prev;
26 }
27
/*
 * Initialize the two interface output queues of this Slirp instance.
 * Must run before any packet is queued via if_output().
 */
void
if_init(Slirp *slirp)
{
    /* A quehead whose qh_link and qh_rlink both point back at itself
     * is the canonical empty circular list. */
    slirp->if_fastq.qh_link = slirp->if_fastq.qh_rlink = &slirp->if_fastq;
    slirp->if_batchq.qh_link = slirp->if_batchq.qh_rlink = &slirp->if_batchq;
    /* next_m is the round-robin cursor into if_batchq used by if_start();
     * pointing it at the queue head is the "batchq empty" sentinel that
     * if_output()/if_start() compare against. */
    slirp->next_m = (struct mbuf *) &slirp->if_batchq;
}
35
36 /*
37 * if_output: Queue packet into an output queue.
38 * There are 2 output queue's, if_fastq and if_batchq.
39 * Each output queue is a doubly linked list of double linked lists
40 * of mbufs, each list belonging to one "session" (socket). This
41 * way, we can output packets fairly by sending one packet from each
42 * session, instead of all the packets from one session, then all packets
43 * from the next session, etc. Packets on the if_fastq get absolute
44 * priority, but if one session hogs the link, it gets "downgraded"
45 * to the batchq until it runs out of packets, then it'll return
46 * to the fastq (eg. if the user does an ls -alR in a telnet session,
47 * it'll temporarily get downgraded to the batchq)
48 */
/*
 * Queue the mbuf ifm, belonging to session so (may be NULL for
 * sessionless packets), onto if_fastq or if_batchq for later
 * transmission by if_start().  See the fairness discussion in the
 * comment block above.
 */
void
if_output(struct socket *so, struct mbuf *ifm)
{
    Slirp *slirp = ifm->slirp;
    struct mbuf *ifq;
    /* NOTE(review): on_fastq starts at 1 and is never cleared on the
     * batchq path below, so the fastq->batchq downgrade check in the
     * "diddit" section can also fire for batchq packets — presumably
     * harmless (re-inserts into batchq) but worth confirming. */
    int on_fastq = 1;

    DEBUG_CALL("if_output");
    DEBUG_ARG("so = %p", so);
    DEBUG_ARG("ifm = %p", ifm);

    /*
     * First remove the mbuf from m_usedlist,
     * since we're gonna use m_next and m_prev ourselves
     * XXX Shouldn't need this, gotta change dtom() etc.
     */
    if (ifm->m_flags & M_USEDLIST) {
        remque(ifm);
        ifm->m_flags &= ~M_USEDLIST;
    }

    /*
     * See if there's already a batchq list for this session.
     * This can include an interactive session, which should go on fastq,
     * but gets too greedy... hence it'll be downgraded from fastq to batchq.
     * We mustn't put this packet back on the fastq (or we'll send it out of order)
     * XXX add cache here?
     */
    /* Walk batchq backwards (qh_rlink/ifq_prev); the queue head itself
     * acts as the loop sentinel via the quehead cast. */
    for (ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
         (struct quehead *) ifq != &slirp->if_batchq;
         ifq = ifq->ifq_prev) {
        if (so == ifq->ifq_so) {
            /* A match!  Append to the tail of this session's ifs list
             * (ifq->ifs_prev) so the session's packets stay in order. */
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    }

    /* No match, check which queue to put it on */
    if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
        /* Interactive traffic (TOS low-delay) goes on the fastq. */
        ifq = (struct mbuf *) slirp->if_fastq.qh_rlink;
        on_fastq = 1;
        /*
         * Check if this packet is a part of the last
         * packet's session
         */
        if (ifq->ifq_so == so) {
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    } else {
        ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
        /* Set next_m if the queue was empty so far */
        if ((struct quehead *) slirp->next_m == &slirp->if_batchq) {
            slirp->next_m = ifm;
        }
    }

    /* Create a new doubly linked list for this session */
    ifm->ifq_so = so;
    ifs_init(ifm);
    /* Link the new session after ifq, i.e. at the tail of the chosen queue. */
    insque(ifm, ifq);

diddit:
    if (so) {
        /* Update *_queued */
        so->so_queued++;
        so->so_nqueued++;
        /*
         * Check if the interactive session should be downgraded to
         * the batchq. A session is downgraded if it has queued 6
         * packets without pausing, and at least 3 of those packets
         * have been sent over the link
         * (XXX These are arbitrary numbers, probably not optimal..)
         */
        if (on_fastq && ((so->so_nqueued >= 6) &&
                         (so->so_nqueued - so->so_queued) >= 3)) {

            /* Remove from current queue... */
            remque(ifm->ifs_next);

            /* ...And insert in the new. That'll teach ya! */
            insque(ifm->ifs_next, &slirp->if_batchq);
        }
    }

#ifndef FULL_BOLT
    /*
     * This prevents us from malloc()ing too many mbufs
     */
    if_start(ifm->slirp);
#endif
}
144
145 /*
146 * Send a packet
147 * We choose a packet based on its position in the output queues;
148 * If there are packets on the fastq, they are sent FIFO, before
149 * everything else. Otherwise we choose the first packet from the
150 * batchq and send it. the next packet chosen will be from the session
151 * after this one, then the session after that one, and so on.. So,
152 * for example, if there are 3 ftp session's fighting for bandwidth,
153 * one packet will be sent from the first session, then one packet
154 * from the second session, then one packet from the third, then back
155 * to the first, etc. etc.
156 */
/*
 * Drain the output queues: fastq first (FIFO), then round-robin over
 * the batchq sessions starting at slirp->next_m.  Re-entrancy is
 * blocked via if_start_busy because if_encap() may call back into
 * code that queues packets.
 */
void if_start(Slirp *slirp)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    bool from_batchq, next_from_batchq;
    struct mbuf *ifm, *ifm_next, *ifqt;

    DEBUG_CALL("if_start");

    /* Guard against recursive invocation (e.g. via if_output()'s
     * FULL_BOLT call back into if_start()). */
    if (slirp->if_start_busy) {
        return;
    }
    slirp->if_start_busy = true;

    /* Pick the first candidate: head of fastq if non-empty, else the
     * batchq round-robin cursor, else nothing to send. */
    if (slirp->if_fastq.qh_link != &slirp->if_fastq) {
        ifm_next = (struct mbuf *) slirp->if_fastq.qh_link;
        next_from_batchq = false;
    } else if ((struct quehead *) slirp->next_m != &slirp->if_batchq) {
        /* Nothing on fastq, pick up from batchq via next_m */
        ifm_next = slirp->next_m;
        next_from_batchq = true;
    } else {
        ifm_next = NULL;
    }

    while (ifm_next) {
        ifm = ifm_next;
        from_batchq = next_from_batchq;

        /* Pre-compute the next packet before we unlink ifm, handling
         * the fastq->batchq transition and end of batchq. */
        ifm_next = ifm->ifq_next;
        if ((struct quehead *) ifm_next == &slirp->if_fastq) {
            /* No more packets in fastq, switch to batchq */
            ifm_next = slirp->next_m;
            next_from_batchq = true;
        }
        if ((struct quehead *) ifm_next == &slirp->if_batchq) {
            /* end of batchq */
            ifm_next = NULL;
        }

        /* Try to send packet unless it already expired */
        if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
            /* Packet is delayed due to pending ARP or NDP resolution:
             * leave it queued and move on to the next packet. */
            continue;
        }

        if (ifm == slirp->next_m) {
            /* Set which packet to send on next iteration */
            slirp->next_m = ifm->ifq_next;
        }

        /* Remove it from the queue */
        ifqt = ifm->ifq_prev;
        remque(ifm);

        /* If there are more packets for this session, re-queue them */
        if (ifm->ifs_next != ifm) {
            struct mbuf *next = ifm->ifs_next;

            /* Promote the session's next packet into ifm's old slot. */
            insque(next, ifqt);
            ifs_remque(ifm);

            if (!from_batchq) {
                /* Next packet in fastq is from the same session */
                ifm_next = next;
                next_from_batchq = false;
            } else if ((struct quehead *) slirp->next_m == &slirp->if_batchq) {
                /* Set next_m and ifm_next if the session packet is now the
                 * only one on batchq */
                slirp->next_m = ifm_next = next;
            }
        }

        /* Update so_queued */
        if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) {
            /* If there's no more queued, reset nqueued */
            ifm->ifq_so->so_nqueued = 0;
        }

        m_free(ifm);
    }

    slirp->if_start_busy = false;
}