]>
Commit | Line | Data |
---|---|---|
/* SPDX-License-Identifier: BSD-3-Clause */
/*
 * Copyright (c) 1995 Danny Gasparovski.
 */

#include "slirp.h"

674bb261 | 8 | static void |
a5f1b965 | 9 | ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead) |
f0cbd3ec FB |
10 | { |
11 | ifm->ifs_next = ifmhead->ifs_next; | |
12 | ifmhead->ifs_next = ifm; | |
13 | ifm->ifs_prev = ifmhead; | |
14 | ifm->ifs_next->ifs_prev = ifm; | |
15 | } | |
16 | ||
674bb261 | 17 | static void |
a5f1b965 | 18 | ifs_remque(struct mbuf *ifm) |
f0cbd3ec FB |
19 | { |
20 | ifm->ifs_prev->ifs_next = ifm->ifs_next; | |
21 | ifm->ifs_next->ifs_prev = ifm->ifs_prev; | |
22 | } | |
23 | ||
24 | void | |
460fec67 | 25 | if_init(Slirp *slirp) |
f0cbd3ec | 26 | { |
67e3eee4 ST |
27 | slirp->if_fastq.qh_link = slirp->if_fastq.qh_rlink = &slirp->if_fastq; |
28 | slirp->if_batchq.qh_link = slirp->if_batchq.qh_rlink = &slirp->if_batchq; | |
f0cbd3ec FB |
29 | } |
30 | ||
f0cbd3ec FB |
/*
 * if_output: Queue packet into an output queue.
 * There are 2 output queue's, if_fastq and if_batchq.
 * Each output queue is a doubly linked list of double linked lists
 * of mbufs, each list belonging to one "session" (socket).  This
 * way, we can output packets fairly by sending one packet from each
 * session, instead of all the packets from one session, then all packets
 * from the next session, etc.  Packets on the if_fastq get absolute
 * priority, but if one session hogs the link, it gets "downgraded"
 * to the batchq until it runs out of packets, then it'll return
 * to the fastq (eg. if the user does an ls -alR in a telnet session,
 * it'll temporarily get downgraded to the batchq)
 */
void
if_output(struct socket *so, struct mbuf *ifm)
{
    Slirp *slirp = ifm->slirp;
    struct mbuf *ifq;
    /* NOTE(review): on_fastq starts at 1 and is never cleared on the
     * batchq path below, so the downgrade check at "diddit" can also
     * fire for sessions that were queued on the batchq — presumably
     * historical behavior; confirm before changing. */
    int on_fastq = 1;

    DEBUG_CALL("if_output");
    DEBUG_ARG("so = %p", so);
    DEBUG_ARG("ifm = %p", ifm);

    /*
     * First remove the mbuf from m_usedlist,
     * since we're gonna use m_next and m_prev ourselves
     * XXX Shouldn't need this, gotta change dtom() etc.
     */
    if (ifm->m_flags & M_USEDLIST) {
        remque(ifm);
        ifm->m_flags &= ~M_USEDLIST;
    }

    /*
     * See if there's already a batchq list for this session.
     * This can include an interactive session, which should go on fastq,
     * but gets too greedy... hence it'll be downgraded from fastq to batchq.
     * We mustn't put this packet back on the fastq (or we'll send it out of order)
     * XXX add cache here?
     */
    /* Walk the batchq backwards (qh_rlink = tail) looking for an
     * existing list belonging to this socket. */
    if (so) {
        for (ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
             (struct quehead *) ifq != &slirp->if_batchq;
             ifq = ifq->ifq_prev) {
            if (so == ifq->ifq_so) {
                /* A match!  Append to the tail of that session's ifs ring. */
                ifm->ifq_so = so;
                ifs_insque(ifm, ifq->ifs_prev);
                goto diddit;
            }
        }
    }

    /* No match, check which queue to put it on */
    if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
        /* Interactive traffic: queue at the tail of the fastq. */
        ifq = (struct mbuf *) slirp->if_fastq.qh_rlink;
        on_fastq = 1;
        /*
         * Check if this packet is a part of the last
         * packet's session
         */
        if (ifq->ifq_so == so) {
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    } else {
        /* Bulk traffic (or no socket): tail of the batchq. */
        ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
    }

    /* Create a new doubly linked list for this session */
    ifm->ifq_so = so;
    ifs_init(ifm);
    insque(ifm, ifq);

diddit:
    if (so) {
        /* Update *_queued */
        so->so_queued++;
        so->so_nqueued++;
        /*
         * Check if the interactive session should be downgraded to
         * the batchq.  A session is downgraded if it has queued 6
         * packets without pausing, and at least 3 of those packets
         * have been sent over the link
         * (XXX These are arbitrary numbers, probably not optimal..)
         */
        if (on_fastq && ((so->so_nqueued >= 6) &&
                         (so->so_nqueued - so->so_queued) >= 3)) {

            /* Remove from current queue... */
            remque(ifm->ifs_next);

            /* ...And insert in the new.  That'll teach ya! */
            insque(ifm->ifs_next, &slirp->if_batchq);
        }
    }

    /*
     * This prevents us from malloc()ing too many mbufs
     */
    if_start(ifm->slirp);
}
135 | ||
/*
 * Send one packet from each session.
 * If there are packets on the fastq, they are sent FIFO, before
 * everything else.  Then we choose the first packet from each
 * batchq session (socket) and send it.
 * For example, if there are 3 ftp sessions fighting for bandwidth,
 * one packet will be sent from the first session, then one packet
 * from the second session, then one packet from the third.
 */
void if_start(Slirp *slirp)
{
    uint64_t now = slirp->cb->clock_get_ns(slirp->opaque);
    bool from_batchq = false;
    struct mbuf *ifm, *ifm_next, *ifqt;

    DEBUG_CALL("if_start");

    /* Guard against reentrancy: if_encap() below can end up calling
     * back into if_start() (via if_output). */
    if (slirp->if_start_busy) {
        return;
    }
    slirp->if_start_busy = true;

    /* Snapshot the head of the batchq before we start; NULL if empty. */
    struct mbuf *batch_head = NULL;
    if (slirp->if_batchq.qh_link != &slirp->if_batchq) {
        batch_head = (struct mbuf *) slirp->if_batchq.qh_link;
    }

    /* Start with the fastq if it has anything, otherwise the batchq. */
    if (slirp->if_fastq.qh_link != &slirp->if_fastq) {
        ifm_next = (struct mbuf *) slirp->if_fastq.qh_link;
    } else if (batch_head) {
        /* Nothing on fastq, pick up from batchq */
        ifm_next = batch_head;
        from_batchq = true;
    } else {
        ifm_next = NULL;
    }

    while (ifm_next) {
        ifm = ifm_next;

        /* Advance before we possibly unlink ifm from the queue. */
        ifm_next = ifm->ifq_next;
        if ((struct quehead *) ifm_next == &slirp->if_fastq) {
            /* No more packets in fastq, switch to batchq */
            ifm_next = batch_head;
            from_batchq = true;
        }
        if ((struct quehead *) ifm_next == &slirp->if_batchq) {
            /* end of batchq */
            ifm_next = NULL;
        }

        /* Try to send packet unless it already expired */
        if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
            /* Packet is delayed due to pending ARP or NDP resolution */
            continue;
        }

        /* Remove it from the queue */
        ifqt = ifm->ifq_prev;
        remque(ifm);

        /* If there are more packets for this session, re-queue them */
        if (ifm->ifs_next != ifm) {
            struct mbuf *next = ifm->ifs_next;

            /* Promote the session's next packet to ifm's old queue slot. */
            insque(next, ifqt);
            ifs_remque(ifm);
            if (!from_batchq) {
                /* On the fastq we keep draining the same session FIFO. */
                ifm_next = next;
            }
        }

        /* Update so_queued */
        if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) {
            /* If there's no more queued, reset nqueued */
            ifm->ifq_so->so_nqueued = 0;
        }

        m_free(ifm);
    }

    slirp->if_start_busy = false;
}