slirp/slirp.c
1 /*
2 * libslirp glue
3 *
4 * Copyright (c) 2004-2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24 #include "qemu/osdep.h"
25 #include "qemu-common.h"
26 #include "qemu/timer.h"
27 #include "qemu/error-report.h"
28 #include "sysemu/char.h"
29 #include "slirp.h"
30 #include "hw/hw.h"
31 #include "qemu/cutils.h"
32
33 #ifndef _WIN32
34 #include <net/if.h>
35 #endif
36
37 /* host loopback address */
38 struct in_addr loopback_addr;
39 /* host loopback network mask */
40 unsigned long loopback_mask;
41
42 /* emulated hosts use the MAC addr 52:55:IP:IP:IP:IP */
43 static const uint8_t special_ethaddr[ETH_ALEN] = {
44 0x52, 0x55, 0x00, 0x00, 0x00, 0x00
45 };
46
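/*
 * Editor's sketch (not part of the original file): how the 52:55 prefix
 * above and a guest IPv4 address combine into an emulated MAC address.
 * It mirrors the memcpy pattern used by arp_input() and if_encap4() below;
 * the helper name is illustrative.
 */
static inline void slirp_guest_ethaddr_sketch(uint8_t out[ETH_ALEN],
                                              struct in_addr ip)
{
    memcpy(out, special_ethaddr, ETH_ALEN - 4); /* the 52:55 prefix (2 bytes) */
    memcpy(&out[2], &ip, 4);                    /* low 4 bytes: the IPv4 addr */
}
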
47 u_int curtime;
48
49 static QTAILQ_HEAD(slirp_instances, Slirp) slirp_instances =
50 QTAILQ_HEAD_INITIALIZER(slirp_instances);
51
52 static struct in_addr dns_addr;
53 #ifndef _WIN32
54 static struct in6_addr dns6_addr;
55 #endif
56 static u_int dns_addr_time;
57 #ifndef _WIN32
58 static u_int dns6_addr_time;
59 #endif
60
61 #define TIMEOUT_FAST 2 /* milliseconds */
62 #define TIMEOUT_SLOW 499 /* milliseconds */
63 /* for the aging of certain requests like DNS */
64 #define TIMEOUT_DEFAULT 1000 /* milliseconds */
65
66 #ifdef _WIN32
67
68 int get_dns_addr(struct in_addr *pdns_addr)
69 {
70 FIXED_INFO *FixedInfo=NULL;
71 ULONG BufLen;
72 DWORD ret;
73 IP_ADDR_STRING *pIPAddr;
74 struct in_addr tmp_addr;
75
76 if (dns_addr.s_addr != 0 && (curtime - dns_addr_time) < TIMEOUT_DEFAULT) {
77 *pdns_addr = dns_addr;
78 return 0;
79 }
80
81 FixedInfo = (FIXED_INFO *)GlobalAlloc(GPTR, sizeof(FIXED_INFO));
82 BufLen = sizeof(FIXED_INFO);
83
84 if (ERROR_BUFFER_OVERFLOW == GetNetworkParams(FixedInfo, &BufLen)) {
85 if (FixedInfo) {
86 GlobalFree(FixedInfo);
87 FixedInfo = NULL;
88 }
89 FixedInfo = GlobalAlloc(GPTR, BufLen);
90 }
91
92 if ((ret = GetNetworkParams(FixedInfo, &BufLen)) != ERROR_SUCCESS) {
93 printf("GetNetworkParams failed. ret = %08x\n", (u_int)ret );
94 if (FixedInfo) {
95 GlobalFree(FixedInfo);
96 FixedInfo = NULL;
97 }
98 return -1;
99 }
100
101 pIPAddr = &(FixedInfo->DnsServerList);
102 inet_aton(pIPAddr->IpAddress.String, &tmp_addr);
103 *pdns_addr = tmp_addr;
104 dns_addr = tmp_addr;
105 dns_addr_time = curtime;
106 if (FixedInfo) {
107 GlobalFree(FixedInfo);
108 FixedInfo = NULL;
109 }
110 return 0;
111 }
112
113 int get_dns6_addr(struct in6_addr *pdns6_addr, uint32_t *scope_id)
114 {
115 return -1;
116 }
117
118 static void winsock_cleanup(void)
119 {
120 WSACleanup();
121 }
122
123 #else
124
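/*
 * Editor's note: returns 0 when the cached address can be reused (either it
 * is still fresh or /etc/resolv.conf is unchanged; the value is copied to
 * pdns_addr), 1 when the file changed and must be re-parsed, and -1 when it
 * cannot be stat()ed.
 */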
125 static int get_dns_addr_cached(void *pdns_addr, void *cached_addr,
126 socklen_t addrlen,
127 struct stat *cached_stat, u_int *cached_time)
128 {
129 struct stat old_stat;
130 if (curtime - *cached_time < TIMEOUT_DEFAULT) {
131 memcpy(pdns_addr, cached_addr, addrlen);
132 return 0;
133 }
134 old_stat = *cached_stat;
135 if (stat("/etc/resolv.conf", cached_stat) != 0) {
136 return -1;
137 }
138 if (cached_stat->st_dev == old_stat.st_dev
139 && cached_stat->st_ino == old_stat.st_ino
140 && cached_stat->st_size == old_stat.st_size
141 && cached_stat->st_mtime == old_stat.st_mtime) {
142 memcpy(pdns_addr, cached_addr, addrlen);
143 return 0;
144 }
145 return 1;
146 }
147
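/*
 * Editor's note: scans /etc/resolv.conf for "nameserver" entries of the
 * requested address family.  The first usable entry is copied to pdns_addr
 * and into the cache (along with its scope id, if any); scanning stops
 * after the fourth match.  Returns 0 on success, -1 if the file cannot be
 * opened or no usable nameserver is found.
 */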
148 static int get_dns_addr_resolv_conf(int af, void *pdns_addr, void *cached_addr,
149 socklen_t addrlen, uint32_t *scope_id,
150 u_int *cached_time)
151 {
152 char buff[512];
153 char buff2[257];
154 FILE *f;
155 int found = 0;
156 void *tmp_addr = alloca(addrlen);
157 unsigned if_index;
158
159 f = fopen("/etc/resolv.conf", "r");
160 if (!f)
161 return -1;
162
163 #ifdef DEBUG
164 fprintf(stderr, "IP address of your DNS(s): ");
165 #endif
166 while (fgets(buff, 512, f) != NULL) {
167 if (sscanf(buff, "nameserver%*[ \t]%256s", buff2) == 1) {
168 char *c = strchr(buff2, '%');
169 if (c) {
170 if_index = if_nametoindex(c + 1);
171 *c = '\0';
172 } else {
173 if_index = 0;
174 }
175
176 if (!inet_pton(af, buff2, tmp_addr)) {
177 continue;
178 }
179 /* If it's the first one, set it to dns_addr */
180 if (!found) {
181 memcpy(pdns_addr, tmp_addr, addrlen);
182 memcpy(cached_addr, tmp_addr, addrlen);
183 if (scope_id) {
184 *scope_id = if_index;
185 }
186 *cached_time = curtime;
187 }
188 #ifdef DEBUG
189 else
190 fprintf(stderr, ", ");
191 #endif
192 if (++found > 3) {
193 #ifdef DEBUG
194 fprintf(stderr, "(more)");
195 #endif
196 break;
197 }
198 #ifdef DEBUG
199 else {
200 char s[INET6_ADDRSTRLEN];
201 const char *res = inet_ntop(af, tmp_addr, s, sizeof(s));
202 if (!res) {
203 res = "(string conversion error)";
204 }
205 fprintf(stderr, "%s", res);
206 }
207 #endif
208 }
209 }
210 fclose(f);
211 if (!found)
212 return -1;
213 return 0;
214 }
215
216 int get_dns_addr(struct in_addr *pdns_addr)
217 {
218 static struct stat dns_addr_stat;
219
220 if (dns_addr.s_addr != 0) {
221 int ret;
222 ret = get_dns_addr_cached(pdns_addr, &dns_addr, sizeof(dns_addr),
223 &dns_addr_stat, &dns_addr_time);
224 if (ret <= 0) {
225 return ret;
226 }
227 }
228 return get_dns_addr_resolv_conf(AF_INET, pdns_addr, &dns_addr,
229 sizeof(dns_addr), NULL, &dns_addr_time);
230 }
231
232 int get_dns6_addr(struct in6_addr *pdns6_addr, uint32_t *scope_id)
233 {
234 static struct stat dns6_addr_stat;
235
236 if (!in6_zero(&dns6_addr)) {
237 int ret;
238 ret = get_dns_addr_cached(pdns6_addr, &dns6_addr, sizeof(dns6_addr),
239 &dns6_addr_stat, &dns6_addr_time);
240 if (ret <= 0) {
241 return ret;
242 }
243 }
244 return get_dns_addr_resolv_conf(AF_INET6, pdns6_addr, &dns6_addr,
245 sizeof(dns6_addr),
246 scope_id, &dns6_addr_time);
247 }
248
249 #endif
250
251 static void slirp_init_once(void)
252 {
253 static int initialized;
254 #ifdef _WIN32
255 WSADATA Data;
256 #endif
257
258 if (initialized) {
259 return;
260 }
261 initialized = 1;
262
263 #ifdef _WIN32
264 WSAStartup(MAKEWORD(2,0), &Data);
265 atexit(winsock_cleanup);
266 #endif
267
268 loopback_addr.s_addr = htonl(INADDR_LOOPBACK);
269 loopback_mask = htonl(IN_CLASSA_NET);
270 }
271
272 static void slirp_state_save(QEMUFile *f, void *opaque);
273 static int slirp_state_load(QEMUFile *f, void *opaque, int version_id);
274
275 Slirp *slirp_init(int restricted, bool in_enabled, struct in_addr vnetwork,
276 struct in_addr vnetmask, struct in_addr vhost,
277 bool in6_enabled,
278 struct in6_addr vprefix_addr6, uint8_t vprefix_len,
279 struct in6_addr vhost6, const char *vhostname,
280 const char *tftp_path, const char *bootfile,
281 struct in_addr vdhcp_start, struct in_addr vnameserver,
282 struct in6_addr vnameserver6, const char **vdnssearch,
283 void *opaque)
284 {
285 Slirp *slirp = g_malloc0(sizeof(Slirp));
286
287 slirp_init_once();
288
289 slirp->grand = g_rand_new();
290 slirp->restricted = restricted;
291
292 slirp->in_enabled = in_enabled;
293 slirp->in6_enabled = in6_enabled;
294
295 if_init(slirp);
296 ip_init(slirp);
297 ip6_init(slirp);
298
299 /* Initialise mbufs *after* setting the MTU */
300 m_init(slirp);
301
302 slirp->vnetwork_addr = vnetwork;
303 slirp->vnetwork_mask = vnetmask;
304 slirp->vhost_addr = vhost;
305 slirp->vprefix_addr6 = vprefix_addr6;
306 slirp->vprefix_len = vprefix_len;
307 slirp->vhost_addr6 = vhost6;
308 if (vhostname) {
309 pstrcpy(slirp->client_hostname, sizeof(slirp->client_hostname),
310 vhostname);
311 }
312 slirp->tftp_prefix = g_strdup(tftp_path);
313 slirp->bootp_filename = g_strdup(bootfile);
314 slirp->vdhcp_startaddr = vdhcp_start;
315 slirp->vnameserver_addr = vnameserver;
316 slirp->vnameserver_addr6 = vnameserver6;
317
318 if (vdnssearch) {
319 translate_dnssearch(slirp, vdnssearch);
320 }
321
322 slirp->opaque = opaque;
323
324 register_savevm(NULL, "slirp", 0, 4,
325 slirp_state_save, slirp_state_load, slirp);
326
327 QTAILQ_INSERT_TAIL(&slirp_instances, slirp, entry);
328
329 return slirp;
330 }
331
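/*
 * Editor's usage sketch (not part of the original file): create an
 * IPv4-only instance on 10.0.2.0/24 with the host alias at 10.0.2.2, the
 * DNS alias at 10.0.2.3 and DHCP leases starting at 10.0.2.15.  All values
 * and the opaque pointer are illustrative; tear the instance down again
 * with slirp_cleanup().
 */
#if 0
static Slirp *slirp_setup_example(void *opaque)
{
    struct in_addr net, mask, host, dhcp, dns;
    struct in6_addr prefix6 = IN6ADDR_ANY_INIT, host6 = IN6ADDR_ANY_INIT,
                    dns6 = IN6ADDR_ANY_INIT;

    inet_aton("10.0.2.0", &net);
    inet_aton("255.255.255.0", &mask);
    inet_aton("10.0.2.2", &host);
    inet_aton("10.0.2.15", &dhcp);
    inet_aton("10.0.2.3", &dns);

    return slirp_init(0 /* not restricted */, true /* IPv4 on */,
                      net, mask, host,
                      false /* IPv6 off */, prefix6, 64, host6,
                      "slirp-host", NULL /* tftp dir */, NULL /* bootfile */,
                      dhcp, dns, dns6, NULL /* dnssearch */, opaque);
}
#endif
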
332 void slirp_cleanup(Slirp *slirp)
333 {
334 QTAILQ_REMOVE(&slirp_instances, slirp, entry);
335
336 unregister_savevm(NULL, "slirp", slirp);
337
338 ip_cleanup(slirp);
339 ip6_cleanup(slirp);
340 m_cleanup(slirp);
341
342 g_rand_free(slirp->grand);
343
344 g_free(slirp->vdnssearch);
345 g_free(slirp->tftp_prefix);
346 g_free(slirp->bootp_filename);
347 g_free(slirp);
348 }
349
350 #define CONN_CANFSEND(so) (((so)->so_state & (SS_FCANTSENDMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
351 #define CONN_CANFRCV(so) (((so)->so_state & (SS_FCANTRCVMORE|SS_ISFCONNECTED)) == SS_ISFCONNECTED)
352
353 static void slirp_update_timeout(uint32_t *timeout)
354 {
355 Slirp *slirp;
356 uint32_t t;
357
358 if (*timeout <= TIMEOUT_FAST) {
359 return;
360 }
361
362 t = MIN(1000, *timeout);
363
364 /* If any slirp instance has a TCP timer pending, we fill @timeout with
365 * a more precise value.
366 */
367 QTAILQ_FOREACH(slirp, &slirp_instances, entry) {
368 if (slirp->time_fasttimo) {
369 *timeout = TIMEOUT_FAST;
370 return;
371 }
372 if (slirp->do_slowtimo) {
373 t = MIN(TIMEOUT_SLOW, t);
374 }
375 }
376 *timeout = t;
377 }
378
379 void slirp_pollfds_fill(GArray *pollfds, uint32_t *timeout)
380 {
381 Slirp *slirp;
382 struct socket *so, *so_next;
383
384 if (QTAILQ_EMPTY(&slirp_instances)) {
385 return;
386 }
387
388 /*
389 * First, TCP sockets
390 */
391
392 QTAILQ_FOREACH(slirp, &slirp_instances, entry) {
393 /*
394 * *_slowtimo needs calling if there are IP fragments
395 * in the fragment queue, or there are TCP connections active
396 */
397 slirp->do_slowtimo = ((slirp->tcb.so_next != &slirp->tcb) ||
398 (&slirp->ipq.ip_link != slirp->ipq.ip_link.next));
399
400 for (so = slirp->tcb.so_next; so != &slirp->tcb;
401 so = so_next) {
402 int events = 0;
403
404 so_next = so->so_next;
405
406 so->pollfds_idx = -1;
407
408 /*
409 * See if we need a tcp_fasttimo
410 */
411 if (slirp->time_fasttimo == 0 &&
412 so->so_tcpcb->t_flags & TF_DELACK) {
413 slirp->time_fasttimo = curtime; /* Flag when want a fasttimo */
414 }
415
416 /*
417 * NOFDREF can include sockets still connecting to localhost,
418 * newly socreated() sockets, etc. We don't want to poll these.
419 */
420 if (so->so_state & SS_NOFDREF || so->s == -1) {
421 continue;
422 }
423
424 /*
425 * Set for reading sockets which are accepting
426 */
427 if (so->so_state & SS_FACCEPTCONN) {
428 GPollFD pfd = {
429 .fd = so->s,
430 .events = G_IO_IN | G_IO_HUP | G_IO_ERR,
431 };
432 so->pollfds_idx = pollfds->len;
433 g_array_append_val(pollfds, pfd);
434 continue;
435 }
436
437 /*
438 * Set for writing sockets which are connecting
439 */
440 if (so->so_state & SS_ISFCONNECTING) {
441 GPollFD pfd = {
442 .fd = so->s,
443 .events = G_IO_OUT | G_IO_ERR,
444 };
445 so->pollfds_idx = pollfds->len;
446 g_array_append_val(pollfds, pfd);
447 continue;
448 }
449
450 /*
451 * Set for writing if we are connected, can send more, and
452 * we have something to send
453 */
454 if (CONN_CANFSEND(so) && so->so_rcv.sb_cc) {
455 events |= G_IO_OUT | G_IO_ERR;
456 }
457
458 /*
459 * Set for reading (and urgent data) if we are connected, can
460 * receive more, and we have room for it XXX /2 ?
461 */
462 if (CONN_CANFRCV(so) &&
463 (so->so_snd.sb_cc < (so->so_snd.sb_datalen/2))) {
464 events |= G_IO_IN | G_IO_HUP | G_IO_ERR | G_IO_PRI;
465 }
466
467 if (events) {
468 GPollFD pfd = {
469 .fd = so->s,
470 .events = events,
471 };
472 so->pollfds_idx = pollfds->len;
473 g_array_append_val(pollfds, pfd);
474 }
475 }
476
477 /*
478 * UDP sockets
479 */
480 for (so = slirp->udb.so_next; so != &slirp->udb;
481 so = so_next) {
482 so_next = so->so_next;
483
484 so->pollfds_idx = -1;
485
486 /*
487 * See if it's timed out
488 */
489 if (so->so_expire) {
490 if (so->so_expire <= curtime) {
491 udp_detach(so);
492 continue;
493 } else {
494 slirp->do_slowtimo = true; /* Let socket expire */
495 }
496 }
497
498 /*
499 * When UDP packets are received from over the
500 * link, they're sendto()'d straight away, so
501 * there is no need to poll them for writing.
502 * Limit the number of packets queued by this session
503 * to 4. Note that even though we try to limit this
504 * to 4 packets, the session could have more queued
505 * if the packets needed to be fragmented.
506 * (XXX <= 4 ?)
507 */
508 if ((so->so_state & SS_ISFCONNECTED) && so->so_queued <= 4) {
509 GPollFD pfd = {
510 .fd = so->s,
511 .events = G_IO_IN | G_IO_HUP | G_IO_ERR,
512 };
513 so->pollfds_idx = pollfds->len;
514 g_array_append_val(pollfds, pfd);
515 }
516 }
517
518 /*
519 * ICMP sockets
520 */
521 for (so = slirp->icmp.so_next; so != &slirp->icmp;
522 so = so_next) {
523 so_next = so->so_next;
524
525 so->pollfds_idx = -1;
526
527 /*
528 * See if it's timed out
529 */
530 if (so->so_expire) {
531 if (so->so_expire <= curtime) {
532 icmp_detach(so);
533 continue;
534 } else {
535 slirp->do_slowtimo = true; /* Let socket expire */
536 }
537 }
538
539 if (so->so_state & SS_ISFCONNECTED) {
540 GPollFD pfd = {
541 .fd = so->s,
542 .events = G_IO_IN | G_IO_HUP | G_IO_ERR,
543 };
544 so->pollfds_idx = pollfds->len;
545 g_array_append_val(pollfds, pfd);
546 }
547 }
548 }
549 slirp_update_timeout(timeout);
550 }
551
552 void slirp_pollfds_poll(GArray *pollfds, int select_error)
553 {
554 Slirp *slirp;
555 struct socket *so, *so_next;
556 int ret;
557
558 if (QTAILQ_EMPTY(&slirp_instances)) {
559 return;
560 }
561
562 curtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
563
564 QTAILQ_FOREACH(slirp, &slirp_instances, entry) {
565 /*
566 * See if anything has timed out
567 */
568 if (slirp->time_fasttimo &&
569 ((curtime - slirp->time_fasttimo) >= TIMEOUT_FAST)) {
570 tcp_fasttimo(slirp);
571 slirp->time_fasttimo = 0;
572 }
573 if (slirp->do_slowtimo &&
574 ((curtime - slirp->last_slowtimo) >= TIMEOUT_SLOW)) {
575 ip_slowtimo(slirp);
576 tcp_slowtimo(slirp);
577 slirp->last_slowtimo = curtime;
578 }
579
580 /*
581 * Check sockets
582 */
583 if (!select_error) {
584 /*
585 * Check TCP sockets
586 */
587 for (so = slirp->tcb.so_next; so != &slirp->tcb;
588 so = so_next) {
589 int revents;
590
591 so_next = so->so_next;
592
593 revents = 0;
594 if (so->pollfds_idx != -1) {
595 revents = g_array_index(pollfds, GPollFD,
596 so->pollfds_idx).revents;
597 }
598
599 if (so->so_state & SS_NOFDREF || so->s == -1) {
600 continue;
601 }
602
603 /*
604 * Check for URG data
605 * This will soread as well, so no need to
606 * test for G_IO_IN below if this succeeds
607 */
608 if (revents & G_IO_PRI) {
609 ret = sorecvoob(so);
610 if (ret < 0) {
611 /* Socket error might have resulted in the socket being
612 * removed, do not try to do anything more with it. */
613 continue;
614 }
615 }
616 /*
617 * Check sockets for reading
618 */
619 else if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) {
620 /*
621 * Check for incoming connections
622 */
623 if (so->so_state & SS_FACCEPTCONN) {
624 tcp_connect(so);
625 continue;
626 } /* else */
627 ret = soread(so);
628
629 /* Output it if we read something */
630 if (ret > 0) {
631 tcp_output(sototcpcb(so));
632 }
633 if (ret < 0) {
634 /* Socket error might have resulted in the socket being
635 * removed, do not try to do anything more with it. */
636 continue;
637 }
638 }
639
640 /*
641 * Check sockets for writing
642 */
643 if (!(so->so_state & SS_NOFDREF) &&
644 (revents & (G_IO_OUT | G_IO_ERR))) {
645 /*
646 * Check for non-blocking, still-connecting sockets
647 */
648 if (so->so_state & SS_ISFCONNECTING) {
649 /* Connected */
650 so->so_state &= ~SS_ISFCONNECTING;
651
652 ret = send(so->s, (const void *) &ret, 0, 0);
653 if (ret < 0) {
654 /* XXXXX Must fix, zero bytes is a NOP */
655 if (errno == EAGAIN || errno == EWOULDBLOCK ||
656 errno == EINPROGRESS || errno == ENOTCONN) {
657 continue;
658 }
659
660 /* else failed */
661 so->so_state &= SS_PERSISTENT_MASK;
662 so->so_state |= SS_NOFDREF;
663 }
664 /* else so->so_state &= ~SS_ISFCONNECTING; */
665
666 /*
667 * Continue tcp_input
668 */
669 tcp_input((struct mbuf *)NULL, sizeof(struct ip), so,
670 so->so_ffamily);
671 /* continue; */
672 } else {
673 ret = sowrite(so);
674 }
675 /*
676 * XXXXX If we wrote something (a lot), there
677 * could be a need for a window update.
678 * In the worst case, the remote will send
679 * a window probe to get things going again
680 */
681 }
682
683 /*
684 * Probe a still-connecting, non-blocking socket
685 * to check if it's still alive
686 */
687 #ifdef PROBE_CONN
688 if (so->so_state & SS_ISFCONNECTING) {
689 ret = qemu_recv(so->s, &ret, 0, 0);
690
691 if (ret < 0) {
692 /* XXX */
693 if (errno == EAGAIN || errno == EWOULDBLOCK ||
694 errno == EINPROGRESS || errno == ENOTCONN) {
695 continue; /* Still connecting, continue */
696 }
697
698 /* else failed */
699 so->so_state &= SS_PERSISTENT_MASK;
700 so->so_state |= SS_NOFDREF;
701
702 /* tcp_input will take care of it */
703 } else {
704 ret = send(so->s, &ret, 0, 0);
705 if (ret < 0) {
706 /* XXX */
707 if (errno == EAGAIN || errno == EWOULDBLOCK ||
708 errno == EINPROGRESS || errno == ENOTCONN) {
709 continue;
710 }
711 /* else failed */
712 so->so_state &= SS_PERSISTENT_MASK;
713 so->so_state |= SS_NOFDREF;
714 } else {
715 so->so_state &= ~SS_ISFCONNECTING;
716 }
717
718 }
719 tcp_input((struct mbuf *)NULL, sizeof(struct ip), so,
720 so->so_ffamily);
721 } /* SS_ISFCONNECTING */
722 #endif
723 }
724
725 /*
726 * Now UDP sockets.
727 * Incoming packets are sent straight away; they're not buffered.
728 * Incoming UDP data isn't buffered either.
729 */
730 for (so = slirp->udb.so_next; so != &slirp->udb;
731 so = so_next) {
732 int revents;
733
734 so_next = so->so_next;
735
736 revents = 0;
737 if (so->pollfds_idx != -1) {
738 revents = g_array_index(pollfds, GPollFD,
739 so->pollfds_idx).revents;
740 }
741
742 if (so->s != -1 &&
743 (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR))) {
744 sorecvfrom(so);
745 }
746 }
747
748 /*
749 * Check incoming ICMP replies.
750 */
751 for (so = slirp->icmp.so_next; so != &slirp->icmp;
752 so = so_next) {
753 int revents;
754
755 so_next = so->so_next;
756
757 revents = 0;
758 if (so->pollfds_idx != -1) {
759 revents = g_array_index(pollfds, GPollFD,
760 so->pollfds_idx).revents;
761 }
762
763 if (so->s != -1 &&
764 (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR))) {
765 icmp_receive(so);
766 }
767 }
768 }
769
770 if_start(slirp);
771 }
772 }
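
/*
 * Editor's usage sketch (not part of the original file): the expected
 * fill/poll/dispatch cycle as driven by a main loop.  g_poll() and the
 * GArray handling follow the GLib API already used above; the starting
 * timeout value is illustrative.
 */
#if 0
static void slirp_loop_iteration_example(GArray *pollfds)
{
    uint32_t timeout = 1000; /* ms; slirp_pollfds_fill() may shorten this */
    int err;

    g_array_set_size(pollfds, 0);
    slirp_pollfds_fill(pollfds, &timeout);   /* register fds, trim timeout */
    err = g_poll((GPollFD *)pollfds->data, pollfds->len, timeout);
    slirp_pollfds_poll(pollfds, err < 0);    /* run timers, service sockets */
}
#endif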
773
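/*
 * Editor's note: handle an ARP frame received from the guest.  Gratuitous
 * ARPs and ARP replies only update the ARP table; requests for the host
 * alias, the DNS alias or a registered exec alias are answered with the
 * corresponding 52:55:IP:IP:IP:IP hardware address.
 */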
774 static void arp_input(Slirp *slirp, const uint8_t *pkt, int pkt_len)
775 {
776 struct slirp_arphdr *ah = (struct slirp_arphdr *)(pkt + ETH_HLEN);
777 uint8_t arp_reply[MAX(ETH_HLEN + sizeof(struct slirp_arphdr), 64)];
778 struct ethhdr *reh = (struct ethhdr *)arp_reply;
779 struct slirp_arphdr *rah = (struct slirp_arphdr *)(arp_reply + ETH_HLEN);
780 int ar_op;
781 struct ex_list *ex_ptr;
782
783 if (!slirp->in_enabled) {
784 return;
785 }
786
787 ar_op = ntohs(ah->ar_op);
788 switch(ar_op) {
789 case ARPOP_REQUEST:
790 if (ah->ar_tip == ah->ar_sip) {
791 /* Gratuitous ARP */
792 arp_table_add(slirp, ah->ar_sip, ah->ar_sha);
793 return;
794 }
795
796 if ((ah->ar_tip & slirp->vnetwork_mask.s_addr) ==
797 slirp->vnetwork_addr.s_addr) {
798 if (ah->ar_tip == slirp->vnameserver_addr.s_addr ||
799 ah->ar_tip == slirp->vhost_addr.s_addr)
800 goto arp_ok;
801 for (ex_ptr = slirp->exec_list; ex_ptr; ex_ptr = ex_ptr->ex_next) {
802 if (ex_ptr->ex_addr.s_addr == ah->ar_tip)
803 goto arp_ok;
804 }
805 return;
806 arp_ok:
807 memset(arp_reply, 0, sizeof(arp_reply));
808
809 arp_table_add(slirp, ah->ar_sip, ah->ar_sha);
810
811 /* ARP request for alias/dns mac address */
812 memcpy(reh->h_dest, pkt + ETH_ALEN, ETH_ALEN);
813 memcpy(reh->h_source, special_ethaddr, ETH_ALEN - 4);
814 memcpy(&reh->h_source[2], &ah->ar_tip, 4);
815 reh->h_proto = htons(ETH_P_ARP);
816
817 rah->ar_hrd = htons(1);
818 rah->ar_pro = htons(ETH_P_IP);
819 rah->ar_hln = ETH_ALEN;
820 rah->ar_pln = 4;
821 rah->ar_op = htons(ARPOP_REPLY);
822 memcpy(rah->ar_sha, reh->h_source, ETH_ALEN);
823 rah->ar_sip = ah->ar_tip;
824 memcpy(rah->ar_tha, ah->ar_sha, ETH_ALEN);
825 rah->ar_tip = ah->ar_sip;
826 slirp_output(slirp->opaque, arp_reply, sizeof(arp_reply));
827 }
828 break;
829 case ARPOP_REPLY:
830 arp_table_add(slirp, ah->ar_sip, ah->ar_sha);
831 break;
832 default:
833 break;
834 }
835 }
836
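/*
 * Editor's note: entry point for an Ethernet frame coming from the guest.
 * ARP is handled locally; IPv4/IPv6 payloads are copied into an mbuf (with
 * room for the tcpiphdr overhead) and passed to ip_input()/ip6_input();
 * NCSI frames go to ncsi_input(); anything else is dropped.
 */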
837 void slirp_input(Slirp *slirp, const uint8_t *pkt, int pkt_len)
838 {
839 struct mbuf *m;
840 int proto;
841
842 if (pkt_len < ETH_HLEN)
843 return;
844
845 proto = ntohs(*(uint16_t *)(pkt + 12));
846 switch(proto) {
847 case ETH_P_ARP:
848 arp_input(slirp, pkt, pkt_len);
849 break;
850 case ETH_P_IP:
851 case ETH_P_IPV6:
852 m = m_get(slirp);
853 if (!m)
854 return;
855 /* Note: we add 2 to align the IP header on 4 bytes,
856 * and add the margin for the tcpiphdr overhead */
857 if (M_FREEROOM(m) < pkt_len + TCPIPHDR_DELTA + 2) {
858 m_inc(m, pkt_len + TCPIPHDR_DELTA + 2);
859 }
860 m->m_len = pkt_len + TCPIPHDR_DELTA + 2;
861 memcpy(m->m_data + TCPIPHDR_DELTA + 2, pkt, pkt_len);
862
863 m->m_data += TCPIPHDR_DELTA + 2 + ETH_HLEN;
864 m->m_len -= TCPIPHDR_DELTA + 2 + ETH_HLEN;
865
866 if (proto == ETH_P_IP) {
867 ip_input(m);
868 } else if (proto == ETH_P_IPV6) {
869 ip6_input(m);
870 }
871 break;
872
873 case ETH_P_NCSI:
874 ncsi_input(slirp, pkt, pkt_len);
875 break;
876
877 default:
878 break;
879 }
880 }
881
882 /* Prepare the IPv4 packet to be sent to the ethernet device. Returns 1 if no
883 * packet should be sent, 0 if the packet must be re-queued, 2 if the packet
884 * is ready to go.
885 */
886 static int if_encap4(Slirp *slirp, struct mbuf *ifm, struct ethhdr *eh,
887 uint8_t ethaddr[ETH_ALEN])
888 {
889 const struct ip *iph = (const struct ip *)ifm->m_data;
890
891 if (iph->ip_dst.s_addr == 0) {
892 /* 0.0.0.0 cannot be a destination address; something went wrong,
893 * so avoid making it worse */
894 return 1;
895 }
896 if (!arp_table_search(slirp, iph->ip_dst.s_addr, ethaddr)) {
897 uint8_t arp_req[ETH_HLEN + sizeof(struct slirp_arphdr)];
898 struct ethhdr *reh = (struct ethhdr *)arp_req;
899 struct slirp_arphdr *rah = (struct slirp_arphdr *)(arp_req + ETH_HLEN);
900
901 if (!ifm->resolution_requested) {
902 /* If the client addr is not known, send an ARP request */
903 memset(reh->h_dest, 0xff, ETH_ALEN);
904 memcpy(reh->h_source, special_ethaddr, ETH_ALEN - 4);
905 memcpy(&reh->h_source[2], &slirp->vhost_addr, 4);
906 reh->h_proto = htons(ETH_P_ARP);
907 rah->ar_hrd = htons(1);
908 rah->ar_pro = htons(ETH_P_IP);
909 rah->ar_hln = ETH_ALEN;
910 rah->ar_pln = 4;
911 rah->ar_op = htons(ARPOP_REQUEST);
912
913 /* source hw addr */
914 memcpy(rah->ar_sha, special_ethaddr, ETH_ALEN - 4);
915 memcpy(&rah->ar_sha[2], &slirp->vhost_addr, 4);
916
917 /* source IP */
918 rah->ar_sip = slirp->vhost_addr.s_addr;
919
920 /* target hw addr (none) */
921 memset(rah->ar_tha, 0, ETH_ALEN);
922
923 /* target IP */
924 rah->ar_tip = iph->ip_dst.s_addr;
925 slirp->client_ipaddr = iph->ip_dst;
926 slirp_output(slirp->opaque, arp_req, sizeof(arp_req));
927 ifm->resolution_requested = true;
928
929 /* Expire request and drop outgoing packet after 1 second */
930 ifm->expiration_date = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + 1000000000ULL;
931 }
932 return 0;
933 } else {
934 memcpy(eh->h_source, special_ethaddr, ETH_ALEN - 4);
935 /* XXX: not correct */
936 memcpy(&eh->h_source[2], &slirp->vhost_addr, 4);
937 eh->h_proto = htons(ETH_P_IP);
938
939 /* Send this */
940 return 2;
941 }
942 }
943
944 /* Prepare the IPv6 packet to be sent to the ethernet device. Returns 1 if no
945 * packet should be sent, 0 if the packet must be re-queued, 2 if the packet
946 * is ready to go.
947 */
948 static int if_encap6(Slirp *slirp, struct mbuf *ifm, struct ethhdr *eh,
949 uint8_t ethaddr[ETH_ALEN])
950 {
951 const struct ip6 *ip6h = mtod(ifm, const struct ip6 *);
952 if (!ndp_table_search(slirp, ip6h->ip_dst, ethaddr)) {
953 if (!ifm->resolution_requested) {
954 ndp_send_ns(slirp, ip6h->ip_dst);
955 ifm->resolution_requested = true;
956 ifm->expiration_date =
957 qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + 1000000000ULL;
958 }
959 return 0;
960 } else {
961 eh->h_proto = htons(ETH_P_IPV6);
962 in6_compute_ethaddr(ip6h->ip_src, eh->h_source);
963
964 /* Send this */
965 return 2;
966 }
967 }
968
969 /* Output the IP packet to the ethernet device. Returns 0 if the packet must be
970 * re-queued.
971 */
972 int if_encap(Slirp *slirp, struct mbuf *ifm)
973 {
974 uint8_t buf[1600];
975 struct ethhdr *eh = (struct ethhdr *)buf;
976 uint8_t ethaddr[ETH_ALEN];
977 const struct ip *iph = (const struct ip *)ifm->m_data;
978 int ret;
979
980 if (ifm->m_len + ETH_HLEN > sizeof(buf)) {
981 return 1;
982 }
983
984 switch (iph->ip_v) {
985 case IPVERSION:
986 ret = if_encap4(slirp, ifm, eh, ethaddr);
987 if (ret < 2) {
988 return ret;
989 }
990 break;
991
992 case IP6VERSION:
993 ret = if_encap6(slirp, ifm, eh, ethaddr);
994 if (ret < 2) {
995 return ret;
996 }
997 break;
998
999 default:
1000 g_assert_not_reached();
1001 break;
1002 }
1003
1004 memcpy(eh->h_dest, ethaddr, ETH_ALEN);
1005 DEBUG_ARGS((dfd, " src = %02x:%02x:%02x:%02x:%02x:%02x\n",
1006 eh->h_source[0], eh->h_source[1], eh->h_source[2],
1007 eh->h_source[3], eh->h_source[4], eh->h_source[5]));
1008 DEBUG_ARGS((dfd, " dst = %02x:%02x:%02x:%02x:%02x:%02x\n",
1009 eh->h_dest[0], eh->h_dest[1], eh->h_dest[2],
1010 eh->h_dest[3], eh->h_dest[4], eh->h_dest[5]));
1011 memcpy(buf + sizeof(struct ethhdr), ifm->m_data, ifm->m_len);
1012 slirp_output(slirp->opaque, buf, ifm->m_len + ETH_HLEN);
1013 return 1;
1014 }
1015
1016 /* Remove a host forwarding rule; return 0 if it was found and removed, -1 otherwise. */
1017 int slirp_remove_hostfwd(Slirp *slirp, int is_udp, struct in_addr host_addr,
1018 int host_port)
1019 {
1020 struct socket *so;
1021 struct socket *head = (is_udp ? &slirp->udb : &slirp->tcb);
1022 struct sockaddr_in addr;
1023 int port = htons(host_port);
1024 socklen_t addr_len;
1025
1026 for (so = head->so_next; so != head; so = so->so_next) {
1027 addr_len = sizeof(addr);
1028 if ((so->so_state & SS_HOSTFWD) &&
1029 getsockname(so->s, (struct sockaddr *)&addr, &addr_len) == 0 &&
1030 addr.sin_addr.s_addr == host_addr.s_addr &&
1031 addr.sin_port == port) {
1032 close(so->s);
1033 sofree(so);
1034 return 0;
1035 }
1036 }
1037
1038 return -1;
1039 }
1040
1041 int slirp_add_hostfwd(Slirp *slirp, int is_udp, struct in_addr host_addr,
1042 int host_port, struct in_addr guest_addr, int guest_port)
1043 {
1044 if (!guest_addr.s_addr) {
1045 guest_addr = slirp->vdhcp_startaddr;
1046 }
1047 if (is_udp) {
1048 if (!udp_listen(slirp, host_addr.s_addr, htons(host_port),
1049 guest_addr.s_addr, htons(guest_port), SS_HOSTFWD))
1050 return -1;
1051 } else {
1052 if (!tcp_listen(slirp, host_addr.s_addr, htons(host_port),
1053 guest_addr.s_addr, htons(guest_port), SS_HOSTFWD))
1054 return -1;
1055 }
1056 return 0;
1057 }
1058
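/*
 * Editor's usage sketch (not part of the original file): forward TCP
 * connections arriving on the host's 127.0.0.1:2222 to port 22 of the
 * default guest address.  The ports are illustrative only.
 */
#if 0
static int slirp_hostfwd_ssh_example(Slirp *slirp)
{
    struct in_addr host_addr = { .s_addr = htonl(INADDR_LOOPBACK) };
    struct in_addr guest_addr = { .s_addr = 0 }; /* 0 => vdhcp_startaddr */

    return slirp_add_hostfwd(slirp, 0 /* TCP, not UDP */, host_addr, 2222,
                             guest_addr, 22);
}
#endif
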
1059 int slirp_add_exec(Slirp *slirp, int do_pty, const void *args,
1060 struct in_addr *guest_addr, int guest_port)
1061 {
1062 if (!guest_addr->s_addr) {
1063 guest_addr->s_addr = slirp->vnetwork_addr.s_addr |
1064 (htonl(0x0204) & ~slirp->vnetwork_mask.s_addr);
1065 }
1066 if ((guest_addr->s_addr & slirp->vnetwork_mask.s_addr) !=
1067 slirp->vnetwork_addr.s_addr ||
1068 guest_addr->s_addr == slirp->vhost_addr.s_addr ||
1069 guest_addr->s_addr == slirp->vnameserver_addr.s_addr) {
1070 return -1;
1071 }
1072 return add_exec(&slirp->exec_list, do_pty, (char *)args, *guest_addr,
1073 htons(guest_port));
1074 }
1075
1076 ssize_t slirp_send(struct socket *so, const void *buf, size_t len, int flags)
1077 {
1078 if (so->s == -1 && so->extra) {
1079 /* XXX this blocks entire thread. Rewrite to use
1080 * qemu_chr_fe_write and background I/O callbacks */
1081 qemu_chr_fe_write_all(so->extra, buf, len);
1082 return len;
1083 }
1084
1085 return send(so->s, buf, len, flags);
1086 }
1087
1088 static struct socket *
1089 slirp_find_ctl_socket(Slirp *slirp, struct in_addr guest_addr, int guest_port)
1090 {
1091 struct socket *so;
1092
1093 for (so = slirp->tcb.so_next; so != &slirp->tcb; so = so->so_next) {
1094 if (so->so_faddr.s_addr == guest_addr.s_addr &&
1095 htons(so->so_fport) == guest_port) {
1096 return so;
1097 }
1098 }
1099 return NULL;
1100 }
1101
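/*
 * Editor's note: callers (e.g. QEMU's guestfwd path) use this to ask how
 * many bytes the TCP connection bound to (guest_addr, guest_port) can
 * accept right now; it returns 0 if there is no such connection or it
 * cannot take more data at the moment.
 */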
1102 size_t slirp_socket_can_recv(Slirp *slirp, struct in_addr guest_addr,
1103 int guest_port)
1104 {
1105 struct iovec iov[2];
1106 struct socket *so;
1107
1108 so = slirp_find_ctl_socket(slirp, guest_addr, guest_port);
1109
1110 if (!so || so->so_state & SS_NOFDREF) {
1111 return 0;
1112 }
1113
1114 if (!CONN_CANFRCV(so) || so->so_snd.sb_cc >= (so->so_snd.sb_datalen/2)) {
1115 return 0;
1116 }
1117
1118 return sopreprbuf(so, iov, NULL);
1119 }
1120
1121 void slirp_socket_recv(Slirp *slirp, struct in_addr guest_addr, int guest_port,
1122 const uint8_t *buf, int size)
1123 {
1124 int ret;
1125 struct socket *so = slirp_find_ctl_socket(slirp, guest_addr, guest_port);
1126
1127 if (!so)
1128 return;
1129
1130 ret = soreadbuf(so, (const char *)buf, size);
1131
1132 if (ret > 0)
1133 tcp_output(sototcpcb(so));
1134 }
1135
1136 static int slirp_tcp_post_load(void *opaque, int version)
1137 {
1138 tcp_template((struct tcpcb *)opaque);
1139
1140 return 0;
1141 }
1142
1143 static const VMStateDescription vmstate_slirp_tcp = {
1144 .name = "slirp-tcp",
1145 .version_id = 0,
1146 .post_load = slirp_tcp_post_load,
1147 .fields = (VMStateField[]) {
1148 VMSTATE_INT16(t_state, struct tcpcb),
1149 VMSTATE_INT16_ARRAY(t_timer, struct tcpcb, TCPT_NTIMERS),
1150 VMSTATE_INT16(t_rxtshift, struct tcpcb),
1151 VMSTATE_INT16(t_rxtcur, struct tcpcb),
1152 VMSTATE_INT16(t_dupacks, struct tcpcb),
1153 VMSTATE_UINT16(t_maxseg, struct tcpcb),
1154 VMSTATE_UINT8(t_force, struct tcpcb),
1155 VMSTATE_UINT16(t_flags, struct tcpcb),
1156 VMSTATE_UINT32(snd_una, struct tcpcb),
1157 VMSTATE_UINT32(snd_nxt, struct tcpcb),
1158 VMSTATE_UINT32(snd_up, struct tcpcb),
1159 VMSTATE_UINT32(snd_wl1, struct tcpcb),
1160 VMSTATE_UINT32(snd_wl2, struct tcpcb),
1161 VMSTATE_UINT32(iss, struct tcpcb),
1162 VMSTATE_UINT32(snd_wnd, struct tcpcb),
1163 VMSTATE_UINT32(rcv_wnd, struct tcpcb),
1164 VMSTATE_UINT32(rcv_nxt, struct tcpcb),
1165 VMSTATE_UINT32(rcv_up, struct tcpcb),
1166 VMSTATE_UINT32(irs, struct tcpcb),
1167 VMSTATE_UINT32(rcv_adv, struct tcpcb),
1168 VMSTATE_UINT32(snd_max, struct tcpcb),
1169 VMSTATE_UINT32(snd_cwnd, struct tcpcb),
1170 VMSTATE_UINT32(snd_ssthresh, struct tcpcb),
1171 VMSTATE_INT16(t_idle, struct tcpcb),
1172 VMSTATE_INT16(t_rtt, struct tcpcb),
1173 VMSTATE_UINT32(t_rtseq, struct tcpcb),
1174 VMSTATE_INT16(t_srtt, struct tcpcb),
1175 VMSTATE_INT16(t_rttvar, struct tcpcb),
1176 VMSTATE_UINT16(t_rttmin, struct tcpcb),
1177 VMSTATE_UINT32(max_sndwnd, struct tcpcb),
1178 VMSTATE_UINT8(t_oobflags, struct tcpcb),
1179 VMSTATE_UINT8(t_iobc, struct tcpcb),
1180 VMSTATE_INT16(t_softerror, struct tcpcb),
1181 VMSTATE_UINT8(snd_scale, struct tcpcb),
1182 VMSTATE_UINT8(rcv_scale, struct tcpcb),
1183 VMSTATE_UINT8(request_r_scale, struct tcpcb),
1184 VMSTATE_UINT8(requested_s_scale, struct tcpcb),
1185 VMSTATE_UINT32(ts_recent, struct tcpcb),
1186 VMSTATE_UINT32(ts_recent_age, struct tcpcb),
1187 VMSTATE_UINT32(last_ack_sent, struct tcpcb),
1188 VMSTATE_END_OF_LIST()
1189 }
1190 };
1191
1192 /* The sbuf has a pair of pointers that are migrated as offsets;
1193 * we calculate the offsets and restore the pointers using
1194 * pre_save/post_load on a tmp structure.
1195 */
1196 struct sbuf_tmp {
1197 struct sbuf *parent;
1198 uint32_t roff, woff;
1199 };
1200
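/*
 * Editor's example: if sb_data is at address A and sb_wptr == A + 16, then
 * pre_save records woff = 16.  On the destination, post_load first calls
 * sbreserve() to reallocate sb_data and then rebuilds sb_wptr as the new
 * sb_data + 16 (and sb_rptr from roff in the same way).
 */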
1201 static void sbuf_tmp_pre_save(void *opaque)
1202 {
1203 struct sbuf_tmp *tmp = opaque;
1204 tmp->woff = tmp->parent->sb_wptr - tmp->parent->sb_data;
1205 tmp->roff = tmp->parent->sb_rptr - tmp->parent->sb_data;
1206 }
1207
1208 static int sbuf_tmp_post_load(void *opaque, int version)
1209 {
1210 struct sbuf_tmp *tmp = opaque;
1211 uint32_t requested_len = tmp->parent->sb_datalen;
1212
1213 /* Allocate the buffer that the sb_data field following this tmp section will be loaded into */
1214 sbreserve(tmp->parent, tmp->parent->sb_datalen);
1215
1216 if (tmp->parent->sb_datalen != requested_len) {
1217 return -ENOMEM;
1218 }
1219 if (tmp->woff >= requested_len ||
1220 tmp->roff >= requested_len) {
1221 error_report("invalid sbuf offsets r/w=%u/%u len=%u",
1222 tmp->roff, tmp->woff, requested_len);
1223 return -EINVAL;
1224 }
1225
1226 tmp->parent->sb_wptr = tmp->parent->sb_data + tmp->woff;
1227 tmp->parent->sb_rptr = tmp->parent->sb_data + tmp->roff;
1228
1229 return 0;
1230 }
1231
1232
1233 static const VMStateDescription vmstate_slirp_sbuf_tmp = {
1234 .name = "slirp-sbuf-tmp",
1235 .post_load = sbuf_tmp_post_load,
1236 .pre_save = sbuf_tmp_pre_save,
1237 .version_id = 0,
1238 .fields = (VMStateField[]) {
1239 VMSTATE_UINT32(woff, struct sbuf_tmp),
1240 VMSTATE_UINT32(roff, struct sbuf_tmp),
1241 VMSTATE_END_OF_LIST()
1242 }
1243 };
1244
1245 static const VMStateDescription vmstate_slirp_sbuf = {
1246 .name = "slirp-sbuf",
1247 .version_id = 0,
1248 .fields = (VMStateField[]) {
1249 VMSTATE_UINT32(sb_cc, struct sbuf),
1250 VMSTATE_UINT32(sb_datalen, struct sbuf),
1251 VMSTATE_WITH_TMP(struct sbuf, struct sbuf_tmp, vmstate_slirp_sbuf_tmp),
1252 VMSTATE_VBUFFER_UINT32(sb_data, struct sbuf, 0, NULL, sb_datalen),
1253 VMSTATE_END_OF_LIST()
1254 }
1255 };
1256
1257
1258 static void slirp_socket_save(QEMUFile *f, struct socket *so)
1259 {
1260 qemu_put_be32(f, so->so_urgc);
1261 qemu_put_be16(f, so->so_ffamily);
1262 switch (so->so_ffamily) {
1263 case AF_INET:
1264 qemu_put_be32(f, so->so_faddr.s_addr);
1265 qemu_put_be16(f, so->so_fport);
1266 break;
1267 default:
1268 error_report("so_ffamily unknown, unable to save so_faddr and"
1269 " so_fport");
1270 }
1271 qemu_put_be16(f, so->so_lfamily);
1272 switch (so->so_lfamily) {
1273 case AF_INET:
1274 qemu_put_be32(f, so->so_laddr.s_addr);
1275 qemu_put_be16(f, so->so_lport);
1276 break;
1277 default:
1278 error_report("so_lfamily unknown, unable to save so_laddr and"
1279 " so_lport");
1280 }
1281 qemu_put_byte(f, so->so_iptos);
1282 qemu_put_byte(f, so->so_emu);
1283 qemu_put_byte(f, so->so_type);
1284 qemu_put_be32(f, so->so_state);
1285 /* TODO: Build vmstate at this level */
1286 vmstate_save_state(f, &vmstate_slirp_sbuf, &so->so_rcv, 0);
1287 vmstate_save_state(f, &vmstate_slirp_sbuf, &so->so_snd, 0);
1288 vmstate_save_state(f, &vmstate_slirp_tcp, so->so_tcpcb, 0);
1289 }
1290
1291 static void slirp_bootp_save(QEMUFile *f, Slirp *slirp)
1292 {
1293 int i;
1294
1295 for (i = 0; i < NB_BOOTP_CLIENTS; i++) {
1296 qemu_put_be16(f, slirp->bootp_clients[i].allocated);
1297 qemu_put_buffer(f, slirp->bootp_clients[i].macaddr, 6);
1298 }
1299 }
1300
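/*
 * Editor's note on the stream produced below: a byte 42 followed by the
 * socket state for each guest-forwarded (ex_pty == 3) connection, a
 * terminating 0 byte, then ip_id, then the BOOTP/DHCP lease table.
 */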
1301 static void slirp_state_save(QEMUFile *f, void *opaque)
1302 {
1303 Slirp *slirp = opaque;
1304 struct ex_list *ex_ptr;
1305
1306 for (ex_ptr = slirp->exec_list; ex_ptr; ex_ptr = ex_ptr->ex_next)
1307 if (ex_ptr->ex_pty == 3) {
1308 struct socket *so;
1309 so = slirp_find_ctl_socket(slirp, ex_ptr->ex_addr,
1310 ntohs(ex_ptr->ex_fport));
1311 if (!so)
1312 continue;
1313
1314 qemu_put_byte(f, 42);
1315 slirp_socket_save(f, so);
1316 }
1317 qemu_put_byte(f, 0);
1318
1319 qemu_put_be16(f, slirp->ip_id);
1320
1321 slirp_bootp_save(f, slirp);
1322 }
1323
1324 static int slirp_socket_load(QEMUFile *f, struct socket *so, int version_id)
1325 {
1326 int ret = 0;
1327 if (tcp_attach(so) < 0)
1328 return -ENOMEM;
1329
1330 so->so_urgc = qemu_get_be32(f);
1331 if (version_id <= 3) {
1332 so->so_ffamily = AF_INET;
1333 so->so_faddr.s_addr = qemu_get_be32(f);
1334 so->so_laddr.s_addr = qemu_get_be32(f);
1335 so->so_fport = qemu_get_be16(f);
1336 so->so_lport = qemu_get_be16(f);
1337 } else {
1338 so->so_ffamily = qemu_get_be16(f);
1339 switch (so->so_ffamily) {
1340 case AF_INET:
1341 so->so_faddr.s_addr = qemu_get_be32(f);
1342 so->so_fport = qemu_get_be16(f);
1343 break;
1344 default:
1345 error_report(
1346 "so_ffamily unknown, unable to restore so_faddr and so_lport");
1347 }
1348 so->so_lfamily = qemu_get_be16(f);
1349 switch (so->so_lfamily) {
1350 case AF_INET:
1351 so->so_laddr.s_addr = qemu_get_be32(f);
1352 so->so_lport = qemu_get_be16(f);
1353 break;
1354 default:
1355 error_report(
1356 "so_ffamily unknown, unable to restore so_laddr and so_lport");
1357 }
1358 }
1359 so->so_iptos = qemu_get_byte(f);
1360 so->so_emu = qemu_get_byte(f);
1361 so->so_type = qemu_get_byte(f);
1362 so->so_state = qemu_get_be32(f);
1363 /* TODO: VMState at this level */
1364 ret = vmstate_load_state(f, &vmstate_slirp_sbuf, &so->so_rcv, 0);
1365 if (!ret) {
1366 ret = vmstate_load_state(f, &vmstate_slirp_sbuf, &so->so_snd, 0);
1367 }
1368 if (!ret) {
1369 ret = vmstate_load_state(f, &vmstate_slirp_tcp, so->so_tcpcb, 0);
1370 }
1371 return ret;
1372 }
1373
1374 static void slirp_bootp_load(QEMUFile *f, Slirp *slirp)
1375 {
1376 int i;
1377
1378 for (i = 0; i < NB_BOOTP_CLIENTS; i++) {
1379 slirp->bootp_clients[i].allocated = qemu_get_be16(f);
1380 qemu_get_buffer(f, slirp->bootp_clients[i].macaddr, 6);
1381 }
1382 }
1383
1384 static int slirp_state_load(QEMUFile *f, void *opaque, int version_id)
1385 {
1386 Slirp *slirp = opaque;
1387 struct ex_list *ex_ptr;
1388
1389 while (qemu_get_byte(f)) {
1390 int ret;
1391 struct socket *so = socreate(slirp);
1392
1393 if (!so)
1394 return -ENOMEM;
1395
1396 ret = slirp_socket_load(f, so, version_id);
1397
1398 if (ret < 0)
1399 return ret;
1400
1401 if ((so->so_faddr.s_addr & slirp->vnetwork_mask.s_addr) !=
1402 slirp->vnetwork_addr.s_addr) {
1403 return -EINVAL;
1404 }
1405 for (ex_ptr = slirp->exec_list; ex_ptr; ex_ptr = ex_ptr->ex_next) {
1406 if (ex_ptr->ex_pty == 3 &&
1407 so->so_faddr.s_addr == ex_ptr->ex_addr.s_addr &&
1408 so->so_fport == ex_ptr->ex_fport) {
1409 break;
1410 }
1411 }
1412 if (!ex_ptr)
1413 return -EINVAL;
1414
1415 so->extra = (void *)ex_ptr->ex_exec;
1416 }
1417
1418 if (version_id >= 2) {
1419 slirp->ip_id = qemu_get_be16(f);
1420 }
1421
1422 if (version_id >= 3) {
1423 slirp_bootp_load(f, slirp);
1424 }
1425
1426 return 0;
1427 }