/*
 * os-win32.c
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2010-2016 Red Hat, Inc.
 *
 * QEMU library functions for win32 which are shared between QEMU and
 * the QEMU tools.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * The implementation of g_poll (functions poll_rest, g_poll) at the end of
 * this file is based on code from GNOME glib-2 and uses a different license,
 * see the license comment there.
 */
#include "qemu/osdep.h"
#include <windows.h>
#include <glib.h>
#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"

/* this must come after including "trace.h" */
#include <shlobj.h>

void *qemu_oom_check(void *ptr)
{
    if (ptr == NULL) {
        fprintf(stderr, "Failed to allocate memory: %lu\n", GetLastError());
        abort();
    }
    return ptr;
}

void *qemu_try_memalign(size_t alignment, size_t size)
{
    void *ptr;

    if (!size) {
        abort();
    }
    ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
    trace_qemu_memalign(alignment, size, ptr);
    return ptr;
}

void *qemu_memalign(size_t alignment, size_t size)
{
    return qemu_oom_check(qemu_try_memalign(alignment, size));
}

void *qemu_anon_ram_alloc(size_t size, uint64_t *align)
{
    void *ptr;

    /* FIXME: this is not an optimal solution, since VirtualAlloc
       has 64Kb granularity, but at least it guarantees us that the
       memory is page aligned. */
    ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
    trace_qemu_anon_ram_alloc(size, ptr);
    return ptr;
}
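
/*
 * A side note on the FIXME above, kept as an illustrative sketch only: the
 * 64Kb figure is the VirtualAlloc allocation granularity, which can be
 * queried at run time together with the page size (dwAllocationGranularity
 * and dwPageSize are documented SYSTEM_INFO fields):
 *
 *     SYSTEM_INFO si;
 *     GetSystemInfo(&si);
 *     // si.dwAllocationGranularity is typically 64 KiB,
 *     // si.dwPageSize is typically 4 KiB, so blocks returned by
 *     // VirtualAlloc are page aligned even without an explicit request.
 */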

void qemu_vfree(void *ptr)
{
    trace_qemu_vfree(ptr);
    if (ptr) {
        VirtualFree(ptr, 0, MEM_RELEASE);
    }
}

void qemu_anon_ram_free(void *ptr, size_t size)
{
    trace_qemu_anon_ram_free(ptr, size);
    if (ptr) {
        VirtualFree(ptr, 0, MEM_RELEASE);
    }
}

#ifndef CONFIG_LOCALTIME_R
/* FIXME: add proper locking */
struct tm *gmtime_r(const time_t *timep, struct tm *result)
{
    struct tm *p = gmtime(timep);
    memset(result, 0, sizeof(*result));
    if (p) {
        *result = *p;
        p = result;
    }
    return p;
}

/* FIXME: add proper locking */
struct tm *localtime_r(const time_t *timep, struct tm *result)
{
    struct tm *p = localtime(timep);
    memset(result, 0, sizeof(*result));
    if (p) {
        *result = *p;
        p = result;
    }
    return p;
}
#endif /* CONFIG_LOCALTIME_R */
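
/*
 * A minimal sketch of the "proper locking" the FIXMEs above ask for,
 * assuming a hypothetical file-scope GLib mutex (not present in this file):
 *
 *     static GMutex time_lock;
 *
 *     g_mutex_lock(&time_lock);
 *     p = gmtime(timep);
 *     if (p) {
 *         *result = *p;
 *     }
 *     g_mutex_unlock(&time_lock);
 *
 * The copy into *result has to happen before the mutex is released, because
 * the static buffer returned by gmtime()/localtime() may be overwritten by
 * the next caller.
 */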

void qemu_set_block(int fd)
{
    unsigned long opt = 0;
    WSAEventSelect(fd, NULL, 0);
    ioctlsocket(fd, FIONBIO, &opt);
}

void qemu_set_nonblock(int fd)
{
    unsigned long opt = 1;
    ioctlsocket(fd, FIONBIO, &opt);
    qemu_fd_register(fd);
}

int socket_set_fast_reuse(int fd)
{
    /* Enabling the reuse of an endpoint that was used by a socket still in
     * TIME_WAIT state is usually performed by setting SO_REUSEADDR. On Windows
     * fast reuse is the default and SO_REUSEADDR does strange things. So we
     * don't have to do anything here. More info can be found at:
     * http://msdn.microsoft.com/en-us/library/windows/desktop/ms740621.aspx */
    return 0;
}
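
/*
 * For contrast, a rough sketch of what the POSIX counterpart does instead
 * (shown only as an illustration of the behaviour this stub deliberately
 * skips on Windows):
 *
 *     int v = 1;
 *     ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &v, sizeof(v));
 *
 * On Windows, setting SO_REUSEADDR would additionally let two sockets bind
 * the same address at the same time, which is why it is not done here.
 */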


static int socket_error(void)
{
    switch (WSAGetLastError()) {
    case 0:
        return 0;
    case WSAEINTR:
        return EINTR;
    case WSAEINVAL:
        return EINVAL;
    case WSA_INVALID_HANDLE:
        return EBADF;
    case WSA_NOT_ENOUGH_MEMORY:
        return ENOMEM;
    case WSA_INVALID_PARAMETER:
        return EINVAL;
    case WSAENAMETOOLONG:
        return ENAMETOOLONG;
    case WSAENOTEMPTY:
        return ENOTEMPTY;
    case WSAEWOULDBLOCK:
        /* not using EWOULDBLOCK as we don't want code to have
         * to check both EWOULDBLOCK and EAGAIN */
        return EAGAIN;
    case WSAEINPROGRESS:
        return EINPROGRESS;
    case WSAEALREADY:
        return EALREADY;
    case WSAENOTSOCK:
        return ENOTSOCK;
    case WSAEDESTADDRREQ:
        return EDESTADDRREQ;
    case WSAEMSGSIZE:
        return EMSGSIZE;
    case WSAEPROTOTYPE:
        return EPROTOTYPE;
    case WSAENOPROTOOPT:
        return ENOPROTOOPT;
    case WSAEPROTONOSUPPORT:
        return EPROTONOSUPPORT;
    case WSAEOPNOTSUPP:
        return EOPNOTSUPP;
    case WSAEAFNOSUPPORT:
        return EAFNOSUPPORT;
    case WSAEADDRINUSE:
        return EADDRINUSE;
    case WSAEADDRNOTAVAIL:
        return EADDRNOTAVAIL;
    case WSAENETDOWN:
        return ENETDOWN;
    case WSAENETUNREACH:
        return ENETUNREACH;
    case WSAENETRESET:
        return ENETRESET;
    case WSAECONNABORTED:
        return ECONNABORTED;
    case WSAECONNRESET:
        return ECONNRESET;
    case WSAENOBUFS:
        return ENOBUFS;
    case WSAEISCONN:
        return EISCONN;
    case WSAENOTCONN:
        return ENOTCONN;
    case WSAETIMEDOUT:
        return ETIMEDOUT;
    case WSAECONNREFUSED:
        return ECONNREFUSED;
    case WSAELOOP:
        return ELOOP;
    case WSAEHOSTUNREACH:
        return EHOSTUNREACH;
    default:
        return EIO;
    }
}
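
/*
 * socket_error() backs the qemu_*_wrap() functions at the end of this file:
 * each wrapper calls the Winsock function and, on failure, translates
 * WSAGetLastError() into a POSIX errno value.  A minimal caller-side sketch
 * (hypothetical code, using qemu_recv_wrap() defined below):
 *
 *     ssize_t n = qemu_recv_wrap(sockfd, buf, sizeof(buf), 0);
 *     if (n < 0 && errno == EAGAIN) {
 *         // nothing to read yet on a non-blocking socket; note EAGAIN,
 *         // not EWOULDBLOCK, per the WSAEWOULDBLOCK mapping above
 *     }
 */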

int inet_aton(const char *cp, struct in_addr *ia)
{
    uint32_t addr = inet_addr(cp);
    if (addr == 0xffffffff) {
        return 0;
    }
    ia->s_addr = addr;
    return 1;
}

void qemu_set_cloexec(int fd)
{
}

/* Offset between 1/1/1601 and 1/1/1970 in 100 nanosec units */
#define _W32_FT_OFFSET (116444736000000000ULL)

int qemu_gettimeofday(qemu_timeval *tp)
{
    union {
        unsigned long long ns100; /* time since 1 Jan 1601 in 100ns units */
        FILETIME ft;
    } _now;

    if (tp) {
        GetSystemTimeAsFileTime(&_now.ft);
        tp->tv_usec = (long)((_now.ns100 / 10ULL) % 1000000ULL);
        tp->tv_sec = (long)((_now.ns100 - _W32_FT_OFFSET) / 10000000ULL);
    }
    /* Always return 0 as per Open Group Base Specifications Issue 6.
       Do not set errno on error. */
    return 0;
}
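
/*
 * Sanity check of _W32_FT_OFFSET, arithmetic only: 1601-01-01 to 1970-01-01
 * is 369 years containing 89 leap days, i.e. (369 * 365 + 89) * 86400 =
 * 11644473600 seconds; in 100 ns units that is 11644473600 * 10^7 =
 * 116444736000000000, which matches the constant defined above.
 */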

int qemu_get_thread_id(void)
{
    return GetCurrentThreadId();
}

char *
qemu_get_local_state_pathname(const char *relative_pathname)
{
    HRESULT result;
    char base_path[MAX_PATH+1] = "";

    result = SHGetFolderPath(NULL, CSIDL_COMMON_APPDATA, NULL,
                             /* SHGFP_TYPE_CURRENT */ 0, base_path);
    if (result != S_OK) {
        /* misconfigured environment */
        g_critical("CSIDL_COMMON_APPDATA unavailable: %ld", (long)result);
        abort();
    }
    return g_strdup_printf("%s" G_DIR_SEPARATOR_S "%s", base_path,
                           relative_pathname);
}

void qemu_set_tty_echo(int fd, bool echo)
{
    HANDLE handle = (HANDLE)_get_osfhandle(fd);
    DWORD dwMode = 0;

    if (handle == INVALID_HANDLE_VALUE) {
        return;
    }

    GetConsoleMode(handle, &dwMode);

    if (echo) {
        SetConsoleMode(handle, dwMode | ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT);
    } else {
        SetConsoleMode(handle,
                       dwMode & ~(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT));
    }
}

static char exec_dir[PATH_MAX];

void qemu_init_exec_dir(const char *argv0)
{
    char *p;
    char buf[MAX_PATH];
    DWORD len;

    len = GetModuleFileName(NULL, buf, sizeof(buf) - 1);
    if (len == 0) {
        return;
    }

    buf[len] = 0;
    p = buf + len - 1;
    while (p != buf && *p != '\\') {
        p--;
    }
    *p = 0;
    if (access(buf, R_OK) == 0) {
        pstrcpy(exec_dir, sizeof(exec_dir), buf);
    }
}

char *qemu_get_exec_dir(void)
{
    return g_strdup(exec_dir);
}

/*
 * The original implementation of g_poll from glib has a problem on Windows
 * when using timeouts < 10 ms.
 *
 * Whenever g_poll is called with a timeout < 10 ms, it does a quick poll
 * instead of waiting. This causes significant performance degradation in
 * QEMU.
 *
 * The following code is a copy of the original code from glib/gpoll.c
 * (glib commit 20f4d1820b8d4d0fc4447188e33efffd6d4a88d8 from 2014-02-19).
 * Some debug code was removed and the code was reformatted.
 * All other code modifications are marked with 'QEMU'.
 */

/*
 * gpoll.c: poll(2) abstraction
 * Copyright 1998 Owen Taylor
 * Copyright 2008 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

static int poll_rest(gboolean poll_msgs, HANDLE *handles, gint nhandles,
                     GPollFD *fds, guint nfds, gint timeout)
{
    DWORD ready;
    GPollFD *f;
    int recursed_result;

    if (poll_msgs) {
        /* Wait for either messages or handles
         * -> Use MsgWaitForMultipleObjectsEx
         */
        ready = MsgWaitForMultipleObjectsEx(nhandles, handles, timeout,
                                            QS_ALLINPUT, MWMO_ALERTABLE);

        if (ready == WAIT_FAILED) {
            gchar *emsg = g_win32_error_message(GetLastError());
            g_warning("MsgWaitForMultipleObjectsEx failed: %s", emsg);
            g_free(emsg);
        }
    } else if (nhandles == 0) {
        /* No handles to wait for, just the timeout */
        if (timeout == INFINITE) {
            ready = WAIT_FAILED;
        } else {
            SleepEx(timeout, TRUE);
            ready = WAIT_TIMEOUT;
        }
    } else {
        /* Wait for just handles
         * -> Use WaitForMultipleObjectsEx
         */
        ready =
            WaitForMultipleObjectsEx(nhandles, handles, FALSE, timeout, TRUE);
        if (ready == WAIT_FAILED) {
            gchar *emsg = g_win32_error_message(GetLastError());
            g_warning("WaitForMultipleObjectsEx failed: %s", emsg);
            g_free(emsg);
        }
    }

    if (ready == WAIT_FAILED) {
        return -1;
    } else if (ready == WAIT_TIMEOUT || ready == WAIT_IO_COMPLETION) {
        return 0;
    } else if (poll_msgs && ready == WAIT_OBJECT_0 + nhandles) {
        for (f = fds; f < &fds[nfds]; ++f) {
            if (f->fd == G_WIN32_MSG_HANDLE && f->events & G_IO_IN) {
                f->revents |= G_IO_IN;
            }
        }

        /* If we have a timeout, or no handles to poll, be satisfied
         * with just noticing we have messages waiting.
         */
        if (timeout != 0 || nhandles == 0) {
            return 1;
        }

        /* If no timeout and handles to poll, recurse to poll them,
         * too.
         */
        recursed_result = poll_rest(FALSE, handles, nhandles, fds, nfds, 0);
        return (recursed_result == -1) ? -1 : 1 + recursed_result;
    } else if (/* QEMU: removed the following unneeded statement which causes
                * a compiler warning: ready >= WAIT_OBJECT_0 && */
               ready < WAIT_OBJECT_0 + nhandles) {
        for (f = fds; f < &fds[nfds]; ++f) {
            if ((HANDLE) f->fd == handles[ready - WAIT_OBJECT_0]) {
                f->revents = f->events;
            }
        }

        /* If no timeout and polling several handles, recurse to poll
         * the rest of them.
         */
        if (timeout == 0 && nhandles > 1) {
            /* Remove the handle that fired */
            int i;
            if (ready < nhandles - 1) {
                for (i = ready - WAIT_OBJECT_0 + 1; i < nhandles; i++) {
                    handles[i-1] = handles[i];
                }
            }
            nhandles--;
            recursed_result = poll_rest(FALSE, handles, nhandles, fds, nfds, 0);
            return (recursed_result == -1) ? -1 : 1 + recursed_result;
        }
        return 1;
    }

    return 0;
}

gint g_poll(GPollFD *fds, guint nfds, gint timeout)
{
    HANDLE handles[MAXIMUM_WAIT_OBJECTS];
    gboolean poll_msgs = FALSE;
    GPollFD *f;
    gint nhandles = 0;
    int retval;

    for (f = fds; f < &fds[nfds]; ++f) {
        if (f->fd == G_WIN32_MSG_HANDLE && (f->events & G_IO_IN)) {
            poll_msgs = TRUE;
        } else if (f->fd > 0) {
            /* Don't add the same handle several times into the array, as
             * docs say that is not allowed, even if it actually does seem
             * to work.
             */
            gint i;

            for (i = 0; i < nhandles; i++) {
                if (handles[i] == (HANDLE) f->fd) {
                    break;
                }
            }

            if (i == nhandles) {
                if (nhandles == MAXIMUM_WAIT_OBJECTS) {
                    g_warning("Too many handles to wait for!\n");
                    break;
                } else {
                    handles[nhandles++] = (HANDLE) f->fd;
                }
            }
        }
    }

    for (f = fds; f < &fds[nfds]; ++f) {
        f->revents = 0;
    }

    if (timeout == -1) {
        timeout = INFINITE;
    }

    /* Polling for several things? */
    if (nhandles > 1 || (nhandles > 0 && poll_msgs)) {
        /* First check if one or several of them are immediately
         * available
         */
        retval = poll_rest(poll_msgs, handles, nhandles, fds, nfds, 0);

        /* If not, and we have a significant timeout, poll again with
         * timeout then. Note that this will return indication for only
         * one event, or only for messages. We ignore timeouts less than
         * ten milliseconds as they are mostly pointless on Windows, the
         * MsgWaitForMultipleObjectsEx() call will timeout right away
         * anyway.
         *
         * Modification for QEMU: replaced timeout >= 10 by timeout > 0.
         */
        if (retval == 0 && (timeout == INFINITE || timeout > 0)) {
            retval = poll_rest(poll_msgs, handles, nhandles,
                               fds, nfds, timeout);
        }
    } else {
        /* Just polling for one thing, so no need to check first if
         * available immediately
         */
        retval = poll_rest(poll_msgs, handles, nhandles, fds, nfds, timeout);
    }

    if (retval == -1) {
        for (f = fds; f < &fds[nfds]; ++f) {
            f->revents = 0;
        }
    }

    return retval;
}
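
/*
 * Minimal usage sketch for the g_poll() replacement above (hypothetical
 * caller code; event_handle stands for some Windows event HANDLE):
 *
 *     GPollFD pfd = { 0 };
 *     pfd.fd = (gintptr)event_handle;
 *     pfd.events = G_IO_IN;
 *     if (g_poll(&pfd, 1, 5) > 0 && (pfd.revents & G_IO_IN)) {
 *         // handle signalled; the 5 ms timeout is honoured here instead
 *         // of degrading into a quick poll as with the original glib code
 *     }
 */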

int getpagesize(void)
{
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    return system_info.dwPageSize;
}

void os_mem_prealloc(int fd, char *area, size_t memory)
{
    int i;
    size_t pagesize = getpagesize();

    memory = (memory + pagesize - 1) & -pagesize;
    for (i = 0; i < memory / pagesize; i++) {
        memset(area + pagesize * i, 0, 1);
    }
}
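
/*
 * The rounding above is plain round-up-to-page-size arithmetic.  Worked
 * example with the common 4096-byte page size: memory = 5000 gives
 * (5000 + 4095) & -4096 = 8192, i.e. two pages; the loop then touches one
 * byte in every page so that the memory is actually committed up front.
 */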


/* XXX: add proper win32 support */
int qemu_read_password(char *buf, int buf_size)
{
    int c, i;

    printf("Password: ");
    fflush(stdout);
    i = 0;
    for (;;) {
        c = getchar();
        if (c < 0) {
            buf[i] = '\0';
            return -1;
        } else if (c == '\n') {
            break;
        } else if (i < (buf_size - 1)) {
            buf[i++] = c;
        }
    }
    buf[i] = '\0';
    return 0;
}


pid_t qemu_fork(Error **errp)
{
    errno = ENOSYS;
    error_setg_errno(errp, errno,
                     "cannot fork child process");
    return -1;
}


#undef connect
int qemu_connect_wrap(int sockfd, const struct sockaddr *addr,
                      socklen_t addrlen)
{
    int ret;
    ret = connect(sockfd, addr, addrlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef listen
int qemu_listen_wrap(int sockfd, int backlog)
{
    int ret;
    ret = listen(sockfd, backlog);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef bind
int qemu_bind_wrap(int sockfd, const struct sockaddr *addr,
                   socklen_t addrlen)
{
    int ret;
    ret = bind(sockfd, addr, addrlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef socket
int qemu_socket_wrap(int domain, int type, int protocol)
{
    int ret;
    ret = socket(domain, type, protocol);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef accept
int qemu_accept_wrap(int sockfd, struct sockaddr *addr,
                     socklen_t *addrlen)
{
    int ret;
    ret = accept(sockfd, addr, addrlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef shutdown
int qemu_shutdown_wrap(int sockfd, int how)
{
    int ret;
    ret = shutdown(sockfd, how);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef ioctlsocket
int qemu_ioctlsocket_wrap(int fd, int req, void *val)
{
    int ret;
    ret = ioctlsocket(fd, req, val);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef closesocket
int qemu_closesocket_wrap(int fd)
{
    int ret;
    ret = closesocket(fd);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef getsockopt
int qemu_getsockopt_wrap(int sockfd, int level, int optname,
                         void *optval, socklen_t *optlen)
{
    int ret;
    ret = getsockopt(sockfd, level, optname, optval, optlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef setsockopt
int qemu_setsockopt_wrap(int sockfd, int level, int optname,
                         const void *optval, socklen_t optlen)
{
    int ret;
    ret = setsockopt(sockfd, level, optname, optval, optlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef getpeername
int qemu_getpeername_wrap(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen)
{
    int ret;
    ret = getpeername(sockfd, addr, addrlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef getsockname
int qemu_getsockname_wrap(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen)
{
    int ret;
    ret = getsockname(sockfd, addr, addrlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef send
ssize_t qemu_send_wrap(int sockfd, const void *buf, size_t len, int flags)
{
    int ret;
    ret = send(sockfd, buf, len, flags);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef sendto
ssize_t qemu_sendto_wrap(int sockfd, const void *buf, size_t len, int flags,
                         const struct sockaddr *addr, socklen_t addrlen)
{
    int ret;
    ret = sendto(sockfd, buf, len, flags, addr, addrlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef recv
ssize_t qemu_recv_wrap(int sockfd, void *buf, size_t len, int flags)
{
    int ret;
    ret = recv(sockfd, buf, len, flags);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}


#undef recvfrom
ssize_t qemu_recvfrom_wrap(int sockfd, void *buf, size_t len, int flags,
                           struct sockaddr *addr, socklen_t *addrlen)
{
    int ret;
    ret = recvfrom(sockfd, buf, len, flags, addr, addrlen);
    if (ret < 0) {
        errno = socket_error();
    }
    return ret;
}