]> git.proxmox.com Git - qemu.git/blame - posix-aio-compat.c
Error checking
[qemu.git] / posix-aio-compat.c
CommitLineData
3c529d93
AL
1/*
2 * QEMU posix-aio emulation
3 *
4 * Copyright IBM, Corp. 2008
5 *
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14#include <pthread.h>
15#include <unistd.h>
16#include <errno.h>
17#include <sys/time.h>
8653c015 18#include <string.h>
19#include <stdlib.h>
20#include <stdio.h>
3c529d93
AL
21#include "osdep.h"
22
23#include "posix-aio-compat.h"
24
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;   /* guards all pool state below */
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;     /* signalled when a request is queued */
static pthread_t thread_id;       /* id of the most recently spawned (detached) worker */
static int max_threads = 64;      /* hard cap on concurrent worker threads */
static int cur_threads = 0;       /* workers currently alive */
static int idle_threads = 0;      /* workers blocked waiting for work */
static TAILQ_HEAD(, qemu_paiocb) request_list;  /* FIFO of pending requests */
32
/* Report which operation failed, with the error's text, then abort. */
static void die2(int err, const char *what)
{
    const char *msg = strerror(err);

    fprintf(stderr, "%s failed: %s\n", what, msg);
    abort();
}
38
/* Abort with the current errno as the failure reason. */
static void die(const char *what)
{
    int saved_errno = errno;

    die2(saved_errno, what);
}
43
/* pthread_mutex_lock that aborts on failure instead of returning an error. */
static void mutex_lock(pthread_mutex_t *mutex)
{
    int err = pthread_mutex_lock(mutex);

    if (err != 0)
        die2(err, "pthread_mutex_lock");
}
49
/* pthread_mutex_unlock that aborts on failure instead of returning an error. */
static void mutex_unlock(pthread_mutex_t *mutex)
{
    int err = pthread_mutex_unlock(mutex);

    if (err != 0)
        die2(err, "pthread_mutex_unlock");
}
55
/*
 * pthread_cond_timedwait wrapper: ETIMEDOUT is passed through to the
 * caller, any other failure aborts.
 */
static int cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
                          struct timespec *ts)
{
    int err = pthread_cond_timedwait(cond, mutex, ts);

    if (err != 0 && err != ETIMEDOUT)
        die2(err, "pthread_cond_timedwait");
    return err;
}
63
/* pthread_cond_broadcast that aborts on failure. */
static void cond_broadcast(pthread_cond_t *cond)
{
    int err = pthread_cond_broadcast(cond);

    if (err != 0)
        die2(err, "pthread_cond_broadcast");
}
69
/* pthread_create that aborts on failure. */
static void thread_create(pthread_t *thread, pthread_attr_t *attr,
                          void *(*start_routine)(void*), void *arg)
{
    int err = pthread_create(thread, attr, start_routine, arg);

    if (err != 0)
        die2(err, "pthread_create");
}
76
3c529d93
AL
77static void *aio_thread(void *unused)
78{
79 sigset_t set;
80
81 /* block all signals */
8653c015 82 if (sigfillset(&set)) die("sigfillset");
83 if (sigprocmask(SIG_BLOCK, &set, NULL)) die("sigprocmask");
3c529d93
AL
84
85 while (1) {
86 struct qemu_paiocb *aiocb;
87 size_t offset;
88 int ret = 0;
89
8653c015 90 mutex_lock(&lock);
3c529d93
AL
91
92 while (TAILQ_EMPTY(&request_list) &&
93 !(ret == ETIMEDOUT)) {
94 struct timespec ts = { 0 };
95 qemu_timeval tv;
96
97 qemu_gettimeofday(&tv);
98 ts.tv_sec = tv.tv_sec + 10;
8653c015 99 ret = cond_timedwait(&cond, &lock, &ts);
3c529d93
AL
100 }
101
102 if (ret == ETIMEDOUT)
103 break;
104
105 aiocb = TAILQ_FIRST(&request_list);
106 TAILQ_REMOVE(&request_list, aiocb, node);
107
108 offset = 0;
109 aiocb->active = 1;
110
111 idle_threads--;
8653c015 112 mutex_unlock(&lock);
3c529d93
AL
113
114 while (offset < aiocb->aio_nbytes) {
115 ssize_t len;
116
117 if (aiocb->is_write)
118 len = pwrite(aiocb->aio_fildes,
119 (const char *)aiocb->aio_buf + offset,
120 aiocb->aio_nbytes - offset,
121 aiocb->aio_offset + offset);
122 else
123 len = pread(aiocb->aio_fildes,
124 (char *)aiocb->aio_buf + offset,
125 aiocb->aio_nbytes - offset,
126 aiocb->aio_offset + offset);
127
128 if (len == -1 && errno == EINTR)
129 continue;
130 else if (len == -1) {
f094a782 131 offset = -errno;
3c529d93
AL
132 break;
133 } else if (len == 0)
134 break;
135
136 offset += len;
3c529d93
AL
137 }
138
8653c015 139 mutex_lock(&lock);
f094a782 140 aiocb->ret = offset;
3c529d93 141 idle_threads++;
8653c015 142 mutex_unlock(&lock);
3c529d93 143
8653c015 144 if (kill(getpid(), aiocb->ev_signo)) die("kill failed");
3c529d93
AL
145 }
146
147 idle_threads--;
148 cur_threads--;
8653c015 149 mutex_unlock(&lock);
3c529d93
AL
150
151 return NULL;
152}
153
8653c015 154static void spawn_thread(void)
3c529d93 155{
3c529d93 156 int ret;
8653c015 157 pthread_attr_t attr;
3c529d93
AL
158
159 cur_threads++;
160 idle_threads++;
161
8653c015 162 ret = pthread_attr_init(&attr);
163 if (ret) die2 (ret, "pthread_attr_init");
164 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
165 if (ret) die2 (ret, "pthread_attr_setdetachstate");
166 thread_create(&thread_id, &attr, aio_thread, NULL);
167 ret = pthread_attr_destroy(&attr);
168 if (ret) die2 (ret, "pthread_attr_destroy");
3c529d93
AL
169}
170
171int qemu_paio_init(struct qemu_paioinit *aioinit)
172{
173 TAILQ_INIT(&request_list);
174
175 return 0;
176}
177
/*
 * Queue one request and make sure a worker exists to service it.
 * Always returns 0; completion is reported asynchronously through
 * aiocb->ret and the aiocb->ev_signo signal raised by the worker.
 */
static int qemu_paio_submit(struct qemu_paiocb *aiocb, int is_write)
{
    aiocb->is_write = is_write;
    aiocb->ret = -EINPROGRESS;   /* marks the request as pending */
    aiocb->active = 0;           /* not yet claimed: still cancellable */
    mutex_lock(&lock);
    /* spawn a new worker only when none is idle and the pool has room */
    if (idle_threads == 0 && cur_threads < max_threads)
        spawn_thread();
    TAILQ_INSERT_TAIL(&request_list, aiocb, node);
    mutex_unlock(&lock);
    /* wake any worker blocked in cond_timedwait */
    cond_broadcast(&cond);

    return 0;
}
192
/* Submit an asynchronous read request (is_write == 0). */
int qemu_paio_read(struct qemu_paiocb *aiocb)
{
    return qemu_paio_submit(aiocb, 0);
}
197
/* Submit an asynchronous write request (is_write == 1). */
int qemu_paio_write(struct qemu_paiocb *aiocb)
{
    return qemu_paio_submit(aiocb, 1);
}
202
203ssize_t qemu_paio_return(struct qemu_paiocb *aiocb)
204{
205 ssize_t ret;
206
8653c015 207 mutex_lock(&lock);
3c529d93 208 ret = aiocb->ret;
8653c015 209 mutex_unlock(&lock);
3c529d93
AL
210
211 return ret;
212}
213
/*
 * aio_error() analogue: 0 on success, a positive error code on
 * failure, EINPROGRESS while the request is still running.
 */
int qemu_paio_error(struct qemu_paiocb *aiocb)
{
    ssize_t status = qemu_paio_return(aiocb);

    return status < 0 ? -status : 0;
}
225
/*
 * Try to cancel a request.  Only a request still sitting in the queue
 * (not yet claimed by a worker, i.e. !active) can be cancelled.
 *
 * Returns QEMU_PAIO_CANCELED    - unlinked from the queue, ret = -ECANCELED
 *         QEMU_PAIO_NOTCANCELED - a worker is currently processing it
 *         QEMU_PAIO_ALLDONE     - it already completed
 */
int qemu_paio_cancel(int fd, struct qemu_paiocb *aiocb)
{
    int ret;

    mutex_lock(&lock);
    if (!aiocb->active) {
        /* still queued: safe to unlink and mark cancelled */
        TAILQ_REMOVE(&request_list, aiocb, node);
        aiocb->ret = -ECANCELED;
        ret = QEMU_PAIO_CANCELED;
    } else if (aiocb->ret == -EINPROGRESS)
        ret = QEMU_PAIO_NOTCANCELED;
    else
        ret = QEMU_PAIO_ALLDONE;
    mutex_unlock(&lock);

    return ret;
}