/*
 * (c) 2017 Stefano Stabellini <stefano@aporeto.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/wait.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/request_sock.h>

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/pvcalls.h>

#define PVCALLS_VERSIONS "1"
#define MAX_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER

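/*
 * Data ring geometry, as used by pvcalls_new_active_socket() below: an
 * active socket maps 1 << ring_order grant pages of data ring, of which
 * the first XEN_FLEX_RING_SIZE(ring_order) bytes serve as the "in"
 * buffer and the remainder as the "out" buffer.
 */
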
struct pvcalls_back_global {
	struct list_head frontends;
	struct semaphore frontends_lock;
} pvcalls_back_global;

/*
 * Per-frontend data structure. It contains pointers to the command
 * ring, its event channel, a list of active sockets and a tree of
 * passive sockets.
 */
struct pvcalls_fedata {
	struct list_head list;
	struct xenbus_device *dev;
	struct xen_pvcalls_sring *sring;
	struct xen_pvcalls_back_ring ring;
	int irq;
	struct list_head socket_mappings;
	struct radix_tree_root socketpass_mappings;
	struct semaphore socket_lock;
};

struct pvcalls_ioworker {
	struct work_struct register_work;
	struct workqueue_struct *wq;
};

struct sock_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *sockpass;
	struct socket *sock;
	uint64_t id;
	grant_ref_t ref;
	struct pvcalls_data_intf *ring;
	void *bytes;
	struct pvcalls_data data;
	uint32_t ring_order;
	int irq;
	atomic_t read;
	atomic_t write;
	atomic_t io;
	atomic_t release;
	void (*saved_data_ready)(struct sock *sk);
	struct pvcalls_ioworker ioworker;
};

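/*
 * Per-passive (listening) socket state. "reqcopy" parks the single
 * in-flight accept or poll request, serialized by copy_lock; the
 * current implementation supports one such request per socket at a
 * time (see pvcalls_back_accept() and pvcalls_back_poll()).
 */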
struct sockpass_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	uint64_t id;
	struct xen_pvcalls_request reqcopy;
	spinlock_t copy_lock;
	struct workqueue_struct *wq;
	struct work_struct register_work;
	void (*saved_data_ready)(struct sock *sk);
};

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map);

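/*
 * Frontend-bound ("in") data path. The backend produces at in_prod,
 * copying bytes from the kernel socket into the ring; the frontend
 * consumes at in_cons. The barriers below order index accesses against
 * the payload, following the usual Xen shared-ring protocol.
 */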
static void pvcalls_conn_back_read(void *opaque)
{
	struct sock_mapping *map = (struct sock_mapping *)opaque;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, wanted, array_size, masked_prod, masked_cons;
	int32_t error;
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	unsigned long flags;
	int ret;

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	cons = intf->in_cons;
	prod = intf->in_prod;
	error = intf->in_error;
	/* read the indexes first, then deal with the data */
	virt_mb();

	if (error)
		return;

	size = pvcalls_queued(prod, cons, array_size);
	if (size >= array_size)
		return;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
		atomic_set(&map->read, 0);
		spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
				       flags);
		return;
	}
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
	wanted = array_size - size;
	masked_prod = pvcalls_mask(prod, array_size);
	masked_cons = pvcalls_mask(cons, array_size);

	memset(&msg, 0, sizeof(msg));
	msg.msg_iter.type = ITER_KVEC|WRITE;
	msg.msg_iter.count = wanted;
	if (masked_prod < masked_cons) {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = wanted;
		msg.msg_iter.kvec = vec;
		msg.msg_iter.nr_segs = 1;
	} else {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = array_size - masked_prod;
		vec[1].iov_base = data->in;
		vec[1].iov_len = wanted - vec[0].iov_len;
		msg.msg_iter.kvec = vec;
		msg.msg_iter.nr_segs = 2;
	}

	atomic_set(&map->read, 0);
	ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
	WARN_ON(ret > wanted);
	if (ret == -EAGAIN) /* shouldn't happen */
		return;
	if (!ret)
		ret = -ENOTCONN;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue))
		atomic_inc(&map->read);
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);

	/* write the data, then modify the indexes */
	virt_wmb();
	if (ret < 0)
		intf->in_error = ret;
	else
		intf->in_prod = prod + ret;
	/* update the indexes, then notify the other end */
	virt_wmb();
	notify_remote_via_irq(map->irq);

	return;
}

static void pvcalls_conn_back_write(struct sock_mapping *map)
{
	/* Not yet implemented: drain the "out" ring into the socket. */
}

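/*
 * I/O worker, one per active socket. Wakeup paths bump ->read or
 * ->write and then ->io before queueing the work, so the drain loop
 * below cannot miss a request that races with a running pass;
 * ->release asks the worker to bail out during teardown.
 */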
static void pvcalls_back_ioworker(struct work_struct *work)
{
	struct pvcalls_ioworker *ioworker = container_of(work,
		struct pvcalls_ioworker, register_work);
	struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
		ioworker);

	while (atomic_read(&map->io) > 0) {
		if (atomic_read(&map->release) > 0) {
			atomic_set(&map->release, 0);
			return;
		}

		if (atomic_read(&map->read) > 0)
			pvcalls_conn_back_read(map);
		if (atomic_read(&map->write) > 0)
			pvcalls_conn_back_write(map);

		atomic_dec(&map->io);
	}
}

static int pvcalls_back_socket(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.socket.domain != AF_INET ||
	    req->u.socket.type != SOCK_STREAM ||
	    (req->u.socket.protocol != IPPROTO_IP &&
	     req->u.socket.protocol != AF_INET))
		ret = -EAFNOSUPPORT;
	else
		ret = 0;

	/* leave the actual socket allocation for later */

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.socket.id = req->u.socket.id;
	rsp->ret = ret;

	return 0;
}

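/*
 * Socket callbacks, installed under sk_callback_lock in
 * pvcalls_new_active_socket(). sk_user_data points back at the
 * sock_mapping; the NULL checks guard against invocations racing with
 * release, which clears sk_user_data before tearing the mapping down.
 */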
static void pvcalls_sk_state_change(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;
	struct pvcalls_data_intf *intf;

	if (map == NULL)
		return;

	intf = map->ring;
	intf->in_error = -ENOTCONN;
	notify_remote_via_irq(map->irq);
}

static void pvcalls_sk_data_ready(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;
	struct pvcalls_ioworker *iow;

	if (map == NULL)
		return;

	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);
}

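/*
 * Common setup for a newly connected or accepted socket: map the
 * indexes page, validate ring_order before using it (it lives in
 * frontend-writable memory, hence the read barrier and MAX_RING_ORDER
 * check), map the data ring, bind the per-connection event channel and
 * hook the sk_* callbacks.
 */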
static struct sock_mapping *pvcalls_new_active_socket(
		struct pvcalls_fedata *fedata,
		uint64_t id,
		grant_ref_t ref,
		uint32_t evtchn,
		struct socket *sock)
{
	int ret;
	struct sock_mapping *map;
	void *page;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return NULL;

	map->fedata = fedata;
	map->sock = sock;
	map->id = id;
	map->ref = ref;

	ret = xenbus_map_ring_valloc(fedata->dev, &ref, 1, &page);
	if (ret < 0)
		goto out;
	map->ring = page;
	map->ring_order = map->ring->ring_order;
	/* first read the order, then map the data ring */
	virt_rmb();
	if (map->ring_order > MAX_RING_ORDER) {
		pr_warn("%s frontend requested ring_order %u, which is > MAX (%u)\n",
			__func__, map->ring_order, MAX_RING_ORDER);
		goto out;
	}
	ret = xenbus_map_ring_valloc(fedata->dev, map->ring->ref,
				     (1 << map->ring_order), &page);
	if (ret < 0)
		goto out;
	map->bytes = page;

	ret = bind_interdomain_evtchn_to_irqhandler(fedata->dev->otherend_id,
						    evtchn,
						    pvcalls_back_conn_event,
						    0,
						    "pvcalls-backend",
						    map);
	if (ret < 0)
		goto out;
	map->irq = ret;

	map->data.in = map->bytes;
	map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);

	map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1);
	if (!map->ioworker.wq)
		goto out;
	atomic_set(&map->io, 1);
	INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker);

	down(&fedata->socket_lock);
	list_add_tail(&map->list, &fedata->socket_mappings);
	up(&fedata->socket_lock);

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_sk_data_ready;
	map->sock->sk->sk_state_change = pvcalls_sk_state_change;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

	return map;
out:
	/*
	 * Unwind only what was set up so far: the map was never added to
	 * socket_mappings (so there is nothing to list_del()), and the
	 * caller keeps ownership of sock and must release it.
	 */
	if (map->irq > 0)
		unbind_from_irqhandler(map->irq, map);
	if (map->bytes)
		xenbus_unmap_ring_vfree(fedata->dev, map->bytes);
	if (map->ring)
		xenbus_unmap_ring_vfree(fedata->dev, (void *)map->ring);
	kfree(map);
	return NULL;
}

static int pvcalls_back_connect(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct socket *sock;
	struct sock_mapping *map;
	struct xen_pvcalls_response *rsp;
	struct sockaddr *sa = (struct sockaddr *)&req->u.connect.addr;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.connect.len < sizeof(sa->sa_family) ||
	    req->u.connect.len > sizeof(req->u.connect.addr) ||
	    sa->sa_family != AF_INET)
		goto out;

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
	if (ret < 0)
		goto out;
	ret = inet_stream_connect(sock, sa, req->u.connect.len, 0);
	if (ret < 0) {
		sock_release(sock);
		goto out;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.connect.id,
					req->u.connect.ref,
					req->u.connect.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		/* no mapping was created; drop our reference to the socket */
		sock_release(sock);
	}

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.connect.id = req->u.connect.id;
	rsp->ret = ret;

	return 0;
}

static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map)
{
	disable_irq(map->irq);
	if (map->sock->sk != NULL) {
		write_lock_bh(&map->sock->sk->sk_callback_lock);
		map->sock->sk->sk_user_data = NULL;
		map->sock->sk->sk_data_ready = map->saved_data_ready;
		write_unlock_bh(&map->sock->sk->sk_callback_lock);
	}

	atomic_set(&map->release, 1);
	flush_work(&map->ioworker.register_work);

	xenbus_unmap_ring_vfree(dev, map->bytes);
	xenbus_unmap_ring_vfree(dev, (void *)map->ring);
	unbind_from_irqhandler(map->irq, map);

	sock_release(map->sock);
	kfree(map);

	return 0;
}

static int pvcalls_back_release_passive(struct xenbus_device *dev,
					struct pvcalls_fedata *fedata,
					struct sockpass_mapping *mappass)
{
	if (mappass->sock->sk != NULL) {
		write_lock_bh(&mappass->sock->sk->sk_callback_lock);
		mappass->sock->sk->sk_user_data = NULL;
		mappass->sock->sk->sk_data_ready = mappass->saved_data_ready;
		write_unlock_bh(&mappass->sock->sk->sk_callback_lock);
	}
	sock_release(mappass->sock);
	flush_workqueue(mappass->wq);
	destroy_workqueue(mappass->wq);
	kfree(mappass);

	return 0;
}

static int pvcalls_back_release(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	int ret = 0;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		if (map->id == req->u.release.id) {
			list_del(&map->list);
			up(&fedata->socket_lock);
			ret = pvcalls_back_release_active(dev, fedata, map);
			goto out;
		}
	}
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.release.id);
	if (mappass != NULL) {
		radix_tree_delete(&fedata->socketpass_mappings, mappass->id);
		up(&fedata->socket_lock);
		ret = pvcalls_back_release_passive(dev, fedata, mappass);
	} else
		up(&fedata->socket_lock);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->u.release.id = req->u.release.id;
	rsp->cmd = req->cmd;
	rsp->ret = ret;
	return 0;
}

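/*
 * Deferred accept, run from the sockpass workqueue. The request parked
 * in reqcopy by pvcalls_back_accept() is consumed here and the response
 * pushed from this worker; reqcopy.cmd is reset to 0 at the end to free
 * the slot for the next accept or poll.
 */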
static void __pvcalls_back_accept(struct work_struct *work)
{
	struct sockpass_mapping *mappass = container_of(
		work, struct sockpass_mapping, register_work);
	struct sock_mapping *map;
	struct pvcalls_ioworker *iow;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	struct xen_pvcalls_response *rsp;
	struct xen_pvcalls_request *req;
	int notify;
	int ret = -EINVAL;
	unsigned long flags;

	fedata = mappass->fedata;
	/*
	 * __pvcalls_back_accept can race against pvcalls_back_accept.
	 * We only need to check the value of "cmd" on read. It could be
	 * done atomically, but to simplify the code on the write side, we
	 * use a spinlock.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	req = &mappass->reqcopy;
	if (req->cmd != PVCALLS_ACCEPT) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	sock = sock_alloc();
	if (sock == NULL)
		goto out_error;
	sock->type = mappass->sock->type;
	sock->ops = mappass->sock->ops;

	ret = inet_accept(mappass->sock, sock, O_NONBLOCK, true);
	if (ret == -EAGAIN) {
		sock_release(sock);
		goto out_error;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.accept.id_new,
					req->u.accept.ref,
					req->u.accept.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		sock_release(sock);
		goto out_error;
	}

	map->sockpass = mappass;
	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
	if (notify)
		notify_remote_via_irq(fedata->irq);

	mappass->reqcopy.cmd = 0;
}

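/*
 * data_ready on a listening socket. If a poll request is parked in
 * reqcopy, answer it directly from here; otherwise defer to the accept
 * worker on the per-sockpass workqueue.
 */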
static void pvcalls_pass_sk_data_ready(struct sock *sock)
{
	struct sockpass_mapping *mappass = sock->sk_user_data;
	struct pvcalls_fedata *fedata;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;
	int notify;

	if (mappass == NULL)
		return;

	fedata = mappass->fedata;
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd == PVCALLS_POLL) {
		rsp = RING_GET_RESPONSE(&fedata->ring,
					fedata->ring.rsp_prod_pvt++);
		rsp->req_id = mappass->reqcopy.req_id;
		rsp->u.poll.id = mappass->reqcopy.u.poll.id;
		rsp->cmd = mappass->reqcopy.cmd;
		rsp->ret = 0;

		mappass->reqcopy.cmd = 0;
		spin_unlock_irqrestore(&mappass->copy_lock, flags);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
		if (notify)
			notify_remote_via_irq(mappass->fedata->irq);
	} else {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		queue_work(mappass->wq, &mappass->register_work);
	}
}

static int pvcalls_back_bind(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&map->register_work, __pvcalls_back_accept);
	spin_lock_init(&map->copy_lock);
	map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
	if (!map->wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &map->sock);
	if (ret < 0)
		goto out;

	ret = inet_bind(map->sock, (struct sockaddr *)&req->u.bind.addr,
			req->u.bind.len);
	if (ret < 0)
		goto out;

	map->fedata = fedata;
	map->id = req->u.bind.id;

	down(&fedata->socket_lock);
	ret = radix_tree_insert(&fedata->socketpass_mappings, map->id,
				map);
	up(&fedata->socket_lock);
	if (ret)
		goto out;

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

out:
	if (ret) {
		if (map && map->sock)
			sock_release(map->sock);
		if (map && map->wq)
			destroy_workqueue(map->wq);
		kfree(map);
	}
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.bind.id = req->u.bind.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_listen(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	map = radix_tree_lookup(&fedata->socketpass_mappings, req->u.listen.id);
	up(&fedata->socket_lock);
	if (map == NULL)
		goto out;

	ret = inet_listen(map->sock, req->u.listen.backlog);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.listen.id = req->u.listen.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_accept(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	int ret = -EINVAL;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.accept.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		goto out_error;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		ret = -EINTR;
		goto out_error;
	}

	mappass->reqcopy = *req;
	spin_unlock_irqrestore(&mappass->copy_lock, flags);
	queue_work(mappass->wq, &mappass->register_work);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_poll(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	struct xen_pvcalls_response *rsp;
	struct inet_connection_sock *icsk;
	struct request_sock_queue *queue;
	unsigned long flags;
	int ret;
	bool data;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.poll.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		return -EINVAL;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		ret = -EINTR;
		goto out;
	}

	mappass->reqcopy = *req;
	icsk = inet_csk(mappass->sock->sk);
	queue = &icsk->icsk_accept_queue;
	data = queue->rskq_accept_head != NULL;
	if (data) {
		mappass->reqcopy.cmd = 0;
		ret = 0;
		goto out;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out:
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.poll.id = req->u.poll.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
				   struct xen_pvcalls_request *req)
{
	int ret = 0;

	switch (req->cmd) {
	case PVCALLS_SOCKET:
		ret = pvcalls_back_socket(dev, req);
		break;
	case PVCALLS_CONNECT:
		ret = pvcalls_back_connect(dev, req);
		break;
	case PVCALLS_RELEASE:
		ret = pvcalls_back_release(dev, req);
		break;
	case PVCALLS_BIND:
		ret = pvcalls_back_bind(dev, req);
		break;
	case PVCALLS_LISTEN:
		ret = pvcalls_back_listen(dev, req);
		break;
	case PVCALLS_ACCEPT:
		ret = pvcalls_back_accept(dev, req);
		break;
	case PVCALLS_POLL:
		ret = pvcalls_back_poll(dev, req);
		break;
	default:
	{
		struct pvcalls_fedata *fedata;
		struct xen_pvcalls_response *rsp;

		fedata = dev_get_drvdata(&dev->dev);
		rsp = RING_GET_RESPONSE(
				&fedata->ring, fedata->ring.rsp_prod_pvt++);
		rsp->req_id = req->req_id;
		rsp->cmd = req->cmd;
		rsp->ret = -ENOTSUPP;
		break;
	}
	}
	return ret;
}

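/*
 * Command ring service loop. Requests are copied out of the shared
 * ring (RING_COPY_REQUEST) before being handled so the frontend cannot
 * change them under us. A handler returning nonzero (accept and poll
 * defer their responses) pushes nothing here.
 */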
static void pvcalls_back_work(struct pvcalls_fedata *fedata)
{
	int notify, notify_all = 0, more = 1;
	struct xen_pvcalls_request req;
	struct xenbus_device *dev = fedata->dev;

	while (more) {
		while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
			RING_COPY_REQUEST(&fedata->ring,
					  fedata->ring.req_cons++,
					  &req);

			if (!pvcalls_back_handle_cmd(dev, &req)) {
				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
					&fedata->ring, notify);
				notify_all += notify;
			}
		}

		if (notify_all) {
			notify_remote_via_irq(fedata->irq);
			notify_all = 0;
		}

		RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
	}
}

static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
{
	struct xenbus_device *dev = dev_id;
	struct pvcalls_fedata *fedata = NULL;

	if (dev == NULL)
		return IRQ_HANDLED;

	fedata = dev_get_drvdata(&dev->dev);
	if (fedata == NULL)
		return IRQ_HANDLED;

	pvcalls_back_work(fedata);
	return IRQ_HANDLED;
}

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
{
	/* Not yet implemented: kick the ioworker for the "out" data path. */
	return IRQ_HANDLED;
}

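/*
 * Frontend handshake: read "port" and "ring-ref" out of the frontend's
 * xenstore directory, bind the interdomain event channel and map the
 * shared command ring, then register the frontend on the global list.
 */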
static int backend_connect(struct xenbus_device *dev)
{
	int err, evtchn;
	grant_ref_t ring_ref;
	struct pvcalls_fedata *fedata = NULL;

	fedata = kzalloc(sizeof(struct pvcalls_fedata), GFP_KERNEL);
	if (!fedata)
		return -ENOMEM;

	fedata->irq = -1;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/port",
				 dev->otherend);
		goto error;
	}

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
				 dev->otherend);
		goto error;
	}

	err = bind_interdomain_evtchn_to_irq(dev->otherend_id, evtchn);
	if (err < 0)
		goto error;
	fedata->irq = err;

	err = request_threaded_irq(fedata->irq, NULL, pvcalls_back_event,
				   IRQF_ONESHOT, "pvcalls-back", dev);
	if (err < 0)
		goto error;

	err = xenbus_map_ring_valloc(dev, &ring_ref, 1,
				     (void **)&fedata->sring);
	if (err < 0)
		goto error;

	BACK_RING_INIT(&fedata->ring, fedata->sring, XEN_PAGE_SIZE * 1);
	fedata->dev = dev;

	INIT_LIST_HEAD(&fedata->socket_mappings);
	INIT_RADIX_TREE(&fedata->socketpass_mappings, GFP_KERNEL);
	sema_init(&fedata->socket_lock, 1);
	dev_set_drvdata(&dev->dev, fedata);

	down(&pvcalls_back_global.frontends_lock);
	list_add_tail(&fedata->list, &pvcalls_back_global.frontends);
	up(&pvcalls_back_global.frontends_lock);

	return 0;

error:
	if (fedata->irq >= 0)
		unbind_from_irqhandler(fedata->irq, dev);
	if (fedata->sring != NULL)
		xenbus_unmap_ring_vfree(dev, fedata->sring);
	kfree(fedata);
	return err;
}

static int backend_disconnect(struct xenbus_device *dev)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	struct radix_tree_iter iter;
	void **slot;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		list_del(&map->list);
		pvcalls_back_release_active(dev, fedata, map);
	}

	radix_tree_for_each_slot(slot, &fedata->socketpass_mappings, &iter, 0) {
		mappass = radix_tree_deref_slot(slot);
		if (!mappass)
			continue;
		if (radix_tree_exception(mappass)) {
			if (radix_tree_deref_retry(mappass))
				slot = radix_tree_iter_retry(&iter);
		} else {
			radix_tree_delete(&fedata->socketpass_mappings,
					  mappass->id);
			pvcalls_back_release_passive(dev, fedata, mappass);
		}
	}
	up(&fedata->socket_lock);

	unbind_from_irqhandler(fedata->irq, dev);
	xenbus_unmap_ring_vfree(dev, fedata->sring);

	list_del(&fedata->list);
	kfree(fedata);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static int pvcalls_back_probe(struct xenbus_device *dev,
			      const struct xenbus_device_id *id)
{
	int err, abort;
	struct xenbus_transaction xbt;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("%s cannot create xenstore transaction\n", __func__);
		return err;
	}

	err = xenbus_printf(xbt, dev->nodename, "versions", "%s",
			    PVCALLS_VERSIONS);
	if (err) {
		pr_warn("%s write out 'versions' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "max-page-order", "%u",
			    MAX_RING_ORDER);
	if (err) {
		pr_warn("%s write out 'max-page-order' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "function-calls",
			    XENBUS_FUNCTIONS_CALLS);
	if (err) {
		pr_warn("%s write out 'function-calls' failed\n", __func__);
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		pr_warn("%s cannot complete xenstore transaction\n", __func__);
		return err;
	}

	if (abort)
		return -EFAULT;

	xenbus_switch_state(dev, XenbusStateInitWait);

	return 0;
}

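/*
 * Walk dev->state toward the requested state one transition at a time,
 * so intermediate states (e.g. Connected -> Closing -> Closed) are all
 * published to xenstore in order.
 */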
static void set_backend_state(struct xenbus_device *dev,
			      enum xenbus_state state)
{
	while (dev->state != state) {
		switch (dev->state) {
		case XenbusStateClosed:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
				xenbus_switch_state(dev, XenbusStateInitWait);
				break;
			case XenbusStateClosing:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				__WARN();
			}
			break;
		case XenbusStateInitWait:
		case XenbusStateInitialised:
			switch (state) {
			case XenbusStateConnected:
				backend_connect(dev);
				xenbus_switch_state(dev, XenbusStateConnected);
				break;
			case XenbusStateClosing:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				__WARN();
			}
			break;
		case XenbusStateConnected:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateClosing:
			case XenbusStateClosed:
				down(&pvcalls_back_global.frontends_lock);
				backend_disconnect(dev);
				up(&pvcalls_back_global.frontends_lock);
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				__WARN();
			}
			break;
		case XenbusStateClosing:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosed);
				break;
			default:
				__WARN();
			}
			break;
		default:
			__WARN();
		}
	}
}

static void pvcalls_back_changed(struct xenbus_device *dev,
				 enum xenbus_state frontend_state)
{
	switch (frontend_state) {
	case XenbusStateInitialising:
		set_backend_state(dev, XenbusStateInitWait);
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		set_backend_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		set_backend_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		set_backend_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		device_unregister(&dev->dev);
		break;
	case XenbusStateUnknown:
		set_backend_state(dev, XenbusStateClosed);
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

static int pvcalls_back_remove(struct xenbus_device *dev)
{
	return 0;
}

static int pvcalls_back_uevent(struct xenbus_device *xdev,
			       struct kobj_uevent_env *env)
{
	return 0;
}

static const struct xenbus_device_id pvcalls_back_ids[] = {
	{ "pvcalls" },
	{ "" }
};

static struct xenbus_driver pvcalls_back_driver = {
	.ids = pvcalls_back_ids,
	.probe = pvcalls_back_probe,
	.remove = pvcalls_back_remove,
	.uevent = pvcalls_back_uevent,
	.otherend_changed = pvcalls_back_changed,
};

static int __init pvcalls_back_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	ret = xenbus_register_backend(&pvcalls_back_driver);
	if (ret < 0)
		return ret;

	sema_init(&pvcalls_back_global.frontends_lock, 1);
	INIT_LIST_HEAD(&pvcalls_back_global.frontends);
	return 0;
}
module_init(pvcalls_back_init);

static void __exit pvcalls_back_fin(void)
{
	struct pvcalls_fedata *fedata, *nfedata;

	down(&pvcalls_back_global.frontends_lock);
	list_for_each_entry_safe(fedata, nfedata,
				 &pvcalls_back_global.frontends, list) {
		backend_disconnect(fedata->dev);
	}
	up(&pvcalls_back_global.frontends_lock);

	xenbus_unregister_driver(&pvcalls_back_driver);
}

module_exit(pvcalls_back_fin);
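
/*
 * Module tags; author contact taken from the copyright header above.
 */
MODULE_DESCRIPTION("Xen PV Calls backend driver");
MODULE_AUTHOR("Stefano Stabellini <stefano@aporeto.com>");
MODULE_LICENSE("GPL");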