/*
 * (c) 2017 Stefano Stabellini <stefano@aporeto.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/wait.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/request_sock.h>

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/pvcalls.h>

#define PVCALLS_VERSIONS "1"
#define MAX_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER

struct pvcalls_back_global {
	struct list_head frontends;
	struct semaphore frontends_lock;
} pvcalls_back_global;

/*
 * Per-frontend data structure. It contains pointers to the command
 * ring, its event channel, a list of active sockets and a tree of
 * passive sockets.
 */
struct pvcalls_fedata {
	struct list_head list;
	struct xenbus_device *dev;
	struct xen_pvcalls_sring *sring;
	struct xen_pvcalls_back_ring ring;
	int irq;
	struct list_head socket_mappings;
	struct radix_tree_root socketpass_mappings;
	struct semaphore socket_lock;
};

struct pvcalls_ioworker {
	struct work_struct register_work;
	struct workqueue_struct *wq;
};

struct sock_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *sockpass;
	struct socket *sock;
	uint64_t id;
	grant_ref_t ref;
	struct pvcalls_data_intf *ring;
	void *bytes;
	struct pvcalls_data data;
	uint32_t ring_order;
	int irq;
	atomic_t read;
	atomic_t write;
	atomic_t io;
	atomic_t release;
	void (*saved_data_ready)(struct sock *sk);
	struct pvcalls_ioworker ioworker;
};

struct sockpass_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	uint64_t id;
	struct xen_pvcalls_request reqcopy;
	spinlock_t copy_lock;
	struct workqueue_struct *wq;
	struct work_struct register_work;
	void (*saved_data_ready)(struct sock *sk);
};

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map);

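/*
 * Move data from the kernel socket into the shared "in" data ring:
 * read at most the free space left in the ring, then update in_prod
 * (or in_error on failure) and notify the frontend.
 */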
static void pvcalls_conn_back_read(void *opaque)
{
	struct sock_mapping *map = (struct sock_mapping *)opaque;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, wanted, array_size, masked_prod, masked_cons;
	int32_t error;
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	unsigned long flags;
	int ret;

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	cons = intf->in_cons;
	prod = intf->in_prod;
	error = intf->in_error;
	/* read the indexes first, then deal with the data */
	virt_mb();

	if (error)
		return;

	size = pvcalls_queued(prod, cons, array_size);
	if (size >= array_size)
		return;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
		atomic_set(&map->read, 0);
		spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
				       flags);
		return;
	}
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
	wanted = array_size - size;
	masked_prod = pvcalls_mask(prod, array_size);
	masked_cons = pvcalls_mask(cons, array_size);

	memset(&msg, 0, sizeof(msg));
	msg.msg_iter.type = ITER_KVEC|WRITE;
	msg.msg_iter.count = wanted;
	if (masked_prod < masked_cons) {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = wanted;
		msg.msg_iter.kvec = vec;
		msg.msg_iter.nr_segs = 1;
	} else {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = array_size - masked_prod;
		vec[1].iov_base = data->in;
		vec[1].iov_len = wanted - vec[0].iov_len;
		msg.msg_iter.kvec = vec;
		msg.msg_iter.nr_segs = 2;
	}

	atomic_set(&map->read, 0);
	ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
	WARN_ON(ret > wanted);
	if (ret == -EAGAIN) /* shouldn't happen */
		return;
	if (!ret)
		ret = -ENOTCONN;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue))
		atomic_inc(&map->read);
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);

	/* write the data, then modify the indexes */
	virt_wmb();
	if (ret < 0)
		intf->in_error = ret;
	else
		intf->in_prod = prod + ret;
	/* update the indexes, then notify the other end */
	virt_wmb();
	notify_remote_via_irq(map->irq);

	return;
}

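/*
 * Drain the shared "out" data ring into the kernel socket with
 * inet_sendmsg(). On a short or failed send, map->write and map->io are
 * bumped again so the ioworker retries the remaining data.
 */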
static void pvcalls_conn_back_write(struct sock_mapping *map)
{
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, array_size;
	int ret;

	cons = intf->out_cons;
	prod = intf->out_prod;
	/* read the indexes before dealing with the data */
	virt_mb();

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	size = pvcalls_queued(prod, cons, array_size);
	if (size == 0)
		return;

	memset(&msg, 0, sizeof(msg));
	msg.msg_flags |= MSG_DONTWAIT;
	msg.msg_iter.type = ITER_KVEC|READ;
	msg.msg_iter.count = size;
	if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = size;
		msg.msg_iter.kvec = vec;
		msg.msg_iter.nr_segs = 1;
	} else {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
		vec[1].iov_base = data->out;
		vec[1].iov_len = size - vec[0].iov_len;
		msg.msg_iter.kvec = vec;
		msg.msg_iter.nr_segs = 2;
	}

	atomic_set(&map->write, 0);
	ret = inet_sendmsg(map->sock, &msg, size);
	if (ret == -EAGAIN || (ret >= 0 && ret < size)) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
	}
	if (ret == -EAGAIN)
		return;

	/* write the data, then update the indexes */
	virt_wmb();
	if (ret < 0) {
		intf->out_error = ret;
	} else {
		intf->out_error = 0;
		intf->out_cons = cons + ret;
		prod = intf->out_prod;
	}
	/* update the indexes, then notify the other end */
	virt_wmb();
	if (prod != cons + ret)
		atomic_inc(&map->write);
	notify_remote_via_irq(map->irq);
}

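/*
 * Workqueue handler for an active socket: while I/O is pending, service
 * reads and writes, bailing out early if the mapping is being released.
 */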
static void pvcalls_back_ioworker(struct work_struct *work)
{
	struct pvcalls_ioworker *ioworker = container_of(work,
		struct pvcalls_ioworker, register_work);
	struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
		ioworker);

	while (atomic_read(&map->io) > 0) {
		if (atomic_read(&map->release) > 0) {
			atomic_set(&map->release, 0);
			return;
		}

		if (atomic_read(&map->read) > 0)
			pvcalls_conn_back_read(map);
		if (atomic_read(&map->write) > 0)
			pvcalls_conn_back_write(map);

		atomic_dec(&map->io);
	}
}

static int pvcalls_back_socket(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.socket.domain != AF_INET ||
	    req->u.socket.type != SOCK_STREAM ||
	    (req->u.socket.protocol != IPPROTO_IP &&
	     req->u.socket.protocol != AF_INET))
		ret = -EAFNOSUPPORT;
	else
		ret = 0;

	/* leave the actual socket allocation for later */

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.socket.id = req->u.socket.id;
	rsp->ret = ret;

	return 0;
}

static void pvcalls_sk_state_change(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;
	struct pvcalls_data_intf *intf;

	if (map == NULL)
		return;

	intf = map->ring;
	intf->in_error = -ENOTCONN;
	notify_remote_via_irq(map->irq);
}

static void pvcalls_sk_data_ready(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;
	struct pvcalls_ioworker *iow;

	if (map == NULL)
		return;

	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);
}

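/*
 * Set up an active socket mapping: map the indexes page and data ring
 * granted by the frontend, bind the per-connection event channel,
 * create the ioworker workqueue and install the sk callbacks.
 */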
static struct sock_mapping *pvcalls_new_active_socket(
		struct pvcalls_fedata *fedata,
		uint64_t id,
		grant_ref_t ref,
		uint32_t evtchn,
		struct socket *sock)
{
	int ret;
	struct sock_mapping *map;
	void *page;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return NULL;

	map->fedata = fedata;
	map->sock = sock;
	map->id = id;
	map->ref = ref;

	ret = xenbus_map_ring_valloc(fedata->dev, &ref, 1, &page);
	if (ret < 0)
		goto out;
	map->ring = page;
	map->ring_order = map->ring->ring_order;
	/* first read the order, then map the data ring */
	virt_rmb();
	if (map->ring_order > MAX_RING_ORDER) {
		pr_warn("%s frontend requested ring_order %u, which is > MAX (%u)\n",
			__func__, map->ring_order, MAX_RING_ORDER);
		goto out;
	}
	ret = xenbus_map_ring_valloc(fedata->dev, map->ring->ref,
				     (1 << map->ring_order), &page);
	if (ret < 0)
		goto out;
	map->bytes = page;

	ret = bind_interdomain_evtchn_to_irqhandler(fedata->dev->otherend_id,
						    evtchn,
						    pvcalls_back_conn_event,
						    0,
						    "pvcalls-backend",
						    map);
	if (ret < 0)
		goto out;
	map->irq = ret;

	map->data.in = map->bytes;
	map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);

	map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1);
	if (!map->ioworker.wq)
		goto out;
	atomic_set(&map->io, 1);
	INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker);

	down(&fedata->socket_lock);
	list_add_tail(&map->list, &fedata->socket_mappings);
	up(&fedata->socket_lock);

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_sk_data_ready;
	map->sock->sk->sk_state_change = pvcalls_sk_state_change;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

	return map;
out:
	down(&fedata->socket_lock);
	list_del(&map->list);
	pvcalls_back_release_active(fedata->dev, fedata, map);
	up(&fedata->socket_lock);
	return NULL;
}

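/*
 * Handle PVCALLS_CONNECT: create and connect an AF_INET stream socket,
 * then wrap it in a new active socket mapping.
 */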
static int pvcalls_back_connect(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct socket *sock;
	struct sock_mapping *map;
	struct xen_pvcalls_response *rsp;
	struct sockaddr *sa = (struct sockaddr *)&req->u.connect.addr;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.connect.len < sizeof(sa->sa_family) ||
	    req->u.connect.len > sizeof(req->u.connect.addr) ||
	    sa->sa_family != AF_INET)
		goto out;

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
	if (ret < 0)
		goto out;
	ret = inet_stream_connect(sock, sa, req->u.connect.len, 0);
	if (ret < 0) {
		sock_release(sock);
		goto out;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.connect.id,
					req->u.connect.ref,
					req->u.connect.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		/* map is NULL here, so release the socket we just created */
		sock_release(sock);
	}

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.connect.id = req->u.connect.id;
	rsp->ret = ret;

	return 0;
}

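/*
 * Tear down an active socket mapping: restore the saved sk_data_ready
 * callback, stop the ioworker, unmap the data rings, unbind the event
 * channel and release the socket.
 */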
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map)
{
	disable_irq(map->irq);
	if (map->sock->sk != NULL) {
		write_lock_bh(&map->sock->sk->sk_callback_lock);
		map->sock->sk->sk_user_data = NULL;
		map->sock->sk->sk_data_ready = map->saved_data_ready;
		write_unlock_bh(&map->sock->sk->sk_callback_lock);
	}

	atomic_set(&map->release, 1);
	flush_work(&map->ioworker.register_work);

	xenbus_unmap_ring_vfree(dev, map->bytes);
	xenbus_unmap_ring_vfree(dev, (void *)map->ring);
	unbind_from_irqhandler(map->irq, map);

	sock_release(map->sock);
	kfree(map);

	return 0;
}

static int pvcalls_back_release_passive(struct xenbus_device *dev,
					struct pvcalls_fedata *fedata,
					struct sockpass_mapping *mappass)
{
	if (mappass->sock->sk != NULL) {
		write_lock_bh(&mappass->sock->sk->sk_callback_lock);
		mappass->sock->sk->sk_user_data = NULL;
		mappass->sock->sk->sk_data_ready = mappass->saved_data_ready;
		write_unlock_bh(&mappass->sock->sk->sk_callback_lock);
	}
	sock_release(mappass->sock);
	flush_workqueue(mappass->wq);
	destroy_workqueue(mappass->wq);
	kfree(mappass);

	return 0;
}

static int pvcalls_back_release(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	int ret = 0;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		if (map->id == req->u.release.id) {
			list_del(&map->list);
			up(&fedata->socket_lock);
			ret = pvcalls_back_release_active(dev, fedata, map);
			goto out;
		}
	}
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.release.id);
	if (mappass != NULL) {
		radix_tree_delete(&fedata->socketpass_mappings, mappass->id);
		up(&fedata->socket_lock);
		ret = pvcalls_back_release_passive(dev, fedata, mappass);
	} else
		up(&fedata->socket_lock);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->u.release.id = req->u.release.id;
	rsp->cmd = req->cmd;
	rsp->ret = ret;
	return 0;
}

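/*
 * Deferred accept work: pick up the request saved in reqcopy, accept a
 * pending connection on the passive socket, turn it into an active
 * socket mapping and push the response to the frontend.
 */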
static void __pvcalls_back_accept(struct work_struct *work)
{
	struct sockpass_mapping *mappass = container_of(
		work, struct sockpass_mapping, register_work);
	struct sock_mapping *map;
	struct pvcalls_ioworker *iow;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	struct xen_pvcalls_response *rsp;
	struct xen_pvcalls_request *req;
	int notify;
	int ret = -EINVAL;
	unsigned long flags;

	fedata = mappass->fedata;
	/*
	 * __pvcalls_back_accept can race against pvcalls_back_accept.
	 * We only need to check the value of "cmd" on read. It could be
	 * done atomically, but to simplify the code on the write side, we
	 * use a spinlock.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	req = &mappass->reqcopy;
	if (req->cmd != PVCALLS_ACCEPT) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	sock = sock_alloc();
	if (sock == NULL)
		goto out_error;
	sock->type = mappass->sock->type;
	sock->ops = mappass->sock->ops;

	ret = inet_accept(mappass->sock, sock, O_NONBLOCK, true);
	if (ret == -EAGAIN) {
		sock_release(sock);
		goto out_error;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.accept.id_new,
					req->u.accept.ref,
					req->u.accept.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		sock_release(sock);
		goto out_error;
	}

	map->sockpass = mappass;
	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
	if (notify)
		notify_remote_via_irq(fedata->irq);

	mappass->reqcopy.cmd = 0;
}

static void pvcalls_pass_sk_data_ready(struct sock *sock)
{
	struct sockpass_mapping *mappass = sock->sk_user_data;
	struct pvcalls_fedata *fedata;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;
	int notify;

	if (mappass == NULL)
		return;

	fedata = mappass->fedata;
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd == PVCALLS_POLL) {
		rsp = RING_GET_RESPONSE(&fedata->ring,
					fedata->ring.rsp_prod_pvt++);
		rsp->req_id = mappass->reqcopy.req_id;
		rsp->u.poll.id = mappass->reqcopy.u.poll.id;
		rsp->cmd = mappass->reqcopy.cmd;
		rsp->ret = 0;

		mappass->reqcopy.cmd = 0;
		spin_unlock_irqrestore(&mappass->copy_lock, flags);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
		if (notify)
			notify_remote_via_irq(mappass->fedata->irq);
	} else {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		queue_work(mappass->wq, &mappass->register_work);
	}
}

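/*
 * Handle PVCALLS_BIND: create a passive socket mapping, bind the socket
 * and hook sk_data_ready so incoming connections wake the accept/poll
 * machinery.
 */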
static int pvcalls_back_bind(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&map->register_work, __pvcalls_back_accept);
	spin_lock_init(&map->copy_lock);
	map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
	if (!map->wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &map->sock);
	if (ret < 0)
		goto out;

	ret = inet_bind(map->sock, (struct sockaddr *)&req->u.bind.addr,
			req->u.bind.len);
	if (ret < 0)
		goto out;

	map->fedata = fedata;
	map->id = req->u.bind.id;

	down(&fedata->socket_lock);
	ret = radix_tree_insert(&fedata->socketpass_mappings, map->id,
				map);
	up(&fedata->socket_lock);
	if (ret)
		goto out;

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

out:
	if (ret) {
		if (map && map->sock)
			sock_release(map->sock);
		if (map && map->wq)
			destroy_workqueue(map->wq);
		kfree(map);
	}
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.bind.id = req->u.bind.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_listen(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	map = radix_tree_lookup(&fedata->socketpass_mappings, req->u.listen.id);
	up(&fedata->socket_lock);
	if (map == NULL)
		goto out;

	ret = inet_listen(map->sock, req->u.listen.backlog);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.listen.id = req->u.listen.id;
	rsp->ret = ret;
	return 0;
}

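/*
 * Handle PVCALLS_ACCEPT: stash the request in reqcopy and defer the
 * actual accept to the passive socket's workqueue. Only one accept or
 * poll may be outstanding per passive socket.
 */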
static int pvcalls_back_accept(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	int ret = -EINVAL;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.accept.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		goto out_error;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		ret = -EINTR;
		goto out_error;
	}

	mappass->reqcopy = *req;
	spin_unlock_irqrestore(&mappass->copy_lock, flags);
	queue_work(mappass->wq, &mappass->register_work);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	return 0;
}

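/*
 * Handle PVCALLS_POLL: reply immediately if a connection is already
 * queued on the listening socket, otherwise save the request so
 * pvcalls_pass_sk_data_ready() can answer it when one arrives.
 */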
static int pvcalls_back_poll(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	struct xen_pvcalls_response *rsp;
	struct inet_connection_sock *icsk;
	struct request_sock_queue *queue;
	unsigned long flags;
	int ret;
	bool data;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.poll.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		return -EINVAL;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		ret = -EINTR;
		goto out;
	}

	mappass->reqcopy = *req;
	icsk = inet_csk(mappass->sock->sk);
	queue = &icsk->icsk_accept_queue;
	data = queue->rskq_accept_head != NULL;
	if (data) {
		mappass->reqcopy.cmd = 0;
		ret = 0;
		goto out;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out:
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.poll.id = req->u.poll.id;
	rsp->ret = ret;
	return 0;
}

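/*
 * Dispatch one command ring request to its handler. A return of 0 means
 * a response has already been queued on the ring; accept and poll
 * return -1 when the reply will be sent later.
 */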
static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
				   struct xen_pvcalls_request *req)
{
	int ret = 0;

	switch (req->cmd) {
	case PVCALLS_SOCKET:
		ret = pvcalls_back_socket(dev, req);
		break;
	case PVCALLS_CONNECT:
		ret = pvcalls_back_connect(dev, req);
		break;
	case PVCALLS_RELEASE:
		ret = pvcalls_back_release(dev, req);
		break;
	case PVCALLS_BIND:
		ret = pvcalls_back_bind(dev, req);
		break;
	case PVCALLS_LISTEN:
		ret = pvcalls_back_listen(dev, req);
		break;
	case PVCALLS_ACCEPT:
		ret = pvcalls_back_accept(dev, req);
		break;
	case PVCALLS_POLL:
		ret = pvcalls_back_poll(dev, req);
		break;
	default:
	{
		struct pvcalls_fedata *fedata;
		struct xen_pvcalls_response *rsp;

		fedata = dev_get_drvdata(&dev->dev);
		rsp = RING_GET_RESPONSE(
				&fedata->ring, fedata->ring.rsp_prod_pvt++);
		rsp->req_id = req->req_id;
		rsp->cmd = req->cmd;
		rsp->ret = -ENOTSUPP;
		break;
	}
	}
	return ret;
}

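/*
 * Drain the command ring: copy out each request, handle it, and notify
 * the frontend whenever responses have been queued.
 */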
static void pvcalls_back_work(struct pvcalls_fedata *fedata)
{
	int notify, notify_all = 0, more = 1;
	struct xen_pvcalls_request req;
	struct xenbus_device *dev = fedata->dev;

	while (more) {
		while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
			RING_COPY_REQUEST(&fedata->ring,
					  fedata->ring.req_cons++,
					  &req);

			if (!pvcalls_back_handle_cmd(dev, &req)) {
				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
					&fedata->ring, notify);
				notify_all += notify;
			}
		}

		if (notify_all) {
			notify_remote_via_irq(fedata->irq);
			notify_all = 0;
		}

		RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
	}
}

static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
{
	struct xenbus_device *dev = dev_id;
	struct pvcalls_fedata *fedata = NULL;

	if (dev == NULL)
		return IRQ_HANDLED;

	fedata = dev_get_drvdata(&dev->dev);
	if (fedata == NULL)
		return IRQ_HANDLED;

	pvcalls_back_work(fedata);
	return IRQ_HANDLED;
}

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
{
	struct sock_mapping *map = sock_map;
	struct pvcalls_ioworker *iow;

	if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
		map->sock->sk->sk_user_data != map)
		return IRQ_HANDLED;

	iow = &map->ioworker;

	atomic_inc(&map->write);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

	return IRQ_HANDLED;
}

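/*
 * Connect a new frontend: read the event channel port and command ring
 * reference from xenstore, map the ring, bind the event channel and add
 * the frontend to the global list.
 */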
static int backend_connect(struct xenbus_device *dev)
{
	int err, evtchn;
	grant_ref_t ring_ref;
	struct pvcalls_fedata *fedata = NULL;

	fedata = kzalloc(sizeof(struct pvcalls_fedata), GFP_KERNEL);
	if (!fedata)
		return -ENOMEM;

	fedata->irq = -1;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/event-channel",
				 dev->otherend);
		goto error;
	}

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
				 dev->otherend);
		goto error;
	}

	err = bind_interdomain_evtchn_to_irq(dev->otherend_id, evtchn);
	if (err < 0)
		goto error;
	fedata->irq = err;

	err = request_threaded_irq(fedata->irq, NULL, pvcalls_back_event,
				   IRQF_ONESHOT, "pvcalls-back", dev);
	if (err < 0)
		goto error;

	err = xenbus_map_ring_valloc(dev, &ring_ref, 1,
				     (void **)&fedata->sring);
	if (err < 0)
		goto error;

	BACK_RING_INIT(&fedata->ring, fedata->sring, XEN_PAGE_SIZE * 1);
	fedata->dev = dev;

	INIT_LIST_HEAD(&fedata->socket_mappings);
	INIT_RADIX_TREE(&fedata->socketpass_mappings, GFP_KERNEL);
	sema_init(&fedata->socket_lock, 1);
	dev_set_drvdata(&dev->dev, fedata);

	down(&pvcalls_back_global.frontends_lock);
	list_add_tail(&fedata->list, &pvcalls_back_global.frontends);
	up(&pvcalls_back_global.frontends_lock);

	return 0;

error:
	if (fedata->irq >= 0)
		unbind_from_irqhandler(fedata->irq, dev);
	if (fedata->sring != NULL)
		xenbus_unmap_ring_vfree(dev, fedata->sring);
	kfree(fedata);
	return err;
}

static int backend_disconnect(struct xenbus_device *dev)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	struct radix_tree_iter iter;
	void **slot;


	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		list_del(&map->list);
		pvcalls_back_release_active(dev, fedata, map);
	}

	radix_tree_for_each_slot(slot, &fedata->socketpass_mappings, &iter, 0) {
		mappass = radix_tree_deref_slot(slot);
		if (!mappass)
			continue;
		if (radix_tree_exception(mappass)) {
			if (radix_tree_deref_retry(mappass))
				slot = radix_tree_iter_retry(&iter);
		} else {
			radix_tree_delete(&fedata->socketpass_mappings,
					  mappass->id);
			pvcalls_back_release_passive(dev, fedata, mappass);
		}
	}
	up(&fedata->socket_lock);

	unbind_from_irqhandler(fedata->irq, dev);
	xenbus_unmap_ring_vfree(dev, fedata->sring);

	list_del(&fedata->list);
	kfree(fedata);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static int pvcalls_back_probe(struct xenbus_device *dev,
			      const struct xenbus_device_id *id)
{
	int err, abort;
	struct xenbus_transaction xbt;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("%s cannot create xenstore transaction\n", __func__);
		return err;
	}

	err = xenbus_printf(xbt, dev->nodename, "versions", "%s",
			    PVCALLS_VERSIONS);
	if (err) {
		pr_warn("%s write out 'versions' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "max-page-order", "%u",
			    MAX_RING_ORDER);
	if (err) {
		pr_warn("%s write out 'max-page-order' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "function-calls",
			    XENBUS_FUNCTIONS_CALLS);
	if (err) {
		pr_warn("%s write out 'function-calls' failed\n", __func__);
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		pr_warn("%s cannot complete xenstore transaction\n", __func__);
		return err;
	}

	if (abort)
		return -EFAULT;

	xenbus_switch_state(dev, XenbusStateInitWait);

	return 0;
}

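/*
 * Walk the backend through the intermediate xenbus states until it
 * reaches the requested state, connecting or disconnecting the frontend
 * along the way as needed.
 */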
static void set_backend_state(struct xenbus_device *dev,
			      enum xenbus_state state)
{
	while (dev->state != state) {
		switch (dev->state) {
		case XenbusStateClosed:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
				xenbus_switch_state(dev, XenbusStateInitWait);
				break;
			case XenbusStateClosing:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateInitWait:
		case XenbusStateInitialised:
			switch (state) {
			case XenbusStateConnected:
				backend_connect(dev);
				xenbus_switch_state(dev, XenbusStateConnected);
				break;
			case XenbusStateClosing:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateConnected:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateClosing:
			case XenbusStateClosed:
				down(&pvcalls_back_global.frontends_lock);
				backend_disconnect(dev);
				up(&pvcalls_back_global.frontends_lock);
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateClosing:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosed);
				break;
			default:
				WARN_ON(1);
			}
			break;
		default:
			WARN_ON(1);
		}
	}
}

static void pvcalls_back_changed(struct xenbus_device *dev,
				 enum xenbus_state frontend_state)
{
	switch (frontend_state) {
	case XenbusStateInitialising:
		set_backend_state(dev, XenbusStateInitWait);
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		set_backend_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		set_backend_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		set_backend_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		device_unregister(&dev->dev);
		break;
	case XenbusStateUnknown:
		set_backend_state(dev, XenbusStateClosed);
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

static int pvcalls_back_remove(struct xenbus_device *dev)
{
	return 0;
}

static int pvcalls_back_uevent(struct xenbus_device *xdev,
			       struct kobj_uevent_env *env)
{
	return 0;
}

static const struct xenbus_device_id pvcalls_back_ids[] = {
	{ "pvcalls" },
	{ "" }
};

static struct xenbus_driver pvcalls_back_driver = {
	.ids = pvcalls_back_ids,
	.probe = pvcalls_back_probe,
	.remove = pvcalls_back_remove,
	.uevent = pvcalls_back_uevent,
	.otherend_changed = pvcalls_back_changed,
};

static int __init pvcalls_back_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	ret = xenbus_register_backend(&pvcalls_back_driver);
	if (ret < 0)
		return ret;

	sema_init(&pvcalls_back_global.frontends_lock, 1);
	INIT_LIST_HEAD(&pvcalls_back_global.frontends);
	return 0;
}
module_init(pvcalls_back_init);

static void __exit pvcalls_back_fin(void)
{
	struct pvcalls_fedata *fedata, *nfedata;

	down(&pvcalls_back_global.frontends_lock);
	list_for_each_entry_safe(fedata, nfedata,
				 &pvcalls_back_global.frontends, list) {
		backend_disconnect(fedata->dev);
	}
	up(&pvcalls_back_global.frontends_lock);

	xenbus_unregister_driver(&pvcalls_back_driver);
}

module_exit(pvcalls_back_fin);

MODULE_DESCRIPTION("Xen PV Calls backend driver");
MODULE_AUTHOR("Stefano Stabellini <sstabellini@kernel.org>");
MODULE_LICENSE("GPL");