/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;
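
	/* Each backlog ring is a power-of-two circular buffer, so the
	 * CIRC_CNT() above reduces to (head - tail) & (size - 1): with 32
	 * slots, for example, head 3 and tail 30 give (3 - 30) & 31 = 5
	 * entries currently charged.
	 */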

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}
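
	/* Note the ordering: the slot is filled before the head index is
	 * advanced with smp_store_release().  This pairs with the
	 * smp_load_acquire() of the same index in rxrpc_alloc_incoming_call()
	 * so that the consumer can never observe the new index without also
	 * seeing the pointer just stored.
	 */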

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(gfp);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
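
	/* A call attached to a kernel service now carries three refs: the
	 * service's, the user-ID tree's and the retainer ref that passes to
	 * the backlog buffer when the call is stored below; see the banner
	 * comment on rxrpc_new_incoming_call() for how these are disposed of.
	 */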

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * sufficient of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0)
		;

	return 0;
}
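
/* A kernel service (one that supplies a discard_new_call hook) charges its
 * backlog explicitly through rxrpc_kernel_charge_accept() below, which is why
 * the anonymous charging loop above is skipped for it; for a userspace socket
 * this is run at listen() time and again whenever a call is accepted or
 * rejected.
 */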

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);
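
	/* Taking and immediately dropping the lock acts as a barrier here:
	 * rxrpc_new_incoming_call() consumes the rings under this lock, so
	 * once we've been through it, no consumer that started before us can
	 * still be using the backlog we're about to empty.
	 */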

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_peer *peer, *xpeer;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);
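
	/* An incoming call always consumes a call slot, but takes a conn slot
	 * only if no suitable connection already exists and a peer slot only
	 * if the peer isn't yet known, so the rings drain in that order and
	 * the assertions above hold.
	 */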

	if (call_count == 0)
		return NULL;

	if (!conn) {
		/* No connection.  We're going to need a peer to start off
		 * with.  If one doesn't yet exist, use a spare from the
		 * preallocation set.  We dump the address into the spare in
		 * anticipation - and to save on stack space.
		 */
		xpeer = b->peer_backlog[peer_tail];
		if (rxrpc_extract_addr_from_skb(&xpeer->srx, skb) < 0)
			return NULL;

		peer = rxrpc_lookup_incoming_peer(local, xpeer);
		if (peer == xpeer) {
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		rxrpc_get_local(local);
		conn->params.local = local;
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_connection *conn,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_sock *rx;
	struct rxrpc_call *call;
	u16 service_id = sp->hdr.serviceId;

	_enter("");

	/* Get the socket providing the service */
	rx = rcu_dereference(local->service);
	if (rx && (service_id == rx->srx.srx_service ||
		   service_id == rx->second_service))
		goto found_service;

	trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
	skb->priority = RX_INVALID_OPERATION;
	_leave(" = NULL [service]");
	return NULL;
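
	/* As the banner comment above says, a NULL return hands the rejection
	 * back to the input layer, which uses skb->mark to choose between a
	 * busy response and an abort bearing skb->priority as the code.
	 */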

found_service:
	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
	 * notification is generated.
	 *
	 * The BUG should never happen because the kernel should be well
	 * behaved enough not to access the call before the first notification
	 * event and userspace is prevented from doing so until the state is
	 * appropriate.
	 */
	if (!mutex_trylock(&call->user_mutex))
		BUG();

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);
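
	/* A kernel service hears about the call through the notification it
	 * registered; a userspace socket instead has the call counted against
	 * its accept queue, to be collected via rxrpc_accept_call() below.
	 */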

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (rx->discard_new_call)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		else
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->remote_abort, -ECONNABORTED);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->local_abort, -ECONNABORTED);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		kleave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		kleave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point. */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		struct rxrpc_call *xcall;

		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
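
/* Usage sketch (hypothetical, not part of this file): a kernel service keeps
 * its listen socket charged by preallocating one of its own call records per
 * backlog slot and handing its address over as the user call ID:
 *
 *	struct sample_call *sc = kzalloc(sizeof(*sc), GFP_KERNEL);
 *	if (sc &&
 *	    rxrpc_kernel_charge_accept(socket, sample_notify_rx,
 *				       sample_attach_call,
 *				       (unsigned long)sc, GFP_KERNEL) < 0)
 *		kfree(sc);	// charging failed: backlog full or closing
 *
 * where sample_attach_call() stores the rxrpc_call pointer into *sc and keeps
 * the ref that this function leaves with the caller.
 */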