/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"
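
/* A sketch of the backlog structure this file works against.  The real
 * definition lives in ar-internal.h; the field names below are taken from
 * their uses in this file, but treat the exact layout as an assumption:
 *
 *	struct rxrpc_backlog {
 *		unsigned short		peer_backlog_head, peer_backlog_tail;
 *		unsigned short		conn_backlog_head, conn_backlog_tail;
 *		unsigned short		call_backlog_head, call_backlog_tail;
 *		struct rxrpc_peer	*peer_backlog[RXRPC_BACKLOG_MAX];
 *		struct rxrpc_connection	*conn_backlog[RXRPC_BACKLOG_MAX];
 *		struct rxrpc_call	*call_backlog[RXRPC_BACKLOG_MAX];
 *	};
 *
 * Each array is used as a single-producer/single-consumer ring buffer.
 * RXRPC_BACKLOG_MAX must be a power of two for the "& (size - 1)" index
 * masking used throughout this file to be valid.
 */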
/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;
	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));
	}
	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(gfp);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use; compare against the
		 * existing tree node, not the call we just allocated.
		 */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	write_lock(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock(&rxrpc_call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}
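
/* A worked example of the linux/circ_buf.h arithmetic used above, to make
 * the masking concrete.  With size = 4, head = 1 and tail = 3:
 *
 *	CIRC_CNT(head, tail, size) == (head - tail) & (size - 1)
 *	                           == (1 - 3) & 3
 *	                           == 2
 *
 * i.e. two slots (indices 3 and 0) are occupied.  The producer fills a slot
 * and then publishes it by advancing the head with smp_store_release(); the
 * consumer observes the head with smp_load_acquire() (or the tail with
 * READ_ONCE()) before touching a slot, so a single producer and a single
 * consumer can share a ring without taking a lock.
 */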
/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * sufficient of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0)
		;

	return 0;
}
/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);
	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxrpc_connection_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxrpc_connection_lock);
		kfree(conn);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}
/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_peer *peer, *xpeer;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		/* No connection.  We're going to need a peer to start off
		 * with.  If one doesn't yet exist, use a spare from the
		 * preallocation set.  We dump the address into the spare in
		 * anticipation - and to save on stack space.
		 */
		xpeer = b->peer_backlog[peer_tail];
		if (rxrpc_extract_addr_from_skb(&xpeer->srx, skb) < 0)
			return NULL;

		peer = rxrpc_lookup_incoming_peer(local, xpeer);
		if (peer == xpeer) {
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		rxrpc_get_local(local);
		conn->params.local = local;
		conn->params.peer = peer;
		rxrpc_new_incoming_connection(conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
	return call;
}
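
/* A note on the counts checked above: the preallocator refills the peer and
 * conn rings before it publishes the corresponding call, and each incoming
 * call consumes at most one element from each ring (always a call, a conn
 * only when no matching connection exists, a peer only when no matching peer
 * exists).  The rings therefore always satisfy #peers >= #conns >= #calls,
 * which is what the two ASSERTCMPs verify and why testing call_count alone
 * is enough to know that all three rings can supply an element.
 */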
/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_connection *conn,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_sock *rx;
	struct rxrpc_call *call;

	_enter("");

	/* Get the socket providing the service */
	hlist_for_each_entry_rcu_bh(rx, &local->services, listen_link) {
		if (rx->srx.srx_service == sp->hdr.serviceId)
			goto found_service;
	}

	trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
	skb->priority = RX_INVALID_OPERATION;
	_leave(" = NULL [service]");
	return NULL;

found_service:
	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_BUSY;
		_leave(" = NULL [busy]");
		goto out;
	}

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (rx->discard_new_call)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		else
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->remote_abort, ECONNABORTED);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->local_abort, ECONNABORTED);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}
/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		_leave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	rxrpc_get_call(call, rxrpc_call_got);
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, ECONNABORTED);
		ret = 0;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		break;
	default:
		BUG();
	}

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);

	rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);

	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}
/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
);