/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;
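
	/* Each backlog pool is a power-of-two ring of RXRPC_BACKLOG_MAX
	 * slots.  CIRC_CNT() from <linux/circ_buf.h> is (head - tail) &
	 * (size - 1), the number of items banked and not yet consumed; e.g.
	 * with size 32, head 3 and tail 30, (3 - 30) & 31 = 5 items.  Heads
	 * are advanced with smp_store_release() once a slot is filled,
	 * pairing with the smp_load_acquire() of each head in
	 * rxrpc_alloc_incoming_call(), so the consumer only ever sees fully
	 * initialised objects.
	 */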

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;
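
	/* max is now the call count we'll have once the new call is banked;
	 * top the peer and conn rings up to at least that level so that a
	 * complete set can always be withdrawn together.
	 */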
	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(gfp);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use; compare against
		 * each tree node (xcall), not against the new call.
		 */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
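
	/* A kernel-service call now carries three refs: the service's own
	 * (rxrpc_call_got_kernel), the user ID tree's (rxrpc_call_got_userid)
	 * and the backlog ring's original allocation ref; the banner comment
	 * on rxrpc_new_incoming_call() describes how they're disposed of.
	 */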

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * enough of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

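	/* Fill the rings until rxrpc_service_prealloc_one() says stop: it
	 * returns -ENOBUFS when the ring or the socket's accept quota is
	 * full, or -ENOMEM if allocation fails.
	 */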
	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0)
		;

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);
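
	/* rx->backlog is now NULL and the lock/unlock above has waited out
	 * any rxrpc_new_incoming_call() still inside its critical section,
	 * so we are the only accessor of the rings and plain reads of the
	 * head and tail indices suffice.
	 */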
	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}
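
	/* The spare peers were never hashed, so the bare kfree() above is
	 * safe.  The spare conns, though, were put on the rxnet lists by
	 * rxrpc_prealloc_service_connection(), so unlink them under
	 * conn_lock before freeing them.
	 */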
	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		tail = (tail + 1) & (size - 1);
	}
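
	/* Spare calls may hold a kernel-service ref taken by the attach
	 * callback; let the service discard its side first, then complete,
	 * release and put each call to drop the ring's own ref.
	 */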
	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		call->socket = rx;
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_peer *peer, *xpeer;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;
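
	/* The load-acquires on the head indices pair with the store-releases
	 * in rxrpc_service_prealloc_one(): once an advanced head is seen, the
	 * object banked in that slot is guaranteed to be fully set up.
	 */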
	/* #peers >= #conns >= #calls must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		/* No connection.  We're going to need a peer to start off
		 * with.  If one doesn't yet exist, use a spare from the
		 * preallocation set.  We dump the address into the spare in
		 * anticipation - and to save on stack space.
		 */
		xpeer = b->peer_backlog[peer_tail];
		if (rxrpc_extract_addr_from_skb(&xpeer->srx, skb) < 0)
			return NULL;

		peer = rxrpc_lookup_incoming_peer(local, xpeer);
		if (peer == xpeer) {
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		rxrpc_get_local(local);
		conn->params.local = local;
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_connection *conn,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_sock *rx;
	struct rxrpc_call *call;
	u16 service_id = sp->hdr.serviceId;

	_enter("");

	/* Get the socket providing the service */
	rx = rcu_dereference(local->service);
	if (rx && (service_id == rx->srx.srx_service ||
		   service_id == rx->second_service))
		goto found_service;
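
	/* No such service: reject the packet.  As the banner comment says,
	 * errors are reported by marking the skb - skb->mark selects the
	 * reply type and skb->priority carries the abort code.
	 */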
	trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
	skb->priority = RX_INVALID_OPERATION;
	_leave(" = NULL [service]");
	return NULL;

found_service:
	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
	 * notification is generated.
	 *
	 * The BUG should never happen because the kernel should be well
	 * behaved enough not to access the call before the first notification
	 * event and userspace is prevented from doing so until the state is
	 * appropriate.
	 */
	if (!mutex_trylock(&call->user_mutex))
		BUG();

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

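	/* What happens next depends on the connection's state: a freshly
	 * preallocated service conn must be secured first, an established one
	 * lets the call proceed, and an aborted one dooms the call at once.
	 */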
	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (rx->discard_new_call)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		else
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->remote_abort, -ECONNABORTED);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->local_abort, -ECONNABORTED);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		kleave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		kleave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point.  Walk with xcall so that call,
	 * which we now own, isn't clobbered.
	 */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
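
/* Usage sketch for the API above: a kernel service charges the backlog once
 * per call it is prepared to accept, supplying its own callbacks (the names
 * below are illustrative; fs/afs/rxrpc.c is the in-tree example):
 *
 *	for (i = 0; i < N; i++) {
 *		struct my_call *mc = my_call_alloc();
 *		if (rxrpc_kernel_charge_accept(srv_socket, my_notify_rx,
 *					       my_attach_call,
 *					       (unsigned long)mc,
 *					       GFP_KERNEL) < 0)
 *			break;
 *	}
 */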