/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

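	/* Top the peer and conn rings up to one more entry than the number
	 * of calls currently preallocated, so that the call we are about to
	 * add never finds the rings it depends on empty.
	 */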
	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(gfp);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

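/* A worked example of the CIRC_CNT() arithmetic used above, assuming
 * RXRPC_BACKLOG_MAX is a power of two (the masking requires this):
 * CIRC_CNT(head, tail, size) is ((head - tail) & (size - 1)), so with
 * size = 32, head = 3 and tail = 30 the ring holds (3 - 30) & 31 = 5
 * entries, and an index advances as (idx + 1) & (size - 1), wrapping to 0.
 */
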
/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * sufficient of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

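	/* Fill the backlog until rxrpc_service_prealloc_one() reports that
	 * the socket's accept budget or one of the rings is full.
	 */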
	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0)
		;

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);
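	/* (The empty lock/unlock pair acts as a barrier: anyone who took
	 * incoming_lock before this point - and so might still have been
	 * consuming backlog entries - must have released it again before we
	 * can proceed.)
	 */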

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_peer *peer, *xpeer;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* The ring counts must obey #peers >= #conns >= #calls: a new call
	 * always consumes a call slot, but only sometimes a conn or a peer.
	 */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		/* No connection.  We're going to need a peer to start off
		 * with.  If one doesn't yet exist, use a spare from the
		 * preallocation set.  We dump the address into the spare in
		 * anticipation - and to save on stack space.
		 */
		xpeer = b->peer_backlog[peer_tail];
		if (rxrpc_extract_addr_from_skb(&xpeer->srx, skb) < 0)
			return NULL;

		peer = rxrpc_lookup_incoming_peer(local, xpeer);
		if (peer == xpeer) {
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		rxrpc_get_local(local);
		conn->params.local = local;
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
	return call;
}
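
/* A note on the ring ordering above: the preallocator publishes a new entry
 * with smp_store_release() on the head index, which pairs with the
 * smp_load_acquire() on the head here, so an entry's contents are guaranteed
 * visible before the index that advertises it.  Retiring an entry works in
 * reverse: the smp_store_release() on the tail ensures we have finished with
 * the old entry before the slot is advertised as free to the preallocator
 * (which reads the tail with READ_ONCE()).
 */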

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_connection *conn,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_sock *rx;
	struct rxrpc_call *call;
	u16 service_id = sp->hdr.serviceId;

	_enter("");

	/* Get the socket providing the service */
	rx = rcu_dereference(local->service);
	if (rx && service_id == rx->srx.srx_service)
		goto found_service;

	trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
	skb->priority = RX_INVALID_OPERATION;
	_leave(" = NULL [service]");
	return NULL;

found_service:
	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
	 * notification is generated.
	 *
	 * The BUG should never happen because the kernel should be well
	 * behaved enough not to access the call before the first notification
	 * event and userspace is prevented from doing so until the state is
	 * appropriate.
	 */
	if (!mutex_trylock(&call->user_mutex))
		BUG();

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (rx->discard_new_call)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		else
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->remote_abort, -ECONNABORTED);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->local_abort, -ECONNABORTED);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		kleave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		kleave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point.  Walk the tree with a separate
	 * variable so that the call we just dequeued isn't clobbered.
	 */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		struct rxrpc_call *xcall;

		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
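
/* A minimal usage sketch for a kernel service (hypothetical names; none of
 * these callbacks are defined in this file):
 *
 *	static void my_attach_call(struct rxrpc_call *rxcall,
 *				   unsigned long user_call_ID)
 *	{
 *		struct my_call *call = (struct my_call *)user_call_ID;
 *
 *		call->rxcall = rxcall;	// stash the ref we were given
 *	}
 *
 *	ret = rxrpc_kernel_charge_accept(srv->socket, my_notify_rx,
 *					 my_attach_call,
 *					 (unsigned long)call, GFP_KERNEL);
 *
 * Each successful call charges one more preallocated call into the backlog;
 * a service would typically recharge each time it takes on an incoming call.
 */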