/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

const char rxrpc_call_traces[rxrpc_call__nr_trace][4] = {
	[rxrpc_call_new_client]		= "NWc",
	[rxrpc_call_new_service]	= "NWs",
	[rxrpc_call_queued]		= "QUE",
	[rxrpc_call_queued_ref]		= "QUR",
	[rxrpc_call_seen]		= "SEE",
	[rxrpc_call_got]		= "GOT",
	[rxrpc_call_got_skb]		= "Gsk",
	[rxrpc_call_got_userid]		= "Gus",
	[rxrpc_call_put]		= "PUT",
	[rxrpc_call_put_skb]		= "Psk",
	[rxrpc_call_put_userid]		= "Pus",
	[rxrpc_call_put_noqueue]	= "PNQ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_got);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

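	/* The Tx ring indices are wrapped with "& (acks_winsz - 1)" (see
	 * rxrpc_cleanup_call()), so the window size must be a power of two.
	 */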
	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	skb_queue_head_init(&call->knlrecv_queue);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);

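	/* Poison the unlinked rb_node so that use of the call before it has
	 * been inserted into a socket's call tree is easier to spot.
	 */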
	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;

	_enter("");

	ASSERT(rx->local != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
	call->rx_data_post = 1;
	call->service_id = srx->srx_service;
	rcu_assign_pointer(call->socket, rx);

	_leave(" = %p", call);
	return call;
}

/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

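	/* Add the call to the peer's error-target list so that errors
	 * reported against the peer get propagated to this call.
	 */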
	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	trace_rxrpc_call(call, 0, atomic_read(&call->usage), here,
			 (const void *)user_call_ID);

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

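	/* Find the insertion point in the socket's call tree, bailing out if
	 * the user ID is already in use.
	 */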
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call, rxrpc_call_put_userid);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

error_out:
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, ret);
	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock. This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	ret = -EEXIST;
	goto error_out;
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	const void *here = __builtin_return_address(0);
	u32 call_id, chan;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	trace_rxrpc_call(candidate, rxrpc_call_new_service,
			 atomic_read(&candidate->usage), here, NULL);

	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->conn = conn;
	candidate->peer = conn->params.peer;
	candidate->cid = sp->hdr.cid;
	candidate->call_id = sp->hdr.callNumber;
	candidate->security_ix = sp->hdr.securityIndex;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	candidate->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;
	rcu_assign_pointer(candidate->socket, rx);

	spin_lock(&conn->channel_lock);

	/* set the channel for this call */
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

	_debug("channel[%u] is %p", candidate->cid & RXRPC_CHANNELMASK, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
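			/* Fall through */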
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call, rxrpc_call_got);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state == RXRPC_CALL_COMPLETE) {
			__rxrpc_disconnect_call(conn, call);
		} else {
			spin_unlock(&conn->channel_lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = sp->hdr.callNumber;

	/* We just ignore calls prior to the current call ID. Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */

	/* Temporary: Mirror the backlog prealloc ref (TODO: use prealloc) */
	rxrpc_get_call(candidate, rxrpc_call_got);

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
	rxrpc_get_connection(conn);
	rxrpc_get_peer(call->peer);
	spin_unlock(&conn->channel_lock);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	call->service_id = conn->params.service_id;

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
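	/* Don't take a new ref if the refcount has already hit zero (the call
	 * is being destroyed); in that case we must not queue it.
	 */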
	int n = __atomic_add_unless(&call->usage, 1, 0);
	if (n == 0)
		return false;
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_read(&call->usage);
	ASSERTCMP(n, >=, 1);
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	if (call) {
		int n = atomic_read(&call->usage);

		trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
	}
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);

	trace_rxrpc_call(call, op, n, here, NULL);
}

/*
 * Note the addition of a ref on a call for a socket buffer.
 */
void rxrpc_get_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);

	trace_rxrpc_call(call, rxrpc_call_got_skb, n, here, skb);
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	rxrpc_see_call(call);

	spin_lock_bh(&call->lock);
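	/* A call may only be released once; a second release is a bug. */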
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d)", call, call->debug_id);

	if (call->peer) {
		spin_lock(&call->peer->lock);
		hlist_del_init(&call->error_link);
		spin_unlock(&call->peer->lock);
	}

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
		rxrpc_put_call(call, rxrpc_call_put_userid);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK) {
		clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
		rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
		rxrpc_call_completed(call);
	} else {
		write_lock_bh(&call->state_lock);

		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("+++ ABORTING STATE %d +++\n", call->state);
			__rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, ECONNRESET);
			clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
			rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
		}

		write_unlock_bh(&call->state_lock);
	}

	if (call->conn)
		rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			spin_unlock_bh(&call->lock);

			sp = rxrpc_skb(skb);
			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);
	}
	rxrpc_purge_queue(&call->knlrecv_queue);

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);

	/* We have to release the prealloc backlog ref */
	if (rxrpc_is_service_call(call))
		rxrpc_put_call(call, rxrpc_call_put);
	_leave("");
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_release_call(rx, call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_release_call(rx, call);
	}

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_release_call(rx, call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	trace_rxrpc_call(call, op, n, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		rxrpc_cleanup_call(call);
	}
}

/*
 * Release a call ref held by a socket buffer.
 */
void rxrpc_put_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_dec_return(&call->usage);
	trace_rxrpc_call(call, rxrpc_call_put_skb, n, here, skb);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		rxrpc_cleanup_call(call);
	}
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
	rxrpc_purge_queue(&call->knlrecv_queue);
	rxrpc_put_peer(call->peer);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERT(!work_pending(&call->processor));
	ASSERTCMP(call->conn, ==, NULL);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

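			/* The bottom bit of each slot is used as a flag, so
			 * mask it off to recover the sk_buff pointer.
			 */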
			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	rxrpc_purge_queue(&call->knlrecv_queue);
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * Make sure that all calls are gone.
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");

	if (list_empty(&rxrpc_calls))
		return;

	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		rxrpc_see_call(call);
		list_del_init(&call->link);

		pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
		       call, atomic_read(&call->usage),
		       atomic_read(&call->ackr_not_idle),
		       rxrpc_call_states[call->state],
		       call->flags, call->events);
		if (!skb_queue_empty(&call->rx_queue))
			pr_err("Rx queue occupied\n");
		if (!skb_queue_empty(&call->rx_oos_queue))
			pr_err("OOS queue occupied\n");

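		/* Briefly drop the lock and allow rescheduling between
		 * iterations.
		 */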
		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
	rxrpc_queue_call(call);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
}