/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

static DEFINE_SPINLOCK(rxrpc_call_hash_lock);
static DEFINE_HASHTABLE(rxrpc_call_hash, 10);

/*
 * Hash function for rxrpc_call_hash
 */
static unsigned long rxrpc_call_hashfunc(
	u8		in_clientflag,
	u32		cid,
	u32		call_id,
	u32		epoch,
	u16		service_id,
	sa_family_t	family,
	void		*localptr,
	unsigned int	addr_size,
	const u8	*peer_addr)
{
	const u16 *p;
	unsigned int i;
	unsigned long key;

	_enter("");

	key = (unsigned long)localptr;
	/* We just want to add up the __be32 values, so forcing the
	 * cast should be okay.
	 */
	key += epoch;
	key += service_id;
	key += call_id;
	key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
	key += cid & RXRPC_CHANNELMASK;
	key += in_clientflag;
	key += family;
	/* Step through the peer address in 16-bit portions for speed */
	for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
		key += *p;
	_leave(" key = 0x%lx", key);
	return key;
}

/*
 * Add a call to the hashtable
 */
static void rxrpc_call_hash_add(struct rxrpc_call *call)
{
	unsigned long key;
	unsigned int addr_size = 0;

	_enter("");
	switch (call->family) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}
	key = rxrpc_call_hashfunc(call->in_clientflag, call->cid,
				  call->call_id, call->epoch,
				  call->service_id, call->family,
				  call->conn->params.local, addr_size,
				  call->peer_ip.ipv6_addr);
	/* Store the full key in the call */
	call->hash_key = key;
	spin_lock(&rxrpc_call_hash_lock);
	hash_add_rcu(rxrpc_call_hash, &call->hash_node, key);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}

/*
 * Remove a call from the hashtable
 */
static void rxrpc_call_hash_del(struct rxrpc_call *call)
{
	_enter("");
	spin_lock(&rxrpc_call_hash_lock);
	hash_del_rcu(&call->hash_node);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}

/*
 * Find a call in the hashtable and return it, or NULL if it
 * isn't there.
 */
struct rxrpc_call *rxrpc_find_call_hash(
	struct rxrpc_host_header *hdr,
	void *localptr,
	sa_family_t family,
	const void *peer_addr)
{
	unsigned long key;
	unsigned int addr_size = 0;
	struct rxrpc_call *call = NULL;
	struct rxrpc_call *ret = NULL;
	u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED;

	_enter("");
	switch (family) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}

	key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber,
				  hdr->epoch, hdr->serviceId,
				  family, localptr, addr_size,
				  peer_addr);
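	/* The address match goes through ipv6_addr with the family's address
	 * size, which relies on the v4 and v6 forms sharing storage in
	 * peer_ip.
	 */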
	hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
		if (call->hash_key == key &&
		    call->call_id == hdr->callNumber &&
		    call->cid == hdr->cid &&
		    call->in_clientflag == in_clientflag &&
		    call->service_id == hdr->serviceId &&
		    call->family == family &&
		    call->local == localptr &&
		    memcmp(call->peer_ip.ipv6_addr, peer_addr,
			   addr_size) == 0 &&
		    call->epoch == hdr->epoch) {
			ret = call;
			break;
		}
	}
	_leave(" = %p", ret);
	return ret;
}

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

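	/* Poison the sock_node so that use of the record before it is
	 * inserted into a socket's call tree shows up clearly.
	 */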
	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}

/*
 * allocate a new client call and attempt to get a connection slot for it
 */
static struct rxrpc_call *rxrpc_alloc_client_call(
	struct rxrpc_sock *rx,
	struct rxrpc_conn_parameters *cp,
	struct rxrpc_transport *trans,
	struct rxrpc_conn_bundle *bundle,
	gfp_t gfp)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(rx != NULL);
	ASSERT(trans != NULL);
	ASSERT(bundle != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;

	ret = rxrpc_connect_call(rx, cp, trans, bundle, call, gfp);
	if (ret < 0) {
		kmem_cache_free(rxrpc_call_jar, call);
		return ERR_PTR(ret);
	}

	/* Record copies of information for hashtable lookup */
	call->family = rx->family;
	call->local = call->conn->params.local;
	switch (call->family) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			call->conn->params.peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       call->conn->params.peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	}
	call->epoch = call->conn->proto.epoch;
	call->service_id = call->conn->params.service_id;
	call->in_clientflag = call->conn->proto.in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);

	_leave(" = %p", call);
	return call;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct rxrpc_transport *trans,
					 struct rxrpc_conn_bundle *bundle,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;

	_enter("%p,%d,%d,%lx",
	       rx, trans->debug_id, bundle ? bundle->debug_id : -1,
	       user_call_ID);

	call = rxrpc_alloc_client_call(rx, cp, trans, bundle, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

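	/* Take a second ref to hand to the socket's call tree; the ref from
	 * allocation is returned to the caller.
	 */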
	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	struct rb_node **p, *parent;
	u32 call_id;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = sp->hdr.cid;
	candidate->call_id = sp->hdr.callNumber;
	candidate->channel = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	write_lock_bh(&conn->lock);

	/* set the channel for this call */
	call = conn->channels[candidate->channel];
	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
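			/* fall through */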
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			conn->channels[call->channel] = NULL;
		} else {
			write_unlock_bh(&conn->lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = sp->hdr.callNumber;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		call = rb_entry(parent, struct rxrpc_call, conn_node);

		/* The tree is sorted in order of the __be32 value without
		 * turning it into host order.
		 */
		if (call_id < call->call_id)
			p = &(*p)->rb_left;
		else if (call_id > call->call_id)
			p = &(*p)->rb_right;
		else
			goto old_call;
	}

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);
	conn->channels[call->channel] = call;
	sock_hold(&rx->sk);
	rxrpc_get_connection(conn);
	write_unlock_bh(&conn->lock);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	/* Record copies of information for hashtable lookup */
	call->family = rx->family;
	call->local = conn->params.local;
	switch (call->family) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			conn->params.peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       conn->params.peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	default:
		break;
	}
	call->epoch = conn->proto.epoch;
	call->service_id = conn->params.service_id;
	call->in_clientflag = conn->proto.in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	spin_lock(&conn->trans->client_lock);
	write_lock_bh(&conn->lock);
	write_lock(&call->state_lock);

	if (conn->channels[call->channel] == call)
		conn->channels[call->channel] = NULL;

	if (conn->out_clientflag && conn->bundle) {
		conn->avail_calls++;
		switch (conn->avail_calls) {
		case 1:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->avail_conns);
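			/* fall through */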
		case 2 ... RXRPC_MAXCALLS - 1:
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			break;
		case RXRPC_MAXCALLS:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->unused_conns);
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			break;
		default:
			pr_err("conn->avail_calls=%d\n", conn->avail_calls);
			BUG();
		}
	}

	spin_unlock(&conn->trans->client_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_CALL_DEAD;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
	write_unlock_bh(&conn->lock);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
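	/* Drop the socket's ref on the call that rxrpc_release_call() handed
	 * over to the death timer.
	 */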
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->local_abort = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	if (call->conn) {
		spin_lock(&call->conn->params.peer->lock);
		hlist_del_init(&call->error_link);
		spin_unlock(&call->conn->params.peer->lock);

		write_lock_bh(&call->conn->lock);
		rb_erase(&call->conn_node, &call->conn->calls);
		write_unlock_bh(&call->conn->lock);
		rxrpc_put_connection(call->conn);
	}

	/* Remove the call from the hash */
	rxrpc_call_hash_del(call);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

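			/* The bottom bit of each Tx window slot is used as a
			 * flag, so mask it off to recover the sk_buff pointer.
			 */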
			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}