/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till a dead call expires after its last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;
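
/* Both of the above are expected to be tunable at runtime via sysctl
 * (see net/rxrpc/sysctl.c).
 */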
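/*
 * Textual forms of the call states, used (among other places) by the procfs
 * interface; each string is padded to eight characters so columnar output
 * lines up.
 */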
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
        [RXRPC_CALL_CLIENT_SEND_REQUEST]        = "ClSndReq",
        [RXRPC_CALL_CLIENT_AWAIT_REPLY]         = "ClAwtRpl",
        [RXRPC_CALL_CLIENT_RECV_REPLY]          = "ClRcvRpl",
        [RXRPC_CALL_CLIENT_FINAL_ACK]           = "ClFnlACK",
        [RXRPC_CALL_SERVER_SECURING]            = "SvSecure",
        [RXRPC_CALL_SERVER_ACCEPTING]           = "SvAccept",
        [RXRPC_CALL_SERVER_RECV_REQUEST]        = "SvRcvReq",
        [RXRPC_CALL_SERVER_ACK_REQUEST]         = "SvAckReq",
        [RXRPC_CALL_SERVER_SEND_REPLY]          = "SvSndRpl",
        [RXRPC_CALL_SERVER_AWAIT_ACK]           = "SvAwtACK",
        [RXRPC_CALL_COMPLETE]                   = "Complete",
        [RXRPC_CALL_SERVER_BUSY]                = "SvBusy  ",
        [RXRPC_CALL_REMOTELY_ABORTED]           = "RmtAbort",
        [RXRPC_CALL_LOCALLY_ABORTED]            = "LocAbort",
        [RXRPC_CALL_NETWORK_ERROR]              = "NetError",
        [RXRPC_CALL_DEAD]                       = "Dead    ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

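/* A global hashtable of extant calls, with 2^10 buckets.  Additions and
 * removals are serialised by rxrpc_call_hash_lock; lookups walk the bucket
 * chains with the RCU-safe iterators.
 */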
static DEFINE_SPINLOCK(rxrpc_call_hash_lock);
static DEFINE_HASHTABLE(rxrpc_call_hash, 10);

/*
 * Hash function for rxrpc_call_hash
 */
static unsigned long rxrpc_call_hashfunc(
        u8 in_clientflag,
        u32 cid,
        u32 call_id,
        u32 epoch,
        u16 service_id,
        sa_family_t proto,
        void *localptr,
        unsigned int addr_size,
        const u8 *peer_addr)
{
        const u16 *p;
        unsigned int i;
        unsigned long key;

        _enter("");

        key = (unsigned long)localptr;
        /* We just want to add up the __be32 values, so forcing the
         * cast should be okay.
         */
        key += epoch;
        key += service_id;
        key += call_id;
        key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
        key += cid & RXRPC_CHANNELMASK;
        key += in_clientflag;
        key += proto;
        /* Step through the peer address in 16-bit portions for speed */
        for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
                key += *p;
        _leave(" key = 0x%lx", key);
        return key;
}
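
/* The key above is a simple additive mix of the identifying fields, so
 * collisions are possible; they are resolved by the exhaustive
 * field-by-field comparison in rxrpc_find_call_hash().
 */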

/*
 * Add a call to the hashtable
 */
static void rxrpc_call_hash_add(struct rxrpc_call *call)
{
        unsigned long key;
        unsigned int addr_size = 0;

        _enter("");
        switch (call->proto) {
        case AF_INET:
                addr_size = sizeof(call->peer_ip.ipv4_addr);
                break;
        case AF_INET6:
                addr_size = sizeof(call->peer_ip.ipv6_addr);
                break;
        default:
                break;
        }
        key = rxrpc_call_hashfunc(call->in_clientflag, call->cid,
                                  call->call_id, call->epoch,
                                  call->service_id, call->proto,
                                  call->conn->trans->local, addr_size,
                                  call->peer_ip.ipv6_addr);
        /* Store the full key in the call */
        call->hash_key = key;
        spin_lock(&rxrpc_call_hash_lock);
        hash_add_rcu(rxrpc_call_hash, &call->hash_node, key);
        spin_unlock(&rxrpc_call_hash_lock);
        _leave("");
}

/*
 * Remove a call from the hashtable
 */
static void rxrpc_call_hash_del(struct rxrpc_call *call)
{
        _enter("");
        spin_lock(&rxrpc_call_hash_lock);
        hash_del_rcu(&call->hash_node);
        spin_unlock(&rxrpc_call_hash_lock);
        _leave("");
}

/*
 * Find a call in the hashtable and return it, or NULL if it
 * isn't there.
 */
struct rxrpc_call *rxrpc_find_call_hash(
        struct rxrpc_host_header *hdr,
        void *localptr,
        sa_family_t proto,
        const void *peer_addr)
{
        unsigned long key;
        unsigned int addr_size = 0;
        struct rxrpc_call *call = NULL;
        struct rxrpc_call *ret = NULL;
        u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED;

        _enter("");
        switch (proto) {
        case AF_INET:
                addr_size = sizeof(call->peer_ip.ipv4_addr);
                break;
        case AF_INET6:
                addr_size = sizeof(call->peer_ip.ipv6_addr);
                break;
        default:
                break;
        }

        key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber,
                                  hdr->epoch, hdr->serviceId,
                                  proto, localptr, addr_size,
                                  peer_addr);
        hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
                if (call->hash_key == key &&
                    call->call_id == hdr->callNumber &&
                    call->cid == hdr->cid &&
                    call->in_clientflag == in_clientflag &&
                    call->service_id == hdr->serviceId &&
                    call->proto == proto &&
                    call->local == localptr &&
                    memcmp(call->peer_ip.ipv6_addr, peer_addr,
                           addr_size) == 0 &&
                    call->epoch == hdr->epoch) {
                        ret = call;
                        break;
                }
        }
        _leave(" = %p", ret);
        return ret;
}

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
        struct rxrpc_call *call;

        call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
        if (!call)
                return NULL;

        call->acks_winsz = 16;
        call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
                                    gfp);
        if (!call->acks_window) {
                kmem_cache_free(rxrpc_call_jar, call);
                return NULL;
        }

        setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
                    (unsigned long) call);
        setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
                    (unsigned long) call);
        setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
                    (unsigned long) call);
        setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
                    (unsigned long) call);
        INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
        INIT_WORK(&call->processor, &rxrpc_process_call);
        INIT_LIST_HEAD(&call->accept_link);
        skb_queue_head_init(&call->rx_queue);
        skb_queue_head_init(&call->rx_oos_queue);
        init_waitqueue_head(&call->tx_waitq);
        spin_lock_init(&call->lock);
        rwlock_init(&call->state_lock);
        atomic_set(&call->usage, 1);
        call->debug_id = atomic_inc_return(&rxrpc_debug_id);
        call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

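        /* Poison sock_node (0xed) so that use of the call before it has been
         * inserted into a socket's call tree is easy to spot (presumably the
         * other poison values, 0xdd and 0xcd below, distinguish the release
         * and cleanup stages).
         */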
        memset(&call->sock_node, 0xed, sizeof(call->sock_node));

        call->rx_data_expect = 1;
        call->rx_data_eaten = 0;
        call->rx_first_oos = 0;
        call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
        call->creation_jif = jiffies;
        return call;
}
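
/* Note that rxrpc_alloc_call() leaves all four timers initialised but
 * unarmed; the lifetimer is only started once the call is fully set up, and
 * the client state preset above is overridden for incoming (server) calls.
 */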

/*
 * allocate a new client call and attempt to get a connection slot for it
 */
static struct rxrpc_call *rxrpc_alloc_client_call(
        struct rxrpc_sock *rx,
        struct rxrpc_transport *trans,
        struct rxrpc_conn_bundle *bundle,
        gfp_t gfp)
{
        struct rxrpc_call *call;
        int ret;

        _enter("");

        ASSERT(rx != NULL);
        ASSERT(trans != NULL);
        ASSERT(bundle != NULL);

        call = rxrpc_alloc_call(gfp);
        if (!call)
                return ERR_PTR(-ENOMEM);

        sock_hold(&rx->sk);
        call->socket = rx;
        call->rx_data_post = 1;

        ret = rxrpc_connect_call(rx, trans, bundle, call, gfp);
        if (ret < 0) {
                /* The call was never exposed to anyone else, so undo the
                 * setup done above: drop the socket reference and free the
                 * ACK window before returning the call to the slab.
                 */
                sock_put(&rx->sk);
                kfree(call->acks_window);
                kmem_cache_free(rxrpc_call_jar, call);
                return ERR_PTR(ret);
        }

        /* Record copies of information for hashtable lookup */
        call->proto = rx->proto;
        call->local = trans->local;
        switch (call->proto) {
        case AF_INET:
                call->peer_ip.ipv4_addr =
                        trans->peer->srx.transport.sin.sin_addr.s_addr;
                break;
        case AF_INET6:
                memcpy(call->peer_ip.ipv6_addr,
                       trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
                       sizeof(call->peer_ip.ipv6_addr));
                break;
        }
        call->epoch = call->conn->epoch;
        call->service_id = call->conn->service_id;
        call->in_clientflag = call->conn->in_clientflag;
        /* Add the new call to the hashtable */
        rxrpc_call_hash_add(call);

        spin_lock(&call->conn->trans->peer->lock);
        list_add(&call->error_link, &call->conn->trans->peer->error_targets);
        spin_unlock(&call->conn->trans->peer->lock);

        call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
        add_timer(&call->lifetimer);

        _leave(" = %p", call);
        return call;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
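 * - searches the socket's call tree under the read lock first; on a miss it
 *   allocates a candidate and then repeats the search under the write lock,
 *   so a racing insertion of the same user ID is caught before linking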
 */
struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
                                         struct rxrpc_transport *trans,
                                         struct rxrpc_conn_bundle *bundle,
                                         unsigned long user_call_ID,
                                         int create,
                                         gfp_t gfp)
{
        struct rxrpc_call *call, *candidate;
        struct rb_node *p, *parent, **pp;

        _enter("%p,%d,%d,%lx,%d",
               rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
               user_call_ID, create);

        /* search the extant calls first for one that matches the specified
         * user ID */
        read_lock(&rx->call_lock);

        p = rx->calls.rb_node;
        while (p) {
                call = rb_entry(p, struct rxrpc_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        p = p->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        p = p->rb_right;
                else
                        goto found_extant_call;
        }

        read_unlock(&rx->call_lock);

        if (!create || !trans)
                return ERR_PTR(-EBADSLT);

        /* not yet present - create a candidate for a new record and then
         * redo the search */
        candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
        if (IS_ERR(candidate)) {
                _leave(" = %ld", PTR_ERR(candidate));
                return candidate;
        }

        candidate->user_call_ID = user_call_ID;
        __set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags);

        write_lock(&rx->call_lock);

        pp = &rx->calls.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                call = rb_entry(parent, struct rxrpc_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto found_extant_second;
        }

        /* second search also failed; add the new call */
        call = candidate;
        candidate = NULL;
        rxrpc_get_call(call);

        rb_link_node(&call->sock_node, parent, pp);
        rb_insert_color(&call->sock_node, &rx->calls);
        write_unlock(&rx->call_lock);

        write_lock_bh(&rxrpc_call_lock);
        list_add_tail(&call->link, &rxrpc_calls);
        write_unlock_bh(&rxrpc_call_lock);

        _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

        _leave(" = %p [new]", call);
        return call;

        /* we found the call in the tree on the first search */
found_extant_call:
        rxrpc_get_call(call);
        read_unlock(&rx->call_lock);
        _leave(" = %p [extant %d]", call, atomic_read(&call->usage));
        return call;

        /* we found the call on the second search; ditch the candidate */
found_extant_second:
        rxrpc_get_call(call);
        write_unlock(&rx->call_lock);
        rxrpc_put_call(candidate);
        _leave(" = %p [second %d]", call, atomic_read(&call->usage));
        return call;
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
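 * - the candidate call is allocated before conn->lock is taken, as the
 *   allocation may sleep and the lock excludes softirqs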
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
                                       struct rxrpc_connection *conn,
                                       struct rxrpc_host_header *hdr)
{
        struct rxrpc_call *call, *candidate;
        struct rb_node **p, *parent;
        u32 call_id;

        _enter(",%d", conn->debug_id);

        ASSERT(rx != NULL);

        candidate = rxrpc_alloc_call(GFP_NOIO);
        if (!candidate)
                return ERR_PTR(-EBUSY);

        candidate->socket = rx;
        candidate->conn = conn;
        candidate->cid = hdr->cid;
        candidate->call_id = hdr->callNumber;
        candidate->channel = hdr->cid & RXRPC_CHANNELMASK;
        candidate->rx_data_post = 0;
        candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
        if (conn->security_ix > 0)
                candidate->state = RXRPC_CALL_SERVER_SECURING;

        write_lock_bh(&conn->lock);

        /* set the channel for this call */
        call = conn->channels[candidate->channel];
        _debug("channel[%u] is %p", candidate->channel, call);
        if (call && call->call_id == hdr->callNumber) {
                /* already set; must've been a duplicate packet */
                _debug("extant call [%d]", call->state);
                ASSERTCMP(call->conn, ==, conn);

                read_lock(&call->state_lock);
                switch (call->state) {
                case RXRPC_CALL_LOCALLY_ABORTED:
                        if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
                                rxrpc_queue_call(call);
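                        /* fall through */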
                case RXRPC_CALL_REMOTELY_ABORTED:
                        read_unlock(&call->state_lock);
                        goto aborted_call;
                default:
                        rxrpc_get_call(call);
                        read_unlock(&call->state_lock);
                        goto extant_call;
                }
        }

        if (call) {
                /* it seems the channel is still in use from the previous call
                 * - ditch the old binding if its call is now complete */
                _debug("CALL: %u { %s }",
                       call->debug_id, rxrpc_call_states[call->state]);

                if (call->state >= RXRPC_CALL_COMPLETE) {
                        conn->channels[call->channel] = NULL;
                } else {
                        write_unlock_bh(&conn->lock);
                        kmem_cache_free(rxrpc_call_jar, candidate);
                        _leave(" = -EBUSY");
                        return ERR_PTR(-EBUSY);
                }
        }

        /* check the call number isn't duplicate */
        _debug("check dup");
        call_id = hdr->callNumber;
        p = &conn->calls.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                call = rb_entry(parent, struct rxrpc_call, conn_node);

                /* The tree is sorted in order of the __be32 value without
                 * turning it into host order.
                 */
                if (call_id < call->call_id)
                        p = &(*p)->rb_left;
                else if (call_id > call->call_id)
                        p = &(*p)->rb_right;
                else
                        goto old_call;
        }

        /* make the call available */
        _debug("new call");
        call = candidate;
        candidate = NULL;
        rb_link_node(&call->conn_node, parent, p);
        rb_insert_color(&call->conn_node, &conn->calls);
        conn->channels[call->channel] = call;
        sock_hold(&rx->sk);
        atomic_inc(&conn->usage);
        write_unlock_bh(&conn->lock);

        spin_lock(&conn->trans->peer->lock);
        list_add(&call->error_link, &conn->trans->peer->error_targets);
        spin_unlock(&conn->trans->peer->lock);

        write_lock_bh(&rxrpc_call_lock);
        list_add_tail(&call->link, &rxrpc_calls);
        write_unlock_bh(&rxrpc_call_lock);

        /* Record copies of information for hashtable lookup */
        call->proto = rx->proto;
        call->local = conn->trans->local;
        switch (call->proto) {
        case AF_INET:
                call->peer_ip.ipv4_addr =
                        conn->trans->peer->srx.transport.sin.sin_addr.s_addr;
                break;
        case AF_INET6:
                memcpy(call->peer_ip.ipv6_addr,
                       conn->trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
                       sizeof(call->peer_ip.ipv6_addr));
                break;
        default:
                break;
        }
        call->epoch = conn->epoch;
        call->service_id = conn->service_id;
        call->in_clientflag = conn->in_clientflag;
        /* Add the new call to the hashtable */
        rxrpc_call_hash_add(call);

        _net("CALL incoming %d on CONN %d",
             call->debug_id, call->conn->debug_id);

        call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
        add_timer(&call->lifetimer);
        _leave(" = %p {%d} [new]", call, call->debug_id);
        return call;

extant_call:
        write_unlock_bh(&conn->lock);
        kmem_cache_free(rxrpc_call_jar, candidate);
        _leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
        return call;

aborted_call:
        write_unlock_bh(&conn->lock);
        kmem_cache_free(rxrpc_call_jar, candidate);
        _leave(" = -ECONNABORTED");
        return ERR_PTR(-ECONNABORTED);

old_call:
        write_unlock_bh(&conn->lock);
        kmem_cache_free(rxrpc_call_jar, candidate);
        _leave(" = -ECONNRESET [old]");
        return ERR_PTR(-ECONNRESET);
}

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *rx,
                                          unsigned long user_call_ID)
{
        struct rxrpc_call *call;
        struct rb_node *p;

        _enter("%p,%lx", rx, user_call_ID);

        /* search the extant calls for one that matches the specified user
         * ID */
        read_lock(&rx->call_lock);

        p = rx->calls.rb_node;
        while (p) {
                call = rb_entry(p, struct rxrpc_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        p = p->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        p = p->rb_right;
                else
                        goto found_extant_call;
        }

        read_unlock(&rx->call_lock);
        _leave(" = NULL");
        return NULL;

        /* we found the call in the tree immediately */
found_extant_call:
        rxrpc_get_call(call);
        read_unlock(&rx->call_lock);
        _leave(" = %p [%d]", call, atomic_read(&call->usage));
        return call;
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
        struct rxrpc_connection *conn = call->conn;
        struct rxrpc_sock *rx = call->socket;

        _enter("{%d,%d,%d,%d}",
               call->debug_id, atomic_read(&call->usage),
               atomic_read(&call->ackr_not_idle),
               call->rx_first_oos);

        spin_lock_bh(&call->lock);
        if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
                BUG();
        spin_unlock_bh(&call->lock);

        /* dissociate from the socket
         * - the socket's ref on the call is passed to the death timer
         */
        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

        write_lock_bh(&rx->call_lock);
        if (!list_empty(&call->accept_link)) {
                _debug("unlinking once-pending call %p { e=%lx f=%lx }",
                       call, call->events, call->flags);
                ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
                list_del_init(&call->accept_link);
                sk_acceptq_removed(&rx->sk);
        } else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                rb_erase(&call->sock_node, &rx->calls);
                memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
                clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
        }
        write_unlock_bh(&rx->call_lock);

        /* free up the channel for reuse */
        spin_lock(&conn->trans->client_lock);
        write_lock_bh(&conn->lock);
        write_lock(&call->state_lock);

        if (conn->channels[call->channel] == call)
                conn->channels[call->channel] = NULL;

        if (conn->out_clientflag && conn->bundle) {
                conn->avail_calls++;
                switch (conn->avail_calls) {
                case 1:
                        list_move_tail(&conn->bundle_link,
                                       &conn->bundle->avail_conns);
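                        /* fall through */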
                case 2 ... RXRPC_MAXCALLS - 1:
                        ASSERT(conn->channels[0] == NULL ||
                               conn->channels[1] == NULL ||
                               conn->channels[2] == NULL ||
                               conn->channels[3] == NULL);
                        break;
                case RXRPC_MAXCALLS:
                        list_move_tail(&conn->bundle_link,
                                       &conn->bundle->unused_conns);
                        ASSERT(conn->channels[0] == NULL &&
                               conn->channels[1] == NULL &&
                               conn->channels[2] == NULL &&
                               conn->channels[3] == NULL);
                        break;
                default:
                        printk(KERN_ERR "RxRPC: conn->avail_calls=%d\n",
                               conn->avail_calls);
                        BUG();
                }
        }

        spin_unlock(&conn->trans->client_lock);

        if (call->state < RXRPC_CALL_COMPLETE &&
            call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
                _debug("+++ ABORTING STATE %d +++\n", call->state);
                call->state = RXRPC_CALL_LOCALLY_ABORTED;
                call->local_abort = RX_CALL_DEAD;
                set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                rxrpc_queue_call(call);
        }
        write_unlock(&call->state_lock);
        write_unlock_bh(&conn->lock);

        /* clean up the Rx queue */
        if (!skb_queue_empty(&call->rx_queue) ||
            !skb_queue_empty(&call->rx_oos_queue)) {
                struct rxrpc_skb_priv *sp;
                struct sk_buff *skb;

                _debug("purge Rx queues");

                spin_lock_bh(&call->lock);
                while ((skb = skb_dequeue(&call->rx_queue)) ||
                       (skb = skb_dequeue(&call->rx_oos_queue))) {
                        sp = rxrpc_skb(skb);
                        if (sp->call) {
                                ASSERTCMP(sp->call, ==, call);
                                rxrpc_put_call(call);
                                sp->call = NULL;
                        }
                        skb->destructor = NULL;
                        spin_unlock_bh(&call->lock);

                        _debug("- zap %s %%%u #%u",
                               rxrpc_pkts[sp->hdr.type],
                               sp->hdr.serial, sp->hdr.seq);
                        rxrpc_free_skb(skb);
                        spin_lock_bh(&call->lock);
                }
                spin_unlock_bh(&call->lock);

                ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
        }

        del_timer_sync(&call->resend_timer);
        del_timer_sync(&call->ack_timer);
        del_timer_sync(&call->lifetimer);
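        /* Hold the call open for a grace period so that anything still
         * referring to it can finish up; when the deadspan timer fires, the
         * call is marked RXRPC_CALL_DEAD and the reference the socket passed
         * to the timer is dropped (see rxrpc_dead_call_expired()).
         */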
        call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
        add_timer(&call->deadspan);

        _leave("");
}

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        _enter("{%d}", call->debug_id);

        write_lock_bh(&call->state_lock);
        call->state = RXRPC_CALL_DEAD;
        write_unlock_bh(&call->state_lock);
        rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
        bool sched;

        write_lock(&call->state_lock);
        if (call->state < RXRPC_CALL_DEAD) {
                sched = false;
                if (call->state < RXRPC_CALL_COMPLETE) {
                        _debug("abort call %p", call);
                        call->state = RXRPC_CALL_LOCALLY_ABORTED;
                        call->local_abort = RX_CALL_DEAD;
                        if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
                                sched = true;
                }
                if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
                        sched = true;
                if (sched)
                        rxrpc_queue_call(call);
        }
        write_unlock(&call->state_lock);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
        struct rxrpc_call *call;
        struct rb_node *p;

        _enter("%p", rx);

        read_lock_bh(&rx->call_lock);

        /* mark all the calls as no longer wanting incoming packets */
        for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
                call = rb_entry(p, struct rxrpc_call, sock_node);
                rxrpc_mark_call_released(call);
        }

        /* kill the not-yet-accepted incoming calls */
        list_for_each_entry(call, &rx->secureq, accept_link) {
                rxrpc_mark_call_released(call);
        }

        list_for_each_entry(call, &rx->acceptq, accept_link) {
                rxrpc_mark_call_released(call);
        }

        read_unlock_bh(&rx->call_lock);
        _leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
        ASSERT(call != NULL);

        _enter("%p{u=%d}", call, atomic_read(&call->usage));

        ASSERTCMP(atomic_read(&call->usage), >, 0);

        if (atomic_dec_and_test(&call->usage)) {
                _debug("call %d dead", call->debug_id);
                ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
                rxrpc_queue_work(&call->destroyer);
        }
        _leave("");
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
        _net("DESTROY CALL %d", call->debug_id);

        ASSERT(call->socket);

        memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

        del_timer_sync(&call->lifetimer);
        del_timer_sync(&call->deadspan);
        del_timer_sync(&call->ack_timer);
        del_timer_sync(&call->resend_timer);

        ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
        ASSERTCMP(call->events, ==, 0);
        if (work_pending(&call->processor)) {
                _debug("defer destroy");
                rxrpc_queue_work(&call->destroyer);
                return;
        }

        if (call->conn) {
                spin_lock(&call->conn->trans->peer->lock);
                list_del(&call->error_link);
                spin_unlock(&call->conn->trans->peer->lock);

                write_lock_bh(&call->conn->lock);
                rb_erase(&call->conn_node, &call->conn->calls);
                write_unlock_bh(&call->conn->lock);
                rxrpc_put_connection(call->conn);
        }

        /* Remove the call from the hash */
        rxrpc_call_hash_del(call);

        if (call->acks_window) {
                _debug("kill Tx window %d",
                       CIRC_CNT(call->acks_head, call->acks_tail,
                                call->acks_winsz));
                smp_mb();
                while (CIRC_CNT(call->acks_head, call->acks_tail,
                                call->acks_winsz) > 0) {
                        struct rxrpc_skb_priv *sp;
                        unsigned long _skb;

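                        /* Each window slot carries a flag in bit 0, so mask
                         * it off to recover the sk_buff pointer. */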
                        _skb = call->acks_window[call->acks_tail] & ~1;
                        sp = rxrpc_skb((struct sk_buff *)_skb);
                        _debug("+++ clear Tx %u", sp->hdr.seq);
                        rxrpc_free_skb((struct sk_buff *)_skb);
                        call->acks_tail =
                                (call->acks_tail + 1) & (call->acks_winsz - 1);
                }

                kfree(call->acks_window);
        }

        rxrpc_free_skb(call->tx_pending);

        rxrpc_purge_queue(&call->rx_queue);
        ASSERT(skb_queue_empty(&call->rx_oos_queue));
        sock_put(&call->socket->sk);
        kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
        struct rxrpc_call *call =
                container_of(work, struct rxrpc_call, destroyer);

        _enter("%p{%d,%d,%p}",
               call, atomic_read(&call->usage), call->channel, call->conn);

        ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

        write_lock_bh(&rxrpc_call_lock);
        list_del_init(&call->link);
        write_unlock_bh(&rxrpc_call_lock);

        rxrpc_cleanup_call(call);
        _leave("");
}

/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
        struct rxrpc_call *call;

        _enter("");
        write_lock_bh(&rxrpc_call_lock);

        while (!list_empty(&rxrpc_calls)) {
                call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
                _debug("Zapping call %p", call);

                list_del_init(&call->link);

                switch (atomic_read(&call->usage)) {
                case 0:
                        ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
                        break;
                case 1:
                        if (del_timer_sync(&call->deadspan) != 0 &&
                            call->state != RXRPC_CALL_DEAD)
                                rxrpc_dead_call_expired((unsigned long) call);
                        if (call->state != RXRPC_CALL_DEAD)
                                break;
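                        /* fall through */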
                default:
                        printk(KERN_ERR "RXRPC:"
                               " Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
                               call, atomic_read(&call->usage),
                               atomic_read(&call->ackr_not_idle),
                               rxrpc_call_states[call->state],
                               call->flags, call->events);
                        if (!skb_queue_empty(&call->rx_queue))
                                printk(KERN_ERR "RXRPC: Rx queue occupied\n");
                        if (!skb_queue_empty(&call->rx_oos_queue))
                                printk(KERN_ERR "RXRPC: OOS queue occupied\n");
                        break;
                }

                write_unlock_bh(&rxrpc_call_lock);
                cond_resched();
                write_lock_bh(&rxrpc_call_lock);
        }

        write_unlock_bh(&rxrpc_call_lock);
        _leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        _enter("{%d}", call->debug_id);
        read_lock_bh(&call->state_lock);
        if (call->state < RXRPC_CALL_COMPLETE) {
                set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
                rxrpc_queue_call(call);
        }
        read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        _enter("{%d}", call->debug_id);

        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
        if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
                rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        _enter("{%d}", call->debug_id);

        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        read_lock_bh(&call->state_lock);
        if (call->state < RXRPC_CALL_COMPLETE &&
            !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
                rxrpc_queue_call(call);
        read_unlock_bh(&call->state_lock);
}