/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);

/*
 * allocate a new client connection bundle
 */
static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp)
{
	struct rxrpc_conn_bundle *bundle;

	_enter("");

	bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp);
	if (bundle) {
		INIT_LIST_HEAD(&bundle->unused_conns);
		INIT_LIST_HEAD(&bundle->avail_conns);
		INIT_LIST_HEAD(&bundle->busy_conns);
		init_waitqueue_head(&bundle->chanwait);
		atomic_set(&bundle->usage, 1);
	}

	_leave(" = %p", bundle);
	return bundle;
}

/*
 * compare bundle parameters with what we're looking for
 * - return -ve, 0 or +ve
 */
static inline
int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle,
		     struct key *key, u16 service_id)
{
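	/* Order by service ID first, then by key pointer. */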
	return (bundle->service_id - service_id) ?:
		((unsigned long)bundle->key - (unsigned long)key);
}

/*
 * get bundle of client connections that a client socket can make use of
 */
struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
					   struct rxrpc_transport *trans,
					   struct key *key,
					   u16 service_id,
					   gfp_t gfp)
{
	struct rxrpc_conn_bundle *bundle, *candidate;
	struct rb_node *p, *parent, **pp;

	_enter("%p{%x},%x,%hx,",
	       rx, key_serial(key), trans->debug_id, service_id);

	/* search the extant bundles first for one that matches the specified
	 * user ID */
	spin_lock(&trans->client_lock);

	p = trans->bundles.rb_node;
	while (p) {
		bundle = rb_entry(p, struct rxrpc_conn_bundle, node);

		if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
			p = p->rb_left;
		else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
			p = p->rb_right;
		else
			goto found_extant_bundle;
	}

	spin_unlock(&trans->client_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_bundle(gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	candidate->key = key_get(key);
	candidate->service_id = service_id;

	spin_lock(&trans->client_lock);

	pp = &trans->bundles.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		bundle = rb_entry(parent, struct rxrpc_conn_bundle, node);

		if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
			pp = &(*pp)->rb_left;
		else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* second search also failed; add the new bundle */
	bundle = candidate;
	candidate = NULL;

	rb_link_node(&bundle->node, parent, pp);
	rb_insert_color(&bundle->node, &trans->bundles);
	spin_unlock(&trans->client_lock);
	_net("BUNDLE new on trans %d", trans->debug_id);
	_leave(" = %p [new]", bundle);
	return bundle;

	/* we found the bundle in the list immediately */
found_extant_bundle:
	atomic_inc(&bundle->usage);
	spin_unlock(&trans->client_lock);
	_net("BUNDLE old on trans %d", trans->debug_id);
	_leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage));
	return bundle;

	/* we found the bundle on the second time through the list */
found_extant_second:
	atomic_inc(&bundle->usage);
	spin_unlock(&trans->client_lock);
	kfree(candidate);
	_net("BUNDLE old2 on trans %d", trans->debug_id);
	_leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage));
	return bundle;
}

/*
 * release a bundle
 */
void rxrpc_put_bundle(struct rxrpc_transport *trans,
		      struct rxrpc_conn_bundle *bundle)
{
	_enter("%p,%p{%d}", trans, bundle, atomic_read(&bundle->usage));

	if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) {
		_debug("Destroy bundle");
		rb_erase(&bundle->node, &trans->bundles);
		spin_unlock(&trans->client_lock);
		ASSERT(list_empty(&bundle->unused_conns));
		ASSERT(list_empty(&bundle->avail_conns));
		ASSERT(list_empty(&bundle->busy_conns));
		ASSERTCMP(bundle->num_conns, ==, 0);
		key_put(bundle->key);
		kfree(bundle);
	}

	_leave("");
}

/*
 * allocate a new connection
 */
static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
		INIT_LIST_HEAD(&conn->bundle_link);
		conn->calls = RB_ROOT;
		skb_queue_head_init(&conn->rx_queue);
		conn->security = &rxrpc_no_security;
		rwlock_init(&conn->lock);
		spin_lock_init(&conn->state_lock);
		atomic_set(&conn->usage, 1);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->avail_calls = RXRPC_MAXCALLS;
		conn->size_align = 4;
		conn->header_size = sizeof(struct rxrpc_wire_header);
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}

/*
 * assign a connection ID to a connection and add it to the transport's
 * connection lookup tree
 * - called with transport client lock held
 */
static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *xconn;
	struct rb_node *parent, **p;
	u32 epoch;
	u32 cid;

	_enter("");

	epoch = conn->epoch;

	write_lock_bh(&conn->trans->conn_lock);

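	/* Client connection IDs are handed out in steps of RXRPC_CID_INC,
	 * leaving the low-order bits free for the channel number to be OR'd
	 * in when a call is attached.
	 */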
	conn->trans->conn_idcounter += RXRPC_CID_INC;
	if (conn->trans->conn_idcounter < RXRPC_CID_INC)
		conn->trans->conn_idcounter = RXRPC_CID_INC;
	cid = conn->trans->conn_idcounter;

attempt_insertion:
	parent = NULL;
	p = &conn->trans->client_conns.rb_node;

	while (*p) {
		parent = *p;
		xconn = rb_entry(parent, struct rxrpc_connection, node);

		if (epoch < xconn->epoch)
			p = &(*p)->rb_left;
		else if (epoch > xconn->epoch)
			p = &(*p)->rb_right;
		else if (cid < xconn->cid)
			p = &(*p)->rb_left;
		else if (cid > xconn->cid)
			p = &(*p)->rb_right;
		else
			goto id_exists;
	}

	/* we've found a suitable hole - arrange for this connection to occupy
	 * it */
	rb_link_node(&conn->node, parent, p);
	rb_insert_color(&conn->node, &conn->trans->client_conns);

	conn->cid = cid;
	write_unlock_bh(&conn->trans->conn_lock);
	_leave(" [CID %x]", cid);
	return;

	/* we found a connection with the proposed ID - walk the tree from that
	 * point looking for the next unused ID */
id_exists:
	for (;;) {
		cid += RXRPC_CID_INC;
		if (cid < RXRPC_CID_INC) {
			cid = RXRPC_CID_INC;
			conn->trans->conn_idcounter = cid;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xconn = rb_entry(parent, struct rxrpc_connection, node);
		if (epoch < xconn->epoch ||
		    cid < xconn->cid)
			goto attempt_insertion;
	}
}

/*
 * add a call to a connection's call-by-ID tree
 */
static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
				      struct rxrpc_call *call)
{
	struct rxrpc_call *xcall;
	struct rb_node *parent, **p;
	u32 call_id;

	write_lock_bh(&conn->lock);

	call_id = call->call_id;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		xcall = rb_entry(parent, struct rxrpc_call, conn_node);

		if (call_id < xcall->call_id)
			p = &(*p)->rb_left;
		else if (call_id > xcall->call_id)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);

	write_unlock_bh(&conn->lock);
}

/*
 * connect a call on an exclusive connection
 */
static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
				   struct rxrpc_transport *trans,
				   u16 service_id,
				   struct rxrpc_call *call,
				   gfp_t gfp)
{
	struct rxrpc_connection *conn;
	int chan, ret;

	_enter("");

	conn = rx->conn;
	if (!conn) {
		/* not yet present - create a candidate for a new connection
		 * and then redo the check */
		conn = rxrpc_alloc_connection(gfp);
		if (!conn) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		conn->trans = trans;
		conn->bundle = NULL;
		conn->service_id = service_id;
		conn->epoch = rxrpc_epoch;
		conn->in_clientflag = 0;
		conn->out_clientflag = RXRPC_CLIENT_INITIATED;
		conn->cid = 0;
		conn->state = RXRPC_CONN_CLIENT;
		conn->avail_calls = RXRPC_MAXCALLS - 1;
		conn->security_level = rx->min_sec_level;
		conn->key = key_get(rx->key);

		ret = rxrpc_init_client_conn_security(conn);
		if (ret < 0) {
			key_put(conn->key);
			kfree(conn);
			_leave(" = %d [key]", ret);
			return ret;
		}

		write_lock_bh(&rxrpc_connection_lock);
		list_add_tail(&conn->link, &rxrpc_connections);
		write_unlock_bh(&rxrpc_connection_lock);

		spin_lock(&trans->client_lock);
		atomic_inc(&trans->usage);

		_net("CONNECT EXCL new %d on TRANS %d",
		     conn->debug_id, conn->trans->debug_id);

		rxrpc_assign_connection_id(conn);
		rx->conn = conn;
	} else {
		spin_lock(&trans->client_lock);
	}

	/* we've got a connection with a free channel and we can now attach the
	 * call to it
	 * - we're holding the transport's client lock
	 * - we're holding a reference on the connection
	 */
	for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
		if (!conn->channels[chan])
			goto found_channel;
	goto no_free_channels;

found_channel:
	atomic_inc(&conn->usage);
	conn->channels[chan] = call;
	call->conn = conn;
	call->channel = chan;
	call->cid = conn->cid | chan;
	call->call_id = ++conn->call_counter;

	_net("CONNECT client on conn %d chan %d as call %x",
	     conn->debug_id, chan, call->call_id);

	spin_unlock(&trans->client_lock);

	rxrpc_add_call_ID_to_conn(conn, call);
	_leave(" = 0");
	return 0;

no_free_channels:
	spin_unlock(&trans->client_lock);
	_leave(" = -ENOSR");
	return -ENOSR;
}

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_sock *rx,
		       struct rxrpc_transport *trans,
		       struct rxrpc_conn_bundle *bundle,
		       struct rxrpc_call *call,
		       gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate;
	int chan, ret;

	DECLARE_WAITQUEUE(myself, current);

	_enter("%p,%lx,", rx, call->user_call_ID);

	if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags))
		return rxrpc_connect_exclusive(rx, trans, bundle->service_id,
					       call, gfp);

	spin_lock(&trans->client_lock);
	for (;;) {
		/* see if the bundle has a call slot available */
		if (!list_empty(&bundle->avail_conns)) {
			_debug("avail");
			conn = list_entry(bundle->avail_conns.next,
					  struct rxrpc_connection,
					  bundle_link);
			if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
				list_del_init(&conn->bundle_link);
				bundle->num_conns--;
				continue;
			}
			if (--conn->avail_calls == 0)
				list_move(&conn->bundle_link,
					  &bundle->busy_conns);
			ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			atomic_inc(&conn->usage);
			break;
		}

		if (!list_empty(&bundle->unused_conns)) {
			_debug("unused");
			conn = list_entry(bundle->unused_conns.next,
					  struct rxrpc_connection,
					  bundle_link);
			if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
				list_del_init(&conn->bundle_link);
				bundle->num_conns--;
				continue;
			}
			ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS);
			conn->avail_calls = RXRPC_MAXCALLS - 1;
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			atomic_inc(&conn->usage);
			list_move(&conn->bundle_link, &bundle->avail_conns);
			break;
		}

		/* need to allocate a new connection */
		_debug("get new conn [%d]", bundle->num_conns);

		spin_unlock(&trans->client_lock);

		if (signal_pending(current))
			goto interrupted;

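		/* Cap the number of connections in a bundle; if we've hit the
		 * limit, wait for a channel to become free instead of
		 * allocating yet another connection.
		 */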
		if (bundle->num_conns >= 20) {
			_debug("too many conns");

			if (!gfpflags_allow_blocking(gfp)) {
				_leave(" = -EAGAIN");
				return -EAGAIN;
			}

			add_wait_queue(&bundle->chanwait, &myself);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (bundle->num_conns < 20 ||
				    !list_empty(&bundle->unused_conns) ||
				    !list_empty(&bundle->avail_conns))
					break;
				if (signal_pending(current))
					goto interrupted_dequeue;
				schedule();
			}
			remove_wait_queue(&bundle->chanwait, &myself);
			__set_current_state(TASK_RUNNING);
			spin_lock(&trans->client_lock);
			continue;
		}

		/* not yet present - create a candidate for a new connection and then
		 * redo the check */
		candidate = rxrpc_alloc_connection(gfp);
		if (!candidate) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		candidate->trans = trans;
		candidate->bundle = bundle;
		candidate->service_id = bundle->service_id;
		candidate->epoch = rxrpc_epoch;
		candidate->in_clientflag = 0;
		candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
		candidate->cid = 0;
		candidate->state = RXRPC_CONN_CLIENT;
		candidate->avail_calls = RXRPC_MAXCALLS;
		candidate->security_level = rx->min_sec_level;
		candidate->key = key_get(bundle->key);

		ret = rxrpc_init_client_conn_security(candidate);
		if (ret < 0) {
			key_put(candidate->key);
			kfree(candidate);
			_leave(" = %d [key]", ret);
			return ret;
		}

		write_lock_bh(&rxrpc_connection_lock);
		list_add_tail(&candidate->link, &rxrpc_connections);
		write_unlock_bh(&rxrpc_connection_lock);

		spin_lock(&trans->client_lock);

		list_add(&candidate->bundle_link, &bundle->unused_conns);
		bundle->num_conns++;
		atomic_inc(&bundle->usage);
		atomic_inc(&trans->usage);

		_net("CONNECT new %d on TRANS %d",
		     candidate->debug_id, candidate->trans->debug_id);

		rxrpc_assign_connection_id(candidate);
		candidate->security->prime_packet_security(candidate);

		/* leave the candidate lurking in zombie mode attached to the
		 * bundle until we're ready for it */
		rxrpc_put_connection(candidate);
		candidate = NULL;
	}

	/* we've got a connection with a free channel and we can now attach the
	 * call to it
	 * - we're holding the transport's client lock
	 * - we're holding a reference on the connection
	 * - we're holding a reference on the bundle
	 */
	for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
		if (!conn->channels[chan])
			goto found_channel;
	ASSERT(conn->channels[0] == NULL ||
	       conn->channels[1] == NULL ||
	       conn->channels[2] == NULL ||
	       conn->channels[3] == NULL);
	BUG();

found_channel:
	conn->channels[chan] = call;
	call->conn = conn;
	call->channel = chan;
	call->cid = conn->cid | chan;
	call->call_id = ++conn->call_counter;

	_net("CONNECT client on conn %d chan %d as call %x",
	     conn->debug_id, chan, call->call_id);

	ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
	spin_unlock(&trans->client_lock);

	rxrpc_add_call_ID_to_conn(conn, call);

	_leave(" = 0");
	return 0;

interrupted_dequeue:
	remove_wait_queue(&bundle->chanwait, &myself);
	__set_current_state(TASK_RUNNING);
interrupted:
	_leave(" = -ERESTARTSYS");
	return -ERESTARTSYS;
}

/*
 * get a record of an incoming connection
 */
struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *trans,
			  struct rxrpc_host_header *hdr)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rb_node *p, **pp;
	const char *new = "old";
	u32 epoch;
	u32 cid;

	_enter("");

	ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED);

	epoch = hdr->epoch;
	cid = hdr->cid & RXRPC_CIDMASK;

	/* search the connection list first */
	read_lock_bh(&trans->conn_lock);

	p = trans->server_conns.rb_node;
	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, node);

		_debug("maybe %x", conn->cid);

		if (epoch < conn->epoch)
			p = p->rb_left;
		else if (epoch > conn->epoch)
			p = p->rb_right;
		else if (cid < conn->cid)
			p = p->rb_left;
		else if (cid > conn->cid)
			p = p->rb_right;
		else
			goto found_extant_connection;
	}
	read_unlock_bh(&trans->conn_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_connection(GFP_NOIO);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	candidate->trans = trans;
	candidate->epoch = hdr->epoch;
	candidate->cid = hdr->cid & RXRPC_CIDMASK;
	candidate->service_id = hdr->serviceId;
	candidate->security_ix = hdr->securityIndex;
	candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
	candidate->out_clientflag = 0;
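	/* A connection bound to a service starts out unsecured; it only
	 * becomes a fully-fledged server connection once security
	 * negotiation has completed elsewhere.
	 */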
	candidate->state = RXRPC_CONN_SERVER;
	if (candidate->service_id)
		candidate->state = RXRPC_CONN_SERVER_UNSECURED;

	write_lock_bh(&trans->conn_lock);

	pp = &trans->server_conns.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		conn = rb_entry(p, struct rxrpc_connection, node);

		if (epoch < conn->epoch)
			pp = &(*pp)->rb_left;
		else if (epoch > conn->epoch)
			pp = &(*pp)->rb_right;
		else if (cid < conn->cid)
			pp = &(*pp)->rb_left;
		else if (cid > conn->cid)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	conn = candidate;
	candidate = NULL;
	rb_link_node(&conn->node, p, pp);
	rb_insert_color(&conn->node, &trans->server_conns);
	atomic_inc(&conn->trans->usage);

	write_unlock_bh(&trans->conn_lock);

	write_lock_bh(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock_bh(&rxrpc_connection_lock);

	new = "new";

success:
	_net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->cid);

	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return conn;

	/* we found the connection in the list immediately */
found_extant_connection:
	if (hdr->securityIndex != conn->security_ix) {
		read_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	read_unlock_bh(&trans->conn_lock);
	goto success;

	/* we found the connection on the second time through the list */
found_extant_second:
	if (hdr->securityIndex != conn->security_ix) {
		write_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	write_unlock_bh(&trans->conn_lock);
	kfree(candidate);
	goto success;

security_mismatch:
	kfree(candidate);
	_leave(" = -EKEYREJECTED");
	return ERR_PTR(-EKEYREJECTED);
}

/*
 * find a connection based on transport and RxRPC connection ID for an incoming
 * packet
 */
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
					       struct rxrpc_host_header *hdr)
{
	struct rxrpc_connection *conn;
	struct rb_node *p;
	u32 epoch, cid;

	_enter(",{%x,%x}", hdr->cid, hdr->flags);

	read_lock_bh(&trans->conn_lock);

	cid = hdr->cid & RXRPC_CIDMASK;
	epoch = hdr->epoch;

	if (hdr->flags & RXRPC_CLIENT_INITIATED)
		p = trans->server_conns.rb_node;
	else
		p = trans->client_conns.rb_node;

	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, node);

		_debug("maybe %x", conn->cid);

		if (epoch < conn->epoch)
			p = p->rb_left;
		else if (epoch > conn->epoch)
			p = p->rb_right;
		else if (cid < conn->cid)
			p = p->rb_left;
		else if (cid > conn->cid)
			p = p->rb_right;
		else
			goto found;
	}

	read_unlock_bh(&trans->conn_lock);
	_leave(" = NULL");
	return NULL;

found:
	atomic_inc(&conn->usage);
	read_unlock_bh(&trans->conn_lock);
	_leave(" = %p", conn);
	return conn;
}

/*
 * release a virtual connection
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	_enter("%p{u=%d,d=%d}",
	       conn, atomic_read(&conn->usage), conn->debug_id);

	ASSERTCMP(atomic_read(&conn->usage), >, 0);

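	/* Note the time of this put; if it turns out to be the final
	 * reference, the reaper uses it to work out when the connection
	 * expires.
	 */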
	conn->put_time = ktime_get_seconds();
	if (atomic_dec_and_test(&conn->usage)) {
		_debug("zombie");
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	}

	_leave("");
}

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
{
	_enter("%p{%d}", conn, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	if (conn->bundle)
		rxrpc_put_bundle(conn->trans, conn->bundle);

	ASSERT(RB_EMPTY_ROOT(&conn->calls));
	rxrpc_purge_queue(&conn->rx_queue);

	conn->security->clear(conn);
	key_put(conn->key);
	key_put(conn->server_key);

	rxrpc_put_transport(conn->trans);
	kfree(conn);
	_leave("");
}

/*
 * reap dead connections
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = ktime_get_seconds();
	earliest = ULONG_MAX;

	write_lock_bh(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long) now - (long) conn->put_time);

		if (likely(atomic_read(&conn->usage) > 0))
			continue;

		spin_lock(&conn->trans->client_lock);
		write_lock(&conn->trans->conn_lock);
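		/* A connection expires rxrpc_connection_expiry seconds after
		 * its last put; track the earliest pending expiry so the
		 * reaper can be rescheduled for it.
		 */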
		reap_time = conn->put_time + rxrpc_connection_expiry;

		if (atomic_read(&conn->usage) > 0) {
			;
		} else if (reap_time <= now) {
			list_move_tail(&conn->link, &graveyard);
			if (conn->out_clientflag)
				rb_erase(&conn->node,
					 &conn->trans->client_conns);
			else
				rb_erase(&conn->node,
					 &conn->trans->server_conns);
			if (conn->bundle) {
				list_del_init(&conn->bundle_link);
				conn->bundle->num_conns--;
			}

		} else if (reap_time < earliest) {
			earliest = reap_time;
		}

		write_unlock(&conn->trans->conn_lock);
		spin_unlock(&conn->trans->client_lock);
	}
	write_unlock_bh(&rxrpc_connection_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_destroy_connection(conn);
	}

	_leave("");
}

/*
 * preemptively destroy all the connection records rather than waiting for them
 * to time out
 */
void __exit rxrpc_destroy_all_connections(void)
{
	_enter("");

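	/* Zero the expiry time and kick the reaper so that every unused
	 * connection is reaped immediately.
	 */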
	rxrpc_connection_expiry = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);

	_leave("");
}