/* Client connection-specific management code.
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * Client connections can be in one of a number of cache states:
 *
 *  (1) INACTIVE - The connection is not held in any list and may not have been
 *      exposed to the world.  If it has been previously exposed, it was
 *      discarded from the idle list after expiring.
 *
 *  (2) WAITING - The connection is waiting for the number of client conns to
 *      drop below the maximum capacity.  Calls may be in progress upon it from
 *      when it was active and got culled.
 *
 *	The connection is on the rxnet->waiting_client_conns list which is
 *	kept in to-be-granted order.  Culled conns with waiters go to the back
 *	of the queue just like new conns.
 *
 *  (3) ACTIVE - The connection has at least one call in progress upon it, it
 *      may freely grant available channels to new calls and calls may be
 *      waiting on it for channels to become available.
 *
 *	The connection is on the rxnet->active_client_conns list which is kept
 *	in activation order for culling purposes.
 *
 *	rxnet->nr_active_client_conns is held incremented also.
 *
 *  (4) CULLED - The connection got summarily culled to try and free up
 *      capacity.  Calls currently in progress on the connection are allowed to
 *      continue, but new calls will have to wait.  There can be no waiters in
 *      this state - the conn would have to go to the WAITING state instead.
 *
 *  (5) IDLE - The connection has no calls in progress upon it and must have
 *      been exposed to the world (ie. the EXPOSED flag must be set).  When it
 *      expires, the EXPOSED flag is cleared and the connection transitions to
 *      the INACTIVE state.
 *
 *	The connection is on the rxnet->idle_client_conns list which is kept in
 *	order of how soon they'll expire.
 *
 * There are flags of relevance to the cache:
 *
 *  (1) EXPOSED - The connection ID got exposed to the world.  If this flag is
 *      set, an extra ref is added to the connection preventing it from being
 *      reaped when it has no calls outstanding.  This flag is cleared and the
 *      ref dropped when a conn is discarded from the idle list.
 *
 *      This allows us to move terminal call state retransmission to the
 *      connection and to discard the call immediately we think it is done
 *      with.  It also gives us a chance to reuse the connection.
 *
 *  (2) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */

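/* A compact, non-exhaustive sketch of the cache-state transitions described
 * above, in terms of the RXRPC_CONN_CLIENT_* values that this file assigns to
 * conn->cache_state:
 *
 *	INACTIVE/CULLED/IDLE -> ACTIVE	(new call, capacity available)
 *	INACTIVE/CULLED/IDLE -> WAITING	(new call, at capacity)
 *	WAITING -> ACTIVE		(capacity freed up)
 *	ACTIVE -> CULLED		(summarily culled, no waiters)
 *	ACTIVE -> WAITING		(culled with waiters)
 *	ACTIVE/CULLED -> IDLE		(last call gone, conn EXPOSED)
 *	ACTIVE/CULLED -> INACTIVE	(last call gone, not EXPOSED)
 *	IDLE -> INACTIVE		(expired; EXPOSED cleared, ref dropped)
 */
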
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

__read_mostly unsigned int rxrpc_max_client_connections = 1000;
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;

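/* Note how these tunables pair up with the expiry rules described above: the
 * normal two-minute expiry applies while the total number of client conns is
 * at or below the reap threshold of 900; above that, the two-second fast
 * expiry is used instead.  Both durations are in jiffies, hence the HZ
 * scaling.
 */
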
/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

static void rxrpc_cull_active_client_conns(struct rxrpc_net *);

/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch doesn't change until the client is rebooted (or, at least, unless the
 * module is unloaded).
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id;

	_enter("");

	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
			      1, 0x40000000, GFP_NOWAIT);
	if (id < 0)
		goto error;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = rxnet->epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x]", conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}

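/* A note on the allocation above: idr_alloc_cyclic() hands out IDs in the
 * range [1, 0x40000000), advancing a cursor so that recently-freed IDs are
 * not immediately reused.  The ID is shifted left by RXRPC_CIDSHIFT to form
 * conn->proto.cid, leaving the low bits free to carry a channel number when a
 * call's CID is composed later on.
 */
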
/*
 * Release a connection ID for a client connection from the global pool.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
		spin_lock(&rxrpc_conn_id_lock);
		idr_remove(&rxrpc_client_conn_ids,
			   conn->proto.cid >> RXRPC_CIDSHIFT);
		spin_unlock(&rxrpc_conn_id_lock);
	}
}

/*
 * Destroy the client connection ID tree.
 */
void rxrpc_destroy_client_conn_ids(void)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, atomic_read(&conn->usage));
		}
		BUG();
	}

	idr_destroy(&rxrpc_client_conn_ids);
}

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&conn->usage, 1);
	if (cp->exclusive)
		__set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);

	conn->params = *cp;
	conn->out_clientflag = RXRPC_CLIENT_INITIATED;
	conn->state = RXRPC_CONN_CLIENT;
	conn->service_id = cp->service_id;

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	/* We steal the caller's peer ref. */
	cp->peer = NULL;
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	trace_rxrpc_conn(conn, rxrpc_conn_new_client, atomic_read(&conn->usage),
			 __builtin_return_address(0));
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	_leave(" = %p", conn);
	return conn;

error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id_cursor, id, distance, limit;

	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if (conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max(rxrpc_max_client_connections * 4, 1024U);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}

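/* A worked example of the distance check above: with the default
 * rxrpc_max_client_connections of 1000, the limit comes out as
 * max(4 * 1000, 1024U) = 4000, so a conn whose ID lies more than 4000 slots
 * from the IDR cursor is marked DONT_REUSE to keep the ID space compact.
 */
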
/*
 * Create or find a client connection to use for a call.
 *
 * If we return with a connection, the call will be on its waiting list.  It's
 * left to the caller to assign a channel and wake up the call.
 */
static int rxrpc_get_client_conn(struct rxrpc_call *call,
				 struct rxrpc_conn_parameters *cp,
				 struct sockaddr_rxrpc *srx,
				 gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	int ret = -ENOMEM;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
	if (!cp->peer)
		goto error;

	/* If the connection is not meant to be exclusive, search the available
	 * connections to see if the connection we want to use already exists.
	 */
	if (!cp->exclusive) {
		_debug("search 1");
		spin_lock(&local->client_conns_lock);
		p = local->client_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)cp->X)
			diff = (cmp(peer) ?:
				cmp(key) ?:
				cmp(security_level));
#undef cmp
			if (diff < 0) {
				p = p->rb_left;
			} else if (diff > 0) {
				p = p->rb_right;
			} else {
				if (rxrpc_may_reuse_conn(conn) &&
				    rxrpc_get_connection_maybe(conn))
					goto found_extant_conn;
				/* The connection needs replacing.  It's better
				 * to effect that when we have something to
				 * replace it with so that we don't have to
				 * rebalance the tree twice.
				 */
				break;
			}
		}
		spin_unlock(&local->client_conns_lock);
	}

	/* There wasn't a connection yet or we need an exclusive connection.
	 * We need to create a candidate and then potentially redo the search
	 * in case we're racing with another thread also trying to connect on a
	 * shareable connection.
	 */
	_debug("new conn");
	candidate = rxrpc_alloc_client_connection(cp, gfp);
	if (IS_ERR(candidate)) {
		ret = PTR_ERR(candidate);
		goto error_peer;
	}

	/* Add the call to the new connection's waiting list in case we're
	 * going to have to wait for the connection to come live.  It's our
	 * connection, so we want first dibs on the channel slots.  We would
	 * normally have to take channel_lock but we do this before anyone else
	 * can see the connection.
	 */
	list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);

	if (cp->exclusive) {
		call->conn = candidate;
		call->security_ix = candidate->security_ix;
		call->service_id = candidate->service_id;
		_leave(" = 0 [exclusive %d]", candidate->debug_id);
		return 0;
	}

	/* Publish the new connection for userspace to find.  We need to redo
	 * the search before doing this lest we race with someone else adding a
	 * conflicting instance.
	 */
	_debug("search 2");
	spin_lock(&local->client_conns_lock);

	pp = &local->client_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		conn = rb_entry(parent, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)candidate->params.X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level));
#undef cmp
		if (diff < 0) {
			pp = &(*pp)->rb_left;
		} else if (diff > 0) {
			pp = &(*pp)->rb_right;
		} else {
			if (rxrpc_may_reuse_conn(conn) &&
			    rxrpc_get_connection_maybe(conn))
				goto found_extant_conn;
			/* The old connection is from an outdated epoch. */
			_debug("replace conn");
			clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
			rb_replace_node(&conn->client_node,
					&candidate->client_node,
					&local->client_conns);
			trace_rxrpc_client(conn, -1, rxrpc_client_replace);
			goto candidate_published;
		}
	}

	_debug("new conn");
	rb_link_node(&candidate->client_node, parent, pp);
	rb_insert_color(&candidate->client_node, &local->client_conns);

candidate_published:
	set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
	call->conn = candidate;
	call->security_ix = candidate->security_ix;
	call->service_id = candidate->service_id;
	spin_unlock(&local->client_conns_lock);
	_leave(" = 0 [new %d]", candidate->debug_id);
	return 0;

	/* We come here if we found a suitable connection already in existence.
	 * Discard any candidate we may have allocated, and try to get a
	 * channel on this one.
	 */
found_extant_conn:
	_debug("found conn");
	spin_unlock(&local->client_conns_lock);

	if (candidate) {
		trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
		rxrpc_put_connection(candidate);
		candidate = NULL;
	}

	spin_lock(&conn->channel_lock);
	call->conn = conn;
	call->security_ix = conn->security_ix;
	call->service_id = conn->service_id;
	list_add(&call->chan_wait_link, &conn->waiting_calls);
	spin_unlock(&conn->channel_lock);
	_leave(" = 0 [extant %d]", conn->debug_id);
	return 0;

error_peer:
	rxrpc_put_peer(cp->peer);
	cp->peer = NULL;
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * Activate a connection.
 */
static void rxrpc_activate_conn(struct rxrpc_net *rxnet,
				struct rxrpc_connection *conn)
{
	trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
	conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
	rxnet->nr_active_client_conns++;
	list_move_tail(&conn->cache_link, &rxnet->active_client_conns);
}

/*
 * Attempt to animate a connection for a new call.
 *
 * If it's not exclusive, the connection is in the endpoint tree, and we're in
 * the conn's list of those waiting to grab a channel.  There is, however, a
 * limit on the number of live connections allowed at any one time, so we may
 * have to wait for capacity to become available.
 *
 * Note that a connection on the waiting queue might *also* have active
 * channels if it has been culled to make space and then re-requested by a new
 * call.
 */
static void rxrpc_animate_client_conn(struct rxrpc_net *rxnet,
				      struct rxrpc_connection *conn)
{
	unsigned int nr_conns;

	_enter("%d,%d", conn->debug_id, conn->cache_state);

	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE)
		goto out;

	spin_lock(&rxnet->client_conn_cache_lock);

	nr_conns = rxnet->nr_client_conns;
	if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_count);
		rxnet->nr_client_conns = nr_conns + 1;
	}

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
	case RXRPC_CONN_CLIENT_WAITING:
		break;

	case RXRPC_CONN_CLIENT_INACTIVE:
	case RXRPC_CONN_CLIENT_CULLED:
	case RXRPC_CONN_CLIENT_IDLE:
		if (nr_conns >= rxrpc_max_client_connections)
			goto wait_for_capacity;
		goto activate_conn;

	default:
		BUG();
	}

out_unlock:
	spin_unlock(&rxnet->client_conn_cache_lock);
out:
	_leave(" [%d]", conn->cache_state);
	return;

activate_conn:
	_debug("activate");
	rxrpc_activate_conn(rxnet, conn);
	goto out_unlock;

wait_for_capacity:
	_debug("wait");
	trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
	conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
	list_move_tail(&conn->cache_link, &rxnet->waiting_client_conns);
	goto out_unlock;
}

/*
 * Deactivate a channel.
 */
static void rxrpc_deactivate_one_channel(struct rxrpc_connection *conn,
					 unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];

	rcu_assign_pointer(chan->call, NULL);
	conn->active_chans &= ~(1 << channel);
}

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_call *call = list_entry(conn->waiting_calls.next,
					     struct rxrpc_call, chan_wait_link);
	u32 call_id = chan->call_counter + 1;

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
	write_unlock_bh(&call->state_lock);

	rxrpc_see_call(call);
	list_del_init(&call->chan_wait_link);
	conn->active_chans |= 1 << channel;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cid = conn->proto.cid | channel;
	call->call_id = call_id;

	trace_rxrpc_connect_call(call);
	_net("CONNECT call %08x:%08x as call %d on conn %d",
	     call->cid, call->call_id, call->debug_id, conn->debug_id);

	/* Paired with the read barrier in rxrpc_wait_for_channel().  This
	 * orders cid and epoch in the connection wrt to call_id without the
	 * need to take the channel_lock.
	 *
	 * We provisionally assign a callNumber at this point, but we don't
	 * confirm it until the call is about to be exposed.
	 *
	 * TODO: Pair with a barrier in the data_ready handler when that looks
	 * at the call ID through a connection channel.
	 */
	smp_wmb();
	chan->call_id = call_id;
	rcu_assign_pointer(chan->call, call);
	wake_up(&call->waitq);
}

/*
 * Assign channels and callNumbers to waiting calls with channel_lock held by
 * the caller.
 */
static void rxrpc_activate_channels_locked(struct rxrpc_connection *conn)
{
	u8 avail, mask;

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
		mask = RXRPC_ACTIVE_CHANS_MASK;
		break;
	default:
		return;
	}

	while (!list_empty(&conn->waiting_calls) &&
	       (avail = ~conn->active_chans,
		avail &= mask,
		avail != 0))
		rxrpc_activate_one_channel(conn, __ffs(avail));
}

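/* A worked example of the channel selection above, assuming the mask covers
 * the four channels per connection that rxrpc allows: with active_chans ==
 * 0x5 (channels 0 and 2 busy), avail == ~0x5 & 0xf == 0xa and __ffs(0xa) == 1,
 * so the waiting call at the head of the queue is given channel 1, the
 * lowest-numbered free channel.
 */
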
/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_connection *conn)
{
	_enter("%d", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_activate_chans);

	if (conn->active_chans == RXRPC_ACTIVE_CHANS_MASK)
		return;

	spin_lock(&conn->channel_lock);
	rxrpc_activate_channels_locked(conn);
	spin_unlock(&conn->channel_lock);
	_leave("");
}

/*
 * Wait for a callNumber and a channel to be granted to a call.
 */
static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
{
	int ret = 0;

	_enter("%d", call->debug_id);

	if (!call->call_id) {
		DECLARE_WAITQUEUE(myself, current);

		if (!gfpflags_allow_blocking(gfp)) {
			ret = -EAGAIN;
			goto out;
		}

		add_wait_queue_exclusive(&call->waitq, &myself);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (call->call_id)
				break;
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}
		remove_wait_queue(&call->waitq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* Paired with the write barrier in rxrpc_activate_one_channel(). */
	smp_rmb();

out:
	_leave(" = %d", ret);
	return ret;
}

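/* The smp_rmb() above pairs with the smp_wmb() in
 * rxrpc_activate_one_channel(): once a woken waiter observes its
 * call->call_id, it can also safely read the cid and epoch that were stored
 * on the connection, without having to take channel_lock.
 */
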
/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper.work);
	rxrpc_cull_active_client_conns(rxnet);

	ret = rxrpc_get_client_conn(call, cp, srx, gfp);
	if (ret < 0)
		goto out;

	rxrpc_animate_client_conn(rxnet, call->conn);
	rxrpc_activate_channels(call->conn);

	ret = rxrpc_wait_for_channel(call, gfp);
	if (ret < 0)
		rxrpc_disconnect_client_call(call);

out:
	_leave(" = %d", ret);
	return ret;
}

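/* On a successful return from rxrpc_connect_call() the call holds a channel
 * and a provisional callNumber, granted by rxrpc_activate_one_channel(); on
 * failure (including -EAGAIN for a non-blocking caller that would otherwise
 * have had to wait) the call has been unpicked from the connection again.
 */
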
/*
 * Note that a connection is about to be exposed to the world.  Once it is
 * exposed, we maintain an extra ref on it that stops it from being summarily
 * discarded before it's (a) had a chance to deal with retransmission and (b)
 * had a chance at re-use (the per-connection security negotiation is
 * expensive).
 */
static void rxrpc_expose_client_conn(struct rxrpc_connection *conn,
				     unsigned int channel)
{
	if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
		rxrpc_get_connection(conn);
	}
}

/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used.  If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		rxrpc_expose_client_conn(conn, channel);
	}
}

/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&call->socket->sk));

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
	call->conn = NULL;

	spin_lock(&conn->channel_lock);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.  If the conn didn't get used either, it will follow
	 * immediately unless someone else grabs it in the meantime.
	 */
	if (!list_empty(&call->chan_wait_link)) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->chan_wait_link);

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_unstarted);

		/* We must deactivate or idle the connection if it's now
		 * waiting for nothing.
		 */
		spin_lock(&rxnet->client_conn_cache_lock);
		if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING &&
		    list_empty(&conn->waiting_calls) &&
		    !conn->active_chans)
			goto idle_connection;
		goto out;
	}

	ASSERTCMP(rcu_access_pointer(chan->call), ==, call);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);
	}

	/* See if we can pass the channel directly to another call. */
	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE &&
	    !list_empty(&conn->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out_2;
	}

	/* Things are more complex and we need the cache lock.  We might be
	 * able to simply idle the conn or it might now be lurking on the wait
	 * list.  It might even get moved back to the active list whilst we're
	 * waiting for the lock.
	 */
	spin_lock(&rxnet->client_conn_cache_lock);

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
		if (list_empty(&conn->waiting_calls)) {
			rxrpc_deactivate_one_channel(conn, channel);
			if (!conn->active_chans) {
				rxnet->nr_active_client_conns--;
				goto idle_connection;
			}
			goto out;
		}

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out;

	case RXRPC_CONN_CLIENT_CULLED:
		rxrpc_deactivate_one_channel(conn, channel);
		ASSERT(list_empty(&conn->waiting_calls));
		if (!conn->active_chans)
			goto idle_connection;
		goto out;

	case RXRPC_CONN_CLIENT_WAITING:
		rxrpc_deactivate_one_channel(conn, channel);
		break;

	default:
		BUG();
	}

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
out_2:
	spin_unlock(&conn->channel_lock);
	rxrpc_put_connection(conn);
	_leave("");
	return;

idle_connection:
	/* As no channels remain active, the connection gets deactivated
	 * immediately or moved to the idle list for a short while.
	 */
	if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;
		conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
		list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
		if (rxnet->idle_client_conns.next == &conn->cache_link &&
		    !rxnet->kill_all_client_conns)
			queue_delayed_work(rxrpc_workqueue,
					   &rxnet->client_conn_reaper,
					   rxrpc_conn_idle_client_expiry);
	} else {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
		conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
		list_del_init(&conn->cache_link);
	}
	goto out;
}

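/* Note the scheduling condition in idle_connection above: the reaper work
 * item is only queued if the conn just became the head of the idle list (ie.
 * the list was previously empty), since any earlier-expiring conn would
 * already have had the reaper scheduled on its behalf.
 */
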
/*
 * Clean up a dead client connection.
 */
static struct rxrpc_connection *
rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *next = NULL;
	struct rxrpc_local *local = conn->params.local;
	struct rxrpc_net *rxnet = local->rxnet;
	unsigned int nr_conns;

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);

	if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) {
		spin_lock(&local->client_conns_lock);
		if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS,
				       &conn->flags))
			rb_erase(&conn->client_node, &local->client_conns);
		spin_unlock(&local->client_conns_lock);
	}

	rxrpc_put_client_connection_id(conn);

	ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE);

	if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
		spin_lock(&rxnet->client_conn_cache_lock);
		nr_conns = --rxnet->nr_client_conns;

		if (nr_conns < rxrpc_max_client_connections &&
		    !list_empty(&rxnet->waiting_client_conns)) {
			next = list_entry(rxnet->waiting_client_conns.next,
					  struct rxrpc_connection, cache_link);
			rxrpc_get_connection(next);
			rxrpc_activate_conn(rxnet, next);
		}

		spin_unlock(&rxnet->client_conn_cache_lock);
	}

	rxrpc_kill_connection(conn);
	if (next)
		rxrpc_activate_channels(next);

	/* We need to get rid of the temporary ref we took upon next, but we
	 * can't call rxrpc_put_connection() recursively.
	 */
	return next;
}

/*
 * Clean up dead client connections.
 */
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n;

	do {
		n = atomic_dec_return(&conn->usage);
		trace_rxrpc_conn(conn, rxrpc_conn_put_client, n, here);
		if (n > 0)
			return;
		ASSERTCMP(n, >=, 0);

		conn = rxrpc_put_one_client_conn(conn);
	} while (conn);
}

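/* The do/while loop above is what makes the "can't call
 * rxrpc_put_connection() recursively" constraint in
 * rxrpc_put_one_client_conn() work: the temporary ref taken on the next
 * waiting conn is dropped by another pass round this loop rather than by a
 * nested put.
 */
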
/*
 * Kill the longest-active client connections to make room for new ones.
 */
static void rxrpc_cull_active_client_conns(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn;
	unsigned int nr_conns = rxnet->nr_client_conns;
	unsigned int nr_active, limit;

	_enter("");

	ASSERTCMP(nr_conns, >=, 0);
	if (nr_conns < rxrpc_max_client_connections) {
		_leave(" [ok]");
		return;
	}
	limit = rxrpc_reap_client_connections;

	spin_lock(&rxnet->client_conn_cache_lock);
	nr_active = rxnet->nr_active_client_conns;

	while (nr_active > limit) {
		ASSERT(!list_empty(&rxnet->active_client_conns));
		conn = list_entry(rxnet->active_client_conns.next,
				  struct rxrpc_connection, cache_link);
		ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_ACTIVE);

		if (list_empty(&conn->waiting_calls)) {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_culled);
			conn->cache_state = RXRPC_CONN_CLIENT_CULLED;
			list_del_init(&conn->cache_link);
		} else {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
			conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
			list_move_tail(&conn->cache_link,
				       &rxnet->waiting_client_conns);
		}

		nr_active--;
	}

	rxnet->nr_active_client_conns = nr_active;
	spin_unlock(&rxnet->client_conn_cache_lock);
	ASSERTCMP(nr_active, >=, 0);
	_leave(" [culled]");
}

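/* Culling drives the number of active conns down to the reap threshold
 * (rxrpc_reap_client_connections, 900 by default) rather than all the way to
 * zero, and a conn that still has waiting calls is moved back to the waiting
 * queue instead of being culled outright.
 */
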
/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet =
		container_of(to_delayed_work(work),
			     struct rxrpc_net, client_conn_reaper);
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;
	bool did_discard = false;

	_enter("");

	if (list_empty(&rxnet->idle_client_conns)) {
		_leave(" [empty]");
		return;
	}

	/* Don't double up on the discarding */
	if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
		_leave(" [already]");
		return;
	}

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = rxnet->nr_client_conns;

next:
	spin_lock(&rxnet->client_conn_cache_lock);

	if (list_empty(&rxnet->idle_client_conns))
		goto out;

	conn = list_entry(rxnet->idle_client_conns.next,
			  struct rxrpc_connection, cache_link);
	ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags));

	if (!rxnet->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = READ_ONCE(jiffies);
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
		BUG();
	conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
	list_del_init(&conn->cache_link);

	spin_unlock(&rxnet->client_conn_cache_lock);

	/* When we cleared the EXPOSED flag, we took on responsibility for the
	 * reference that that had on the usage count.  We deal with that here.
	 * If someone re-sets the flag and re-gets the ref, that's fine.
	 */
	rxrpc_put_connection(conn);
	did_discard = true;
	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!rxnet->kill_all_client_conns)
		queue_delayed_work(rxrpc_workqueue,
				   &rxnet->client_conn_reaper,
				   conn_expires_at - now);

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
	spin_unlock(&rxnet->client_conn_discard_lock);
	_leave("");
}

/*
 * Preemptively destroy all the client connection records rather than waiting
 * for them to time out.
 */
void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
{
	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);
	rxnet->kill_all_client_conns = true;
	spin_unlock(&rxnet->client_conn_cache_lock);

	cancel_delayed_work(&rxnet->client_conn_reaper);

	if (!queue_delayed_work(rxrpc_workqueue, &rxnet->client_conn_reaper, 0))
		_debug("destroy: queue failed");

	_leave("");
}