]>
Commit | Line | Data |
---|---|---|
45025bce | 1 | /* RxRPC virtual connection handler, common bits. |
17926a79 | 2 | * |
45025bce | 3 | * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved. |
17926a79 DH |
4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU General Public License | |
8 | * as published by the Free Software Foundation; either version | |
9 | * 2 of the License, or (at your option) any later version. | |
10 | */ | |
11 | ||
9b6d5398 JP |
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
13 | ||
17926a79 | 14 | #include <linux/module.h> |
5a0e3ad6 | 15 | #include <linux/slab.h> |
17926a79 DH |
16 | #include <linux/net.h> |
17 | #include <linux/skbuff.h> | |
17926a79 DH |
18 | #include "ar-internal.h" |
19 | ||
/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

/* Reaper work function; declared early so the delayed work below can bind to
 * it.
 */
static void rxrpc_connection_reaper(struct work_struct *work);

/* All extant connections, and the subset exposed through /proc.  Both lists
 * are guarded by rxrpc_connection_lock.
 */
LIST_HEAD(rxrpc_connections);
LIST_HEAD(rxrpc_connection_proc_list);
DEFINE_RWLOCK(rxrpc_connection_lock);

/* Delayed work item that periodically reaps idle service connections. */
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);

static void rxrpc_destroy_connection(struct rcu_head *);
17926a79 DH |
34 | /* |
35 | * allocate a new connection | |
36 | */ | |
c6d2b8d7 | 37 | struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) |
17926a79 DH |
38 | { |
39 | struct rxrpc_connection *conn; | |
40 | ||
41 | _enter(""); | |
42 | ||
43 | conn = kzalloc(sizeof(struct rxrpc_connection), gfp); | |
44 | if (conn) { | |
45025bce | 45 | INIT_LIST_HEAD(&conn->cache_link); |
999b69f8 | 46 | spin_lock_init(&conn->channel_lock); |
45025bce | 47 | INIT_LIST_HEAD(&conn->waiting_calls); |
17926a79 | 48 | INIT_WORK(&conn->processor, &rxrpc_process_connection); |
4d028b2c | 49 | INIT_LIST_HEAD(&conn->proc_link); |
999b69f8 | 50 | INIT_LIST_HEAD(&conn->link); |
17926a79 | 51 | skb_queue_head_init(&conn->rx_queue); |
e0e4d82f | 52 | conn->security = &rxrpc_no_security; |
17926a79 | 53 | spin_lock_init(&conn->state_lock); |
17926a79 | 54 | conn->debug_id = atomic_inc_return(&rxrpc_debug_id); |
17926a79 | 55 | conn->size_align = 4; |
f51b4480 | 56 | conn->idle_timestamp = jiffies; |
17926a79 DH |
57 | } |
58 | ||
16c61add | 59 | _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0); |
17926a79 DH |
60 | return conn; |
61 | } | |
62 | ||
17926a79 | 63 | /* |
8496af50 DH |
64 | * Look up a connection in the cache by protocol parameters. |
65 | * | |
66 | * If successful, a pointer to the connection is returned, but no ref is taken. | |
67 | * NULL is returned if there is no match. | |
68 | * | |
69 | * The caller must be holding the RCU read lock. | |
17926a79 | 70 | */ |
8496af50 DH |
71 | struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local, |
72 | struct sk_buff *skb) | |
17926a79 DH |
73 | { |
74 | struct rxrpc_connection *conn; | |
1291e9d1 | 75 | struct rxrpc_conn_proto k; |
42886ffe | 76 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
1291e9d1 DH |
77 | struct sockaddr_rxrpc srx; |
78 | struct rxrpc_peer *peer; | |
17926a79 | 79 | |
8496af50 | 80 | _enter(",%x", sp->hdr.cid & RXRPC_CIDMASK); |
17926a79 | 81 | |
1291e9d1 DH |
82 | if (rxrpc_extract_addr_from_skb(&srx, skb) < 0) |
83 | goto not_found; | |
17926a79 | 84 | |
8496af50 DH |
85 | k.epoch = sp->hdr.epoch; |
86 | k.cid = sp->hdr.cid & RXRPC_CIDMASK; | |
87 | ||
1291e9d1 DH |
88 | /* We may have to handle mixing IPv4 and IPv6 */ |
89 | if (srx.transport.family != local->srx.transport.family) { | |
90 | pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n", | |
91 | srx.transport.family, | |
92 | local->srx.transport.family); | |
93 | goto not_found; | |
94 | } | |
95 | ||
96 | k.epoch = sp->hdr.epoch; | |
97 | k.cid = sp->hdr.cid & RXRPC_CIDMASK; | |
17926a79 | 98 | |
4a3388c8 | 99 | if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) { |
1291e9d1 DH |
100 | /* We need to look up service connections by the full protocol |
101 | * parameter set. We look up the peer first as an intermediate | |
102 | * step and then the connection from the peer's tree. | |
103 | */ | |
104 | peer = rxrpc_lookup_peer_rcu(local, &srx); | |
105 | if (!peer) | |
106 | goto not_found; | |
8496af50 DH |
107 | conn = rxrpc_find_service_conn_rcu(peer, skb); |
108 | if (!conn || atomic_read(&conn->usage) == 0) | |
109 | goto not_found; | |
110 | _leave(" = %p", conn); | |
111 | return conn; | |
4a3388c8 | 112 | } else { |
8496af50 DH |
113 | /* Look up client connections by connection ID alone as their |
114 | * IDs are unique for this machine. | |
115 | */ | |
1291e9d1 | 116 | conn = idr_find(&rxrpc_client_conn_ids, |
8496af50 DH |
117 | sp->hdr.cid >> RXRPC_CIDSHIFT); |
118 | if (!conn || atomic_read(&conn->usage) == 0) { | |
119 | _debug("no conn"); | |
120 | goto not_found; | |
121 | } | |
122 | ||
123 | if (conn->proto.epoch != k.epoch || | |
1291e9d1 DH |
124 | conn->params.local != local) |
125 | goto not_found; | |
126 | ||
127 | peer = conn->params.peer; | |
128 | switch (srx.transport.family) { | |
129 | case AF_INET: | |
130 | if (peer->srx.transport.sin.sin_port != | |
131 | srx.transport.sin.sin_port || | |
132 | peer->srx.transport.sin.sin_addr.s_addr != | |
133 | srx.transport.sin.sin_addr.s_addr) | |
134 | goto not_found; | |
135 | break; | |
d1912747 | 136 | #ifdef CONFIG_AF_RXRPC_IPV6 |
75b54cb5 DH |
137 | case AF_INET6: |
138 | if (peer->srx.transport.sin6.sin6_port != | |
139 | srx.transport.sin6.sin6_port || | |
140 | memcmp(&peer->srx.transport.sin6.sin6_addr, | |
141 | &srx.transport.sin6.sin6_addr, | |
142 | sizeof(struct in6_addr)) != 0) | |
143 | goto not_found; | |
144 | break; | |
d1912747 | 145 | #endif |
1291e9d1 DH |
146 | default: |
147 | BUG(); | |
148 | } | |
149 | ||
1291e9d1 DH |
150 | _leave(" = %p", conn); |
151 | return conn; | |
17926a79 DH |
152 | } |
153 | ||
1291e9d1 | 154 | not_found: |
17926a79 DH |
155 | _leave(" = NULL"); |
156 | return NULL; | |
17926a79 DH |
157 | } |
158 | ||
/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	/* The low bits of the call's cid select which of the connection's
	 * channels carried it.
	 */
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		chan->last_service_id = call->service_id;
		if (call->abort_code) {
			/* Aborted call: remember the abort code so it can be
			 * re-sent as an ABORT packet.
			 */
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
		} else {
			/* Normal completion: remember the last hard-acked seq
			 * so the final ACK can be repeated.
			 */
			chan->last_seq = call->rx_hard_ack;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
		}
		/* Sync with rxrpc_conn_retransmit(). */
		smp_wmb();
		/* Publish the saved result under the previous call ID before
		 * advancing call_id to the channel's counter.
		 */
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;

		rcu_assign_pointer(chan->call, NULL);
	}

	_leave("");
}
/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	/* Remove the call from the peer's error-distribution list first. */
	spin_lock_bh(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock_bh(&conn->params.peer->lock);

	/* Client calls have their own disconnection path (conn caching). */
	if (rxrpc_is_client_call(call))
		return rxrpc_disconnect_client_call(call);

	spin_lock(&conn->channel_lock);
	__rxrpc_disconnect_call(conn, call);
	spin_unlock(&conn->channel_lock);

	call->conn = NULL;
	/* Restart the idle clock - the conn may now be reapable. */
	conn->idle_timestamp = jiffies;
	rxrpc_put_connection(conn);
}
/*
 * Kill off a connection.
 */
void rxrpc_kill_connection(struct rxrpc_connection *conn)
{
	/* By this point no channel may still hold a call and the conn must
	 * not be on the client-conn cache list.
	 */
	ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
	       !rcu_access_pointer(conn->channels[1].call) &&
	       !rcu_access_pointer(conn->channels[2].call) &&
	       !rcu_access_pointer(conn->channels[3].call));
	ASSERT(list_empty(&conn->cache_link));

	/* Unpublish from /proc. */
	write_lock(&rxrpc_connection_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxrpc_connection_lock);

	/* Drain the Rx queue.  Note that even though we've unpublished, an
	 * incoming packet could still be being added to our Rx queue, so we
	 * will need to drain it again in the RCU cleanup handler.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	/* Leave final destruction to RCU.  The connection processor work item
	 * must carry a ref on the connection to prevent us getting here whilst
	 * it is queued or running.
	 */
	call_rcu(&conn->rcu, rxrpc_destroy_connection);
}
/*
 * Queue a connection's work processor, getting a ref to pass to the work
 * queue.
 */
bool rxrpc_queue_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	/* Only take a ref if the conn isn't already dead (usage > 0). */
	int n = __atomic_add_unless(&conn->usage, 1, 0);
	if (n == 0)
		return false;
	if (rxrpc_queue_work(&conn->processor))
		trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
	else
		/* Already queued - the work item holds a ref, so drop ours. */
		rxrpc_put_connection(conn);
	return true;
}
264 | /* | |
265 | * Note the re-emergence of a connection. | |
266 | */ | |
267 | void rxrpc_see_connection(struct rxrpc_connection *conn) | |
268 | { | |
269 | const void *here = __builtin_return_address(0); | |
270 | if (conn) { | |
271 | int n = atomic_read(&conn->usage); | |
272 | ||
273 | trace_rxrpc_conn(conn, rxrpc_conn_seen, n, here); | |
274 | } | |
275 | } | |
276 | ||
277 | /* | |
278 | * Get a ref on a connection. | |
279 | */ | |
280 | void rxrpc_get_connection(struct rxrpc_connection *conn) | |
281 | { | |
282 | const void *here = __builtin_return_address(0); | |
283 | int n = atomic_inc_return(&conn->usage); | |
284 | ||
285 | trace_rxrpc_conn(conn, rxrpc_conn_got, n, here); | |
286 | } | |
287 | ||
288 | /* | |
289 | * Try to get a ref on a connection. | |
290 | */ | |
291 | struct rxrpc_connection * | |
292 | rxrpc_get_connection_maybe(struct rxrpc_connection *conn) | |
293 | { | |
294 | const void *here = __builtin_return_address(0); | |
295 | ||
296 | if (conn) { | |
297 | int n = __atomic_add_unless(&conn->usage, 1, 0); | |
298 | if (n > 0) | |
299 | trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here); | |
300 | else | |
301 | conn = NULL; | |
302 | } | |
303 | return conn; | |
304 | } | |
305 | ||
/*
 * Release a service connection
 */
void rxrpc_put_service_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_dec_return(&conn->usage);
	trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
	ASSERTCMP(n, >=, 0);
	/* Last ref gone: kick the reaper immediately to dispose of it. */
	if (n == 0)
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
}
/*
 * destroy a virtual connection
 *
 * RCU callback scheduled by rxrpc_kill_connection(); runs after a grace
 * period so no RCU reader can still see the connection.
 */
static void rxrpc_destroy_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);

	_enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	/* Re-drain the Rx queue: packets may have arrived between the drain
	 * in rxrpc_kill_connection() and the end of the grace period.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	/* Drop security state, keys and the refs pinning peer and local. */
	conn->security->clear(conn);
	key_put(conn->params.key);
	key_put(conn->server_key);
	rxrpc_put_peer(conn->params.peer);
	rxrpc_put_local(conn->params.local);

	kfree(conn);
	_leave("");
}
/*
 * reap dead service connections
 *
 * Walks the global connection list, kills any service connection that has
 * been idle longer than rxrpc_connection_expiry seconds, and reschedules
 * itself for the earliest remaining expiry.
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	unsigned long reap_older_than, earliest, idle_timestamp, now;

	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	reap_older_than = now - rxrpc_connection_expiry * HZ;
	earliest = ULONG_MAX;

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		ASSERTCMP(atomic_read(&conn->usage), >, 0);
		/* usage > 1 means someone besides the list holds a ref. */
		if (likely(atomic_read(&conn->usage) > 1))
			continue;
		/* Preallocated service conns are never reaped. */
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		idle_timestamp = READ_ONCE(conn->idle_timestamp);
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long)reap_older_than - (long)idle_timestamp);

		if (time_after(idle_timestamp, reap_older_than)) {
			/* Not yet expired; track the earliest expiry so we
			 * can reschedule the reaper for it.
			 */
			if (time_before(idle_timestamp, earliest))
				earliest = idle_timestamp;
			continue;
		}

		/* The usage count sits at 1 whilst the object is unused on the
		 * list; we reduce that to 0 to make the object unavailable.
		 */
		if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
			continue;

		/* Client conns are cleaned up by the client-conn code and
		 * should never reach this list with a zero count.
		 */
		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxrpc_connection_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERT(time_after(earliest, now));
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 earliest - now);
	}

	/* Dispose of the doomed conns outside the lock. */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_kill_connection(conn);
	}

	_leave("");
}
/*
 * preemptively destroy all the service connection records rather than
 * waiting for them to time out
 *
 * Called on module exit; any connection still on the list afterwards is a
 * leak and triggers a BUG.
 */
void __exit rxrpc_destroy_all_connections(void)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	rxrpc_destroy_all_client_connections();

	/* Force every idle service conn to be expired on the next reaper
	 * pass, then run that pass synchronously.
	 */
	rxrpc_connection_expiry = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, atomic_read(&conn->usage));
		leak = true;
	}
	write_unlock(&rxrpc_connection_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxrpc_connection_proc_list));

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();
	rxrpc_destroy_client_conn_ids();

	_leave("");
}