]>
Commit | Line | Data |
---|---|---|
17926a79 DH |
1 | /* RxRPC individual remote procedure call handling |
2 | * | |
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | |
4 | * Written by David Howells (dhowells@redhat.com) | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU General Public License | |
8 | * as published by the Free Software Foundation; either version | |
9 | * 2 of the License, or (at your option) any later version. | |
10 | */ | |
11 | ||
9b6d5398 JP |
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
13 | ||
5a0e3ad6 | 14 | #include <linux/slab.h> |
17926a79 DH |
15 | #include <linux/module.h> |
16 | #include <linux/circ_buf.h> | |
7727640c TS |
17 | #include <linux/hashtable.h> |
18 | #include <linux/spinlock_types.h> | |
17926a79 DH |
19 | #include <net/sock.h> |
20 | #include <net/af_rxrpc.h> | |
21 | #include "ar-internal.h" | |
22 | ||
/*
 * Maximum lifetime of a call (in jiffies).
 * NOTE(review): presumably exposed as a sysctl elsewhere — confirm; only the
 * definition is visible here.
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).  The socket's
 * final ref on a released call is held until this delay elapses (see
 * rxrpc_release_call() arming call->deadspan).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;
/* Human-readable names for each call state, indexed by enum; used in debug
 * and error messages (e.g. rxrpc_destroy_all_calls()).  Strings are padded
 * to eight characters for aligned output.
 */
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};
53 | ||
/* Slab cache for struct rxrpc_call allocations. */
struct kmem_cache *rxrpc_call_jar;

/* Global list of all extant calls, guarded by rxrpc_call_lock; walked by
 * rxrpc_destroy_all_calls() at module exit.
 */
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

/* Timer and workqueue callbacks defined later in this file. */
static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

/* Hashtable mapping protocol parameters -> call, for incoming-packet lookup.
 * Additions/removals are serialised by rxrpc_call_hash_lock; lookups use RCU.
 */
static DEFINE_SPINLOCK(rxrpc_call_hash_lock);
static DEFINE_HASHTABLE(rxrpc_call_hash, 10);
66 | ||
/*
 * Hash function for rxrpc_call_hash.
 *
 * Folds the protocol identity of a call (connection ID, call ID, epoch,
 * service ID, client flag, address family), the local endpoint pointer and
 * the peer address into a single key.  The inputs are host-order integers
 * (they were converted from wire order at extraction time), so a simple sum
 * suffices; distribution quality is not critical here.
 */
static unsigned long rxrpc_call_hashfunc(
	u8		in_clientflag,
	u32		cid,
	u32		call_id,
	u32		epoch,
	u16		service_id,
	sa_family_t	family,
	void		*localptr,
	unsigned int	addr_size,
	const u8	*peer_addr)
{
	const u16 *p;
	unsigned int i;
	unsigned long key;

	_enter("");

	key = (unsigned long)localptr;
	key += epoch;
	key += service_id;
	key += call_id;
	key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
	key += cid & RXRPC_CHANNELMASK;
	key += in_clientflag;
	key += family;
	/* Step through the peer address in 16-bit portions for speed.
	 * addr_size is always even (4 for IPv4, 16 for IPv6), so no trailing
	 * byte is dropped.
	 */
	for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
		key += *p;
	_leave(" key = 0x%lx", key);
	return key;
}
104 | ||
105 | /* | |
106 | * Add a call to the hashtable | |
107 | */ | |
108 | static void rxrpc_call_hash_add(struct rxrpc_call *call) | |
109 | { | |
110 | unsigned long key; | |
111 | unsigned int addr_size = 0; | |
112 | ||
113 | _enter(""); | |
19ffa01c | 114 | switch (call->family) { |
7727640c TS |
115 | case AF_INET: |
116 | addr_size = sizeof(call->peer_ip.ipv4_addr); | |
117 | break; | |
118 | case AF_INET6: | |
119 | addr_size = sizeof(call->peer_ip.ipv6_addr); | |
120 | break; | |
121 | default: | |
122 | break; | |
123 | } | |
124 | key = rxrpc_call_hashfunc(call->in_clientflag, call->cid, | |
125 | call->call_id, call->epoch, | |
19ffa01c | 126 | call->service_id, call->family, |
85f32278 | 127 | call->conn->params.local, addr_size, |
7727640c TS |
128 | call->peer_ip.ipv6_addr); |
129 | /* Store the full key in the call */ | |
130 | call->hash_key = key; | |
131 | spin_lock(&rxrpc_call_hash_lock); | |
132 | hash_add_rcu(rxrpc_call_hash, &call->hash_node, key); | |
133 | spin_unlock(&rxrpc_call_hash_lock); | |
134 | _leave(""); | |
135 | } | |
136 | ||
137 | /* | |
138 | * Remove a call from the hashtable | |
139 | */ | |
140 | static void rxrpc_call_hash_del(struct rxrpc_call *call) | |
141 | { | |
142 | _enter(""); | |
143 | spin_lock(&rxrpc_call_hash_lock); | |
144 | hash_del_rcu(&call->hash_node); | |
145 | spin_unlock(&rxrpc_call_hash_lock); | |
146 | _leave(""); | |
147 | } | |
148 | ||
149 | /* | |
150 | * Find a call in the hashtable and return it, or NULL if it | |
151 | * isn't there. | |
152 | */ | |
153 | struct rxrpc_call *rxrpc_find_call_hash( | |
0d12f8a4 | 154 | struct rxrpc_host_header *hdr, |
7727640c | 155 | void *localptr, |
19ffa01c | 156 | sa_family_t family, |
0d12f8a4 | 157 | const void *peer_addr) |
7727640c TS |
158 | { |
159 | unsigned long key; | |
160 | unsigned int addr_size = 0; | |
161 | struct rxrpc_call *call = NULL; | |
162 | struct rxrpc_call *ret = NULL; | |
0d12f8a4 | 163 | u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED; |
7727640c TS |
164 | |
165 | _enter(""); | |
19ffa01c | 166 | switch (family) { |
7727640c TS |
167 | case AF_INET: |
168 | addr_size = sizeof(call->peer_ip.ipv4_addr); | |
169 | break; | |
170 | case AF_INET6: | |
171 | addr_size = sizeof(call->peer_ip.ipv6_addr); | |
172 | break; | |
173 | default: | |
174 | break; | |
175 | } | |
176 | ||
0d12f8a4 DH |
177 | key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber, |
178 | hdr->epoch, hdr->serviceId, | |
19ffa01c | 179 | family, localptr, addr_size, |
7727640c TS |
180 | peer_addr); |
181 | hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) { | |
182 | if (call->hash_key == key && | |
0d12f8a4 DH |
183 | call->call_id == hdr->callNumber && |
184 | call->cid == hdr->cid && | |
185 | call->in_clientflag == in_clientflag && | |
186 | call->service_id == hdr->serviceId && | |
19ffa01c | 187 | call->family == family && |
7727640c TS |
188 | call->local == localptr && |
189 | memcmp(call->peer_ip.ipv6_addr, peer_addr, | |
0d12f8a4 DH |
190 | addr_size) == 0 && |
191 | call->epoch == hdr->epoch) { | |
7727640c TS |
192 | ret = call; |
193 | break; | |
194 | } | |
195 | } | |
196 | _leave(" = %p", ret); | |
197 | return ret; | |
198 | } | |
199 | ||
/*
 * find an extant server call by its user-supplied ID
 * - called in process context with IRQs enabled
 * - on success, returns the call with a reference held (taken before the
 *   rbtree lock is dropped, so the call cannot go away under the caller);
 *   the caller must put it
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	/* Binary search of the socket's calls, keyed by user_call_ID */
	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
236 | ||
17926a79 DH |
237 | /* |
238 | * allocate a new call | |
239 | */ | |
240 | static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) | |
241 | { | |
242 | struct rxrpc_call *call; | |
243 | ||
244 | call = kmem_cache_zalloc(rxrpc_call_jar, gfp); | |
245 | if (!call) | |
246 | return NULL; | |
247 | ||
248 | call->acks_winsz = 16; | |
249 | call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long), | |
250 | gfp); | |
251 | if (!call->acks_window) { | |
252 | kmem_cache_free(rxrpc_call_jar, call); | |
253 | return NULL; | |
254 | } | |
255 | ||
256 | setup_timer(&call->lifetimer, &rxrpc_call_life_expired, | |
257 | (unsigned long) call); | |
258 | setup_timer(&call->deadspan, &rxrpc_dead_call_expired, | |
259 | (unsigned long) call); | |
260 | setup_timer(&call->ack_timer, &rxrpc_ack_time_expired, | |
261 | (unsigned long) call); | |
262 | setup_timer(&call->resend_timer, &rxrpc_resend_time_expired, | |
263 | (unsigned long) call); | |
264 | INIT_WORK(&call->destroyer, &rxrpc_destroy_call); | |
265 | INIT_WORK(&call->processor, &rxrpc_process_call); | |
999b69f8 | 266 | INIT_LIST_HEAD(&call->link); |
17926a79 DH |
267 | INIT_LIST_HEAD(&call->accept_link); |
268 | skb_queue_head_init(&call->rx_queue); | |
269 | skb_queue_head_init(&call->rx_oos_queue); | |
270 | init_waitqueue_head(&call->tx_waitq); | |
271 | spin_lock_init(&call->lock); | |
272 | rwlock_init(&call->state_lock); | |
273 | atomic_set(&call->usage, 1); | |
274 | call->debug_id = atomic_inc_return(&rxrpc_debug_id); | |
17926a79 DH |
275 | |
276 | memset(&call->sock_node, 0xed, sizeof(call->sock_node)); | |
277 | ||
278 | call->rx_data_expect = 1; | |
279 | call->rx_data_eaten = 0; | |
280 | call->rx_first_oos = 0; | |
817913d8 | 281 | call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size; |
17926a79 DH |
282 | call->creation_jif = jiffies; |
283 | return call; | |
284 | } | |
285 | ||
286 | /* | |
999b69f8 | 287 | * Allocate a new client call. |
17926a79 | 288 | */ |
aa390bbe DH |
289 | static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx, |
290 | struct sockaddr_rxrpc *srx, | |
291 | gfp_t gfp) | |
17926a79 DH |
292 | { |
293 | struct rxrpc_call *call; | |
17926a79 DH |
294 | |
295 | _enter(""); | |
296 | ||
999b69f8 | 297 | ASSERT(rx->local != NULL); |
17926a79 DH |
298 | |
299 | call = rxrpc_alloc_call(gfp); | |
300 | if (!call) | |
301 | return ERR_PTR(-ENOMEM); | |
999b69f8 | 302 | call->state = RXRPC_CALL_CLIENT_AWAIT_CONN; |
17926a79 DH |
303 | |
304 | sock_hold(&rx->sk); | |
305 | call->socket = rx; | |
306 | call->rx_data_post = 1; | |
307 | ||
7727640c | 308 | /* Record copies of information for hashtable lookup */ |
19ffa01c | 309 | call->family = rx->family; |
999b69f8 | 310 | call->local = rx->local; |
19ffa01c | 311 | switch (call->family) { |
7727640c | 312 | case AF_INET: |
999b69f8 | 313 | call->peer_ip.ipv4_addr = srx->transport.sin.sin_addr.s_addr; |
7727640c TS |
314 | break; |
315 | case AF_INET6: | |
316 | memcpy(call->peer_ip.ipv6_addr, | |
999b69f8 | 317 | srx->transport.sin6.sin6_addr.in6_u.u6_addr8, |
7727640c TS |
318 | sizeof(call->peer_ip.ipv6_addr)); |
319 | break; | |
320 | } | |
999b69f8 DH |
321 | |
322 | call->service_id = srx->srx_service; | |
323 | call->in_clientflag = 0; | |
324 | ||
325 | _leave(" = %p", call); | |
326 | return call; | |
327 | } | |
328 | ||
/*
 * Begin client call.
 *
 * Binds the call to a connection (allocating or reusing one via
 * rxrpc_connect_call()), publishes it in the call hashtable and on the
 * peer's error-target list, and arms the call lifetime timer.
 *
 * Returns 0 or a negative error from rxrpc_connect_call(); on error the
 * call is left unbound and the caller must dispose of it.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	/* Make the call visible to peer-level error distribution */
	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}
359 | ||
360 | /* | |
361 | * set up a call for the given data | |
362 | * - called in process context with IRQs enabled | |
363 | */ | |
2341e077 | 364 | struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, |
19ffa01c | 365 | struct rxrpc_conn_parameters *cp, |
999b69f8 | 366 | struct sockaddr_rxrpc *srx, |
17926a79 | 367 | unsigned long user_call_ID, |
17926a79 DH |
368 | gfp_t gfp) |
369 | { | |
2341e077 DH |
370 | struct rxrpc_call *call, *xcall; |
371 | struct rb_node *parent, **pp; | |
999b69f8 | 372 | int ret; |
17926a79 | 373 | |
999b69f8 | 374 | _enter("%p,%lx", rx, user_call_ID); |
17926a79 | 375 | |
aa390bbe | 376 | call = rxrpc_alloc_client_call(rx, srx, gfp); |
2341e077 DH |
377 | if (IS_ERR(call)) { |
378 | _leave(" = %ld", PTR_ERR(call)); | |
379 | return call; | |
17926a79 DH |
380 | } |
381 | ||
999b69f8 | 382 | /* Publish the call, even though it is incompletely set up as yet */ |
2341e077 DH |
383 | call->user_call_ID = user_call_ID; |
384 | __set_bit(RXRPC_CALL_HAS_USERID, &call->flags); | |
17926a79 DH |
385 | |
386 | write_lock(&rx->call_lock); | |
387 | ||
388 | pp = &rx->calls.rb_node; | |
389 | parent = NULL; | |
390 | while (*pp) { | |
391 | parent = *pp; | |
2341e077 | 392 | xcall = rb_entry(parent, struct rxrpc_call, sock_node); |
17926a79 | 393 | |
2341e077 | 394 | if (user_call_ID < xcall->user_call_ID) |
17926a79 | 395 | pp = &(*pp)->rb_left; |
2341e077 | 396 | else if (user_call_ID > xcall->user_call_ID) |
17926a79 DH |
397 | pp = &(*pp)->rb_right; |
398 | else | |
2341e077 | 399 | goto found_user_ID_now_present; |
17926a79 DH |
400 | } |
401 | ||
17926a79 DH |
402 | rxrpc_get_call(call); |
403 | ||
404 | rb_link_node(&call->sock_node, parent, pp); | |
405 | rb_insert_color(&call->sock_node, &rx->calls); | |
406 | write_unlock(&rx->call_lock); | |
407 | ||
408 | write_lock_bh(&rxrpc_call_lock); | |
409 | list_add_tail(&call->link, &rxrpc_calls); | |
410 | write_unlock_bh(&rxrpc_call_lock); | |
411 | ||
aa390bbe | 412 | ret = rxrpc_begin_client_call(call, cp, srx, gfp); |
999b69f8 DH |
413 | if (ret < 0) |
414 | goto error; | |
415 | ||
17926a79 DH |
416 | _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id); |
417 | ||
418 | _leave(" = %p [new]", call); | |
419 | return call; | |
420 | ||
999b69f8 DH |
421 | error: |
422 | write_lock(&rx->call_lock); | |
423 | rb_erase(&call->sock_node, &rx->calls); | |
424 | write_unlock(&rx->call_lock); | |
425 | rxrpc_put_call(call); | |
426 | ||
427 | write_lock_bh(&rxrpc_call_lock); | |
d1e858c5 | 428 | list_del_init(&call->link); |
999b69f8 DH |
429 | write_unlock_bh(&rxrpc_call_lock); |
430 | ||
d1e858c5 | 431 | call->state = RXRPC_CALL_DEAD; |
999b69f8 DH |
432 | rxrpc_put_call(call); |
433 | _leave(" = %d", ret); | |
434 | return ERR_PTR(ret); | |
435 | ||
2341e077 DH |
436 | /* We unexpectedly found the user ID in the list after taking |
437 | * the call_lock. This shouldn't happen unless the user races | |
438 | * with itself and tries to add the same user ID twice at the | |
439 | * same time in different threads. | |
440 | */ | |
441 | found_user_ID_now_present: | |
17926a79 | 442 | write_unlock(&rx->call_lock); |
d1e858c5 | 443 | call->state = RXRPC_CALL_DEAD; |
2341e077 DH |
444 | rxrpc_put_call(call); |
445 | _leave(" = -EEXIST [%p]", call); | |
446 | return ERR_PTR(-EEXIST); | |
17926a79 DH |
447 | } |
448 | ||
449 | /* | |
450 | * set up an incoming call | |
451 | * - called in process context with IRQs enabled | |
452 | */ | |
453 | struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx, | |
454 | struct rxrpc_connection *conn, | |
42886ffe | 455 | struct sk_buff *skb) |
17926a79 | 456 | { |
42886ffe | 457 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
17926a79 DH |
458 | struct rxrpc_call *call, *candidate; |
459 | struct rb_node **p, *parent; | |
0d12f8a4 | 460 | u32 call_id; |
17926a79 | 461 | |
843099ca | 462 | _enter(",%d", conn->debug_id); |
17926a79 DH |
463 | |
464 | ASSERT(rx != NULL); | |
465 | ||
843099ca | 466 | candidate = rxrpc_alloc_call(GFP_NOIO); |
17926a79 DH |
467 | if (!candidate) |
468 | return ERR_PTR(-EBUSY); | |
469 | ||
42886ffe DH |
470 | candidate->socket = rx; |
471 | candidate->conn = conn; | |
472 | candidate->cid = sp->hdr.cid; | |
473 | candidate->call_id = sp->hdr.callNumber; | |
474 | candidate->channel = sp->hdr.cid & RXRPC_CHANNELMASK; | |
475 | candidate->rx_data_post = 0; | |
476 | candidate->state = RXRPC_CALL_SERVER_ACCEPTING; | |
17926a79 DH |
477 | if (conn->security_ix > 0) |
478 | candidate->state = RXRPC_CALL_SERVER_SECURING; | |
479 | ||
480 | write_lock_bh(&conn->lock); | |
481 | ||
482 | /* set the channel for this call */ | |
483 | call = conn->channels[candidate->channel]; | |
484 | _debug("channel[%u] is %p", candidate->channel, call); | |
42886ffe | 485 | if (call && call->call_id == sp->hdr.callNumber) { |
17926a79 DH |
486 | /* already set; must've been a duplicate packet */ |
487 | _debug("extant call [%d]", call->state); | |
488 | ASSERTCMP(call->conn, ==, conn); | |
489 | ||
490 | read_lock(&call->state_lock); | |
491 | switch (call->state) { | |
492 | case RXRPC_CALL_LOCALLY_ABORTED: | |
4c198ad1 | 493 | if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events)) |
651350d1 | 494 | rxrpc_queue_call(call); |
17926a79 DH |
495 | case RXRPC_CALL_REMOTELY_ABORTED: |
496 | read_unlock(&call->state_lock); | |
497 | goto aborted_call; | |
498 | default: | |
499 | rxrpc_get_call(call); | |
500 | read_unlock(&call->state_lock); | |
501 | goto extant_call; | |
502 | } | |
503 | } | |
504 | ||
505 | if (call) { | |
506 | /* it seems the channel is still in use from the previous call | |
507 | * - ditch the old binding if its call is now complete */ | |
508 | _debug("CALL: %u { %s }", | |
509 | call->debug_id, rxrpc_call_states[call->state]); | |
510 | ||
511 | if (call->state >= RXRPC_CALL_COMPLETE) { | |
512 | conn->channels[call->channel] = NULL; | |
513 | } else { | |
514 | write_unlock_bh(&conn->lock); | |
515 | kmem_cache_free(rxrpc_call_jar, candidate); | |
516 | _leave(" = -EBUSY"); | |
517 | return ERR_PTR(-EBUSY); | |
518 | } | |
519 | } | |
520 | ||
521 | /* check the call number isn't duplicate */ | |
522 | _debug("check dup"); | |
42886ffe | 523 | call_id = sp->hdr.callNumber; |
17926a79 DH |
524 | p = &conn->calls.rb_node; |
525 | parent = NULL; | |
526 | while (*p) { | |
527 | parent = *p; | |
528 | call = rb_entry(parent, struct rxrpc_call, conn_node); | |
529 | ||
7727640c TS |
530 | /* The tree is sorted in order of the __be32 value without |
531 | * turning it into host order. | |
532 | */ | |
0d12f8a4 | 533 | if (call_id < call->call_id) |
17926a79 | 534 | p = &(*p)->rb_left; |
0d12f8a4 | 535 | else if (call_id > call->call_id) |
17926a79 DH |
536 | p = &(*p)->rb_right; |
537 | else | |
538 | goto old_call; | |
539 | } | |
540 | ||
541 | /* make the call available */ | |
542 | _debug("new call"); | |
543 | call = candidate; | |
544 | candidate = NULL; | |
545 | rb_link_node(&call->conn_node, parent, p); | |
546 | rb_insert_color(&call->conn_node, &conn->calls); | |
547 | conn->channels[call->channel] = call; | |
548 | sock_hold(&rx->sk); | |
5627cc8b | 549 | rxrpc_get_connection(conn); |
17926a79 DH |
550 | write_unlock_bh(&conn->lock); |
551 | ||
85f32278 DH |
552 | spin_lock(&conn->params.peer->lock); |
553 | hlist_add_head(&call->error_link, &conn->params.peer->error_targets); | |
554 | spin_unlock(&conn->params.peer->lock); | |
17926a79 DH |
555 | |
556 | write_lock_bh(&rxrpc_call_lock); | |
557 | list_add_tail(&call->link, &rxrpc_calls); | |
558 | write_unlock_bh(&rxrpc_call_lock); | |
559 | ||
7727640c | 560 | /* Record copies of information for hashtable lookup */ |
19ffa01c | 561 | call->family = rx->family; |
85f32278 | 562 | call->local = conn->params.local; |
19ffa01c | 563 | switch (call->family) { |
7727640c TS |
564 | case AF_INET: |
565 | call->peer_ip.ipv4_addr = | |
85f32278 | 566 | conn->params.peer->srx.transport.sin.sin_addr.s_addr; |
7727640c TS |
567 | break; |
568 | case AF_INET6: | |
569 | memcpy(call->peer_ip.ipv6_addr, | |
85f32278 | 570 | conn->params.peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8, |
7727640c TS |
571 | sizeof(call->peer_ip.ipv6_addr)); |
572 | break; | |
573 | default: | |
574 | break; | |
575 | } | |
19ffa01c DH |
576 | call->epoch = conn->proto.epoch; |
577 | call->service_id = conn->params.service_id; | |
578 | call->in_clientflag = conn->proto.in_clientflag; | |
7727640c TS |
579 | /* Add the new call to the hashtable */ |
580 | rxrpc_call_hash_add(call); | |
581 | ||
17926a79 DH |
582 | _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id); |
583 | ||
5873c083 | 584 | call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime; |
17926a79 DH |
585 | add_timer(&call->lifetimer); |
586 | _leave(" = %p {%d} [new]", call, call->debug_id); | |
587 | return call; | |
588 | ||
589 | extant_call: | |
590 | write_unlock_bh(&conn->lock); | |
591 | kmem_cache_free(rxrpc_call_jar, candidate); | |
592 | _leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1); | |
593 | return call; | |
594 | ||
595 | aborted_call: | |
596 | write_unlock_bh(&conn->lock); | |
597 | kmem_cache_free(rxrpc_call_jar, candidate); | |
598 | _leave(" = -ECONNABORTED"); | |
599 | return ERR_PTR(-ECONNABORTED); | |
600 | ||
601 | old_call: | |
602 | write_unlock_bh(&conn->lock); | |
603 | kmem_cache_free(rxrpc_call_jar, candidate); | |
604 | _leave(" = -ECONNRESET [old]"); | |
605 | return ERR_PTR(-ECONNRESET); | |
606 | } | |
607 | ||
/*
 * detach a call from a socket and set up for release
 *
 * Dissociates the call from its socket (rbtree / accept queue), frees its
 * connection channel, locally aborts it if still in progress, purges the Rx
 * queues, and finally hands the socket's ref on the call to the deadspan
 * timer, which will mark it DEAD and drop the ref after
 * rxrpc_dead_call_expiry.
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	/* A call may only be released once */
	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	spin_lock(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		/* Still on the socket's pre-accept queue */
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	write_lock_bh(&conn->lock);
	write_lock(&call->state_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_CALL_DEAD;
	}
	write_unlock(&call->state_lock);

	rb_erase(&call->conn_node, &conn->calls);
	write_unlock_bh(&conn->lock);

	rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			/* Drop the lock around freeing as skb destruction
			 * may itself take locks */
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}
705 | ||
706 | /* | |
707 | * handle a dead call being ready for reaping | |
708 | */ | |
709 | static void rxrpc_dead_call_expired(unsigned long _call) | |
710 | { | |
711 | struct rxrpc_call *call = (struct rxrpc_call *) _call; | |
712 | ||
713 | _enter("{%d}", call->debug_id); | |
714 | ||
715 | write_lock_bh(&call->state_lock); | |
716 | call->state = RXRPC_CALL_DEAD; | |
717 | write_unlock_bh(&call->state_lock); | |
718 | rxrpc_put_call(call); | |
719 | } | |
720 | ||
721 | /* | |
722 | * mark a call as to be released, aborting it if it's still in progress | |
723 | * - called with softirqs disabled | |
724 | */ | |
725 | static void rxrpc_mark_call_released(struct rxrpc_call *call) | |
726 | { | |
727 | bool sched; | |
728 | ||
729 | write_lock(&call->state_lock); | |
730 | if (call->state < RXRPC_CALL_DEAD) { | |
731 | sched = false; | |
732 | if (call->state < RXRPC_CALL_COMPLETE) { | |
733 | _debug("abort call %p", call); | |
734 | call->state = RXRPC_CALL_LOCALLY_ABORTED; | |
dc44b3a0 | 735 | call->local_abort = RX_CALL_DEAD; |
4c198ad1 | 736 | if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events)) |
17926a79 DH |
737 | sched = true; |
738 | } | |
4c198ad1 | 739 | if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events)) |
17926a79 DH |
740 | sched = true; |
741 | if (sched) | |
651350d1 | 742 | rxrpc_queue_call(call); |
17926a79 DH |
743 | } |
744 | write_unlock(&call->state_lock); | |
745 | } | |
746 | ||
747 | /* | |
748 | * release all the calls associated with a socket | |
749 | */ | |
750 | void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx) | |
751 | { | |
752 | struct rxrpc_call *call; | |
753 | struct rb_node *p; | |
754 | ||
755 | _enter("%p", rx); | |
756 | ||
757 | read_lock_bh(&rx->call_lock); | |
758 | ||
759 | /* mark all the calls as no longer wanting incoming packets */ | |
760 | for (p = rb_first(&rx->calls); p; p = rb_next(p)) { | |
761 | call = rb_entry(p, struct rxrpc_call, sock_node); | |
762 | rxrpc_mark_call_released(call); | |
763 | } | |
764 | ||
765 | /* kill the not-yet-accepted incoming calls */ | |
766 | list_for_each_entry(call, &rx->secureq, accept_link) { | |
767 | rxrpc_mark_call_released(call); | |
768 | } | |
769 | ||
770 | list_for_each_entry(call, &rx->acceptq, accept_link) { | |
771 | rxrpc_mark_call_released(call); | |
772 | } | |
773 | ||
774 | read_unlock_bh(&rx->call_lock); | |
775 | _leave(""); | |
776 | } | |
777 | ||
778 | /* | |
779 | * release a call | |
780 | */ | |
781 | void __rxrpc_put_call(struct rxrpc_call *call) | |
782 | { | |
783 | ASSERT(call != NULL); | |
784 | ||
785 | _enter("%p{u=%d}", call, atomic_read(&call->usage)); | |
786 | ||
787 | ASSERTCMP(atomic_read(&call->usage), >, 0); | |
788 | ||
789 | if (atomic_dec_and_test(&call->usage)) { | |
790 | _debug("call %d dead", call->debug_id); | |
791 | ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); | |
651350d1 | 792 | rxrpc_queue_work(&call->destroyer); |
17926a79 DH |
793 | } |
794 | _leave(""); | |
795 | } | |
796 | ||
797 | /* | |
798 | * clean up a call | |
799 | */ | |
800 | static void rxrpc_cleanup_call(struct rxrpc_call *call) | |
801 | { | |
802 | _net("DESTROY CALL %d", call->debug_id); | |
803 | ||
804 | ASSERT(call->socket); | |
805 | ||
806 | memset(&call->sock_node, 0xcd, sizeof(call->sock_node)); | |
807 | ||
808 | del_timer_sync(&call->lifetimer); | |
809 | del_timer_sync(&call->deadspan); | |
810 | del_timer_sync(&call->ack_timer); | |
811 | del_timer_sync(&call->resend_timer); | |
812 | ||
813 | ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); | |
814 | ASSERTCMP(call->events, ==, 0); | |
815 | if (work_pending(&call->processor)) { | |
816 | _debug("defer destroy"); | |
651350d1 | 817 | rxrpc_queue_work(&call->destroyer); |
17926a79 DH |
818 | return; |
819 | } | |
820 | ||
e653cfe4 | 821 | ASSERTCMP(call->conn, ==, NULL); |
17926a79 | 822 | |
7727640c TS |
823 | /* Remove the call from the hash */ |
824 | rxrpc_call_hash_del(call); | |
825 | ||
17926a79 DH |
826 | if (call->acks_window) { |
827 | _debug("kill Tx window %d", | |
828 | CIRC_CNT(call->acks_head, call->acks_tail, | |
829 | call->acks_winsz)); | |
830 | smp_mb(); | |
831 | while (CIRC_CNT(call->acks_head, call->acks_tail, | |
832 | call->acks_winsz) > 0) { | |
833 | struct rxrpc_skb_priv *sp; | |
834 | unsigned long _skb; | |
835 | ||
836 | _skb = call->acks_window[call->acks_tail] & ~1; | |
0d12f8a4 DH |
837 | sp = rxrpc_skb((struct sk_buff *)_skb); |
838 | _debug("+++ clear Tx %u", sp->hdr.seq); | |
839 | rxrpc_free_skb((struct sk_buff *)_skb); | |
17926a79 DH |
840 | call->acks_tail = |
841 | (call->acks_tail + 1) & (call->acks_winsz - 1); | |
842 | } | |
843 | ||
844 | kfree(call->acks_window); | |
845 | } | |
846 | ||
847 | rxrpc_free_skb(call->tx_pending); | |
848 | ||
849 | rxrpc_purge_queue(&call->rx_queue); | |
850 | ASSERT(skb_queue_empty(&call->rx_oos_queue)); | |
851 | sock_put(&call->socket->sk); | |
852 | kmem_cache_free(rxrpc_call_jar, call); | |
853 | } | |
854 | ||
855 | /* | |
856 | * destroy a call | |
857 | */ | |
858 | static void rxrpc_destroy_call(struct work_struct *work) | |
859 | { | |
860 | struct rxrpc_call *call = | |
861 | container_of(work, struct rxrpc_call, destroyer); | |
862 | ||
863 | _enter("%p{%d,%d,%p}", | |
864 | call, atomic_read(&call->usage), call->channel, call->conn); | |
865 | ||
866 | ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); | |
867 | ||
868 | write_lock_bh(&rxrpc_call_lock); | |
869 | list_del_init(&call->link); | |
870 | write_unlock_bh(&rxrpc_call_lock); | |
871 | ||
872 | rxrpc_cleanup_call(call); | |
873 | _leave(""); | |
874 | } | |
875 | ||
876 | /* | |
877 | * preemptively destroy all the call records from a transport endpoint rather | |
878 | * than waiting for them to time out | |
879 | */ | |
880 | void __exit rxrpc_destroy_all_calls(void) | |
881 | { | |
882 | struct rxrpc_call *call; | |
883 | ||
884 | _enter(""); | |
885 | write_lock_bh(&rxrpc_call_lock); | |
886 | ||
887 | while (!list_empty(&rxrpc_calls)) { | |
888 | call = list_entry(rxrpc_calls.next, struct rxrpc_call, link); | |
889 | _debug("Zapping call %p", call); | |
890 | ||
891 | list_del_init(&call->link); | |
892 | ||
893 | switch (atomic_read(&call->usage)) { | |
894 | case 0: | |
895 | ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); | |
896 | break; | |
897 | case 1: | |
898 | if (del_timer_sync(&call->deadspan) != 0 && | |
899 | call->state != RXRPC_CALL_DEAD) | |
900 | rxrpc_dead_call_expired((unsigned long) call); | |
901 | if (call->state != RXRPC_CALL_DEAD) | |
902 | break; | |
903 | default: | |
9b6d5398 | 904 | pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n", |
17926a79 DH |
905 | call, atomic_read(&call->usage), |
906 | atomic_read(&call->ackr_not_idle), | |
907 | rxrpc_call_states[call->state], | |
908 | call->flags, call->events); | |
909 | if (!skb_queue_empty(&call->rx_queue)) | |
9b6d5398 | 910 | pr_err("Rx queue occupied\n"); |
17926a79 | 911 | if (!skb_queue_empty(&call->rx_oos_queue)) |
9b6d5398 | 912 | pr_err("OOS queue occupied\n"); |
17926a79 DH |
913 | break; |
914 | } | |
915 | ||
916 | write_unlock_bh(&rxrpc_call_lock); | |
917 | cond_resched(); | |
918 | write_lock_bh(&rxrpc_call_lock); | |
919 | } | |
920 | ||
921 | write_unlock_bh(&rxrpc_call_lock); | |
922 | _leave(""); | |
923 | } | |
924 | ||
925 | /* | |
926 | * handle call lifetime being exceeded | |
927 | */ | |
928 | static void rxrpc_call_life_expired(unsigned long _call) | |
929 | { | |
930 | struct rxrpc_call *call = (struct rxrpc_call *) _call; | |
931 | ||
932 | if (call->state >= RXRPC_CALL_COMPLETE) | |
933 | return; | |
934 | ||
935 | _enter("{%d}", call->debug_id); | |
936 | read_lock_bh(&call->state_lock); | |
937 | if (call->state < RXRPC_CALL_COMPLETE) { | |
4c198ad1 | 938 | set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events); |
651350d1 | 939 | rxrpc_queue_call(call); |
17926a79 DH |
940 | } |
941 | read_unlock_bh(&call->state_lock); | |
942 | } | |
943 | ||
944 | /* | |
945 | * handle resend timer expiry | |
3b5bac2b | 946 | * - may not take call->state_lock as this can deadlock against del_timer_sync() |
17926a79 DH |
947 | */ |
948 | static void rxrpc_resend_time_expired(unsigned long _call) | |
949 | { | |
950 | struct rxrpc_call *call = (struct rxrpc_call *) _call; | |
951 | ||
952 | _enter("{%d}", call->debug_id); | |
953 | ||
954 | if (call->state >= RXRPC_CALL_COMPLETE) | |
955 | return; | |
956 | ||
17926a79 | 957 | clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); |
4c198ad1 | 958 | if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events)) |
651350d1 | 959 | rxrpc_queue_call(call); |
17926a79 DH |
960 | } |
961 | ||
962 | /* | |
963 | * handle ACK timer expiry | |
964 | */ | |
965 | static void rxrpc_ack_time_expired(unsigned long _call) | |
966 | { | |
967 | struct rxrpc_call *call = (struct rxrpc_call *) _call; | |
968 | ||
969 | _enter("{%d}", call->debug_id); | |
970 | ||
971 | if (call->state >= RXRPC_CALL_COMPLETE) | |
972 | return; | |
973 | ||
974 | read_lock_bh(&call->state_lock); | |
975 | if (call->state < RXRPC_CALL_COMPLETE && | |
4c198ad1 | 976 | !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events)) |
651350d1 | 977 | rxrpc_queue_call(call); |
17926a79 DH |
978 | read_unlock_bh(&call->state_lock); |
979 | } |