1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* AFS server record management
4 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
8 #include <linux/sched.h>
9 #include <linux/slab.h>
12 #include "protocol_yfs.h"
14 static unsigned afs_server_gc_delay
= 10; /* Server record timeout in seconds */
15 static unsigned afs_server_update_delay
= 30; /* Time till VLDB recheck in secs */
16 static atomic_t afs_server_debug_id
;
18 static void afs_inc_servers_outstanding(struct afs_net
*net
)
20 atomic_inc(&net
->servers_outstanding
);
23 static void afs_dec_servers_outstanding(struct afs_net
*net
)
25 if (atomic_dec_and_test(&net
->servers_outstanding
))
26 wake_up_var(&net
->servers_outstanding
);
/* NOTE(review): this extract is garbled — lines are split mid-statement and
 * several original lines (RCU locking, the seqlock retry-loop head, the
 * match/break logic) appear elided; gaps in the embedded numbering confirm
 * missing lines.  Code tokens below are left byte-identical.
 */
/* Looks up a server record by comparing the given transport address (IPv4 or
 * IPv6) against each server's address list, under fs_addr_lock seqlock
 * protection; takes a usage reference on a match via atomic_inc_not_zero().
 */
30 * Find a server by one of its addresses.
32 struct afs_server
*afs_find_server(struct afs_net
*net
,
33 const struct sockaddr_rxrpc
*srx
)
35 const struct afs_addr_list
*alist
;
36 struct afs_server
*server
= NULL
;
/* Drop the ref taken on a previous (failed) pass before retrying */
44 afs_put_server(net
, server
, afs_server_trace_put_find_rsq
);
46 read_seqbegin_or_lock(&net
->fs_addr_lock
, &seq
);
48 if (srx
->transport
.family
== AF_INET6
) {
49 const struct sockaddr_in6
*a
= &srx
->transport
.sin6
, *b
;
/* Scan the IPv6 reverse-map list; IPv6 addrs follow the IPv4 ones
 * in each server's address array (indices nr_ipv4..nr_addrs-1).
 */
50 hlist_for_each_entry_rcu(server
, &net
->fs_addresses6
, addr6_link
) {
51 alist
= rcu_dereference(server
->addresses
);
52 for (i
= alist
->nr_ipv4
; i
< alist
->nr_addrs
; i
++) {
53 b
= &alist
->addrs
[i
].transport
.sin6
;
/* Compare port first, then the 128-bit address */
54 diff
= ((u16 __force
)a
->sin6_port
-
55 (u16 __force
)b
->sin6_port
);
57 diff
= memcmp(&a
->sin6_addr
,
59 sizeof(struct in6_addr
));
/* IPv4 case: scan fs_addresses4, indices 0..nr_ipv4-1 */
65 const struct sockaddr_in
*a
= &srx
->transport
.sin
, *b
;
66 hlist_for_each_entry_rcu(server
, &net
->fs_addresses4
, addr4_link
) {
67 alist
= rcu_dereference(server
->addresses
);
68 for (i
= 0; i
< alist
->nr_ipv4
; i
++) {
69 b
= &alist
->addrs
[i
].transport
.sin
;
70 diff
= ((u16 __force
)a
->sin_port
-
71 (u16 __force
)b
->sin_port
);
73 diff
= ((u32 __force
)a
->sin_addr
.s_addr
-
74 (u32 __force
)b
->sin_addr
.s_addr
);
/* A dying server (usage already 0) must not be resurrected */
83 if (server
&& !atomic_inc_not_zero(&server
->usage
))
86 } while (need_seqretry(&net
->fs_addr_lock
, seq
));
88 done_seqretry(&net
->fs_addr_lock
, seq
));
/* NOTE(review): garbled extract — the rbtree descent branches, the seqlock
 * retry-loop head and the final return are elided here (numbering gaps).
 * Code tokens left byte-identical.
 */
/* Walks net->fs_servers (an rbtree keyed by server UUID) under fs_lock
 * seqlock protection and returns a referenced server record, or NULL.
 */
95 * Look up a server by its UUID
97 struct afs_server
*afs_find_server_by_uuid(struct afs_net
*net
, const uuid_t
*uuid
)
99 struct afs_server
*server
= NULL
;
106 /* Unfortunately, rbtree walking doesn't give reliable results
107 * under just the RCU read lock, so we have to check for
/* Drop the ref from a previous failed pass before retrying the walk */
111 afs_put_server(net
, server
, afs_server_trace_put_uuid_rsq
);
114 read_seqbegin_or_lock(&net
->fs_lock
, &seq
);
116 p
= net
->fs_servers
.rb_node
;
118 server
= rb_entry(p
, struct afs_server
, uuid_rb
);
/* memcmp ordering on the raw UUID bytes decides left/right descent */
120 diff
= memcmp(uuid
, &server
->uuid
, sizeof(*uuid
));
123 } else if (diff
> 0) {
126 afs_get_server(server
, afs_server_trace_get_by_uuid
);
132 } while (need_seqretry(&net
->fs_lock
, seq
));
134 done_seqretry(&net
->fs_lock
, seq
);
136 _leave(" = %p", server
);
/* NOTE(review): garbled extract — the descent-loop head, the duplicate-found
 * path and the returns are elided (numbering gaps).  Tokens left as-is.
 */
/* Inserts a candidate server into the namespace: first into the UUID rbtree
 * (net->fs_servers) and the /proc list, then into the IPv4/IPv6 address
 * reverse-map lists, all under write_seqlock of fs_lock / fs_addr_lock.
 * Presumably returns the already-installed record instead when a duplicate
 * UUID is found — that branch is not visible here; TODO confirm.
 */
141 * Install a server record in the namespace tree
143 static struct afs_server
*afs_install_server(struct afs_net
*net
,
144 struct afs_server
*candidate
)
146 const struct afs_addr_list
*alist
;
147 struct afs_server
*server
;
148 struct rb_node
**pp
, *p
;
151 _enter("%p", candidate
);
153 write_seqlock(&net
->fs_lock
);
155 /* Firstly install the server in the UUID lookup tree */
156 pp
= &net
->fs_servers
.rb_node
;
160 _debug("- consider %p", p
);
161 server
= rb_entry(p
, struct afs_server
, uuid_rb
);
162 diff
= memcmp(&candidate
->uuid
, &server
->uuid
, sizeof(uuid_t
));
164 pp
= &(*pp
)->rb_left
;
166 pp
= &(*pp
)->rb_right
;
/* No duplicate: link the candidate into the tree and the proc list */
172 rb_link_node(&server
->uuid_rb
, p
, pp
);
173 rb_insert_color(&server
->uuid_rb
, &net
->fs_servers
);
174 hlist_add_head_rcu(&server
->proc_link
, &net
->fs_proc
);
176 write_seqlock(&net
->fs_addr_lock
);
177 alist
= rcu_dereference_protected(server
->addresses
,
178 lockdep_is_held(&net
->fs_addr_lock
.lock
));
180 /* Secondly, if the server has any IPv4 and/or IPv6 addresses, install
181 * it in the IPv4 and/or IPv6 reverse-map lists.
183 * TODO: For speed we want to use something other than a flat list
184 * here; even sorting the list in terms of lowest address would help a
185 * bit, but anything we might want to do gets messy and memory
188 if (alist
->nr_ipv4
> 0)
189 hlist_add_head_rcu(&server
->addr4_link
, &net
->fs_addresses4
);
190 if (alist
->nr_addrs
> alist
->nr_ipv4
)
191 hlist_add_head_rcu(&server
->addr6_link
, &net
->fs_addresses6
);
193 write_sequnlock(&net
->fs_addr_lock
);
/* Installed record gets a reference on behalf of the caller */
196 afs_get_server(server
, afs_server_trace_get_install
);
197 write_sequnlock(&net
->fs_lock
);
/* NOTE(review): garbled extract — the parameter list is incomplete (the body
 * copies *uuid but no uuid parameter line is visible; internal line 205 is
 * elided), as are the kzalloc-failure branch and the returns.  Tokens left
 * byte-identical.
 */
/* Allocates and initialises a new afs_server record with one usage ref,
 * a fresh debug ID, the given address list (RCU pointer) and an update
 * deadline afs_server_update_delay seconds from now.  Returns NULL on OOM.
 */
202 * allocate a new server record
204 static struct afs_server
*afs_alloc_server(struct afs_net
*net
,
206 struct afs_addr_list
*alist
)
208 struct afs_server
*server
;
212 server
= kzalloc(sizeof(struct afs_server
), GFP_KERNEL
);
216 atomic_set(&server
->usage
, 1);
217 server
->debug_id
= atomic_inc_return(&afs_server_debug_id
);
/* Takes over the caller's ref on alist (published via RCU pointer) */
218 RCU_INIT_POINTER(server
->addresses
, alist
);
219 server
->addr_version
= alist
->version
;
220 server
->uuid
= *uuid
;
221 server
->update_at
= ktime_get_real_seconds() + afs_server_update_delay
;
222 rwlock_init(&server
->fs_lock
);
223 INIT_HLIST_HEAD(&server
->cb_volumes
);
224 rwlock_init(&server
->cb_break_lock
);
225 init_waitqueue_head(&server
->probe_wq
);
226 spin_lock_init(&server
->probe_lock
);
228 afs_inc_servers_outstanding(net
);
229 trace_afs_server(server
, 1, afs_server_trace_alloc
);
230 _leave(" = %p", server
);
234 _leave(" = NULL [nomem]");
/* NOTE(review): garbled extract — the `else` arm between the YFS and
 * non-YFS calls and the declaration/initialisation of `ret` are elided
 * (numbering gaps).  Tokens left byte-identical.
 */
/* Fetches a server's address list from the VL servers of the given cell,
 * using the YFS endpoint RPC when the VL server advertises YFS and the
 * plain UUID lookup otherwise.  Returns the list or ERR_PTR on failure.
 */
239 * Look up an address record for a server
241 static struct afs_addr_list
*afs_vl_lookup_addrs(struct afs_cell
*cell
,
242 struct key
*key
, const uuid_t
*uuid
)
244 struct afs_vl_cursor vc
;
245 struct afs_addr_list
*alist
= NULL
;
249 if (afs_begin_vlserver_operation(&vc
, cell
, key
)) {
250 while (afs_select_vlserver(&vc
)) {
251 if (test_bit(AFS_VLSERVER_FL_IS_YFS
, &vc
.server
->flags
))
252 alist
= afs_yfsvl_get_endpoints(&vc
, uuid
);
254 alist
= afs_vl_get_addrs_u(&vc
, uuid
);
257 ret
= afs_end_vlserver_operation(&vc
);
260 return ret
< 0 ? ERR_PTR(ret
) : alist
;
/* NOTE(review): garbled extract — the uuid parameter line, the early-return
 * when the UUID lookup already succeeds, the error/label plumbing and the
 * final return are elided (numbering gaps).  Tokens left byte-identical.
 */
/* Gets a fileserver record: tries the UUID lookup first, otherwise queries
 * the VL servers for addresses, allocates a candidate and installs it; if
 * install finds the record already present, the candidate's address list
 * is dropped.  Returns the referenced server or an ERR_PTR.
 */
264 * Get or create a fileserver record.
266 struct afs_server
*afs_lookup_server(struct afs_cell
*cell
, struct key
*key
,
269 struct afs_addr_list
*alist
;
270 struct afs_server
*server
, *candidate
;
272 _enter("%p,%pU", cell
->net
, uuid
);
274 server
= afs_find_server_by_uuid(cell
->net
, uuid
);
278 alist
= afs_vl_lookup_addrs(cell
, key
, uuid
);
280 return ERR_CAST(alist
);
282 candidate
= afs_alloc_server(cell
->net
, uuid
, alist
);
/* Allocation failed: release the address list we fetched */
284 afs_put_addrlist(alist
);
285 return ERR_PTR(-ENOMEM
);
288 server
= afs_install_server(cell
->net
, candidate
);
289 if (server
!= candidate
) {
/* Lost the install race: our alist isn't the one published */
290 afs_put_addrlist(alist
);
294 _leave(" = %p{%d}", server
, atomic_read(&server
->usage
));
/* NOTE(review): garbled extract — a two-line numbering gap before the body
 * suggests an elided guard (possibly on net->live); TODO confirm against
 * the full source.  Tokens left byte-identical.
 */
/* Arms net->fs_timer to fire within `delay` seconds unless already set
 * earlier (timer_reduce); balances the outstanding count if the timer was
 * already pending.
 */
299 * Set the server timer to fire after a given delay, assuming it's not already
300 * set for an earlier time.
302 static void afs_set_server_timer(struct afs_net
*net
, time64_t delay
)
305 afs_inc_servers_outstanding(net
);
/* timer_reduce() returns nonzero if the timer was already pending,
 * in which case our increment must be given back.
 */
306 if (timer_reduce(&net
->fs_timer
, jiffies
+ delay
* HZ
))
307 afs_dec_servers_outstanding(net
);
/* NOTE(review): garbled extract — braces and at least one body line are
 * elided (numbering gap at internal lines 318-319).  Tokens left as-is.
 */
/* Timer callback: hands the outstanding-count increment we hold over to
 * the manager work item; if the work was already queued, drop it here.
 */
312 * Server management timer. We have an increment on fs_outstanding that we
313 * need to pass along to the work item.
315 void afs_servers_timer(struct timer_list
*timer
)
317 struct afs_net
*net
= container_of(timer
, struct afs_net
, fs_timer
);
320 if (!queue_work(afs_wq
, &net
->fs_manager
))
321 afs_dec_servers_outstanding(net
);
/* NOTE(review): garbled extract — braces and the final return statement are
 * elided; presumably `return server;` given the signature.  Tokens left
 * byte-identical.
 */
/* Takes an additional usage reference on @server, tracing the new count
 * with the given reason.
 */
325 * Get a reference on a server object.
327 struct afs_server
*afs_get_server(struct afs_server
*server
,
328 enum afs_server_trace reason
)
330 unsigned int u
= atomic_inc_return(&server
->usage
);
332 trace_afs_server(server
, u
, reason
);
/* NOTE(review): garbled extract — the NULL-server early return, the `usage`
 * declaration and the early-return body are elided (numbering gaps).
 * Tokens left byte-identical.
 */
/* Drops a usage reference.  put_time is recorded so the GC can expire the
 * record later; when the count falls to the GC threshold the server timer
 * is (re)armed with the GC delay.
 */
337 * Release a reference on a server record.
339 void afs_put_server(struct afs_net
*net
, struct afs_server
*server
,
340 enum afs_server_trace reason
)
347 server
->put_time
= ktime_get_real_seconds();
349 usage
= atomic_dec_return(&server
->usage
);
351 trace_afs_server(server
, usage
, reason
);
353 if (likely(usage
> 0))
/* Last external ref gone: schedule garbage collection */
356 afs_set_server_timer(net
, afs_server_gc_delay
);
/* NOTE(review): garbled extract — one body line after the afs_put_addrlist()
 * call is elided (presumably the free of the server record; TODO confirm).
 * Tokens left byte-identical.
 */
/* RCU callback releasing a server record after the grace period: drops the
 * ref on the (now unreachable) address list.
 */
359 static void afs_server_rcu(struct rcu_head
*rcu
)
361 struct afs_server
*server
= container_of(rcu
, struct afs_server
, rcu
);
363 trace_afs_server(server
, atomic_read(&server
->usage
),
364 afs_server_trace_free
);
365 afs_put_addrlist(rcu_access_pointer(server
->addresses
));
/* NOTE(review): garbled extract — part of the afs_addr_cursor initialiser
 * and surrounding lines are elided (numbering gaps).  Tokens left as-is.
 */
/* Tears down a server that has been removed from the namespace: gives up
 * any callbacks it may still hold, waits for outstanding probes to drain,
 * then frees the record via call_rcu and drops the outstanding count.
 */
370 * destroy a dead server
372 static void afs_destroy_server(struct afs_net
*net
, struct afs_server
*server
)
374 struct afs_addr_list
*alist
= rcu_access_pointer(server
->addresses
);
375 struct afs_addr_cursor ac
= {
377 .index
= alist
->preferred
,
381 trace_afs_server(server
, atomic_read(&server
->usage
),
382 afs_server_trace_give_up_cb
);
384 if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB
, &server
->flags
))
385 afs_fs_give_up_all_callbacks(net
, server
, &ac
, NULL
);
/* Don't free while probe RPCs still reference the record */
387 wait_var_event(&server
->probe_outstanding
,
388 atomic_read(&server
->probe_outstanding
) == 0);
390 trace_afs_server(server
, atomic_read(&server
->usage
),
391 afs_server_trace_destroy
);
392 call_rcu(&server
->rcu
, afs_server_rcu
);
393 afs_dec_servers_outstanding(net
);
/* NOTE(review): garbled extract — the `usage`/`deleted` declarations, the
 * branch taken when the cmpxchg fails, and several closing braces are
 * elided (numbering gaps).  Tokens left byte-identical.
 */
/* Walks the singly-linked gc_list (threaded through server->gc_next) and,
 * for each server whose usage can be atomically dropped from the expected
 * value to 0, unlinks it from the UUID tree, proc list and both address
 * reverse-maps before destroying it.
 */
397 * Garbage collect any expired servers.
399 static void afs_gc_servers(struct afs_net
*net
, struct afs_server
*gc_list
)
401 struct afs_server
*server
;
405 while ((server
= gc_list
)) {
406 gc_list
= server
->gc_next
;
408 write_seqlock(&net
->fs_lock
);
/* Only delete if nobody re-referenced the record meanwhile */
410 deleted
= atomic_try_cmpxchg(&server
->usage
, &usage
, 0);
411 trace_afs_server(server
, usage
, afs_server_trace_gc
);
413 rb_erase(&server
->uuid_rb
, &net
->fs_servers
);
414 hlist_del_rcu(&server
->proc_link
);
416 write_sequnlock(&net
->fs_lock
);
419 write_seqlock(&net
->fs_addr_lock
);
420 if (!hlist_unhashed(&server
->addr4_link
))
421 hlist_del_rcu(&server
->addr4_link
);
422 if (!hlist_unhashed(&server
->addr6_link
))
423 hlist_del_rcu(&server
->addr6_link
);
424 write_sequnlock(&net
->fs_addr_lock
);
425 afs_destroy_server(net
, server
);
/* NOTE(review): garbled extract — loop braces, the active/inactive test
 * around the expiry block, and intermediate lines are elided (numbering
 * gaps).  Tokens left byte-identical.
 */
/* Work item: trawls the UUID rbtree for servers that have expired (or all
 * of them when the namespace is dying), threads expired ones onto gc_list,
 * re-arms the timer/work for the earliest future expiry, then runs the GC
 * and finally consumes the outstanding-count increment we were queued with.
 */
431 * Manage the records of servers known to be within a network namespace. This
432 * includes garbage collecting unused servers.
434 * Note also that we were given an increment on net->servers_outstanding by
435 * whoever queued us that we need to deal with before returning.
437 void afs_manage_servers(struct work_struct
*work
)
439 struct afs_net
*net
= container_of(work
, struct afs_net
, fs_manager
);
440 struct afs_server
*gc_list
= NULL
;
441 struct rb_node
*cursor
;
442 time64_t now
= ktime_get_real_seconds(), next_manage
= TIME64_MAX
;
443 bool purging
= !net
->live
;
447 /* Trawl the server list looking for servers that have expired from
450 read_seqlock_excl(&net
->fs_lock
);
452 for (cursor
= rb_first(&net
->fs_servers
); cursor
; cursor
= rb_next(cursor
)) {
453 struct afs_server
*server
=
454 rb_entry(cursor
, struct afs_server
, uuid_rb
);
455 int usage
= atomic_read(&server
->usage
);
457 _debug("manage %pU %u", &server
->uuid
, usage
);
459 ASSERTCMP(usage
, >=, 1);
460 ASSERTIFCMP(purging
, usage
, ==, 1);
/* Servers that failed VL lookup / weren't found expire
 * immediately; healthy ones get the GC grace period.
 */
463 time64_t expire_at
= server
->put_time
;
465 if (!test_bit(AFS_SERVER_FL_VL_FAIL
, &server
->flags
) &&
466 !test_bit(AFS_SERVER_FL_NOT_FOUND
, &server
->flags
))
467 expire_at
+= afs_server_gc_delay
;
468 if (purging
|| expire_at
<= now
) {
469 server
->gc_next
= gc_list
;
471 } else if (expire_at
< next_manage
) {
472 next_manage
= expire_at
;
477 read_sequnlock_excl(&net
->fs_lock
);
479 /* Update the timer on the way out. We have to pass an increment on
480 * servers_outstanding in the namespace that we are in to the timer or
481 * the work scheduler.
483 if (!purging
&& next_manage
< TIME64_MAX
) {
484 now
= ktime_get_real_seconds();
486 if (next_manage
- now
<= 0) {
/* Deadline already passed: requeue ourselves now */
487 if (queue_work(afs_wq
, &net
->fs_manager
))
488 afs_inc_servers_outstanding(net
);
490 afs_set_server_timer(net
, next_manage
- now
);
494 afs_gc_servers(net
, gc_list
);
496 afs_dec_servers_outstanding(net
);
497 _leave(" [%d]", atomic_read(&net
->servers_outstanding
));
500 static void afs_queue_server_manager(struct afs_net
*net
)
502 afs_inc_servers_outstanding(net
);
503 if (!queue_work(afs_wq
, &net
->fs_manager
))
504 afs_dec_servers_outstanding(net
);
/* NOTE(review): garbled extract — braces and several lines around the body
 * are elided (numbering gaps).  Tokens left byte-identical.
 */
/* Namespace teardown: cancel the server timer (reclaiming its outstanding-
 * count increment if it was pending), kick the manager once to purge every
 * record, then wait for servers_outstanding to drain to zero.
 */
508 * Purge list of servers.
510 void afs_purge_servers(struct afs_net
*net
)
514 if (del_timer_sync(&net
->fs_timer
))
515 atomic_dec(&net
->servers_outstanding
);
517 afs_queue_server_manager(net
);
/* Woken by afs_dec_servers_outstanding() on the final decrement */
520 wait_var_event(&net
->servers_outstanding
,
521 !atomic_read(&net
->servers_outstanding
));
/* NOTE(review): garbled extract — the IS_ERR() test, the true/false returns
 * and the branch joining lines are elided (numbering gaps).  Tokens left
 * byte-identical.
 */
/* Refreshes a server's address list from the VL servers.  An interrupted
 * uninterruptible-cursor lookup is tolerated; other errors are stored in
 * fc->error.  A changed list version swaps in the new list under
 * server->fs_lock and discards the old one.
 */
526 * Get an update for a server's address list.
528 static noinline
bool afs_update_server_record(struct afs_fs_cursor
*fc
, struct afs_server
*server
)
530 struct afs_addr_list
*alist
, *discard
;
534 trace_afs_server(server
, atomic_read(&server
->usage
), afs_server_trace_update
);
536 alist
= afs_vl_lookup_addrs(fc
->vnode
->volume
->cell
, fc
->key
,
/* Signal during a non-interruptible operation: keep the old list */
539 if ((PTR_ERR(alist
) == -ERESTARTSYS
||
540 PTR_ERR(alist
) == -EINTR
) &&
541 !(fc
->flags
& AFS_FS_CURSOR_INTR
) &&
543 _leave(" = t [intr]");
546 fc
->error
= PTR_ERR(alist
);
547 _leave(" = f [%d]", fc
->error
);
552 if (server
->addr_version
!= alist
->version
) {
553 write_lock(&server
->fs_lock
);
554 discard
= rcu_dereference_protected(server
->addresses
,
555 lockdep_is_held(&server
->fs_lock
));
556 rcu_assign_pointer(server
->addresses
, alist
);
557 server
->addr_version
= alist
->version
;
558 write_unlock(&server
->fs_lock
);
561 server
->update_at
= ktime_get_real_seconds() + afs_server_update_delay
;
/* Old (or unchanged new) list dropped after the swap decision */
562 afs_put_addrlist(discard
);
/* NOTE(review): garbled extract AND truncated — the retry loop head, the
 * wait mode argument, several branches and the function's tail (beyond the
 * last _leave) fall outside this view.  Tokens left byte-identical; no
 * reconstruction attempted.
 */
/* Checks whether a server's address list is due for an update; if so, one
 * task performs the update (guarded by AFS_SERVER_FL_UPDATING) while others
 * wait on that flag bit, tolerating interruption for non-interruptible
 * cursors that still have addresses to use.
 */
568 * See if a server's address list needs updating.
570 bool afs_check_server_record(struct afs_fs_cursor
*fc
, struct afs_server
*server
)
572 time64_t now
= ktime_get_real_seconds();
575 int ret
, retries
= 0;
582 diff
= READ_ONCE(server
->update_at
) - now
;
584 _leave(" = t [not now %ld]", diff
);
/* Won the race to become the updater */
588 if (!test_and_set_bit_lock(AFS_SERVER_FL_UPDATING
, &server
->flags
)) {
589 success
= afs_update_server_record(fc
, server
);
590 clear_bit_unlock(AFS_SERVER_FL_UPDATING
, &server
->flags
);
591 wake_up_bit(&server
->flags
, AFS_SERVER_FL_UPDATING
);
592 _leave(" = %d", success
);
/* Someone else is updating: wait for them to finish */
596 ret
= wait_on_bit(&server
->flags
, AFS_SERVER_FL_UPDATING
,
598 if (ret
== -ERESTARTSYS
) {
599 if (!(fc
->flags
& AFS_FS_CURSOR_INTR
) && server
->addresses
) {
600 _leave(" = t [intr]");
604 _leave(" = f [intr]");
610 _leave(" = f [stale]");