// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS server record management
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
8 #include <linux/sched.h>
9 #include <linux/slab.h>
12 #include "protocol_yfs.h"
14 static unsigned afs_server_gc_delay
= 10; /* Server record timeout in seconds */
15 static atomic_t afs_server_debug_id
;
/* Forward declarations for helpers defined later in this file. */
static struct afs_server *afs_maybe_use_server(struct afs_server *,
					       enum afs_server_trace);
static void __afs_put_server(struct afs_net *, struct afs_server *);
22 * Find a server by one of its addresses.
24 struct afs_server
*afs_find_server(struct afs_net
*net
,
25 const struct sockaddr_rxrpc
*srx
)
27 const struct afs_addr_list
*alist
;
28 struct afs_server
*server
= NULL
;
36 afs_unuse_server_notime(net
, server
, afs_server_trace_put_find_rsq
);
38 read_seqbegin_or_lock(&net
->fs_addr_lock
, &seq
);
40 if (srx
->transport
.family
== AF_INET6
) {
41 const struct sockaddr_in6
*a
= &srx
->transport
.sin6
, *b
;
42 hlist_for_each_entry_rcu(server
, &net
->fs_addresses6
, addr6_link
) {
43 alist
= rcu_dereference(server
->addresses
);
44 for (i
= alist
->nr_ipv4
; i
< alist
->nr_addrs
; i
++) {
45 b
= &alist
->addrs
[i
].transport
.sin6
;
46 diff
= ((u16 __force
)a
->sin6_port
-
47 (u16 __force
)b
->sin6_port
);
49 diff
= memcmp(&a
->sin6_addr
,
51 sizeof(struct in6_addr
));
57 const struct sockaddr_in
*a
= &srx
->transport
.sin
, *b
;
58 hlist_for_each_entry_rcu(server
, &net
->fs_addresses4
, addr4_link
) {
59 alist
= rcu_dereference(server
->addresses
);
60 for (i
= 0; i
< alist
->nr_ipv4
; i
++) {
61 b
= &alist
->addrs
[i
].transport
.sin
;
62 diff
= ((u16 __force
)a
->sin_port
-
63 (u16 __force
)b
->sin_port
);
65 diff
= ((u32 __force
)a
->sin_addr
.s_addr
-
66 (u32 __force
)b
->sin_addr
.s_addr
);
76 server
= afs_maybe_use_server(server
, afs_server_trace_get_by_addr
);
78 } while (need_seqretry(&net
->fs_addr_lock
, seq
));
80 done_seqretry(&net
->fs_addr_lock
, seq
);
87 * Look up a server by its UUID and mark it active.
89 struct afs_server
*afs_find_server_by_uuid(struct afs_net
*net
, const uuid_t
*uuid
)
91 struct afs_server
*server
= NULL
;
98 /* Unfortunately, rbtree walking doesn't give reliable results
99 * under just the RCU read lock, so we have to check for
103 afs_unuse_server(net
, server
, afs_server_trace_put_uuid_rsq
);
106 read_seqbegin_or_lock(&net
->fs_lock
, &seq
);
108 p
= net
->fs_servers
.rb_node
;
110 server
= rb_entry(p
, struct afs_server
, uuid_rb
);
112 diff
= memcmp(uuid
, &server
->uuid
, sizeof(*uuid
));
115 } else if (diff
> 0) {
118 afs_use_server(server
, afs_server_trace_get_by_uuid
);
124 } while (need_seqretry(&net
->fs_lock
, seq
));
126 done_seqretry(&net
->fs_lock
, seq
);
128 _leave(" = %p", server
);
133 * Install a server record in the namespace tree. If there's a clash, we stick
134 * it into a list anchored on whichever afs_server struct is actually in the
137 static struct afs_server
*afs_install_server(struct afs_cell
*cell
,
138 struct afs_server
*candidate
)
140 const struct afs_addr_list
*alist
;
141 struct afs_server
*server
, *next
;
142 struct afs_net
*net
= cell
->net
;
143 struct rb_node
**pp
, *p
;
146 _enter("%p", candidate
);
148 write_seqlock(&net
->fs_lock
);
150 /* Firstly install the server in the UUID lookup tree */
151 pp
= &net
->fs_servers
.rb_node
;
155 _debug("- consider %p", p
);
156 server
= rb_entry(p
, struct afs_server
, uuid_rb
);
157 diff
= memcmp(&candidate
->uuid
, &server
->uuid
, sizeof(uuid_t
));
159 pp
= &(*pp
)->rb_left
;
160 } else if (diff
> 0) {
161 pp
= &(*pp
)->rb_right
;
163 if (server
->cell
== cell
)
166 /* We have the same UUID representing servers in
167 * different cells. Append the new server to the list.
170 next
= rcu_dereference_protected(
172 lockdep_is_held(&net
->fs_lock
.lock
));
177 rcu_assign_pointer(server
->uuid_next
, candidate
);
178 candidate
->uuid_prev
= server
;
185 rb_link_node(&server
->uuid_rb
, p
, pp
);
186 rb_insert_color(&server
->uuid_rb
, &net
->fs_servers
);
187 hlist_add_head_rcu(&server
->proc_link
, &net
->fs_proc
);
190 write_seqlock(&net
->fs_addr_lock
);
191 alist
= rcu_dereference_protected(server
->addresses
,
192 lockdep_is_held(&net
->fs_addr_lock
.lock
));
194 /* Secondly, if the server has any IPv4 and/or IPv6 addresses, install
195 * it in the IPv4 and/or IPv6 reverse-map lists.
197 * TODO: For speed we want to use something other than a flat list
198 * here; even sorting the list in terms of lowest address would help a
199 * bit, but anything we might want to do gets messy and memory
202 if (alist
->nr_ipv4
> 0)
203 hlist_add_head_rcu(&server
->addr4_link
, &net
->fs_addresses4
);
204 if (alist
->nr_addrs
> alist
->nr_ipv4
)
205 hlist_add_head_rcu(&server
->addr6_link
, &net
->fs_addresses6
);
207 write_sequnlock(&net
->fs_addr_lock
);
210 afs_get_server(server
, afs_server_trace_get_install
);
211 write_sequnlock(&net
->fs_lock
);
216 * Allocate a new server record and mark it active.
218 static struct afs_server
*afs_alloc_server(struct afs_cell
*cell
,
220 struct afs_addr_list
*alist
)
222 struct afs_server
*server
;
223 struct afs_net
*net
= cell
->net
;
227 server
= kzalloc(sizeof(struct afs_server
), GFP_KERNEL
);
231 atomic_set(&server
->ref
, 1);
232 atomic_set(&server
->active
, 1);
233 server
->debug_id
= atomic_inc_return(&afs_server_debug_id
);
234 RCU_INIT_POINTER(server
->addresses
, alist
);
235 server
->addr_version
= alist
->version
;
236 server
->uuid
= *uuid
;
237 rwlock_init(&server
->fs_lock
);
238 init_waitqueue_head(&server
->probe_wq
);
239 INIT_LIST_HEAD(&server
->probe_link
);
240 spin_lock_init(&server
->probe_lock
);
242 server
->rtt
= UINT_MAX
;
244 afs_inc_servers_outstanding(net
);
245 trace_afs_server(server
, 1, 1, afs_server_trace_alloc
);
246 _leave(" = %p", server
);
250 _leave(" = NULL [nomem]");
255 * Look up an address record for a server
257 static struct afs_addr_list
*afs_vl_lookup_addrs(struct afs_cell
*cell
,
258 struct key
*key
, const uuid_t
*uuid
)
260 struct afs_vl_cursor vc
;
261 struct afs_addr_list
*alist
= NULL
;
265 if (afs_begin_vlserver_operation(&vc
, cell
, key
)) {
266 while (afs_select_vlserver(&vc
)) {
267 if (test_bit(AFS_VLSERVER_FL_IS_YFS
, &vc
.server
->flags
))
268 alist
= afs_yfsvl_get_endpoints(&vc
, uuid
);
270 alist
= afs_vl_get_addrs_u(&vc
, uuid
);
273 ret
= afs_end_vlserver_operation(&vc
);
276 return ret
< 0 ? ERR_PTR(ret
) : alist
;
280 * Get or create a fileserver record.
282 struct afs_server
*afs_lookup_server(struct afs_cell
*cell
, struct key
*key
,
283 const uuid_t
*uuid
, u32 addr_version
)
285 struct afs_addr_list
*alist
;
286 struct afs_server
*server
, *candidate
;
288 _enter("%p,%pU", cell
->net
, uuid
);
290 server
= afs_find_server_by_uuid(cell
->net
, uuid
);
292 if (server
->addr_version
!= addr_version
)
293 set_bit(AFS_SERVER_FL_NEEDS_UPDATE
, &server
->flags
);
297 alist
= afs_vl_lookup_addrs(cell
, key
, uuid
);
299 return ERR_CAST(alist
);
301 candidate
= afs_alloc_server(cell
, uuid
, alist
);
303 afs_put_addrlist(alist
);
304 return ERR_PTR(-ENOMEM
);
307 server
= afs_install_server(cell
, candidate
);
308 if (server
!= candidate
) {
309 afs_put_addrlist(alist
);
312 /* Immediately dispatch an asynchronous probe to each interface
313 * on the fileserver. This will make sure the repeat-probing
314 * service is started.
316 afs_fs_probe_fileserver(cell
->net
, server
, key
, true);
323 * Set the server timer to fire after a given delay, assuming it's not already
324 * set for an earlier time.
326 static void afs_set_server_timer(struct afs_net
*net
, time64_t delay
)
329 afs_inc_servers_outstanding(net
);
330 if (timer_reduce(&net
->fs_timer
, jiffies
+ delay
* HZ
))
331 afs_dec_servers_outstanding(net
);
336 * Server management timer. We have an increment on fs_outstanding that we
337 * need to pass along to the work item.
339 void afs_servers_timer(struct timer_list
*timer
)
341 struct afs_net
*net
= container_of(timer
, struct afs_net
, fs_timer
);
344 if (!queue_work(afs_wq
, &net
->fs_manager
))
345 afs_dec_servers_outstanding(net
);
349 * Get a reference on a server object.
351 struct afs_server
*afs_get_server(struct afs_server
*server
,
352 enum afs_server_trace reason
)
354 unsigned int u
= atomic_inc_return(&server
->ref
);
356 trace_afs_server(server
, u
, atomic_read(&server
->active
), reason
);
361 * Try to get a reference on a server object.
363 static struct afs_server
*afs_maybe_use_server(struct afs_server
*server
,
364 enum afs_server_trace reason
)
366 unsigned int r
= atomic_fetch_add_unless(&server
->ref
, 1, 0);
372 a
= atomic_inc_return(&server
->active
);
373 trace_afs_server(server
, r
, a
, reason
);
378 * Get an active count on a server object.
380 struct afs_server
*afs_use_server(struct afs_server
*server
, enum afs_server_trace reason
)
382 unsigned int r
= atomic_inc_return(&server
->ref
);
383 unsigned int a
= atomic_inc_return(&server
->active
);
385 trace_afs_server(server
, r
, a
, reason
);
390 * Release a reference on a server record.
392 void afs_put_server(struct afs_net
*net
, struct afs_server
*server
,
393 enum afs_server_trace reason
)
400 usage
= atomic_dec_return(&server
->ref
);
401 trace_afs_server(server
, usage
, atomic_read(&server
->active
), reason
);
402 if (unlikely(usage
== 0))
403 __afs_put_server(net
, server
);
407 * Drop an active count on a server object without updating the last-unused
410 void afs_unuse_server_notime(struct afs_net
*net
, struct afs_server
*server
,
411 enum afs_server_trace reason
)
414 unsigned int active
= atomic_dec_return(&server
->active
);
417 afs_set_server_timer(net
, afs_server_gc_delay
);
418 afs_put_server(net
, server
, reason
);
423 * Drop an active count on a server object.
425 void afs_unuse_server(struct afs_net
*net
, struct afs_server
*server
,
426 enum afs_server_trace reason
)
429 server
->unuse_time
= ktime_get_real_seconds();
430 afs_unuse_server_notime(net
, server
, reason
);
434 static void afs_server_rcu(struct rcu_head
*rcu
)
436 struct afs_server
*server
= container_of(rcu
, struct afs_server
, rcu
);
438 trace_afs_server(server
, atomic_read(&server
->ref
),
439 atomic_read(&server
->active
), afs_server_trace_free
);
440 afs_put_addrlist(rcu_access_pointer(server
->addresses
));
444 static void __afs_put_server(struct afs_net
*net
, struct afs_server
*server
)
446 call_rcu(&server
->rcu
, afs_server_rcu
);
447 afs_dec_servers_outstanding(net
);
450 static void afs_give_up_callbacks(struct afs_net
*net
, struct afs_server
*server
)
452 struct afs_addr_list
*alist
= rcu_access_pointer(server
->addresses
);
453 struct afs_addr_cursor ac
= {
455 .index
= alist
->preferred
,
459 afs_fs_give_up_all_callbacks(net
, server
, &ac
, NULL
);
463 * destroy a dead server
465 static void afs_destroy_server(struct afs_net
*net
, struct afs_server
*server
)
467 if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB
, &server
->flags
))
468 afs_give_up_callbacks(net
, server
);
470 afs_put_server(net
, server
, afs_server_trace_destroy
);
474 * Garbage collect any expired servers.
476 static void afs_gc_servers(struct afs_net
*net
, struct afs_server
*gc_list
)
478 struct afs_server
*server
, *next
, *prev
;
481 while ((server
= gc_list
)) {
482 gc_list
= server
->gc_next
;
484 write_seqlock(&net
->fs_lock
);
486 active
= atomic_read(&server
->active
);
488 trace_afs_server(server
, atomic_read(&server
->ref
),
489 active
, afs_server_trace_gc
);
490 next
= rcu_dereference_protected(
491 server
->uuid_next
, lockdep_is_held(&net
->fs_lock
.lock
));
492 prev
= server
->uuid_prev
;
494 /* The one at the front is in the tree */
496 rb_erase(&server
->uuid_rb
, &net
->fs_servers
);
498 rb_replace_node_rcu(&server
->uuid_rb
,
501 next
->uuid_prev
= NULL
;
504 /* This server is not at the front */
505 rcu_assign_pointer(prev
->uuid_next
, next
);
507 next
->uuid_prev
= prev
;
510 list_del(&server
->probe_link
);
511 hlist_del_rcu(&server
->proc_link
);
512 if (!hlist_unhashed(&server
->addr4_link
))
513 hlist_del_rcu(&server
->addr4_link
);
514 if (!hlist_unhashed(&server
->addr6_link
))
515 hlist_del_rcu(&server
->addr6_link
);
517 write_sequnlock(&net
->fs_lock
);
520 afs_destroy_server(net
, server
);
525 * Manage the records of servers known to be within a network namespace. This
526 * includes garbage collecting unused servers.
528 * Note also that we were given an increment on net->servers_outstanding by
529 * whoever queued us that we need to deal with before returning.
531 void afs_manage_servers(struct work_struct
*work
)
533 struct afs_net
*net
= container_of(work
, struct afs_net
, fs_manager
);
534 struct afs_server
*gc_list
= NULL
;
535 struct rb_node
*cursor
;
536 time64_t now
= ktime_get_real_seconds(), next_manage
= TIME64_MAX
;
537 bool purging
= !net
->live
;
541 /* Trawl the server list looking for servers that have expired from
544 read_seqlock_excl(&net
->fs_lock
);
546 for (cursor
= rb_first(&net
->fs_servers
); cursor
; cursor
= rb_next(cursor
)) {
547 struct afs_server
*server
=
548 rb_entry(cursor
, struct afs_server
, uuid_rb
);
549 int active
= atomic_read(&server
->active
);
551 _debug("manage %pU %u", &server
->uuid
, active
);
554 trace_afs_server(server
, atomic_read(&server
->ref
),
555 active
, afs_server_trace_purging
);
557 pr_notice("Can't purge s=%08x\n", server
->debug_id
);
561 time64_t expire_at
= server
->unuse_time
;
563 if (!test_bit(AFS_SERVER_FL_VL_FAIL
, &server
->flags
) &&
564 !test_bit(AFS_SERVER_FL_NOT_FOUND
, &server
->flags
))
565 expire_at
+= afs_server_gc_delay
;
566 if (purging
|| expire_at
<= now
) {
567 server
->gc_next
= gc_list
;
569 } else if (expire_at
< next_manage
) {
570 next_manage
= expire_at
;
575 read_sequnlock_excl(&net
->fs_lock
);
577 /* Update the timer on the way out. We have to pass an increment on
578 * servers_outstanding in the namespace that we are in to the timer or
579 * the work scheduler.
581 if (!purging
&& next_manage
< TIME64_MAX
) {
582 now
= ktime_get_real_seconds();
584 if (next_manage
- now
<= 0) {
585 if (queue_work(afs_wq
, &net
->fs_manager
))
586 afs_inc_servers_outstanding(net
);
588 afs_set_server_timer(net
, next_manage
- now
);
592 afs_gc_servers(net
, gc_list
);
594 afs_dec_servers_outstanding(net
);
595 _leave(" [%d]", atomic_read(&net
->servers_outstanding
));
598 static void afs_queue_server_manager(struct afs_net
*net
)
600 afs_inc_servers_outstanding(net
);
601 if (!queue_work(afs_wq
, &net
->fs_manager
))
602 afs_dec_servers_outstanding(net
);
606 * Purge list of servers.
608 void afs_purge_servers(struct afs_net
*net
)
612 if (del_timer_sync(&net
->fs_timer
))
613 afs_dec_servers_outstanding(net
);
615 afs_queue_server_manager(net
);
618 atomic_dec(&net
->servers_outstanding
);
619 wait_var_event(&net
->servers_outstanding
,
620 !atomic_read(&net
->servers_outstanding
));
625 * Get an update for a server's address list.
627 static noinline
bool afs_update_server_record(struct afs_operation
*op
,
628 struct afs_server
*server
)
630 struct afs_addr_list
*alist
, *discard
;
634 trace_afs_server(server
, atomic_read(&server
->ref
), atomic_read(&server
->active
),
635 afs_server_trace_update
);
637 alist
= afs_vl_lookup_addrs(op
->volume
->cell
, op
->key
, &server
->uuid
);
639 if ((PTR_ERR(alist
) == -ERESTARTSYS
||
640 PTR_ERR(alist
) == -EINTR
) &&
641 (op
->flags
& AFS_OPERATION_UNINTR
) &&
643 _leave(" = t [intr]");
646 op
->error
= PTR_ERR(alist
);
647 _leave(" = f [%d]", op
->error
);
652 if (server
->addr_version
!= alist
->version
) {
653 write_lock(&server
->fs_lock
);
654 discard
= rcu_dereference_protected(server
->addresses
,
655 lockdep_is_held(&server
->fs_lock
));
656 rcu_assign_pointer(server
->addresses
, alist
);
657 server
->addr_version
= alist
->version
;
658 write_unlock(&server
->fs_lock
);
661 afs_put_addrlist(discard
);
667 * See if a server's address list needs updating.
669 bool afs_check_server_record(struct afs_operation
*op
, struct afs_server
*server
)
672 int ret
, retries
= 0;
679 if (test_bit(AFS_SERVER_FL_UPDATING
, &server
->flags
))
681 if (test_bit(AFS_SERVER_FL_NEEDS_UPDATE
, &server
->flags
))
683 _leave(" = t [good]");
687 if (!test_and_set_bit_lock(AFS_SERVER_FL_UPDATING
, &server
->flags
)) {
688 clear_bit(AFS_SERVER_FL_NEEDS_UPDATE
, &server
->flags
);
689 success
= afs_update_server_record(op
, server
);
690 clear_bit_unlock(AFS_SERVER_FL_UPDATING
, &server
->flags
);
691 wake_up_bit(&server
->flags
, AFS_SERVER_FL_UPDATING
);
692 _leave(" = %d", success
);
697 ret
= wait_on_bit(&server
->flags
, AFS_SERVER_FL_UPDATING
,
698 (op
->flags
& AFS_OPERATION_UNINTR
) ?
699 TASK_UNINTERRUPTIBLE
: TASK_INTERRUPTIBLE
);
700 if (ret
== -ERESTARTSYS
) {
702 _leave(" = f [intr]");
708 _leave(" = f [stale]");