]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - fs/lockd/host.c
NLM: sem to mutex conversion
[mirror_ubuntu-bionic-kernel.git] / fs / lockd / host.c
CommitLineData
1da177e4
LT
1/*
2 * linux/fs/lockd/host.c
3 *
4 * Management for NLM peer hosts. The nlm_host struct is shared
5 * between client and server implementation. The only reason to
6 * do so is to reduce code bloat.
7 *
8 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
9 */
10
11#include <linux/types.h>
12#include <linux/sched.h>
13#include <linux/slab.h>
14#include <linux/in.h>
15#include <linux/sunrpc/clnt.h>
16#include <linux/sunrpc/svc.h>
17#include <linux/lockd/lockd.h>
18#include <linux/lockd/sm_inter.h>
353ab6e9 19#include <linux/mutex.h>
1da177e4
LT
20
21
/* Debug facility used by dprintk() throughout this file */
#define NLMDBG_FACILITY NLMDBG_HOSTCACHE
/* Soft limit on cached hosts; above it, expiry/GC intervals shrink */
#define NLM_HOST_MAX 64
/* Number of hash buckets; must stay a power of two for NLM_ADDRHASH */
#define NLM_HOST_NRHASH 32
/* Bucket index derived from the low bits of the peer's IPv4 address */
#define NLM_ADDRHASH(addr) (ntohl(addr) & (NLM_HOST_NRHASH-1))
/* Minimum delay between portmap rebind attempts for one host */
#define NLM_HOST_REBIND (60 * HZ)
/* Host lifetime: shorter once the table is over NLM_HOST_MAX entries */
#define NLM_HOST_EXPIRE ((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
/* Interval between GC passes, likewise shortened under pressure */
#define NLM_HOST_COLLECT ((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ)
/* Transport address of the NLM client bound to an nfs_server */
#define NLM_HOST_ADDR(sv) (&(sv)->s_nlmclnt->cl_xprt->addr)

/* Hash table of all known peer hosts, singly chained via h_next */
static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH];
/* Time (in jiffies) of the next garbage collection pass */
static unsigned long next_gc;
/* Current number of entries across all nlm_hosts[] chains */
static int nrhosts;
/* Serializes all access to nlm_hosts[], nrhosts and next_gc */
static DEFINE_MUTEX(nlm_host_mutex);


static void nlm_gc_hosts(void);
/*
 * Find an NLM server handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmclnt_lookup_host(struct sockaddr_in *sin, int proto, int version)
{
	const int is_server = 0;	/* client-side handle */

	return nlm_lookup_host(is_server, sin, proto, version);
}
47
48/*
49 * Find an NLM client handle in the cache. If there is none, create it.
50 */
51struct nlm_host *
52nlmsvc_lookup_host(struct svc_rqst *rqstp)
53{
54 return nlm_lookup_host(1, &rqstp->rq_addr,
55 rqstp->rq_prot, rqstp->rq_vers);
56}
57
58/*
59 * Common host lookup routine for server & client
60 */
61struct nlm_host *
62nlm_lookup_host(int server, struct sockaddr_in *sin,
63 int proto, int version)
64{
65 struct nlm_host *host, **hp;
66 u32 addr;
67 int hash;
68
69 dprintk("lockd: nlm_lookup_host(%08x, p=%d, v=%d)\n",
70 (unsigned)(sin? ntohl(sin->sin_addr.s_addr) : 0), proto, version);
71
72 hash = NLM_ADDRHASH(sin->sin_addr.s_addr);
73
74 /* Lock hash table */
353ab6e9 75 mutex_lock(&nlm_host_mutex);
1da177e4
LT
76
77 if (time_after_eq(jiffies, next_gc))
78 nlm_gc_hosts();
79
80 for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
81 if (host->h_proto != proto)
82 continue;
83 if (host->h_version != version)
84 continue;
85 if (host->h_server != server)
86 continue;
87
88 if (nlm_cmp_addr(&host->h_addr, sin)) {
89 if (hp != nlm_hosts + hash) {
90 *hp = host->h_next;
91 host->h_next = nlm_hosts[hash];
92 nlm_hosts[hash] = host;
93 }
94 nlm_get_host(host);
353ab6e9 95 mutex_unlock(&nlm_host_mutex);
1da177e4
LT
96 return host;
97 }
98 }
99
100 /* Ooops, no host found, create it */
101 dprintk("lockd: creating host entry\n");
102
103 if (!(host = (struct nlm_host *) kmalloc(sizeof(*host), GFP_KERNEL)))
104 goto nohost;
105 memset(host, 0, sizeof(*host));
106
107 addr = sin->sin_addr.s_addr;
108 sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr));
109
110 host->h_addr = *sin;
111 host->h_addr.sin_port = 0; /* ouch! */
112 host->h_version = version;
113 host->h_proto = proto;
114 host->h_rpcclnt = NULL;
50467914 115 mutex_init(&host->h_mutex);
1da177e4
LT
116 host->h_nextrebind = jiffies + NLM_HOST_REBIND;
117 host->h_expires = jiffies + NLM_HOST_EXPIRE;
118 atomic_set(&host->h_count, 1);
119 init_waitqueue_head(&host->h_gracewait);
120 host->h_state = 0; /* pseudo NSM state */
121 host->h_nsmstate = 0; /* real NSM state */
122 host->h_server = server;
123 host->h_next = nlm_hosts[hash];
124 nlm_hosts[hash] = host;
125 INIT_LIST_HEAD(&host->h_lockowners);
126 spin_lock_init(&host->h_lock);
26bcbf96
CH
127 INIT_LIST_HEAD(&host->h_granted);
128 INIT_LIST_HEAD(&host->h_reclaim);
1da177e4
LT
129
130 if (++nrhosts > NLM_HOST_MAX)
131 next_gc = 0;
132
133nohost:
353ab6e9 134 mutex_unlock(&nlm_host_mutex);
1da177e4
LT
135 return host;
136}
137
138struct nlm_host *
139nlm_find_client(void)
140{
141 /* find a nlm_host for a client for which h_killed == 0.
142 * and return it
143 */
144 int hash;
353ab6e9 145 mutex_lock(&nlm_host_mutex);
1da177e4
LT
146 for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
147 struct nlm_host *host, **hp;
148 for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
149 if (host->h_server &&
150 host->h_killed == 0) {
151 nlm_get_host(host);
353ab6e9 152 mutex_unlock(&nlm_host_mutex);
1da177e4
LT
153 return host;
154 }
155 }
156 }
353ab6e9 157 mutex_unlock(&nlm_host_mutex);
1da177e4
LT
158 return NULL;
159}
160
161
/*
 * Create the NLM RPC client for an NLM peer
 *
 * Returns the (possibly cached) rpc_clnt for @host, or NULL if the
 * transport or client could not be created.  h_mutex serializes
 * creation so concurrent callers share one client.
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt *clnt;
	struct rpc_xprt *xprt;

	dprintk("lockd: nlm_bind_host(%08x)\n",
			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));

	/* Lock host handle */
	mutex_lock(&host->h_mutex);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		xprt = clnt->cl_xprt;
		/* Rate-limited: rebind at most once per NLM_HOST_REBIND */
		if (time_after_eq(jiffies, host->h_nextrebind)) {
			rpc_force_rebind(clnt);
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
					host->h_nextrebind - jiffies);
		}
	} else {
		xprt = xprt_create_proto(host->h_proto, &host->h_addr, NULL);
		if (IS_ERR(xprt))
			goto forgetit;

		xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout);
		xprt->resvport = 1;	/* NLM requires a reserved port */

		/* Existing NLM servers accept AUTH_UNIX only */
		clnt = rpc_new_client(xprt, host->h_name, &nlm_program,
					host->h_version, RPC_AUTH_UNIX);
		/* NOTE(review): if rpc_new_client() fails here, xprt is not
		 * explicitly released — presumably rpc_new_client() frees it
		 * on error in this SUNRPC version; verify, else it leaks. */
		if (IS_ERR(clnt))
			goto forgetit;
		clnt->cl_autobind = 1;	/* turn on pmap queries */
		clnt->cl_softrtry = 1;	/* All queries are soft */

		host->h_rpcclnt = clnt;
	}

	mutex_unlock(&host->h_mutex);
	return clnt;

forgetit:
	printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
	mutex_unlock(&host->h_mutex);
	return NULL;
}
215
216/*
217 * Force a portmap lookup of the remote lockd port
218 */
219void
220nlm_rebind_host(struct nlm_host *host)
221{
222 dprintk("lockd: rebind host %s\n", host->h_name);
223 if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
35f5a422 224 rpc_force_rebind(host->h_rpcclnt);
1da177e4
LT
225 host->h_nextrebind = jiffies + NLM_HOST_REBIND;
226 }
227}
228
229/*
230 * Increment NLM host count
231 */
232struct nlm_host * nlm_get_host(struct nlm_host *host)
233{
234 if (host) {
235 dprintk("lockd: get host %s\n", host->h_name);
236 atomic_inc(&host->h_count);
237 host->h_expires = jiffies + NLM_HOST_EXPIRE;
238 }
239 return host;
240}
241
242/*
243 * Release NLM host after use
244 */
245void nlm_release_host(struct nlm_host *host)
246{
247 if (host != NULL) {
248 dprintk("lockd: release host %s\n", host->h_name);
1da177e4 249 BUG_ON(atomic_read(&host->h_count) < 0);
4c060b53
TM
250 if (atomic_dec_and_test(&host->h_count)) {
251 BUG_ON(!list_empty(&host->h_lockowners));
252 BUG_ON(!list_empty(&host->h_granted));
253 BUG_ON(!list_empty(&host->h_reclaim));
254 }
1da177e4
LT
255 }
256}
257
258/*
259 * Shut down the hosts module.
260 * Note that this routine is called only at server shutdown time.
261 */
262void
263nlm_shutdown_hosts(void)
264{
265 struct nlm_host *host;
266 int i;
267
268 dprintk("lockd: shutting down host module\n");
353ab6e9 269 mutex_lock(&nlm_host_mutex);
1da177e4
LT
270
271 /* First, make all hosts eligible for gc */
272 dprintk("lockd: nuking all hosts...\n");
273 for (i = 0; i < NLM_HOST_NRHASH; i++) {
274 for (host = nlm_hosts[i]; host; host = host->h_next)
275 host->h_expires = jiffies - 1;
276 }
277
278 /* Then, perform a garbage collection pass */
279 nlm_gc_hosts();
353ab6e9 280 mutex_unlock(&nlm_host_mutex);
1da177e4
LT
281
282 /* complain if any hosts are left */
283 if (nrhosts) {
284 printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
285 dprintk("lockd: %d hosts left:\n", nrhosts);
286 for (i = 0; i < NLM_HOST_NRHASH; i++) {
287 for (host = nlm_hosts[i]; host; host = host->h_next) {
288 dprintk(" %s (cnt %d use %d exp %ld)\n",
289 host->h_name, atomic_read(&host->h_count),
290 host->h_inuse, host->h_expires);
291 }
292 }
293 }
294}
295
/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 *
 * All callers in this file hold nlm_host_mutex.
 */
static void
nlm_gc_hosts(void)
{
	struct nlm_host **q, *host;
	struct rpc_clnt	*clnt;
	int i;

	dprintk("lockd: host garbage collection\n");
	/* Clear the in-use mark on every cached host */
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	/* Sweep: free every host that is unreferenced, unmarked and
	 * past its expiry time.  q points at the link to the current
	 * node so nodes can be unlinked while walking the chain. */
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		q = &nlm_hosts[i];
		while ((host = *q) != NULL) {
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				q = &host->h_next;	/* keep; advance */
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			/* Unlink; q stays put so the successor is checked next */
			*q = host->h_next;
			/* Don't unmonitor hosts that have been invalidated */
			if (host->h_monitored && !host->h_killed)
				nsm_unmonitor(host);
			if ((clnt = host->h_rpcclnt) != NULL) {
				/* A client with outstanding users cannot be
				 * destroyed now; flag it dead instead. */
				if (atomic_read(&clnt->cl_users)) {
					printk(KERN_WARNING
						"lockd: active RPC handle\n");
					clnt->cl_dead = 1;
				} else {
					rpc_destroy_client(host->h_rpcclnt);
				}
			}
			kfree(host);
			nrhosts--;
		}
	}

	next_gc = jiffies + NLM_HOST_COLLECT;
}
349