]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/fs/lockd/host.c | |
3 | * | |
4 | * Management for NLM peer hosts. The nlm_host struct is shared | |
5 | * between client and server implementation. The only reason to | |
6 | * do so is to reduce code bloat. | |
7 | * | |
8 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | |
9 | */ | |
10 | ||
11 | #include <linux/types.h> | |
12 | #include <linux/sched.h> | |
13 | #include <linux/slab.h> | |
14 | #include <linux/in.h> | |
15 | #include <linux/sunrpc/clnt.h> | |
16 | #include <linux/sunrpc/svc.h> | |
17 | #include <linux/lockd/lockd.h> | |
18 | #include <linux/lockd/sm_inter.h> | |
353ab6e9 | 19 | #include <linux/mutex.h> |
1da177e4 LT |
20 | |
21 | ||
22 | #define NLMDBG_FACILITY NLMDBG_HOSTCACHE | |
23 | #define NLM_HOST_MAX 64 | |
24 | #define NLM_HOST_NRHASH 32 | |
25 | #define NLM_ADDRHASH(addr) (ntohl(addr) & (NLM_HOST_NRHASH-1)) | |
26 | #define NLM_HOST_REBIND (60 * HZ) | |
27 | #define NLM_HOST_EXPIRE ((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ) | |
28 | #define NLM_HOST_COLLECT ((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ) | |
1da177e4 LT |
29 | |
30 | static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH]; | |
31 | static unsigned long next_gc; | |
32 | static int nrhosts; | |
353ab6e9 | 33 | static DEFINE_MUTEX(nlm_host_mutex); |
1da177e4 LT |
34 | |
35 | ||
36 | static void nlm_gc_hosts(void); | |
37 | ||
/*
 * Find an NLM server handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmclnt_lookup_host(struct sockaddr_in *sin, int proto, int version)
{
	/* Client side: the peer is a server, so the server flag is 0. */
	const int is_server = 0;

	return nlm_lookup_host(is_server, sin, proto, version);
}
46 | ||
47 | /* | |
48 | * Find an NLM client handle in the cache. If there is none, create it. | |
49 | */ | |
50 | struct nlm_host * | |
51 | nlmsvc_lookup_host(struct svc_rqst *rqstp) | |
52 | { | |
53 | return nlm_lookup_host(1, &rqstp->rq_addr, | |
54 | rqstp->rq_prot, rqstp->rq_vers); | |
55 | } | |
56 | ||
/*
 * Common host lookup routine for server & client
 *
 * Looks up the nlm_host matching (address, protocol, version, server
 * flag) in the hash table, moving a hit to the front of its chain;
 * otherwise allocates and inserts a fresh entry.  A found host is
 * returned with its reference count bumped (via nlm_get_host); NULL is
 * returned on allocation failure.  A garbage-collection pass runs first
 * whenever the GC deadline has passed.  All of this happens under
 * nlm_host_mutex.
 */
struct nlm_host *
nlm_lookup_host(int server, struct sockaddr_in *sin,
					int proto, int version)
{
	struct nlm_host	*host, **hp;
	u32		addr;
	int		hash;

	dprintk("lockd: nlm_lookup_host(%08x, p=%d, v=%d)\n",
			(unsigned)(sin? ntohl(sin->sin_addr.s_addr) : 0), proto, version);

	/* Hash on the peer's IPv4 address only; proto/version are checked
	 * per-entry below. */
	hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

	/* Lock hash table */
	mutex_lock(&nlm_host_mutex);

	if (time_after_eq(jiffies, next_gc))
		nlm_gc_hosts();

	for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
		if (host->h_proto != proto)
			continue;
		if (host->h_version != version)
			continue;
		if (host->h_server != server)
			continue;

		if (nlm_cmp_addr(&host->h_addr, sin)) {
			/* Move-to-front so frequently used hosts stay cheap
			 * to find on the next lookup. */
			if (hp != nlm_hosts + hash) {
				*hp = host->h_next;
				host->h_next = nlm_hosts[hash];
				nlm_hosts[hash] = host;
			}
			nlm_get_host(host);
			mutex_unlock(&nlm_host_mutex);
			return host;
		}
	}

	/* Ooops, no host found, create it */
	dprintk("lockd: creating host entry\n");

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		goto nohost;	/* returns NULL: host is still NULL here */

	/* Name the host by its dotted-quad address. */
	addr = sin->sin_addr.s_addr;
	sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr));

	host->h_addr       = *sin;
	host->h_addr.sin_port = 0;	/* ouch! */
	host->h_version    = version;
	host->h_proto      = proto;
	host->h_rpcclnt    = NULL;
	mutex_init(&host->h_mutex);
	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	host->h_expires    = jiffies + NLM_HOST_EXPIRE;
	atomic_set(&host->h_count, 1);
	init_waitqueue_head(&host->h_gracewait);
	init_rwsem(&host->h_rwsem);
	host->h_state      = 0;			/* pseudo NSM state */
	host->h_nsmstate   = 0;			/* real NSM state */
	host->h_server	   = server;
	/* Link the new host at the head of its hash chain. */
	host->h_next       = nlm_hosts[hash];
	nlm_hosts[hash]    = host;
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);
	INIT_LIST_HEAD(&host->h_granted);
	INIT_LIST_HEAD(&host->h_reclaim);

	/* Too many cached hosts: force a GC on the next lookup. */
	if (++nrhosts > NLM_HOST_MAX)
		next_gc = 0;

nohost:
	mutex_unlock(&nlm_host_mutex);
	return host;
}
137 | ||
138 | struct nlm_host * | |
139 | nlm_find_client(void) | |
140 | { | |
141 | /* find a nlm_host for a client for which h_killed == 0. | |
142 | * and return it | |
143 | */ | |
144 | int hash; | |
353ab6e9 | 145 | mutex_lock(&nlm_host_mutex); |
1da177e4 LT |
146 | for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) { |
147 | struct nlm_host *host, **hp; | |
148 | for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) { | |
149 | if (host->h_server && | |
150 | host->h_killed == 0) { | |
151 | nlm_get_host(host); | |
353ab6e9 | 152 | mutex_unlock(&nlm_host_mutex); |
1da177e4 LT |
153 | return host; |
154 | } | |
155 | } | |
156 | } | |
353ab6e9 | 157 | mutex_unlock(&nlm_host_mutex); |
1da177e4 LT |
158 | return NULL; |
159 | } | |
160 | ||
161 | ||
/*
 * Create the NLM RPC client for an NLM peer
 *
 * Returns the host's cached RPC client, creating it on first use, or
 * NULL if creation fails.  If a client already exists and the rebind
 * interval has elapsed, force a portmap re-lookup of the remote lockd
 * port before reuse.  Serialized per-host via host->h_mutex.
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;

	dprintk("lockd: nlm_bind_host(%08x)\n",
			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));

	/* Lock host handle */
	mutex_lock(&host->h_mutex);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		if (time_after_eq(jiffies, host->h_nextrebind)) {
			rpc_force_rebind(clnt);
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
					host->h_nextrebind - jiffies);
		}
	} else {
		/* Derive the RPC timeout from the NLM timeout; retries back
		 * off linearly (to_increment == to_initval), capped at 6x. */
		unsigned long increment = nlmsvc_timeout * HZ;
		struct rpc_timeout timeparms = {
			.to_initval	= increment,
			.to_increment	= increment,
			.to_maxval	= increment * 6UL,
			.to_retries	= 5U,
		};
		struct rpc_create_args args = {
			.protocol	= host->h_proto,
			.address	= (struct sockaddr *)&host->h_addr,
			.addrsize	= sizeof(host->h_addr),
			.timeout	= &timeparms,
			.servername	= host->h_name,
			.program	= &nlm_program,
			.version	= host->h_version,
			.authflavor	= RPC_AUTH_UNIX,
			.flags		= (RPC_CLNT_CREATE_HARDRTRY |
					   RPC_CLNT_CREATE_AUTOBIND),
		};

		clnt = rpc_create(&args);
		if (!IS_ERR(clnt))
			host->h_rpcclnt = clnt;
		else {
			/* Creation failed: report and return NULL; the next
			 * caller will retry the create. */
			printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
			clnt = NULL;
		}
	}

	mutex_unlock(&host->h_mutex);
	return clnt;
}
219 | ||
220 | /* | |
221 | * Force a portmap lookup of the remote lockd port | |
222 | */ | |
223 | void | |
224 | nlm_rebind_host(struct nlm_host *host) | |
225 | { | |
226 | dprintk("lockd: rebind host %s\n", host->h_name); | |
227 | if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) { | |
35f5a422 | 228 | rpc_force_rebind(host->h_rpcclnt); |
1da177e4 LT |
229 | host->h_nextrebind = jiffies + NLM_HOST_REBIND; |
230 | } | |
231 | } | |
232 | ||
233 | /* | |
234 | * Increment NLM host count | |
235 | */ | |
236 | struct nlm_host * nlm_get_host(struct nlm_host *host) | |
237 | { | |
238 | if (host) { | |
239 | dprintk("lockd: get host %s\n", host->h_name); | |
240 | atomic_inc(&host->h_count); | |
241 | host->h_expires = jiffies + NLM_HOST_EXPIRE; | |
242 | } | |
243 | return host; | |
244 | } | |
245 | ||
246 | /* | |
247 | * Release NLM host after use | |
248 | */ | |
249 | void nlm_release_host(struct nlm_host *host) | |
250 | { | |
251 | if (host != NULL) { | |
252 | dprintk("lockd: release host %s\n", host->h_name); | |
1da177e4 | 253 | BUG_ON(atomic_read(&host->h_count) < 0); |
4c060b53 TM |
254 | if (atomic_dec_and_test(&host->h_count)) { |
255 | BUG_ON(!list_empty(&host->h_lockowners)); | |
256 | BUG_ON(!list_empty(&host->h_granted)); | |
257 | BUG_ON(!list_empty(&host->h_reclaim)); | |
258 | } | |
1da177e4 LT |
259 | } |
260 | } | |
261 | ||
/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 *
 * Marks every cached host as already expired and runs one GC pass to
 * free them.  Hosts still pinned by references survive the pass; in
 * that case a warning is printed and the stragglers are listed.
 */
void
nlm_shutdown_hosts(void)
{
	struct nlm_host	*host;
	int		i;

	dprintk("lockd: shutting down host module\n");
	mutex_lock(&nlm_host_mutex);

	/* First, make all hosts eligible for gc */
	dprintk("lockd: nuking all hosts...\n");
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_expires = jiffies - 1;	/* already in the past */
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts();
	mutex_unlock(&nlm_host_mutex);

	/* complain if any hosts are left */
	if (nrhosts) {
		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
		dprintk("lockd: %d hosts left:\n", nrhosts);
		for (i = 0; i < NLM_HOST_NRHASH; i++) {
			for (host = nlm_hosts[i]; host; host = host->h_next) {
				dprintk("       %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
			}
		}
	}
}
299 | ||
/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 *
 * NOTE(review): both callers hold nlm_host_mutex — presumably that is
 * the required locking contract; confirm against other call sites.
 * A host is freed only when its refcount is zero, it was not marked
 * in-use by nlmsvc_mark_resources(), and its expiry time has passed.
 */
static void
nlm_gc_hosts(void)
{
	struct nlm_host	**q, *host;
	struct rpc_clnt	*clnt;
	int		i;

	dprintk("lockd: host garbage collection\n");
	/* Clear all in-use marks before the mark pass. */
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	/* Sweep: unlink and free every unmarked, unreferenced, expired
	 * host.  q tracks the link slot so unlinking works mid-chain. */
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		q = &nlm_hosts[i];
		while ((host = *q) != NULL) {
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				q = &host->h_next;
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			*q = host->h_next;

			/*
			 * Unmonitor unless host was invalidated (i.e. lockd restarted)
			 */
			nsm_unmonitor(host);

			if ((clnt = host->h_rpcclnt) != NULL) {
				if (atomic_read(&clnt->cl_users)) {
					/* RPC handle still in use: mark it
					 * dead so the last user frees it. */
					printk(KERN_WARNING
						"lockd: active RPC handle\n");
					clnt->cl_dead = 1;
				} else {
					rpc_destroy_client(host->h_rpcclnt);
				}
			}
			kfree(host);
			nrhosts--;
		}
	}

	/* Schedule the next collection. */
	next_gc = jiffies + NLM_HOST_COLLECT;
}
356 |