/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>

#include "nfsd.h"
#include "cache.h"

/* Size of reply cache. Common values are:
 * 4.3BSD: 128
 * 4.4BSD: 256
 * Solaris2: 1024
 * DEC Unix: 512-4096
 */
#define CACHESIZE		1024
#define HASHSIZE		64
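
/*
 * With CACHESIZE (1024) preallocated entries hashed into HASHSIZE (64)
 * buckets, an average hash chain holds 1024/64 = 16 entries, so the
 * chain walk in nfsd_cache_search() below stays short even when the
 * cache is full.
 */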

static struct hlist_head *	cache_hash;
static struct list_head 	lru_head;
static int			cache_disabled = 1;
static struct kmem_cache	*drc_slab;
static unsigned int		num_drc_entries;

/*
 * Calculate the hash index from an XID.
 */
static inline u32 request_hash(u32 xid)
{
	u32 h = xid;
	h ^= (xid >> 24);
	return h & (HASHSIZE-1);
}
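
/*
 * Worked example: for xid 0x12345678, h = 0x12345678 ^ 0x12 = 0x1234566a,
 * and 0x1234566a & (HASHSIZE-1) = 0x2a, i.e. bucket 42.  Folding the top
 * byte into the low bits spreads out XIDs that differ only in their high
 * byte.
 */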

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);

/*
 * Locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG.
 * Otherwise, the lock must be held when accessing _prev or _next.
 */
static DEFINE_SPINLOCK(cache_lock);
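
/*
 * In practice cache_lock protects lru_head, the cache_hash chains and
 * num_drc_entries.  An RC_INPROG entry is owned by the nfsd thread that
 * claimed it in nfsd_cache_lookup(); that thread fills in the reply data
 * without the lock and only re-takes it in nfsd_cache_update() to move
 * the entry on the LRU and mark it RC_DONE.
 */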

static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
		INIT_HLIST_NODE(&rp->c_hash);
	}
	return rp;
}

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF)
		kfree(rp->c_replvec.iov_base);
	list_del(&rp->c_lru);
	--num_drc_entries;
	kmem_cache_free(drc_slab, rp);
}
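
/*
 * The "_locked" suffix means the caller is expected to hold cache_lock;
 * nfsd_reply_cache_shutdown() below gets away without it, presumably
 * because it only runs while the server is being torn down and no nfsd
 * thread is still using the cache.
 */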

int nfsd_reply_cache_init(void)
{
	int			i;
	struct svc_cacherep	*rp;

	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	INIT_LIST_HEAD(&lru_head);
	i = CACHESIZE;
	num_drc_entries = 0;
	while (i) {
		rp = nfsd_reply_cache_alloc();
		if (!rp)
			goto out_nomem;
		++num_drc_entries;
		list_add(&rp->c_lru, &lru_head);
		i--;
	}

	cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	cache_disabled = 0;
	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		nfsd_reply_cache_free_locked(rp);
	}

	cache_disabled = 1;

	kfree(cache_hash);
	cache_hash = NULL;

	if (drc_slab) {
		kmem_cache_destroy(drc_slab);
		drc_slab = NULL;
	}
}

/*
 * Move cache entry to end of LRU list
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &lru_head);
}
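
/*
 * Because every user of an entry refreshes c_timestamp and moves the
 * entry to the tail, the head of lru_head is always the least recently
 * used entry and therefore the first candidate for reuse in
 * nfsd_cache_lookup().
 */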

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}

static inline bool
nfsd_cache_entry_expired(struct svc_cacherep *rp)
{
	return rp->c_state != RC_INPROG &&
	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}
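
/*
 * RC_EXPIRE is defined in cache.h (120 seconds in mainline kernels).  An
 * entry that is not in progress and has not been touched for that long is
 * treated as stale: nfsd_cache_search() skips it even if it would
 * otherwise match, so a very late retransmission is executed again rather
 * than answered from the cache.
 */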

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp;
	struct hlist_node	*hn;
	struct hlist_head 	*rh;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;

	rh = &cache_hash[request_hash(xid)];
	hlist_for_each_entry(rp, hn, rh, c_hash) {
		if (rp->c_state != RC_UNUSED &&
		    xid == rp->c_xid && proc == rp->c_proc &&
		    proto == rp->c_prot && vers == rp->c_vers &&
		    !nfsd_cache_entry_expired(rp) &&
		    rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) &&
		    rpc_get_port(svc_addr(rqstp)) == rpc_get_port((struct sockaddr *)&rp->c_addr))
			return rp;
	}
	return NULL;
}
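
/*
 * Matching on the XID alone is not enough: XIDs are only unique per
 * client and get reused over time, so the procedure, protocol, version
 * and the client's address and port all have to match as well before a
 * cached entry is considered a hit.
 */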

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we grab the oldest unlocked entry off the LRU list.
 * Note that no operation within the loop may sleep.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	unsigned long		age;
	int			type = rqstp->rq_cachetype;
	int			rtn;

	rqstp->rq_cacherep = NULL;
	if (cache_disabled || type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return RC_DOIT;
	}

	spin_lock(&cache_lock);
	rtn = RC_DOIT;

	rp = nfsd_cache_search(rqstp);
	if (rp) {
		nfsdstats.rchits++;
		goto found_entry;
	}
	nfsdstats.rcmisses++;

	/* This loop shouldn't take more than a few iterations normally */
	{
		int	safe = 0;
		list_for_each_entry(rp, &lru_head, c_lru) {
			if (rp->c_state != RC_INPROG)
				break;
			if (safe++ > CACHESIZE) {
				printk("nfsd: loop in repcache LRU list\n");
				cache_disabled = 1;
				goto out;
			}
		}
	}

	/* All entries on the LRU are in-progress. This should not happen */
	if (&rp->c_lru == &lru_head) {
		static int	complaints;

		printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
		if (++complaints > 5) {
			printk(KERN_WARNING "nfsd: disabling repcache.\n");
			cache_disabled = 1;
		}
		goto out;
	}

	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;

	hash_refile(rp);
	lru_put_end(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		rp->c_state = RC_UNUSED;
	}

	goto out;
}
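
/*
 * Caller-side sketch (simplified; the real logic lives in nfsd_dispatch):
 *
 *	switch (nfsd_cache_lookup(rqstp)) {
 *	case RC_DOIT:	execute the procedure, then call nfsd_cache_update();
 *	case RC_REPLY:	the cached reply is already in rq_res, just send it;
 *	case RC_DROPIT:	drop the request silently; the original call is
 *			still in progress or was answered moments ago.
 *	}
 */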

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep	*rp;
	struct kvec		*resv = &rqstp->rq_res.head[0], *cachv;
	int			len;

	if (!(rp = rqstp->rq_cacherep) || cache_disabled)
		return;

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		rp->c_state = RC_UNUSED;
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
		if (!cachv->iov_base) {
			rp->c_state = RC_UNUSED;
			return;
		}
		cachv->iov_len = len << 2;
		memcpy(cachv->iov_base, statp, len << 2);
		break;
	}
	spin_lock(&cache_lock);
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&cache_lock);
	return;
}
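
/*
 * The length check above works in 32-bit XDR words counted from statp to
 * the end of the first reply kvec.  For example, if resv->iov_len is 96
 * bytes and statp points 24 bytes into the buffer, len = (96 - 24) >> 2 =
 * 18 words, i.e. 72 bytes get cached; replies larger than 256 bytes
 * (64 words) are deliberately not cached at all.
 */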

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}