// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY        NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE      64

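/*
 * Layout note (a summary of the code below): each hash bucket pairs an
 * rbtree, keyed via nfsd_cache_key_cmp(), for lookups with an LRU list
 * used for pruning; both are protected by the bucket's cache_lock.
 */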
struct nfsd_drc_bucket {
        struct rb_root rb_head;
        struct list_head lru_head;
        spinlock_t cache_lock;
};

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
                                            struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
                                           struct shrink_control *sc);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers.  Maybe that's OK.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
        unsigned int limit;
        unsigned long low_pages = totalram_pages() - totalhigh_pages();

        limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
        return min_t(unsigned int, limit, 256*1024);
}

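/*
 * Worked example of the table above (a sketch, assuming 4KB pages, i.e.
 * PAGE_SHIFT == 12): a machine with 1GB of low memory has roughly 262144
 * low pages, int_sqrt(262144) == 512, and 16 * 512 == 8192, which shifted
 * left by (PAGE_SHIFT - 10) == 2 gives the 32768 entries listed for 1GB.
 */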
/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
        return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}

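/*
 * Continuing the example above: with a limit of 32768 entries and a
 * TARGET_BUCKET_SIZE of 64, nfsd_hashsize() returns
 * roundup_pow_of_two(32768 / 64) == 512 buckets, so nn->maskbits is 9.
 */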
static u32
nfsd_cache_hash(__be32 xid, struct nfsd_net *nn)
{
        return hash_32(be32_to_cpu(xid), nn->maskbits);
}

static struct svc_cacherep *
nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
                        struct nfsd_net *nn)
{
        struct svc_cacherep *rp;

        rp = kmem_cache_alloc(nn->drc_slab, GFP_KERNEL);
        if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
                RB_CLEAR_NODE(&rp->c_node);
                INIT_LIST_HEAD(&rp->c_lru);

                memset(&rp->c_key, 0, sizeof(rp->c_key));
                rp->c_key.k_xid = rqstp->rq_xid;
                rp->c_key.k_proc = rqstp->rq_proc;
                rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
                rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
                rp->c_key.k_prot = rqstp->rq_prot;
                rp->c_key.k_vers = rqstp->rq_vers;
                rp->c_key.k_len = rqstp->rq_arg.len;
                rp->c_key.k_csum = csum;
        }
        return rp;
}

static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                                struct nfsd_net *nn)
{
        if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
                nn->drc_mem_usage -= rp->c_replvec.iov_len;
                kfree(rp->c_replvec.iov_base);
        }
        if (rp->c_state != RC_UNUSED) {
                rb_erase(&rp->c_node, &b->rb_head);
                list_del(&rp->c_lru);
                atomic_dec(&nn->num_drc_entries);
                nn->drc_mem_usage -= sizeof(*rp);
        }
        kmem_cache_free(nn->drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                        struct nfsd_net *nn)
{
        spin_lock(&b->cache_lock);
        nfsd_reply_cache_free_locked(b, rp, nn);
        spin_unlock(&b->cache_lock);
}

int nfsd_reply_cache_init(struct nfsd_net *nn)
{
        unsigned int hashsize;
        unsigned int i;
        int status = 0;

        nn->max_drc_entries = nfsd_cache_size_limit();
        atomic_set(&nn->num_drc_entries, 0);
        hashsize = nfsd_hashsize(nn->max_drc_entries);
        nn->maskbits = ilog2(hashsize);

        nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
        nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
        nn->nfsd_reply_cache_shrinker.seeks = 1;
        status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
        if (status)
                return status;

        nn->drc_slab = kmem_cache_create("nfsd_drc",
                                sizeof(struct svc_cacherep), 0, 0, NULL);
        if (!nn->drc_slab)
                goto out_nomem;

        nn->drc_hashtbl = kcalloc(hashsize,
                                sizeof(*nn->drc_hashtbl), GFP_KERNEL);
        if (!nn->drc_hashtbl) {
                nn->drc_hashtbl = vzalloc(array_size(hashsize,
                                                sizeof(*nn->drc_hashtbl)));
                if (!nn->drc_hashtbl)
                        goto out_nomem;
        }

        for (i = 0; i < hashsize; i++) {
                INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
                spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
        }
        nn->drc_hashsize = hashsize;

        return 0;
out_nomem:
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
        return -ENOMEM;
}

void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
        struct svc_cacherep *rp;
        unsigned int i;

        unregister_shrinker(&nn->nfsd_reply_cache_shrinker);

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct list_head *head = &nn->drc_hashtbl[i].lru_head;

                while (!list_empty(head)) {
                        rp = list_first_entry(head, struct svc_cacherep, c_lru);
                        nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
                                                        rp, nn);
                }
        }

        kvfree(nn->drc_hashtbl);
        nn->drc_hashtbl = NULL;
        nn->drc_hashsize = 0;

        kmem_cache_destroy(nn->drc_slab);
        nn->drc_slab = NULL;
}

/*
 * Move cache entry to end of LRU list and refresh its timestamp. Stale
 * entries are reaped from the head of the list by prune_bucket() and the
 * shrinker callbacks below.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
        rp->c_timestamp = jiffies;
        list_move_tail(&rp->c_lru, &b->lru_head);
}

static long
prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
{
        struct svc_cacherep *rp, *tmp;
        long freed = 0;

        list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
                /*
                 * Don't free entries attached to calls that are still
                 * in-progress, but do keep scanning the list.
                 */
                if (rp->c_state == RC_INPROG)
                        continue;
                if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
                    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
                        break;
                nfsd_reply_cache_free_locked(b, rp, nn);
                freed++;
        }
        return freed;
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(struct nfsd_net *nn)
{
        unsigned int i;
        long freed = 0;

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

                if (list_empty(&b->lru_head))
                        continue;
                spin_lock(&b->cache_lock);
                freed += prune_bucket(b, nn);
                spin_unlock(&b->cache_lock);
        }
        return freed;
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return atomic_read(&nn->num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return prune_cache_entries(nn);
}
/*
 * Walk an xdr_buf and get a checksum for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
        int idx;
        unsigned int base;
        __wsum csum;
        struct xdr_buf *buf = &rqstp->rq_arg;
        const unsigned char *p = buf->head[0].iov_base;
        size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
                                RC_CSUMLEN);
        size_t len = min(buf->head[0].iov_len, csum_len);

        /* rq_arg.head first */
        csum = csum_partial(p, len, 0);
        csum_len -= len;

        /* Continue into page array */
        idx = buf->page_base / PAGE_SIZE;
        base = buf->page_base & ~PAGE_MASK;
        while (csum_len) {
                p = page_address(buf->pages[idx]) + base;
                len = min_t(size_t, PAGE_SIZE - base, csum_len);
                csum = csum_partial(p, len, csum);
                csum_len -= len;
                base = 0;
                ++idx;
        }
        return csum;
}

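/*
 * Note on the comparison below: when two requests carry the same XID but
 * their payload checksums differ, the new request is not a retransmission
 * but a distinct RPC that happens to reuse an XID, so it is counted in
 * nn->payload_misses for the stats file.
 */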
static int
nfsd_cache_key_cmp(const struct svc_cacherep *key,
                        const struct svc_cacherep *rp, struct nfsd_net *nn)
{
        if (key->c_key.k_xid == rp->c_key.k_xid &&
            key->c_key.k_csum != rp->c_key.k_csum)
                ++nn->payload_misses;

        return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the matching entry if one
 * is found; otherwise inserts the given key entry and returns it.
 */
static struct svc_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
                        struct nfsd_net *nn)
{
        struct svc_cacherep *rp, *ret = key;
        struct rb_node **p = &b->rb_head.rb_node,
                       *parent = NULL;
        unsigned int entries = 0;
        int cmp;

        while (*p != NULL) {
                ++entries;
                parent = *p;
                rp = rb_entry(parent, struct svc_cacherep, c_node);

                cmp = nfsd_cache_key_cmp(key, rp, nn);
                if (cmp < 0)
                        p = &parent->rb_left;
                else if (cmp > 0)
                        p = &parent->rb_right;
                else {
                        ret = rp;
                        goto out;
                }
        }
        rb_link_node(&key->c_node, parent, p);
        rb_insert_color(&key->c_node, &b->rb_head);
out:
        /* tally hash chain length stats */
        if (entries > nn->longest_chain) {
                nn->longest_chain = entries;
                nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
        } else if (entries == nn->longest_chain) {
                /* prefer to keep the smallest cachesize possible here */
                nn->longest_chain_cachesize = min_t(unsigned int,
                                nn->longest_chain_cachesize,
                                atomic_read(&nn->num_drc_entries));
        }

        lru_put_end(b, ret);
        return ret;
}

/*
 * Try to find an entry matching the current call in the cache. Since the
 * common case is a cache miss followed by an insert, we preallocate an
 * entry and attempt to insert it into the bucket; if a matching entry is
 * already cached, the preallocated one is freed and the cached entry is
 * used instead.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct svc_cacherep *rp, *found;
        __be32 xid = rqstp->rq_xid;
        __wsum csum;
        u32 hash = nfsd_cache_hash(xid, nn);
        struct nfsd_drc_bucket *b = &nn->drc_hashtbl[hash];
        int type = rqstp->rq_cachetype;
        int rtn = RC_DOIT;

        rqstp->rq_cacherep = NULL;
        if (type == RC_NOCACHE) {
                nfsdstats.rcnocache++;
                return rtn;
        }

        csum = nfsd_cache_csum(rqstp);

        /*
         * Since the common case is a cache miss followed by an insert,
         * preallocate an entry.
         */
        rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
        if (!rp) {
                dprintk("nfsd: unable to allocate DRC entry!\n");
                return rtn;
        }

        spin_lock(&b->cache_lock);
        found = nfsd_cache_insert(b, rp, nn);
        if (found != rp) {
                nfsd_reply_cache_free_locked(NULL, rp, nn);
                rp = found;
                goto found_entry;
        }

        nfsdstats.rcmisses++;
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;

        atomic_inc(&nn->num_drc_entries);
        nn->drc_mem_usage += sizeof(*rp);

        /* go ahead and prune the cache */
        prune_bucket(b, nn);
out:
        spin_unlock(&b->cache_lock);
        return rtn;

found_entry:
        /* We found a matching entry which is either in progress or done. */
        nfsdstats.rchits++;
        rtn = RC_DROPIT;

        /* Request being processed */
        if (rp->c_state == RC_INPROG)
                goto out;

        /* From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache? */
        rtn = RC_DOIT;
        if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
                goto out;

        /* Compose RPC reply header */
        switch (rp->c_type) {
        case RC_NOCACHE:
                break;
        case RC_REPLSTAT:
                svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
                rtn = RC_REPLY;
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
                        goto out;       /* should not happen */
                rtn = RC_REPLY;
                break;
        default:
                printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
                nfsd_reply_cache_free_locked(b, rp, nn);
        }

        goto out;
}

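/*
 * Rough sketch of how a caller such as nfsd_dispatch() is expected to use
 * the return codes above (informational only, based on this file):
 *
 *      RC_DOIT:   process the request, then call nfsd_cache_update() with
 *                 the reply (or with statp == NULL on an encode failure);
 *      RC_REPLY:  the cached reply has already been copied into rq_res,
 *                 just send it;
 *      RC_DROPIT: a duplicate of this request is still in progress, so
 *                 drop the retransmission without replying.
 */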
/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct svc_cacherep *rp = rqstp->rq_cacherep;
        struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
        u32 hash;
        struct nfsd_drc_bucket *b;
        int len;
        size_t bufsize = 0;

        if (!rp)
                return;

        hash = nfsd_cache_hash(rp->c_key.k_xid, nn);
        b = &nn->drc_hashtbl[hash];

        len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
        len >>= 2;

        /* Don't cache excessive amounts of data and XDR failures */
        if (!statp || len > (256 >> 2)) {
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }

        switch (cachetype) {
        case RC_REPLSTAT:
                if (len != 1)
                        printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
                rp->c_replstat = *statp;
                break;
        case RC_REPLBUFF:
                cachv = &rp->c_replvec;
                bufsize = len << 2;
                cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
                if (!cachv->iov_base) {
                        nfsd_reply_cache_free(b, rp, nn);
                        return;
                }
                cachv->iov_len = bufsize;
                memcpy(cachv->iov_base, statp, bufsize);
                break;
        case RC_NOCACHE:
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }
        spin_lock(&b->cache_lock);
        nn->drc_mem_usage += bufsize;
        lru_put_end(b, rp);
        rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
        rp->c_type = cachetype;
        rp->c_state = RC_DONE;
        spin_unlock(&b->cache_lock);
        return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
        struct kvec *vec = &rqstp->rq_res.head[0];

        if (vec->iov_len + data->iov_len > PAGE_SIZE) {
                printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
                                data->iov_len);
                return 0;
        }
        memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
        vec->iov_len += data->iov_len;
        return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
        struct nfsd_net *nn = v;

        seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
        seq_printf(m, "num entries: %u\n",
                        atomic_read(&nn->num_drc_entries));
        seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
        seq_printf(m, "mem usage: %u\n", nn->drc_mem_usage);
        seq_printf(m, "cache hits: %u\n", nfsdstats.rchits);
        seq_printf(m, "cache misses: %u\n", nfsdstats.rcmisses);
        seq_printf(m, "not cached: %u\n", nfsdstats.rcnocache);
        seq_printf(m, "payload misses: %u\n", nn->payload_misses);
        seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
        seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
        return 0;
}

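/*
 * Each line of the stats file above has the form "<label>: <unsigned value>",
 * e.g. "max entries: 65536" (illustrative value only). Per the note above,
 * consumers should match on the label rather than on line position.
 */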
int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
        struct nfsd_net *nn = net_generic(file_inode(file)->i_sb->s_fs_info,
                                nfsd_net_id);

        return single_open(file, nfsd_reply_cache_stats_show, nn);
}