/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
	struct list_head lru_head;
	spinlock_t cache_lock;
};

static struct nfsd_drc_bucket	*drc_hashtbl;
static struct kmem_cache	*drc_slab;

/* max number of entries allowed in the cache */
static unsigned int		max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int		maskbits;
static unsigned int		drc_hashsize;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */

/* total number of entries */
static atomic_t			num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int		payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int		drc_mem_usage;

/* longest hash chain seen */
static unsigned int		longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int		longest_chain_cachesize;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
	.scan_objects = nfsd_reply_cache_scan,
	.count_objects = nfsd_reply_cache_count,
	.seeks	= 1,
};

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
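
/*
 * Worked example (added for illustration, not in the original file):
 * assuming 4K pages (PAGE_SHIFT == 12) and 1GB of low memory, low_pages is
 * 262144, int_sqrt(262144) is 512, and (16 * 512) << 2 == 32768 entries,
 * matching the 1GB row in the table above.
 */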

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
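
/*
 * Illustrative note (added): with a 32768-entry limit, 32768 /
 * TARGET_BUCKET_SIZE == 512, already a power of two, so the table gets
 * 512 buckets and nfsd_reply_cache_init() sets maskbits = ilog2(512) == 9.
 */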

static u32
nfsd_cache_hash(__be32 xid)
{
	return hash_32(be32_to_cpu(xid), maskbits);
}
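
/*
 * Descriptive note (added): hash_32() reduces the 32-bit XID to a
 * maskbits-bit value, so the result lies in [0, drc_hashsize) and can be
 * used directly as an index into drc_hashtbl[].
 */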

static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
	}
	return rp;
}

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	list_del(&rp->c_lru);
	atomic_dec(&num_drc_entries);
	drc_mem_usage -= sizeof(*rp);
	kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	spin_lock(&b->cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&b->cache_lock);
}

int nfsd_reply_cache_init(void)
{
	unsigned int hashsize;
	unsigned int i;
	int status = 0;

	max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&num_drc_entries, 0);
	hashsize = nfsd_hashsize(max_drc_entries);
	maskbits = ilog2(hashsize);

	status = register_shrinker(&nfsd_reply_cache_shrinker);
	if (status)
		return status;

	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	drc_hashtbl = kcalloc(hashsize, sizeof(*drc_hashtbl), GFP_KERNEL);
	if (!drc_hashtbl) {
		drc_hashtbl = vzalloc(hashsize * sizeof(*drc_hashtbl));
		if (!drc_hashtbl)
			goto out_nomem;
	}

	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&drc_hashtbl[i].lru_head);
		spin_lock_init(&drc_hashtbl[i].cache_lock);
	}
	drc_hashsize = hashsize;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;
	unsigned int i;

	unregister_shrinker(&nfsd_reply_cache_shrinker);

	for (i = 0; i < drc_hashsize; i++) {
		struct list_head *head = &drc_hashtbl[i].lru_head;
		while (!list_empty(head)) {
			rp = list_first_entry(head, struct svc_cacherep, c_lru);
			nfsd_reply_cache_free_locked(rp);
		}
	}

	kvfree(drc_hashtbl);
	drc_hashtbl = NULL;
	drc_hashsize = 0;

	kmem_cache_destroy(drc_slab);
	drc_slab = NULL;
}

/*
 * Move cache entry to end of LRU list, and refresh its timestamp.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
}

static long
prune_bucket(struct nfsd_drc_bucket *b)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;
		if (atomic_read(&num_drc_entries) <= max_drc_entries &&
		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
			break;
		nfsd_reply_cache_free_locked(rp);
		freed++;
	}
	return freed;
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(void)
{
	unsigned int i;
	long freed = 0;

	for (i = 0; i < drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;
		spin_lock(&b->cache_lock);
		freed += prune_bucket(b);
		spin_unlock(&b->cache_lock);
	}
	return freed;
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return atomic_read(&num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return prune_cache_entries();
}
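
/*
 * Descriptive note (added): these two functions are the ->count_objects and
 * ->scan_objects hooks of nfsd_reply_cache_shrinker above. Under memory
 * pressure the shrinker core first calls count to learn how many entries
 * exist, then calls scan; the value returned from scan is the number of
 * entries actually freed by prune_cache_entries().
 */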
/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}
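
/*
 * Note (added, not from the original): the checksum covers the RPC argument
 * only up to RC_CSUMLEN bytes — first whatever fits in rq_arg.head[0], then
 * the remainder from the page array — which presumably keeps the per-request
 * cost bounded even for large WRITE payloads.
 */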

static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
	/* Check RPC XID first */
	if (rqstp->rq_xid != rp->c_xid)
		return false;
	/* compare checksum of NFS data */
	if (csum != rp->c_csum) {
		++payload_misses;
		return false;
	}

	/* Other discriminators */
	if (rqstp->rq_proc != rp->c_proc ||
	    rqstp->rq_prot != rp->c_prot ||
	    rqstp->rq_vers != rp->c_vers ||
	    rqstp->rq_arg.len != rp->c_len ||
	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
		return false;

	return true;
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
		__wsum csum)
{
	struct svc_cacherep	*rp, *ret = NULL;
	struct list_head	*rh = &b->lru_head;
	unsigned int		entries = 0;

	list_for_each_entry(rp, rh, c_lru) {
		++entries;
		if (nfsd_cache_match(rqstp, csum, rp)) {
			ret = rp;
			break;
		}
	}

	/* tally hash chain length stats */
	if (entries > longest_chain) {
		longest_chain = entries;
		longest_chain_cachesize = atomic_read(&num_drc_entries);
	} else if (entries == longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		longest_chain_cachesize = min_t(unsigned int,
				longest_chain_cachesize,
				atomic_read(&num_drc_entries));
	}

	return ret;
}

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 */
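
/*
 * Note on the return value (added): RC_DOIT tells nfsd to process the call
 * normally, RC_DROPIT tells it to silently drop the request (a retransmission
 * of a call that is still RC_INPROG, or one answered very recently), and
 * RC_REPLY means a cached reply has already been copied into rq_res and can
 * be sent as-is.
 */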
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	__wsum			csum;
	u32 hash = nfsd_cache_hash(xid);
	struct nfsd_drc_bucket *b = &drc_hashtbl[hash];
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return rtn;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	rp = nfsd_reply_cache_alloc();
	spin_lock(&b->cache_lock);
	if (likely(rp)) {
		atomic_inc(&num_drc_entries);
		drc_mem_usage += sizeof(*rp);
	}

	/* go ahead and prune the cache */
	prune_bucket(b);

	found = nfsd_cache_search(b, rqstp, csum);
	if (found) {
		if (likely(rp))
			nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		goto out;
	}

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	lru_put_end(b, rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
out:
	spin_unlock(&b->cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(b, rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	u32		hash;
	struct nfsd_drc_bucket *b;
	int		len;
	size_t		bufsize = 0;

	if (!rp)
		return;

	hash = nfsd_cache_hash(rp->c_xid);
	b = &drc_hashtbl[hash];

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp);
		return;
	}
	spin_lock(&b->cache_lock);
	drc_mem_usage += bufsize;
	lru_put_end(b, rp);
	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
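
/*
 * Illustrative note (added): this seq_file backs the "reply_cache_stats"
 * file in the nfsd filesystem (commonly mounted at /proc/fs/nfsd), so the
 * counters above can be read at runtime as one labelled line per counter,
 * e.g. "max entries: 32768".
 */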
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max entries:           %u\n", max_drc_entries);
	seq_printf(m, "num entries:           %u\n",
			atomic_read(&num_drc_entries));
	seq_printf(m, "hash buckets:          %u\n", 1 << maskbits);
	seq_printf(m, "mem usage:             %u\n", drc_mem_usage);
	seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses:        %u\n", payload_misses);
	seq_printf(m, "longest chain len:     %u\n", longest_chain);
	seq_printf(m, "cachesize at longest:  %u\n", longest_chain_cachesize);
	return 0;
}

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_reply_cache_stats_show, NULL);
}