/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE
#define HASHSIZE		64

static struct hlist_head *	cache_hash;
static struct list_head		lru_head;
static struct kmem_cache	*drc_slab;
static unsigned int		num_drc_entries;
static unsigned int		max_drc_entries;
/*
 * Calculate the hash index from an XID.
 */
static inline u32 request_hash(u32 xid)
{
	u32 h = xid;

	h ^= (xid >> 24);
	return h & (HASHSIZE-1);
}
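/*
 * Note that the "& (HASHSIZE-1)" above only works as a cheap modulo
 * because HASHSIZE is a power of two: with HASHSIZE == 64 the mask is
 * 0x3f, so the bucket is selected by the low six bits of the folded XID.
 */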
static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void	cache_cleaner_func(struct work_struct *unused);
static int	nfsd_reply_cache_shrink(struct shrinker *shrink,
					struct shrink_control *sc);

struct shrinker nfsd_reply_cache_shrinker = {
	.shrink	= nfsd_reply_cache_shrink,
};
/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, the lock must be held when accessing _prev or _next.
 */
static DEFINE_SPINLOCK(cache_lock);
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);
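/*
 * In practice that means prune_cache_entries() and nfsd_cache_search()
 * below are only ever called with cache_lock held, and nfsd_cache_lookup()
 * drops the lock only around its blocking allocation of a new entry.
 */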
/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine. The limit scales with the square root of
 * low memory, with a hard cap of 256k entries. In the worst case, each
 * entry will be ~1k, so the limit gives a rough upper bound on the
 * amount of memory used, in kilobytes.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
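/*
 * Worked example (assuming 4k pages, i.e. PAGE_SHIFT == 12): with 1GB of
 * low memory there are 262144 pages, int_sqrt(262144) == 512, so the
 * limit is (16 * 512) << 2 == 32768 entries, or roughly 32MB of cache in
 * the ~1k-per-entry worst case.
 */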
static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
		INIT_HLIST_NODE(&rp->c_hash);
	}
	return rp;
}
static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF)
		kfree(rp->c_replvec.iov_base);
	if (!hlist_unhashed(&rp->c_hash))
		hlist_del(&rp->c_hash);
	list_del(&rp->c_lru);
	--num_drc_entries;
	kmem_cache_free(drc_slab, rp);
}
static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
	spin_lock(&cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&cache_lock);
}
int nfsd_reply_cache_init(void)
{
	INIT_LIST_HEAD(&lru_head);
	max_drc_entries = nfsd_cache_size_limit();
	num_drc_entries = 0;

	register_shrinker(&nfsd_reply_cache_shrinker);
	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}
void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	unregister_shrinker(&nfsd_reply_cache_shrinker);
	cancel_delayed_work_sync(&cache_cleaner);

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		nfsd_reply_cache_free_locked(rp);
	}

	kfree(cache_hash);
	cache_hash = NULL;

	if (drc_slab) {
		kmem_cache_destroy(drc_slab);
		drc_slab = NULL;
	}
}
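/*
 * Note that nfsd_reply_cache_shutdown() also serves as the error-unwind
 * path for nfsd_reply_cache_init() above, which is why it tolerates a
 * partially initialised cache (a NULL drc_slab, an empty LRU list).
 */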
/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &lru_head);
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}
/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}
static inline bool
nfsd_cache_entry_expired(struct svc_cacherep *rp)
{
	return rp->c_state != RC_INPROG &&
	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}
/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static void
prune_cache_entries(void)
{
	struct svc_cacherep *rp, *tmp;

	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
		if (!nfsd_cache_entry_expired(rp) &&
		    num_drc_entries <= max_drc_entries)
			break;
		nfsd_reply_cache_free_locked(rp);
	}

	/*
	 * Conditionally rearm the job. If we cleaned out the list, then
	 * cancel any pending run (since there won't be any work to do).
	 * Otherwise, we rearm the job or modify the existing one to run in
	 * RC_EXPIRE since we just ran the pruner.
	 */
	if (list_empty(&lru_head))
		cancel_delayed_work(&cache_cleaner);
	else
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
}
static void
cache_cleaner_func(struct work_struct *unused)
{
	spin_lock(&cache_lock);
	prune_cache_entries();
	spin_unlock(&cache_lock);
}
static int
nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned int num;

	spin_lock(&cache_lock);
	if (sc->nr_to_scan)
		prune_cache_entries();
	num = num_drc_entries;
	spin_unlock(&cache_lock);

	return num;
}
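/*
 * Note: this is the old single-callback shrinker interface, where an
 * sc->nr_to_scan of 0 means "just report how many entries are cached"
 * and the return value is the number of entries left after pruning.
 */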
/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}
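/*
 * The checksum computed above is stored in c_csum and, together with the
 * XID, procedure, protocol, version, argument length and client address,
 * forms the match key that nfsd_cache_search() uses below to recognise
 * retransmitted requests.
 */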
/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
{
	struct svc_cacherep	*rp;
	struct hlist_head	*rh;
	__be32 xid = rqstp->rq_xid;
	u32 proto = rqstp->rq_prot,
	    vers = rqstp->rq_vers,
	    proc = rqstp->rq_proc;

	rh = &cache_hash[request_hash(xid)];
	hlist_for_each_entry(rp, rh, c_hash) {
		if (xid == rp->c_xid && proc == rp->c_proc &&
		    proto == rp->c_prot && vers == rp->c_vers &&
		    rqstp->rq_arg.len == rp->c_len && csum == rp->c_csum &&
		    rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) &&
		    rpc_get_port(svc_addr(rqstp)) == rpc_get_port((struct sockaddr *)&rp->c_addr))
			return rp;
	}
	return NULL;
}
/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	__wsum			csum;
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return RC_DOIT;
	}

	csum = nfsd_cache_csum(rqstp);

	spin_lock(&cache_lock);
	rtn = RC_DOIT;

	rp = nfsd_cache_search(rqstp, csum);
	if (rp)
		goto found_entry;

	/* Try to use the first entry on the LRU */
	if (!list_empty(&lru_head)) {
		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
		if (nfsd_cache_entry_expired(rp) ||
		    num_drc_entries >= max_drc_entries) {
			lru_put_end(rp);
			prune_cache_entries();
			goto setup_entry;
		}
	}

	/* Drop the lock and allocate a new entry */
	spin_unlock(&cache_lock);
	rp = nfsd_reply_cache_alloc();
	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		return RC_DOIT;
	}
	spin_lock(&cache_lock);
	++num_drc_entries;

	/*
	 * Must search again just in case someone inserted one
	 * after we dropped the lock above.
	 */
	found = nfsd_cache_search(rqstp, csum);
	if (found) {
		nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	/*
	 * We're keeping the one we just allocated. Are we now over the
	 * limit? Prune one off the tip of the LRU in trade for the one we
	 * just allocated if so.
	 */
	if (num_drc_entries >= max_drc_entries)
		nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
						struct svc_cacherep, c_lru));

setup_entry:
	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	hash_refile(rp);
	lru_put_end(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}
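/*
 * Rough sketch of how the dispatcher is expected to drive the cache (this
 * is an illustration of the contract, not the actual nfsd_dispatch code):
 *
 *	switch (nfsd_cache_lookup(rqstp)) {
 *	case RC_DOIT:	execute the procedure, then call
 *			nfsd_cache_update(rqstp, cachetype, statp);
 *			break;
 *	case RC_REPLY:	the cached reply has already been copied into
 *			rq_res, so just send it;
 *			break;
 *	case RC_DROPIT:	drop the request (a reply is still in progress
 *			or was sent very recently);
 *			break;
 *	}
 */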
/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	int		len;

	if (!rp)
		return;

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(rp);
			return;
		}
		cachv->iov_len = len << 2;
		memcpy(cachv->iov_base, statp, len << 2);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(rp);
		return;
	}
	spin_lock(&cache_lock);
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&cache_lock);
}
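/*
 * Once the update above completes, the entry is in RC_DONE state:
 * lru_put_end() has refreshed c_timestamp and requeued it at the tail of
 * the LRU, so a retransmission that arrives before the entry ages past
 * RC_EXPIRE (or is pruned for space) can be answered from the cached
 * reply by nfsd_cache_lookup().
 */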
/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}