/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY        NFSDDBG_REPCACHE

#define HASHSIZE                64

static struct hlist_head *cache_hash;
static struct list_head lru_head;
static struct kmem_cache *drc_slab;

/* max number of entries allowed in the cache */
static unsigned int max_drc_entries;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */

/* total number of entries */
static unsigned int num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int payload_misses;

/*
 * Calculate the hash index from an XID.
 */
static inline u32 request_hash(u32 xid)
{
        u32 h = xid;
        h ^= (xid >> 24);
        return h & (HASHSIZE-1);
}

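/*
 * Worked example (hypothetical XID, for illustration only): for
 * xid == 0xdeadbeef, xid >> 24 == 0x000000de, so h == 0xdeadbeef ^ 0xde
 * == 0xdeadbe31, and h & (HASHSIZE-1) == 0x31 & 0x3f == bucket 49.
 * XORing the top byte in mixes some high-order bits into the low six
 * bits that select one of the 64 buckets.
 */
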
static int      nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void     cache_cleaner_func(struct work_struct *unused);
static int      nfsd_reply_cache_shrink(struct shrinker *shrink,
                                        struct shrink_control *sc);

struct shrinker nfsd_reply_cache_shrinker = {
        .shrink = nfsd_reply_cache_shrink,
        .seeks  = 1,
};

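/*
 * Note on the shrinker above (background, not from the original file):
 * .seeks == 1 is half of DEFAULT_SEEKS, so the VM treats cached replies
 * as relatively cheap to recreate and reclaims them fairly aggressively
 * under memory pressure.
 */
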
/*
 * Locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG.
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static DEFINE_SPINLOCK(cache_lock);
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
        unsigned int limit;
        unsigned long low_pages = totalram_pages - totalhigh_pages;

        limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
        return min_t(unsigned int, limit, 256*1024);
}

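/*
 * Worked example (assuming 4K pages, i.e. PAGE_SHIFT == 12): with 1GB
 * of low memory, low_pages == 262144, int_sqrt(262144) == 512, so
 * limit == (16 * 512) << 2 == 32768, matching the table above.
 */
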
/*
 * Allocate a new entry from the slab. It starts out unhashed, off the
 * LRU, and typed RC_NOCACHE; returns NULL on allocation failure.
 */
static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
        struct svc_cacherep *rp;

        rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
        if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
                INIT_LIST_HEAD(&rp->c_lru);
                INIT_HLIST_NODE(&rp->c_hash);
        }
        return rp;
}

/*
 * Free an entry: release any cached reply buffer, unhash it, remove it
 * from the LRU, and drop the entry count. Caller must hold cache_lock.
 */
static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
        if (rp->c_type == RC_REPLBUFF)
                kfree(rp->c_replvec.iov_base);
        if (!hlist_unhashed(&rp->c_hash))
                hlist_del(&rp->c_hash);
        list_del(&rp->c_lru);
        --num_drc_entries;
        kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
        spin_lock(&cache_lock);
        nfsd_reply_cache_free_locked(rp);
        spin_unlock(&cache_lock);
}

int nfsd_reply_cache_init(void)
{
        INIT_LIST_HEAD(&lru_head);
        max_drc_entries = nfsd_cache_size_limit();
        num_drc_entries = 0;

        register_shrinker(&nfsd_reply_cache_shrinker);
        drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
                                        0, 0, NULL);
        if (!drc_slab)
                goto out_nomem;

        cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
        if (!cache_hash)
                goto out_nomem;

        return 0;
out_nomem:
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
        nfsd_reply_cache_shutdown();
        return -ENOMEM;
}

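/*
 * Tear the cache down. No cache_lock is taken below; by the time this
 * runs, the nfsd threads are gone (or, on the init failure path, were
 * never started), so nothing can race with us.
 */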
void nfsd_reply_cache_shutdown(void)
{
        struct svc_cacherep *rp;

        unregister_shrinker(&nfsd_reply_cache_shrinker);
        cancel_delayed_work_sync(&cache_cleaner);

        while (!list_empty(&lru_head)) {
                rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
                nfsd_reply_cache_free_locked(rp);
        }

        kfree(cache_hash);
        cache_hash = NULL;

        if (drc_slab) {
                kmem_cache_destroy(drc_slab);
                drc_slab = NULL;
        }
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
        rp->c_timestamp = jiffies;
        list_move_tail(&rp->c_lru, &lru_head);
        schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
        hlist_del_init(&rp->c_hash);
        hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}

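/*
 * Has this entry sat in the cache for longer than RC_EXPIRE (120 * HZ
 * in this era's cache.h)? An RC_INPROG entry never counts as expired:
 * the nfsd thread that owns it will mark it RC_DONE, or free it, in
 * nfsd_cache_update.
 */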
static inline bool
nfsd_cache_entry_expired(struct svc_cacherep *rp)
{
        return rp->c_state != RC_INPROG &&
               time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static void
prune_cache_entries(void)
{
        struct svc_cacherep *rp, *tmp;

        list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
                if (!nfsd_cache_entry_expired(rp) &&
                    num_drc_entries <= max_drc_entries)
                        break;
                nfsd_reply_cache_free_locked(rp);
        }

        /*
         * Conditionally rearm the job. If we cleaned out the list, then
         * cancel any pending run (since there won't be any work to do).
         * Otherwise, we rearm the job or modify the existing one to run in
         * RC_EXPIRE since we just ran the pruner.
         */
        if (list_empty(&lru_head))
                cancel_delayed_work(&cache_cleaner);
        else
                mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
}

static void
cache_cleaner_func(struct work_struct *unused)
{
        spin_lock(&cache_lock);
        prune_cache_entries();
        spin_unlock(&cache_lock);
}

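/*
 * Shrinker callback, using the single-callback shrinker API of this
 * era (pre-3.12): a zero sc->nr_to_scan is a query ("how many objects
 * do you hold?"), while a nonzero value asks us to prune. Either way
 * we return the current entry count for the VM's heuristics.
 */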
static int
nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned int num;

        spin_lock(&cache_lock);
        if (sc->nr_to_scan)
                prune_cache_entries();
        num = num_drc_entries;
        spin_unlock(&cache_lock);

        return num;
}

/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
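/*
 * Example (illustrative numbers; RC_CSUMLEN is 256 in cache.h at the
 * time of writing): for a request with a 100-byte head and 4000 bytes
 * of page data, csum_len == min(100 + 4000, 256) == 256, so we checksum
 * the 100-byte head and then the first 156 bytes of the page array.
 */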
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
        int idx;
        unsigned int base;
        __wsum csum;
        struct xdr_buf *buf = &rqstp->rq_arg;
        const unsigned char *p = buf->head[0].iov_base;
        size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
                                RC_CSUMLEN);
        size_t len = min(buf->head[0].iov_len, csum_len);

        /* rq_arg.head first */
        csum = csum_partial(p, len, 0);
        csum_len -= len;

        /* Continue into page array */
        idx = buf->page_base / PAGE_SIZE;
        base = buf->page_base & ~PAGE_MASK;
        while (csum_len) {
                p = page_address(buf->pages[idx]) + base;
                len = min_t(size_t, PAGE_SIZE - base, csum_len);
                csum = csum_partial(p, len, csum);
                csum_len -= len;
                base = 0;
                ++idx;
        }
        return csum;
}

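/*
 * Match a request against a cached entry. The fixed-size RPC header
 * fields are compared first because they are cheap; the payload
 * checksum runs last. A checksum mismatch when everything else matched
 * (an XID that was reused or collided) is counted in payload_misses.
 */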
static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
        /* Check RPC header info first */
        if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc ||
            rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers ||
            rqstp->rq_arg.len != rp->c_len ||
            !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
            rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
                return false;

        /* compare checksum of NFS data */
        if (csum != rp->c_csum) {
                ++payload_misses;
                return false;
        }

        return true;
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
{
        struct svc_cacherep *rp;
        struct hlist_head *rh;

        rh = &cache_hash[request_hash(rqstp->rq_xid)];
        hlist_for_each_entry(rp, rh, c_hash) {
                if (nfsd_cache_match(rqstp, csum, rp))
                        return rp;
        }
        return NULL;
}

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 */
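/*
 * Return values, as consumed by the caller:
 *   RC_DOIT:   no usable cached reply; process the call (on a miss, the
 *              new entry is marked RC_INPROG to absorb retransmissions)
 *   RC_DROPIT: drop the request; it is already in progress, or its
 *              reply was sent less than RC_DELAY ago
 *   RC_REPLY:  a cached reply was copied into rq_res; resend it as is
 */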
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
        struct svc_cacherep *rp, *found;
        __be32 xid = rqstp->rq_xid;
        u32 proto = rqstp->rq_prot,
            vers = rqstp->rq_vers,
            proc = rqstp->rq_proc;
        __wsum csum;
        unsigned long age;
        int type = rqstp->rq_cachetype;
        int rtn = RC_DOIT;

        rqstp->rq_cacherep = NULL;
        if (type == RC_NOCACHE) {
                nfsdstats.rcnocache++;
                return rtn;
        }

        csum = nfsd_cache_csum(rqstp);

        /*
         * Since the common case is a cache miss followed by an insert,
         * preallocate an entry. First, try to reuse the first entry on the
         * LRU if it works, then go ahead and prune the LRU list.
         */
        spin_lock(&cache_lock);
        if (!list_empty(&lru_head)) {
                rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
                if (nfsd_cache_entry_expired(rp) ||
                    num_drc_entries >= max_drc_entries) {
                        lru_put_end(rp);
                        prune_cache_entries();
                        goto search_cache;
                }
        }

        /* No expired ones available, allocate a new one. */
        spin_unlock(&cache_lock);
        rp = nfsd_reply_cache_alloc();
        spin_lock(&cache_lock);
        if (likely(rp))
                ++num_drc_entries;

search_cache:
        found = nfsd_cache_search(rqstp, csum);
        if (found) {
                if (likely(rp))
                        nfsd_reply_cache_free_locked(rp);
                rp = found;
                goto found_entry;
        }

        if (!rp) {
                dprintk("nfsd: unable to allocate DRC entry!\n");
                goto out;
        }

        /*
         * We're keeping the one we just allocated. Are we now over the
         * limit? Prune one off the tip of the LRU in trade for the one we
         * just allocated if so.
         */
        if (num_drc_entries >= max_drc_entries)
                nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
                                                struct svc_cacherep, c_lru));

        nfsdstats.rcmisses++;
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;
        rp->c_xid = xid;
        rp->c_proc = proc;
        rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
        rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
        rp->c_prot = proto;
        rp->c_vers = vers;
        rp->c_len = rqstp->rq_arg.len;
        rp->c_csum = csum;

        hash_refile(rp);
        lru_put_end(rp);

        /* release any buffer */
        if (rp->c_type == RC_REPLBUFF) {
                kfree(rp->c_replvec.iov_base);
                rp->c_replvec.iov_base = NULL;
        }
        rp->c_type = RC_NOCACHE;
out:
        spin_unlock(&cache_lock);
        return rtn;

found_entry:
        nfsdstats.rchits++;
        /* We found a matching entry which is either in progress or done. */
        age = jiffies - rp->c_timestamp;
        lru_put_end(rp);

        rtn = RC_DROPIT;
        /* Request being processed or excessive rexmits */
        if (rp->c_state == RC_INPROG || age < RC_DELAY)
                goto out;

        /* From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache? */
        rtn = RC_DOIT;
        if (!rqstp->rq_secure && rp->c_secure)
                goto out;

        /* Compose RPC reply header */
        switch (rp->c_type) {
        case RC_NOCACHE:
                break;
        case RC_REPLSTAT:
                svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
                rtn = RC_REPLY;
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
                        goto out;       /* should not happen */
                rtn = RC_REPLY;
                break;
        default:
                printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
                nfsd_reply_cache_free_locked(rp);
        }

        goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
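/*
 * The cacheable payload runs from statp (the status word that
 * nfsd_dispatch hands us) to the end of the first reply kvec, measured
 * in 32-bit XDR words. Hypothetical example: with resv->iov_len == 120
 * and statp 96 bytes into the head, len == (120 - 96) >> 2 == 6 words.
 * Anything over 256 bytes (64 words) is not cached.
 */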
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
        struct svc_cacherep *rp = rqstp->rq_cacherep;
        struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
        int len;

        if (!rp)
                return;

        len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
        len >>= 2;

        /* Don't cache excessive amounts of data and XDR failures */
        if (!statp || len > (256 >> 2)) {
                nfsd_reply_cache_free(rp);
                return;
        }

        switch (cachetype) {
        case RC_REPLSTAT:
                if (len != 1)
                        printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
                rp->c_replstat = *statp;
                break;
        case RC_REPLBUFF:
                cachv = &rp->c_replvec;
                cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
                if (!cachv->iov_base) {
                        nfsd_reply_cache_free(rp);
                        return;
                }
                cachv->iov_len = len << 2;
                memcpy(cachv->iov_base, statp, len << 2);
                break;
        case RC_NOCACHE:
                nfsd_reply_cache_free(rp);
                return;
        }
        spin_lock(&cache_lock);
        lru_put_end(rp);
        rp->c_secure = rqstp->rq_secure;
        rp->c_type = cachetype;
        rp->c_state = RC_DONE;
        spin_unlock(&cache_lock);
        return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
        struct kvec *vec = &rqstp->rq_res.head[0];

        if (vec->iov_len + data->iov_len > PAGE_SIZE) {
                printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
                                data->iov_len);
                return 0;
        }
        memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
        vec->iov_len += data->iov_len;
        return 1;
}