// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 4.4BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64
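
/*
 * For example, at the 256k-entry hard cap this works out to
 * 262144 / 64 = 4096 buckets (see nfsd_hashsize() below).
 */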

struct nfsd_drc_bucket {
        struct rb_root rb_head;
        struct list_head lru_head;
        spinlock_t cache_lock;
};

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
                                            struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
                                           struct shrink_control *sc);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers. Maybe that's OK.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
        unsigned int limit;
        unsigned long low_pages = totalram_pages() - totalhigh_pages();

        limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
        return min_t(unsigned int, limit, 256*1024);
}
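
/*
 * Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12): with 1 GiB of
 * low memory, low_pages = 262144, int_sqrt(262144) = 512, and
 * (16 * 512) << (12 - 10) = 32768, matching the 1GB row in the table above.
 */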

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
        return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
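
/*
 * E.g., a limit of 32768 entries gives 32768 / 64 = 512 buckets; 512 is
 * already a power of two, so nfsd_reply_cache_init() ends up with
 * nn->maskbits = ilog2(512) = 9.
 */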

static u32
nfsd_cache_hash(__be32 xid, struct nfsd_net *nn)
{
        return hash_32(be32_to_cpu(xid), nn->maskbits);
}
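
/*
 * A retransmitted request carries the same XID as the original call, so
 * hashing on the XID guarantees the retransmission lands in the bucket
 * that holds the cached entry.
 */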

static struct svc_cacherep *
nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
                        struct nfsd_net *nn)
{
        struct svc_cacherep *rp;

        rp = kmem_cache_alloc(nn->drc_slab, GFP_KERNEL);
        if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
                RB_CLEAR_NODE(&rp->c_node);
                INIT_LIST_HEAD(&rp->c_lru);

                memset(&rp->c_key, 0, sizeof(rp->c_key));
                rp->c_key.k_xid = rqstp->rq_xid;
                rp->c_key.k_proc = rqstp->rq_proc;
                rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
                rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
                rp->c_key.k_prot = rqstp->rq_prot;
                rp->c_key.k_vers = rqstp->rq_vers;
                rp->c_key.k_len = rqstp->rq_arg.len;
                rp->c_key.k_csum = csum;
        }
        return rp;
}

static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                                struct nfsd_net *nn)
{
        if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
                nn->drc_mem_usage -= rp->c_replvec.iov_len;
                kfree(rp->c_replvec.iov_base);
        }
        if (rp->c_state != RC_UNUSED) {
                rb_erase(&rp->c_node, &b->rb_head);
                list_del(&rp->c_lru);
                atomic_dec(&nn->num_drc_entries);
                nn->drc_mem_usage -= sizeof(*rp);
        }
        kmem_cache_free(nn->drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                        struct nfsd_net *nn)
{
        spin_lock(&b->cache_lock);
        nfsd_reply_cache_free_locked(b, rp, nn);
        spin_unlock(&b->cache_lock);
}

int nfsd_reply_cache_init(struct nfsd_net *nn)
{
        unsigned int hashsize;
        unsigned int i;
        int status = 0;

        nn->max_drc_entries = nfsd_cache_size_limit();
        atomic_set(&nn->num_drc_entries, 0);
        hashsize = nfsd_hashsize(nn->max_drc_entries);
        nn->maskbits = ilog2(hashsize);

        nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
        nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
        nn->nfsd_reply_cache_shrinker.seeks = 1;
        status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
        if (status)
                return status;

        nn->drc_slab = kmem_cache_create("nfsd_drc",
                                sizeof(struct svc_cacherep), 0, 0, NULL);
        if (!nn->drc_slab)
                goto out_nomem;

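        /*
         * Try a kmalloc-backed table first; a large hash table can exceed
         * what kmalloc will hand out as one contiguous allocation, so fall
         * back to vzalloc. kvfree() in nfsd_reply_cache_shutdown() copes
         * with either.
         */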
        nn->drc_hashtbl = kcalloc(hashsize,
                                sizeof(*nn->drc_hashtbl), GFP_KERNEL);
        if (!nn->drc_hashtbl) {
                nn->drc_hashtbl = vzalloc(array_size(hashsize,
                                                sizeof(*nn->drc_hashtbl)));
                if (!nn->drc_hashtbl)
                        goto out_nomem;
        }

        for (i = 0; i < hashsize; i++) {
                INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
                spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
        }
        nn->drc_hashsize = hashsize;

        return 0;
out_nomem:
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
        return -ENOMEM;
}

void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
        struct svc_cacherep *rp;
        unsigned int i;

        unregister_shrinker(&nn->nfsd_reply_cache_shrinker);

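        /*
         * nfsd is down and the shrinker has been unregistered, so nothing
         * else can reach the cache anymore; entries can safely be freed
         * without taking the per-bucket locks.
         */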
        for (i = 0; i < nn->drc_hashsize; i++) {
                struct list_head *head = &nn->drc_hashtbl[i].lru_head;
                while (!list_empty(head)) {
                        rp = list_first_entry(head, struct svc_cacherep, c_lru);
                        nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
                                        rp, nn);
                }
        }

        kvfree(nn->drc_hashtbl);
        nn->drc_hashtbl = NULL;
        nn->drc_hashsize = 0;

        kmem_cache_destroy(nn->drc_slab);
        nn->drc_slab = NULL;
}

/*
 * Move a cache entry to the end of its bucket's LRU list and update its
 * timestamp. (There is no separate cleaner to queue; pruning happens
 * inline in nfsd_cache_lookup() and via the shrinker.)
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
        rp->c_timestamp = jiffies;
        list_move_tail(&rp->c_lru, &b->lru_head);
}

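/*
 * Walk one bucket's LRU list, oldest entries first, freeing anything that
 * has expired or that pushes the cache over its entry limit. In-progress
 * entries are skipped; the walk stops at the first entry that is both
 * fresh and within bounds, since everything after it is newer still.
 * Caller must hold the bucket's cache_lock.
 */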
static long
prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
{
        struct svc_cacherep *rp, *tmp;
        long freed = 0;

        list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
                /*
                 * Don't free entries attached to calls that are still
                 * in-progress, but do keep scanning the list.
                 */
                if (rp->c_state == RC_INPROG)
                        continue;
                if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
                    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
                        break;
                nfsd_reply_cache_free_locked(b, rp, nn);
                freed++;
        }
        return freed;
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(struct nfsd_net *nn)
{
        unsigned int i;
        long freed = 0;

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

                if (list_empty(&b->lru_head))
                        continue;
                spin_lock(&b->cache_lock);
                freed += prune_bucket(b, nn);
                spin_unlock(&b->cache_lock);
        }
        return freed;
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return atomic_read(&nn->num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return prune_cache_entries(nn);
}

/*
 * Walk an xdr_buf and get a checksum for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
        int idx;
        unsigned int base;
        __wsum csum;
        struct xdr_buf *buf = &rqstp->rq_arg;
        const unsigned char *p = buf->head[0].iov_base;
        size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
                                RC_CSUMLEN);
        size_t len = min(buf->head[0].iov_len, csum_len);

        /* rq_arg.head first */
        csum = csum_partial(p, len, 0);
        csum_len -= len;

        /* Continue into page array */
        idx = buf->page_base / PAGE_SIZE;
        base = buf->page_base & ~PAGE_MASK;
        while (csum_len) {
                p = page_address(buf->pages[idx]) + base;
                len = min_t(size_t, PAGE_SIZE - base, csum_len);
                csum = csum_partial(p, len, csum);
                csum_len -= len;
                base = 0;
                ++idx;
        }
        return csum;
}
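
/*
 * For example (hypothetical sizes): a call with a 148-byte head kvec and
 * 3000 bytes of page data is checksummed over min(148 + 3000, RC_CSUMLEN)
 * bytes: all 148 head bytes, then the first RC_CSUMLEN - 148 bytes of
 * page data starting at page_base.
 */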

static int
nfsd_cache_key_cmp(const struct svc_cacherep *key,
                        const struct svc_cacherep *rp, struct nfsd_net *nn)
{
        if (key->c_key.k_xid == rp->c_key.k_xid &&
            key->c_key.k_csum != rp->c_key.k_csum)
                ++nn->payload_misses;

        return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}
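
/*
 * A matching XID with a mismatched payload checksum generally means the
 * client reused an XID for a different request. payload_misses makes the
 * rate of such false XID matches visible in the stats file below.
 */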

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the matching entry if one
 * is found; otherwise inserts the given key into the tree and returns it.
 */
static struct svc_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
                        struct nfsd_net *nn)
{
        struct svc_cacherep *rp, *ret = key;
        struct rb_node **p = &b->rb_head.rb_node,
                       *parent = NULL;
        unsigned int entries = 0;
        int cmp;

        while (*p != NULL) {
                ++entries;
                parent = *p;
                rp = rb_entry(parent, struct svc_cacherep, c_node);

                cmp = nfsd_cache_key_cmp(key, rp, nn);
                if (cmp < 0)
                        p = &parent->rb_left;
                else if (cmp > 0)
                        p = &parent->rb_right;
                else {
                        ret = rp;
                        goto out;
                }
        }
        rb_link_node(&key->c_node, parent, p);
        rb_insert_color(&key->c_node, &b->rb_head);
out:
        /* tally hash chain length stats */
        if (entries > nn->longest_chain) {
                nn->longest_chain = entries;
                nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
        } else if (entries == nn->longest_chain) {
                /* prefer to keep the smallest cachesize possible here */
                nn->longest_chain_cachesize = min_t(unsigned int,
                                nn->longest_chain_cachesize,
                                atomic_read(&nn->num_drc_entries));
        }

        lru_put_end(b, ret);
        return ret;
}

/*
 * Try to find an entry matching the current call in the cache. Since the
 * common case is a miss followed by an insert, preallocate an entry, try
 * to insert it, and take the found_entry path when the insert discovers
 * a matching entry already in the tree.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct svc_cacherep *rp, *found;
        __be32 xid = rqstp->rq_xid;
        __wsum csum;
        u32 hash = nfsd_cache_hash(xid, nn);
        struct nfsd_drc_bucket *b = &nn->drc_hashtbl[hash];
        int type = rqstp->rq_cachetype;
        int rtn = RC_DOIT;

        rqstp->rq_cacherep = NULL;
        if (type == RC_NOCACHE) {
                nfsdstats.rcnocache++;
                return rtn;
        }

        csum = nfsd_cache_csum(rqstp);

        /*
         * Since the common case is a cache miss followed by an insert,
         * preallocate an entry.
         */
        rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
        if (!rp) {
                dprintk("nfsd: unable to allocate DRC entry!\n");
                return rtn;
        }

        spin_lock(&b->cache_lock);
        found = nfsd_cache_insert(b, rp, nn);
        if (found != rp) {
                nfsd_reply_cache_free_locked(NULL, rp, nn);
                rp = found;
                goto found_entry;
        }

        nfsdstats.rcmisses++;
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;

        atomic_inc(&nn->num_drc_entries);
        nn->drc_mem_usage += sizeof(*rp);

        /* go ahead and prune the cache */
        prune_bucket(b, nn);
out:
        spin_unlock(&b->cache_lock);
        return rtn;

found_entry:
        /* We found a matching entry which is either in progress or done. */
        nfsdstats.rchits++;
        rtn = RC_DROPIT;

        /* Request being processed */
        if (rp->c_state == RC_INPROG)
                goto out;

        /* From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache? */
        rtn = RC_DOIT;
        if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
                goto out;

        /* Compose RPC reply header */
        switch (rp->c_type) {
        case RC_NOCACHE:
                break;
        case RC_REPLSTAT:
                svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
                rtn = RC_REPLY;
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
                        goto out;       /* should not happen */
                rtn = RC_REPLY;
                break;
        default:
                printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
                nfsd_reply_cache_free_locked(b, rp, nn);
        }

        goto out;
}
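
/*
 * The return codes drive nfsd_dispatch: RC_DOIT means process the call
 * normally, RC_DROPIT means silently drop it (a duplicate is already in
 * progress), and RC_REPLY means the cached reply has already been copied
 * into rq_res and can be sent as-is.
 */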

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct svc_cacherep *rp = rqstp->rq_cacherep;
        struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
        u32 hash;
        struct nfsd_drc_bucket *b;
        int len;
        size_t bufsize = 0;

        if (!rp)
                return;

        hash = nfsd_cache_hash(rp->c_key.k_xid, nn);
        b = &nn->drc_hashtbl[hash];

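        /*
         * statp points at the status word inside the reply's head kvec;
         * everything from there to the end of the head is what would be
         * cached. The length is converted to 32-bit XDR words.
         */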
        len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
        len >>= 2;

        /* Don't cache excessive amounts of data and XDR failures */
        if (!statp || len > (256 >> 2)) {
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }

        switch (cachetype) {
        case RC_REPLSTAT:
                if (len != 1)
                        printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
                rp->c_replstat = *statp;
                break;
        case RC_REPLBUFF:
                cachv = &rp->c_replvec;
                bufsize = len << 2;
                cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
                if (!cachv->iov_base) {
                        nfsd_reply_cache_free(b, rp, nn);
                        return;
                }
                cachv->iov_len = bufsize;
                memcpy(cachv->iov_base, statp, bufsize);
                break;
        case RC_NOCACHE:
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }
        spin_lock(&b->cache_lock);
        nn->drc_mem_usage += bufsize;
        lru_put_end(b, rp);
        rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
        rp->c_type = cachetype;
        rp->c_state = RC_DONE;
        spin_unlock(&b->cache_lock);
        return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
        struct kvec *vec = &rqstp->rq_res.head[0];

        if (vec->iov_len + data->iov_len > PAGE_SIZE) {
                printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
                                data->iov_len);
                return 0;
        }
        memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
        vec->iov_len += data->iov_len;
        return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
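
/*
 * Most of these counters are per-net, but the hit/miss/nocache counts
 * come from the global nfsdstats and so aggregate across all nets.
 */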
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
        struct nfsd_net *nn = v;

        seq_printf(m, "max entries:           %u\n", nn->max_drc_entries);
        seq_printf(m, "num entries:           %u\n",
                        atomic_read(&nn->num_drc_entries));
        seq_printf(m, "hash buckets:          %u\n", 1 << nn->maskbits);
        seq_printf(m, "mem usage:             %u\n", nn->drc_mem_usage);
        seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
        seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
        seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
        seq_printf(m, "payload misses:        %u\n", nn->payload_misses);
        seq_printf(m, "longest chain len:     %u\n", nn->longest_chain);
        seq_printf(m, "cachesize at longest:  %u\n", nn->longest_chain_cachesize);
        return 0;
}

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
        struct nfsd_net *nn = net_generic(file_inode(file)->i_sb->s_fs_info,
                                          nfsd_net_id);

        return single_open(file, nfsd_reply_cache_stats_show, nn);
}