/*
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

#define RPCDBG_FACILITY RPCDBG_CACHE
static int cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
static void cache_init(struct cache_head *h)
{
	time_t now = seconds_since_boot();
	h->next = NULL;
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	h->last_refresh = now;
}
static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
{
	return  (h->expiry_time < seconds_since_boot()) ||
		(detail->flush_time > h->last_refresh);
}
struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head **head,  **hp;
	struct cache_head *new = NULL, *freeme = NULL;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp))
				/* This entry is expired, we will discard it. */
				break;
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);
	/* Didn't find anything, insert an empty entry */

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new);
	detail->init(new, key);

	write_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp)) {
				*hp = tmp->next;
				tmp->next = NULL;
				detail->entries--;
				freeme = tmp;
				break;
			}
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}
	new->next = *head;
	*head = new;
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	if (freeme)
		cache_put(freeme, detail);
	return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
	head->expiry_time = expiry;
	head->last_refresh = seconds_since_boot();
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}
struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it
	 */
	struct cache_head **head;
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp);
	detail->init(tmp, old);
	head = &detail->hash_table[hash];

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	tmp->next = *head;
	*head = tmp;
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time);
	cache_fresh_locked(old, 0);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
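
/*
 * Editorial sketch, not part of the original file: how a consumer wires
 * a concrete item type to sunrpc_cache_lookup()/sunrpc_cache_update().
 * 'demo_map', DEMO_HASHBITS and the helpers are hypothetical; for a real
 * instance see the ip_map cache in svcauth_unix.c.
 */
#if 0
#define DEMO_HASHBITS	8

struct demo_map {
	struct cache_head	h;	/* must be embedded */
	char			key[16];
	int			value;
};

static struct demo_map *demo_lookup(struct cache_detail *cd,
				    struct demo_map *item)
{
	struct cache_head *ch;
	int hash = hash_str(item->key, DEMO_HASHBITS);

	/* returns a referenced entry: either an existing match or a
	 * newly inserted, not-yet-valid copy of 'item' */
	ch = sunrpc_cache_lookup(cd, &item->h, hash);
	if (ch == NULL)
		return NULL;
	return container_of(ch, struct demo_map, h);
}

static struct demo_map *demo_update(struct cache_detail *cd,
				    struct demo_map *new,
				    struct demo_map *old)
{
	struct cache_head *ch;
	int hash = hash_str(new->key, DEMO_HASHBITS);

	/* consumes the caller's reference on 'old' and returns a
	 * referenced, freshened entry carrying 'new's content */
	ch = sunrpc_cache_update(cd, &new->h, &old->h, hash);
	if (ch == NULL)
		return NULL;
	return container_of(ch, struct demo_map, h);
}
#endif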
static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (!cd->cache_upcall)
		return -EINVAL;
	return cd->cache_upcall(cd, h);
}
static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else
			return 0;
	}
}
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued or
 *           upcall completed but item is still invalid (implying that
 *           the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		    struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(detail, h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN || age > refresh_age/2) {
		dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
				refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				if (rv == -EAGAIN) {
					set_bit(CACHE_NEGATIVE, &h->flags);
					cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
					cache_fresh_unlocked(h, detail);
					rv = -ENOENT;
				}
				break;

			case -EAGAIN:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				break;
			}
		}
	}

	if (rv == -EAGAIN) {
		if (cache_defer_req(rqstp, h) < 0) {
			/* Request is not deferred */
			rv = cache_is_valid(detail, h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
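
/*
 * Editorial sketch, not part of the original file: the typical shape of
 * a cache_check() call site on a server request path.  'demo_resolve'
 * builds on the hypothetical demo_lookup() above; compare the callers
 * of cache_check() in svcauth_unix.c.
 */
#if 0
static int demo_resolve(struct cache_detail *cd, struct demo_map *key,
			struct cache_req *rqstp)
{
	struct demo_map *dm = demo_lookup(cd, key);
	int err;

	if (dm == NULL)
		return -ENOMEM;

	/* On failure cache_check() has already dropped our reference;
	 * -EAGAIN means the request was deferred and will be replayed
	 * once the upcall completes. */
	err = cache_check(cd, &dm->h, rqstp);
	if (err)
		return err;

	/* ... use dm->value, then drop the reference ... */
	cache_put(&dm->h, cd);
	return 0;
}
#endif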
/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time clean_cache is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */
static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;
static void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_delayed_work(&cache_cleaner, 0);
}
static void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		goto out;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill
		 * the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
	return;
out:
	printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
}
/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	    current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;
		struct cache_detail *d;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		cp = &current_detail->hash_table[current_index];
		for (ch = *cp; ch; cp = &ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
			break;
		}

		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
				cache_dequeue(current_detail, ch);
			cache_revisit_request(ch);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}
/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}
/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);
void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = LONG_MAX;
	detail->nextcheck = seconds_since_boot();
	cache_flush();
	detail->flush_time = 1;
}
EXPORT_SYMBOL_GPL(cache_purge);
/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */
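
/*
 * Editorial sketch, not part of the original file: the 'deferred form'
 * a request owner provides.  It embeds a cache_deferred_req and supplies
 * a ->revisit method; these names are hypothetical, the real in-tree
 * example being svc_defer()/svc_revisit() in svc_xprt.c.
 */
#if 0
struct demo_deferred_req {
	struct cache_deferred_req handle;	/* embedded, as required */
	/* ... enough saved state to replay the original request ... */
};

static void demo_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct demo_deferred_req *dr =
		container_of(dreq, struct demo_deferred_req, handle);

	if (too_many) {
		/* pushed out of the table: just discard the saved state */
		kfree(dr);
		return;
	}
	/* cache info is now available: requeue the saved request */
}

static struct cache_deferred_req *demo_defer(struct cache_req *req)
{
	struct demo_deferred_req *dr = kmalloc(sizeof(*dr), GFP_KERNEL);

	if (dr == NULL)
		return NULL;		/* caller will drop the request */
	dr->handle.revisit = demo_revisit;
	/* ... snapshot the request into 'dr' ... */
	return &dr->handle;
}
#endif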
#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct list_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;
struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}
static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	list_del_init(&dreq->recent);
	list_del_init(&dreq->hash);
	cache_defer_cnt--;
}
static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	list_add(&dreq->recent, &cache_defer_list);
	if (cache_defer_hash[hash].next == NULL)
		INIT_LIST_HEAD(&cache_defer_hash[hash]);
	list_add(&dreq->hash, &cache_defer_hash[hash]);
}
static int cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq, *discard;
	struct thread_deferred_req sleeper;

	if (cache_defer_cnt >= DFR_MAX) {
		/* too much in the cache, randomly drop this one,
		 * or continue and drop the oldest below
		 */
		if (net_random()&1)
			return -ENOMEM;
	}
	if (req->thread_wait) {
		dreq = &sleeper.handle;
		sleeper.completion =
			COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
		dreq->revisit = cache_restart_thread;
	} else
		dreq = req->defer(req);

retry:
	if (dreq == NULL)
		return -ENOMEM;

	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	/* it is in, now maybe clean up */
	discard = NULL;
	if (++cache_defer_cnt > DFR_MAX) {
		discard = list_entry(cache_defer_list.prev,
				     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);

	if (discard)
		/* there was one too many */
		discard->revisit(discard, 1);

	if (!test_bit(CACHE_PENDING, &item->flags)) {
		/* must have just been validated... */
		cache_revisit_request(item);
		return -EAGAIN;
	}

	if (dreq == &sleeper.handle) {
		if (wait_for_completion_interruptible_timeout(
			    &sleeper.completion, req->thread_wait) <= 0) {
			/* The completion wasn't completed, so we need
			 * to clean up
			 */
			spin_lock(&cache_defer_lock);
			if (!list_empty(&sleeper.handle.hash)) {
				__unhash_deferred_req(&sleeper.handle);
				spin_unlock(&cache_defer_lock);
			} else {
				/* cache_revisit_request already removed
				 * this from the hash table, but hasn't
				 * called ->revisit yet.  It will very soon
				 * and we need to wait for it.
				 */
				spin_unlock(&cache_defer_lock);
				wait_for_completion(&sleeper.completion);
			}
		}
		if (test_bit(CACHE_PENDING, &item->flags)) {
			/* item is still pending, try request
			 * deferral
			 */
			dreq = req->defer(req);
			goto retry;
		}
		/* only return success if we actually deferred the
		 * request.  In this case we waited until it was
		 * answered so no deferral has happened - rather
		 * an answer already exists.
		 */
		return -EEXIST;
	}
	return 0;
}
static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct list_head *lp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	lp = cache_defer_hash[hash].next;
	if (lp) {
		while (lp != &cache_defer_hash[hash]) {
			dreq = list_entry(lp, struct cache_deferred_req, hash);
			lp = lp->next;
			if (dreq->item == item) {
				__unhash_deferred_req(dreq);
				list_add(&dreq->recent, &pending);
			}
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}
void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}
/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */
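
/*
 * Editorial sketch, not part of the original file: the user-space side
 * of the channel protocol described above.  A daemon reads one complete
 * request per read() and answers with one complete line per write();
 * 'format_reply' and the cache name "demo" are hypothetical.
 */
#if 0
	int fd = open("/proc/net/rpc/demo/channel", O_RDWR);
	char request[4096], reply[4096];

	for (;;) {
		ssize_t n = read(fd, request, sizeof(request));	/* blocks */
		if (n <= 0)
			continue;
		/* parse the request, resolve it, then write the whole
		 * "key-fields expiry content-fields\n" reply in one call */
		n = format_reply(reply, sizeof(reply), request, n);
		if (n > 0)
			write(fd, reply, n);
	}
#endif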
static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};
static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int err;

	if (count == 0)
		return 0;

	mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
			      * readers on this file */
again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		mutex_unlock(&inode->i_mutex);
		BUG_ON(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	BUG_ON(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	mutex_unlock(&inode->i_mutex);
	return err ? err : count;
}
static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}
static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}
static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_CACHE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	page_cache_release(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}
static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = filp->f_path.dentry->d_inode;
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	mutex_lock(&inode->i_mutex);
	ret = cache_downcall(mapping, buf, count, cd);
	mutex_unlock(&inode->i_mutex);
out:
	return ret;
}
static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
			       struct cache_detail *cd)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLL_OUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}
static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}
static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}
static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = seconds_since_boot();
		atomic_dec(&cd->readers);
	}
	module_put(cd->owner);
	return 0;
}
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq;
	spin_lock(&queue_lock);
	list_for_each_entry(cq, &detail->queue, list)
		if (!cq->reader) {
			struct cache_request *cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (cr->readers != 0)
				continue;
			list_del(&cr->q.list);
			spin_unlock(&queue_lock);
			cache_put(cr->item, detail);
			kfree(cr->buf);
			kfree(cr);
			return;
		}
	spin_unlock(&queue_lock);
}
/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 *
 */

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	char c;

	if (len < 0) return;

	while ((c=*str++) && len)
		switch(c) {
		case ' ':
		case '\t':
		case '\n':
		case '\\':
			if (len >= 4) {
				*bp++ = '\\';
				*bp++ = '0' + ((c & 0300)>>6);
				*bp++ = '0' + ((c & 0070)>>3);
				*bp++ = '0' + ((c & 0007)>>0);
			}
			len -= 4;
			break;
		default:
			*bp++ = c;
			len--;
		}
	if (c || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);
void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			unsigned char c = *buf++;
			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);
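
/*
 * Editorial example, not part of the original file: what the helpers
 * above emit.  For the string "host one" and the four bytes
 * {0x0a,0x01,0x02,0x03}, the output is "host\040one \x0a010203 " -
 * the embedded space becomes octal \040 and the binary data is
 * hexified with a leading \x, exactly as the format comment says.
 */
#if 0
	char *bp = buf;			/* caller-supplied page */
	int len = buflen;

	qword_add(&bp, &len, "host one");	/* "host\040one " */
	qword_addhex(&bp, &len, addr, 4);	/* "\x0a010203 " */
	if (len >= 1)
		*bp++ = '\n';			/* record terminator */
#endif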
static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}
/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
			     void (*cache_request)(struct cache_detail *,
						   struct cache_head *,
						   char **,
						   int *))
{
	char *buf;
	struct cache_request *crq;
	char *bp;
	int len;

	if (atomic_read(&detail->readers) == 0 &&
	    detail->last_close < seconds_since_boot() - 30) {
		warn_no_listener(detail);
		return -EINVAL;
	}

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof (*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	bp = buf; len = PAGE_SIZE;

	cache_request(detail, h, &bp, &len);

	if (len < 0) {
		kfree(buf);
		kfree(crq);
		return -EAGAIN;
	}
	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = PAGE_SIZE - len;
	crq->readers = 0;
	spin_lock(&queue_lock);
	list_add_tail(&crq->q.list, &detail->queue);
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
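
/*
 * Editorial sketch, not part of the original file: a typical
 * ->cache_upcall method formats its key with the qword helpers and
 * delegates to sunrpc_cache_pipe_upcall().  'demo_request'/'demo_upcall'
 * are hypothetical; compare ip_map_upcall()/ip_map_request() in
 * svcauth_unix.c.
 */
#if 0
static void demo_request(struct cache_detail *cd, struct cache_head *h,
			 char **bpp, int *blen)
{
	struct demo_map *dm = container_of(h, struct demo_map, h);

	qword_add(bpp, blen, dm->key);	/* one field per key component */
	(*bpp)[-1] = '\n';		/* turn the trailing space into the
					 * record terminator */
}

static int demo_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall(cd, h, demo_request);
}
#endif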
/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
			int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			bp++;
			byte <<= 4;
			byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			*dest++ = byte;
			bp++;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1; /* doesn't fit in buffer */
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);
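
/*
 * Editorial sketch, not part of the original file: a ->cache_parse
 * method pulling fields from a downcall line with qword_get() and the
 * get_expiry()/get_int() helpers from sunrpc/cache.h, then committing
 * via the hypothetical demo_lookup()/demo_update() wrappers above.
 * Compare ip_map_parse() in svcauth_unix.c for a real instance.
 */
#if 0
static int demo_parse(struct cache_detail *cd, char *mesg, int mlen)
{
	char key[16];
	int value;
	time_t expiry;
	struct demo_map query, *dm;

	if (mlen <= 0 || mesg[mlen-1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = '\0';

	if (qword_get(&mesg, key, sizeof(key)) <= 0)
		return -EINVAL;
	expiry = get_expiry(&mesg);
	if (!expiry)
		return -EINVAL;
	if (get_int(&mesg, &value))
		return -EINVAL;

	strlcpy(query.key, key, sizeof(query.key));
	dm = demo_lookup(cd, &query);
	if (dm == NULL)
		return -ENOMEM;

	/* build the 'new' content and let sunrpc_cache_update() commit it */
	query.value = value;
	query.h.expiry_time = expiry;
	dm = demo_update(cd, &query, dm);
	if (dm == NULL)
		return -ENOMEM;
	cache_put(&dm->h, cd);
	return 0;
}
#endif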
/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

struct handle {
	struct cache_detail *cd;
};

static void *c_start(struct seq_file *m, loff_t *pos)
	__acquires(cd->hash_lock)
{
	loff_t n = *pos;
	unsigned hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	for (ch = cd->hash_table[hash]; ch; ch = ch->next)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while(hash < cd->hash_size &&
		cd->hash_table[hash] == NULL);
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return cd->hash_table[hash];
}
static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return ch->next;
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       cd->hash_table[hash] == NULL) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return cd->hash_table[hash];
}
static void c_stop(struct seq_file *m, void *p)
	__releases(cd->hash_lock)
{
	struct cache_detail *cd = ((struct handle *)m->private)->cd;
	read_unlock(&cd->hash_lock);
}
static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = ((struct handle *)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   atomic_read(&cp->ref.refcount), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else
		cache_put(cp, cd);

	return cd->cache_show(m, cd, cp);
}
static const struct seq_operations cache_content_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};
static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct handle *han;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	han = __seq_open_private(file, &cache_content_op, sizeof(*han));
	if (han == NULL) {
		module_put(cd->owner);
		return -ENOMEM;
	}

	han->cd = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
			   struct cache_detail *cd)
{
	int ret = seq_release_private(inode, file);
	module_put(cd->owner);
	return ret;
}
static int open_flush(struct inode *inode, struct file *file,
		      struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			 struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}
static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[20];
	unsigned long p = *ppos;
	size_t len;

	sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count)
		len = count;
	if (copy_to_user(buf, (void*)(tbuf+p), len))
		return -EFAULT;
	*ppos += len;
	return len;
}
static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *bp, *ep;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	bp = tbuf;
	cd->flush_time = get_expiry(&bp);
	cd->nextcheck = seconds_since_boot();
	cache_flush();

	*ppos += count;
	return count;
}
static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_write(filp, buf, count, ppos, cd);
}
static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_poll(filp, wait, cd);
}
static long cache_ioctl_procfs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	long ret;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct cache_detail *cd = PDE(inode)->data;

	lock_kernel();
	ret = cache_ioctl(inode, filp, cmd, arg, cd);
	unlock_kernel();

	return ret;
}
static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_release(inode, filp, cd);
}
static const struct file_operations cache_file_operations_procfs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_procfs,
	.write		= cache_write_procfs,
	.poll		= cache_poll_procfs,
	.unlocked_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
	.open		= cache_open_procfs,
	.release	= cache_release_procfs,
};
static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return content_release(inode, filp, cd);
}

static const struct file_operations content_file_operations_procfs = {
	.open		= content_open_procfs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_procfs,
};
static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return release_flush(inode, filp, cd);
}
static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return write_flush(filp, buf, count, ppos, cd);
}

static const struct file_operations cache_flush_operations_procfs = {
	.open		= open_flush_procfs,
	.read		= read_flush_procfs,
	.write		= write_flush_procfs,
	.release	= release_flush_procfs,
};
static void remove_cache_proc_entries(struct cache_detail *cd)
{
	if (cd->u.procfs.proc_ent == NULL)
		return;
	if (cd->u.procfs.flush_ent)
		remove_proc_entry("flush", cd->u.procfs.proc_ent);
	if (cd->u.procfs.channel_ent)
		remove_proc_entry("channel", cd->u.procfs.proc_ent);
	if (cd->u.procfs.content_ent)
		remove_proc_entry("content", cd->u.procfs.proc_ent);
	cd->u.procfs.proc_ent = NULL;
	remove_proc_entry(cd->name, proc_net_rpc);
}
#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd)
{
	struct proc_dir_entry *p;

	cd->u.procfs.proc_ent = proc_mkdir(cd->name, proc_net_rpc);
	if (cd->u.procfs.proc_ent == NULL)
		goto out_nomem;
	cd->u.procfs.channel_ent = NULL;
	cd->u.procfs.content_ent = NULL;

	p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
			     cd->u.procfs.proc_ent,
			     &cache_flush_operations_procfs, cd);
	cd->u.procfs.flush_ent = p;
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_upcall || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &cache_file_operations_procfs, cd);
		cd->u.procfs.channel_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &content_file_operations_procfs, cd);
		cd->u.procfs.content_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd)
{
	return 0;
}
#endif
void __init cache_initialize(void)
{
	INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean);
}
int cache_register(struct cache_detail *cd)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register);
void cache_unregister(struct cache_detail *cd)
{
	remove_cache_proc_entries(cd);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister);
static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_write(filp, buf, count, ppos, cd);
}
static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_poll(filp, wait, cd);
}
static long cache_ioctl_pipefs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct cache_detail *cd = RPC_I(inode)->private;
	long ret;

	lock_kernel();
	ret = cache_ioctl(inode, filp, cmd, arg, cd);
	unlock_kernel();

	return ret;
}
static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}
const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};
static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};
static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}
static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
};
int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, mode_t umode,
				 struct cache_detail *cd)
{
	struct qstr q;
	struct dentry *dir;
	int ret = 0;

	sunrpc_init_cache_detail(cd);
	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	dir = rpc_create_cache_dir(parent, &q, umode, cd);
	if (!IS_ERR(dir))
		cd->u.pipefs.dir = dir;
	else {
		sunrpc_destroy_cache_detail(cd);
		ret = PTR_ERR(dir);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	rpc_remove_cache_dir(cd->u.pipefs.dir);
	cd->u.pipefs.dir = NULL;
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);