/*
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 */
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE
static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
static void cache_init(struct cache_head *h)
{
        time_t now = seconds_since_boot();
        h->next = NULL;
        h->flags = 0;
        kref_init(&h->ref);
        h->expiry_time = now + CACHE_NEW_EXPIRY;
        h->last_refresh = now;
}
static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
{
        return  (h->expiry_time < seconds_since_boot()) ||
                (detail->flush_time > h->last_refresh);
}
struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
                                       struct cache_head *key, int hash)
{
        struct cache_head **head, **hp;
        struct cache_head *new = NULL, *freeme = NULL;

        head = &detail->hash_table[hash];

        read_lock(&detail->hash_lock);

        for (hp = head; *hp != NULL; hp = &(*hp)->next) {
                struct cache_head *tmp = *hp;
                if (detail->match(tmp, key)) {
                        if (cache_is_expired(detail, tmp))
                                /* This entry is expired, we will discard it. */
                                break;
                        cache_get(tmp);
                        read_unlock(&detail->hash_lock);
                        return tmp;
                }
        }
        read_unlock(&detail->hash_lock);
        /* Didn't find anything, insert an empty entry */

        new = detail->alloc();
        if (!new)
                return NULL;
        /* must fully initialise 'new', else
         * we might lose it if we need to
         * cache_put it soon.
         */
        cache_init(new);
        detail->init(new, key);

        write_lock(&detail->hash_lock);

        /* check if entry appeared while we slept */
        for (hp = head; *hp != NULL; hp = &(*hp)->next) {
                struct cache_head *tmp = *hp;
                if (detail->match(tmp, key)) {
                        if (cache_is_expired(detail, tmp)) {
                                *hp = tmp->next;
                                tmp->next = NULL;
                                detail->entries--;
                                freeme = tmp;
                                break;
                        }
                        cache_get(tmp);
                        write_unlock(&detail->hash_lock);
                        cache_put(new, detail);
                        return tmp;
                }
        }
        new->next = *head;
        *head = new;
        detail->entries++;
        cache_get(new);
        write_unlock(&detail->hash_lock);

        if (freeme)
                cache_put(freeme, detail);
        return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
        head->expiry_time = expiry;
        head->last_refresh = seconds_since_boot();
        smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
        set_bit(CACHE_VALID, &head->flags);
}
static void cache_fresh_unlocked(struct cache_head *head,
                                 struct cache_detail *detail)
{
        if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
                cache_revisit_request(head);
                cache_dequeue(detail, head);
        }
}
struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
                                       struct cache_head *new, struct cache_head *old, int hash)
{
        /* The 'old' entry is to be replaced by 'new'.
         * If 'old' is not VALID, we update it directly,
         * otherwise we need to replace it
         */
        struct cache_head **head;
        struct cache_head *tmp;

        if (!test_bit(CACHE_VALID, &old->flags)) {
                write_lock(&detail->hash_lock);
                if (!test_bit(CACHE_VALID, &old->flags)) {
                        if (test_bit(CACHE_NEGATIVE, &new->flags))
                                set_bit(CACHE_NEGATIVE, &old->flags);
                        else
                                detail->update(old, new);
                        cache_fresh_locked(old, new->expiry_time);
                        write_unlock(&detail->hash_lock);
                        cache_fresh_unlocked(old, detail);
                        return old;
                }
                write_unlock(&detail->hash_lock);
        }
        /* We need to insert a new entry */
        tmp = detail->alloc();
        if (!tmp) {
                cache_put(old, detail);
                return NULL;
        }
        cache_init(tmp);
        detail->init(tmp, old);
        head = &detail->hash_table[hash];

        write_lock(&detail->hash_lock);
        if (test_bit(CACHE_NEGATIVE, &new->flags))
                set_bit(CACHE_NEGATIVE, &tmp->flags);
        else
                detail->update(tmp, new);
        tmp->next = *head;
        *head = tmp;
        detail->entries++;
        cache_get(tmp);
        cache_fresh_locked(tmp, new->expiry_time);
        cache_fresh_locked(old, 0);
        write_unlock(&detail->hash_lock);
        cache_fresh_unlocked(tmp, detail);
        cache_fresh_unlocked(old, detail);
        cache_put(old, detail);
        return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
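
/*
 * For example: when an upcall reply arrives, the parsed data is placed in a
 * temporary 'new' head and passed here together with the pending 'old' head.
 * If 'old' never became CACHE_VALID it is filled in directly; if it was
 * already valid, a replacement entry is hashed in and 'old' is expired
 * (expiry_time 0) so existing holders see it lapse.
 */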
static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
        if (cd->cache_upcall)
                return cd->cache_upcall(cd, h);
        return sunrpc_cache_pipe_upcall(cd, h);
}
static inline int cache_is_valid(struct cache_head *h)
{
        if (!test_bit(CACHE_VALID, &h->flags))
                return -EAGAIN;
        else {
                /* entry is valid */
                if (test_bit(CACHE_NEGATIVE, &h->flags))
                        return -ENOENT;
                else {
                        /*
                         * In combination with write barrier in
                         * sunrpc_cache_update, ensures that anyone
                         * using the cache entry after this sees the
                         * updated contents:
                         */
                        smp_rmb();
                        return 0;
                }
        }
}
static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
        int rv;

        write_lock(&detail->hash_lock);
        rv = cache_is_valid(h);
        if (rv != -EAGAIN) {
                write_unlock(&detail->hash_lock);
                return rv;
        }
        set_bit(CACHE_NEGATIVE, &h->flags);
        cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
        write_unlock(&detail->hash_lock);
        cache_fresh_unlocked(h, detail);
        return -ENOENT;
}
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued or
 *            upcall completed but item is still invalid (implying that
 *            the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
                struct cache_head *h, struct cache_req *rqstp)
{
        int rv;
        long refresh_age, age;

        /* First decide return status as best we can */
        rv = cache_is_valid(h);

        /* now see if we want to start an upcall */
        refresh_age = (h->expiry_time - h->last_refresh);
        age = seconds_since_boot() - h->last_refresh;

        if (rqstp == NULL) {
                if (rv == -EAGAIN)
                        rv = -ENOENT;
        } else if (rv == -EAGAIN || age > refresh_age/2) {
                dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
                                refresh_age, age);
                if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
                        switch (cache_make_upcall(detail, h)) {
                        case -EINVAL:
                                clear_bit(CACHE_PENDING, &h->flags);
                                cache_revisit_request(h);
                                rv = try_to_negate_entry(detail, h);
                                break;
                        case -EAGAIN:
                                clear_bit(CACHE_PENDING, &h->flags);
                                cache_revisit_request(h);
                                break;
                        }
                }
        }

        if (rv == -EAGAIN) {
                if (!cache_defer_req(rqstp, h)) {
                        /*
                         * Request was not deferred; handle it as best
                         * we can ourselves:
                         */
                        rv = cache_is_valid(h);
                        if (rv == -EAGAIN)
                                rv = -ETIMEDOUT;
                }
        }
        if (rv)
                cache_put(h, detail);
        return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
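
/*
 * Illustrative sketch (hypothetical cache type and lookup wrapper): a typical
 * server-side user combines a lookup with cache_check() like this:
 *
 *      struct mycache_entry *ent = mycache_lookup(cd, &key); // wraps sunrpc_cache_lookup()
 *      if (ent) {
 *              switch (cache_check(cd, &ent->h, &rqstp->rq_chandle)) {
 *              case 0:                 // usable; we still hold a reference
 *                      use_entry(ent);
 *                      cache_put(&ent->h, cd);
 *                      break;
 *              case -EAGAIN:           // upcall pending, request was deferred
 *              case -ENOENT:           // negative entry
 *              case -ETIMEDOUT:        // reference already dropped by cache_check()
 *                      break;
 *              }
 *      }
 */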
/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */
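
/*
 * Worked example: a scan starts with nextcheck pushed 30 minutes ahead; if
 * the buckets hold entries expiring at t=100 and t=250, walking them pulls
 * nextcheck down to 101 (expiry_time+1), so the table is skipped on later
 * passes until seconds_since_boot() reaches 101, unless an earlier
 * flush_time lowers nextcheck first.
 */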
static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;
void sunrpc_init_cache_detail(struct cache_detail *cd)
{
        rwlock_init(&cd->hash_lock);
        INIT_LIST_HEAD(&cd->queue);
        spin_lock(&cache_list_lock);
        cd->nextcheck = 0;
        cd->entries = 0;
        atomic_set(&cd->readers, 0);
        cd->last_close = 0;
        cd->last_warn = -1;
        list_add(&cd->others, &cache_list);
        spin_unlock(&cache_list_lock);

        /* start the cleaning process */
        schedule_delayed_work(&cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
        cache_purge(cd);
        spin_lock(&cache_list_lock);
        write_lock(&cd->hash_lock);
        if (cd->entries || atomic_read(&cd->inuse)) {
                write_unlock(&cd->hash_lock);
                spin_unlock(&cache_list_lock);
                goto out;
        }
        if (current_detail == cd)
                current_detail = NULL;
        list_del_init(&cd->others);
        write_unlock(&cd->hash_lock);
        spin_unlock(&cache_list_lock);
        if (list_empty(&cache_list)) {
                /* module must be being unloaded so it's safe to kill the worker */
                cancel_delayed_work_sync(&cache_cleaner);
        }
        return;
out:
        printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
        int rv = 0;
        struct list_head *next;

        spin_lock(&cache_list_lock);

        /* find a suitable table if we don't already have one */
        while (current_detail == NULL ||
            current_index >= current_detail->hash_size) {
                if (current_detail)
                        next = current_detail->others.next;
                else
                        next = cache_list.next;
                if (next == &cache_list) {
                        current_detail = NULL;
                        spin_unlock(&cache_list_lock);
                        return -1;
                }
                current_detail = list_entry(next, struct cache_detail, others);
                if (current_detail->nextcheck > seconds_since_boot())
                        current_index = current_detail->hash_size;
                else {
                        current_index = 0;
                        current_detail->nextcheck = seconds_since_boot()+30*60;
                }
        }

        /* find a non-empty bucket in the table */
        while (current_detail &&
               current_index < current_detail->hash_size &&
               current_detail->hash_table[current_index] == NULL)
                current_index++;

        /* find a cleanable entry in the bucket and clean it, or set to next bucket */

        if (current_detail && current_index < current_detail->hash_size) {
                struct cache_head *ch, **cp;
                struct cache_detail *d;

                write_lock(&current_detail->hash_lock);

                /* Ok, now to clean this strand */

                cp = &current_detail->hash_table[current_index];
                for (ch = *cp; ch; cp = &ch->next, ch = *cp) {
                        if (current_detail->nextcheck > ch->expiry_time)
                                current_detail->nextcheck = ch->expiry_time+1;
                        if (!cache_is_expired(current_detail, ch))
                                continue;

                        *cp = ch->next;
                        ch->next = NULL;
                        current_detail->entries--;
                        rv = 1;
                        break;
                }

                write_unlock(&current_detail->hash_lock);
                d = current_detail;
                if (!ch)
                        current_index++;
                spin_unlock(&cache_list_lock);
                if (ch) {
                        if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
                                cache_dequeue(current_detail, ch);
                        cache_revisit_request(ch);
                        cache_put(ch, d);
                }
        } else
                spin_unlock(&cache_list_lock);

        return rv;
}
/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
        int delay = 5;
        if (cache_clean() == -1)
                delay = round_jiffies_relative(30*HZ);

        if (list_empty(&cache_list))
                delay = 0;

        if (delay)
                schedule_delayed_work(&cache_cleaner, delay);
}
/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
        while (cache_clean() != -1)
                cond_resched();
        while (cache_clean() != -1)
                cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);
void cache_purge(struct cache_detail *detail)
{
        detail->flush_time = LONG_MAX;
        detail->nextcheck = seconds_since_boot();
        cache_flush();
        detail->flush_time = 1;
}
EXPORT_SYMBOL_GPL(cache_purge);
/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */
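
/*
 * Illustrative sketch (hypothetical request type): a transport supplies the
 * deferred form through its cache_req->defer method, e.g.
 *
 *      static struct cache_deferred_req *my_defer(struct cache_req *req)
 *      {
 *              struct my_deferred *dr = kmalloc(sizeof(*dr), GFP_KERNEL);
 *              if (!dr)
 *                      return NULL;
 *              dr->handle.owner = my_owner;            // matched by cache_clean_deferred()
 *              dr->handle.revisit = my_revisit;        // called when the cache item resolves
 *              return &dr->handle;
 *      }
 */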
#define DFR_HASHSIZE    (PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)  ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX 300     /* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;
static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
        hlist_del_init(&dreq->hash);
        if (!list_empty(&dreq->recent)) {
                list_del_init(&dreq->recent);
                cache_defer_cnt--;
        }
}
static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
        int hash = DFR_HASH(item);

        INIT_LIST_HEAD(&dreq->recent);
        hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}
static void setup_deferral(struct cache_deferred_req *dreq,
                           struct cache_head *item,
                           int count_me)
{
        dreq->item = item;

        spin_lock(&cache_defer_lock);

        __hash_deferred_req(dreq, item);

        if (count_me) {
                cache_defer_cnt++;
                list_add(&dreq->recent, &cache_defer_list);
        }

        spin_unlock(&cache_defer_lock);
}
struct thread_deferred_req {
        struct cache_deferred_req handle;
        struct completion completion;
};
static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
        struct thread_deferred_req *dr =
                container_of(dreq, struct thread_deferred_req, handle);
        complete(&dr->completion);
}
static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
        struct thread_deferred_req sleeper;
        struct cache_deferred_req *dreq = &sleeper.handle;

        sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
        dreq->revisit = cache_restart_thread;

        setup_deferral(dreq, item, 0);

        if (!test_bit(CACHE_PENDING, &item->flags) ||
            wait_for_completion_interruptible_timeout(
                    &sleeper.completion, req->thread_wait) <= 0) {
                /* The completion wasn't completed, so we need
                 * to clean up
                 */
                spin_lock(&cache_defer_lock);
                if (!hlist_unhashed(&sleeper.handle.hash)) {
                        __unhash_deferred_req(&sleeper.handle);
                        spin_unlock(&cache_defer_lock);
                } else {
                        /* cache_revisit_request already removed
                         * this from the hash table, but hasn't
                         * called ->revisit yet.  It will very soon
                         * and we need to wait for it.
                         */
                        spin_unlock(&cache_defer_lock);
                        wait_for_completion(&sleeper.completion);
                }
        }
}
static void cache_limit_defers(void)
{
        /* Make sure we haven't exceeded the limit of allowed deferred
         * requests.
         */
        struct cache_deferred_req *discard = NULL;

        if (cache_defer_cnt <= DFR_MAX)
                return;

        spin_lock(&cache_defer_lock);

        /* Consider removing either the first or the last */
        if (cache_defer_cnt > DFR_MAX) {
                if (net_random() & 1)
                        discard = list_entry(cache_defer_list.next,
                                             struct cache_deferred_req, recent);
                else
                        discard = list_entry(cache_defer_list.prev,
                                             struct cache_deferred_req, recent);
                __unhash_deferred_req(discard);
        }
        spin_unlock(&cache_defer_lock);
        if (discard)
                discard->revisit(discard, 1);
}
/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
        struct cache_deferred_req *dreq;

        if (req->thread_wait) {
                cache_wait_req(req, item);
                if (!test_bit(CACHE_PENDING, &item->flags))
                        return false;
        }
        dreq = req->defer(req);
        if (dreq == NULL)
                return false;
        setup_deferral(dreq, item, 1);
        if (!test_bit(CACHE_PENDING, &item->flags))
                /* Bit could have been cleared before we managed to
                 * set up the deferral, so need to revisit just in case
                 */
                cache_revisit_request(item);

        cache_limit_defers();
        return true;
}
static void cache_revisit_request(struct cache_head *item)
{
        struct cache_deferred_req *dreq;
        struct list_head pending;
        struct hlist_node *tmp;
        int hash = DFR_HASH(item);

        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);

        hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
                if (dreq->item == item) {
                        __unhash_deferred_req(dreq);
                        list_add(&dreq->recent, &pending);
                }

        spin_unlock(&cache_defer_lock);

        while (!list_empty(&pending)) {
                dreq = list_entry(pending.next, struct cache_deferred_req, recent);
                list_del_init(&dreq->recent);
                dreq->revisit(dreq, 0);
        }
}
void cache_clean_deferred(void *owner)
{
        struct cache_deferred_req *dreq, *tmp;
        struct list_head pending;

        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);

        list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
                if (dreq->owner == owner) {
                        __unhash_deferred_req(dreq);
                        list_add(&dreq->recent, &pending);
                }
        }
        spin_unlock(&cache_defer_lock);

        while (!list_empty(&pending)) {
                dreq = list_entry(pending.next, struct cache_deferred_req, recent);
                list_del_init(&dreq->recent);
                dreq->revisit(dreq, 1);
        }
}
/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */
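
/*
 * For example (illustrative; each cache defines its own field layout): a
 * read of the auth.unix.ip channel might return the request line
 *      nfsd 127.0.0.1
 * and the userspace daemon then writes back a line such as
 *      nfsd 127.0.0.1 1400000000 localhost
 * giving the key fields, an expiry time and the content for that entry.
 */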
static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
        struct list_head        list;
        int                     reader; /* if 0, then request */
};
struct cache_request {
        struct cache_queue      q;
        struct cache_head       *item;
        char                    *buf;
        int                     len;
        int                     readers;
};
struct cache_reader {
        struct cache_queue      q;
        int                     offset; /* if non-0, we have a refcnt on next request */
};
static int cache_request(struct cache_detail *detail,
                         struct cache_request *crq)
{
        char *bp = crq->buf;
        int len = PAGE_SIZE;

        detail->cache_request(detail, crq->item, &bp, &len);
        if (len < 0)
                return -EAGAIN;
        return PAGE_SIZE - len;
}
static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
                          loff_t *ppos, struct cache_detail *cd)
{
        struct cache_reader *rp = filp->private_data;
        struct cache_request *rq;
        struct inode *inode = file_inode(filp);
        int err;

        if (count == 0)
                return 0;

        mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
                                      * readers on this file */
again:
        spin_lock(&queue_lock);
        /* need to find next request */
        while (rp->q.list.next != &cd->queue &&
               list_entry(rp->q.list.next, struct cache_queue, list)
               ->reader) {
                struct list_head *next = rp->q.list.next;
                list_move(&rp->q.list, next);
        }
        if (rp->q.list.next == &cd->queue) {
                spin_unlock(&queue_lock);
                mutex_unlock(&inode->i_mutex);
                WARN_ON_ONCE(rp->offset);
                return 0;
        }
        rq = container_of(rp->q.list.next, struct cache_request, q.list);
        WARN_ON_ONCE(rq->q.reader);
        if (rp->offset == 0)
                rq->readers++;
        spin_unlock(&queue_lock);

        if (rq->len == 0) {
                err = cache_request(cd, rq);
                if (err < 0)
                        goto out;
                rq->len = err;
        }

        if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
                err = -EAGAIN;
                spin_lock(&queue_lock);
                list_move(&rp->q.list, &rq->q.list);
                spin_unlock(&queue_lock);
        } else {
                if (rp->offset + count > rq->len)
                        count = rq->len - rp->offset;
                err = -EFAULT;
                if (copy_to_user(buf, rq->buf + rp->offset, count))
                        goto out;
                rp->offset += count;
                if (rp->offset >= rq->len) {
                        rp->offset = 0;
                        spin_lock(&queue_lock);
                        list_move(&rp->q.list, &rq->q.list);
                        spin_unlock(&queue_lock);
                }
                err = 0;
        }
out:
        if (rp->offset == 0) {
                /* need to release rq */
                spin_lock(&queue_lock);
                rq->readers--;
                if (rq->readers == 0 &&
                    !test_bit(CACHE_PENDING, &rq->item->flags)) {
                        list_del(&rq->q.list);
                        spin_unlock(&queue_lock);
                        cache_put(rq->item, cd);
                        kfree(rq->buf);
                        kfree(rq);
                } else
                        spin_unlock(&queue_lock);
        }
        if (err == -EAGAIN)
                goto again;
        mutex_unlock(&inode->i_mutex);
        return err ? err : count;
}
static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
                                 size_t count, struct cache_detail *cd)
{
        ssize_t ret;

        if (count == 0)
                return -EINVAL;
        if (copy_from_user(kaddr, buf, count))
                return -EFAULT;
        kaddr[count] = '\0';
        ret = cd->cache_parse(cd, kaddr, count);
        if (!ret)
                ret = count;
        return ret;
}
static ssize_t cache_slow_downcall(const char __user *buf,
                                   size_t count, struct cache_detail *cd)
{
        static char write_buf[8192]; /* protected by queue_io_mutex */
        ssize_t ret = -EINVAL;

        if (count >= sizeof(write_buf))
                goto out;
        mutex_lock(&queue_io_mutex);
        ret = cache_do_downcall(write_buf, buf, count, cd);
        mutex_unlock(&queue_io_mutex);
out:
        return ret;
}
static ssize_t cache_downcall(struct address_space *mapping,
                              const char __user *buf,
                              size_t count, struct cache_detail *cd)
{
        struct page *page;
        char *kaddr;
        ssize_t ret = -ENOMEM;

        if (count >= PAGE_CACHE_SIZE)
                goto out_slow;

        page = find_or_create_page(mapping, 0, GFP_KERNEL);
        if (!page)
                goto out_slow;

        kaddr = kmap(page);
        ret = cache_do_downcall(kaddr, buf, count, cd);
        kunmap(page);
        unlock_page(page);
        page_cache_release(page);
        return ret;
out_slow:
        return cache_slow_downcall(buf, count, cd);
}
static ssize_t cache_write(struct file *filp, const char __user *buf,
                           size_t count, loff_t *ppos,
                           struct cache_detail *cd)
{
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = file_inode(filp);
        ssize_t ret = -EINVAL;

        if (!cd->cache_parse)
                goto out;

        mutex_lock(&inode->i_mutex);
        ret = cache_downcall(mapping, buf, count, cd);
        mutex_unlock(&inode->i_mutex);
out:
        return ret;
}
static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
static unsigned int cache_poll(struct file *filp, poll_table *wait,
                               struct cache_detail *cd)
{
        unsigned int mask;
        struct cache_reader *rp = filp->private_data;
        struct cache_queue *cq;

        poll_wait(filp, &queue_wait, wait);

        /* always allow write */
        mask = POLLOUT | POLLWRNORM;

        if (!rp)
                return mask;

        spin_lock(&queue_lock);

        for (cq = &rp->q; &cq->list != &cd->queue;
             cq = list_entry(cq->list.next, struct cache_queue, list))
                if (!cq->reader) {
                        mask |= POLLIN | POLLRDNORM;
                        break;
                }
        spin_unlock(&queue_lock);
        return mask;
}
static int cache_ioctl(struct inode *ino, struct file *filp,
                       unsigned int cmd, unsigned long arg,
                       struct cache_detail *cd)
{
        int len = 0;
        struct cache_reader *rp = filp->private_data;
        struct cache_queue *cq;

        if (cmd != FIONREAD || !rp)
                return -EINVAL;

        spin_lock(&queue_lock);

        /* only find the length remaining in current request,
         * or the length of the next request
         */
        for (cq = &rp->q; &cq->list != &cd->queue;
             cq = list_entry(cq->list.next, struct cache_queue, list))
                if (!cq->reader) {
                        struct cache_request *cr =
                                container_of(cq, struct cache_request, q);
                        len = cr->len - rp->offset;
                        break;
                }
        spin_unlock(&queue_lock);

        return put_user(len, (int __user *)arg);
}
static int cache_open(struct inode *inode, struct file *filp,
                      struct cache_detail *cd)
{
        struct cache_reader *rp = NULL;

        if (!cd || !try_module_get(cd->owner))
                return -EACCES;
        nonseekable_open(inode, filp);
        if (filp->f_mode & FMODE_READ) {
                rp = kmalloc(sizeof(*rp), GFP_KERNEL);
                if (!rp) {
                        module_put(cd->owner);
                        return -ENOMEM;
                }
                rp->offset = 0;
                rp->q.reader = 1;
                atomic_inc(&cd->readers);
                spin_lock(&queue_lock);
                list_add(&rp->q.list, &cd->queue);
                spin_unlock(&queue_lock);
        }
        filp->private_data = rp;
        return 0;
}
static int cache_release(struct inode *inode, struct file *filp,
                         struct cache_detail *cd)
{
        struct cache_reader *rp = filp->private_data;

        if (rp) {
                spin_lock(&queue_lock);
                if (rp->offset) {
                        struct cache_queue *cq;
                        for (cq = &rp->q; &cq->list != &cd->queue;
                             cq = list_entry(cq->list.next, struct cache_queue, list))
                                if (!cq->reader) {
                                        container_of(cq, struct cache_request, q)
                                                ->readers--;
                                        break;
                                }
                        rp->offset = 0;
                }
                list_del(&rp->q.list);
                spin_unlock(&queue_lock);

                filp->private_data = NULL;
                kfree(rp);

                cd->last_close = seconds_since_boot();
                atomic_dec(&cd->readers);
        }
        module_put(cd->owner);
        return 0;
}
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
        struct cache_queue *cq, *tmp;
        struct cache_request *cr;
        struct list_head dequeued;

        INIT_LIST_HEAD(&dequeued);
        spin_lock(&queue_lock);
        list_for_each_entry_safe(cq, tmp, &detail->queue, list)
                if (!cq->reader) {
                        cr = container_of(cq, struct cache_request, q);
                        if (cr->item != ch)
                                continue;
                        if (test_bit(CACHE_PENDING, &ch->flags))
                                /* Lost a race and it is pending again */
                                break;
                        if (cr->readers != 0)
                                continue;
                        list_move(&cr->q.list, &dequeued);
                }
        spin_unlock(&queue_lock);
        while (!list_empty(&dequeued)) {
                cr = list_entry(dequeued.next, struct cache_request, q.list);
                list_del(&cr->q.list);
                cache_put(cr->item, detail);
                kfree(cr->buf);
                kfree(cr);
        }
}
/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 */
void qword_add(char **bpp, int *lp, char *str)
{
        char *bp = *bpp;
        int len = *lp;
        char c;

        if (len < 0) return;

        while ((c = *str++) && len)
                switch (c) {
                case ' ':
                case '\t':
                case '\n':
                case '\\':
                        if (len >= 4) {
                                *bp++ = '\\';
                                *bp++ = '0' + ((c & 0300)>>6);
                                *bp++ = '0' + ((c & 0070)>>3);
                                *bp++ = '0' + ((c & 0007)>>0);
                        }
                        len -= 4;
                        break;
                default:
                        *bp++ = c;
                        len--;
                }
        if (c || len < 1) len = -1;
        else {
                *bp++ = ' ';
                len--;
        }
        *bpp = bp;
        *lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);
void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
        char *bp = *bpp;
        int len = *lp;

        if (len < 0) return;

        if (len > 2) {
                *bp++ = '\\';
                *bp++ = 'x';
                len -= 2;
                while (blen && len >= 2) {
                        unsigned char c = *buf++;
                        *bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
                        *bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
                        len -= 2;
                        blen--;
                }
        }
        if (blen || len < 1) len = -1;
        else {
                *bp++ = ' ';
                len--;
        }
        *bpp = bp;
        *lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);
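
/*
 * Example: qword_add() turns the string "a b" into "a\040b " (the embedded
 * space becomes octal \040 and a field-separating space is appended), while
 * qword_addhex() turns the two bytes 0x01 0xff into "\x01ff ".
 */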
static void warn_no_listener(struct cache_detail *detail)
{
        if (detail->last_warn != detail->last_close) {
                detail->last_warn = detail->last_close;
                if (detail->warn_no_listener)
                        detail->warn_no_listener(detail, detail->last_close != 0);
        }
}
static bool cache_listeners_exist(struct cache_detail *detail)
{
        if (atomic_read(&detail->readers))
                return true;
        if (detail->last_close == 0)
                /* This cache was never opened */
                return false;
        if (detail->last_close < seconds_since_boot() - 30)
                /*
                 * We allow for the possibility that someone might
                 * restart a userspace daemon without restarting the
                 * server; but after 30 seconds, we give up.
                 */
                return false;
        return true;
}
/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
        char *buf;
        struct cache_request *crq;
        int ret = 0;

        if (!detail->cache_request)
                return -EINVAL;

        if (!cache_listeners_exist(detail)) {
                warn_no_listener(detail);
                return -EINVAL;
        }

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return -EAGAIN;

        crq = kmalloc(sizeof(*crq), GFP_KERNEL);
        if (!crq) {
                kfree(buf);
                return -EAGAIN;
        }

        crq->q.reader = 0;
        crq->item = cache_get(h);
        crq->buf = buf;
        crq->len = 0;
        crq->readers = 0;
        spin_lock(&queue_lock);
        if (test_bit(CACHE_PENDING, &h->flags))
                list_add_tail(&crq->q.list, &detail->queue);
        else
                /* Lost a race, no longer PENDING, so don't enqueue */
                ret = -EAGAIN;
        spin_unlock(&queue_lock);
        wake_up(&queue_wait);
        if (ret == -EAGAIN) {
                kfree(buf);
                kfree(crq);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */
int qword_get(char **bpp, char *dest, int bufsize)
{
        /* return bytes copied, or -1 on error */
        char *bp = *bpp;
        int len = 0;

        while (*bp == ' ') bp++;

        if (bp[0] == '\\' && bp[1] == 'x') {
                /* HEX STRING */
                bp += 2;
                while (len < bufsize) {
                        int h, l;

                        h = hex_to_bin(bp[0]);
                        if (h < 0)
                                break;

                        l = hex_to_bin(bp[1]);
                        if (l < 0)
                                break;

                        *dest++ = (h << 4) | l;
                        bp += 2;
                        len++;
                }
        } else {
                /* text with \nnn octal quoting */
                while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
                        if (*bp == '\\' &&
                            isodigit(bp[1]) && (bp[1] <= '3') &&
                            isodigit(bp[2]) &&
                            isodigit(bp[3])) {
                                int byte = (*++bp - '0');
                                bp++;
                                byte = (byte << 3) | (*bp++ - '0');
                                byte = (byte << 3) | (*bp++ - '0');
                                *dest++ = byte;
                                len++;
                        } else {
                                *dest++ = *bp++;
                                len++;
                        }
                }
        }

        if (*bp != ' ' && *bp != '\n' && *bp != '\0')
                return -1; /* doesn't end with whitespace */
        while (*bp == ' ') bp++;
        *bpp = bp;
        *dest = '\0';
        return len;
}
EXPORT_SYMBOL_GPL(qword_get);
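
/*
 * Example: with the buffer "\x6e667364 10 next", successive qword_get()
 * calls copy out "nfsd" (hex-decoded), then "10", then "next", advancing
 * *bpp past each field and the spaces that follow it.
 */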
/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

struct handle {
        struct cache_detail *cd;
};
static void *c_start(struct seq_file *m, loff_t *pos)
        __acquires(cd->hash_lock)
{
        loff_t n = *pos;
        unsigned int hash, entry;
        struct cache_head *ch;
        struct cache_detail *cd = ((struct handle *)m->private)->cd;

        read_lock(&cd->hash_lock);
        if (!n--)
                return SEQ_START_TOKEN;
        hash = n >> 32;
        entry = n & ((1LL<<32) - 1);

        for (ch = cd->hash_table[hash]; ch; ch = ch->next)
                if (!entry--)
                        return ch;
        n &= ~((1LL<<32) - 1);
        do {
                hash++;
                n += 1LL<<32;
        } while (hash < cd->hash_size &&
                 cd->hash_table[hash] == NULL);
        if (hash >= cd->hash_size)
                return NULL;
        *pos = n+1;
        return cd->hash_table[hash];
}
static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
        struct cache_head *ch = p;
        int hash = (*pos >> 32);
        struct cache_detail *cd = ((struct handle *)m->private)->cd;

        if (p == SEQ_START_TOKEN)
                hash = 0;
        else if (ch->next == NULL) {
                hash++;
                *pos += 1LL<<32;
        } else {
                ++*pos;
                return ch->next;
        }
        *pos &= ~((1LL<<32) - 1);
        while (hash < cd->hash_size &&
               cd->hash_table[hash] == NULL) {
                hash++;
                *pos += 1LL<<32;
        }
        if (hash >= cd->hash_size)
                return NULL;
        ++*pos;
        return cd->hash_table[hash];
}
static void c_stop(struct seq_file *m, void *p)
        __releases(cd->hash_lock)
{
        struct cache_detail *cd = ((struct handle *)m->private)->cd;
        read_unlock(&cd->hash_lock);
}
static int c_show(struct seq_file *m, void *p)
{
        struct cache_head *cp = p;
        struct cache_detail *cd = ((struct handle *)m->private)->cd;

        if (p == SEQ_START_TOKEN)
                return cd->cache_show(m, cd, NULL);

        ifdebug(CACHE)
                seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
                           convert_to_wallclock(cp->expiry_time),
                           atomic_read(&cp->ref.refcount), cp->flags);
        cache_get(cp);
        if (cache_check(cd, cp, NULL))
                /* cache_check does a cache_put on failure */
                seq_printf(m, "# ");
        else {
                if (cache_is_expired(cd, cp))
                        seq_printf(m, "# ");
                cache_put(cp, cd);
        }

        return cd->cache_show(m, cd, cp);
}
static const struct seq_operations cache_content_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show,
};
static int content_open(struct inode *inode, struct file *file,
                        struct cache_detail *cd)
{
        struct handle *han;

        if (!cd || !try_module_get(cd->owner))
                return -EACCES;
        han = __seq_open_private(file, &cache_content_op, sizeof(*han));
        if (han == NULL) {
                module_put(cd->owner);
                return -ENOMEM;
        }

        han->cd = cd;
        return 0;
}

static int content_release(struct inode *inode, struct file *file,
                           struct cache_detail *cd)
{
        int ret = seq_release_private(inode, file);
        module_put(cd->owner);
        return ret;
}

static int open_flush(struct inode *inode, struct file *file,
                      struct cache_detail *cd)
{
        if (!cd || !try_module_get(cd->owner))
                return -EACCES;
        return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
                         struct cache_detail *cd)
{
        module_put(cd->owner);
        return 0;
}
static ssize_t read_flush(struct file *file, char __user *buf,
                          size_t count, loff_t *ppos,
                          struct cache_detail *cd)
{
        char tbuf[22];
        unsigned long p = *ppos;
        size_t len;

        snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
        len = strlen(tbuf);
        if (p >= len)
                return 0;
        len -= p;
        if (len > count)
                len = count;
        if (copy_to_user(buf, (void *)(tbuf+p), len))
                return -EFAULT;
        *ppos += len;
        return len;
}
static ssize_t write_flush(struct file *file, const char __user *buf,
                           size_t count, loff_t *ppos,
                           struct cache_detail *cd)
{
        char tbuf[20];
        char *bp, *ep;

        if (*ppos || count > sizeof(tbuf)-1)
                return -EINVAL;
        if (copy_from_user(tbuf, buf, count))
                return -EFAULT;
        tbuf[count] = 0;
        simple_strtoul(tbuf, &ep, 0);
        if (*ep && *ep != '\n')
                return -EINVAL;

        bp = tbuf;
        cd->flush_time = get_expiry(&bp);
        cd->nextcheck = seconds_since_boot();
        cache_flush();

        *ppos += count;
        return count;
}
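
/*
 * Example (illustrative): writing a wallclock time to the "flush" file,
 * e.g. "echo 1400000000 > /proc/net/rpc/auth.unix.ip/flush", sets flush_time
 * so that every entry last refreshed before that moment is treated as
 * expired on the next scan.
 */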
static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE_DATA(file_inode(filp));

        return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE_DATA(file_inode(filp));

        return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
        struct cache_detail *cd = PDE_DATA(file_inode(filp));

        return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
                               unsigned int cmd, unsigned long arg)
{
        struct inode *inode = file_inode(filp);
        struct cache_detail *cd = PDE_DATA(inode);

        return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return cache_release(inode, filp, cd);
}
static const struct file_operations cache_file_operations_procfs = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = cache_read_procfs,
        .write          = cache_write_procfs,
        .poll           = cache_poll_procfs,
        .unlocked_ioctl = cache_ioctl_procfs, /* for FIONREAD */
        .open           = cache_open_procfs,
        .release        = cache_release_procfs,
};
static int content_open_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return content_release(inode, filp, cd);
}
static const struct file_operations content_file_operations_procfs = {
        .open           = content_open_procfs,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = content_release_procfs,
};
static int open_flush_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE_DATA(file_inode(filp));

        return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
                                  const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE_DATA(file_inode(filp));

        return write_flush(filp, buf, count, ppos, cd);
}
static const struct file_operations cache_flush_operations_procfs = {
        .open           = open_flush_procfs,
        .read           = read_flush_procfs,
        .write          = write_flush_procfs,
        .release        = release_flush_procfs,
        .llseek         = no_llseek,
};
static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
        struct sunrpc_net *sn;

        if (cd->u.procfs.proc_ent == NULL)
                return;
        if (cd->u.procfs.flush_ent)
                remove_proc_entry("flush", cd->u.procfs.proc_ent);
        if (cd->u.procfs.channel_ent)
                remove_proc_entry("channel", cd->u.procfs.proc_ent);
        if (cd->u.procfs.content_ent)
                remove_proc_entry("content", cd->u.procfs.proc_ent);
        cd->u.procfs.proc_ent = NULL;
        sn = net_generic(net, sunrpc_net_id);
        remove_proc_entry(cd->name, sn->proc_net_rpc);
}
#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
        struct proc_dir_entry *p;
        struct sunrpc_net *sn;

        sn = net_generic(net, sunrpc_net_id);
        cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
        if (cd->u.procfs.proc_ent == NULL)
                goto out_nomem;
        cd->u.procfs.channel_ent = NULL;
        cd->u.procfs.content_ent = NULL;

        p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
                             cd->u.procfs.proc_ent,
                             &cache_flush_operations_procfs, cd);
        cd->u.procfs.flush_ent = p;
        if (p == NULL)
                goto out_nomem;

        if (cd->cache_request || cd->cache_parse) {
                p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
                                     cd->u.procfs.proc_ent,
                                     &cache_file_operations_procfs, cd);
                cd->u.procfs.channel_ent = p;
                if (p == NULL)
                        goto out_nomem;
        }
        if (cd->cache_show) {
                p = proc_create_data("content", S_IFREG|S_IRUSR,
                                     cd->u.procfs.proc_ent,
                                     &content_file_operations_procfs, cd);
                cd->u.procfs.content_ent = p;
                if (p == NULL)
                        goto out_nomem;
        }
        return 0;
out_nomem:
        remove_cache_proc_entries(cd, net);
        return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
        return 0;
}
#endif
void __init cache_initialize(void)
{
        INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}
int cache_register_net(struct cache_detail *cd, struct net *net)
{
        int ret;

        sunrpc_init_cache_detail(cd);
        ret = create_cache_proc_entries(cd, net);
        if (ret)
                sunrpc_destroy_cache_detail(cd);
        return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);
void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
        remove_cache_proc_entries(cd, net);
        sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);
struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
{
        struct cache_detail *cd;

        cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
        if (cd == NULL)
                return ERR_PTR(-ENOMEM);

        cd->hash_table = kzalloc(cd->hash_size * sizeof(struct cache_head *),
                                 GFP_KERNEL);
        if (cd->hash_table == NULL) {
                kfree(cd);
                return ERR_PTR(-ENOMEM);
        }
        return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);
void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
        kfree(cd->hash_table);
        kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);
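
/*
 * Minimal usage sketch (hypothetical names): a per-net cache is usually
 * cloned from a template and wired up like this:
 *
 *      static struct cache_detail my_cache_template = {
 *              .owner          = THIS_MODULE,
 *              .hash_size      = 64,
 *              .name           = "my_cache",
 *              .cache_request  = my_cache_request,
 *              .cache_parse    = my_cache_parse,
 *      };
 *
 *      cd = cache_create_net(&my_cache_template, net);
 *      if (!IS_ERR(cd))
 *              err = cache_register_net(cd, net);      // procfs files + cleaner
 *      ...
 *      cache_unregister_net(cd, net);
 *      cache_destroy_net(cd, net);
 */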
static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
                               unsigned int cmd, unsigned long arg)
{
        struct inode *inode = file_inode(filp);
        struct cache_detail *cd = RPC_I(inode)->private;

        return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = cache_read_pipefs,
        .write          = cache_write_pipefs,
        .poll           = cache_poll_pipefs,
        .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */
        .open           = cache_open_pipefs,
        .release        = cache_release_pipefs,
};
static int content_open_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return content_release(inode, filp, cd);
}
const struct file_operations content_file_operations_pipefs = {
        .open           = content_open_pipefs,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = content_release_pipefs,
};
static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
                                  const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return write_flush(filp, buf, count, ppos, cd);
}
const struct file_operations cache_flush_operations_pipefs = {
        .open           = open_flush_pipefs,
        .read           = read_flush_pipefs,
        .write          = write_flush_pipefs,
        .release        = release_flush_pipefs,
        .llseek         = no_llseek,
};
int sunrpc_cache_register_pipefs(struct dentry *parent,
                                 const char *name, umode_t umode,
                                 struct cache_detail *cd)
{
        struct qstr q;
        struct dentry *dir;
        int ret = 0;

        q.name = name;
        q.len = strlen(name);
        q.hash = full_name_hash(q.name, q.len);
        dir = rpc_create_cache_dir(parent, &q, umode, cd);
        if (!IS_ERR(dir))
                cd->u.pipefs.dir = dir;
        else
                ret = PTR_ERR(dir);
        return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
        rpc_remove_cache_dir(cd->u.pipefs.dir);
        cd->u.pipefs.dir = NULL;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);