/*
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 */
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE
static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
static void cache_init(struct cache_head *h)
{
        time_t now = seconds_since_boot();

        INIT_HLIST_NODE(&h->cache_list);
        h->flags = 0;
        kref_init(&h->ref);
        h->expiry_time = now + CACHE_NEW_EXPIRY;
        h->last_refresh = now;
}
struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
                                       struct cache_head *key, int hash)
{
        struct cache_head *new = NULL, *freeme = NULL, *tmp = NULL;
        struct hlist_head *head;

        head = &detail->hash_table[hash];

        read_lock(&detail->hash_lock);

        hlist_for_each_entry(tmp, head, cache_list) {
                if (detail->match(tmp, key)) {
                        if (cache_is_expired(detail, tmp))
                                /* This entry is expired, we will discard it. */
                                break;
                        cache_get(tmp);
                        read_unlock(&detail->hash_lock);
                        return tmp;
                }
        }
        read_unlock(&detail->hash_lock);
        /* Didn't find anything, insert an empty entry */

        new = detail->alloc();
        if (!new)
                return NULL;
        /* must fully initialise 'new', else
         * we might lose it if we need to
         * cache_put it soon.
         */
        cache_init(new);
        detail->init(new, key);

        write_lock(&detail->hash_lock);

        /* check if entry appeared while we slept */
        hlist_for_each_entry(tmp, head, cache_list) {
                if (detail->match(tmp, key)) {
                        if (cache_is_expired(detail, tmp)) {
                                hlist_del_init(&tmp->cache_list);
                                detail->entries--;
                                freeme = tmp;
                                break;
                        }
                        cache_get(tmp);
                        write_unlock(&detail->hash_lock);
                        cache_put(new, detail);
                        return tmp;
                }
        }

        hlist_add_head(&new->cache_list, head);
        detail->entries++;
        cache_get(new);
        write_unlock(&detail->hash_lock);

        if (freeme)
                cache_put(freeme, detail);
        return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
        head->expiry_time = expiry;
        head->last_refresh = seconds_since_boot();
        smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
        set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
                                 struct cache_detail *detail)
{
        if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
                cache_revisit_request(head);
                cache_dequeue(detail, head);
        }
}
struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
                                       struct cache_head *new, struct cache_head *old, int hash)
{
        /* The 'old' entry is to be replaced by 'new'.
         * If 'old' is not VALID, we update it directly,
         * otherwise we need to replace it
         */
        struct cache_head *tmp;

        if (!test_bit(CACHE_VALID, &old->flags)) {
                write_lock(&detail->hash_lock);
                if (!test_bit(CACHE_VALID, &old->flags)) {
                        if (test_bit(CACHE_NEGATIVE, &new->flags))
                                set_bit(CACHE_NEGATIVE, &old->flags);
                        else
                                detail->update(old, new);
                        cache_fresh_locked(old, new->expiry_time);
                        write_unlock(&detail->hash_lock);
                        cache_fresh_unlocked(old, detail);
                        return old;
                }
                write_unlock(&detail->hash_lock);
        }
        /* We need to insert a new entry */
        tmp = detail->alloc();
        if (!tmp) {
                cache_put(old, detail);
                return NULL;
        }
        cache_init(tmp);
        detail->init(tmp, old);

        write_lock(&detail->hash_lock);
        if (test_bit(CACHE_NEGATIVE, &new->flags))
                set_bit(CACHE_NEGATIVE, &tmp->flags);
        else
                detail->update(tmp, new);
        hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
        detail->entries++;
        cache_get(tmp);
        cache_fresh_locked(tmp, new->expiry_time);
        cache_fresh_locked(old, 0);
        write_unlock(&detail->hash_lock);
        cache_fresh_unlocked(tmp, detail);
        cache_fresh_unlocked(old, detail);
        cache_put(old, detail);
        return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
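
/*
 * Illustrative sketch only (not compiled): the usual caller pattern for
 * the lookup/update pair above.  'struct my_ent' (embedding a cache_head
 * as 'h'), 'my_cache' and 'my_hash()' are hypothetical names, not
 * symbols defined in this file.
 */
#if 0
static struct my_ent *my_ent_lookup(struct cache_detail *my_cache,
                                    struct my_ent *key)
{
        struct cache_head *ch;

        ch = sunrpc_cache_lookup(my_cache, &key->h, my_hash(key));
        /* on success the entry is referenced; caller must cache_put() it */
        return ch ? container_of(ch, struct my_ent, h) : NULL;
}

static struct my_ent *my_ent_update(struct cache_detail *my_cache,
                                    struct my_ent *new, struct my_ent *old)
{
        struct cache_head *ch;

        /* consumes the caller's reference on 'old' */
        ch = sunrpc_cache_update(my_cache, &new->h, &old->h, my_hash(new));
        return ch ? container_of(ch, struct my_ent, h) : NULL;
}
#endif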
static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
        if (cd->cache_upcall)
                return cd->cache_upcall(cd, h);
        return sunrpc_cache_pipe_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_head *h)
{
        if (!test_bit(CACHE_VALID, &h->flags))
                return -EAGAIN;
        else {
                /* entry is valid */
                if (test_bit(CACHE_NEGATIVE, &h->flags))
                        return -ENOENT;
                else {
                        /*
                         * In combination with write barrier in
                         * sunrpc_cache_update, ensures that anyone
                         * using the cache entry after this sees the
                         * updated contents:
                         */
                        smp_rmb();
                        return 0;
                }
        }
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
        int rv;

        write_lock(&detail->hash_lock);
        rv = cache_is_valid(h);
        if (rv == -EAGAIN) {
                set_bit(CACHE_NEGATIVE, &h->flags);
                cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
                rv = -ENOENT;
        }
        write_unlock(&detail->hash_lock);
        cache_fresh_unlocked(h, detail);
        return rv;
}
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued or
 *           upcall completed but item is still invalid (implying that
 *           the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
                struct cache_head *h, struct cache_req *rqstp)
{
        int rv;
        long refresh_age, age;

        /* First decide return status as best we can */
        rv = cache_is_valid(h);

        /* now see if we want to start an upcall */
        refresh_age = (h->expiry_time - h->last_refresh);
        age = seconds_since_boot() - h->last_refresh;

        if (rqstp == NULL) {
                if (rv == -EAGAIN)
                        rv = -ENOENT;
        } else if (rv == -EAGAIN ||
                   (h->expiry_time != 0 && age > refresh_age/2)) {
                dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
                                refresh_age, age);
                if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
                        switch (cache_make_upcall(detail, h)) {
                        case -EINVAL:
                                rv = try_to_negate_entry(detail, h);
                                break;
                        case -EAGAIN:
                                cache_fresh_unlocked(h, detail);
                                break;
                        }
                }
        }

        if (rv == -EAGAIN) {
                if (!cache_defer_req(rqstp, h)) {
                        /*
                         * Request was not deferred; handle it as best
                         * we can ourselves:
                         */
                        rv = cache_is_valid(h);
                        if (rv == -EAGAIN)
                                rv = -ETIMEDOUT;
                }
        }
        if (rv)
                cache_put(h, detail);
        return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
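
/*
 * Illustrative sketch only (not compiled): how a hypothetical server
 * handler might consume the return values documented above.  Note that
 * cache_check() already does a cache_put() on every non-zero return.
 */
#if 0
static int my_use_entry(struct cache_detail *cd, struct cache_head *h,
                        struct cache_req *rqstp)
{
        int err = cache_check(cd, h, rqstp);

        switch (err) {
        case 0:          /* valid; we still hold our reference */
                break;
        case -EAGAIN:    /* upcall pending, request was deferred */
        case -ENOENT:    /* negative entry */
        case -ETIMEDOUT: /* upcall failed or entry replaced */
        default:         /* reference already dropped by cache_check() */
                break;
        }
        return err;
}
#endif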
/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */
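
/*
 * Illustrative sketch only (not compiled): the "nextcheck" policy above
 * reduces to this predicate; cache_clean() below maintains the field.
 */
#if 0
static bool scan_is_due(struct cache_detail *cd)
{
        /* pushed ~30 minutes ahead when a scan starts, pulled back
         * toward the earliest expiry seen while scanning */
        return cd->nextcheck <= seconds_since_boot();
}
#endif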
static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;
void sunrpc_init_cache_detail(struct cache_detail *cd)
{
        rwlock_init(&cd->hash_lock);
        INIT_LIST_HEAD(&cd->queue);
        spin_lock(&cache_list_lock);
        cd->nextcheck = 0;
        cd->entries = 0;
        atomic_set(&cd->readers, 0);
        cd->last_close = 0;
        cd->last_warn = -1;
        list_add(&cd->others, &cache_list);
        spin_unlock(&cache_list_lock);

        /* start the cleaning process */
        schedule_delayed_work(&cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
        cache_purge(cd); /* may need to wait for readers */
        spin_lock(&cache_list_lock);
        write_lock(&cd->hash_lock);
        if (cd->entries || atomic_read(&cd->inuse)) {
                write_unlock(&cd->hash_lock);
                spin_unlock(&cache_list_lock);
                goto out;
        }
        if (current_detail == cd)
                current_detail = NULL;
        list_del_init(&cd->others);
        write_unlock(&cd->hash_lock);
        spin_unlock(&cache_list_lock);
        if (list_empty(&cache_list)) {
                /* module must be being unloaded so it's safe to kill the worker */
                cancel_delayed_work_sync(&cache_cleaner);
        }
        return;
out:
        printk(KERN_ERR "RPC: failed to unregister %s cache\n", cd->name);
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
        int rv = 0;
        struct list_head *next;

        spin_lock(&cache_list_lock);

        /* find a suitable table if we don't already have one */
        while (current_detail == NULL ||
            current_index >= current_detail->hash_size) {
                if (current_detail)
                        next = current_detail->others.next;
                else
                        next = cache_list.next;
                if (next == &cache_list) {
                        current_detail = NULL;
                        spin_unlock(&cache_list_lock);
                        return -1;
                }
                current_detail = list_entry(next, struct cache_detail, others);
                if (current_detail->nextcheck > seconds_since_boot())
                        current_index = current_detail->hash_size;
                else {
                        current_index = 0;
                        current_detail->nextcheck = seconds_since_boot()+30*60;
                }
        }

        /* find a non-empty bucket in the table */
        while (current_detail &&
               current_index < current_detail->hash_size &&
               hlist_empty(&current_detail->hash_table[current_index]))
                current_index++;

        /* find a cleanable entry in the bucket and clean it, or set to next bucket */

        if (current_detail && current_index < current_detail->hash_size) {
                struct cache_head *ch = NULL;
                struct cache_detail *d;
                struct hlist_head *head;
                struct hlist_node *tmp;

                write_lock(&current_detail->hash_lock);

                /* Ok, now to clean this strand */

                head = &current_detail->hash_table[current_index];
                hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
                        if (current_detail->nextcheck > ch->expiry_time)
                                current_detail->nextcheck = ch->expiry_time+1;
                        if (!cache_is_expired(current_detail, ch))
                                continue;

                        hlist_del_init(&ch->cache_list);
                        current_detail->entries--;
                        rv = 1;
                        break;
                }

                write_unlock(&current_detail->hash_lock);
                d = current_detail;
                if (!ch)
                        current_index++;
                spin_unlock(&cache_list_lock);
                if (ch) {
                        set_bit(CACHE_CLEANED, &ch->flags);
                        cache_fresh_unlocked(ch, d);
                        cache_put(ch, d);
                }
        } else
                spin_unlock(&cache_list_lock);

        return rv;
}
/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
        int delay = 5;

        if (cache_clean() == -1)
                delay = round_jiffies_relative(30*HZ);

        if (list_empty(&cache_list))
                delay = 0;

        if (delay)
                schedule_delayed_work(&cache_cleaner, delay);
}

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
        while (cache_clean() != -1)
                cond_resched();
        while (cache_clean() != -1)
                cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);
void cache_purge(struct cache_detail *detail)
{
        detail->flush_time = LONG_MAX;
        detail->nextcheck = seconds_since_boot();
        cache_flush();
        detail->flush_time = 1;
}
EXPORT_SYMBOL_GPL(cache_purge);
/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */

#define DFR_HASHSIZE    (PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)  ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX 300     /* ??? */
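
/*
 * Illustrative sketch only (not compiled): what a request provider
 * supplies so its requests can be deferred.  'struct my_req' and the
 * my_* names are hypothetical; the real hooks are the ->defer method on
 * cache_req and the ->revisit method on cache_deferred_req.
 */
#if 0
struct my_req {
        struct cache_req          h;
        struct cache_deferred_req deferred;
};

static void my_revisit(struct cache_deferred_req *dreq, int too_many)
{
        struct my_req *r = container_of(dreq, struct my_req, deferred);

        /* requeue r for processing, or drop it if too_many != 0 */
}

static struct cache_deferred_req *my_defer(struct cache_req *req)
{
        struct my_req *r = container_of(req, struct my_req, h);

        r->deferred.revisit = my_revisit;
        r->deferred.owner = r;  /* matched by cache_clean_deferred() */
        return &r->deferred;
}
#endif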
static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
        hlist_del_init(&dreq->hash);
        if (!list_empty(&dreq->recent)) {
                list_del_init(&dreq->recent);
                cache_defer_cnt--;
        }
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
        int hash = DFR_HASH(item);

        INIT_LIST_HEAD(&dreq->recent);
        hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
                           struct cache_head *item,
                           int count_me)
{

        dreq->item = item;

        spin_lock(&cache_defer_lock);

        __hash_deferred_req(dreq, item);

        if (count_me) {
                cache_defer_cnt++;
                list_add(&dreq->recent, &cache_defer_list);
        }

        spin_unlock(&cache_defer_lock);

}
struct thread_deferred_req {
        struct cache_deferred_req handle;
        struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
        struct thread_deferred_req *dr =
                container_of(dreq, struct thread_deferred_req, handle);
        complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
        struct thread_deferred_req sleeper;
        struct cache_deferred_req *dreq = &sleeper.handle;

        sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
        dreq->revisit = cache_restart_thread;

        setup_deferral(dreq, item, 0);

        if (!test_bit(CACHE_PENDING, &item->flags) ||
            wait_for_completion_interruptible_timeout(
                    &sleeper.completion, req->thread_wait) <= 0) {
                /* The completion wasn't completed, so we need
                 * to clean up
                 */
                spin_lock(&cache_defer_lock);
                if (!hlist_unhashed(&sleeper.handle.hash)) {
                        __unhash_deferred_req(&sleeper.handle);
                        spin_unlock(&cache_defer_lock);
                } else {
                        /* cache_revisit_request already removed
                         * this from the hash table, but hasn't
                         * called ->revisit yet.  It will very soon
                         * and we need to wait for it.
                         */
                        spin_unlock(&cache_defer_lock);
                        wait_for_completion(&sleeper.completion);
                }
        }
}
static void cache_limit_defers(void)
{
        /* Make sure we haven't exceeded the limit of allowed deferred
         * requests.
         */
        struct cache_deferred_req *discard = NULL;

        if (cache_defer_cnt <= DFR_MAX)
                return;

        spin_lock(&cache_defer_lock);

        /* Consider removing either the first or the last */
        if (cache_defer_cnt > DFR_MAX) {
                if (prandom_u32() & 1)
                        discard = list_entry(cache_defer_list.next,
                                             struct cache_deferred_req, recent);
                else
                        discard = list_entry(cache_defer_list.prev,
                                             struct cache_deferred_req, recent);
                __unhash_deferred_req(discard);
        }
        spin_unlock(&cache_defer_lock);
        if (discard)
                discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
        struct cache_deferred_req *dreq;

        if (req->thread_wait) {
                cache_wait_req(req, item);
                if (!test_bit(CACHE_PENDING, &item->flags))
                        return false;
        }
        dreq = req->defer(req);
        if (dreq == NULL)
                return false;
        setup_deferral(dreq, item, 1);
        if (!test_bit(CACHE_PENDING, &item->flags))
                /* Bit could have been cleared before we managed to
                 * set up the deferral, so need to revisit just in case
                 */
                cache_revisit_request(item);

        cache_limit_defers();
        return true;
}
static void cache_revisit_request(struct cache_head *item)
{
        struct cache_deferred_req *dreq;
        struct list_head pending;
        struct hlist_node *tmp;
        int hash = DFR_HASH(item);

        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);

        hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
                if (dreq->item == item) {
                        __unhash_deferred_req(dreq);
                        list_add(&dreq->recent, &pending);
                }

        spin_unlock(&cache_defer_lock);

        while (!list_empty(&pending)) {
                dreq = list_entry(pending.next, struct cache_deferred_req, recent);
                list_del_init(&dreq->recent);
                dreq->revisit(dreq, 0);
        }
}

void cache_clean_deferred(void *owner)
{
        struct cache_deferred_req *dreq, *tmp;
        struct list_head pending;

        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);

        list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
                if (dreq->owner == owner) {
                        __unhash_deferred_req(dreq);
                        list_add(&dreq->recent, &pending);
                }
        }
        spin_unlock(&cache_defer_lock);

        while (!list_empty(&pending)) {
                dreq = list_entry(pending.next, struct cache_deferred_req, recent);
                list_del_init(&dreq->recent);
                dreq->revisit(dreq, 1);
        }
}
/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */
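
/*
 * Illustrative sketch only (not compiled; user-space code): a minimal
 * daemon loop over the channel file described above.  The cache name
 * 'mycache' and the buffer size are assumptions; each read() returns
 * one whole request and each reply must arrive in a single write().
 */
#if 0
void channel_loop(void)
{
        char buf[8192];
        int fd = open("/proc/sunrpc/mycache/channel", O_RDWR);
        ssize_t n;

        while (fd >= 0 && (n = read(fd, buf, sizeof(buf))) > 0) {
                /* parse the request in buf[0..n), consult policy,
                 * then write back one complete reply record */
                write(fd, buf, n);
        }
}
#endif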
static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
        struct list_head        list;
        int                     reader; /* if 0, then request */
};
struct cache_request {
        struct cache_queue      q;
        struct cache_head       *item;
        char                    *buf;
        int                     len;
        int                     readers;
};
struct cache_reader {
        struct cache_queue      q;
        int                     offset; /* if non-0, we have a refcnt on next request */
};

static int cache_request(struct cache_detail *detail,
                         struct cache_request *crq)
{
        char *bp = crq->buf;
        int len = PAGE_SIZE;

        detail->cache_request(detail, crq->item, &bp, &len);
        if (len < 0)
                return -EAGAIN;
        return PAGE_SIZE - len;
}
static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
                          loff_t *ppos, struct cache_detail *cd)
{
        struct cache_reader *rp = filp->private_data;
        struct cache_request *rq;
        struct inode *inode = file_inode(filp);
        int err;

        if (count == 0)
                return 0;

        mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
                                      * readers on this file */
 again:
        spin_lock(&queue_lock);
        /* need to find next request */
        while (rp->q.list.next != &cd->queue &&
               list_entry(rp->q.list.next, struct cache_queue, list)
               ->reader) {
                struct list_head *next = rp->q.list.next;
                list_move(&rp->q.list, next);
        }
        if (rp->q.list.next == &cd->queue) {
                spin_unlock(&queue_lock);
                mutex_unlock(&inode->i_mutex);
                WARN_ON_ONCE(rp->offset);
                return 0;
        }
        rq = container_of(rp->q.list.next, struct cache_request, q.list);
        WARN_ON_ONCE(rq->q.reader);
        if (rp->offset == 0)
                rq->readers++;
        spin_unlock(&queue_lock);

        if (rq->len == 0) {
                err = cache_request(cd, rq);
                if (err < 0)
                        goto out;
                rq->len = err;
        }

        if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
                err = -EAGAIN;
                spin_lock(&queue_lock);
                list_move(&rp->q.list, &rq->q.list);
                spin_unlock(&queue_lock);
        } else {
                if (rp->offset + count > rq->len)
                        count = rq->len - rp->offset;
                err = -EFAULT;
                if (copy_to_user(buf, rq->buf + rp->offset, count))
                        goto out;
                err = 0;
                rp->offset += count;
                if (rp->offset >= rq->len) {
                        rp->offset = 0;
                        spin_lock(&queue_lock);
                        list_move(&rp->q.list, &rq->q.list);
                        spin_unlock(&queue_lock);
                }
        }
 out:
        if (rp->offset == 0) {
                /* need to release rq */
                spin_lock(&queue_lock);
                rq->readers--;
                if (rq->readers == 0 &&
                    !test_bit(CACHE_PENDING, &rq->item->flags)) {
                        list_del(&rq->q.list);
                        spin_unlock(&queue_lock);
                        cache_put(rq->item, cd);
                        kfree(rq->buf);
                        kfree(rq);
                } else
                        spin_unlock(&queue_lock);
        }
        if (err == -EAGAIN)
                goto again;
        mutex_unlock(&inode->i_mutex);
        return err ? err : count;
}
static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
                                 size_t count, struct cache_detail *cd)
{
        ssize_t ret;

        if (count == 0)
                return -EINVAL;
        if (copy_from_user(kaddr, buf, count))
                return -EFAULT;
        kaddr[count] = '\0';
        ret = cd->cache_parse(cd, kaddr, count);
        if (!ret)
                ret = count;
        return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
                                   size_t count, struct cache_detail *cd)
{
        static char write_buf[8192]; /* protected by queue_io_mutex */
        ssize_t ret = -EINVAL;

        if (count >= sizeof(write_buf))
                goto out;
        mutex_lock(&queue_io_mutex);
        ret = cache_do_downcall(write_buf, buf, count, cd);
        mutex_unlock(&queue_io_mutex);
out:
        return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
                              const char __user *buf,
                              size_t count, struct cache_detail *cd)
{
        struct page *page;
        char *kaddr;
        ssize_t ret = -ENOMEM;

        if (count >= PAGE_CACHE_SIZE)
                goto out_slow;

        page = find_or_create_page(mapping, 0, GFP_KERNEL);
        if (!page)
                goto out_slow;

        kaddr = kmap(page);
        ret = cache_do_downcall(kaddr, buf, count, cd);
        kunmap(page);
        unlock_page(page);
        page_cache_release(page);
        return ret;
out_slow:
        return cache_slow_downcall(buf, count, cd);
}
static ssize_t cache_write(struct file *filp, const char __user *buf,
                           size_t count, loff_t *ppos,
                           struct cache_detail *cd)
{
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = file_inode(filp);
        ssize_t ret = -EINVAL;

        if (!cd->cache_parse)
                goto out;

        mutex_lock(&inode->i_mutex);
        ret = cache_downcall(mapping, buf, count, cd);
        mutex_unlock(&inode->i_mutex);
out:
        return ret;
}
static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
                               struct cache_detail *cd)
{
        unsigned int mask;
        struct cache_reader *rp = filp->private_data;
        struct cache_queue *cq;

        poll_wait(filp, &queue_wait, wait);

        /* always allow write */
        mask = POLLOUT | POLLWRNORM;

        if (!rp)
                return mask;

        spin_lock(&queue_lock);

        for (cq = &rp->q; &cq->list != &cd->queue;
             cq = list_entry(cq->list.next, struct cache_queue, list))
                if (!cq->reader) {
                        mask |= POLLIN | POLLRDNORM;
                        break;
                }
        spin_unlock(&queue_lock);
        return mask;
}
static int cache_ioctl(struct inode *ino, struct file *filp,
                       unsigned int cmd, unsigned long arg,
                       struct cache_detail *cd)
{
        int len = 0;
        struct cache_reader *rp = filp->private_data;
        struct cache_queue *cq;

        if (cmd != FIONREAD || !rp)
                return -EINVAL;

        spin_lock(&queue_lock);

        /* only find the length remaining in current request,
         * or the length of the next request
         */
        for (cq = &rp->q; &cq->list != &cd->queue;
             cq = list_entry(cq->list.next, struct cache_queue, list))
                if (!cq->reader) {
                        struct cache_request *cr =
                                container_of(cq, struct cache_request, q);
                        len = cr->len - rp->offset;
                        break;
                }
        spin_unlock(&queue_lock);

        return put_user(len, (int __user *)arg);
}
static int cache_open(struct inode *inode, struct file *filp,
                      struct cache_detail *cd)
{
        struct cache_reader *rp = NULL;

        if (!cd || !try_module_get(cd->owner))
                return -EACCES;
        nonseekable_open(inode, filp);
        if (filp->f_mode & FMODE_READ) {
                rp = kmalloc(sizeof(*rp), GFP_KERNEL);
                if (!rp) {
                        module_put(cd->owner);
                        return -ENOMEM;
                }
                rp->offset = 0;
                rp->q.reader = 1;
                atomic_inc(&cd->readers);
                spin_lock(&queue_lock);
                list_add(&rp->q.list, &cd->queue);
                spin_unlock(&queue_lock);
        }
        filp->private_data = rp;
        return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
                         struct cache_detail *cd)
{
        struct cache_reader *rp = filp->private_data;

        if (rp) {
                spin_lock(&queue_lock);
                if (rp->offset) {
                        struct cache_queue *cq;
                        for (cq = &rp->q; &cq->list != &cd->queue;
                             cq = list_entry(cq->list.next, struct cache_queue, list))
                                if (!cq->reader) {
                                        container_of(cq, struct cache_request, q)
                                                ->readers--;
                                        break;
                                }
                        rp->offset = 0;
                }
                list_del(&rp->q.list);
                spin_unlock(&queue_lock);

                filp->private_data = NULL;
                kfree(rp);

                cd->last_close = seconds_since_boot();
                atomic_dec(&cd->readers);
        }
        module_put(cd->owner);
        return 0;
}
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
        struct cache_queue *cq, *tmp;
        struct cache_request *cr;
        struct list_head dequeued;

        INIT_LIST_HEAD(&dequeued);
        spin_lock(&queue_lock);
        list_for_each_entry_safe(cq, tmp, &detail->queue, list)
                if (!cq->reader) {
                        cr = container_of(cq, struct cache_request, q);
                        if (cr->item != ch)
                                continue;
                        if (test_bit(CACHE_PENDING, &ch->flags))
                                /* Lost a race and it is pending again */
                                break;
                        if (cr->readers != 0)
                                continue;
                        list_move(&cr->q.list, &dequeued);
                }
        spin_unlock(&queue_lock);
        while (!list_empty(&dequeued)) {
                cr = list_entry(dequeued.next, struct cache_request, q.list);
                list_del(&cr->q.list);
                cache_put(cr->item, detail);
                kfree(cr->buf);
                kfree(cr);
        }
}
/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 *
 */
void qword_add(char **bpp, int *lp, char *str)
{
        char *bp = *bpp;
        int len = *lp;
        int ret;

        if (len < 0) return;

        ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
        if (ret >= len) {
                bp += len;
                len = -1;
        } else {
                bp += ret;
                len -= ret;
                *bp++ = ' ';
                len--;
        }
        *bpp = bp;
        *lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
        char *bp = *bpp;
        int len = *lp;

        if (len < 0) return;

        if (len > 2) {
                *bp++ = '\\';
                *bp++ = 'x';
                len -= 2;
                while (blen && len >= 2) {
                        bp = hex_byte_pack(bp, *buf++);
                        len -= 2;
                        blen--;
                }
        }
        if (blen || len < 1) len = -1;
        else {
                *bp++ = ' ';
                len--;
        }
        *bpp = bp;
        *lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);
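
/*
 * Illustrative sketch only (not compiled): emitting one upcall record
 * with the two helpers above, as a hypothetical cache_detail
 * ->cache_request method might.  'struct my_ent' and its fields are
 * assumed names.
 */
#if 0
static void my_cache_request(struct cache_detail *cd, struct cache_head *h,
                             char **bpp, int *blen)
{
        struct my_ent *ent = container_of(h, struct my_ent, h);

        qword_add(bpp, blen, ent->name);                /* escaped text */
        qword_addhex(bpp, blen, ent->raw, ent->rawlen); /* \x-hex field */
        (*bpp)[-1] = '\n';      /* replace trailing space: end of record */
}
#endif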
static void warn_no_listener(struct cache_detail *detail)
{
        if (detail->last_warn != detail->last_close) {
                detail->last_warn = detail->last_close;
                if (detail->warn_no_listener)
                        detail->warn_no_listener(detail, detail->last_close != 0);
        }
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
        if (atomic_read(&detail->readers))
                return true;
        if (detail->last_close == 0)
                /* This cache was never opened */
                return false;
        if (detail->last_close < seconds_since_boot() - 30)
                /*
                 * We allow for the possibility that someone might
                 * restart a userspace daemon without restarting the
                 * server; but after 30 seconds, we give up.
                 */
                return false;
        return true;
}
/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
        char *buf;
        struct cache_request *crq;
        int ret = 0;

        if (!detail->cache_request)
                return -EINVAL;

        if (!cache_listeners_exist(detail)) {
                warn_no_listener(detail);
                return -EINVAL;
        }
        if (test_bit(CACHE_CLEANED, &h->flags))
                /* Too late to make an upcall */
                return -EAGAIN;

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return -EAGAIN;

        crq = kmalloc(sizeof (*crq), GFP_KERNEL);
        if (!crq) {
                kfree(buf);
                return -EAGAIN;
        }

        crq->q.reader = 0;
        crq->item = cache_get(h);
        crq->buf = buf;
        crq->len = 0;
        crq->readers = 0;
        spin_lock(&queue_lock);
        if (test_bit(CACHE_PENDING, &h->flags))
                list_add_tail(&crq->q.list, &detail->queue);
        else
                /* Lost a race, no longer PENDING, so don't enqueue */
                ret = -EAGAIN;
        spin_unlock(&queue_lock);
        wake_up(&queue_wait);
        if (ret == -EAGAIN) {
                cache_put(h, detail); /* drop the reference taken above */
                kfree(buf);
                kfree(crq);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequotes as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

int qword_get(char **bpp, char *dest, int bufsize)
{
        /* return bytes copied, or -1 on error */
        char *bp = *bpp;
        int len = 0;

        while (*bp == ' ') bp++;

        if (bp[0] == '\\' && bp[1] == 'x') {
                /* HEX STRING */
                bp += 2;
                while (len < bufsize - 1) {
                        int h, l;

                        h = hex_to_bin(bp[0]);
                        if (h < 0)
                                break;

                        l = hex_to_bin(bp[1]);
                        if (l < 0)
                                break;

                        *dest++ = (h << 4) | l;
                        bp += 2;
                        len++;
                }
        } else {
                /* text with \nnn octal quoting */
                while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
                        if (*bp == '\\' &&
                            isodigit(bp[1]) && (bp[1] <= '3') &&
                            isodigit(bp[2]) &&
                            isodigit(bp[3])) {
                                int byte = (*++bp -'0');
                                bp++;
                                byte = (byte << 3) | (*bp++ - '0');
                                byte = (byte << 3) | (*bp++ - '0');
                                *dest++ = byte;
                                len++;
                        } else {
                                *dest++ = *bp++;
                                len++;
                        }
                }
        }

        if (*bp != ' ' && *bp != '\n' && *bp != '\0')
                return -1;
        while (*bp == ' ') bp++;
        *bpp = bp;
        *dest = '\0';
        return len;
}
EXPORT_SYMBOL_GPL(qword_get);
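
/*
 * Illustrative sketch only (not compiled): pulling fields back out of a
 * downcall line with qword_get(), as a hypothetical ->cache_parse
 * method might.  The field layout (name, then expiry) is an assumption
 * for the example.
 */
#if 0
static int my_cache_parse(struct cache_detail *cd, char *mesg, int mlen)
{
        char name[64];
        time_t expiry;

        if (qword_get(&mesg, name, sizeof(name)) <= 0)
                return -EINVAL;         /* missing or mangled field */
        expiry = get_expiry(&mesg);
        if (!expiry)
                return -EINVAL;
        /* look up the entry for 'name' and refresh it to 'expiry' ... */
        return 0;
}
#endif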
/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

void *cache_seq_start(struct seq_file *m, loff_t *pos)
        __acquires(cd->hash_lock)
{
        loff_t n = *pos;
        unsigned int hash, entry;
        struct cache_head *ch;
        struct cache_detail *cd = m->private;

        read_lock(&cd->hash_lock);
        if (!n--)
                return SEQ_START_TOKEN;
        hash = n >> 32;
        entry = n & ((1LL<<32) - 1);

        hlist_for_each_entry(ch, &cd->hash_table[hash], cache_list)
                if (!entry--)
                        return ch;
        n &= ~((1LL<<32) - 1);
        do {
                hash++;
                n += 1LL<<32;
        } while(hash < cd->hash_size &&
                hlist_empty(&cd->hash_table[hash]));
        if (hash >= cd->hash_size)
                return NULL;
        *pos = n+1;
        return hlist_entry_safe(cd->hash_table[hash].first,
                                struct cache_head, cache_list);
}
EXPORT_SYMBOL_GPL(cache_seq_start);
void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
        struct cache_head *ch = p;
        int hash = (*pos >> 32);
        struct cache_detail *cd = m->private;

        if (p == SEQ_START_TOKEN)
                hash = 0;
        else if (ch->cache_list.next == NULL) {
                hash++;
                *pos += 1LL<<32;
        } else {
                ++*pos;
                return hlist_entry_safe(ch->cache_list.next,
                                        struct cache_head, cache_list);
        }
        *pos &= ~((1LL<<32) - 1);
        while (hash < cd->hash_size &&
               hlist_empty(&cd->hash_table[hash])) {
                hash++;
                *pos += 1LL<<32;
        }
        if (hash >= cd->hash_size)
                return NULL;
        ++*pos;
        return hlist_entry_safe(cd->hash_table[hash].first,
                                struct cache_head, cache_list);
}
EXPORT_SYMBOL_GPL(cache_seq_next);
void cache_seq_stop(struct seq_file *m, void *p)
        __releases(cd->hash_lock)
{
        struct cache_detail *cd = m->private;
        read_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_seq_stop);
static int c_show(struct seq_file *m, void *p)
{
        struct cache_head *cp = p;
        struct cache_detail *cd = m->private;

        if (p == SEQ_START_TOKEN)
                return cd->cache_show(m, cd, NULL);

        ifdebug(CACHE)
                seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
                           convert_to_wallclock(cp->expiry_time),
                           atomic_read(&cp->ref.refcount), cp->flags);
        cache_get(cp);
        if (cache_check(cd, cp, NULL))
                /* cache_check does a cache_put on failure */
                seq_printf(m, "# ");
        else {
                if (cache_is_expired(cd, cp))
                        seq_printf(m, "# ");
                cache_put(cp, cd);
        }

        return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
        .start  = cache_seq_start,
        .next   = cache_seq_next,
        .stop   = cache_seq_stop,
        .show   = c_show,
};
static int content_open(struct inode *inode, struct file *file,
                        struct cache_detail *cd)
{
        struct seq_file *seq;
        int err;

        if (!cd || !try_module_get(cd->owner))
                return -EACCES;

        err = seq_open(file, &cache_content_op);
        if (err) {
                module_put(cd->owner);
                return err;
        }

        seq = file->private_data;
        seq->private = cd;
        return 0;
}

static int content_release(struct inode *inode, struct file *file,
                struct cache_detail *cd)
{
        int ret = seq_release(inode, file);
        module_put(cd->owner);
        return ret;
}

static int open_flush(struct inode *inode, struct file *file,
                        struct cache_detail *cd)
{
        if (!cd || !try_module_get(cd->owner))
                return -EACCES;
        return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
                        struct cache_detail *cd)
{
        module_put(cd->owner);
        return 0;
}
static ssize_t read_flush(struct file *file, char __user *buf,
                          size_t count, loff_t *ppos,
                          struct cache_detail *cd)
{
        char tbuf[22];
        unsigned long p = *ppos;
        size_t len;

        snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
        len = strlen(tbuf);
        if (p >= len)
                return 0;
        len -= p;
        if (len > count)
                len = count;
        if (copy_to_user(buf, (void *)(tbuf+p), len))
                return -EFAULT;
        *ppos += len;
        return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
                           size_t count, loff_t *ppos,
                           struct cache_detail *cd)
{
        char tbuf[20];
        char *bp, *ep;

        if (*ppos || count > sizeof(tbuf)-1)
                return -EINVAL;
        if (copy_from_user(tbuf, buf, count))
                return -EFAULT;
        tbuf[count] = 0;
        simple_strtoul(tbuf, &ep, 0);
        if (*ep && *ep != '\n')
                return -EINVAL;

        bp = tbuf;
        cd->flush_time = get_expiry(&bp);
        cd->nextcheck = seconds_since_boot();
        cache_flush();

        *ppos += count;
        return count;
}
static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE_DATA(file_inode(filp));

        return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE_DATA(file_inode(filp));

        return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
        struct cache_detail *cd = PDE_DATA(file_inode(filp));

        return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
                               unsigned int cmd, unsigned long arg)
{
        struct inode *inode = file_inode(filp);
        struct cache_detail *cd = PDE_DATA(inode);

        return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return cache_release(inode, filp, cd);
}

static const struct file_operations cache_file_operations_procfs = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = cache_read_procfs,
        .write          = cache_write_procfs,
        .poll           = cache_poll_procfs,
        .unlocked_ioctl = cache_ioctl_procfs, /* for FIONREAD */
        .open           = cache_open_procfs,
        .release        = cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return content_release(inode, filp, cd);
}

static const struct file_operations content_file_operations_procfs = {
        .open           = content_open_procfs,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = content_release_procfs,
};
static int open_flush_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = PDE_DATA(inode);

        return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE_DATA(file_inode(filp));

        return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
                                  const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = PDE_DATA(file_inode(filp));

        return write_flush(filp, buf, count, ppos, cd);
}

static const struct file_operations cache_flush_operations_procfs = {
        .open           = open_flush_procfs,
        .read           = read_flush_procfs,
        .write          = write_flush_procfs,
        .release        = release_flush_procfs,
        .llseek         = no_llseek,
};
static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
        struct sunrpc_net *sn;

        if (cd->u.procfs.proc_ent == NULL)
                return;
        if (cd->u.procfs.flush_ent)
                remove_proc_entry("flush", cd->u.procfs.proc_ent);
        if (cd->u.procfs.channel_ent)
                remove_proc_entry("channel", cd->u.procfs.proc_ent);
        if (cd->u.procfs.content_ent)
                remove_proc_entry("content", cd->u.procfs.proc_ent);
        cd->u.procfs.proc_ent = NULL;
        sn = net_generic(net, sunrpc_net_id);
        remove_proc_entry(cd->name, sn->proc_net_rpc);
}
#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
        struct proc_dir_entry *p;
        struct sunrpc_net *sn;

        sn = net_generic(net, sunrpc_net_id);
        cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
        if (cd->u.procfs.proc_ent == NULL)
                goto out_nomem;
        cd->u.procfs.channel_ent = NULL;
        cd->u.procfs.content_ent = NULL;

        p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
                             cd->u.procfs.proc_ent,
                             &cache_flush_operations_procfs, cd);
        cd->u.procfs.flush_ent = p;
        if (p == NULL)
                goto out_nomem;

        if (cd->cache_request || cd->cache_parse) {
                p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
                                     cd->u.procfs.proc_ent,
                                     &cache_file_operations_procfs, cd);
                cd->u.procfs.channel_ent = p;
                if (p == NULL)
                        goto out_nomem;
        }
        if (cd->cache_show) {
                p = proc_create_data("content", S_IFREG|S_IRUSR,
                                     cd->u.procfs.proc_ent,
                                     &content_file_operations_procfs, cd);
                cd->u.procfs.content_ent = p;
                if (p == NULL)
                        goto out_nomem;
        }
        return 0;
out_nomem:
        remove_cache_proc_entries(cd, net);
        return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
        return 0;
}
#endif
void __init cache_initialize(void)
{
        INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}

int cache_register_net(struct cache_detail *cd, struct net *net)
{
        int ret;

        sunrpc_init_cache_detail(cd);
        ret = create_cache_proc_entries(cd, net);
        if (ret)
                sunrpc_destroy_cache_detail(cd);
        return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
        remove_cache_proc_entries(cd, net);
        sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);
struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
{
        struct cache_detail *cd;
        int i;

        cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
        if (cd == NULL)
                return ERR_PTR(-ENOMEM);

        cd->hash_table = kzalloc(cd->hash_size * sizeof(struct hlist_head),
                                 GFP_KERNEL);
        if (cd->hash_table == NULL) {
                kfree(cd);
                return ERR_PTR(-ENOMEM);
        }

        for (i = 0; i < cd->hash_size; i++)
                INIT_HLIST_HEAD(&cd->hash_table[i]);
        cd->net = net;
        return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);
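
/*
 * Illustrative sketch only (not compiled): per-net setup and teardown
 * with the helpers above.  'my_cache_template' is a hypothetical
 * statically-initialised struct cache_detail.
 */
#if 0
static int __net_init my_cache_net_init(struct net *net)
{
        struct cache_detail *cd;
        int err;

        cd = cache_create_net(&my_cache_template, net);
        if (IS_ERR(cd))
                return PTR_ERR(cd);
        err = cache_register_net(cd, net);
        if (err) {
                cache_destroy_net(cd, net);
                return err;
        }
        /* stash cd in per-net data for the matching net_exit ... */
        return 0;
}
#endif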
void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
        kfree(cd->hash_table);
        kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);
static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
                               unsigned int cmd, unsigned long arg)
{
        struct inode *inode = file_inode(filp);
        struct cache_detail *cd = RPC_I(inode)->private;

        return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = cache_read_pipefs,
        .write          = cache_write_pipefs,
        .poll           = cache_poll_pipefs,
        .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */
        .open           = cache_open_pipefs,
        .release        = cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
        .open           = content_open_pipefs,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = content_release_pipefs,
};
static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
        struct cache_detail *cd = RPC_I(inode)->private;

        return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
                                  const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct cache_detail *cd = RPC_I(file_inode(filp))->private;

        return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
        .open           = open_flush_pipefs,
        .read           = read_flush_pipefs,
        .write          = write_flush_pipefs,
        .release        = release_flush_pipefs,
        .llseek         = no_llseek,
};
int sunrpc_cache_register_pipefs(struct dentry *parent,
                                 const char *name, umode_t umode,
                                 struct cache_detail *cd)
{
        struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
        if (IS_ERR(dir))
                return PTR_ERR(dir);
        cd->u.pipefs.dir = dir;
        return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
        rpc_remove_cache_dir(cd->u.pipefs.dir);
        cd->u.pipefs.dir = NULL;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);