fs/nfsd/nfscache.c
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

static struct hlist_head *cache_hash;
static struct list_head lru_head;
static struct kmem_cache *drc_slab;

/* max number of entries allowed in the cache */
static unsigned int max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int maskbits;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */

/* total number of entries */
static unsigned int num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int drc_mem_usage;

/* longest hash chain seen */
static unsigned int longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int longest_chain_cachesize;

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void cache_cleaner_func(struct work_struct *unused);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
	.scan_objects = nfsd_reply_cache_scan,
	.count_objects = nfsd_reply_cache_count,
	.seeks	= 1,
};

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static DEFINE_SPINLOCK(cache_lock);
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
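
/*
 * Worked example for the table above (assuming 4k pages, so
 * PAGE_SHIFT - 10 == 2): a 1GB machine has 262144 low-memory pages,
 * int_sqrt(262144) == 512, and (16 * 512) << 2 == 32768 entries.
 */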

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
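
/*
 * For example, with a 64MB machine max_drc_entries is 8192 (see the table
 * above), 8192 / TARGET_BUCKET_SIZE == 128 is already a power of two, so
 * the cache gets 128 buckets and maskbits ends up as ilog2(128) == 7.
 */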

static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep *rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
		INIT_HLIST_NODE(&rp->c_hash);
	}
	return rp;
}
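
/*
 * Note that the allocation happens outside cache_lock; the caller is
 * expected to account for the new entry (num_drc_entries, drc_mem_usage)
 * once it retakes the lock, as nfsd_cache_lookup() below does.
 */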

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	if (!hlist_unhashed(&rp->c_hash))
		hlist_del(&rp->c_hash);
	list_del(&rp->c_lru);
	--num_drc_entries;
	drc_mem_usage -= sizeof(*rp);
	kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
	spin_lock(&cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&cache_lock);
}

int nfsd_reply_cache_init(void)
{
	unsigned int hashsize;

	INIT_LIST_HEAD(&lru_head);
	max_drc_entries = nfsd_cache_size_limit();
	num_drc_entries = 0;
	hashsize = nfsd_hashsize(max_drc_entries);
	maskbits = ilog2(hashsize);

	register_shrinker(&nfsd_reply_cache_shrinker);
	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	cache_hash = kcalloc(hashsize, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}
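
/*
 * The out_nomem path above calls nfsd_reply_cache_shutdown() on a
 * partially initialized cache, which is why the teardown below must
 * tolerate a NULL drc_slab, a NULL cache_hash and an empty LRU list.
 */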

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep *rp;

	unregister_shrinker(&nfsd_reply_cache_shrinker);
	cancel_delayed_work_sync(&cache_cleaner);

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		nfsd_reply_cache_free_locked(rp);
	}

	kfree(cache_hash);
	cache_hash = NULL;

	if (drc_slab) {
		kmem_cache_destroy(drc_slab);
		drc_slab = NULL;
	}
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &lru_head);
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}
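
/*
 * schedule_delayed_work() is a no-op when the work is already queued, so
 * calling it on every LRU touch is cheap; prune_cache_entries() takes care
 * of re-arming or cancelling the job once it actually runs.
 */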

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits));
}
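
/*
 * The bucket is recomputed from c_xid on each refile, so this also serves
 * to insert a freshly initialized entry whose XID was just filled in. With
 * maskbits == 7, for instance, hash_32() folds the 32-bit XID down to a
 * bucket index in [0, 127].
 */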

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(void)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;
		if (num_drc_entries <= max_drc_entries &&
		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
			break;
		nfsd_reply_cache_free_locked(rp);
		freed++;
	}

	/*
	 * Conditionally rearm the job. If we cleaned out the list, then
	 * cancel any pending run (since there won't be any work to do).
	 * Otherwise, we rearm the job or modify the existing one to run in
	 * RC_EXPIRE since we just ran the pruner.
	 */
	if (list_empty(&lru_head))
		cancel_delayed_work(&cache_cleaner);
	else
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
	return freed;
}
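
/*
 * Both the shrinker's scan callback and the delayed-work cleaner below
 * funnel through prune_cache_entries(); each is responsible for taking
 * cache_lock around the call.
 */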

static void
cache_cleaner_func(struct work_struct *unused)
{
	spin_lock(&cache_lock);
	prune_cache_entries();
	spin_unlock(&cache_lock);
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long num;

	spin_lock(&cache_lock);
	num = num_drc_entries;
	spin_unlock(&cache_lock);

	return num;
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long freed;

	spin_lock(&cache_lock);
	freed = prune_cache_entries();
	spin_unlock(&cache_lock);
	return freed;
}

/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}
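
/*
 * Illustration, assuming RC_CSUMLEN == 256 (its value in cache.h at the
 * time of writing): for a request with a 120-byte head kvec and 2000 bytes
 * of page data, the first csum_partial() covers all 120 head bytes, and
 * the loop then sums the remaining 136 bytes from the start of the page
 * array.
 */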

static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
	/* Check RPC header info first */
	if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc ||
	    rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers ||
	    rqstp->rq_arg.len != rp->c_len ||
	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
		return false;

	/* compare checksum of NFS data */
	if (csum != rp->c_csum) {
		++payload_misses;
		return false;
	}

	return true;
}
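
/*
 * The cheap integer and address comparisons above run before the checksum
 * test, so payload_misses only counts the interesting case: everything in
 * the RPC header matched but the payload differed, e.g. a client reusing
 * an XID for a different request.
 */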

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
{
	struct svc_cacherep *rp, *ret = NULL;
	struct hlist_head *rh;
	unsigned int entries = 0;

	rh = &cache_hash[hash_32(rqstp->rq_xid, maskbits)];
	hlist_for_each_entry(rp, rh, c_hash) {
		++entries;
		if (nfsd_cache_match(rqstp, csum, rp)) {
			ret = rp;
			break;
		}
	}

	/* tally hash chain length stats */
	if (entries > longest_chain) {
		longest_chain = entries;
		longest_chain_cachesize = num_drc_entries;
	} else if (entries == longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		longest_chain_cachesize = min(longest_chain_cachesize,
						num_drc_entries);
	}

	return ret;
}
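
/*
 * The longest_chain bookkeeping above is purely diagnostic: it records the
 * worst hash chain ever walked, along with the cache size at that moment,
 * which gives a rough check on whether TARGET_BUCKET_SIZE is holding up.
 */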

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep *rp, *found;
	__be32 xid = rqstp->rq_xid;
	u32 proto = rqstp->rq_prot,
	    vers = rqstp->rq_vers,
	    proc = rqstp->rq_proc;
	__wsum csum;
	unsigned long age;
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return rtn;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	rp = nfsd_reply_cache_alloc();
	spin_lock(&cache_lock);
	if (likely(rp)) {
		++num_drc_entries;
		drc_mem_usage += sizeof(*rp);
	}

	/* go ahead and prune the cache */
	prune_cache_entries();

	found = nfsd_cache_search(rqstp, csum);
	if (found) {
		if (likely(rp))
			nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		goto out;
	}

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	hash_refile(rp);
	lru_put_end(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}
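
/*
 * A note on the return values above, as consumed by nfsd_dispatch():
 * RC_DOIT means process the call normally, RC_DROPIT means drop the
 * request without replying (the client will retransmit), and RC_REPLY
 * means a cached reply has already been placed in rqstp->rq_res and can
 * be sent as-is.
 */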

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
	int len;
	size_t bufsize = 0;

	if (!rp)
		return;

	len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(rp);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(rp);
		return;
	}
	spin_lock(&cache_lock);
	drc_mem_usage += bufsize;
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec *vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	spin_lock(&cache_lock);
	seq_printf(m, "max entries:           %u\n", max_drc_entries);
	seq_printf(m, "num entries:           %u\n", num_drc_entries);
	seq_printf(m, "hash buckets:          %u\n", 1 << maskbits);
	seq_printf(m, "mem usage:             %u\n", drc_mem_usage);
	seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses:        %u\n", payload_misses);
	seq_printf(m, "longest chain len:     %u\n", longest_chain);
	seq_printf(m, "cachesize at longest:  %u\n", longest_chain_cachesize);
	spin_unlock(&cache_lock);
	return 0;
}
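
/*
 * Sample output (the numbers are illustrative only):
 *
 *	max entries:           92681
 *	num entries:           1059
 *	hash buckets:          2048
 *	mem usage:             1084416
 *	cache hits:            5017
 *	cache misses:          1059
 *	not cached:            42
 *	payload misses:        0
 *	longest chain len:     3
 *	cachesize at longest:  986
 */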

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_reply_cache_stats_show, NULL);
}