/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

#define HASHSIZE		64

static struct hlist_head *cache_hash;
static struct list_head lru_head;
static struct kmem_cache *drc_slab;

/* max number of entries allowed in the cache */
static unsigned int max_drc_entries;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */

/* total number of entries */
static unsigned int num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int drc_mem_usage;

/*
 * Calculate the hash index from an XID.
 */
static inline u32 request_hash(u32 xid)
{
	u32 h = xid;
	h ^= (xid >> 24);
	return h & (HASHSIZE-1);
}

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void cache_cleaner_func(struct work_struct *unused);
static int nfsd_reply_cache_shrink(struct shrinker *shrink,
				   struct shrink_control *sc);

struct shrinker nfsd_reply_cache_shrinker = {
	.shrink	= nfsd_reply_cache_shrink,
	.seeks	= 1,
};

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG.
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static DEFINE_SPINLOCK(cache_lock);
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
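/*
 * Worked example of the formula below, assuming 4k pages (so
 * PAGE_SHIFT - 10 == 2): with 1GB of low memory there are 262144 low
 * pages, int_sqrt(262144) == 512, and (16 * 512) << 2 == 32768, which
 * matches the 1GB row in the table above.
 */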
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}

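/*
 * Allocate a pristine entry from the slab. Note that this does no
 * accounting of its own; the caller is expected to bump num_drc_entries
 * and drc_mem_usage under the cache_lock.
 */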
static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep *rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
		INIT_HLIST_NODE(&rp->c_hash);
	}
	return rp;
}

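/*
 * Release an entry, its reply buffer (if any), and update the
 * accounting. The caller must hold the cache_lock.
 */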
static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	if (!hlist_unhashed(&rp->c_hash))
		hlist_del(&rp->c_hash);
	list_del(&rp->c_lru);
	--num_drc_entries;
	drc_mem_usage -= sizeof(*rp);
	kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
	spin_lock(&cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&cache_lock);
}

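/*
 * Set up the reply cache: the LRU list, the entry slab, the hash table
 * and the memory shrinker. On allocation failure, any state built so
 * far is torn down via nfsd_reply_cache_shutdown() and -ENOMEM is
 * returned.
 */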
int nfsd_reply_cache_init(void)
{
	INIT_LIST_HEAD(&lru_head);
	max_drc_entries = nfsd_cache_size_limit();
	num_drc_entries = 0;

	register_shrinker(&nfsd_reply_cache_shrinker);
	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep *rp;

	unregister_shrinker(&nfsd_reply_cache_shrinker);
	cancel_delayed_work_sync(&cache_cleaner);

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		nfsd_reply_cache_free_locked(rp);
	}

	kfree(cache_hash);
	cache_hash = NULL;

	if (drc_slab) {
		kmem_cache_destroy(drc_slab);
		drc_slab = NULL;
	}
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &lru_head);
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}

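/*
 * An entry is considered expired once it is complete (not RC_INPROG)
 * and has not been touched for longer than RC_EXPIRE.
 */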
static inline bool
nfsd_cache_entry_expired(struct svc_cacherep *rp)
{
	return rp->c_state != RC_INPROG &&
	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static void
prune_cache_entries(void)
{
	struct svc_cacherep *rp, *tmp;

	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
		if (!nfsd_cache_entry_expired(rp) &&
		    num_drc_entries <= max_drc_entries)
			break;
		nfsd_reply_cache_free_locked(rp);
	}

	/*
	 * Conditionally rearm the job. If we cleaned out the list, then
	 * cancel any pending run (since there won't be any work to do).
	 * Otherwise, we rearm the job or modify the existing one to run in
	 * RC_EXPIRE since we just ran the pruner.
	 */
	if (list_empty(&lru_head))
		cancel_delayed_work(&cache_cleaner);
	else
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
}

static void
cache_cleaner_func(struct work_struct *unused)
{
	spin_lock(&cache_lock);
	prune_cache_entries();
	spin_unlock(&cache_lock);
}

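/*
 * Shrinker callback. This uses the older single-callback shrinker
 * interface: a nonzero sc->nr_to_scan requests a prune pass, and the
 * return value reports how many entries remain so the VM can size its
 * next request.
 */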
static int
nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned int num;

	spin_lock(&cache_lock);
	if (sc->nr_to_scan)
		prune_cache_entries();
	num = num_drc_entries;
	spin_unlock(&cache_lock);

	return num;
}

/*
 * Walk an xdr_buf and compute a checksum over at most the first
 * RC_CSUMLEN bytes.
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}

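/*
 * Compare an incoming request with a cached entry. The cheap RPC header
 * checks (xid, proc, prot, vers, argument length, source address and
 * port) run first; only when those all match is the payload checksum
 * compared, and a mismatch there is tallied in payload_misses.
 */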
static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
	/* Check RPC header info first */
	if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc ||
	    rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers ||
	    rqstp->rq_arg.len != rp->c_len ||
	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
		return false;

	/* compare checksum of NFS data */
	if (csum != rp->c_csum) {
		++payload_misses;
		return false;
	}

	return true;
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
{
	struct svc_cacherep *rp;
	struct hlist_head *rh;

	rh = &cache_hash[request_hash(rqstp->rq_xid)];
	hlist_for_each_entry(rp, rh, c_hash) {
		if (nfsd_cache_match(rqstp, csum, rp))
			return rp;
	}
	return NULL;
}

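/*
 * Return value dispositions (see the RC_* definitions in cache.h):
 * RC_DOIT means the caller should process the call normally, RC_REPLY
 * means a cached reply has already been composed into rq_res, and
 * RC_DROPIT means the request should be dropped (a duplicate is still
 * in progress, or it was retransmitted too quickly).
 */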
/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep *rp, *found;
	__be32 xid = rqstp->rq_xid;
	u32 proto = rqstp->rq_prot,
	    vers = rqstp->rq_vers,
	    proc = rqstp->rq_proc;
	__wsum csum;
	unsigned long age;
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return rtn;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry. First, try to reuse the first entry on the LRU
	 * if it works, then go ahead and prune the LRU list.
	 */
	spin_lock(&cache_lock);
	if (!list_empty(&lru_head)) {
		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
		if (nfsd_cache_entry_expired(rp) ||
		    num_drc_entries >= max_drc_entries) {
			lru_put_end(rp);
			prune_cache_entries();
			goto search_cache;
		}
	}

	/* No expired ones available, allocate a new one. */
	spin_unlock(&cache_lock);
	rp = nfsd_reply_cache_alloc();
	spin_lock(&cache_lock);
	if (likely(rp)) {
		++num_drc_entries;
		drc_mem_usage += sizeof(*rp);
	}

search_cache:
	found = nfsd_cache_search(rqstp, csum);
	if (found) {
		if (likely(rp))
			nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		goto out;
	}

	/*
	 * We're keeping the one we just allocated. Are we now over the
	 * limit? Prune one off the tip of the LRU in trade for the one we
	 * just allocated if so.
	 */
	if (num_drc_entries >= max_drc_entries)
		nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
						struct svc_cacherep, c_lru));

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	hash_refile(rp);
	lru_put_end(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
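/*
 * Note on the length calculation below: statp points into rq_res.head,
 * so the reply body to cache runs from statp to the end of the head
 * kvec. len is kept in 32-bit XDR words, which is why the 256-byte cap
 * is written as (256 >> 2).
 */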
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
	int len;
	size_t bufsize = 0;

	if (!rp)
		return;

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(rp);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(rp);
		return;
	}
	spin_lock(&cache_lock);
	drc_mem_usage += bufsize;
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec *vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	spin_lock(&cache_lock);
	seq_printf(m, "max entries: %u\n", max_drc_entries);
	seq_printf(m, "num entries: %u\n", num_drc_entries);
	seq_printf(m, "hash buckets: %u\n", HASHSIZE);
	seq_printf(m, "mem usage: %u\n", drc_mem_usage);
	seq_printf(m, "cache hits: %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses: %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached: %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses: %u\n", payload_misses);
	spin_unlock(&cache_lock);
	return 0;
}

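/*
 * Open callback for the stats file, a single_open seq_file wired into
 * nfsd's procfs tree elsewhere. Sample output (values are hypothetical):
 *
 *	max entries: 32768
 *	num entries: 104
 *	hash buckets: 64
 *	mem usage: 153224
 *	cache hits: 1038
 *	cache misses: 4619
 *	not cached: 2271
 *	payload misses: 0
 */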
int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_reply_cache_stats_show, NULL);
}