/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>

#include "internal.h"
#include "pnfs.h"

static struct kmem_cache *nfs_page_cachep;

static inline struct nfs_page *
nfs_page_alloc(void)
{
        struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
        if (p)
                INIT_LIST_HEAD(&p->wb_list);
        return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
        kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
                   struct page *page,
                   unsigned int offset, unsigned int count)
{
        struct nfs_page *req;

        /* try to allocate the request struct */
        req = nfs_page_alloc();
        if (req == NULL)
                return ERR_PTR(-ENOMEM);

        /* get lock context early so we can deal with alloc failures */
        req->wb_lock_context = nfs_get_lock_context(ctx);
        if (req->wb_lock_context == NULL) {
                nfs_page_free(req);
                return ERR_PTR(-ENOMEM);
        }

        /* Initialize the request struct. Initially, we assume a
         * long write-back delay. This will be adjusted in
         * update_nfs_request below if the region is not locked. */
        req->wb_page    = page;
        atomic_set(&req->wb_complete, 0);
        req->wb_index   = page->index;
        page_cache_get(page);
        BUG_ON(PagePrivate(page));
        BUG_ON(!PageLocked(page));
        BUG_ON(page->mapping->host != inode);
        req->wb_offset  = offset;
        req->wb_pgbase  = offset;
        req->wb_bytes   = count;
        req->wb_context = get_nfs_open_context(ctx);
        kref_init(&req->wb_kref);
        return req;
}

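/*
 * Illustrative sketch only: a minimal example of creating and releasing a
 * request for a page the caller has already locked, given an open context.
 * The helper name is invented for this example; the real callers live in
 * the read and write paths (fs/nfs/read.c, fs/nfs/write.c).
 */
static inline int nfs_create_request_example(struct nfs_open_context *ctx,
                                             struct inode *inode,
                                             struct page *page,
                                             unsigned int offset,
                                             unsigned int count)
{
        struct nfs_page *req;

        req = nfs_create_request(ctx, inode, page, offset, count);
        if (IS_ERR(req))
                return PTR_ERR(req);
        /* ... a real caller would now queue req for I/O ... */
        nfs_release_request(req);       /* drops the reference from kref_init() */
        return 0;
}
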
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request
 */
void nfs_unlock_request(struct nfs_page *req)
{
        if (!NFS_WBACK_BUSY(req)) {
                printk(KERN_ERR "NFS: Invalid unlock attempted\n");
                BUG();
        }
        smp_mb__before_clear_bit();
        clear_bit(PG_BUSY, &req->wb_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&req->wb_flags, PG_BUSY);
        nfs_release_request(req);
}

/**
 * nfs_set_page_tag_locked - Tag a request as locked
 * @req: pointer to request
 */
int nfs_set_page_tag_locked(struct nfs_page *req)
{
        if (!nfs_lock_request_dontget(req))
                return 0;
        if (test_bit(PG_MAPPED, &req->wb_flags))
                radix_tree_tag_set(&NFS_I(req->wb_context->dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
        return 1;
}

/**
 * nfs_clear_page_tag_locked - Clear request tag and wake up sleepers
 * @req: pointer to request
 */
void nfs_clear_page_tag_locked(struct nfs_page *req)
{
        if (test_bit(PG_MAPPED, &req->wb_flags)) {
                struct inode *inode = req->wb_context->dentry->d_inode;
                struct nfs_inode *nfsi = NFS_I(inode);

                spin_lock(&inode->i_lock);
                radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
                nfs_unlock_request(req);
                spin_unlock(&inode->i_lock);
        } else
                nfs_unlock_request(req);
}

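/*
 * Illustrative sketch only: the "lock or wait" pattern used by callers such
 * as nfs_find_and_lock_request() in fs/nfs/write.c, heavily simplified.
 * The helper name is invented for this example; the caller is assumed to
 * already hold a reference on @req and must eventually drop the lock with
 * nfs_clear_page_tag_locked().
 */
static inline int nfs_lock_or_wait_example(struct inode *inode,
                                           struct nfs_page *req)
{
        int ret;

        for (;;) {
                spin_lock(&inode->i_lock);
                if (nfs_set_page_tag_locked(req)) {
                        spin_unlock(&inode->i_lock);
                        return 0;       /* PG_BUSY is now ours */
                }
                spin_unlock(&inode->i_lock);
                /* Someone else owns the request; wait for PG_BUSY to clear. */
                ret = nfs_wait_on_request(req);
                if (ret != 0)
                        return ret;
        }
}
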
/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: pointer to request
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
        struct page *page = req->wb_page;
        struct nfs_open_context *ctx = req->wb_context;
        struct nfs_lock_context *l_ctx = req->wb_lock_context;

        if (page != NULL) {
                page_cache_release(page);
                req->wb_page = NULL;
        }
        if (l_ctx != NULL) {
                nfs_put_lock_context(l_ctx);
                req->wb_lock_context = NULL;
        }
        if (ctx != NULL) {
                put_nfs_open_context(ctx);
                req->wb_context = NULL;
        }
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
static void nfs_free_request(struct kref *kref)
{
        struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

        /* Release struct file and open context */
        nfs_clear_request(req);
        nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
        kref_put(&req->wb_kref, nfs_free_request);
}

static int nfs_wait_bit_uninterruptible(void *word)
{
        io_schedule();
        return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
        return wait_on_bit(&req->wb_flags, PG_BUSY,
                        nfs_wait_bit_uninterruptible,
                        TASK_UNINTERRUPTIBLE);
}

bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
{
        /*
         * FIXME: ideally we should be able to coalesce all requests
         * that are not block boundary aligned, but currently this
         * is problematic for the case of bsize < PAGE_CACHE_SIZE,
         * since nfs_flush_multi and nfs_pagein_multi assume you
         * can have only one struct nfs_page.
         */
        if (desc->pg_bsize < PAGE_SIZE)
                return 0;

        return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

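/*
 * For example, with 4096-byte pages and an rsize/wsize (pg_bsize) of 32768,
 * up to eight page-sized requests can be gathered before pg_count + wb_bytes
 * would exceed pg_bsize and the descriptor has to be flushed.  The numbers
 * here are only illustrative.
 */
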
/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
                     struct inode *inode,
                     const struct nfs_pageio_ops *pg_ops,
                     size_t bsize,
                     int io_flags)
{
        INIT_LIST_HEAD(&desc->pg_list);
        desc->pg_bytes_written = 0;
        desc->pg_count = 0;
        desc->pg_bsize = bsize;
        desc->pg_base = 0;
        desc->pg_moreio = 0;
        desc->pg_recoalesce = 0;
        desc->pg_inode = inode;
        desc->pg_ops = pg_ops;
        desc->pg_ioflags = io_flags;
        desc->pg_error = 0;
        desc->pg_lseg = NULL;
}

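/*
 * Illustrative sketch only: the usual life cycle of a descriptor, loosely
 * modelled on the readpages/writepages paths.  The helper name and the
 * @ops/@pages parameters are invented for this example; real callers pass
 * the read or write nfs_pageio_ops and feed requests in one at a time.
 */
static inline int nfs_pageio_example(struct inode *inode,
                                     const struct nfs_pageio_ops *ops,
                                     struct list_head *pages,
                                     size_t bsize, int io_flags)
{
        struct nfs_pageio_descriptor pgio;
        struct nfs_page *req;

        nfs_pageio_init(&pgio, inode, ops, bsize, io_flags);
        while (!list_empty(pages)) {
                req = nfs_list_entry(pages->next);
                /* coalesces into pgio, flushing an RPC whenever it fills up */
                if (!nfs_pageio_add_request(&pgio, req))
                        return pgio.pg_error;
        }
        nfs_pageio_complete(&pgio);     /* send whatever is still queued */
        return pgio.pg_error;
}
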
/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to the pageio descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
                                      struct nfs_page *req,
                                      struct nfs_pageio_descriptor *pgio)
{
        if (req->wb_context->cred != prev->wb_context->cred)
                return false;
        if (req->wb_lock_context->lockowner != prev->wb_lock_context->lockowner)
                return false;
        if (req->wb_context->state != prev->wb_context->state)
                return false;
        if (req->wb_index != (prev->wb_index + 1))
                return false;
        if (req->wb_pgbase != 0)
                return false;
        if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
                return false;
        return pgio->pg_ops->pg_test(pgio, prev, req);
}

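/*
 * For example, if 'prev' covers all of page index 5 (wb_pgbase == 0 and
 * wb_pgbase + wb_bytes == PAGE_CACHE_SIZE) and 'req' starts at offset 0 of
 * page index 6, the two requests describe one contiguous byte range;
 * provided the credentials, open state and lock owner also match, the final
 * say goes to the ->pg_test() callback.
 */
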
/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
                                     struct nfs_page *req)
{
        if (desc->pg_count != 0) {
                struct nfs_page *prev;

                prev = nfs_list_entry(desc->pg_list.prev);
                if (!nfs_can_coalesce_requests(prev, req, desc))
                        return 0;
        } else {
                if (desc->pg_ops->pg_init)
                        desc->pg_ops->pg_init(desc, req);
                desc->pg_base = req->wb_pgbase;
        }
        nfs_list_remove_request(req);
        nfs_list_add_request(req, &desc->pg_list);
        desc->pg_count += req->wb_bytes;
        return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
        if (!list_empty(&desc->pg_list)) {
                int error = desc->pg_ops->pg_doio(desc);
                if (error < 0)
                        desc->pg_error = error;
                else
                        desc->pg_bytes_written += desc->pg_count;
        }
        if (list_empty(&desc->pg_list)) {
                desc->pg_count = 0;
                desc->pg_base = 0;
        }
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                                    struct nfs_page *req)
{
        while (!nfs_pageio_do_add_request(desc, req)) {
                desc->pg_moreio = 1;
                nfs_pageio_doio(desc);
                if (desc->pg_error < 0)
                        return 0;
                desc->pg_moreio = 0;
                if (desc->pg_recoalesce)
                        return 0;
        }
        return 1;
}

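/*
 * pg_recoalesce signals that the descriptor's parameters changed while
 * requests were already queued (for example when pNFS I/O has to fall back
 * to going through the MDS).  nfs_do_recoalesce() below drains pg_list and
 * replays every request through __nfs_pageio_add_request() so that the
 * queue is rebuilt against the new parameters.
 */
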
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
        LIST_HEAD(head);

        do {
                list_splice_init(&desc->pg_list, &head);
                desc->pg_bytes_written -= desc->pg_count;
                desc->pg_count = 0;
                desc->pg_base = 0;
                desc->pg_recoalesce = 0;

                while (!list_empty(&head)) {
                        struct nfs_page *req;

                        req = list_first_entry(&head, struct nfs_page, wb_list);
                        nfs_list_remove_request(req);
                        if (__nfs_pageio_add_request(desc, req))
                                continue;
                        if (desc->pg_error < 0)
                                return 0;
                        break;
                }
        } while (desc->pg_recoalesce);
        return 1;
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                struct nfs_page *req)
{
        int ret;

        do {
                ret = __nfs_pageio_add_request(desc, req);
                if (ret)
                        break;
                if (desc->pg_error < 0)
                        break;
                ret = nfs_do_recoalesce(desc);
        } while (ret);
        return ret;
}

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
        for (;;) {
                nfs_pageio_doio(desc);
                if (!desc->pg_recoalesce)
                        break;
                if (!nfs_do_recoalesce(desc))
                        break;
        }
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
        if (!list_empty(&desc->pg_list)) {
                struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
                if (index != prev->wb_index + 1)
                        nfs_pageio_complete(desc);
        }
}

#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 * @tag: tag to scan for
 *
 * Moves elements from one of the inode request lists.
 * If the number of requests is set to 0, the entire address_space
 * starting at index idx_start, is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's i_lock when calling this function
 */
int nfs_scan_list(struct nfs_inode *nfsi,
                struct list_head *dst, pgoff_t idx_start,
                unsigned int npages, int tag)
{
        struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
        struct nfs_page *req;
        pgoff_t idx_end;
        int found, i;
        int res;
        struct list_head *list;

        res = 0;
        if (npages == 0)
                idx_end = ~0;
        else
                idx_end = idx_start + npages - 1;

        for (;;) {
                found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
                                (void **)&pgvec[0], idx_start,
                                NFS_SCAN_MAXENTRIES, tag);
                if (found <= 0)
                        break;
                for (i = 0; i < found; i++) {
                        req = pgvec[i];
                        if (req->wb_index > idx_end)
                                goto out;
                        idx_start = req->wb_index + 1;
                        if (nfs_set_page_tag_locked(req)) {
                                kref_get(&req->wb_kref);
                                radix_tree_tag_clear(&nfsi->nfs_page_tree,
                                                req->wb_index, tag);
                                list = pnfs_choose_commit_list(req, dst);
                                nfs_list_add_request(req, list);
                                res++;
                                if (res == INT_MAX)
                                        goto out;
                        }
                }
                /* for latency reduction */
                cond_resched_lock(&nfsi->vfs_inode.i_lock);
        }
out:
        return res;
}

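/*
 * Illustrative sketch only: how the commit path might use nfs_scan_list(),
 * loosely based on nfs_scan_commit() in fs/nfs/write.c.  The helper name is
 * invented for this example; note that the scan must run under the inode's
 * i_lock.
 */
static inline int nfs_scan_commit_example(struct inode *inode,
                                          struct list_head *dst)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        int ret;

        spin_lock(&inode->i_lock);
        /* scan the whole file (npages == 0) for requests tagged for commit */
        ret = nfs_scan_list(nfsi, dst, 0, 0, NFS_PAGE_TAG_COMMIT);
        spin_unlock(&inode->i_lock);
        return ret;
}
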
int __init nfs_init_nfspagecache(void)
{
        nfs_page_cachep = kmem_cache_create("nfs_page",
                                            sizeof(struct nfs_page),
                                            0, SLAB_HWCACHE_ALIGN,
                                            NULL);
        if (nfs_page_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_nfspagecache(void)
{
        kmem_cache_destroy(nfs_page_cachep);
}