/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>

#define NFS_PARANOIA 1

static struct kmem_cache *nfs_page_cachep;

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p;

	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->wb_list);
	}
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The user should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_page *req;

	for (;;) {
		/* try to allocate the request struct */
		req = nfs_page_alloc();
		if (req != NULL)
			break;

		if (signalled() && (server->flags & NFS_MOUNT_INTR))
			return ERR_PTR(-ERESTARTSYS);
		yield();
	}

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index   = page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset  = offset;
	req->wb_pgbase  = offset;
	req->wb_bytes   = count;
	atomic_set(&req->wb_count, 1);
	req->wb_context = get_nfs_open_context(ctx);

	return req;
}
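
/*
 * Usage sketch (illustrative only, not part of this file): the caller is
 * expected to hold the page lock before creating a request, and to drop
 * its reference with nfs_release_request() when done.
 *
 *	struct nfs_page *req;
 *
 *	lock_page(page);
 *	req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
 *	if (IS_ERR(req)) {
 *		unlock_page(page);
 *		return PTR_ERR(req);
 *	}
 *	... queue req for I/O, then nfs_release_request(req) ...
 */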

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
	nfs_release_request(req);
}

/**
 * nfs_set_page_writeback_locked - Lock a request for writeback
 * @req: request to lock
 */
int nfs_set_page_writeback_locked(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);

	if (!nfs_lock_request(req))
		return 0;
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
	return 1;
}

/**
 * nfs_clear_page_writeback - Unlock request and wake up sleepers
 * @req: request to unlock
 */
void nfs_clear_page_writeback(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);

	if (req->wb_page != NULL) {
		spin_lock(&nfsi->req_lock);
		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
		spin_unlock(&nfsi->req_lock);
	}
	nfs_unlock_request(req);
}

/**
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clean up
 *
 * Release page resources associated with a write request after it
 * has completed.
 */
void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void
nfs_release_request(struct nfs_page *req)
{
	if (!atomic_dec_and_test(&req->wb_count))
		return;

#ifdef NFS_PARANOIA
	BUG_ON(!list_empty(&req->wb_list));
	BUG_ON(NFS_WBACK_BUSY(req));
#endif

	/* Release struct file or cached credential */
	nfs_clear_request(req);
	put_nfs_open_context(req->wb_context);
	nfs_page_free(req);
}

static int nfs_wait_bit_interruptible(void *word)
{
	if (signal_pending(current))
		return -ERESTARTSYS;
	schedule();
	return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by signals only if mounted with intr flag.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->dentry->d_inode);
	sigset_t oldmask;
	int ret = 0;

	if (!test_bit(PG_BUSY, &req->wb_flags))
		goto out;
	/*
	 * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
	 *	 are not interrupted if intr flag is not set
	 */
	rpc_clnt_sigmask(clnt, &oldmask);
	ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
	rpc_clnt_sigunmask(clnt, &oldmask);
out:
	return ret;
}
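
/*
 * Usage sketch (illustrative only, not part of this file): the caller
 * must hold its own reference on the request across the wait, e.g.
 *
 *	atomic_inc(&req->wb_count);
 *	spin_unlock(&nfsi->req_lock);
 *	error = nfs_wait_on_request(req);
 *	nfs_release_request(req);
 *
 * where "error" and "nfsi" are hypothetical locals of the caller.
 */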

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @doio: pointer to io function
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int),
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_inode = inode;
	desc->pg_doio = doio;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
}
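
/*
 * Usage sketch (illustrative only, not part of this file): a read or
 * write path typically initialises a descriptor, feeds it requests one
 * at a time, then flushes whatever remains.  "nfs_pagein_one" is used
 * here purely as an example of a @doio callback; any function with the
 * matching signature will do, and "next_request" is a hypothetical
 * source of nfs_page requests.
 *
 *	struct nfs_pageio_descriptor pgio;
 *	struct nfs_page *req;
 *
 *	nfs_pageio_init(&pgio, inode, nfs_pagein_one,
 *			NFS_SERVER(inode)->rsize, 0);
 *	while ((req = next_request()) != NULL)
 *		nfs_pageio_add_request(&pgio, req);
 *	nfs_pageio_complete(&pgio);
 */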

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static int nfs_can_coalesce_requests(struct nfs_page *prev,
				     struct nfs_page *req)
{
	if (req->wb_context->cred != prev->wb_context->cred)
		return 0;
	if (req->wb_context->lockowner != prev->wb_context->lockowner)
		return 0;
	if (req->wb_context->state != prev->wb_context->state)
		return 0;
	if (req->wb_index != (prev->wb_index + 1))
		return 0;
	if (req->wb_pgbase != 0)
		return 0;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return 0;
	return 1;
}
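
/*
 * Worked example (illustrative only, not part of this file): with
 * PAGE_CACHE_SIZE == 4096, a 'prev' request covering all of page index 5
 * (wb_pgbase 0, wb_bytes 4096) coalesces with a 'req' for page index 6
 * starting at byte 0, provided both share the same credential, lockowner
 * and open state.  A 'prev' covering only bytes 0..2047 of page 5 ends
 * short of the page boundary (0 + 2048 != PAGE_CACHE_SIZE) and is
 * rejected, as is a 'req' whose wb_pgbase is non-zero.
 */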

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	size_t newlen = req->wb_bytes;

	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		/*
		 * FIXME: ideally we should be able to coalesce all requests
		 * that are not block boundary aligned, but currently this
		 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
		 * since nfs_flush_multi and nfs_pagein_multi assume you
		 * can have only one struct nfs_page.
		 */
		if (desc->pg_bsize < PAGE_SIZE)
			return 0;
		newlen += desc->pg_count;
		if (newlen > desc->pg_bsize)
			return 0;
		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req))
			return 0;
	} else
		desc->pg_base = req->wb_pgbase;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count = newlen;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_doio(desc->pg_inode,
					  &desc->pg_list,
					  nfs_page_array_len(desc->pg_base,
							     desc->pg_count),
					  desc->pg_count,
					  desc->pg_ioflags);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
	}
	return 1;
}

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	nfs_pageio_doio(desc);
}

#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @head: One of the NFS inode request lists
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves elements from one of the inode request lists.
 * If the number of requests is set to 0, the entire address_space
 * starting at index idx_start, is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's req_lock when calling this function.
 */
int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head,
		struct list_head *dst, pgoff_t idx_start,
		unsigned int npages)
{
	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
	struct nfs_page *req;
	pgoff_t idx_end;
	int found, i;
	int res;

	res = 0;
	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	for (;;) {
		found = radix_tree_gang_lookup(&nfsi->nfs_page_tree,
				(void **)&pgvec[0], idx_start,
				NFS_SCAN_MAXENTRIES);
		if (found <= 0)
			break;
		for (i = 0; i < found; i++) {
			req = pgvec[i];
			if (req->wb_index > idx_end)
				goto out;
			idx_start = req->wb_index + 1;
			if (req->wb_list_head != head)
				continue;
			if (nfs_set_page_writeback_locked(req)) {
				nfs_list_remove_request(req);
				nfs_list_add_request(req, dst);
				res++;
			}
		}
	}
out:
	return res;
}
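
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * caller scans under the inode's req_lock and then processes the
 * requests it collected outside the lock, e.g.
 *
 *	LIST_HEAD(dst);
 *	int moved;
 *
 *	spin_lock(&nfsi->req_lock);
 *	moved = nfs_scan_list(nfsi, &nfsi->commit, &dst, idx_start, npages);
 *	spin_unlock(&nfsi->req_lock);
 *
 * "nfsi->commit" is used here purely as an example of one of the inode's
 * request lists; "idx_start" and "npages" are hypothetical locals.
 */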

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL, NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}