/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void __pmem *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			clear_pmem(addr, pgsz);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
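
/*
 * Example (editor's sketch, not from the original file): dax_clear_blocks()
 * is meant to be called by the filesystem right after allocating blocks for
 * a DAX file and before the new mapping is published, so that a racing
 * fault can never observe stale media contents.  The helper below is
 * hypothetical; ext2, for instance, does the equivalent inline in its
 * get_block allocation path.
 */
static int example_zero_new_dax_blocks(struct inode *inode, sector_t block)
{
	if (!IS_DAX(inode))
		return 0;
	/* Zero one newly allocated filesystem block (size is in bytes). */
	return dax_clear_blocks(inode, block, 1 << inode->i_blkbits);
}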
static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
		unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}
/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}
static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}
/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void __pmem *addr = NULL;
	bool hole = false;
	bool need_wmb = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			sector_t block = pos >> blkbits;
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(addr, retval, first, pos,
							end);
					need_wmb = true;
				}
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *)addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len)
			break;

		pos += len;
		addr += len;
	}

	if (need_wmb)
		wmb_pmem();

	return (pos == start) ? retval : pos - start;
}
/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
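
/*
 * Example (editor's sketch, not from the original file): a filesystem's
 * ->direct_IO method built on dax_do_io(), modelled on ext2's usage.
 * "example_get_block" is a hypothetical stand-in for the filesystem's real
 * get_block_t callback (e.g. ext2_get_block); a simple filesystem passes
 * NULL for @end_io.
 */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create);

static ssize_t example_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
				 loff_t offset)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* DIO_LOCKING: dax_do_io() handles i_mutex itself for reads. */
	return dax_do_io(iocb, inode, iter, offset, example_get_block,
			 NULL, DIO_LOCKING);
}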
/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
			struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}
static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void __pmem *vfrom;
	void *vto;

	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}
static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct address_space *mapping = inode->i_mapping;
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void __pmem *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(addr, PAGE_SIZE);
		wmb_pmem();
	}

	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}
/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent. If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released. We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);
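
/*
 * Example (editor's sketch, not from the original file): a filesystem that
 * serialises faults against truncate with its own lock (as XFS does with
 * its MMAPLOCK) calls __dax_fault() directly inside that lock rather than
 * using the dax_fault() wrapper.  "example_mmap_lock"/"example_mmap_unlock"
 * are hypothetical placeholders for that lock; example_get_block is the
 * hypothetical callback declared above.
 */
static void example_mmap_lock(struct inode *inode);	/* hypothetical */
static void example_mmap_unlock(struct inode *inode);	/* hypothetical */

static int example_locked_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int ret;

	example_mmap_lock(inode);
	ret = __dax_fault(vma, vmf, example_get_block, NULL);
	example_mmap_unlock(inode);

	return ret;
}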
/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten
 *	blocks to written (may be NULL if the fs has no unwritten extents)
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
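
/*
 * Example (editor's sketch, not from the original file): the minimal fault
 * handler for a filesystem without unwritten extents, modelled on ext2,
 * which passes NULL for @complete_unwritten.  A write fault through this
 * path also serves as ->page_mkwrite, since a DAX write fault needs block
 * allocation rather than page dirtying.
 */
static int example_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, example_get_block, NULL);
}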
/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
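
/*
 * Example (editor's sketch, not from the original file): wiring the fault
 * handlers above into a file's vm_operations_struct, following the ext2
 * pattern.  The filesystem's ->mmap must also set VM_MIXEDMAP on the VMA
 * so that vm_insert_mixed() is legal.
 */
static int example_dax_pfn_mkwrite(struct vm_area_struct *vma,
				   struct vm_fault *vmf)
{
	return dax_pfn_mkwrite(vma, vmf);
}

static const struct vm_operations_struct example_dax_vm_ops = {
	.fault		= example_dax_fault,
	.page_mkwrite	= example_dax_fault,	/* write faults allocate blocks */
	.pfn_mkwrite	= example_dax_pfn_mkwrite,
};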
/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void __pmem *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		clear_pmem(addr + offset, length);
		wmb_pmem();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
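
/*
 * Example (editor's sketch, not from the original file): a hole-punch
 * implementation zeroes the partial pages at either end of the range with
 * dax_zero_page_range() before freeing the whole blocks in between.  This
 * hypothetical helper assumes @start and @end fall in different pages;
 * example_get_block is the hypothetical callback declared above.
 */
static int example_zero_punch_ends(struct inode *inode, loff_t start,
				   loff_t end)
{
	unsigned head = PAGE_CACHE_ALIGN(start) - start;
	unsigned tail = end & (PAGE_CACHE_SIZE - 1);
	int err = 0;

	if (head)	/* zero from @start up to the next page boundary */
		err = dax_zero_page_range(inode, start, head,
						example_get_block);
	if (!err && tail)	/* zero from the last page boundary to @end */
		err = dax_zero_page_range(inode, end - tail, tail,
						example_get_block);
	return err;
}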
/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
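
/*
 * Example (editor's sketch, not from the original file): the truncate path
 * of a DAX-aware filesystem, following ext2_setsize().  The partial tail
 * page must be zeroed via dax_truncate_page() because the file may be
 * mmapped; example_get_block is the hypothetical callback declared above.
 */
static int example_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	if (IS_DAX(inode))
		error = dax_truncate_page(inode, newsize, example_get_block);
	else
		error = block_truncate_page(inode->i_mapping, newsize,
						example_get_block);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	return 0;
}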