/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
/*
 * dax_clear_blocks() is called from within transaction context from XFS,
 * so the stack from this point on must follow GFP_NOFS semantics for all
 * operations.
 */
int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void __pmem *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			clear_pmem(addr, pgsz);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
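/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * get_block_t method might zero blocks it has just allocated for a DAX
 * file before mapping them, so stale media contents are never exposed.
 * myfs_alloc_block() is a hypothetical allocator; the myfs_* names here
 * and in the sketches below are stand-ins, not real kernel APIs.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	sector_t blocknr;
	int err;

	err = myfs_alloc_block(inode, iblock, &blocknr, create);
	if (err)
		return err;
	if (create && IS_DAX(inode)) {
		/* zero one fs block via the direct-access path */
		err = dax_clear_blocks(inode, blocknr,
				       1 << inode->i_blkbits);
		if (err)
			return err;
	}
	map_bh(bh_result, inode->i_sb, blocknr);
	return 0;
}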
static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
		unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}
/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}
static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}
/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head,
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void __pmem *addr;
	bool hole = false;
	bool need_wmb = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						   iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(addr, retval, first, pos,
									end);
					need_wmb = true;
				}
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *)addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			retval = -EFAULT;
			break;
		}

		pos += len;
		addr += len;
	}

	if (need_wmb)
		wmb_pmem();

	return (pos == start) ? retval : pos - start;
}
/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
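/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * direct_IO address_space operation might dispatch DAX inodes to
 * dax_do_io() and everything else to the regular direct-IO path, using
 * the hypothetical myfs_get_block sketched earlier.
 */
static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (IS_DAX(inode))
		return dax_do_io(iocb, inode, iter, offset, myfs_get_block,
				 NULL, DIO_LOCKING);
	return blockdev_direct_IO(iocb, inode, iter, offset, myfs_get_block);
}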
/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}
static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void __pmem *vfrom;
	void *vto;

	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}
static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct address_space *mapping = inode->i_mapping;
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void __pmem *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(addr, PAGE_SIZE);
		wmb_pmem();
	}

	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}
/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.  __dax_fault() assumes the caller has done
 * all the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent.  If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released.  We
	 * indicate what the callback should do via the uptodate variable,
	 * same as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);
/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
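/*
 * Illustrative sketch (not part of the original file): a .fault handler
 * built on dax_fault(), using the hypothetical myfs_get_block sketched
 * earlier.  Passing NULL for the unwritten-extent callback is only valid
 * because this hypothetical filesystem never maps unwritten extents; a
 * filesystem that does would pass its conversion method instead.
 */
static int myfs_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, myfs_get_block, NULL);
}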
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (i.e. low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	long length;
	void __pmem *kaddr;
	pgoff_t size, pgoff;
	sector_t block, sector;
	unsigned long pfn;
	int result = 0;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		return VM_FAULT_FALLBACK;
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		return VM_FAULT_FALLBACK;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size)
		return VM_FAULT_FALLBACK;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	length = get_block(inode, block, &bh, write);
	if (length)
		return VM_FAULT_SIGBUS;
	i_mmap_lock_read(mapping);

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
		goto fallback;

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_read(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_read(mapping);
	}

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size)
		goto fallback;

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page))
			goto fallback;

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			goto fallback;
		}

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		sector = bh.b_blocknr << (blkbits - 9);
		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
			goto fallback;

		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			int i;
			for (i = 0; i < PTRS_PER_PMD; i++)
				clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}

		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);
/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The faulting address
 * @pmd: The PMD being faulted on
 * @flags: The fault flags (FAULT_FLAG_*)
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten
 *	extents to written extents
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
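/*
 * Illustrative sketch (not part of the original file): the matching
 * .pmd_fault handler, again using the hypothetical myfs_get_block and no
 * unwritten-extent callback (valid only because this hypothetical
 * filesystem never maps unwritten extents).
 */
static int myfs_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
			      pmd_t *pmd, unsigned int flags)
{
	return dax_pmd_fault(vma, addr, pmd, flags, myfs_get_block, NULL);
}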
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
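/*
 * Illustrative sketch (not part of the original file): with the
 * hypothetical myfs_* wrappers above, a filesystem can wire the DAX
 * helpers into its vm_operations_struct and mmap method.
 * dax_pfn_mkwrite() drops in directly; the .pmd_fault line assumes
 * CONFIG_TRANSPARENT_HUGEPAGE.
 */
static const struct vm_operations_struct myfs_dax_vm_ops = {
	.fault		= myfs_dax_fault,
	.pmd_fault	= myfs_dax_pmd_fault,
	.page_mkwrite	= myfs_dax_fault,
	.pfn_mkwrite	= dax_pfn_mkwrite,
};

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &myfs_dax_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}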
/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void __pmem *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		clear_pmem(addr + offset, length);
		wmb_pmem();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
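/*
 * Illustrative sketch (not part of the original file): a hole-punch
 * implementation might zero the partial page at the start of the range
 * with dax_zero_page_range() before freeing the fully covered blocks
 * (tail handling omitted).  myfs_* names are hypothetical.
 */
static int myfs_punch_hole_head(struct inode *inode, loff_t offset,
				loff_t len)
{
	unsigned partial = min_t(loff_t, len,
				 PAGE_CACHE_ALIGN(offset) - offset);

	if (!partial)
		return 0;
	return dax_zero_page_range(inode, offset, partial, myfs_get_block);
}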
/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
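/*
 * Illustrative sketch (not part of the original file): a truncate path
 * would zero the new final partial page before shrinking i_size, so a
 * later mmap read within that page past EOF sees zeroes.  myfs_* names
 * are hypothetical.
 */
static int myfs_dax_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	error = dax_truncate_page(inode, newsize, myfs_get_block);
	if (error)
		return error;
	truncate_setsize(inode, newsize);
	return 0;
}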