/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void __pmem *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			clear_pmem(addr, pgsz);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
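
/*
 * Usage sketch (illustrative only, not part of this file): a filesystem
 * can zero freshly allocated blocks through the direct-access path before
 * exposing them, so stale media contents never become visible.
 * "myfs_alloc_block" is a hypothetical allocation helper.
 */
#if 0	/* example only */
static int myfs_alloc_and_zero_block(struct inode *inode, sector_t *blockp)
{
	int err = myfs_alloc_block(inode, blockp);	/* hypothetical */

	if (!err && IS_DAX(inode))
		err = dax_clear_blocks(inode, *blockp, 1 << inode->i_blkbits);
	return err;
}
#endif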
static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
		unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}
/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}
static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}
/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}
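
/*
 * A sketch of the convention described above (illustrative only;
 * "myfs_get_block" is hypothetical): zero b_state before the call, and
 * only trust b_size afterwards if some state bit was set.
 */
#if 0	/* example only */
static long myfs_probe_extent(struct inode *inode, sector_t block)
{
	struct buffer_head bh;

	memset(&bh, 0, sizeof(bh));	/* b_state == 0: b_size unproven */
	bh.b_size = PAGE_SIZE;		/* upper bound we are asking about */
	if (myfs_get_block(inode, block, &bh, 0))
		return -EIO;
	if (!buffer_size_valid(&bh))	/* a hole: assume a single block */
		bh.b_size = 1 << inode->i_blkbits;
	return bh.b_size;
}
#endif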
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		loff_t start, loff_t end, get_block_t get_block,
		struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void __pmem *addr = NULL;
	bool hole = false;
	bool need_wmb = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			sector_t block = pos >> blkbits;
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						   iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(addr, retval, first, pos,
									end);
					need_wmb = true;
				}
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *)addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			retval = -EFAULT;
			break;
		}

		pos += len;
		addr += len;
	}

	if (need_wmb)
		wmb_pmem();

	return (pos == start) ? retval : pos - start;
}
/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
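
/*
 * Usage sketch (illustrative only, not part of this file): a filesystem's
 * ->direct_IO method might drive dax_do_io() roughly as below.
 * "myfs_get_block" is a hypothetical get_block_t implementation; substitute
 * the filesystem's own block-mapping routine.
 */
#if 0	/* example only */
static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		loff_t offset)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/*
	 * No end_io callback needed here; DIO_LOCKING gives us the same
	 * i_mutex rules as do_blockdev_direct_IO().
	 */
	return dax_do_io(iocb, inode, iter, offset, myfs_get_block,
			NULL, DIO_LOCKING);
}
#endif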
/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}
static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void __pmem *vfrom;
	void *vto;

	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}
static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void __pmem *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(addr, PAGE_SIZE);
		wmb_pmem();
	}

	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	return error;
}
/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.  __dax_fault() assumes the caller has done
 * all the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock;
		}
	} else {
		i_mmap_lock_write(mapping);
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock;
		} else {
			i_mmap_unlock_write(mapping);
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock;
		vmf->page = page;
		if (!page) {
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				error = -EIO;
				goto unlock;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent.  If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released.  We
	 * indicate what the callback should do via the uptodate variable,
	 * same as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

	if (!page)
		i_mmap_unlock_write(mapping);
 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	} else {
		i_mmap_unlock_write(mapping);
	}

	goto out;
}
EXPORT_SYMBOL(__dax_fault);
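
/*
 * Usage sketch (illustrative only): a filesystem that does its own fault
 * locking might wrap __dax_fault() like this, taking a filesystem-level
 * mmap lock around the call.  "myfs_mmap_lock", "myfs_mmap_unlock" and
 * "myfs_get_block" are hypothetical; a real handler would also take care
 * of freeze protection and file times for write faults, as dax_fault()
 * below does.
 */
#if 0	/* example only */
static int myfs_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int ret;

	myfs_mmap_lock(inode);
	/* NULL: this example fs never returns unwritten extents */
	ret = __dax_fault(vma, vmf, myfs_get_block, NULL);
	myfs_mmap_unlock(inode);

	return ret;
}
#endif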
/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written; see __dax_fault()
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
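
/*
 * Usage sketch (illustrative only): wiring dax_fault() into a
 * vm_operations_struct from a filesystem's ->mmap method.  All "myfs_*"
 * names are hypothetical.
 */
#if 0	/* example only */
static int myfs_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, myfs_get_block, NULL);
}

static const struct vm_operations_struct myfs_dax_vm_ops = {
	.fault		= myfs_dax_fault,
	.page_mkwrite	= myfs_dax_fault,	/* writes fault the same way */
};

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_mmap(file, vma);
	file_accessed(file);
	vma->vm_ops = &myfs_dax_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP;	/* required for vm_insert_mixed() */
	return 0;
}
#endif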
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	long length;
	void __pmem *kaddr;
	pgoff_t size, pgoff;
	sector_t block, sector;
	unsigned long pfn;
	int result = 0;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		return VM_FAULT_FALLBACK;
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		return VM_FAULT_FALLBACK;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size)
		return VM_FAULT_FALLBACK;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	i_mmap_lock_write(mapping);
	length = get_block(inode, block, &bh, write);
	if (length) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
		goto fallback;

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_write(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_write(mapping);
	}

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size)
		goto fallback;

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page))
			goto fallback;

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			goto fallback;
		}

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		sector = bh.b_blocknr << (blkbits - 9);
		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
			goto fallback;

		/*
		 * Zero new/unwritten blocks only once we have the
		 * direct-access address for them.
		 */
		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			int i;
			for (i = 0; i < PTRS_PER_PMD; i++)
				clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}

		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
	}

 out:
	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	i_mmap_unlock_write(mapping);

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);
/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The faulting address
 * @pmd: The PMD to install a huge-page entry in
 * @flags: The fault flags (FAULT_FLAG_*)
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written; see __dax_fault()
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
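
/*
 * Usage sketch (illustrative only): a ->pmd_fault handler built on
 * dax_pmd_fault().  "myfs_get_block" is hypothetical; a filesystem that
 * returns unwritten extents must pass a real completion callback instead
 * of NULL.
 */
#if 0	/* example only */
static int myfs_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, unsigned int flags)
{
	return dax_pmd_fault(vma, addr, pmd, flags, myfs_get_block, NULL);
}
#endif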
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
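
/*
 * Usage sketch (illustrative only): DAX ptes are installed with
 * vm_insert_mixed(), so the first write to a previously read-mapped page
 * arrives as ->pfn_mkwrite rather than ->page_mkwrite.  A filesystem's
 * vm_operations_struct can point it straight at this helper; "myfs_*"
 * names are hypothetical (see the earlier sketch).
 */
#if 0	/* example only */
static const struct vm_operations_struct myfs_dax_vm_ops = {
	.fault		= myfs_dax_fault,
	.page_mkwrite	= myfs_dax_fault,
	.pfn_mkwrite	= dax_pfn_mkwrite,
};
#endif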
/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void __pmem *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		clear_pmem(addr + offset, length);
		wmb_pmem();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
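
/*
 * Usage sketch (illustrative only): zeroing the partial page at the start
 * of a hole being punched.  "myfs_get_block" is hypothetical; note that
 * dax_zero_page_range() only handles a range within a single page.
 */
#if 0	/* example only */
static int myfs_punch_hole_head(struct inode *inode, loff_t offset, loff_t len)
{
	unsigned partial = offset & (PAGE_CACHE_SIZE - 1);
	int err = 0;

	/* Zero from @offset to the end of its page (or of the hole) */
	if (partial)
		err = dax_zero_page_range(inode, offset,
				min_t(loff_t, len, PAGE_CACHE_SIZE - partial),
				myfs_get_block);
	/* ... then deallocate the whole blocks in between ... */
	return err;
}
#endif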
/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
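
/*
 * Usage sketch (illustrative only): a filesystem's truncate path might
 * zero the tail of the last page before updating i_size.  "myfs_get_block"
 * is hypothetical.
 */
#if 0	/* example only */
static int myfs_setsize(struct inode *inode, loff_t newsize)
{
	int error = 0;

	if (IS_DAX(inode))
		error = dax_truncate_page(inode, newsize, myfs_get_block);
	if (error)
		return error;
	truncate_setsize(inode, newsize);
	return 0;
}
#endif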