/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = (void __pmem *) ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = (void __pmem *) ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}
static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}
/*
 * dax_clear_blocks() is called from within transaction context from XFS,
 * and hence this means the stack from this point must follow GFP_NOFS
 * semantics for all operations.
 */
int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct blk_dax_ctl dax = {
		.sector = block << (inode->i_blkbits - 9),
		.size = _size,
	};

	might_sleep();
	do {
		long count, sz;

		count = dax_map_atomic(bdev, &dax);
		if (count < 0)
			return count;
		sz = min_t(long, count, SZ_128K);
		clear_pmem(dax.addr, sz);
		dax.size -= sz;
		dax.sector += sz / 512;
		dax_unmap_atomic(bdev, &dax);
		cond_resched();
	} while (dax.size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
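
/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * has just allocated blocks for a DAX write could zero them with
 * dax_clear_blocks() instead of returning unwritten extents. The names
 * below (the bh, the calling context) are hypothetical stand-ins.
 */
#if 0	/* usage sketch only */
	/* after get_block(inode, block, &bh, 1) allocated a new extent: */
	if (buffer_new(&bh)) {
		int err = dax_clear_blocks(inode, bh.b_blocknr, bh.b_size);
		if (err < 0)
			return err;
	}
#endif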
/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}
static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}
/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}
static sector_t to_sector(const struct buffer_head *bh,
		const struct inode *inode)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

	return sector;
}
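
/*
 * Worked example of the shift above: with 4096-byte filesystem blocks,
 * inode->i_blkbits is 12, so the shift is 12 - 9 = 3 and each block is
 * 2^3 = 8 512-byte sectors; b_blocknr 100 therefore maps to sector 800.
 */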
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false, need_wmb = false;
	struct block_device *bdev = NULL;
	/* rc must start at 0 so a zero-length I/O returns 0, not garbage */
	int rw = iov_iter_rw(iter), rc = 0;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = (void __pmem *) ERR_PTR(-EIO),
	};

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(dax.addr, map_len, first,
							pos, end);
					need_wmb = true;
				}
				dax.addr += first;
				size = map_len - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	if (need_wmb)
		wmb_pmem();
	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}
/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
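
/*
 * Illustrative sketch (assumed, not from this file): a filesystem's
 * ->direct_IO method can be routed through dax_do_io() much as ext2
 * does. "example_get_block" stands in for the fs's get_block_t; no
 * end_io callback is needed when there is nothing to convert.
 */
#if 0	/* usage sketch only */
static ssize_t example_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
				 loff_t offset)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	return dax_do_io(iocb, inode, iter, offset, example_get_block,
			 NULL, DIO_LOCKING);
}
#endif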
/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}
static int copy_user_bh(struct page *to, struct inode *inode,
		struct buffer_head *bh, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	struct block_device *bdev = bh->b_bdev;
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}
static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct address_space *mapping = inode->i_mapping;
	struct block_device *bdev = bh->b_bdev;
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	if (dax_map_atomic(bdev, &dax) < 0) {
		error = PTR_ERR(dax.addr);
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(dax.addr, PAGE_SIZE);
		wmb_pmem();
	}
	dax_unmap_atomic(bdev, &dax);

	error = vm_insert_mixed(vma, vaddr, dax.pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}
/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, then it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, inode, &bh, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent. If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released. We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);
/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written (see __dax_fault)
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
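
/*
 * Illustrative sketch (assumed, not from this file): a filesystem's
 * ->fault handler simply forwards to dax_fault(). "example_get_block"
 * is a hypothetical stand-in; a fs without unwritten extents passes
 * NULL for complete_unwritten, as the __dax_fault() comment describes.
 */
#if 0	/* usage sketch only */
static int example_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, example_get_block, NULL);
}
#endif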
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

static void __dax_dbg(struct buffer_head *bh, unsigned long address,
		const char *reason, const char *fn)
{
	if (bh) {
		char bname[BDEVNAME_SIZE];
		bdevname(bh->b_bdev, bname);
		pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
			"length %zd fallback: %s\n", fn, current->comm,
			address, bname, bh->b_state, (u64)bh->b_blocknr,
			bh->b_size, reason);
	} else {
		pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
			current->comm, address, reason);
	}
}

#define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")
int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	struct block_device *bdev;
	pgoff_t size, pgoff;
	sector_t block;
	int result = 0;

	/* dax pmd mappings require pfn_t_devmap() */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED)) {
		split_huge_pmd(vma, pmd, address);
		dax_pmd_dbg(NULL, address, "cow write");
		return VM_FAULT_FALLBACK;
	}
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start) {
		dax_pmd_dbg(NULL, address, "vma start unaligned");
		return VM_FAULT_FALLBACK;
	}
	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
		dax_pmd_dbg(NULL, address, "vma end unaligned");
		return VM_FAULT_FALLBACK;
	}

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(NULL, address,
				"offset + huge page size > file size");
		return VM_FAULT_FALLBACK;
	}

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	if (get_block(inode, block, &bh, write) != 0)
		return VM_FAULT_SIGBUS;
	bdev = bh.b_bdev;
	i_mmap_lock_read(mapping);

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
		dax_pmd_dbg(&bh, address, "allocated block too small");
		goto fallback;
	}

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_read(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_read(mapping);
	}

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(&bh, address, "pgoff unaligned");
		goto fallback;
	}

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page)) {
			dax_pmd_dbg(&bh, address, "no zero page");
			goto fallback;
		}

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			dax_pmd_dbg(&bh, address, "pmd already present");
			goto fallback;
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: <zero> sect: %llx\n",
				__func__, current->comm, address,
				(unsigned long long) to_sector(&bh, inode));

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PMD_SIZE,
		};
		long length = dax_map_atomic(bdev, &dax);

		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if (length < PMD_SIZE) {
			dax_pmd_dbg(&bh, address, "dax-length too small");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}
		if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
			dax_pmd_dbg(&bh, address, "pfn unaligned");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		if (!pfn_t_devmap(dax.pfn)) {
			dax_unmap_atomic(bdev, &dax);
			dax_pmd_dbg(&bh, address, "pfn not in memmap");
			goto fallback;
		}

		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			clear_pmem(dax.addr, PMD_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}
		dax_unmap_atomic(bdev, &dax);

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: %lx sect: %llx\n",
				__func__, current->comm, address,
				pfn_t_to_pfn(dax.pfn),
				(unsigned long long) dax.sector);
		result |= vmf_insert_pfn_pmd(vma, address, pmd,
				dax.pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);
/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The faulting address
 * @pmd: The PMD to install a huge entry into
 * @flags: The FAULT_FLAG_* flags describing the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written (see __dax_fault)
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
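
/*
 * Illustrative sketch (assumed, not from this file): with the handlers
 * above exported, a filesystem wires them into its DAX
 * vm_operations_struct and selects it at mmap time. All "example_*"
 * names are hypothetical stand-ins for fs-specific wrappers.
 */
#if 0	/* usage sketch only */
static const struct vm_operations_struct example_dax_vm_ops = {
	.fault		= example_dax_fault,
	.pmd_fault	= example_dax_pmd_fault,
	.pfn_mkwrite	= example_dax_pfn_mkwrite,
};

static int example_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &example_dax_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}
#endif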
/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		struct block_device *bdev = bh.b_bdev;
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PAGE_CACHE_SIZE,
		};

		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		wmb_pmem();
		dax_unmap_atomic(bdev, &dax);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
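
/*
 * Illustrative sketch (assumed, not from this file): a hole-punch
 * implementation zeroes the partial page at each end of the punched
 * range, one page at a time as the BUG_ON above requires.
 * "example_get_block", "start", "end" and "err" are hypothetical.
 */
#if 0	/* usage sketch only */
	unsigned partial = start & (PAGE_CACHE_SIZE - 1);

	if (partial)
		err = dax_zero_page_range(inode, start,
				PAGE_CACHE_SIZE - partial, example_get_block);
	if (!err && (end & (PAGE_CACHE_SIZE - 1)))
		err = dax_zero_page_range(inode, end & PAGE_CACHE_MASK,
				end & (PAGE_CACHE_SIZE - 1),
				example_get_block);
#endif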
/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
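
/*
 * Illustrative sketch (assumed, not from this file): in a
 * setattr/truncate path, the DAX variant replaces
 * block_truncate_page() for the partial last page.
 * "example_get_block" and "newsize" are hypothetical stand-ins.
 */
#if 0	/* usage sketch only */
	if (IS_DAX(inode))
		error = dax_truncate_page(inode, newsize, example_get_block);
	else
		error = block_truncate_page(inode->i_mapping, newsize,
					    example_get_block);
#endif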