/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
/*
 * We use the lowest available bit in the exceptional entry for locking, and
 * the other two bits to determine the entry type. In total, three special
 * bits.
 */
#define RADIX_DAX_SHIFT	(RADIX_TREE_EXCEPTIONAL_SHIFT + 3)
#define RADIX_DAX_PTE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_PMD	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_TYPE_MASK	(RADIX_DAX_PTE | RADIX_DAX_PMD)
#define RADIX_DAX_TYPE(entry)	((unsigned long)entry & RADIX_DAX_TYPE_MASK)
#define RADIX_DAX_SECTOR(entry)	(((unsigned long)entry >> RADIX_DAX_SHIFT))
#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
		RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE) | \
		RADIX_TREE_EXCEPTIONAL_ENTRY))
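
/*
 * Worked example for the macros above (an illustrative sketch, not part of
 * the original file; it assumes RADIX_TREE_EXCEPTIONAL_SHIFT == 2, as in
 * contemporary kernels, so RADIX_DAX_SHIFT == 5):
 *
 *	void *entry = RADIX_DAX_ENTRY(8, false);
 *		// == (8 << 5) | RADIX_DAX_PTE | RADIX_TREE_EXCEPTIONAL_ENTRY
 *	RADIX_DAX_TYPE(entry);		// == RADIX_DAX_PTE
 *	RADIX_DAX_SECTOR(entry);	// == 8; type and lock bits shift away
 */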
/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);
static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
					      pgoff_t index)
{
	unsigned long hash = hash_long((unsigned long)mapping ^ index,
				       DAX_WAIT_TABLE_BITS);

	return wait_table + hash;
}
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = (void __pmem *) ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = (void __pmem *) ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}
static void dax_unmap_atomic(struct block_device *bdev,
			     const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}
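
/*
 * Usage sketch for the pair above (illustrative, not part of the original
 * file): callers fill in dax.sector and dax.size, map, use dax.addr and
 * dax.pfn, then unmap.  dax_unmap_atomic() is safe even after a failed map
 * because it checks IS_ERR(dax->addr):
 *
 *	struct blk_dax_ctl dax = { .sector = sector, .size = PAGE_SIZE };
 *
 *	if (dax_map_atomic(bdev, &dax) < 0)
 *		return PTR_ERR(dax.addr);
 *	memcpy_from_pmem(buf, dax.addr, PAGE_SIZE);
 *	dax_unmap_atomic(bdev, &dax);
 */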
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}
static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}
/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}
static sector_t to_sector(const struct buffer_head *bh,
		const struct inode *inode)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

	return sector;
}
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false, need_wmb = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = (void __pmem *) ERR_PTR(-EIO),
	};
	unsigned blkbits = inode->i_blkbits;
	sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
								>> blkbits;

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
				/*
				 * We allow uninitialized buffers for writes
				 * beyond EOF as those cannot race with faults
				 */
				WARN_ON_ONCE(
					(buffer_new(bh) && block < file_blks) ||
					(rw == WRITE && buffer_unwritten(bh)));
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				dax.addr += first;
				size = map_len - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	if (need_wmb)
		wmb_pmem();
	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}
/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_lock(inode);

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_unlock(inode);

	if (end_io) {
		int err;

		err = end_io(iocb, pos, retval, bh.b_private);
		if (err)
			retval = err;
	}

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
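
/*
 * Caller sketch (illustrative; modeled on an ext2-style ->direct_IO method,
 * with fs_dax_direct_IO and fs_get_block as hypothetical names):
 *
 *	static ssize_t fs_dax_direct_IO(struct kiocb *iocb,
 *			struct iov_iter *iter, loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		return dax_do_io(iocb, inode, iter, offset, fs_get_block,
 *				NULL, DIO_LOCKING);
 *	}
 */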
/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t index;
};

struct wait_exceptional_entry_queue {
	wait_queue_t wait;
	struct exceptional_entry_key key;
};

static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->index != ewait->key.index)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}
/*
 * Check whether the given slot is locked. The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked. The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked. The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(slot, (void *)entry);
	return (void *)entry;
}
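
/*
 * Typical locking sequence built from the helpers above (an illustrative
 * sketch, not code from this file; get_unlocked_mapping_entry() and
 * put_locked_mapping_entry() are defined below).  All slot manipulation
 * happens under mapping->tree_lock; the RADIX_DAX_ENTRY_LOCK bit is what
 * keeps the entry locked once tree_lock is dropped:
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *	entry = lock_slot(mapping, slot);	// sets RADIX_DAX_ENTRY_LOCK
 *	spin_unlock_irq(&mapping->tree_lock);
 *	// ... operate on the entry without tree_lock ...
 *	put_locked_mapping_entry(mapping, index, entry); // unlock and wake
 */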
/*
 * Lookup entry in radix tree, wait for it to become unlocked if it is
 * an exceptional entry, and return it. The caller must call
 * put_unlocked_mapping_entry() if it decided not to lock the entry or
 * put_locked_mapping_entry() if it locked the entry and now wants to
 * unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *ret, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;
	ewait.key.mapping = mapping;
	ewait.key.index = index;

	for (;;) {
		ret = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					  &slot);
		if (!ret || !radix_tree_exceptional_entry(ret) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return ret;
		}
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}
/*
 * Find radix tree entry at given index. If it points to a page, return with
 * the page locked. If it points to an exceptional entry, return with the
 * radix tree entry locked. If the radix tree doesn't contain the given index,
 * create an empty exceptional entry for the index and return with it locked.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *ret, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	ret = get_unlocked_mapping_entry(mapping, index, &slot);
	/* No entry for given index? Make sure radix tree is big enough. */
	if (!ret) {
		int err;

		spin_unlock_irq(&mapping->tree_lock);
		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err)
			return ERR_PTR(err);
		ret = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
			       RADIX_DAX_ENTRY_LOCK);
		spin_lock_irq(&mapping->tree_lock);
		err = radix_tree_insert(&mapping->page_tree, index, ret);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/* Someone already created the entry? */
			if (err == -EEXIST)
				goto restart;
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return ret;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(ret)) {
		struct page *page = ret;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	ret = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
				   pgoff_t index, bool wake_all)
{
	wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq)) {
		struct exceptional_entry_key key;

		key.mapping = mapping;
		key.index = index;
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
	}
}
static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *ret, **slot;

	spin_lock_irq(&mapping->tree_lock);
	ret = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!ret || !radix_tree_exceptional_entry(ret) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, false);
}
static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		unlock_page(entry);
		put_page(entry);
	} else {
		unlock_mapping_entry(mapping, index);
	}
}
/*
 * Called when we are done with a radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, false);
}
/*
 * Delete exceptional DAX entry at @index from @mapping. Wait for the radix
 * tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry;
	int ret = 0;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	/*
	 * This gets called from the truncate / punch_hole path. As such, the
	 * caller must hold locks protecting against concurrent modifications
	 * of the radix tree (usually fs-private i_mmap_sem for writing).
	 * Since the caller has seen an exceptional entry for this index, we
	 * better find it at that index as well...
	 */
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
		spin_unlock_irq(&mapping->tree_lock);
		return ret;
	}
	radix_tree_delete(&mapping->page_tree, index);
	mapping->nrexceptional--;
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, true);

	return 1;
}
/*
 * The user has performed a load from a hole in the file. Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files. We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct page *page;

	/* Hole page already exists? Return it... */
	if (!radix_tree_exceptional_entry(entry)) {
		vmf->page = entry;
		return VM_FAULT_LOCKED;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page) {
		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
		return VM_FAULT_OOM;
	}
	vmf->page = page;
	return VM_FAULT_LOCKED;
}
static int copy_user_bh(struct page *to, struct inode *inode,
			struct buffer_head *bh, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	struct block_device *bdev = bh->b_bdev;
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}
#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
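
/*
 * Worked example (illustrative, not from the original file): with 4K pages
 * and 2M PMDs, PMD_MASK >> PAGE_SHIFT clears the low 9 bits of a page
 * index, so DAX_PMD_INDEX() rounds an index down to the first page of its
 * 512-page PMD block, e.g. DAX_PMD_INDEX(1000) == 512.
 */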
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = (void *)((unsigned long)RADIX_DAX_ENTRY(sector, false) |
			RADIX_DAX_ENTRY_LOCK);
	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = radix_tree_insert(page_tree, index, new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else {
		void *ret, **slot;

		ret = __radix_tree_lookup(page_tree, index, NULL, &slot);
		WARN_ON_ONCE(ret != entry);
		radix_tree_replace_slot(slot, new_entry);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need the hole page anymore, it has been replaced
		 * with a locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
	}
	return new_entry;
}
static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int type = RADIX_DAX_TYPE(entry);
	struct radix_tree_node *node;
	struct blk_dax_ctl dax;
	void **slot;
	int ret = 0;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;

	/* another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto unlock;

	if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
		ret = -EIO;
		goto unlock;
	}

	dax.sector = RADIX_DAX_SECTOR(entry);
	dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0)
		return ret;

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	wb_cache_pmem(dax.addr, dax.size);

	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	return ret;

 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index, pmd_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;
	void *entry;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;
	pmd_index = DAX_PMD_INDEX(start_index);

	rcu_read_lock();
	entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
	rcu_read_unlock();

	/* see if the start of our range is covered by a PMD entry */
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
		start_index = pmd_index;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
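
/*
 * Caller sketch (illustrative; modeled on the ext4-style ->writepages hook,
 * with fs_dax_writepages as a hypothetical name):
 *
 *	static int fs_dax_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */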
static int dax_insert_mapping(struct address_space *mapping,
		struct buffer_head *bh, void **entryp,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct block_device *bdev = bh->b_bdev;
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, mapping->host),
		.size = bh->b_size,
	};
	void *ret;
	void *entry = *entryp;
	int error;

	i_mmap_lock_read(mapping);

	if (dax_map_atomic(bdev, &dax) < 0) {
		error = PTR_ERR(dax.addr);
		goto out;
	}
	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
	if (IS_ERR(ret)) {
		error = PTR_ERR(ret);
		goto out;
	}
	*entryp = ret;

	error = vm_insert_mixed(vma, vaddr, dax.pfn);
 out:
	i_mmap_unlock_read(mapping);

	return error;
}
/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	void *entry;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;

	entry = grab_mapping_entry(mapping, vmf->pgoff);
	if (IS_ERR(entry)) {
		error = PTR_ERR(entry);
		goto out;
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_entry;

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, inode, &bh, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_entry;
		if (!radix_tree_exceptional_entry(entry)) {
			vmf->page = entry;
		} else {
			unlock_mapping_entry(mapping, vmf->pgoff);
			i_mmap_lock_read(mapping);
			vmf->page = NULL;
		}
		return VM_FAULT_LOCKED;
	}

	if (!buffer_mapped(&bh)) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_entry;
		} else {
			return dax_load_hole(mapping, entry, vmf);
		}
	}

	/* Filesystem should not return unwritten buffers to us! */
	WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
	error = dax_insert_mapping(mapping, &bh, &entry, vma, vmf);
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;
}
EXPORT_SYMBOL(__dax_fault);
/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
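
/*
 * Wiring sketch (illustrative; the fs_* names are hypothetical, but the
 * shape follows how filesystems such as ext2 hook these helpers up): each
 * handler is a thin wrapper that passes the filesystem's get_block to
 * dax_fault() or dax_pmd_fault() (defined below), or calls
 * dax_pfn_mkwrite() after taking whatever fs-level locks the fault path
 * needs.
 *
 *	static const struct vm_operations_struct fs_dax_vm_ops = {
 *		.fault		= fs_dax_fault,		// calls dax_fault()
 *		.pmd_fault	= fs_dax_pmd_fault,	// calls dax_pmd_fault()
 *		.pfn_mkwrite	= fs_dax_pfn_mkwrite,	// calls dax_pfn_mkwrite()
 *	};
 */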
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the function below.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
static void __dax_dbg(struct buffer_head *bh, unsigned long address,
		const char *reason, const char *fn)
{
	if (bh) {
		char bname[BDEVNAME_SIZE];
		bdevname(bh->b_bdev, bname);
		pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
			"length %zd fallback: %s\n", fn, current->comm,
			address, bname, bh->b_state, (u64)bh->b_blocknr,
			bh->b_size, reason);
	} else {
		pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
			current->comm, address, reason);
	}
}

#define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")
int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	struct block_device *bdev;
	pgoff_t size, pgoff;
	sector_t block;
	int result = 0;
	bool alloc = false;

	/* dax pmd mappings require pfn_t_devmap() */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED)) {
		split_huge_pmd(vma, pmd, address);
		dax_pmd_dbg(NULL, address, "cow write");
		return VM_FAULT_FALLBACK;
	}
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start) {
		dax_pmd_dbg(NULL, address, "vma start unaligned");
		return VM_FAULT_FALLBACK;
	}
	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
		dax_pmd_dbg(NULL, address, "vma end unaligned");
		return VM_FAULT_FALLBACK;
	}

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(NULL, address,
				"offset + huge page size > file size");
		return VM_FAULT_FALLBACK;
	}

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;

	if (get_block(inode, block, &bh, 0) != 0)
		return VM_FAULT_SIGBUS;

	if (!buffer_mapped(&bh) && write) {
		if (get_block(inode, block, &bh, 1) != 0)
			return VM_FAULT_SIGBUS;
		alloc = true;
		WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
	}

	bdev = bh.b_bdev;

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
		dax_pmd_dbg(&bh, address, "allocated block too small");
		return VM_FAULT_FALLBACK;
	}

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (alloc) {
		loff_t lstart = pgoff << PAGE_SHIFT;
		loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */

		truncate_pagecache_range(inode, lstart, lend);
	}

	i_mmap_lock_read(mapping);

	if (!write && !buffer_mapped(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page)) {
			dax_pmd_dbg(&bh, address, "no zero page");
			goto fallback;
		}

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			dax_pmd_dbg(&bh, address, "pmd already present");
			goto fallback;
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: <zero> sect: %llx\n",
				__func__, current->comm, address,
				(unsigned long long) to_sector(&bh, inode));

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PMD_SIZE,
		};
		long length = dax_map_atomic(bdev, &dax);

		if (length < 0) {
			dax_pmd_dbg(&bh, address, "dax-error fallback");
			goto fallback;
		}
		if (length < PMD_SIZE) {
			dax_pmd_dbg(&bh, address, "dax-length too small");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}
		if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
			dax_pmd_dbg(&bh, address, "pfn unaligned");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		if (!pfn_t_devmap(dax.pfn)) {
			dax_unmap_atomic(bdev, &dax);
			dax_pmd_dbg(&bh, address, "pfn not in memmap");
			goto fallback;
		}
		dax_unmap_atomic(bdev, &dax);

		/*
		 * For PTE faults we insert a radix tree entry for reads, and
		 * leave it clean.  Then on the first write we dirty the radix
		 * tree entry via the dax_pfn_mkwrite() path.  This sequence
		 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
		 * call into get_block() to translate the pgoff to a sector in
		 * order to be able to create a new radix tree entry.
		 *
		 * The PMD path doesn't have an equivalent to
		 * dax_pfn_mkwrite(), though, so for a read followed by a
		 * write we traverse all the way through __dax_pmd_fault()
		 * twice.  This means we can just skip inserting a radix tree
		 * entry completely on the initial read and just wait until
		 * the write to insert a dirty entry.
		 */
		if (write) {
			/*
			 * We should insert radix-tree entry and dirty it here.
			 * For now this is broken...
			 */
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: %lx sect: %llx\n",
				__func__, current->comm, address,
				pfn_t_to_pfn(dax.pfn),
				(unsigned long long) dax.sector);
		result |= vmf_insert_pfn_pmd(vma, address, pmd,
				dax.pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);
/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The faulting address
 * @pmd: The PMD to install a huge mapping in
 * @flags: The fault flags (FAULT_FLAG_*)
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	void *entry;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	put_unlocked_mapping_entry(mapping, index, entry);
out:
	spin_unlock_irq(&mapping->tree_lock);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	struct blk_dax_ctl dax = {
		.sector		= sector,
		.size		= PAGE_SIZE,
	};

	if (dax_range_is_aligned(bdev, offset, length)) {
		sector_t start_sector = dax.sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				length >> 9, GFP_NOFS, true);
	} else {
		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		wmb_pmem();
		dax_unmap_atomic(bdev, &dax);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
			get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (PAGE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0 || !buffer_written(&bh))
		return err;

	return __dax_zero_page_range(bh.b_bdev, to_sector(&bh, inode),
			offset, length);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
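
/*
 * Caller sketch (illustrative; fs_setsize and fs_get_block are hypothetical
 * names, but the shape follows an ext2-style truncate path):
 *
 *	static int fs_setsize(struct inode *inode, loff_t newsize)
 *	{
 *		int error;
 *
 *		if (IS_DAX(inode))
 *			error = dax_truncate_page(inode, newsize,
 *					fs_get_block);
 *		else
 *			error = block_truncate_page(inode->i_mapping,
 *					newsize, fs_get_block);
 *		return error;
 *	}
 */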