/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 only as published by the
 * Free Software Foundation.
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or
 * cache failure.
 *
 * In addition there is a "soft offline" entry point that allows stopping
 * the use of suspicious, not-yet-corrupted pages without killing anything.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * It can be very tempting to add handling for obscure cases here.
 * In general any code for handling new cases should only be added iff:
 * - You know how to test it.
 * - You have a test that can be added to mce-test
 *   https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
 * - The case actually shows up as a frequent (top 10) page state in
 *   tools/vm/page-types when running a real workload.
 *
 * There are several operations here with exponential complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list and
 * has non-linear complexity with the number of processes. But since memory
 * corruptions are rare we hope to get away with this. This avoids impacting
 * the core VM.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/kfifo.h>
#include "internal.h"
int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
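
/*
 * Both knobs above are exposed to user space as sysctls (see
 * Documentation/sysctl/vm.txt). A minimal usage sketch from a shell:
 *
 *	# send SIGBUS as soon as the corrupted page is unmapped, instead
 *	# of waiting until it is actually accessed:
 *	echo 1 > /proc/sys/vm/memory_failure_early_kill
 *
 *	# panic on uncorrected memory errors instead of trying to recover:
 *	echo 0 > /proc/sys/vm/memory_failure_recovery
 */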
#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
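
/*
 * These filter values are normally poked from user space through the
 * debugfs files created by the hwpoison-inject module (see
 * mm/hwpoison-inject.c). An illustrative sketch, assuming debugfs is
 * mounted in the usual place and the file names match that module:
 *
 *	echo 1 > /sys/kernel/debug/hwpoison/corrupt-filter-enable
 *	echo 8 > /sys/kernel/debug/hwpoison/corrupt-filter-dev-major
 */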
static int hwpoison_filter_dev(struct page *p)
{
	struct address_space *mapping;
	dev_t dev;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)
		return 0;

	/*
	 * page_mapping() does not accept slab pages.
	 */
	if (PageSlab(p))
		return -EINVAL;

	mapping = page_mapping(p);
	if (mapping == NULL || mapping->host == NULL)
		return -EINVAL;

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
		return -EINVAL;
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
		return -EINVAL;

	return 0;
}
static int hwpoison_filter_flags(struct page *p)
{
	if (!hwpoison_filter_flags_mask)
		return 0;

	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
				hwpoison_filter_flags_value)
		return 0;
	else
		return -EINVAL;
}
/*
 * This allows stress tests to limit test scope to a collection of tasks
 * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (eg. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Lastly, due to race conditions this filter
 * can only guarantee that the page either belongs to the memcg tasks, or is
 * a freed page.
 */
#ifdef CONFIG_MEMCG_SWAP
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
	struct mem_cgroup *mem;
	struct cgroup_subsys_state *css;
	unsigned long ino;

	if (!hwpoison_filter_memcg)
		return 0;

	mem = try_get_mem_cgroup_from_page(p);
	if (!mem)
		return -EINVAL;

	css = mem_cgroup_css(mem);
	ino = cgroup_ino(css->cgroup);
	css_put(css);

	if (ino != hwpoison_filter_memcg)
		return -EINVAL;

	return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif
int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	if (hwpoison_filter_dev(p))
		return -EINVAL;

	if (hwpoison_filter_flags(p))
		return -EINVAL;

	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}
#else
int hwpoison_filter(struct page *p)
{
	return 0;
}
#endif

EXPORT_SYMBOL_GPL(hwpoison_filter);
/*
 * Send all the processes who have the page mapped a signal.
 * ``action optional'' if they are not immediately affected by the error
 * ``action required'' if the error happened in the current execution context
 */
static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
			unsigned long pfn, struct page *page, int flags)
{
	struct siginfo si;
	int ret;

	printk(KERN_ERR
		"MCE %#lx: Killing %s:%d due to hardware memory corruption\n",
		pfn, t->comm, t->pid);
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_addr = (void *)addr;
#ifdef __ARCH_SI_TRAPNO
	si.si_trapno = trapno;
#endif
	si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;

	if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
		si.si_code = BUS_MCEERR_AR;
		ret = force_sig_info(SIGBUS, &si, current);
	} else {
		/*
		 * Don't use force here, it's convenient if the signal
		 * can be temporarily blocked.
		 * This could cause a loop when the user sets SIGBUS
		 * to SIG_IGN, but hopefully no one will do that?
		 */
		si.si_code = BUS_MCEERR_AO;
		ret = send_sig_info(SIGBUS, &si, t);	/* synchronous? */
	}
	if (ret < 0)
		printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
		       t->comm, t->pid, ret);
	return ret;
}
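
/*
 * From user space this arrives as a SIGBUS with si_code BUS_MCEERR_AO or
 * BUS_MCEERR_AR and si_addr pointing into the failing mapping. A sketch of
 * how a process opts in to the early ("action optional") kills, via
 * prctl(2):
 *
 *	prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
 *
 * which sets the PF_MCE_PROCESS/PF_MCE_EARLY task flags tested in
 * find_early_kill_thread() below.
 */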
/*
 * When an unknown page type is encountered drain as many buffers as possible
 * in the hope of turning the page into an LRU or free page, which we can
 * handle.
 */
void shake_page(struct page *p, int access)
{
	if (!PageSlab(p)) {
		lru_add_drain_all();
		if (PageLRU(p))
			return;
		drain_all_pages(page_zone(p));
		if (PageLRU(p) || is_free_buddy_page(p))
			return;
	}

	/*
	 * Only call shrink_node_slabs here (which would also shrink
	 * other caches) if access is not potentially fatal.
	 */
	if (access)
		drop_slab_node(page_to_nid(p));
}
EXPORT_SYMBOL_GPL(shake_page);
/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handling it.
 */

struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	char addr_valid;
};

/*
 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do. We just print a message and ignore otherwise.
 */
/*
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 * TBD would GFP_NOIO be enough?
 */
static void add_to_kill(struct task_struct *tsk, struct page *p,
		       struct vm_area_struct *vma,
		       struct list_head *to_kill,
		       struct to_kill **tkc)
{
	struct to_kill *tk;

	if (*tkc) {
		tk = *tkc;
		*tkc = NULL;
	} else {
		tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
		if (!tk) {
			printk(KERN_ERR
		"MCE: Out of memory while machine check handling\n");
			return;
		}
	}
	tk->addr = page_address_in_vma(p, vma);
	tk->addr_valid = 1;

	/*
	 * In theory we don't have to kill when the page was
	 * munmapped. But it could also be a mremap. Since that's
	 * likely very rare kill anyway just out of paranoia, but use
	 * a SIGKILL because the error is not contained anymore.
	 */
	if (tk->addr == -EFAULT) {
		pr_info("MCE: Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
		tk->addr_valid = 0;
	}
	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}
/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when FORCEKILL is set, otherwise just free the list
 * (this is used for clean pages which do not need killing).
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
			  int fail, struct page *page, unsigned long pfn,
			  int flags)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe (tk, next, to_kill, nd) {
		if (forcekill) {
			/*
			 * In case something went wrong with munmapping
			 * make sure the process doesn't catch the
			 * signal and then access the memory. Just kill it.
			 */
			if (fail || tk->addr_valid == 0) {
				printk(KERN_ERR
		"MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
				force_sig(SIGKILL, tk->tsk);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyway.
			 */
			else if (kill_proc(tk->tsk, tk->addr, trapno,
				      pfn, page, flags) < 0)
				printk(KERN_ERR
		"MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
		}
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}
/*
 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
 * on behalf of the thread group. Return task_struct of the (first found)
 * dedicated thread if found, and return NULL otherwise.
 *
 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
 * have to call rcu_read_lock/unlock() in this function.
 */
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
	struct task_struct *t;

	for_each_thread(tsk, t)
		if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
			return t;
	return NULL;
}

/*
 * Determine whether a given process is an "early kill" process which expects
 * to be signaled when some page under the process is hwpoisoned.
 * Return task_struct of the dedicated thread (main thread unless explicitly
 * specified) if the process is "early kill," and otherwise returns NULL.
 */
static struct task_struct *task_early_kill(struct task_struct *tsk,
					   int force_early)
{
	struct task_struct *t;

	if (!tsk->mm)
		return NULL;
	if (force_early)
		return tsk;
	t = find_early_kill_thread(tsk);
	if (t)
		return t;
	if (sysctl_memory_failure_early_kill)
		return tsk;
	return NULL;
}
/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
			      struct to_kill **tkc, int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct anon_vma *av;
	pgoff_t pgoff;

	av = page_lock_anon_vma_read(page);
	if (av == NULL)	/* Not actually mapped anymore */
		return;

	pgoff = page_to_pgoff(page);
	read_lock(&tasklist_lock);
	for_each_process (tsk) {
		struct anon_vma_chain *vmac;
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
					       pgoff, pgoff) {
			vma = vmac->vma;
			if (!page_mapped_in_vma(page, vma))
				continue;
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, vma, to_kill, tkc);
		}
	}
	read_unlock(&tasklist_lock);
	page_unlock_anon_vma_read(av);
}
/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(struct page *page, struct list_head *to_kill,
			      struct to_kill **tkc, int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct address_space *mapping = page->mapping;

	i_mmap_lock_read(mapping);
	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		pgoff_t pgoff = page_to_pgoff(page);
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
				      pgoff) {
			/*
			 * Send early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its ptes.
			 * Assume applications who requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, vma, to_kill, tkc);
		}
	}
	read_unlock(&tasklist_lock);
	i_mmap_unlock_read(mapping);
}
/*
 * Collect the processes who have the corrupted page mapped to kill.
 * This is done in two steps for locking reasons.
 * First preallocate one tokill structure outside the spin locks,
 * so that we can kill at least one process reasonably reliably.
 */
static void collect_procs(struct page *page, struct list_head *tokill,
				int force_early)
{
	struct to_kill *tk;

	if (!page->mapping)
		return;

	tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
	if (!tk)
		return;
	if (PageAnon(page))
		collect_procs_anon(page, tokill, &tk, force_early);
	else
		collect_procs_file(page, tokill, &tk, force_early);
	kfree(tk);
}
/*
 * Error handlers for various types of pages.
 */

enum outcome {
	IGNORED,	/* Error: cannot be handled */
	FAILED,		/* Error: handling failed */
	DELAYED,	/* Will be handled later */
	RECOVERED,	/* Successfully recovered */
};

static const char *action_name[] = {
	[IGNORED] = "Ignored",
	[FAILED] = "Failed",
	[DELAYED] = "Delayed",
	[RECOVERED] = "Recovered",
};
enum action_page_type {
	MSG_KERNEL,
	MSG_KERNEL_HIGH_ORDER,
	MSG_SLAB,
	MSG_DIFFERENT_COMPOUND,
	MSG_POISONED_HUGE,
	MSG_HUGE,
	MSG_FREE_HUGE,
	MSG_UNMAP_FAILED,
	MSG_DIRTY_SWAPCACHE,
	MSG_CLEAN_SWAPCACHE,
	MSG_DIRTY_MLOCKED_LRU,
	MSG_CLEAN_MLOCKED_LRU,
	MSG_DIRTY_UNEVICTABLE_LRU,
	MSG_CLEAN_UNEVICTABLE_LRU,
	MSG_DIRTY_LRU,
	MSG_CLEAN_LRU,
	MSG_TRUNCATED_LRU,
	MSG_BUDDY,
	MSG_BUDDY_2ND,
	MSG_UNKNOWN,
};
static const char * const action_page_types[] = {
	[MSG_KERNEL]			= "reserved kernel page",
	[MSG_KERNEL_HIGH_ORDER]		= "high-order kernel page",
	[MSG_SLAB]			= "kernel slab page",
	[MSG_DIFFERENT_COMPOUND]	= "different compound page after locking",
	[MSG_POISONED_HUGE]		= "huge page already hardware poisoned",
	[MSG_HUGE]			= "huge page",
	[MSG_FREE_HUGE]			= "free huge page",
	[MSG_UNMAP_FAILED]		= "unmapping failed page",
	[MSG_DIRTY_SWAPCACHE]		= "dirty swapcache page",
	[MSG_CLEAN_SWAPCACHE]		= "clean swapcache page",
	[MSG_DIRTY_MLOCKED_LRU]		= "dirty mlocked LRU page",
	[MSG_CLEAN_MLOCKED_LRU]		= "clean mlocked LRU page",
	[MSG_DIRTY_UNEVICTABLE_LRU]	= "dirty unevictable LRU page",
	[MSG_CLEAN_UNEVICTABLE_LRU]	= "clean unevictable LRU page",
	[MSG_DIRTY_LRU]			= "dirty LRU page",
	[MSG_CLEAN_LRU]			= "clean LRU page",
	[MSG_TRUNCATED_LRU]		= "already truncated LRU page",
	[MSG_BUDDY]			= "free buddy page",
	[MSG_BUDDY_2ND]			= "free buddy page (2nd try)",
	[MSG_UNKNOWN]			= "unknown page",
};
/*
 * XXX: It is possible that a page is isolated from LRU cache,
 * and then kept in swap cache or failed to remove from page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct page *p)
{
	if (!isolate_lru_page(p)) {
		/*
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the page is unpoison-and-freed.
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);
		/*
		 * drop the page count elevated by isolate_lru_page()
		 */
		page_cache_release(p);
		return 0;
	}
	return -EIO;
}
/*
 * Error hit kernel page.
 * Do nothing, try to be lucky and not touch this instead. For a few cases we
 * could be more sophisticated.
 */
static int me_kernel(struct page *p, unsigned long pfn)
{
	return IGNORED;
}

/*
 * Page in unknown state. Do nothing.
 */
static int me_unknown(struct page *p, unsigned long pfn)
{
	printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
	return FAILED;
}
/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
	int err;
	int ret = FAILED;
	struct address_space *mapping;

	delete_from_lru_cache(p);

	/*
	 * For anonymous pages we're done; the only reference left
	 * should be the one m_f() holds.
	 */
	if (PageAnon(p))
		return RECOVERED;

	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch".
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
	mapping = page_mapping(p);
	if (!mapping) {
		/*
		 * Page has been torn down in the meanwhile
		 */
		return FAILED;
	}

	/*
	 * Truncation is a bit tricky. Enable it per file system for now.
	 *
	 * Open: to take i_mutex or not for this? Right now we don't.
	 */
	if (mapping->a_ops->error_remove_page) {
		err = mapping->a_ops->error_remove_page(mapping, p);
		if (err != 0) {
			printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
					pfn, err);
		} else if (page_has_private(p) &&
				!try_to_release_page(p, GFP_NOIO)) {
			pr_info("MCE %#lx: failed to release buffers\n", pfn);
		} else {
			ret = RECOVERED;
		}
	} else {
		/*
		 * If the file system doesn't support it just invalidate.
		 * This fails on dirty pages or anything with private pages.
		 */
		if (invalidate_inode_page(p))
			ret = RECOVERED;
		else
			printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
				pfn);
	}
	return ret;
}
/*
 * Dirty pagecache page
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page *p, unsigned long pfn)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);
	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO error will be reported by write(), fsync(), etc.
		 * who check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will be only reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass an IO error:
		 * first through the AS_EIO flag in the address space
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling the
		 * only mechanism open to use is through AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped. If an
		 * application assumes it will always get an error on
		 * fsync, but does other operations on the fd in between
		 * and the page is dropped in the meantime, the error
		 * will not be properly reported.
		 *
		 * This can already happen even without hwpoisoned
		 * pages: first on metadata IO errors (which only
		 * report through AS_EIO) or when the page is dropped
		 * at the wrong time.
		 *
		 * So right now we assume that the application DTRT on
		 * the first EIO, but we're not worse than other parts
		 * of the kernel.
		 */
		mapping_set_error(mapping, EIO);
	}

	return me_pagecache_clean(p, pfn);
}
/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (i.e. the page is freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *	- clear dirty bit to prevent IO
 *	- remove from LRU
 *	- but keep in the swap cache, so that when we return to it on
 *	  a later page fault, we know the application is accessing
 *	  corrupted data and shall be killed (we installed simple
 *	  interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	if (!delete_from_lru_cache(p))
		return DELAYED;
	else
		return FAILED;
}

static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
	delete_from_swap_cache(p);

	if (!delete_from_lru_cache(p))
		return RECOVERED;
	else
		return FAILED;
}
/*
 * Huge pages. Needs work.
 * Issues:
 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
 *   To narrow down the kill region to one page, we need to break up the pmd.
 */
static int me_huge_page(struct page *p, unsigned long pfn)
{
	int res = 0;
	struct page *hpage = compound_head(p);
	/*
	 * We can safely recover from error on free or reserved (i.e.
	 * not in-use) hugepage by dequeuing it from freelist.
	 * To check whether a hugepage is in-use or not, we can't use
	 * page->lru because it can be used in other hugepage operations,
	 * such as __unmap_hugepage_range() and gather_surplus_pages().
	 * So instead we use page_mapping() and PageAnon().
	 * We assume that this function is called with page lock held,
	 * so there is no race between isolation and mapping/unmapping.
	 */
	if (!(page_mapping(hpage) || PageAnon(hpage))) {
		res = dequeue_hwpoisoned_huge_page(hpage);
		if (!res)
			return RECOVERED;
	}
	return DELAYED;
}
/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access a page at any time
 * in its life cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty		(1UL << PG_dirty)
#define sc		(1UL << PG_swapcache)
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define writeback	(1UL << PG_writeback)
#define lru		(1UL << PG_lru)
#define swapbacked	(1UL << PG_swapbacked)
#define head		(1UL << PG_head)
#define tail		(1UL << PG_tail)
#define compound	(1UL << PG_compound)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)
static struct page_state {
	unsigned long mask;
	unsigned long res;
	enum action_page_type type;
	int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
	{ reserved,	reserved,	MSG_KERNEL,	me_kernel },
	/*
	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make a small fraction of all free pages.
	 */

	/*
	 * Could in theory check if the slab page is free or if we can drop
	 * currently unused objects without touching them. But just
	 * treat it as standard kernel for now.
	 */
	{ slab,		slab,		MSG_SLAB,	me_kernel },

#ifdef CONFIG_PAGEFLAGS_EXTENDED
	{ head,		head,		MSG_HUGE,	me_huge_page },
	{ tail,		tail,		MSG_HUGE,	me_huge_page },
#else
	{ compound,	compound,	MSG_HUGE,	me_huge_page },
#endif

	{ sc|dirty,	sc|dirty,	MSG_DIRTY_SWAPCACHE,	me_swapcache_dirty },
	{ sc|dirty,	sc,		MSG_CLEAN_SWAPCACHE,	me_swapcache_clean },

	{ mlock|dirty,	mlock|dirty,	MSG_DIRTY_MLOCKED_LRU,	me_pagecache_dirty },
	{ mlock|dirty,	mlock,		MSG_CLEAN_MLOCKED_LRU,	me_pagecache_clean },

	{ unevict|dirty, unevict|dirty,	MSG_DIRTY_UNEVICTABLE_LRU,	me_pagecache_dirty },
	{ unevict|dirty, unevict,	MSG_CLEAN_UNEVICTABLE_LRU,	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	MSG_DIRTY_LRU,	me_pagecache_dirty },
	{ lru|dirty,	lru,		MSG_CLEAN_LRU,	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		MSG_UNKNOWN,	me_unknown },
};

#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef writeback
#undef lru
#undef swapbacked
#undef head
#undef tail
#undef compound
#undef slab
#undef reserved
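
/*
 * Worked example of the matching rule: for a clean page cache page on the
 * LRU, page->flags has PG_lru set and PG_dirty clear, so the linear walk
 * fails every swapcache/mlocked/unevictable row, fails the
 * { lru|dirty, lru|dirty } row (the dirty bit differs), and stops at
 * { lru|dirty, lru }, dispatching to me_pagecache_clean(). A page matching
 * no row falls through to the catchall and me_unknown().
 */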
882 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
883 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
885 static void action_result(unsigned long pfn
, enum action_page_type type
, int result
)
887 pr_err("MCE %#lx: recovery action for %s: %s\n",
888 pfn
, action_page_types
[type
], action_name
[result
]);
891 static int page_action(struct page_state
*ps
, struct page
*p
,
897 result
= ps
->action(p
, pfn
);
899 count
= page_count(p
) - 1;
900 if (ps
->action
== me_swapcache_dirty
&& result
== DELAYED
)
904 "MCE %#lx: %s still referenced by %d users\n",
905 pfn
, action_page_types
[ps
->type
], count
);
908 action_result(pfn
, ps
->type
, result
);
910 /* Could do more checks here if page looks ok */
912 * Could adjust zone counters here to correct for the missing page.
915 return (result
== RECOVERED
|| result
== DELAYED
) ? 0 : -EBUSY
;
/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
				  int trapno, int flags, struct page **hpagep)
{
	enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
	struct address_space *mapping;
	LIST_HEAD(tokill);
	int ret;
	int kill = 1, forcekill;
	struct page *hpage = *hpagep;

	/*
	 * Here we are interested only in user-mapped pages, so skip any
	 * other types of pages.
	 */
	if (PageReserved(p) || PageSlab(p))
		return SWAP_SUCCESS;
	if (!(PageLRU(hpage) || PageHuge(p)))
		return SWAP_SUCCESS;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!page_mapped(hpage))
		return SWAP_SUCCESS;

	if (PageKsm(p)) {
		pr_err("MCE %#lx: can't handle KSM pages.\n", pfn);
		return SWAP_FAIL;
	}

	if (PageSwapCache(p)) {
		printk(KERN_ERR
		       "MCE %#lx: keeping poisoned page in swap cache\n", pfn);
		ttu |= TTU_IGNORE_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside page lock (it's recommended but not enforced).
	 */
	mapping = page_mapping(hpage);
	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
	    mapping_cap_writeback_dirty(mapping)) {
		if (page_mkclean(hpage)) {
			SetPageDirty(hpage);
		} else {
			kill = 0;
			ttu |= TTU_IGNORE_HWPOISON;
			printk(KERN_INFO
	"MCE %#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	/*
	 * First collect all the processes that have the page
	 * mapped in dirty form. This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 *
	 * Error handling: We ignore errors here because
	 * there's nothing that can be done.
	 */
	if (kill)
		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);

	ret = try_to_unmap(hpage, ttu);
	if (ret != SWAP_SUCCESS)
		printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
				pfn, page_mapcount(hpage));

	/*
	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps are done we can decide if
	 * killing is needed or not. Only kill when the page
	 * was dirty or the process is not restartable,
	 * otherwise the tokill list is merely
	 * freed. When there was a problem unmapping earlier
	 * use a more forceful uncatchable kill to prevent
	 * any accesses to the poisoned memory.
	 */
	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
	kill_procs(&tokill, forcekill, trapno,
		      ret != SWAP_SUCCESS, p, pfn, flags);

	return ret;
}
static void set_page_hwpoison_huge_page(struct page *hpage)
{
	int i;
	int nr_pages = 1 << compound_order(hpage);
	for (i = 0; i < nr_pages; i++)
		SetPageHWPoison(hpage + i);
}

static void clear_page_hwpoison_huge_page(struct page *hpage)
{
	int i;
	int nr_pages = 1 << compound_order(hpage);
	for (i = 0; i < nr_pages; i++)
		ClearPageHWPoison(hpage + i);
}
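
/*
 * Containment (and the accounting in num_poisoned_pages) is thus done in
 * hugepage units: e.g. with 4k base pages, a 2MB hugetlb page has
 * compound_order() == 9, so a single error marks 1 << 9 = 512 pages as
 * poisoned.
 */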
/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @trapno: Trap number reported in the signal to user space.
 * @flags: fine tune action taken
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 */
int memory_failure(unsigned long pfn, int trapno, int flags)
{
	struct page_state *ps;
	struct page *p;
	struct page *hpage;
	struct page *orig_head;
	int res;
	unsigned int nr_pages;
	unsigned long page_flags;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure from trap %d on page %lx", trapno, pfn);

	if (!pfn_valid(pfn)) {
		printk(KERN_ERR
		       "MCE %#lx: memory outside kernel control\n",
		       pfn);
		return -ENXIO;
	}

	p = pfn_to_page(pfn);
	orig_head = hpage = compound_head(p);
	if (TestSetPageHWPoison(p)) {
		printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
		return 0;
	}

	/*
	 * Currently errors on hugetlbfs pages are measured in hugepage units,
	 * so nr_pages should be 1 << compound_order. OTOH when errors are on
	 * transparent hugepages, they are supposed to be split and error
	 * measurement is done in normal page units. So nr_pages should be one
	 * in this case.
	 */
	if (PageHuge(p))
		nr_pages = 1 << compound_order(hpage);
	else /* normal page or thp */
		nr_pages = 1;
	atomic_long_add(nr_pages, &num_poisoned_pages);

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hand:
	 *    prep_new_page() will be the gate keeper.
	 * 2) it's a free hugepage, which is also safe:
	 *    an affected hugepage will be dequeued from hugepage freelist,
	 *    so there's no concern about reusing it ever after.
	 * 3) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up page count from 0,
	 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED) &&
		!get_page_unless_zero(hpage)) {
		if (is_free_buddy_page(p)) {
			action_result(pfn, MSG_BUDDY, DELAYED);
			return 0;
		} else if (PageHuge(hpage)) {
			/*
			 * Check "filter hit" and "race with other subpage."
			 */
			lock_page(hpage);
			if (PageHWPoison(hpage)) {
				if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
				    || (p != hpage && TestSetPageHWPoison(hpage))) {
					atomic_long_sub(nr_pages, &num_poisoned_pages);
					unlock_page(hpage);
					return 0;
				}
			}
			set_page_hwpoison_huge_page(hpage);
			res = dequeue_hwpoisoned_huge_page(hpage);
			action_result(pfn, MSG_FREE_HUGE,
				      res ? IGNORED : DELAYED);
			unlock_page(hpage);
			return res;
		} else {
			action_result(pfn, MSG_KERNEL_HIGH_ORDER, IGNORED);
			return -EBUSY;
		}
	}

	if (!PageHuge(p) && PageTransHuge(hpage)) {
		if (!PageAnon(hpage)) {
			pr_err("MCE: %#lx: non anonymous thp\n", pfn);
			if (TestClearPageHWPoison(p))
				atomic_long_sub(nr_pages, &num_poisoned_pages);
			put_page(p);
			if (p != hpage)
				put_page(hpage);
			return -EBUSY;
		}
		if (unlikely(split_huge_page(hpage))) {
			pr_err("MCE: %#lx: thp split failed\n", pfn);
			if (TestClearPageHWPoison(p))
				atomic_long_sub(nr_pages, &num_poisoned_pages);
			put_page(p);
			if (p != hpage)
				put_page(hpage);
			return -EBUSY;
		}
		VM_BUG_ON_PAGE(!page_count(p), p);
		hpage = compound_head(p);
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __set_page_locked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	if (!PageHuge(p)) {
		if (!PageLRU(p))
			shake_page(p, 0);
		if (!PageLRU(p)) {
			/*
			 * shake_page could have turned it free.
			 */
			if (is_free_buddy_page(p)) {
				if (flags & MF_COUNT_INCREASED)
					action_result(pfn, MSG_BUDDY, DELAYED);
				else
					action_result(pfn, MSG_BUDDY_2ND,
						      DELAYED);
				return 0;
			}
		}
	}

	lock_page(hpage);

	/*
	 * The page could have changed compound pages during the locking.
	 * If this happens just bail out.
	 */
	if (PageCompound(p) && compound_head(p) != orig_head) {
		action_result(pfn, MSG_DIFFERENT_COMPOUND, IGNORED);
		res = -EBUSY;
		goto out;
	}

	/*
	 * We use page flags to determine what action should be taken, but
	 * the flags can be modified by the error containment action. One
	 * example is an mlocked page, where PG_mlocked is cleared by
	 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
	 * correctly, we save a copy of the page flags at this time.
	 */
	page_flags = p->flags;

	/*
	 * unpoison always clears PG_hwpoison inside page lock
	 */
	if (!PageHWPoison(p)) {
		printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
		atomic_long_sub(nr_pages, &num_poisoned_pages);
		unlock_page(hpage);
		put_page(hpage);
		return 0;
	}
	if (hwpoison_filter(p)) {
		if (TestClearPageHWPoison(p))
			atomic_long_sub(nr_pages, &num_poisoned_pages);
		unlock_page(hpage);
		put_page(hpage);
		return 0;
	}

	if (!PageHuge(p) && !PageTransTail(p) && !PageLRU(p))
		goto identify_page_state;

	/*
	 * For error on the tail page, we should set PG_hwpoison
	 * on the head page to show that the hugepage is hwpoisoned
	 */
	if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
		action_result(pfn, MSG_POISONED_HUGE, IGNORED);
		unlock_page(hpage);
		put_page(hpage);
		return 0;
	}
	/*
	 * Set PG_hwpoison on all pages in an error hugepage,
	 * because containment is done in hugepage unit for now.
	 * Since we have done TestSetPageHWPoison() for the head page with
	 * page lock held, we can safely set PG_hwpoison bits on tail pages.
	 */
	if (PageHuge(p))
		set_page_hwpoison_huge_page(hpage);

	/*
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	wait_on_page_writeback(p);

	/*
	 * Now take care of user space mappings.
	 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
	 *
	 * When the raw error page is thp tail page, hpage points to the raw
	 * page after thp split.
	 */
	if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
	    != SWAP_SUCCESS) {
		action_result(pfn, MSG_UNMAP_FAILED, IGNORED);
		res = -EBUSY;
		goto out;
	}

	/*
	 * Torn down by someone else?
	 */
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		action_result(pfn, MSG_TRUNCATED_LRU, IGNORED);
		res = -EBUSY;
		goto out;
	}

identify_page_state:
	res = -EBUSY;
	/*
	 * The first check uses the current page flags which may not have any
	 * relevant information. The second check with the saved page flags is
	 * carried out only if the first check can't determine the page status.
	 */
	for (ps = error_states;; ps++)
		if ((p->flags & ps->mask) == ps->res)
			break;

	page_flags |= (p->flags & (1UL << PG_dirty));

	if (!ps->mask)
		for (ps = error_states;; ps++)
			if ((page_flags & ps->mask) == ps->res)
				break;
	res = page_action(ps, p, pfn);
out:
	unlock_page(hpage);
	return res;
}
EXPORT_SYMBOL_GPL(memory_failure);
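
/*
 * Usage sketch: an architecture's machine check handler typically defers to
 * process context and then calls memory_failure() on the affected pfn.
 * Illustrative only; the flag choice depends on whether the error was
 * consumed in the current execution context (cf. MF_ACTION_REQUIRED in
 * include/linux/mm.h):
 *
 *	static void arch_handle_memory_error(u64 paddr, int trapno)
 *	{
 *		unsigned long pfn = paddr >> PAGE_SHIFT;
 *
 *		memory_failure(pfn, trapno, MF_ACTION_REQUIRED);
 *	}
 */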
#define MEMORY_FAILURE_FIFO_ORDER	4
#define MEMORY_FAILURE_FIFO_SIZE	(1 << MEMORY_FAILURE_FIFO_ORDER)

struct memory_failure_entry {
	unsigned long pfn;
	int trapno;
	int flags;
};

struct memory_failure_cpu {
	DECLARE_KFIFO(fifo, struct memory_failure_entry,
		      MEMORY_FAILURE_FIFO_SIZE);
	spinlock_t lock;
	struct work_struct work;
};

static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
/**
 * memory_failure_queue - Schedule handling memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @trapno: Trap number reported in the signal to user space.
 * @flags: Flags for memory failure handling
 *
 * This function is called by the low level hardware error handler
 * when it detects hardware memory corruption of a page. It schedules
 * the recovery of the error page, including dropping pages, killing
 * processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Can run in IRQ context.
 */
void memory_failure_queue(unsigned long pfn, int trapno, int flags)
{
	struct memory_failure_cpu *mf_cpu;
	unsigned long proc_flags;
	struct memory_failure_entry entry = {
		.pfn =		pfn,
		.trapno =	trapno,
		.flags =	flags,
	};

	mf_cpu = &get_cpu_var(memory_failure_cpu);
	spin_lock_irqsave(&mf_cpu->lock, proc_flags);
	if (kfifo_put(&mf_cpu->fifo, entry))
		schedule_work_on(smp_processor_id(), &mf_cpu->work);
	else
		pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
		       pfn);
	spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
	put_cpu_var(memory_failure_cpu);
}
EXPORT_SYMBOL_GPL(memory_failure_queue);
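
/*
 * Unlike memory_failure(), this entry point may be used from IRQ context;
 * the actual work is bounced to the per-cpu work item. A hypothetical
 * caller in an error interrupt handler might simply do:
 *
 *	memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE);
 *
 * to request a deferred soft offline of the page (MF_SOFT_OFFLINE is
 * checked in memory_failure_work_func() below).
 */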
static void memory_failure_work_func(struct work_struct *work)
{
	struct memory_failure_cpu *mf_cpu;
	struct memory_failure_entry entry = { 0, };
	unsigned long proc_flags;
	int gotten;

	mf_cpu = this_cpu_ptr(&memory_failure_cpu);
	for (;;) {
		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
		gotten = kfifo_get(&mf_cpu->fifo, &entry);
		spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
		if (!gotten)
			break;
		if (entry.flags & MF_SOFT_OFFLINE)
			soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
		else
			memory_failure(entry.pfn, entry.trapno, entry.flags);
	}
}
static int __init memory_failure_init(void)
{
	struct memory_failure_cpu *mf_cpu;
	int cpu;

	for_each_possible_cpu(cpu) {
		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
		spin_lock_init(&mf_cpu->lock);
		INIT_KFIFO(mf_cpu->fifo);
		INIT_WORK(&mf_cpu->work, memory_failure_work_func);
	}

	return 0;
}
core_initcall(memory_failure_init);
/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the to be unpoisoned page
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software-level, so it only works
 * for Linux-injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct page *page;
	struct page *p;
	int freeit = 0;
	unsigned int nr_pages;

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	page = compound_head(p);

	if (!PageHWPoison(p)) {
		pr_info("MCE: Page was already unpoisoned %#lx\n", pfn);
		return 0;
	}

	/*
	 * unpoison_memory() can encounter thp only when the thp is being
	 * worked by memory_failure() and the page lock is not held yet.
	 * In such case, we yield to memory_failure() and make unpoison fail.
	 */
	if (!PageHuge(page) && PageTransHuge(page)) {
		pr_info("MCE: Memory failure is now running on %#lx\n", pfn);
		return 0;
	}

	nr_pages = 1 << compound_order(page);

	if (!get_page_unless_zero(page)) {
		/*
		 * Since a HWPoisoned hugepage should have a non-zero refcount,
		 * a race between memory failure and unpoison seems to have
		 * happened. In such case unpoison fails and memory failure
		 * runs to the end.
		 */
		if (PageHuge(page)) {
			pr_info("MCE: Memory failure is now running on free hugepage %#lx\n", pfn);
			return 0;
		}
		if (TestClearPageHWPoison(p))
			atomic_long_dec(&num_poisoned_pages);
		pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
		return 0;
	}

	lock_page(page);
	/*
	 * This test is racy because PG_hwpoison is set outside of page lock.
	 * That's acceptable because that won't trigger kernel panic. Instead,
	 * the PG_hwpoison page will be caught and isolated on the entrance to
	 * the free buddy page pool.
	 */
	if (TestClearPageHWPoison(page)) {
		pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
		atomic_long_sub(nr_pages, &num_poisoned_pages);
		freeit = 1;
		if (PageHuge(page))
			clear_page_hwpoison_huge_page(page);
	}
	unlock_page(page);

	put_page(page);
	if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
		put_page(page);

	return 0;
}
EXPORT_SYMBOL(unpoison_memory);
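
/*
 * Note: unpoison is typically reached from user space through the
 * hwpoison-inject debugfs interface (hwpoison/unpoison-pfn, see
 * mm/hwpoison-inject.c), which is the usual way to clean up after
 * software-injected errors in test setups.
 */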
static struct page *new_page(struct page *p, unsigned long private, int **x)
{
	int nid = page_to_nid(p);
	if (PageHuge(p))
		return alloc_huge_page_node(page_hstate(compound_head(p)),
						   nid);
	else
		return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}
/*
 * Safely get reference count of an arbitrary page.
 * Returns 0 for a free page, -EIO for a zero refcount page
 * that is not free, and 1 for any other page type.
 * For 1 the page is returned with increased page count, otherwise not.
 */
static int __get_any_page(struct page *p, unsigned long pfn, int flags)
{
	int ret;

	if (flags & MF_COUNT_INCREASED)
		return 1;

	/*
	 * When the target page is a free hugepage, just remove it
	 * from the free hugepage list.
	 */
	if (!get_page_unless_zero(compound_head(p))) {
		if (PageHuge(p)) {
			pr_info("%s: %#lx free huge page\n", __func__, pfn);
			ret = 0;
		} else if (is_free_buddy_page(p)) {
			pr_info("%s: %#lx free buddy page\n", __func__, pfn);
			ret = 0;
		} else {
			pr_info("%s: %#lx: unknown zero refcount page type %lx\n",
				__func__, pfn, p->flags);
			ret = -EIO;
		}
	} else {
		/* Not a free page */
		ret = 1;
	}
	return ret;
}
static int get_any_page(struct page *page, unsigned long pfn, int flags)
{
	int ret = __get_any_page(page, pfn, flags);

	if (ret == 1 && !PageHuge(page) && !PageLRU(page)) {
		/*
		 * Try to free it.
		 */
		put_page(page);
		shake_page(page, 1);

		/*
		 * Did it turn free?
		 */
		ret = __get_any_page(page, pfn, 0);
		if (!PageLRU(page)) {
			pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
				pfn, page->flags);
			return -EIO;
		}
	}
	return ret;
}
static int soft_offline_huge_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);
	LIST_HEAD(pagelist);

	/*
	 * This double-check of PageHWPoison is to avoid the race with
	 * memory_failure(). See also comment in __soft_offline_page().
	 */
	lock_page(hpage);
	if (PageHWPoison(hpage)) {
		unlock_page(hpage);
		put_page(hpage);
		pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
		return -EBUSY;
	}
	unlock_page(hpage);

	ret = isolate_huge_page(hpage, &pagelist);
	if (ret) {
		/*
		 * get_any_page() and isolate_huge_page() take a refcount each,
		 * so we need to drop one here.
		 */
		put_page(hpage);
	} else {
		pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
		return -EBUSY;
	}

	ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
				MIGRATE_SYNC, MR_MEMORY_FAILURE);
	if (ret) {
		pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
			pfn, ret, page->flags);
		/*
		 * We know that soft_offline_huge_page() tries to migrate
		 * only one hugepage pointed to by hpage, so we need not
		 * run through the pagelist here.
		 */
		putback_active_hugepage(hpage);
		if (ret > 0)
			ret = -EIO;
	} else {
		/* overcommit hugetlb page will be freed to buddy */
		if (PageHuge(page)) {
			set_page_hwpoison_huge_page(hpage);
			dequeue_hwpoisoned_huge_page(hpage);
			atomic_long_add(1 << compound_order(hpage),
					&num_poisoned_pages);
		} else {
			SetPageHWPoison(page);
			atomic_long_inc(&num_poisoned_pages);
		}
	}
	return ret;
}
static int __soft_offline_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);

	/*
	 * Check PageHWPoison again inside page lock because PageHWPoison
	 * is set by memory_failure() outside page lock. Note that
	 * memory_failure() also double-checks PageHWPoison inside page lock,
	 * so there's no race between soft_offline_page() and memory_failure().
	 */
	lock_page(page);
	wait_on_page_writeback(page);
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return -EBUSY;
	}
	/*
	 * Try to invalidate first. This should work for
	 * non dirty unmapped page cache pages.
	 */
	ret = invalidate_inode_page(page);
	unlock_page(page);
	/*
	 * RED-PEN would be better to keep it isolated here, but we
	 * would need to fix isolation locking first.
	 */
	if (ret == 1) {
		put_page(page);
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		SetPageHWPoison(page);
		atomic_long_inc(&num_poisoned_pages);
		return 0;
	}

	/*
	 * Simple invalidation didn't work.
	 * Try to migrate to a new page instead. migrate.c
	 * handles a large number of cases for us.
	 */
	ret = isolate_lru_page(page);
	/*
	 * Drop the page reference which came from get_any_page();
	 * a successful isolate_lru_page() already took another one.
	 */
	put_page(page);
	if (!ret) {
		LIST_HEAD(pagelist);
		inc_zone_page_state(page, NR_ISOLATED_ANON +
					page_is_file_cache(page));
		list_add(&page->lru, &pagelist);
		ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
					MIGRATE_SYNC, MR_MEMORY_FAILURE);
		if (ret) {
			if (!list_empty(&pagelist)) {
				list_del(&page->lru);
				dec_zone_page_state(page, NR_ISOLATED_ANON +
						page_is_file_cache(page));
				putback_lru_page(page);
			}

			pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
				pfn, ret, page->flags);
			if (ret > 0)
				ret = -EIO;
		} else {
			/*
			 * After page migration succeeds, the source page can
			 * be trapped in pagevec and actual freeing is delayed.
			 * Freeing code works differently based on PG_hwpoison,
			 * so there's a race. We need to make sure that the
			 * source page is freed back to buddy before
			 * setting PG_hwpoison.
			 */
			if (!is_free_buddy_page(page))
				drain_all_pages(page_zone(page));
			SetPageHWPoison(page);
			if (!is_free_buddy_page(page))
				pr_info("soft offline: %#lx: page leaked\n",
					pfn);
			atomic_long_inc(&num_poisoned_pages);
		}
	} else {
		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
			pfn, ret, page_count(page), page->flags);
	}
	return ret;
}
/**
 * soft_offline_page - Soft offline a page.
 * @page: page to offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success, otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);

	if (PageHWPoison(page)) {
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return -EBUSY;
	}
	if (!PageHuge(page) && PageTransHuge(hpage)) {
		if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
			pr_info("soft offline: %#lx: failed to split THP\n",
				pfn);
			return -EBUSY;
		}
	}

	get_online_mems();

	/*
	 * Isolate the page, so that it doesn't get reallocated if it
	 * was free. This flag should be kept set until the source page
	 * is freed and PG_hwpoison on it is set.
	 */
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		set_migratetype_isolate(page, true);

	ret = get_any_page(page, pfn, flags);
	put_online_mems();
	if (ret > 0) { /* for in-use pages */
		if (PageHuge(page))
			ret = soft_offline_huge_page(page, flags);
		else
			ret = __soft_offline_page(page, flags);
	} else if (ret == 0) { /* for free pages */
		if (PageHuge(page)) {
			set_page_hwpoison_huge_page(hpage);
			if (!dequeue_hwpoisoned_huge_page(hpage))
				atomic_long_add(1 << compound_order(hpage),
					&num_poisoned_pages);
		} else {
			if (!TestSetPageHWPoison(page))
				atomic_long_inc(&num_poisoned_pages);
		}
	}
	unset_migratetype_isolate(page, MIGRATE_MOVABLE);
	return ret;
}
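
/*
 * Usage note: besides the machine check paths above, a soft offline can be
 * requested from user space by writing a physical address to the sysfs file
 * /sys/devices/system/memory/soft_offline_page, e.g.:
 *
 *	echo 0x1234000 > /sys/devices/system/memory/soft_offline_page
 *
 * which resolves the containing page and calls soft_offline_page() on it.
 */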