mm, oom_reaper: do not attempt to reap a task more than twice
mm/oom_kill.c
1 /*
2 * linux/mm/oom_kill.c
3 *
4 * Copyright (C) 1998,2000 Rik van Riel
5 * Thanks go out to Claus Fischer for some serious inspiration and
6 * for goading me into coding this file...
7 * Copyright (C) 2010 Google, Inc.
8 * Rewritten by David Rientjes
9 *
10 * The routines in this file are used to kill a process when
11 * we're seriously out of memory. This gets called from __alloc_pages()
12 * in mm/page_alloc.c when we really run out of memory.
13 *
14 * Since we won't call these routines often (on a well-configured
15 * machine) this file will double as a 'coding guide' and a signpost
16 * for newbie kernel hackers. It features several pointers to major
17 * kernel subsystems and hints as to where to find out what things do.
18 */
19
20 #include <linux/oom.h>
21 #include <linux/mm.h>
22 #include <linux/err.h>
23 #include <linux/gfp.h>
24 #include <linux/sched.h>
25 #include <linux/swap.h>
26 #include <linux/timex.h>
27 #include <linux/jiffies.h>
28 #include <linux/cpuset.h>
29 #include <linux/export.h>
30 #include <linux/notifier.h>
31 #include <linux/memcontrol.h>
32 #include <linux/mempolicy.h>
33 #include <linux/security.h>
34 #include <linux/ptrace.h>
35 #include <linux/freezer.h>
36 #include <linux/ftrace.h>
37 #include <linux/ratelimit.h>
38 #include <linux/kthread.h>
39 #include <linux/init.h>
40
41 #include <asm/tlb.h>
42 #include "internal.h"
43
44 #define CREATE_TRACE_POINTS
45 #include <trace/events/oom.h>
46
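/*
 * These knobs are exposed to userspace as /proc/sys/vm/panic_on_oom,
 * /proc/sys/vm/oom_kill_allocating_task and /proc/sys/vm/oom_dump_tasks
 * (wired up in kernel/sysctl.c).
 */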
47 int sysctl_panic_on_oom;
48 int sysctl_oom_kill_allocating_task;
49 int sysctl_oom_dump_tasks = 1;
50
51 DEFINE_MUTEX(oom_lock);
52
53 #ifdef CONFIG_NUMA
54 /**
55 * has_intersects_mems_allowed() - check task eligibility for kill
56 * @start: task struct of the task whose threads should be considered
57 * @mask: nodemask passed to page allocator for mempolicy ooms
58 *
59 * Task eligibility is determined by whether or not any of @start's threads
60 * shares the same mempolicy nodes as current if bound by such a policy,
61 * and whether or not it has the same set of allowed cpuset nodes.
62 */
63 static bool has_intersects_mems_allowed(struct task_struct *start,
64 const nodemask_t *mask)
65 {
66 struct task_struct *tsk;
67 bool ret = false;
68
69 rcu_read_lock();
70 for_each_thread(start, tsk) {
71 if (mask) {
72 /*
73 * If this is a mempolicy constrained oom, tsk's
74 * cpuset is irrelevant. Only return true if its
75 * mempolicy intersects current, otherwise it may be
76 * needlessly killed.
77 */
78 ret = mempolicy_nodemask_intersects(tsk, mask);
79 } else {
80 /*
81 * This is not a mempolicy constrained oom, so only
82 * check the mems of tsk's cpuset.
83 */
84 ret = cpuset_mems_allowed_intersects(current, tsk);
85 }
86 if (ret)
87 break;
88 }
89 rcu_read_unlock();
90
91 return ret;
92 }
93 #else
94 static bool has_intersects_mems_allowed(struct task_struct *tsk,
95 const nodemask_t *mask)
96 {
97 return true;
98 }
99 #endif /* CONFIG_NUMA */
100
101 /*
102 * The process p may have detached its own ->mm while exiting or through
103 * use_mm(), but one or more of its subthreads may still have a valid
104 * pointer. Return p, or any of its subthreads with a valid ->mm, with
105 * task_lock() held.
106 */
107 struct task_struct *find_lock_task_mm(struct task_struct *p)
108 {
109 struct task_struct *t;
110
111 rcu_read_lock();
112
113 for_each_thread(p, t) {
114 task_lock(t);
115 if (likely(t->mm))
116 goto found;
117 task_unlock(t);
118 }
119 t = NULL;
120 found:
121 rcu_read_unlock();
122
123 return t;
124 }
125
126 /*
127 * order == -1 means the oom kill was requested via sysrq; otherwise the
128 * order is only used for display purposes.
129 */
130 static inline bool is_sysrq_oom(struct oom_control *oc)
131 {
132 return oc->order == -1;
133 }
134
135 /* Return true if the task is not a suitable candidate victim task. */
136 static bool oom_unkillable_task(struct task_struct *p,
137 struct mem_cgroup *memcg, const nodemask_t *nodemask)
138 {
139 if (is_global_init(p))
140 return true;
141 if (p->flags & PF_KTHREAD)
142 return true;
143
144 /* When called from mem_cgroup_out_of_memory() and p is not a member of the memcg */
145 if (memcg && !task_in_mem_cgroup(p, memcg))
146 return true;
147
148 /* p may not have freeable memory in nodemask */
149 if (!has_intersects_mems_allowed(p, nodemask))
150 return true;
151
152 return false;
153 }
154
155 /**
156 * oom_badness - heuristic function to determine which candidate task to kill
157 * @p: task struct of the task whose badness we should calculate
158 * @totalpages: total present RAM allowed for page allocation
159 *
160 * The heuristic for determining which task to kill is made to be as simple and
161 * predictable as possible. The goal is to return the highest value for the
162 * task consuming the most memory to avoid subsequent oom failures.
163 */
164 unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
165 const nodemask_t *nodemask, unsigned long totalpages)
166 {
167 long points;
168 long adj;
169
170 if (oom_unkillable_task(p, memcg, nodemask))
171 return 0;
172
173 p = find_lock_task_mm(p);
174 if (!p)
175 return 0;
176
177 /*
178 * Do not even consider tasks which are explicitly marked oom
179 * unkillable, have already been oom reaped, or are in
180 * the middle of a vfork.
181 */
182 adj = (long)p->signal->oom_score_adj;
183 if (adj == OOM_SCORE_ADJ_MIN ||
184 test_bit(MMF_OOM_REAPED, &p->mm->flags) ||
185 in_vfork(p)) {
186 task_unlock(p);
187 return 0;
188 }
189
190 /*
191 * The baseline for the badness score is the proportion of RAM that each
192 * task's rss, pagetable and swap space use.
193 */
194 points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
195 atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
196 task_unlock(p);
197
198 /*
199 * Root processes get 3% bonus, just like the __vm_enough_memory()
200 * implementation used by LSMs.
201 */
202 if (has_capability_noaudit(p, CAP_SYS_ADMIN))
203 points -= (points * 3) / 100;
204
205 /* Normalize to oom_score_adj units */
206 adj *= totalpages / 1000;
207 points += adj;
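/*
 * Example (assuming 4KiB pages): with totalpages = 1048576 (4GiB of RAM
 * + swap), each oom_score_adj unit is worth totalpages / 1000 = 1048
 * pages, so an oom_score_adj of 500 adds roughly 524000 pages on top of
 * the rss + swap + page table footprint computed above.
 */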
208
209 /*
210 * Never return 0 for an eligible task regardless of the root bonus and
211 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
212 */
213 return points > 0 ? points : 1;
214 }
215
216 /*
217 * Determine the type of allocation constraint.
218 */
219 #ifdef CONFIG_NUMA
220 static enum oom_constraint constrained_alloc(struct oom_control *oc,
221 unsigned long *totalpages)
222 {
223 struct zone *zone;
224 struct zoneref *z;
225 enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
226 bool cpuset_limited = false;
227 int nid;
228
229 /* Default to all available memory */
230 *totalpages = totalram_pages + total_swap_pages;
231
232 if (!oc->zonelist)
233 return CONSTRAINT_NONE;
234 /*
235 * We reach here only when __GFP_NOFAIL is used, so we should avoid
236 * killing current; a random task has to be killed in this case.
237 * Ideally this would be CONSTRAINT_THISNODE, but there is no way to handle it yet.
238 */
239 if (oc->gfp_mask & __GFP_THISNODE)
240 return CONSTRAINT_NONE;
241
242 /*
243 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
244 * the page allocator means a mempolicy is in effect. Cpuset policy
245 * is enforced in get_page_from_freelist().
246 */
247 if (oc->nodemask &&
248 !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
249 *totalpages = total_swap_pages;
250 for_each_node_mask(nid, *oc->nodemask)
251 *totalpages += node_spanned_pages(nid);
252 return CONSTRAINT_MEMORY_POLICY;
253 }
254
255 /* Check whether this allocation failure is caused by cpuset's memory restrictions */
256 for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
257 high_zoneidx, oc->nodemask)
258 if (!cpuset_zone_allowed(zone, oc->gfp_mask))
259 cpuset_limited = true;
260
261 if (cpuset_limited) {
262 *totalpages = total_swap_pages;
263 for_each_node_mask(nid, cpuset_current_mems_allowed)
264 *totalpages += node_spanned_pages(nid);
265 return CONSTRAINT_CPUSET;
266 }
267 return CONSTRAINT_NONE;
268 }
269 #else
270 static enum oom_constraint constrained_alloc(struct oom_control *oc,
271 unsigned long *totalpages)
272 {
273 *totalpages = totalram_pages + total_swap_pages;
274 return CONSTRAINT_NONE;
275 }
276 #endif
277
278 enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
279 struct task_struct *task)
280 {
281 if (oom_unkillable_task(task, NULL, oc->nodemask))
282 return OOM_SCAN_CONTINUE;
283
284 /*
285 * This task already has access to memory reserves and is being killed.
286 * Don't allow any other task to have access to the reserves.
287 */
288 if (!is_sysrq_oom(oc) && atomic_read(&task->signal->oom_victims))
289 return OOM_SCAN_ABORT;
290
291 /*
292 * If task is allocating a lot of memory and has been marked to be
293 * killed first if it triggers an oom, then select it.
294 */
295 if (oom_task_origin(task))
296 return OOM_SCAN_SELECT;
297
298 return OOM_SCAN_OK;
299 }
300
301 /*
302 * Simple selection loop. We choose the process with the highest
303 * number of 'points'. Returns -1 on scan abort.
304 */
305 static struct task_struct *select_bad_process(struct oom_control *oc,
306 unsigned int *ppoints, unsigned long totalpages)
307 {
308 struct task_struct *p;
309 struct task_struct *chosen = NULL;
310 unsigned long chosen_points = 0;
311
312 rcu_read_lock();
313 for_each_process(p) {
314 unsigned int points;
315
316 switch (oom_scan_process_thread(oc, p)) {
317 case OOM_SCAN_SELECT:
318 chosen = p;
319 chosen_points = ULONG_MAX;
320 /* fall through */
321 case OOM_SCAN_CONTINUE:
322 continue;
323 case OOM_SCAN_ABORT:
324 rcu_read_unlock();
325 return (struct task_struct *)(-1UL);
326 case OOM_SCAN_OK:
327 break;
328 };
329 points = oom_badness(p, NULL, oc->nodemask, totalpages);
330 if (!points || points < chosen_points)
331 continue;
332
333 chosen = p;
334 chosen_points = points;
335 }
336 if (chosen)
337 get_task_struct(chosen);
338 rcu_read_unlock();
339
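/* Report the chosen task's badness scaled to per-mille of totalpages */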
340 *ppoints = chosen_points * 1000 / totalpages;
341 return chosen;
342 }
343
344 /**
345 * dump_tasks - dump current memory state of all system tasks
346 * @memcg: current's memory controller, if constrained
347 * @nodemask: nodemask passed to page allocator for mempolicy ooms
348 *
349 * Dumps the current memory state of all eligible tasks. Tasks not in the same
350 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
351 * are not shown.
352 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
353 * nr_pmds, swapents, oom_score_adj value, and name.
354 */
355 static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
356 {
357 struct task_struct *p;
358 struct task_struct *task;
359
360 pr_info("[ pid ] uid tgid total_vm rss nr_ptes nr_pmds swapents oom_score_adj name\n");
361 rcu_read_lock();
362 for_each_process(p) {
363 if (oom_unkillable_task(p, memcg, nodemask))
364 continue;
365
366 task = find_lock_task_mm(p);
367 if (!task) {
368 /*
369 * This is a kthread or all of p's threads have already
370 * detached their mm's. There's no need to report
371 * them; they can't be oom killed anyway.
372 */
373 continue;
374 }
375
376 pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu %5hd %s\n",
377 task->pid, from_kuid(&init_user_ns, task_uid(task)),
378 task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
379 atomic_long_read(&task->mm->nr_ptes),
380 mm_nr_pmds(task->mm),
381 get_mm_counter(task->mm, MM_SWAPENTS),
382 task->signal->oom_score_adj, task->comm);
383 task_unlock(task);
384 }
385 rcu_read_unlock();
386 }
387
388 static void dump_header(struct oom_control *oc, struct task_struct *p)
389 {
390 pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
391 current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
392 current->signal->oom_score_adj);
393
394 cpuset_print_current_mems_allowed();
395 dump_stack();
396 if (oc->memcg)
397 mem_cgroup_print_oom_info(oc->memcg, p);
398 else
399 show_mem(SHOW_MEM_FILTER_NODES);
400 if (sysctl_oom_dump_tasks)
401 dump_tasks(oc->memcg, oc->nodemask);
402 }
403
404 /*
405 * Number of OOM victims in flight
406 */
407 static atomic_t oom_victims = ATOMIC_INIT(0);
408 static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);
409
410 bool oom_killer_disabled __read_mostly;
411
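/* Convert a page count to kilobytes (pages are 2^PAGE_SHIFT bytes, KiB are 2^10) */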
412 #define K(x) ((x) << (PAGE_SHIFT-10))
413
414 /*
415 * task->mm can be NULL if the task is the exited group leader. So to
416 * determine whether the task is using a particular mm, we examine all the
417 * task's threads: if one of those is using this mm then this task was also
418 * using it.
419 */
420 bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
421 {
422 struct task_struct *t;
423
424 for_each_thread(p, t) {
425 struct mm_struct *t_mm = READ_ONCE(t->mm);
426 if (t_mm)
427 return t_mm == mm;
428 }
429 return false;
430 }
431
432
433 #ifdef CONFIG_MMU
434 /*
435 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
436 * victim (if that is possible) to help the OOM killer to move on.
437 */
438 static struct task_struct *oom_reaper_th;
439 static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
440 static struct task_struct *oom_reaper_list;
441 static DEFINE_SPINLOCK(oom_reaper_lock);
442
443 static bool __oom_reap_task(struct task_struct *tsk)
444 {
445 struct mmu_gather tlb;
446 struct vm_area_struct *vma;
447 struct mm_struct *mm = NULL;
448 struct task_struct *p;
449 struct zap_details details = {.check_swap_entries = true,
450 .ignore_dirty = true};
451 bool ret = true;
452
453 /*
454 * We have to make sure to not race with the victim exit path
455 * and cause premature new oom victim selection:
456 * __oom_reap_task exit_mm
457 * mmget_not_zero
458 * mmput
459 * atomic_dec_and_test
460 * exit_oom_victim
461 * [...]
462 * out_of_memory
463 * select_bad_process
464 * # no TIF_MEMDIE task selects new victim
465 * unmap_page_range # frees some memory
466 */
467 mutex_lock(&oom_lock);
468
469 /*
470 * Make sure we find the associated mm_struct even when the particular
471 * thread has already terminated and cleared its mm.
472 * We might race with the exit path so consider our work done if there
473 * is no mm.
474 */
475 p = find_lock_task_mm(tsk);
476 if (!p)
477 goto unlock_oom;
478 mm = p->mm;
479 atomic_inc(&mm->mm_count);
480 task_unlock(p);
481
482 if (!down_read_trylock(&mm->mmap_sem)) {
483 ret = false;
484 goto mm_drop;
485 }
486
487 /*
488 * Increase mm_users only after we know we will reap something, so
489 * that mmput_async is called only when we have reaped something
490 * and a delayed __mmput doesn't matter that much.
491 */
492 if (!mmget_not_zero(mm)) {
493 up_read(&mm->mmap_sem);
494 goto mm_drop;
495 }
496
497 tlb_gather_mmu(&tlb, mm, 0, -1);
498 for (vma = mm->mmap ; vma; vma = vma->vm_next) {
499 if (is_vm_hugetlb_page(vma))
500 continue;
501
502 /*
503 * mlocked VMAs require explicit munlocking before unmap.
504 * Let's keep it simple here and skip such VMAs.
505 */
506 if (vma->vm_flags & VM_LOCKED)
507 continue;
508
509 /*
510 * Only anonymous pages have a good chance to be dropped
511 * without additional steps which we cannot afford as we
512 * are OOM already.
513 *
514 * We do not even care about fs backed pages because all
515 * that are reclaimable have already been reclaimed and
516 * we do not want to block exit_mmap by keeping mm ref
517 * count elevated without a good reason.
518 */
519 if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
520 unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
521 &details);
522 }
523 tlb_finish_mmu(&tlb, 0, -1);
524 pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
525 task_pid_nr(tsk), tsk->comm,
526 K(get_mm_counter(mm, MM_ANONPAGES)),
527 K(get_mm_counter(mm, MM_FILEPAGES)),
528 K(get_mm_counter(mm, MM_SHMEMPAGES)));
529 up_read(&mm->mmap_sem);
530
531 /*
532 * This task can be safely ignored because we cannot do much more
533 * to release its memory.
534 */
535 set_bit(MMF_OOM_REAPED, &mm->flags);
536 /*
537 * Drop our reference but make sure the mmput slow path is called from a
538 * different context because we shouldn't risk getting stuck there and
539 * putting the oom_reaper out of the way.
540 */
541 mmput_async(mm);
542 mm_drop:
543 mmdrop(mm);
544 unlock_oom:
545 mutex_unlock(&oom_lock);
546 return ret;
547 }
548
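/*
 * The reaper gets a bounded number of attempts (sleeping ~100ms between
 * them) to take mmap_sem and reap a victim before it gives up on it.
 */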
549 #define MAX_OOM_REAP_RETRIES 10
550 static void oom_reap_task(struct task_struct *tsk)
551 {
552 int attempts = 0;
553
554 /* Retry the down_read_trylock(mmap_sem) a few times */
555 while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task(tsk))
556 schedule_timeout_idle(HZ/10);
557
558 if (attempts > MAX_OOM_REAP_RETRIES) {
559 struct task_struct *p;
560
561 pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
562 task_pid_nr(tsk), tsk->comm);
563
564 /*
565 * If we've already tried to reap this task in the past and
566 * failed, it probably doesn't make much sense to try yet again,
567 * so hide the mm from the oom killer so that it can move on
568 * to another task with a different mm struct.
569 */
570 p = find_lock_task_mm(tsk);
571 if (p) {
572 if (test_and_set_bit(MMF_OOM_NOT_REAPABLE, &p->mm->flags)) {
573 pr_info("oom_reaper: giving up pid:%d (%s)\n",
574 task_pid_nr(tsk), tsk->comm);
575 set_bit(MMF_OOM_REAPED, &p->mm->flags);
576 }
577 task_unlock(p);
578 }
579
580 debug_show_all_locks();
581 }
582
583 /*
584 * Clear TIF_MEMDIE because the task shouldn't be sitting on
585 * reasonably reclaimable memory anymore, or it is not a good candidate
586 * for the oom victim right now because it cannot release its memory
587 * either by itself or with the help of the oom reaper.
588 */
589 tsk->oom_reaper_list = NULL;
590 exit_oom_victim(tsk);
591
592 /* Drop a reference taken by wake_oom_reaper */
593 put_task_struct(tsk);
594 }
595
596 static int oom_reaper(void *unused)
597 {
598 set_freezable();
599
600 while (true) {
601 struct task_struct *tsk = NULL;
602
603 wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
604 spin_lock(&oom_reaper_lock);
605 if (oom_reaper_list != NULL) {
606 tsk = oom_reaper_list;
607 oom_reaper_list = tsk->oom_reaper_list;
608 }
609 spin_unlock(&oom_reaper_lock);
610
611 if (tsk)
612 oom_reap_task(tsk);
613 }
614
615 return 0;
616 }
617
618 void wake_oom_reaper(struct task_struct *tsk)
619 {
620 if (!oom_reaper_th)
621 return;
622
623 /* tsk is already queued? */
624 if (tsk == oom_reaper_list || tsk->oom_reaper_list)
625 return;
626
627 get_task_struct(tsk);
628
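/* Push tsk onto the head of the reaper list and kick the reaper thread */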
629 spin_lock(&oom_reaper_lock);
630 tsk->oom_reaper_list = oom_reaper_list;
631 oom_reaper_list = tsk;
632 spin_unlock(&oom_reaper_lock);
633 wake_up(&oom_reaper_wait);
634 }
635
636 static int __init oom_init(void)
637 {
638 oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
639 if (IS_ERR(oom_reaper_th)) {
640 pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
641 PTR_ERR(oom_reaper_th));
642 oom_reaper_th = NULL;
643 }
644 return 0;
645 }
646 subsys_initcall(oom_init)
647 #endif
648
649 /**
650 * mark_oom_victim - mark the given task as OOM victim
651 * @tsk: task to mark
652 *
653 * Has to be called with oom_lock held and never after
654 * oom has been disabled already.
655 */
656 void mark_oom_victim(struct task_struct *tsk)
657 {
658 WARN_ON(oom_killer_disabled);
659 /* OOM killer might race with memcg OOM */
660 if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
661 return;
662 atomic_inc(&tsk->signal->oom_victims);
663 /*
664 * Make sure that the task is woken up from uninterruptible sleep
665 * if it is frozen, because the OOM killer wouldn't be able to free
666 * any memory and could livelock. freezing_slow_path will tell the freezer
667 * that TIF_MEMDIE tasks should be ignored.
668 */
669 __thaw_task(tsk);
670 atomic_inc(&oom_victims);
671 }
672
673 /**
674 * exit_oom_victim - note the exit of an OOM victim
675 */
676 void exit_oom_victim(struct task_struct *tsk)
677 {
678 if (!test_and_clear_tsk_thread_flag(tsk, TIF_MEMDIE))
679 return;
680 atomic_dec(&tsk->signal->oom_victims);
681
682 if (!atomic_dec_return(&oom_victims))
683 wake_up_all(&oom_victims_wait);
684 }
685
686 /**
687 * oom_killer_disable - disable OOM killer
688 *
689 * Forces all page allocations to fail rather than trigger OOM killer.
690 * Will block and wait until all OOM victims are killed.
691 *
692 * The function cannot be called when there are runnable user tasks because
693 * userspace would see unexpected allocation failures as a result. Any
694 * new usage of this function should be discussed with MM people.
695 *
696 * Returns true if successful and false if the OOM killer cannot be
697 * disabled.
698 */
699 bool oom_killer_disable(void)
700 {
701 /*
702 * Make sure not to race with an ongoing OOM killer. Check that
703 * current is not killed (possibly due to sharing the victim's memory).
704 */
705 if (mutex_lock_killable(&oom_lock))
706 return false;
707 oom_killer_disabled = true;
708 mutex_unlock(&oom_lock);
709
710 wait_event(oom_victims_wait, !atomic_read(&oom_victims));
711
712 return true;
713 }
714
715 /**
716 * oom_killer_enable - enable OOM killer
717 */
718 void oom_killer_enable(void)
719 {
720 oom_killer_disabled = false;
721 }
722
723 static inline bool __task_will_free_mem(struct task_struct *task)
724 {
725 struct signal_struct *sig = task->signal;
726
727 /*
728 * A coredumping process may sleep for an extended period in exit_mm(),
729 * so the oom killer cannot assume that the process will promptly exit
730 * and release memory.
731 */
732 if (sig->flags & SIGNAL_GROUP_COREDUMP)
733 return false;
734
735 if (sig->flags & SIGNAL_GROUP_EXIT)
736 return true;
737
738 if (thread_group_empty(task) && (task->flags & PF_EXITING))
739 return true;
740
741 return false;
742 }
743
744 /*
745 * Checks whether the given task is dying or exiting and likely to
746 * release its address space. This means that all threads and processes
747 * sharing the same mm have to be killed or exiting.
748 */
749 bool task_will_free_mem(struct task_struct *task)
750 {
751 struct mm_struct *mm;
752 struct task_struct *p;
753 bool ret = true;
754
755 if (!__task_will_free_mem(task))
756 return false;
757
758 /*
759 * If the process has passed exit_mm we have to skip it because
760 * we have lost a link to other tasks sharing this mm, we do not
761 * have anything to reap and the task might then get stuck waiting
762 * for parent as zombie and we do not want it to hold TIF_MEMDIE
763 */
764 p = find_lock_task_mm(task);
765 if (!p)
766 return false;
767
768 mm = p->mm;
769
770 /*
771 * This task has already been drained by the oom reaper so there is
772 * only a small chance it will free some more.
773 */
774 if (test_bit(MMF_OOM_REAPED, &mm->flags)) {
775 task_unlock(p);
776 return false;
777 }
778
779 if (atomic_read(&mm->mm_users) <= 1) {
780 task_unlock(p);
781 return true;
782 }
783
784 /* pin the mm to not get freed and reused */
785 atomic_inc(&mm->mm_count);
786 task_unlock(p);
787
788 /*
789 * This is really pessimistic but we do not have any reliable way
790 * to check whether external processes share our mm.
791 */
792 rcu_read_lock();
793 for_each_process(p) {
794 if (!process_shares_mm(p, mm))
795 continue;
796 if (same_thread_group(task, p))
797 continue;
798 ret = __task_will_free_mem(p);
799 if (!ret)
800 break;
801 }
802 rcu_read_unlock();
803 mmdrop(mm);
804
805 return ret;
806 }
807
808 /*
809 * Must be called while holding a reference to p, which will be released upon
810 * returning.
811 */
812 void oom_kill_process(struct oom_control *oc, struct task_struct *p,
813 unsigned int points, unsigned long totalpages,
814 const char *message)
815 {
816 struct task_struct *victim = p;
817 struct task_struct *child;
818 struct task_struct *t;
819 struct mm_struct *mm;
820 unsigned int victim_points = 0;
821 static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
822 DEFAULT_RATELIMIT_BURST);
823 bool can_oom_reap = true;
824
825 /*
826 * If the task is already exiting, don't alarm the sysadmin or kill
827 * its children or threads, just set TIF_MEMDIE so it can die quickly
828 */
829 if (task_will_free_mem(p)) {
830 mark_oom_victim(p);
831 wake_oom_reaper(p);
832 put_task_struct(p);
833 return;
834 }
835
836 if (__ratelimit(&oom_rs))
837 dump_header(oc, p);
838
839 pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
840 message, task_pid_nr(p), p->comm, points);
841
842 /*
843 * If any of p's children has a different mm and is eligible for kill,
844 * the one with the highest oom_badness() score is sacrificed for its
845 * parent. This attempts to lose the minimal amount of work done while
846 * still freeing memory.
847 */
848 read_lock(&tasklist_lock);
849 for_each_thread(p, t) {
850 list_for_each_entry(child, &t->children, sibling) {
851 unsigned int child_points;
852
853 if (process_shares_mm(child, p->mm))
854 continue;
855 /*
856 * oom_badness() returns 0 if the thread is unkillable
857 */
858 child_points = oom_badness(child,
859 oc->memcg, oc->nodemask, totalpages);
860 if (child_points > victim_points) {
861 put_task_struct(victim);
862 victim = child;
863 victim_points = child_points;
864 get_task_struct(victim);
865 }
866 }
867 }
868 read_unlock(&tasklist_lock);
869
870 p = find_lock_task_mm(victim);
871 if (!p) {
872 put_task_struct(victim);
873 return;
874 } else if (victim != p) {
875 get_task_struct(p);
876 put_task_struct(victim);
877 victim = p;
878 }
879
880 /* Get a reference to safely compare mm after task_unlock(victim) */
881 mm = victim->mm;
882 atomic_inc(&mm->mm_count);
883 /*
884 * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
885 * the OOM victim from depleting the memory reserves from the user
886 * space under its control.
887 */
888 do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
889 mark_oom_victim(victim);
890 pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
891 task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
892 K(get_mm_counter(victim->mm, MM_ANONPAGES)),
893 K(get_mm_counter(victim->mm, MM_FILEPAGES)),
894 K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
895 task_unlock(victim);
896
897 /*
898 * Kill all user processes sharing victim->mm in other thread groups, if
899 * any. They don't get access to memory reserves, though, to avoid
900 * depletion of all memory. This prevents mm->mmap_sem livelock when an
901 * oom killed thread cannot exit because it requires the semaphore and
902 * it's contended by another thread trying to allocate memory itself.
903 * That thread will now get access to memory reserves since it has a
904 * pending fatal signal.
905 */
906 rcu_read_lock();
907 for_each_process(p) {
908 if (!process_shares_mm(p, mm))
909 continue;
910 if (same_thread_group(p, victim))
911 continue;
912 if (unlikely(p->flags & PF_KTHREAD) || is_global_init(p)) {
913 /*
914 * We cannot use oom_reaper for the mm shared by this
915 * process because it wouldn't get killed and so the
916 * memory might still be in use.
917 */
918 can_oom_reap = false;
919 continue;
920 }
921 do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
922 }
923 rcu_read_unlock();
924
925 if (can_oom_reap)
926 wake_oom_reaper(victim);
927
928 mmdrop(mm);
929 put_task_struct(victim);
930 }
931 #undef K
932
933 /*
934 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
935 */
936 void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint)
937 {
938 if (likely(!sysctl_panic_on_oom))
939 return;
940 if (sysctl_panic_on_oom != 2) {
941 /*
942 * panic_on_oom == 1 only affects CONSTRAINT_NONE; the kernel
943 * does not panic for cpuset, mempolicy, or memcg allocation
944 * failures.
945 */
946 if (constraint != CONSTRAINT_NONE)
947 return;
948 }
949 /* Do not panic for oom kills triggered by sysrq */
950 if (is_sysrq_oom(oc))
951 return;
952 dump_header(oc, NULL);
953 panic("Out of memory: %s panic_on_oom is enabled\n",
954 sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
955 }
956
957 static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
958
959 int register_oom_notifier(struct notifier_block *nb)
960 {
961 return blocking_notifier_chain_register(&oom_notify_list, nb);
962 }
963 EXPORT_SYMBOL_GPL(register_oom_notifier);
964
965 int unregister_oom_notifier(struct notifier_block *nb)
966 {
967 return blocking_notifier_chain_unregister(&oom_notify_list, nb);
968 }
969 EXPORT_SYMBOL_GPL(unregister_oom_notifier);
970
971 /**
972 * out_of_memory - kill the "best" process when we run out of memory
973 * @oc: pointer to struct oom_control
974 *
975 * If we run out of memory, we have the choice between either
976 * killing a random task (bad), letting the system crash (worse)
977 * OR trying to be smart about which process to kill. Note that we
978 * don't have to be perfect here, we just have to be good.
979 */
980 bool out_of_memory(struct oom_control *oc)
981 {
982 struct task_struct *p;
983 unsigned long totalpages;
984 unsigned long freed = 0;
985 unsigned int uninitialized_var(points);
986 enum oom_constraint constraint = CONSTRAINT_NONE;
987
988 if (oom_killer_disabled)
989 return false;
990
991 blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
992 if (freed > 0)
993 /* Got some memory back in the last second. */
994 return true;
995
996 /*
997 * If current has a pending SIGKILL or is exiting, then automatically
998 * select it. The goal is to allow it to allocate so that it may
999 * quickly exit and free its memory.
1000 *
1001 * But don't select it if current has already released its mm and cleared
1002 * the TIF_MEMDIE flag at exit_mm(), otherwise an OOM livelock may occur.
1003 */
1004 if (current->mm && task_will_free_mem(current)) {
1005 mark_oom_victim(current);
1006 wake_oom_reaper(current);
1007 return true;
1008 }
1009
1010 /*
1011 * The OOM killer does not compensate for IO-less reclaim.
1012 * pagefault_out_of_memory lost its gfp context so we have to
1013 * make sure to exclude the 0 mask - all other users should have at least
1014 * ___GFP_DIRECT_RECLAIM to get here.
1015 */
1016 if (oc->gfp_mask && !(oc->gfp_mask & (__GFP_FS|__GFP_NOFAIL)))
1017 return true;
1018
1019 /*
1020 * Check if there were limitations on the allocation (only relevant for
1021 * NUMA) that may require different handling.
1022 */
1023 constraint = constrained_alloc(oc, &totalpages);
1024 if (constraint != CONSTRAINT_MEMORY_POLICY)
1025 oc->nodemask = NULL;
1026 check_panic_on_oom(oc, constraint);
1027
1028 if (sysctl_oom_kill_allocating_task && current->mm &&
1029 !oom_unkillable_task(current, NULL, oc->nodemask) &&
1030 current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
1031 get_task_struct(current);
1032 oom_kill_process(oc, current, 0, totalpages,
1033 "Out of memory (oom_kill_allocating_task)");
1034 return true;
1035 }
1036
1037 p = select_bad_process(oc, &points, totalpages);
1038 /* Found nothing?!?! Either we hang forever, or we panic. */
1039 if (!p && !is_sysrq_oom(oc)) {
1040 dump_header(oc, NULL);
1041 panic("Out of memory and no killable processes...\n");
1042 }
1043 if (p && p != (void *)-1UL) {
1044 oom_kill_process(oc, p, points, totalpages, "Out of memory");
1045 /*
1046 * Give the killed process a good chance to exit before trying
1047 * to allocate memory again.
1048 */
1049 schedule_timeout_killable(1);
1050 }
1051 return true;
1052 }
1053
1054 /*
1055 * The pagefault handler calls here because it is out of memory, so kill a
1056 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
1057 * killing is already in progress so do nothing.
1058 */
1059 void pagefault_out_of_memory(void)
1060 {
1061 struct oom_control oc = {
1062 .zonelist = NULL,
1063 .nodemask = NULL,
1064 .memcg = NULL,
1065 .gfp_mask = 0,
1066 .order = 0,
1067 };
1068
1069 if (mem_cgroup_oom_synchronize(true))
1070 return;
1071
1072 if (!mutex_trylock(&oom_lock))
1073 return;
1074
1075 if (!out_of_memory(&oc)) {
1076 /*
1077 * There shouldn't be any user tasks runnable while the
1078 * OOM killer is disabled, so the current task has to
1079 * be a racing OOM victim which oom_killer_disable()
1080 * is waiting for.
1081 */
1082 WARN_ON(test_thread_flag(TIF_MEMDIE));
1083 }
1084
1085 mutex_unlock(&oom_lock);
1086 }