Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/mm/oom_kill.c | |
3 | * | |
4 | * Copyright (C) 1998,2000 Rik van Riel | |
5 | * Thanks go out to Claus Fischer for some serious inspiration and | |
6 | * for goading me into coding this file... | |
a63d83f4 DR |
7 | * Copyright (C) 2010 Google, Inc. |
8 | * Rewritten by David Rientjes | |
1da177e4 LT |
9 | * |
10 | * The routines in this file are used to kill a process when | |
a49335cc PJ |
11 | * we're seriously out of memory. This gets called from __alloc_pages() |
12 | * in mm/page_alloc.c when we really run out of memory. | |
1da177e4 LT |
13 | * |
14 | * Since we won't call these routines often (on a well-configured | |
15 | * machine) this file will double as a 'coding guide' and a signpost | |
16 | * for newbie kernel hackers. It features several pointers to major | |
17 | * kernel subsystems and hints as to where to find out what things do. | |
18 | */ | |
19 | ||
8ac773b4 | 20 | #include <linux/oom.h> |
1da177e4 | 21 | #include <linux/mm.h> |
4e950f6f | 22 | #include <linux/err.h> |
5a0e3ad6 | 23 | #include <linux/gfp.h> |
1da177e4 LT |
24 | #include <linux/sched.h> |
25 | #include <linux/swap.h> | |
26 | #include <linux/timex.h> | |
27 | #include <linux/jiffies.h> | |
ef08e3b4 | 28 | #include <linux/cpuset.h> |
b95f1b31 | 29 | #include <linux/export.h> |
8bc719d3 | 30 | #include <linux/notifier.h> |
c7ba5c9e | 31 | #include <linux/memcontrol.h> |
6f48d0eb | 32 | #include <linux/mempolicy.h> |
5cd9c58f | 33 | #include <linux/security.h> |
edd45544 | 34 | #include <linux/ptrace.h> |
f660daac | 35 | #include <linux/freezer.h> |
43d2b113 | 36 | #include <linux/ftrace.h> |
dc3f21ea | 37 | #include <linux/ratelimit.h> |
aac45363 MH |
38 | #include <linux/kthread.h> |
39 | #include <linux/init.h> | |
40 | ||
41 | #include <asm/tlb.h> | |
42 | #include "internal.h" | |
43d2b113 KH |
43 | |
44 | #define CREATE_TRACE_POINTS | |
45 | #include <trace/events/oom.h> | |
1da177e4 | 46 | |
fadd8fbd | 47 | int sysctl_panic_on_oom; |
fe071d7e | 48 | int sysctl_oom_kill_allocating_task; |
ad915c43 | 49 | int sysctl_oom_dump_tasks = 1; |
dc56401f JW |
50 | |
51 | DEFINE_MUTEX(oom_lock); | |
1da177e4 | 52 | |
6f48d0eb DR |
53 | #ifdef CONFIG_NUMA |
54 | /** | |
55 | * has_intersects_mems_allowed() - check task eligibility for kill
ad962441 | 56 | * @start: task struct of the task to consider
6f48d0eb DR |
57 | * @mask: nodemask passed to page allocator for mempolicy ooms |
58 | * | |
59 | * Task eligibility is determined by whether or not a candidate task, @tsk, | |
60 | * shares the same mempolicy nodes as current if it is bound by such a policy | |
61 | * and whether or not it has the same set of allowed cpuset nodes. | |
495789a5 | 62 | */ |
ad962441 | 63 | static bool has_intersects_mems_allowed(struct task_struct *start, |
6f48d0eb | 64 | const nodemask_t *mask) |
495789a5 | 65 | { |
ad962441 ON |
66 | struct task_struct *tsk; |
67 | bool ret = false; | |
495789a5 | 68 | |
ad962441 | 69 | rcu_read_lock(); |
1da4db0c | 70 | for_each_thread(start, tsk) { |
6f48d0eb DR |
71 | if (mask) { |
72 | /* | |
73 | * If this is a mempolicy constrained oom, tsk's | |
74 | * cpuset is irrelevant. Only return true if its | |
75 | * mempolicy intersects current, otherwise it may be | |
76 | * needlessly killed. | |
77 | */ | |
ad962441 | 78 | ret = mempolicy_nodemask_intersects(tsk, mask); |
6f48d0eb DR |
79 | } else { |
80 | /* | |
81 | * This is not a mempolicy constrained oom, so only | |
82 | * check the mems of tsk's cpuset. | |
83 | */ | |
ad962441 | 84 | ret = cpuset_mems_allowed_intersects(current, tsk); |
6f48d0eb | 85 | } |
ad962441 ON |
86 | if (ret) |
87 | break; | |
1da4db0c | 88 | } |
ad962441 | 89 | rcu_read_unlock(); |
df1090a8 | 90 | |
ad962441 | 91 | return ret; |
6f48d0eb DR |
92 | } |
93 | #else | |
94 | static bool has_intersects_mems_allowed(struct task_struct *tsk, | |
95 | const nodemask_t *mask) | |
96 | { | |
97 | return true; | |
495789a5 | 98 | } |
6f48d0eb | 99 | #endif /* CONFIG_NUMA */ |
495789a5 | 100 | |
6f48d0eb DR |
101 | /* |
102 | * The process p may have detached its own ->mm while exiting or through | |
103 | * use_mm(), but one or more of its subthreads may still have a valid | |
104 | * pointer. Return p, or any of its subthreads with a valid ->mm, with | |
105 | * task_lock() held. | |
106 | */ | |
158e0a2d | 107 | struct task_struct *find_lock_task_mm(struct task_struct *p) |
dd8e8f40 | 108 | { |
1da4db0c | 109 | struct task_struct *t; |
dd8e8f40 | 110 | |
4d4048be ON |
111 | rcu_read_lock(); |
112 | ||
1da4db0c | 113 | for_each_thread(p, t) { |
dd8e8f40 ON |
114 | task_lock(t); |
115 | if (likely(t->mm)) | |
4d4048be | 116 | goto found; |
dd8e8f40 | 117 | task_unlock(t); |
1da4db0c | 118 | } |
4d4048be ON |
119 | t = NULL; |
120 | found: | |
121 | rcu_read_unlock(); | |
dd8e8f40 | 122 | |
4d4048be | 123 | return t; |
dd8e8f40 ON |
124 | } |
125 | ||
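The locking contract above is easy to misuse, so here is a minimal caller sketch (the wrapper function is illustrative, not part of this file): find_lock_task_mm() returns with task_lock() held on whichever thread still has an mm, and the caller must drop that lock itself.

```c
/* Illustrative caller of find_lock_task_mm(); not part of oom_kill.c. */
static unsigned long sample_task_rss(struct task_struct *tsk)
{
	struct task_struct *t;
	unsigned long rss = 0;

	t = find_lock_task_mm(tsk);		/* returns with task_lock(t) held */
	if (t) {
		rss = get_mm_rss(t->mm);	/* t->mm is stable under task_lock */
		task_unlock(t);			/* caller must drop the lock */
	}
	return rss;
}
```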
db2a0dd7 YB |
126 | /* |
127 | * order == -1 means the oom kill was requested via sysrq; otherwise the
128 | * order is used only for display purposes.
129 | */ | |
130 | static inline bool is_sysrq_oom(struct oom_control *oc) | |
131 | { | |
132 | return oc->order == -1; | |
133 | } | |
134 | ||
ab290adb | 135 | /* return true if the task is not an adequate candidate victim. */
e85bfd3a | 136 | static bool oom_unkillable_task(struct task_struct *p, |
2314b42d | 137 | struct mem_cgroup *memcg, const nodemask_t *nodemask) |
ab290adb KM |
138 | { |
139 | if (is_global_init(p)) | |
140 | return true; | |
141 | if (p->flags & PF_KTHREAD) | |
142 | return true; | |
143 | ||
144 | /* When called from mem_cgroup_out_of_memory() and p is not a member of the memcg */
72835c86 | 145 | if (memcg && !task_in_mem_cgroup(p, memcg)) |
ab290adb KM |
146 | return true; |
147 | ||
148 | /* p may not have freeable memory in nodemask */ | |
149 | if (!has_intersects_mems_allowed(p, nodemask)) | |
150 | return true; | |
151 | ||
152 | return false; | |
153 | } | |
154 | ||
1da177e4 | 155 | /** |
a63d83f4 | 156 | * oom_badness - heuristic function to determine which candidate task to kill |
1da177e4 | 157 | * @p: task struct of the task whose badness we should calculate
a63d83f4 | 158 | * @totalpages: total present RAM allowed for page allocation |
1da177e4 | 159 | * |
a63d83f4 DR |
160 | * The heuristic for determining which task to kill is made to be as simple and |
161 | * predictable as possible. The goal is to return the highest value for the | |
162 | * task consuming the most memory to avoid subsequent oom failures. | |
1da177e4 | 163 | */ |
a7f638f9 DR |
164 | unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, |
165 | const nodemask_t *nodemask, unsigned long totalpages) | |
1da177e4 | 166 | { |
1e11ad8d | 167 | long points; |
61eafb00 | 168 | long adj; |
28b83c51 | 169 | |
72835c86 | 170 | if (oom_unkillable_task(p, memcg, nodemask)) |
26ebc984 | 171 | return 0; |
1da177e4 | 172 | |
dd8e8f40 ON |
173 | p = find_lock_task_mm(p); |
174 | if (!p) | |
1da177e4 LT |
175 | return 0; |
176 | ||
a9c58b90 | 177 | adj = (long)p->signal->oom_score_adj; |
61eafb00 | 178 | if (adj == OOM_SCORE_ADJ_MIN) { |
5aecc85a MH |
179 | task_unlock(p); |
180 | return 0; | |
181 | } | |
182 | ||
1da177e4 | 183 | /* |
a63d83f4 | 184 | * The baseline for the badness score is the proportion of RAM that each |
f755a042 | 185 | * task's rss, pagetable and swap space use. |
1da177e4 | 186 | */ |
dc6c9a35 KS |
187 | points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) + |
188 | atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm); | |
a63d83f4 | 189 | task_unlock(p); |
1da177e4 LT |
190 | |
191 | /* | |
a63d83f4 DR |
192 | * Root processes get 3% bonus, just like the __vm_enough_memory() |
193 | * implementation used by LSMs. | |
1da177e4 | 194 | */ |
a63d83f4 | 195 | if (has_capability_noaudit(p, CAP_SYS_ADMIN)) |
778c14af | 196 | points -= (points * 3) / 100; |
1da177e4 | 197 | |
61eafb00 DR |
198 | /* Normalize to oom_score_adj units */ |
199 | adj *= totalpages / 1000; | |
200 | points += adj; | |
1da177e4 | 201 | |
f19e8aa1 | 202 | /* |
a7f638f9 DR |
203 | * Never return 0 for an eligible task regardless of the root bonus and |
204 | * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here). | |
f19e8aa1 | 205 | */ |
1e11ad8d | 206 | return points > 0 ? points : 1; |
1da177e4 LT |
207 | } |
208 | ||
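To make the heuristic above concrete, here is a small userspace sketch of the same arithmetic (the standalone function and the numbers are illustrative, not kernel code):

```c
/* Userspace sketch of the oom_badness() arithmetic shown above. */
#include <stdio.h>

static long badness(long rss, long swapents, long ptes,
		    long adj, unsigned long totalpages, int is_root)
{
	long points = rss + swapents + ptes;	/* memory footprint in pages */

	if (is_root)				/* 3% bonus for CAP_SYS_ADMIN */
		points -= (points * 3) / 100;
	points += adj * (long)(totalpages / 1000); /* normalize oom_score_adj */
	return points > 0 ? points : 1;		/* eligible tasks never score 0 */
}

int main(void)
{
	/* 16 GiB of 4 KiB pages; a task using ~4 GiB with oom_score_adj = 300 */
	printf("%ld\n", badness(1048576, 0, 2048, 300, 4194304, 0));
	return 0;
}
```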
9b0f8b04 CL |
209 | /* |
210 | * Determine the type of allocation constraint. | |
211 | */ | |
9b0f8b04 | 212 | #ifdef CONFIG_NUMA |
6e0fc46d DR |
213 | static enum oom_constraint constrained_alloc(struct oom_control *oc, |
214 | unsigned long *totalpages) | |
4365a567 | 215 | { |
54a6eb5c | 216 | struct zone *zone; |
dd1a239f | 217 | struct zoneref *z; |
6e0fc46d | 218 | enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask); |
a63d83f4 DR |
219 | bool cpuset_limited = false; |
220 | int nid; | |
9b0f8b04 | 221 | |
a63d83f4 DR |
222 | /* Default to all available memory */ |
223 | *totalpages = totalram_pages + total_swap_pages; | |
224 | ||
6e0fc46d | 225 | if (!oc->zonelist) |
a63d83f4 | 226 | return CONSTRAINT_NONE; |
4365a567 KH |
227 | /* |
228 | * Reach here only when __GFP_NOFAIL is used, so we should avoid killing
229 | * current. We have to fall back to a random task kill in this case.
230 | * CONSTRAINT_THISNODE would be ideal, but there is no way to handle it now.
231 | */ | |
6e0fc46d | 232 | if (oc->gfp_mask & __GFP_THISNODE) |
4365a567 | 233 | return CONSTRAINT_NONE; |
9b0f8b04 | 234 | |
4365a567 | 235 | /* |
a63d83f4 DR |
236 | * This is not a __GFP_THISNODE allocation, so a truncated nodemask in |
237 | * the page allocator means a mempolicy is in effect. Cpuset policy | |
238 | * is enforced in get_page_from_freelist(). | |
4365a567 | 239 | */ |
6e0fc46d DR |
240 | if (oc->nodemask && |
241 | !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) { | |
a63d83f4 | 242 | *totalpages = total_swap_pages; |
6e0fc46d | 243 | for_each_node_mask(nid, *oc->nodemask) |
a63d83f4 | 244 | *totalpages += node_spanned_pages(nid); |
9b0f8b04 | 245 | return CONSTRAINT_MEMORY_POLICY; |
a63d83f4 | 246 | } |
4365a567 KH |
247 | |
248 | /* Check this allocation failure is caused by cpuset's wall function */ | |
6e0fc46d DR |
249 | for_each_zone_zonelist_nodemask(zone, z, oc->zonelist, |
250 | high_zoneidx, oc->nodemask) | |
251 | if (!cpuset_zone_allowed(zone, oc->gfp_mask)) | |
a63d83f4 | 252 | cpuset_limited = true; |
9b0f8b04 | 253 | |
a63d83f4 DR |
254 | if (cpuset_limited) { |
255 | *totalpages = total_swap_pages; | |
256 | for_each_node_mask(nid, cpuset_current_mems_allowed) | |
257 | *totalpages += node_spanned_pages(nid); | |
258 | return CONSTRAINT_CPUSET; | |
259 | } | |
9b0f8b04 CL |
260 | return CONSTRAINT_NONE; |
261 | } | |
4365a567 | 262 | #else |
6e0fc46d DR |
263 | static enum oom_constraint constrained_alloc(struct oom_control *oc, |
264 | unsigned long *totalpages) | |
4365a567 | 265 | { |
a63d83f4 | 266 | *totalpages = totalram_pages + total_swap_pages; |
4365a567 KH |
267 | return CONSTRAINT_NONE; |
268 | } | |
269 | #endif | |
9b0f8b04 | 270 | |
6e0fc46d DR |
271 | enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, |
272 | struct task_struct *task, unsigned long totalpages) | |
462607ec | 273 | { |
6e0fc46d | 274 | if (oom_unkillable_task(task, NULL, oc->nodemask)) |
462607ec DR |
275 | return OOM_SCAN_CONTINUE; |
276 | ||
277 | /* | |
278 | * This task already has access to memory reserves and is being killed. | |
279 | * Don't allow any other task to have access to the reserves. | |
280 | */ | |
281 | if (test_tsk_thread_flag(task, TIF_MEMDIE)) { | |
db2a0dd7 | 282 | if (!is_sysrq_oom(oc)) |
462607ec DR |
283 | return OOM_SCAN_ABORT; |
284 | } | |
285 | if (!task->mm) | |
286 | return OOM_SCAN_CONTINUE; | |
287 | ||
e1e12d2f DR |
288 | /* |
289 | * If task is allocating a lot of memory and has been marked to be | |
290 | * killed first if it triggers an oom, then select it. | |
291 | */ | |
292 | if (oom_task_origin(task)) | |
293 | return OOM_SCAN_SELECT; | |
294 | ||
462607ec DR |
295 | return OOM_SCAN_OK; |
296 | } | |
297 | ||
1da177e4 LT |
298 | /* |
299 | * Simple selection loop. We choose the process with the highest
6b4f2b56 | 300 | * number of 'points'. Returns -1 on scan abort. |
1da177e4 | 301 | */ |
6e0fc46d DR |
302 | static struct task_struct *select_bad_process(struct oom_control *oc, |
303 | unsigned int *ppoints, unsigned long totalpages) | |
1da177e4 | 304 | { |
3a5dda7a | 305 | struct task_struct *g, *p; |
1da177e4 | 306 | struct task_struct *chosen = NULL; |
a7f638f9 | 307 | unsigned long chosen_points = 0; |
1da177e4 | 308 | |
6b0c81b3 | 309 | rcu_read_lock(); |
1da4db0c | 310 | for_each_process_thread(g, p) { |
a63d83f4 | 311 | unsigned int points; |
a49335cc | 312 | |
6e0fc46d | 313 | switch (oom_scan_process_thread(oc, p, totalpages)) { |
462607ec DR |
314 | case OOM_SCAN_SELECT: |
315 | chosen = p; | |
316 | chosen_points = ULONG_MAX; | |
317 | /* fall through */ | |
318 | case OOM_SCAN_CONTINUE: | |
c027a474 | 319 | continue; |
462607ec | 320 | case OOM_SCAN_ABORT: |
6b0c81b3 | 321 | rcu_read_unlock(); |
6b4f2b56 | 322 | return (struct task_struct *)(-1UL); |
462607ec DR |
323 | case OOM_SCAN_OK: |
324 | break; | |
325 | }
6e0fc46d | 326 | points = oom_badness(p, NULL, oc->nodemask, totalpages); |
d49ad935 DR |
327 | if (!points || points < chosen_points) |
328 | continue; | |
329 | /* Prefer thread group leaders for display purposes */ | |
330 | if (points == chosen_points && thread_group_leader(chosen)) | |
331 | continue; | |
332 | ||
333 | chosen = p; | |
334 | chosen_points = points; | |
1da4db0c | 335 | } |
6b0c81b3 DR |
336 | if (chosen) |
337 | get_task_struct(chosen); | |
338 | rcu_read_unlock(); | |
972c4ea5 | 339 | |
a7f638f9 | 340 | *ppoints = chosen_points * 1000 / totalpages; |
1da177e4 LT |
341 | return chosen; |
342 | } | |
343 | ||
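As a worked example of the final scaling (numbers illustrative): with totalpages = 4,194,304 and chosen_points = 1,048,576, *ppoints becomes 1048576 * 1000 / 4194304 = 250, i.e. the chosen victim was charged for about a quarter of the allowed memory.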
fef1bdd6 | 344 | /** |
1b578df0 | 345 | * dump_tasks - dump current memory state of all system tasks |
dad7557e | 346 | * @memcg: current's memory controller, if constrained |
e85bfd3a | 347 | * @nodemask: nodemask passed to page allocator for mempolicy ooms |
1b578df0 | 348 | * |
e85bfd3a DR |
349 | * Dumps the current memory state of all eligible tasks. Tasks not in the same |
350 | * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes | |
351 | * are not shown. | |
de34d965 DR |
352 | * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes, |
353 | * swapents, oom_score_adj value, and name. | |
fef1bdd6 | 354 | */ |
2314b42d | 355 | static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask) |
fef1bdd6 | 356 | { |
c55db957 KM |
357 | struct task_struct *p; |
358 | struct task_struct *task; | |
fef1bdd6 | 359 | |
dc6c9a35 | 360 | pr_info("[ pid ] uid tgid total_vm rss nr_ptes nr_pmds swapents oom_score_adj name\n"); |
6b0c81b3 | 361 | rcu_read_lock(); |
c55db957 | 362 | for_each_process(p) { |
72835c86 | 363 | if (oom_unkillable_task(p, memcg, nodemask)) |
b4416d2b | 364 | continue; |
fef1bdd6 | 365 | |
c55db957 KM |
366 | task = find_lock_task_mm(p); |
367 | if (!task) { | |
6d2661ed | 368 | /* |
74ab7f1d DR |
369 | * This is a kthread or all of p's threads have already |
370 | * detached their mm's. There's no need to report | |
c55db957 | 371 | * them; they can't be oom killed anyway. |
6d2661ed | 372 | */ |
6d2661ed DR |
373 | continue; |
374 | } | |
c55db957 | 375 | |
dc6c9a35 | 376 | pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu %5hd %s\n", |
078de5f7 EB |
377 | task->pid, from_kuid(&init_user_ns, task_uid(task)), |
378 | task->tgid, task->mm->total_vm, get_mm_rss(task->mm), | |
e1f56c89 | 379 | atomic_long_read(&task->mm->nr_ptes), |
dc6c9a35 | 380 | mm_nr_pmds(task->mm), |
de34d965 | 381 | get_mm_counter(task->mm, MM_SWAPENTS), |
a63d83f4 | 382 | task->signal->oom_score_adj, task->comm); |
c55db957 KM |
383 | task_unlock(task); |
384 | } | |
6b0c81b3 | 385 | rcu_read_unlock(); |
fef1bdd6 DR |
386 | } |
387 | ||
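For reference, the two format strings above line up into output shaped like this (values illustrative):

```
[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name
[ 1234]  1000  1234   123456    45678     220       4     1024             0 example
```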
6e0fc46d DR |
388 | static void dump_header(struct oom_control *oc, struct task_struct *p, |
389 | struct mem_cgroup *memcg) | |
1b604d75 | 390 | { |
756a025f | 391 | pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n", |
a0795cd4 | 392 | current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order, |
a63d83f4 | 393 | current->signal->oom_score_adj); |
a0795cd4 | 394 | |
da39da3a | 395 | cpuset_print_current_mems_allowed(); |
1b604d75 | 396 | dump_stack(); |
58cf188e SZ |
397 | if (memcg) |
398 | mem_cgroup_print_oom_info(memcg, p); | |
399 | else | |
400 | show_mem(SHOW_MEM_FILTER_NODES); | |
1b604d75 | 401 | if (sysctl_oom_dump_tasks) |
6e0fc46d | 402 | dump_tasks(memcg, oc->nodemask); |
1b604d75 DR |
403 | } |
404 | ||
5695be14 | 405 | /* |
c32b3cbe | 406 | * Number of OOM victims in flight |
5695be14 | 407 | */ |
c32b3cbe MH |
408 | static atomic_t oom_victims = ATOMIC_INIT(0); |
409 | static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait); | |
5695be14 | 410 | |
c32b3cbe | 411 | bool oom_killer_disabled __read_mostly; |
5695be14 | 412 | |
bc448e89 MH |
413 | #define K(x) ((x) << (PAGE_SHIFT-10)) |
414 | ||
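As a concrete reading of K(): assuming 4 KiB pages (PAGE_SHIFT = 12), K(x) is x << 2, so a count of 256 pages prints as 1024 kB.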
3ef22dff MH |
415 | /* |
416 | * task->mm can be NULL if the task is the exited group leader. So to | |
417 | * determine whether the task is using a particular mm, we examine all the | |
418 | * task's threads: if one of those is using this mm then this task was also | |
419 | * using it. | |
420 | */ | |
421 | static bool process_shares_mm(struct task_struct *p, struct mm_struct *mm) | |
422 | { | |
423 | struct task_struct *t; | |
424 | ||
425 | for_each_thread(p, t) { | |
426 | struct mm_struct *t_mm = READ_ONCE(t->mm); | |
427 | if (t_mm) | |
428 | return t_mm == mm; | |
429 | } | |
430 | return false; | |
431 | } | |
432 | ||
433 | ||
aac45363 MH |
434 | #ifdef CONFIG_MMU |
435 | /* | |
436 | * OOM Reaper kernel thread which tries to reap the memory used by the OOM | |
437 | * victim (if that is possible) to help the OOM killer to move on. | |
438 | */ | |
439 | static struct task_struct *oom_reaper_th; | |
aac45363 | 440 | static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait); |
29c696e1 | 441 | static struct task_struct *oom_reaper_list; |
03049269 MH |
442 | static DEFINE_SPINLOCK(oom_reaper_lock); |
443 | ||
aac45363 | 444 | |
36324a99 | 445 | static bool __oom_reap_task(struct task_struct *tsk) |
aac45363 MH |
446 | { |
447 | struct mmu_gather tlb; | |
448 | struct vm_area_struct *vma; | |
36324a99 MH |
449 | struct mm_struct *mm; |
450 | struct task_struct *p; | |
aac45363 MH |
451 | struct zap_details details = {.check_swap_entries = true, |
452 | .ignore_dirty = true}; | |
453 | bool ret = true; | |
454 | ||
36324a99 MH |
455 | /* |
456 | * Make sure we find the associated mm_struct even when the particular | |
457 | * thread has already terminated and cleared its mm. | |
458 | * We might race with the exit path, so consider our work done if there
459 | * is no mm. | |
460 | */ | |
461 | p = find_lock_task_mm(tsk); | |
462 | if (!p) | |
463 | return true; | |
464 | ||
465 | mm = p->mm; | |
466 | if (!atomic_inc_not_zero(&mm->mm_users)) { | |
467 | task_unlock(p); | |
aac45363 | 468 | return true; |
36324a99 MH |
469 | } |
470 | ||
471 | task_unlock(p); | |
aac45363 MH |
472 | |
473 | if (!down_read_trylock(&mm->mmap_sem)) { | |
474 | ret = false; | |
475 | goto out; | |
476 | } | |
477 | ||
478 | tlb_gather_mmu(&tlb, mm, 0, -1); | |
479 | for (vma = mm->mmap ; vma; vma = vma->vm_next) { | |
480 | if (is_vm_hugetlb_page(vma)) | |
481 | continue; | |
482 | ||
483 | /* | |
484 | * mlocked VMAs require explicit munlocking before unmap. | |
485 | * Let's keep it simple here and skip such VMAs. | |
486 | */ | |
487 | if (vma->vm_flags & VM_LOCKED) | |
488 | continue; | |
489 | ||
490 | /* | |
491 | * Only anonymous pages have a good chance to be dropped | |
492 | * without additional steps which we cannot afford as we | |
493 | * are OOM already. | |
494 | * | |
495 | * We do not even care about fs backed pages because all | |
496 | * which are reclaimable have already been reclaimed and | |
497 | * we do not want to block exit_mmap by keeping mm ref | |
498 | * count elevated without a good reason. | |
499 | */ | |
500 | if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) | |
501 | unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end, | |
502 | &details); | |
503 | } | |
504 | tlb_finish_mmu(&tlb, 0, -1); | |
bc448e89 MH |
505 | pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", |
506 | task_pid_nr(tsk), tsk->comm, | |
507 | K(get_mm_counter(mm, MM_ANONPAGES)), | |
508 | K(get_mm_counter(mm, MM_FILEPAGES)), | |
509 | K(get_mm_counter(mm, MM_SHMEMPAGES))); | |
aac45363 | 510 | up_read(&mm->mmap_sem); |
36324a99 MH |
511 | |
512 | /* | |
449d777d MH |
513 | * This task can be safely ignored because we cannot do much more |
514 | * to release its memory. | |
36324a99 MH |
515 | */ |
516 | tsk->signal->oom_score_adj = OOM_SCORE_ADJ_MIN; | |
aac45363 MH |
517 | out: |
518 | mmput(mm); | |
519 | return ret; | |
520 | } | |
521 | ||
bc448e89 | 522 | #define MAX_OOM_REAP_RETRIES 10 |
36324a99 | 523 | static void oom_reap_task(struct task_struct *tsk) |
aac45363 MH |
524 | { |
525 | int attempts = 0; | |
526 | ||
527 | /* Retry the down_read_trylock(mmap_sem) a few times */ | |
bc448e89 | 528 | while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task(tsk)) |
aac45363 MH |
529 | schedule_timeout_idle(HZ/10); |
530 | ||
bc448e89 MH |
531 | if (attempts > MAX_OOM_REAP_RETRIES) { |
532 | pr_info("oom_reaper: unable to reap pid:%d (%s)\n", | |
533 | task_pid_nr(tsk), tsk->comm); | |
534 | debug_show_all_locks(); | |
535 | } | |
536 | ||
449d777d MH |
537 | /* |
538 | * Clear TIF_MEMDIE because the task is no longer sitting on reasonably
539 | * reclaimable memory, or it is not a good oom victim right now because
540 | * its memory can be released neither by the task itself nor by the
541 | * oom reaper.
542 | */ | |
543 | tsk->oom_reaper_list = NULL; | |
544 | exit_oom_victim(tsk); | |
545 | ||
aac45363 | 546 | /* Drop a reference taken by wake_oom_reaper */ |
36324a99 | 547 | put_task_struct(tsk); |
aac45363 MH |
548 | } |
549 | ||
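Taken together, MAX_OOM_REAP_RETRIES = 10 and the schedule_timeout_idle(HZ/10) between attempts mean the loop above retries the mmap_sem trylock for roughly one second in total before it gives up and dumps lock state.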
550 | static int oom_reaper(void *unused) | |
551 | { | |
e2679606 MH |
552 | set_freezable(); |
553 | ||
aac45363 | 554 | while (true) { |
03049269 | 555 | struct task_struct *tsk = NULL; |
aac45363 | 556 | |
29c696e1 | 557 | wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL); |
03049269 | 558 | spin_lock(&oom_reaper_lock); |
29c696e1 VD |
559 | if (oom_reaper_list != NULL) { |
560 | tsk = oom_reaper_list; | |
561 | oom_reaper_list = tsk->oom_reaper_list; | |
03049269 MH |
562 | } |
563 | spin_unlock(&oom_reaper_lock); | |
564 | ||
565 | if (tsk) | |
566 | oom_reap_task(tsk); | |
aac45363 MH |
567 | } |
568 | ||
569 | return 0; | |
570 | } | |
571 | ||
36324a99 | 572 | static void wake_oom_reaper(struct task_struct *tsk) |
aac45363 | 573 | { |
af8e15cc MH |
574 | if (!oom_reaper_th) |
575 | return; | |
576 | ||
577 | /* tsk is already queued? */ | |
578 | if (tsk == oom_reaper_list || tsk->oom_reaper_list) | |
aac45363 MH |
579 | return; |
580 | ||
36324a99 | 581 | get_task_struct(tsk); |
aac45363 | 582 | |
03049269 | 583 | spin_lock(&oom_reaper_lock); |
29c696e1 VD |
584 | tsk->oom_reaper_list = oom_reaper_list; |
585 | oom_reaper_list = tsk; | |
03049269 MH |
586 | spin_unlock(&oom_reaper_lock); |
587 | wake_up(&oom_reaper_wait); | |
aac45363 MH |
588 | } |
589 | ||
3ef22dff MH |
590 | /*
591 | * Check if we can reap the given task; must be called with a stable tsk->mm.
592 | */
593 | void try_oom_reaper(struct task_struct *tsk) | |
594 | { | |
595 | struct mm_struct *mm = tsk->mm; | |
596 | struct task_struct *p; | |
597 | ||
598 | if (!mm) | |
599 | return; | |
600 | ||
601 | /* | |
602 | * There might be other threads/processes which are either not | |
603 | * dying or even not killable. | |
604 | */ | |
605 | if (atomic_read(&mm->mm_users) > 1) { | |
606 | rcu_read_lock(); | |
607 | for_each_process(p) { | |
608 | bool exiting; | |
609 | ||
610 | if (!process_shares_mm(p, mm)) | |
611 | continue; | |
612 | if (same_thread_group(p, tsk)) | |
613 | continue; | |
614 | if (fatal_signal_pending(p)) | |
615 | continue; | |
616 | ||
617 | /* | |
618 | * If the task is exiting, make sure the whole thread group
619 | * is exiting and cannot access the mm anymore.
620 | */ | |
621 | spin_lock_irq(&p->sighand->siglock); | |
622 | exiting = signal_group_exit(p->signal); | |
623 | spin_unlock_irq(&p->sighand->siglock); | |
624 | if (exiting) | |
625 | continue; | |
626 | ||
627 | /* Give up */ | |
628 | rcu_read_unlock(); | |
629 | return; | |
630 | } | |
631 | rcu_read_unlock(); | |
632 | } | |
633 | ||
634 | wake_oom_reaper(tsk); | |
635 | } | |
636 | ||
aac45363 MH |
637 | static int __init oom_init(void) |
638 | { | |
639 | oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper"); | |
640 | if (IS_ERR(oom_reaper_th)) { | |
641 | pr_err("Unable to start OOM reaper %ld. Continuing regardless\n", | |
642 | PTR_ERR(oom_reaper_th)); | |
643 | oom_reaper_th = NULL; | |
644 | } | |
645 | return 0; | |
646 | } | |
647 | subsys_initcall(oom_init) | |
648 | #else | |
36324a99 | 649 | static void wake_oom_reaper(struct task_struct *tsk) |
aac45363 MH |
650 | { |
651 | } | |
652 | #endif | |
653 | ||
49550b60 | 654 | /** |
16e95196 | 655 | * mark_oom_victim - mark the given task as OOM victim |
49550b60 | 656 | * @tsk: task to mark |
c32b3cbe | 657 | * |
dc56401f | 658 | * Has to be called with oom_lock held and never after |
c32b3cbe | 659 | * oom has been disabled already. |
49550b60 | 660 | */ |
16e95196 | 661 | void mark_oom_victim(struct task_struct *tsk) |
49550b60 | 662 | { |
c32b3cbe MH |
663 | WARN_ON(oom_killer_disabled); |
664 | /* OOM killer might race with memcg OOM */ | |
665 | if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE)) | |
666 | return; | |
63a8ca9b MH |
667 | /* |
668 | * Make sure that the task is woken up from uninterruptible sleep | |
669 | * if it is frozen because OOM killer wouldn't be able to free | |
670 | * any memory and livelock. freezing_slow_path will tell the freezer | |
671 | * that TIF_MEMDIE tasks should be ignored. | |
672 | */ | |
673 | __thaw_task(tsk); | |
c32b3cbe | 674 | atomic_inc(&oom_victims); |
49550b60 MH |
675 | } |
676 | ||
677 | /** | |
16e95196 | 678 | * exit_oom_victim - note the exit of an OOM victim |
49550b60 | 679 | */ |
36324a99 | 680 | void exit_oom_victim(struct task_struct *tsk) |
49550b60 | 681 | { |
36324a99 MH |
682 | if (!test_and_clear_tsk_thread_flag(tsk, TIF_MEMDIE)) |
683 | return; | |
c32b3cbe | 684 | |
c38f1025 | 685 | if (!atomic_dec_return(&oom_victims)) |
c32b3cbe | 686 | wake_up_all(&oom_victims_wait); |
c32b3cbe MH |
687 | } |
688 | ||
689 | /** | |
690 | * oom_killer_disable - disable OOM killer | |
691 | * | |
692 | * Forces all page allocations to fail rather than trigger OOM killer. | |
693 | * Will block and wait until all OOM victims are killed. | |
694 | * | |
695 | * The function cannot be called when there are runnable user tasks because | |
696 | * userspace would see unexpected allocation failures as a result. Any
697 | * new use of this function should be discussed with MM people.
698 | * | |
699 | * Returns true if successful and false if the OOM killer cannot be | |
700 | * disabled. | |
701 | */ | |
702 | bool oom_killer_disable(void) | |
703 | { | |
704 | /* | |
6afcf289 TH |
705 | * Make sure to not race with an ongoing OOM killer. Check that the |
706 | * current is not killed (possibly due to sharing the victim's memory). | |
c32b3cbe | 707 | */ |
6afcf289 | 708 | if (mutex_lock_killable(&oom_lock)) |
c32b3cbe | 709 | return false; |
c32b3cbe | 710 | oom_killer_disabled = true; |
dc56401f | 711 | mutex_unlock(&oom_lock); |
c32b3cbe MH |
712 | |
713 | wait_event(oom_victims_wait, !atomic_read(&oom_victims)); | |
714 | ||
715 | return true; | |
716 | } | |
717 | ||
718 | /** | |
719 | * oom_killer_enable - enable OOM killer | |
720 | */ | |
721 | void oom_killer_enable(void) | |
722 | { | |
c32b3cbe | 723 | oom_killer_disabled = false; |
49550b60 MH |
724 | } |
725 | ||
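A sketch of the intended disable/enable pairing, modeled on a suspend-style caller (the surrounding function is illustrative; only oom_killer_disable() and oom_killer_enable() are from this file):

```c
/* Illustrative suspend-style caller; not part of oom_kill.c. */
static int do_work_with_oom_disabled(void)
{
	if (!oom_killer_disable())	/* blocks until oom_victims drops to 0 */
		return -EBUSY;		/* lost a race with an ongoing OOM kill */

	/* ... user tasks quiesced; allocations now fail instead of OOM ... */

	oom_killer_enable();
	return 0;
}
```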
6b0c81b3 DR |
726 | /* |
727 | * Must be called while holding a reference to p, which will be released upon | |
728 | * returning. | |
729 | */ | |
6e0fc46d | 730 | void oom_kill_process(struct oom_control *oc, struct task_struct *p, |
9cbb78bb | 731 | unsigned int points, unsigned long totalpages, |
6e0fc46d | 732 | struct mem_cgroup *memcg, const char *message) |
1da177e4 | 733 | { |
52d3c036 | 734 | struct task_struct *victim = p; |
5e9d834a | 735 | struct task_struct *child; |
1da4db0c | 736 | struct task_struct *t; |
647f2bdf | 737 | struct mm_struct *mm; |
52d3c036 | 738 | unsigned int victim_points = 0; |
dc3f21ea DR |
739 | static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL, |
740 | DEFAULT_RATELIMIT_BURST); | |
bb29902a | 741 | bool can_oom_reap = true; |
1da177e4 | 742 | |
50ec3bbf NP |
743 | /* |
744 | * If the task is already exiting, don't alarm the sysadmin or kill | |
745 | * its children or threads, just set TIF_MEMDIE so it can die quickly | |
746 | */ | |
83363b91 MH |
747 | task_lock(p); |
748 | if (p->mm && task_will_free_mem(p)) { | |
16e95196 | 749 | mark_oom_victim(p); |
3ef22dff | 750 | try_oom_reaper(p); |
83363b91 | 751 | task_unlock(p); |
6b0c81b3 | 752 | put_task_struct(p); |
2a1c9b1f | 753 | return; |
50ec3bbf | 754 | } |
83363b91 | 755 | task_unlock(p); |
50ec3bbf | 756 | |
dc3f21ea | 757 | if (__ratelimit(&oom_rs)) |
6e0fc46d | 758 | dump_header(oc, p, memcg); |
8447d950 | 759 | |
f0d6647e | 760 | pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n", |
5e9d834a | 761 | message, task_pid_nr(p), p->comm, points); |
f3af38d3 | 762 | |
5e9d834a DR |
763 | /* |
764 | * If any of p's children has a different mm and is eligible for kill, | |
11239836 | 765 | * the one with the highest oom_badness() score is sacrificed for its |
5e9d834a DR |
766 | * parent. This attempts to lose the minimal amount of work done while |
767 | * still freeing memory. | |
768 | */ | |
6b0c81b3 | 769 | read_lock(&tasklist_lock); |
1da4db0c | 770 | for_each_thread(p, t) { |
5e9d834a | 771 | list_for_each_entry(child, &t->children, sibling) { |
a63d83f4 | 772 | unsigned int child_points; |
5e9d834a | 773 | |
4d7b3394 | 774 | if (process_shares_mm(child, p->mm)) |
edd45544 | 775 | continue; |
a63d83f4 DR |
776 | /* |
777 | * oom_badness() returns 0 if the thread is unkillable | |
778 | */ | |
6e0fc46d | 779 | child_points = oom_badness(child, memcg, oc->nodemask, |
a63d83f4 | 780 | totalpages); |
5e9d834a | 781 | if (child_points > victim_points) { |
6b0c81b3 | 782 | put_task_struct(victim); |
5e9d834a DR |
783 | victim = child; |
784 | victim_points = child_points; | |
6b0c81b3 | 785 | get_task_struct(victim); |
5e9d834a | 786 | } |
dd8e8f40 | 787 | } |
1da4db0c | 788 | } |
6b0c81b3 | 789 | read_unlock(&tasklist_lock); |
dd8e8f40 | 790 | |
6b0c81b3 DR |
791 | p = find_lock_task_mm(victim); |
792 | if (!p) { | |
6b0c81b3 | 793 | put_task_struct(victim); |
647f2bdf | 794 | return; |
6b0c81b3 DR |
795 | } else if (victim != p) { |
796 | get_task_struct(p); | |
797 | put_task_struct(victim); | |
798 | victim = p; | |
799 | } | |
647f2bdf | 800 | |
880b7689 | 801 | /* Get a reference to safely compare mm after task_unlock(victim) */ |
647f2bdf | 802 | mm = victim->mm; |
880b7689 | 803 | atomic_inc(&mm->mm_count); |
426fb5e7 TH |
804 | /* |
805 | * We should send SIGKILL before setting TIF_MEMDIE in order to prevent | |
806 | * the OOM victim from depleting the memory reserves from the user | |
807 | * space under its control. | |
808 | */ | |
809 | do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true); | |
16e95196 | 810 | mark_oom_victim(victim); |
eca56ff9 | 811 | pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", |
647f2bdf DR |
812 | task_pid_nr(victim), victim->comm, K(victim->mm->total_vm), |
813 | K(get_mm_counter(victim->mm, MM_ANONPAGES)), | |
eca56ff9 JM |
814 | K(get_mm_counter(victim->mm, MM_FILEPAGES)), |
815 | K(get_mm_counter(victim->mm, MM_SHMEMPAGES))); | |
647f2bdf DR |
816 | task_unlock(victim); |
817 | ||
818 | /* | |
819 | * Kill all user processes sharing victim->mm in other thread groups, if | |
820 | * any. They don't get access to memory reserves, though, to avoid | |
821 | * depletion of all memory. This prevents mm->mmap_sem livelock when an | |
822 | * oom killed thread cannot exit because it requires the semaphore and | |
823 | * it's contended by another thread trying to allocate memory itself.
824 | * That thread will now get access to memory reserves since it has a | |
825 | * pending fatal signal. | |
826 | */ | |
4d4048be | 827 | rcu_read_lock(); |
c319025a | 828 | for_each_process(p) { |
4d7b3394 | 829 | if (!process_shares_mm(p, mm)) |
c319025a ON |
830 | continue; |
831 | if (same_thread_group(p, victim)) | |
832 | continue; | |
aac45363 MH |
833 | if (unlikely(p->flags & PF_KTHREAD) || is_global_init(p) || |
834 | p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) { | |
835 | /* | |
836 | * We cannot use oom_reaper for the mm shared by this | |
837 | * process because it wouldn't get killed and so the | |
838 | * memory might be still used. | |
839 | */ | |
840 | can_oom_reap = false; | |
c319025a | 841 | continue; |
aac45363 | 842 | } |
c319025a ON |
843 | do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true); |
844 | } | |
6b0c81b3 | 845 | rcu_read_unlock(); |
647f2bdf | 846 | |
aac45363 | 847 | if (can_oom_reap) |
36324a99 | 848 | wake_oom_reaper(victim); |
aac45363 | 849 | |
880b7689 | 850 | mmdrop(mm); |
6b0c81b3 | 851 | put_task_struct(victim); |
1da177e4 | 852 | } |
647f2bdf | 853 | #undef K |
1da177e4 | 854 | |
309ed882 DR |
855 | /* |
856 | * Determines whether the kernel must panic because of the panic_on_oom sysctl. | |
857 | */ | |
6e0fc46d | 858 | void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint, |
2415b9f5 | 859 | struct mem_cgroup *memcg) |
309ed882 DR |
860 | { |
861 | if (likely(!sysctl_panic_on_oom)) | |
862 | return; | |
863 | if (sysctl_panic_on_oom != 2) { | |
864 | /* | |
865 | * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel | |
866 | * does not panic for cpuset, mempolicy, or memcg allocation | |
867 | * failures. | |
868 | */ | |
869 | if (constraint != CONSTRAINT_NONE) | |
870 | return; | |
871 | } | |
071a4bef | 872 | /* Do not panic for oom kills triggered by sysrq */ |
db2a0dd7 | 873 | if (is_sysrq_oom(oc)) |
071a4bef | 874 | return; |
6e0fc46d | 875 | dump_header(oc, NULL, memcg); |
309ed882 DR |
876 | panic("Out of memory: %s panic_on_oom is enabled\n", |
877 | sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); | |
878 | } | |
879 | ||
8bc719d3 MS |
880 | static BLOCKING_NOTIFIER_HEAD(oom_notify_list); |
881 | ||
882 | int register_oom_notifier(struct notifier_block *nb) | |
883 | { | |
884 | return blocking_notifier_chain_register(&oom_notify_list, nb); | |
885 | } | |
886 | EXPORT_SYMBOL_GPL(register_oom_notifier); | |
887 | ||
888 | int unregister_oom_notifier(struct notifier_block *nb) | |
889 | { | |
890 | return blocking_notifier_chain_unregister(&oom_notify_list, nb); | |
891 | } | |
892 | EXPORT_SYMBOL_GPL(unregister_oom_notifier); | |
893 | ||
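A minimal sketch of hooking the chain above (my_cache_shrink() is a placeholder, not a kernel API): the callback receives a pointer to the freed-pages counter that out_of_memory() sums, so it reports reclaim by adding to *parm.

```c
/* Illustrative OOM notifier; my_cache_shrink() is hypothetical. */
static int my_oom_notify(struct notifier_block *nb,
			 unsigned long unused, void *parm)
{
	unsigned long *freed = parm;	/* checked by out_of_memory() */

	*freed += my_cache_shrink();	/* pages released by our cache */
	return NOTIFY_OK;
}

static struct notifier_block my_oom_nb = {
	.notifier_call = my_oom_notify,
};

/* register_oom_notifier(&my_oom_nb) at init, unregister_oom_notifier() at exit. */
```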
1da177e4 | 894 | /** |
6e0fc46d DR |
895 | * out_of_memory - kill the "best" process when we run out of memory |
896 | * @oc: pointer to struct oom_control | |
1da177e4 LT |
897 | * |
898 | * If we run out of memory, we have the choice between either | |
899 | * killing a random task (bad), letting the system crash (worse) | |
900 | * or trying to be smart about which process to kill. Note that we
901 | * don't have to be perfect here, we just have to be good. | |
902 | */ | |
6e0fc46d | 903 | bool out_of_memory(struct oom_control *oc) |
1da177e4 | 904 | { |
0aad4b31 | 905 | struct task_struct *p; |
a63d83f4 | 906 | unsigned long totalpages; |
8bc719d3 | 907 | unsigned long freed = 0; |
9cbb78bb | 908 | unsigned int uninitialized_var(points); |
e3658932 | 909 | enum oom_constraint constraint = CONSTRAINT_NONE; |
8bc719d3 | 910 | |
dc56401f JW |
911 | if (oom_killer_disabled) |
912 | return false; | |
913 | ||
8bc719d3 MS |
914 | blocking_notifier_call_chain(&oom_notify_list, 0, &freed); |
915 | if (freed > 0) | |
916 | /* Got some memory back in the last second. */ | |
75e8f8b2 | 917 | return true; |
1da177e4 | 918 | |
7b98c2e4 | 919 | /* |
9ff4868e DR |
920 | * If current has a pending SIGKILL or is exiting, then automatically |
921 | * select it. The goal is to allow it to allocate so that it may | |
922 | * quickly exit and free its memory. | |
d7a94e7e TH |
923 | * |
924 | * But don't select if current has already released its mm and cleared | |
925 | * TIF_MEMDIE flag at exit_mm(), otherwise an OOM livelock may occur. | |
7b98c2e4 | 926 | */ |
d7a94e7e TH |
927 | if (current->mm && |
928 | (fatal_signal_pending(current) || task_will_free_mem(current))) { | |
16e95196 | 929 | mark_oom_victim(current); |
3ef22dff | 930 | try_oom_reaper(current); |
75e8f8b2 | 931 | return true; |
7b98c2e4 DR |
932 | } |
933 | ||
3da88fb3 MH |
934 | /* |
935 | * The OOM killer does not compensate for IO-less reclaim. | |
936 | * pagefault_out_of_memory lost its gfp context so we have to | |
937 | * make sure to exclude the 0 mask - all other users should have at least
938 | * ___GFP_DIRECT_RECLAIM to get here. | |
939 | */ | |
940 | if (oc->gfp_mask && !(oc->gfp_mask & (__GFP_FS|__GFP_NOFAIL))) | |
941 | return true; | |
942 | ||
9b0f8b04 CL |
943 | /* |
944 | * Check if there were limitations on the allocation (only relevant for | |
945 | * NUMA) that may require different handling. | |
946 | */ | |
6e0fc46d DR |
947 | constraint = constrained_alloc(oc, &totalpages); |
948 | if (constraint != CONSTRAINT_MEMORY_POLICY) | |
949 | oc->nodemask = NULL; | |
950 | check_panic_on_oom(oc, constraint, NULL); | |
0aad4b31 | 951 | |
121d1ba0 | 952 | if (sysctl_oom_kill_allocating_task && current->mm && |
6e0fc46d | 953 | !oom_unkillable_task(current, NULL, oc->nodemask) && |
121d1ba0 | 954 | current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) { |
6b0c81b3 | 955 | get_task_struct(current); |
6e0fc46d | 956 | oom_kill_process(oc, current, 0, totalpages, NULL, |
2a1c9b1f | 957 | "Out of memory (oom_kill_allocating_task)"); |
75e8f8b2 | 958 | return true; |
0aad4b31 DR |
959 | } |
960 | ||
6e0fc46d | 961 | p = select_bad_process(oc, &points, totalpages); |
0aad4b31 | 962 | /* Found nothing?!?! Either we hang forever, or we panic. */ |
db2a0dd7 | 963 | if (!p && !is_sysrq_oom(oc)) { |
6e0fc46d | 964 | dump_header(oc, NULL, NULL); |
0aad4b31 DR |
965 | panic("Out of memory and no killable processes...\n"); |
966 | } | |
071a4bef | 967 | if (p && p != (void *)-1UL) { |
6e0fc46d DR |
968 | oom_kill_process(oc, p, points, totalpages, NULL, |
969 | "Out of memory"); | |
75e8f8b2 DR |
970 | /* |
971 | * Give the killed process a good chance to exit before trying | |
972 | * to allocate memory again. | |
973 | */ | |
4f774b91 | 974 | schedule_timeout_killable(1); |
75e8f8b2 | 975 | } |
dc56401f | 976 | return true; |
c32b3cbe MH |
977 | } |
978 | ||
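A condensed sketch of how a caller drives this entry point under oom_lock, patterned on the allocator slow path and on pagefault_out_of_memory() below (the wrapper itself is illustrative):

```c
/* Illustrative caller of out_of_memory(); not the allocator's actual code. */
static bool try_oom(struct zonelist *zonelist, nodemask_t *nodemask,
		    gfp_t gfp_mask, int order)
{
	struct oom_control oc = {
		.zonelist = zonelist,
		.nodemask = nodemask,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret;

	if (!mutex_trylock(&oom_lock))	/* parallel OOM kill in flight */
		return true;		/* treat it as progress being made */
	ret = out_of_memory(&oc);
	mutex_unlock(&oom_lock);
	return ret;
}
```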
e3658932 DR |
979 | /* |
980 | * The pagefault handler calls here because it is out of memory, so kill a | |
efacd02e DR |
981 | * memory-hogging task. If oom_lock is already held, a parallel oom
982 | * killing is already in progress, so do nothing.
e3658932 DR |
983 | */ |
984 | void pagefault_out_of_memory(void) | |
985 | { | |
6e0fc46d DR |
986 | struct oom_control oc = { |
987 | .zonelist = NULL, | |
988 | .nodemask = NULL, | |
989 | .gfp_mask = 0, | |
990 | .order = 0, | |
6e0fc46d DR |
991 | }; |
992 | ||
49426420 | 993 | if (mem_cgroup_oom_synchronize(true)) |
dc56401f | 994 | return; |
3812c8c8 | 995 | |
dc56401f JW |
996 | if (!mutex_trylock(&oom_lock)) |
997 | return; | |
c32b3cbe | 998 | |
6e0fc46d | 999 | if (!out_of_memory(&oc)) { |
dc56401f JW |
1000 | /* |
1001 | * There shouldn't be any user tasks runnable while the | |
1002 | * OOM killer is disabled, so the current task has to | |
1003 | * be a racing OOM victim which oom_killer_disable()
1004 | * is waiting for.
1005 | */ | |
1006 | WARN_ON(test_thread_flag(TIF_MEMDIE)); | |
e3658932 | 1007 | } |
dc56401f JW |
1008 | |
1009 | mutex_unlock(&oom_lock); | |
e3658932 | 1010 | } |