oom: remove constraint argument from select_bad_process and __out_of_memory
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
static DEFINE_SPINLOCK(zone_scan_lock);
/* #define DEBUG */

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @tsk: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct task_struct *start = tsk;

	do {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant. Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			if (mempolicy_nodemask_intersects(tsk, mask))
				return true;
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			if (cpuset_mems_allowed_intersects(current, tsk))
				return true;
		}
		tsk = next_thread(tsk);
	} while (tsk != start);
	return false;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */
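
/*
 * Illustrative example (not part of the original source): on a four-node
 * system, suppose current hits OOM while allocating under an MPOL_BIND
 * policy restricted to nodes {2,3}, so @mask is {2,3}.  A candidate task
 * whose mempolicy only allows nodes {0,1} cannot free memory that current
 * can use, so has_intersects_mems_allowed() returns false and the task is
 * skipped; a task bound to nodes {1,2} intersects the mask at node 2 and
 * stays eligible.  Without a mask, only the cpuset mems are compared.
 */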

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
static struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t = p;

	do {
		task_lock(t);
		if (likely(t->mm))
			return t;
		task_unlock(t);
	} while_each_thread(p, t);

	return NULL;
}
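
/*
 * Typical caller pattern (sketch, mirroring how badness() and dump_tasks()
 * below use this helper): the returned thread is task_lock()ed, so the
 * caller must drop the lock once it is done looking at ->mm.
 *
 *	struct task_struct *t = find_lock_task_mm(p);
 *	if (t) {
 *		... inspect t->mm, e.g. t->mm->total_vm ...
 *		task_unlock(t);
 *	}
 */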

/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of the task whose badness we should calculate
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum number of processes (one)
 * 5) we try to kill the process the user expects us to kill, this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
 */
unsigned long badness(struct task_struct *p, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time;
	struct task_struct *child;
	struct task_struct *c, *t;
	int oom_adj = p->signal->oom_adj;
	struct task_cputime task_time;
	unsigned long utime;
	unsigned long stime;

	if (oom_adj == OOM_DISABLE)
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = p->mm->total_vm;
	task_unlock(p);

	/*
	 * swapoff can easily use up all memory, so kill those first.
	 */
	if (p->flags & PF_OOM_ORIGIN)
		return ULONG_MAX;

	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice. We add half the vmsize of the children if they
	 * have an own mm. This prevents forking servers to flood the
	 * machine with an endless amount of children. In case a single
	 * child is eating the vast majority of memory, adding only half
	 * to the parents will make the child our kill candidate of choice.
	 */
	t = p;
	do {
		list_for_each_entry(c, &t->children, sibling) {
			child = find_lock_task_mm(c);
			if (child) {
				if (child->mm != p->mm)
					points += child->mm->total_vm/2 + 1;
				task_unlock(child);
			}
		}
	} while_each_thread(p, t);

	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds. There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
	thread_group_cputime(p, &task_time);
	utime = cputime_to_jiffies(task_time.utime);
	stime = cputime_to_jiffies(task_time.stime);
	cpu_time = (utime + stime) >> (SHIFT_HZ + 3);

	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	if (cpu_time)
		points /= int_sqrt(cpu_time);
	if (run_time)
		points /= int_sqrt(int_sqrt(run_time));

	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN) ||
	    has_capability_noaudit(p, CAP_SYS_RESOURCE))
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (has_capability_noaudit(p, CAP_SYS_RAWIO))
		points /= 4;

	/*
	 * Adjust the score by oom_adj.
	 */
	if (oom_adj) {
		if (oom_adj > 0) {
			if (!points)
				points = 1;
			points <<= oom_adj;
		} else
			points >>= -(oom_adj);
	}

#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
		p->pid, p->comm, points);
#endif
	return points;
}
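
/*
 * Worked example with made-up numbers (not part of the original source):
 * a task with total_vm = 80000 pages, no children with their own mm,
 * negligible CPU and run time, not niced, and no special capabilities
 * starts at 80000 points.  With oom_adj = +2 the score is shifted left
 * by 2 to 320000; with oom_adj = -2 it is shifted right to 20000.  An
 * oom_adj of OOM_DISABLE (-17) exempts the task entirely, since badness()
 * returns 0 before the memory size is even read.
 */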

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);

	/*
	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current and have to kill a random task instead.  Ideally
	 * this would be CONSTRAINT_THISNODE, but there is no way to handle
	 * that yet.
	 */
	if (gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * The nodemask here is a nodemask passed to alloc_pages().  The
	 * cpuset doesn't use this nodemask for its hardwall/softwall/hierarchy
	 * feature; mempolicy is the only user of the nodemask here.
	 * Check whether mempolicy's nodemask contains all N_HIGH_MEMORY nodes.
	 */
	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask))
		return CONSTRAINT_MEMORY_POLICY;

	/* Check if this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
			high_zoneidx, nodemask)
		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
			return CONSTRAINT_CPUSET;

	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	return CONSTRAINT_NONE;
}
#endif
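
/*
 * Example scenario (illustrative, not from the original source): on a
 * machine whose high-memory nodes are {0,1,2,3}, an allocation made under
 * an MPOL_BIND nodemask of {1,2} fails the nodes_subset() test above and
 * is classified as CONSTRAINT_MEMORY_POLICY; out_of_memory() then passes
 * that nodemask down so only tasks whose mempolicy intersects {1,2} are
 * considered for killing.  An allocation rejected by a cpuset softwall
 * yields CONSTRAINT_CPUSET instead.
 */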

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller will lock the tasklist.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned long *ppoints,
		struct mem_cgroup *mem, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;
	*ppoints = 0;

	do_posix_clock_monotonic_gettime(&uptime);
	for_each_process(p) {
		unsigned long points;

		/* skip the init task and kthreads */
		if (is_global_init(p) || (p->flags & PF_KTHREAD))
			continue;
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;
		if (!has_intersects_mems_allowed(p, nodemask))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE))
			return ERR_PTR(-1UL);

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an easy OOM deadlock.
		 */
		if ((p->flags & PF_EXITING) && p->mm) {
			if (p != current)
				return ERR_PTR(-1UL);

			chosen = p;
			*ppoints = ULONG_MAX;
		}

		if (p->signal->oom_adj == OOM_DISABLE)
			continue;

		points = badness(p, uptime.tv_sec);
		if (points > *ppoints || !chosen) {
			chosen = p;
			*ppoints = points;
		}
	}

	return chosen;
}
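
/*
 * Return value conventions (descriptive note summarising the loop above):
 * NULL means no eligible task was found; ERR_PTR(-1UL) means a task is
 * already dying or exiting, so the caller should back off and let it
 * finish instead of killing anything else; any other pointer is the
 * chosen victim, with its score stored in *ppoints.
 */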

/**
 * dump_tasks - dump current memory state of all system tasks
 * @mem: current's memory controller, if constrained
 *
 * Dumps the current memory state of all system tasks, excluding kernel threads.
 * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
 * score, and name.
 *
 * If @mem is non-NULL, only tasks that are a member of the mem_cgroup are
 * shown.
 *
 * Call with tasklist_lock read-locked.
 */
static void dump_tasks(const struct mem_cgroup *mem)
{
	struct task_struct *p;
	struct task_struct *task;

	printk(KERN_INFO "[ pid ]   uid  tgid total_vm      rss cpu oom_adj "
		"name\n");
	for_each_process(p) {
		if (p->flags & PF_KTHREAD)
			continue;
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3u %3d %s\n",
			task->pid, __task_cred(task)->uid, task->tgid,
			task->mm->total_vm, get_mm_rss(task->mm),
			task_cpu(task), task->signal->oom_adj, task->comm);
		task_unlock(task);
	}
}

static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
			struct mem_cgroup *mem)
{
	task_lock(current);
	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
		"oom_adj=%d\n",
		current->comm, gfp_mask, order, current->signal->oom_adj);
	cpuset_print_task_mems_allowed(current);
	task_unlock(current);
	dump_stack();
	mem_cgroup_print_oom_info(mem, p);
	show_mem();
	if (sysctl_oom_dump_tasks)
		dump_tasks(mem);
}

#define K(x) ((x) << (PAGE_SHIFT-10))
static int oom_kill_task(struct task_struct *p)
{
	p = find_lock_task_mm(p);
	if (!p)
		return 1;
	if (p->signal->oom_adj == OOM_DISABLE) {
		task_unlock(p);
		return 1;
	}
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
		task_pid_nr(p), p->comm, K(p->mm->total_vm),
		K(get_mm_counter(p->mm, MM_ANONPAGES)),
		K(get_mm_counter(p->mm, MM_FILEPAGES)));
	task_unlock(p);

	p->rt.time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);
	force_sig(SIGKILL, p);
	return 0;
}
#undef K
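
/*
 * Note on the K() macro above (illustrative): with the common PAGE_SHIFT
 * of 12 (4 KiB pages), K(x) is x << 2, i.e. the page count multiplied by
 * 4 to give kilobytes.  For example, a total_vm of 25000 pages prints as
 * 100000 kB in the "Killed process" message.
 */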

static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			    unsigned long points, struct mem_cgroup *mem,
			    const char *message)
{
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t = p;
	unsigned long victim_points = 0;
	struct timespec uptime;

	if (printk_ratelimit())
		dump_header(p, gfp_mask, order, mem);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		set_tsk_thread_flag(p, TIF_MEMDIE);
		return 0;
	}

	task_lock(p);
	pr_err("%s: Kill process %d (%s) score %lu or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);
	task_unlock(p);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	do_posix_clock_monotonic_gettime(&uptime);
	do {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned long child_points;

			if (child->mm == p->mm)
				continue;
			if (mem && !task_in_mem_cgroup(child, mem))
				continue;

			/* badness() returns 0 if the thread is unkillable */
			child_points = badness(child, uptime.tv_sec);
			if (child_points > victim_points) {
				victim = child;
				victim_points = child_points;
			}
		}
	} while_each_thread(p, t);

	return oom_kill_task(victim);
}

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
				int order)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	read_lock(&tasklist_lock);
	dump_header(NULL, gfp_mask, order, NULL);
	read_unlock(&tasklist_lock);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
	unsigned long points = 0;
	struct task_struct *p;

	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0);
	read_lock(&tasklist_lock);
retry:
	p = select_bad_process(&points, mem, NULL);
	if (!p || PTR_ERR(p) == -1UL)
		goto out;

	if (oom_kill_process(p, gfp_mask, 0, points, mem,
				"Memory cgroup out of memory"))
		goto retry;
out:
	read_unlock(&tasklist_lock);
}
#endif

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	}

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/*
		 * Lock each zone in the zonelist under zone_scan_lock so a
		 * parallel invocation of try_set_zonelist_oom() doesn't succeed
		 * when it shouldn't.
		 */
		zone_set_flag(zone, ZONE_OOM_LOCKED);
	}

out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now recall the OOM
 * killer, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	}
	spin_unlock(&zone_scan_lock);
}
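
/*
 * Usage sketch (roughly how the page allocator is expected to pair these
 * two helpers; the surrounding error handling in mm/page_alloc.c is
 * omitted and the local variable names are illustrative):
 *
 *	if (try_set_zonelist_oom(zonelist, gfp_mask)) {
 *		out_of_memory(zonelist, gfp_mask, order, nodemask);
 *		clear_zonelist_oom(zonelist, gfp_mask);
 *	}
 *
 * If try_set_zonelist_oom() fails, another OOM kill covering these zones
 * is already in flight and the caller should simply retry its allocation.
 */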

/*
 * Try to acquire the oom killer lock for all system zones.  Returns zero if a
 * parallel oom killing is taking place, otherwise locks all zones and returns
 * non-zero.
 */
static int try_set_system_oom(void)
{
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_populated_zone(zone)
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	for_each_populated_zone(zone)
		zone_set_flag(zone, ZONE_OOM_LOCKED);
out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears ZONE_OOM_LOCKED for all system zones so that failed allocation
 * attempts or page faults may now recall the oom killer, if necessary.
 */
static void clear_system_oom(void)
{
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_populated_zone(zone)
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	spin_unlock(&zone_scan_lock);
}

/*
 * Must be called with tasklist_lock held for read.
 */
static void __out_of_memory(gfp_t gfp_mask, int order, const nodemask_t *mask)
{
	struct task_struct *p;
	unsigned long points;

	if (sysctl_oom_kill_allocating_task)
		if (!oom_kill_process(current, gfp_mask, order, 0, NULL,
				"Out of memory (oom_kill_allocating_task)"))
			return;
retry:
	/*
	 * Rambo mode: Shoot down a process and hope it solves whatever
	 * issues we may have.
	 */
	p = select_bad_process(&points, NULL, mask);

	if (PTR_ERR(p) == -1UL)
		return;

	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p) {
		dump_header(NULL, gfp_mask, order, NULL);
		read_unlock(&tasklist_lock);
		panic("Out of memory and no killable processes...\n");
	}

	if (oom_kill_process(p, gfp_mask, order, points, NULL,
			     "Out of memory"))
		goto retry;
}

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to page allocator
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
		int order, nodemask_t *nodemask)
{
	unsigned long freed = 0;
	enum oom_constraint constraint = CONSTRAINT_NONE;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	/*
	 * If current has a pending SIGKILL, then automatically select it. The
	 * goal is to allow it to allocate so that it may quickly exit and free
	 * its memory.
	 */
	if (fatal_signal_pending(current)) {
		set_thread_flag(TIF_MEMDIE);
		return;
	}

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	if (zonelist)
		constraint = constrained_alloc(zonelist, gfp_mask, nodemask);
	check_panic_on_oom(constraint, gfp_mask, order);
	read_lock(&tasklist_lock);
	__out_of_memory(gfp_mask, order,
			constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
								 NULL);
	read_unlock(&tasklist_lock);

	/*
	 * Give the killed task a good chance of exiting before we retry
	 * the allocation, unless the killed task is current itself.
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If a populated zone has ZONE_OOM_LOCKED set, a parallel
 * oom killing is already in progress so do nothing.  If a task is found with
 * TIF_MEMDIE set, it has been killed so do nothing and allow it to exit.
 */
void pagefault_out_of_memory(void)
{
	if (try_set_system_oom()) {
		out_of_memory(NULL, 0, 0, NULL);
		clear_system_oom();
	}
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}