/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 * The routines in this file are used to kill a process when
 * we're seriously out of memory. This gets called from __alloc_pages()
 * in mm/page_alloc.c when we really run out of memory.
 *
 * Since we won't call these routines often (on a well-configured
 * machine) this file will double as a 'coding guide' and a signpost
 * for newbie kernel hackers. It features several pointers to major
 * kernel subsystems and hints as to where to find out what things do.
 */
#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
int sysctl_reap_mem_on_sigkill;

DEFINE_MUTEX(oom_lock);
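
/*
 * Quick reference for the sysctls above (Documentation/sysctl/vm.txt has
 * the authoritative upstream descriptions): panic_on_oom panics instead of
 * killing a task, oom_kill_allocating_task kills the allocating task rather
 * than scanning for the best victim, oom_dump_tasks (on by default) logs
 * every candidate task's memory state before a kill, and
 * reap_mem_on_sigkill (non-upstream) lets SIGKILLed tasks be queued to the
 * OOM reaper via add_to_oom_reaper() below.
 */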
#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
	struct task_struct *tsk;
	bool ret = false;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */
/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}
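
/*
 * Usage note: a non-NULL return comes with task_lock() held, so callers
 * are responsible for dropping the lock once they are done with ->mm:
 *
 *	struct task_struct *t = find_lock_task_mm(tsk);
 *
 *	if (t) {
 *		... inspect t->mm safely ...
 *		task_unlock(t);
 *	}
 */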
/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}
/* return true if the task is not adequate as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When called for mem_cgroup_out_of_memory() and p is not in the memcg */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}
/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task whose badness score we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable or have been already oom reaped or they are in
	 * the middle of vfork
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
	task_unlock(p);

	/*
	 * Root processes get 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= (points * 3) / 100;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}
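
/*
 * Worked example (hypothetical numbers, assuming 4KiB pages): on a machine
 * with totalpages = 2097152 (8GiB of RAM plus swap), a task whose
 * rss + swap entries + page tables come to 524288 pages has a baseline of
 * 524288 points.  An oom_score_adj of +100 then adds
 * 100 * (2097152 / 1000) = 209700 points (note the integer division), so
 * each oom_score_adj unit is worth roughly 0.1% of totalpages, while the
 * root bonus subtracts 3% of the baseline.
 */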
enum oom_constraint {
	CONSTRAINT_NONE,
	CONSTRAINT_CPUSET,
	CONSTRAINT_MEMORY_POLICY,
	CONSTRAINT_MEMCG,
};
/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * Reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current; we have to kill a random task in this case.
	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way
	 * to handle it now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;
	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}
	/* Check whether this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			high_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
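
/*
 * Example of how the constraint shapes scoring (hypothetical numbers,
 * assuming 4KiB pages): on a 2-node machine with 4GiB per node and 2GiB of
 * swap, a task bound by a mempolicy to node 0 gets
 * totalpages = 524288 (swap) + 1048576 (node 0's spanned pages) rather
 * than the unconstrained 2621440, so oom_score_adj units become
 * proportionally larger within that oom domain.
 */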
static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	unsigned long points;

	if (oom_unkillable_task(task, NULL, oc->nodemask))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP because chances that it would release
	 * any memory is quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = ULONG_MAX;
		goto select;
	}

	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
	if (!points || points < oc->chosen_points)
		goto next;

	/* Prefer thread group leaders for display purposes */
	if (points == oc->chosen_points && thread_group_leader(oc->chosen))
		goto next;
select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}
/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. In case scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}

	oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
}
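
/*
 * Note on the final scaling: chosen_points is normalized to roughly the
 * 0..1000 range that /proc/<pid>/oom_score reports.  For example, a victim
 * whose badness points equal half of totalpages ends up with
 * chosen_points = points * 1000 / totalpages = 500.
 */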
/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu         %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			atomic_long_read(&task->mm->nr_ptes),
			mm_nr_pmds(task->mm),
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}
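
/*
 * A dumped line might look like this (hypothetical task and values):
 *
 *	[ 1234]  1000  1234   452313    98765     211       4     1024      0 my_daemon
 *
 * i.e. pid, uid, tgid, total_vm, rss, nr_ptes, nr_pmds, swapents,
 * oom_score_adj and comm, matching the header printed above.
 */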
static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=",
		current->comm, oc->gfp_mask, &oc->gfp_mask);
	if (oc->nodemask)
		pr_cont("%*pbl", nodemask_pr_args(oc->nodemask));
	else
		pr_cont("(null)");
	pr_cont(", order=%d, oom_score_adj=%hd\n",
		oc->order, current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	cpuset_print_current_mems_allowed();
	dump_stack();
	if (oc->memcg)
		mem_cgroup_print_oom_info(oc->memcg, p);
	else
		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc->memcg, oc->nodemask);
}
/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

#define K(x) ((x) << (PAGE_SHIFT-10))
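
/*
 * K() converts a page count to kilobytes.  For example, with 4KiB pages
 * (PAGE_SHIFT == 12) it becomes x << 2, so K(256) == 1024kB; architectures
 * with other page sizes shift accordingly.
 */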
/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}
#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);
void __oom_reap_task_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!can_madv_dontneed_vma(vma))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
			struct mmu_gather tlb;

			tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
					 NULL);
			tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
		}
	}
}
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	/*
	 * We have to make sure to not race with the victim exit path
	 * and cause premature new oom victim selection:
	 * oom_reap_task_mm		exit_mm
	 *   mmget_not_zero
	 *				  mmput
	 *				    atomic_dec_and_test
	 *				  exit_oom_victim
	 *				[...]
	 *				out_of_memory
	 *				  select_bad_process
	 *				    # no TIF_MEMDIE task selects new victim
	 *  unmap_page_range # frees some memory
	 */
	mutex_lock(&oom_lock);

	if (!down_read_trylock(&mm->mmap_sem)) {
		ret = false;
		trace_skip_task_reaping(tsk->pid);
		goto unlock_oom;
	}

	/*
	 * If the mm has notifiers then we would need to invalidate them around
	 * unmap_page_range and that is risky because notifiers can sleep and
	 * what they do is basically nondeterministic.  So let's have a short
	 * sleep to give the oom victim some more time.
	 * TODO: we really want to get rid of this ugly hack and make sure that
	 * notifiers cannot block for unbounded amount of time and add
	 * mmu_notifier_invalidate_range_{start,end} around unmap_page_range
	 */
	if (mm_has_notifiers(mm)) {
		up_read(&mm->mmap_sem);
		schedule_timeout_idle(HZ);
		goto unlock_oom;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_sem for reading because it serializes against the
	 * down_write();up_write() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		up_read(&mm->mmap_sem);
		trace_skip_task_reaping(tsk->pid);
		goto unlock_oom;
	}

	trace_start_task_reaping(tsk->pid);

	__oom_reap_task_mm(mm);

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
	up_read(&mm->mmap_sem);

	trace_finish_task_reaping(tsk->pid);
unlock_oom:
	mutex_unlock(&oom_lock);
	return ret;
}
#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the down_read_trylock(mmap_sem) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES)
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from OOM killer because it has been either reaped or
	 * somebody can't call up_write(mmap_sem).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}
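
/*
 * Rough retry budget: up to MAX_OOM_REAP_RETRIES (10) attempts with a
 * schedule_timeout_idle(HZ/10) pause between them, so the reaper spends at
 * most about one second trying to take mmap_sem before it gives up and
 * sets MMF_OOM_SKIP anyway.
 */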
static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}
static void wake_oom_reaper(struct task_struct *tsk)
{
	/*
	 * Take the lock here to avoid the scenario where the same task is
	 * queued by both the OOM killer and some other SIGKILL path.
	 */
	spin_lock(&oom_reaper_lock);

	/* mm is already queued? */
	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags)) {
		spin_unlock(&oom_reaper_lock);
		return;
	}

	get_task_struct(tsk);

	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}
static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	if (IS_ERR(oom_reaper_th)) {
		pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
				PTR_ERR(oom_reaper_th));
		oom_reaper_th = NULL;
	}
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */
static void __mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
		mmgrab(tsk->signal->oom_mm);
		set_bit(MMF_OOM_VICTIM, &mm->flags);
	}
}
/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non NULL and caller has to guarantee it is stable (either
 * under task_lock or operate on the current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	__mark_oom_victim(tsk);

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen because OOM killer wouldn't be able to free
	 * any memory and livelock. freezing_slow_path will tell the freezer
	 * that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	trace_mark_victim(tsk->pid);
}
/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}
/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}
/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new usage of this function should be consulted with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}
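
/*
 * The suspend/hibernation path is the main user of this enable/disable
 * pair: it disables the OOM killer before freezing user tasks so that an
 * in-flight OOM kill cannot race with the freezer, and re-enables it on
 * thaw or on failure.
 */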
static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}
/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * Caller has to make sure that task->mm is stable (hold task_lock or
 * it operates on the current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm because it might have passed its exit_mm and
	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
	 * on that for now. We can consider find_lock_task_mm in future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there are
	 * only small chances it will free some more
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *p = oc->chosen;
	unsigned int points = oc->chosen_points;
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t;
	struct mm_struct *mm;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);
	bool can_oom_reap = true;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly
	 */
	task_lock(p);
	if (task_will_free_mem(p)) {
		mark_oom_victim(p);
		wake_oom_reaper(p);
		task_unlock(p);
		put_task_struct(p);
		return;
	}
	task_unlock(p);

	if (__ratelimit(&oom_rs))
		dump_header(oc, p);

	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);
	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	read_lock(&tasklist_lock);

	/*
	 * The task 'p' might have already exited before reaching here. The
	 * put_task_struct() will free task_struct 'p' while the loop still
	 * tries to access fields of 'p', so take an extra reference.
	 */
	get_task_struct(p);
	for_each_thread(p, t) {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (process_shares_mm(child, p->mm))
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child,
				oc->memcg, oc->nodemask, oc->totalpages);
			if (child_points > victim_points) {
				put_task_struct(victim);
				victim = child;
				victim_points = child_points;
				get_task_struct(victim);
			}
		}
	}
	put_task_struct(p);
	read_unlock(&tasklist_lock);

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}
	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	count_memcg_event_mm(mm, OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
	mark_oom_victim(victim);
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
	task_unlock(victim);
	/*
	 * Kill all user processes sharing victim->mm in other thread groups,
	 * if any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when
	 * an oom killed thread cannot exit because it requires the semaphore
	 * and it's contended by another thread trying to allocate memory
	 * itself.  That thread will now get access to memory reserves since
	 * it has a pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No use_mm() user needs to read from the userspace so we are
		 * ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K
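
/*
 * Example of the "sacrifice child" heuristic above (hypothetical): if a
 * build server's make process is chosen but one of its compiler children
 * (with its own mm) has a higher badness score, that child is killed
 * instead, losing one compile job rather than the whole build.
 */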
/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc,
			       enum oom_constraint constraint)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
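
/*
 * Sysctl semantics implemented above: panic_on_oom == 0 never panics,
 * == 1 panics only for an unconstrained (CONSTRAINT_NONE) oom, and == 2
 * ("compulsory") panics for constrained ooms as well; sysrq-triggered
 * ooms never panic.
 */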
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
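
/*
 * Registration sketch (my_driver_shrink_cache() is a hypothetical helper):
 * out_of_memory() runs this chain with a pointer to an unsigned long
 * "freed" counter as the notifier data and skips the kill if anything was
 * reclaimed, so a callback should add the number of pages it freed:
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += my_driver_shrink_cache();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */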
/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;
	enum oom_constraint constraint = CONSTRAINT_NONE;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to
	 * make sure to exclude the 0 mask - all other users should have
	 * at least ___GFP_DIRECT_RECLAIM to get here.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	constraint = constrained_alloc(oc);
	if (constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc, constraint);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
		dump_header(oc, NULL);
		panic("Out of memory and no killable processes...\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL) {
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
		/*
		 * Give the killed process a good chance to exit before trying
		 * to allocate memory again.
		 */
		schedule_timeout_killable(1);
	}
	return !!oc->chosen;
}
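
/*
 * Summary of the decision order implemented above: try the notifier chain,
 * short-circuit if current is already dying, bail out for !__GFP_FS
 * allocations, classify the constraint, honour panic_on_oom and
 * oom_kill_allocating_task, and only then fall back to the full
 * select_bad_process() scan.
 */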
/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;
	out_of_memory(&oc);
	mutex_unlock(&oom_lock);
}
void add_to_oom_reaper(struct task_struct *p)
{
	if (!sysctl_reap_mem_on_sigkill)
		return;

	p = find_lock_task_mm(p);
	if (!p)
		return;

	get_task_struct(p);
	if (task_will_free_mem(p)) {
		__mark_oom_victim(p);
		wake_oom_reaper(p);
	}
	task_unlock(p);
	put_task_struct(p);
}