FreeBSD/Linux Kernel Cross Reference
sys/mm/oom_kill.c


    1 /*
    2  *  linux/mm/oom_kill.c
    3  * 
    4  *  Copyright (C)  1998,2000  Rik van Riel
    5  *      Thanks go out to Claus Fischer for some serious inspiration and
    6  *      for goading me into coding this file...
    7  *  Copyright (C)  2010  Google, Inc.
    8  *      Rewritten by David Rientjes
    9  *
   10  *  The routines in this file are used to kill a process when
   11  *  we're seriously out of memory. This gets called from __alloc_pages()
   12  *  in mm/page_alloc.c when we really run out of memory.
   13  *
   14  *  Since we won't call these routines often (on a well-configured
   15  *  machine) this file will double as a 'coding guide' and a signpost
   16  *  for newbie kernel hackers. It features several pointers to major
   17  *  kernel subsystems and hints as to where to find out what things do.
   18  */
   19 
   20 #include <linux/oom.h>
   21 #include <linux/mm.h>
   22 #include <linux/err.h>
   23 #include <linux/gfp.h>
   24 #include <linux/sched.h>
   25 #include <linux/swap.h>
   26 #include <linux/timex.h>
   27 #include <linux/jiffies.h>
   28 #include <linux/cpuset.h>
   29 #include <linux/export.h>
   30 #include <linux/notifier.h>
   31 #include <linux/memcontrol.h>
   32 #include <linux/mempolicy.h>
   33 #include <linux/security.h>
   34 #include <linux/ptrace.h>
   35 #include <linux/freezer.h>
   36 #include <linux/ftrace.h>
   37 #include <linux/ratelimit.h>
   38 
   39 #define CREATE_TRACE_POINTS
   40 #include <trace/events/oom.h>
   41 
   42 int sysctl_panic_on_oom;
   43 int sysctl_oom_kill_allocating_task;
   44 int sysctl_oom_dump_tasks = 1;
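/*
 * Annotation (not part of the original file): these three knobs are exposed
 * through procfs as /proc/sys/vm/panic_on_oom, /proc/sys/vm/oom_kill_allocating_task
 * and /proc/sys/vm/oom_dump_tasks, so the OOM killer's behaviour can be tuned
 * at run time without rebuilding the kernel, e.g.:
 *
 *      echo 1 > /proc/sys/vm/panic_on_oom               # panic instead of killing
 *      echo 1 > /proc/sys/vm/oom_kill_allocating_task   # kill the allocator itself
 *      echo 0 > /proc/sys/vm/oom_dump_tasks             # suppress the task dump
 */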
   45 static DEFINE_SPINLOCK(zone_scan_lock);
   46 
   47 #ifdef CONFIG_NUMA
   48 /**
   49  * has_intersects_mems_allowed() - check task eligibility for kill
   50  * @tsk: task struct of which task to consider
   51  * @mask: nodemask passed to page allocator for mempolicy ooms
   52  *
   53  * Task eligibility is determined by whether the candidate task, @tsk, shares
   54  * mempolicy nodes with current when the oom is mempolicy constrained, or,
   55  * otherwise, whether it shares current's set of allowed cpuset nodes.
   56  */
   57 static bool has_intersects_mems_allowed(struct task_struct *tsk,
   58                                         const nodemask_t *mask)
   59 {
   60         struct task_struct *start = tsk;
   61 
   62         do {
   63                 if (mask) {
   64                         /*
   65                          * If this is a mempolicy constrained oom, tsk's
   66                          * cpuset is irrelevant.  Only return true if its
   67                          * mempolicy intersects current, otherwise it may be
   68                          * needlessly killed.
   69                          */
   70                         if (mempolicy_nodemask_intersects(tsk, mask))
   71                                 return true;
   72                 } else {
   73                         /*
   74                          * This is not a mempolicy constrained oom, so only
   75                          * check the mems of tsk's cpuset.
   76                          */
   77                         if (cpuset_mems_allowed_intersects(current, tsk))
   78                                 return true;
   79                 }
   80         } while_each_thread(start, tsk);
   81 
   82         return false;
   83 }
   84 #else
   85 static bool has_intersects_mems_allowed(struct task_struct *tsk,
   86                                         const nodemask_t *mask)
   87 {
   88         return true;
   89 }
   90 #endif /* CONFIG_NUMA */
   91 
   92 /*
   93  * The process p may have detached its own ->mm while exiting or through
   94  * use_mm(), but one or more of its subthreads may still have a valid
   95  * pointer.  Return p, or any of its subthreads with a valid ->mm, with
   96  * task_lock() held.
   97  */
   98 struct task_struct *find_lock_task_mm(struct task_struct *p)
   99 {
  100         struct task_struct *t = p;
  101 
  102         do {
  103                 task_lock(t);
  104                 if (likely(t->mm))
  105                         return t;
  106                 task_unlock(t);
  107         } while_each_thread(p, t);
  108 
  109         return NULL;
  110 }
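/*
 * Illustrative usage sketch (not in the original source): the thread returned
 * by find_lock_task_mm() is still task_lock()ed, so callers must drop the lock
 * themselves once they are done with ->mm, mirroring what oom_badness() and
 * dump_tasks() below do:
 *
 *      struct task_struct *t = find_lock_task_mm(p);
 *      if (t) {
 *              unsigned long rss = get_mm_rss(t->mm);
 *              task_unlock(t);
 *      }
 */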
  111 
  112 /* Return true if the task is not suitable as a candidate victim task. */
  113 static bool oom_unkillable_task(struct task_struct *p,
  114                 const struct mem_cgroup *memcg, const nodemask_t *nodemask)
  115 {
  116         if (is_global_init(p))
  117                 return true;
  118         if (p->flags & PF_KTHREAD)
  119                 return true;
  120 
  121         /* When called from mem_cgroup_out_of_memory() and p is not a member of the memcg */
  122         if (memcg && !task_in_mem_cgroup(p, memcg))
  123                 return true;
  124 
  125         /* p may not have freeable memory in nodemask */
  126         if (!has_intersects_mems_allowed(p, nodemask))
  127                 return true;
  128 
  129         return false;
  130 }
  131 
  132 /**
  133  * oom_badness - heuristic function to determine which candidate task to kill
  134  * @p: task struct of the task whose badness score we calculate
  135  * @totalpages: total present RAM allowed for page allocation
  136  *
  137  * The heuristic for determining which task to kill is made to be as simple and
  138  * predictable as possible.  The goal is to return the highest value for the
  139  * task consuming the most memory to avoid subsequent oom failures.
  140  */
  141 unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
  142                           const nodemask_t *nodemask, unsigned long totalpages)
  143 {
  144         long points;
  145         long adj;
  146 
  147         if (oom_unkillable_task(p, memcg, nodemask))
  148                 return 0;
  149 
  150         p = find_lock_task_mm(p);
  151         if (!p)
  152                 return 0;
  153 
  154         adj = (long)p->signal->oom_score_adj;
  155         if (adj == OOM_SCORE_ADJ_MIN) {
  156                 task_unlock(p);
  157                 return 0;
  158         }
  159 
  160         /*
  161          * The baseline for the badness score is the proportion of RAM that each
  162          * task's rss, pagetable and swap space use.
  163          */
  164         points = get_mm_rss(p->mm) + p->mm->nr_ptes +
  165                  get_mm_counter(p->mm, MM_SWAPENTS);
  166         task_unlock(p);
  167 
  168         /*
  169          * Root processes get a 3% bonus, just like the __vm_enough_memory()
  170          * implementation used by LSMs.
  171          */
  172         if (has_capability_noaudit(p, CAP_SYS_ADMIN))
  173                 adj -= 30;
  174 
  175         /* Normalize to oom_score_adj units */
  176         adj *= totalpages / 1000;
  177         points += adj;
  178 
  179         /*
  180          * Never return 0 for an eligible task regardless of the root bonus and
  181          * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
  182          */
  183         return points > 0 ? points : 1;
  184 }
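/*
 * Worked example (illustrative numbers, not from the source): on a machine
 * with totalpages = 1,000,000 pages of RAM + swap, a task whose rss, page
 * tables and swap entries add up to 250,000 pages starts with 250,000 points
 * (25% of memory).  An oom_score_adj of +300 then adds
 * 300 * (1,000,000 / 1000) = 300,000 points, -300 would subtract the same
 * amount, and the root bonus of "adj -= 30" is worth 3% of totalpages.
 */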
  185 
  186 /*
  187  * Determine the type of allocation constraint.
  188  */
  189 #ifdef CONFIG_NUMA
  190 static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
  191                                 gfp_t gfp_mask, nodemask_t *nodemask,
  192                                 unsigned long *totalpages)
  193 {
  194         struct zone *zone;
  195         struct zoneref *z;
  196         enum zone_type high_zoneidx = gfp_zone(gfp_mask);
  197         bool cpuset_limited = false;
  198         int nid;
  199 
  200         /* Default to all available memory */
  201         *totalpages = totalram_pages + total_swap_pages;
  202 
  203         if (!zonelist)
  204                 return CONSTRAINT_NONE;
  205         /*
  206          * Reached for a __GFP_THISNODE allocation only when __GFP_NOFAIL is also
  207          * set; avoid killing current and fall back to unconstrained, system-wide
  208          * victim selection.  Ideally CONSTRAINT_THISNODE, but not handled yet.
  209          */
  210         if (gfp_mask & __GFP_THISNODE)
  211                 return CONSTRAINT_NONE;
  212 
  213         /*
  214          * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
  215          * the page allocator means a mempolicy is in effect.  Cpuset policy
  216          * is enforced in get_page_from_freelist().
  217          */
  218         if (nodemask && !nodes_subset(node_states[N_MEMORY], *nodemask)) {
  219                 *totalpages = total_swap_pages;
  220                 for_each_node_mask(nid, *nodemask)
  221                         *totalpages += node_spanned_pages(nid);
  222                 return CONSTRAINT_MEMORY_POLICY;
  223         }
  224 
  225         /* Check whether this allocation failure was caused by cpuset mem restrictions */
  226         for_each_zone_zonelist_nodemask(zone, z, zonelist,
  227                         high_zoneidx, nodemask)
  228                 if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
  229                         cpuset_limited = true;
  230 
  231         if (cpuset_limited) {
  232                 *totalpages = total_swap_pages;
  233                 for_each_node_mask(nid, cpuset_current_mems_allowed)
  234                         *totalpages += node_spanned_pages(nid);
  235                 return CONSTRAINT_CPUSET;
  236         }
  237         return CONSTRAINT_NONE;
  238 }
  239 #else
  240 static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
  241                                 gfp_t gfp_mask, nodemask_t *nodemask,
  242                                 unsigned long *totalpages)
  243 {
  244         *totalpages = totalram_pages + total_swap_pages;
  245         return CONSTRAINT_NONE;
  246 }
  247 #endif
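/*
 * For orientation (summarized from <linux/oom.h> of the same era, not part of
 * this file): enum oom_constraint distinguishes CONSTRAINT_NONE (a plain,
 * system-wide OOM), CONSTRAINT_CPUSET and CONSTRAINT_MEMORY_POLICY (the
 * allocation was limited to a subset of nodes, handled above), and
 * CONSTRAINT_MEMCG (a memory-cgroup limit was hit, reached via
 * mem_cgroup_out_of_memory() rather than via constrained_alloc()).
 */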
  248 
  249 enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
  250                 unsigned long totalpages, const nodemask_t *nodemask,
  251                 bool force_kill)
  252 {
  253         if (task->exit_state)
  254                 return OOM_SCAN_CONTINUE;
  255         if (oom_unkillable_task(task, NULL, nodemask))
  256                 return OOM_SCAN_CONTINUE;
  257 
  258         /*
  259          * This task already has access to memory reserves and is being killed.
  260          * Don't allow any other task to have access to the reserves.
  261          */
  262         if (test_tsk_thread_flag(task, TIF_MEMDIE)) {
  263                 if (unlikely(frozen(task)))
  264                         __thaw_task(task);
  265                 if (!force_kill)
  266                         return OOM_SCAN_ABORT;
  267         }
  268         if (!task->mm)
  269                 return OOM_SCAN_CONTINUE;
  270 
  271         /*
  272          * If task is allocating a lot of memory and has been marked to be
  273          * killed first if it triggers an oom, then select it.
  274          */
  275         if (oom_task_origin(task))
  276                 return OOM_SCAN_SELECT;
  277 
  278         if (task->flags & PF_EXITING && !force_kill) {
  279                 /*
  280                  * If this task is not being ptraced on exit, then wait for it
  281                  * to finish before killing some other task unnecessarily.
  282                  */
  283                 if (!(task->group_leader->ptrace & PT_TRACE_EXIT))
  284                         return OOM_SCAN_ABORT;
  285         }
  286         return OOM_SCAN_OK;
  287 }
  288 
  289 /*
  290  * Simple selection loop. We choose the process with the highest
  291  * number of 'points'.
  292  *
  293  * (not docbooked, we don't want this one cluttering up the manual)
  294  */
  295 static struct task_struct *select_bad_process(unsigned int *ppoints,
  296                 unsigned long totalpages, const nodemask_t *nodemask,
  297                 bool force_kill)
  298 {
  299         struct task_struct *g, *p;
  300         struct task_struct *chosen = NULL;
  301         unsigned long chosen_points = 0;
  302 
  303         rcu_read_lock();
  304         do_each_thread(g, p) {
  305                 unsigned int points;
  306 
  307                 switch (oom_scan_process_thread(p, totalpages, nodemask,
  308                                                 force_kill)) {
  309                 case OOM_SCAN_SELECT:
  310                         chosen = p;
  311                         chosen_points = ULONG_MAX;
  312                         /* fall through */
  313                 case OOM_SCAN_CONTINUE:
  314                         continue;
  315                 case OOM_SCAN_ABORT:
  316                         rcu_read_unlock();
  317                         return ERR_PTR(-1UL);
  318                 case OOM_SCAN_OK:
  319                         break;
  320                 };
  321                 points = oom_badness(p, NULL, nodemask, totalpages);
  322                 if (points > chosen_points) {
  323                         chosen = p;
  324                         chosen_points = points;
  325                 }
  326         } while_each_thread(g, p);
  327         if (chosen)
  328                 get_task_struct(chosen);
  329         rcu_read_unlock();
  330 
  331         *ppoints = chosen_points * 1000 / totalpages;
  332         return chosen;
  333 }
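/*
 * Annotation (not in the original file): the *ppoints value handed back to the
 * caller is chosen_points scaled by 1000 / totalpages, i.e. roughly "per mille
 * of usable memory", so it lives on the same 0..1000 scale as oom_score_adj
 * and, presumably, as the oom_score value reported under /proc/<pid>/oom_score.
 */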
  334 
  335 /**
  336  * dump_tasks - dump current memory state of all system tasks
  337  * @memcg: current's memory controller, if constrained
  338  * @nodemask: nodemask passed to page allocator for mempolicy ooms
  339  *
  340  * Dumps the current memory state of all eligible tasks.  Tasks not in the same
  341  * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
  342  * are not shown.
  343  * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
  344  * swapents, oom_score_adj value, and name.
  345  */
  346 static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemask)
  347 {
  348         struct task_struct *p;
  349         struct task_struct *task;
  350 
  351         pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes swapents oom_score_adj name\n");
  352         rcu_read_lock();
  353         for_each_process(p) {
  354                 if (oom_unkillable_task(p, memcg, nodemask))
  355                         continue;
  356 
  357                 task = find_lock_task_mm(p);
  358                 if (!task) {
  359                         /*
  360                          * This is a kthread or all of p's threads have already
  361                          * detached their mm's.  There's no need to report
  362                          * them; they can't be oom killed anyway.
  363                          */
  364                         continue;
  365                 }
  366 
  367                 pr_info("[%5d] %5d %5d %8lu %8lu %7lu %8lu         %5hd %s\n",
  368                         task->pid, from_kuid(&init_user_ns, task_uid(task)),
  369                         task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
  370                         task->mm->nr_ptes,
  371                         get_mm_counter(task->mm, MM_SWAPENTS),
  372                         task->signal->oom_score_adj, task->comm);
  373                 task_unlock(task);
  374         }
  375         rcu_read_unlock();
  376 }
  377 
  378 static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
  379                         struct mem_cgroup *memcg, const nodemask_t *nodemask)
  380 {
  381         task_lock(current);
  382         pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
  383                 "oom_score_adj=%hd\n",
  384                 current->comm, gfp_mask, order,
  385                 current->signal->oom_score_adj);
  386         cpuset_print_task_mems_allowed(current);
  387         task_unlock(current);
  388         dump_stack();
  389         mem_cgroup_print_oom_info(memcg, p);
  390         show_mem(SHOW_MEM_FILTER_NODES);
  391         if (sysctl_oom_dump_tasks)
  392                 dump_tasks(memcg, nodemask);
  393 }
  394 
  395 #define K(x) ((x) << (PAGE_SHIFT-10))
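/*
 * K() converts a page count to kilobytes: a page is 2^PAGE_SHIFT bytes, so
 * with the common 4 KiB pages (PAGE_SHIFT == 12) the shift by PAGE_SHIFT - 10
 * multiplies by 4.  (Annotation, not part of the original file.)
 */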
  396 /*
  397  * Must be called while holding a reference to p, which will be released upon
  398  * returning.
  399  */
  400 void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
  401                       unsigned int points, unsigned long totalpages,
  402                       struct mem_cgroup *memcg, nodemask_t *nodemask,
  403                       const char *message)
  404 {
  405         struct task_struct *victim = p;
  406         struct task_struct *child;
  407         struct task_struct *t = p;
  408         struct mm_struct *mm;
  409         unsigned int victim_points = 0;
  410         static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
  411                                               DEFAULT_RATELIMIT_BURST);
  412 
  413         /*
  414          * If the task is already exiting, don't alarm the sysadmin or kill
  415          * its children or threads, just set TIF_MEMDIE so it can die quickly
  416          */
  417         if (p->flags & PF_EXITING) {
  418                 set_tsk_thread_flag(p, TIF_MEMDIE);
  419                 put_task_struct(p);
  420                 return;
  421         }
  422 
  423         if (__ratelimit(&oom_rs))
  424                 dump_header(p, gfp_mask, order, memcg, nodemask);
  425 
  426         task_lock(p);
  427         pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
  428                 message, task_pid_nr(p), p->comm, points);
  429         task_unlock(p);
  430 
  431         /*
  432          * If any of p's children has a different mm and is eligible for kill,
  433          * the one with the highest oom_badness() score is sacrificed for its
  434          * parent.  This attempts to lose the minimal amount of work done while
  435          * still freeing memory.
  436          */
  437         read_lock(&tasklist_lock);
  438         do {
  439                 list_for_each_entry(child, &t->children, sibling) {
  440                         unsigned int child_points;
  441 
  442                         if (child->mm == p->mm)
  443                                 continue;
  444                         /*
  445                          * oom_badness() returns 0 if the thread is unkillable
  446                          */
  447                         child_points = oom_badness(child, memcg, nodemask,
  448                                                                 totalpages);
  449                         if (child_points > victim_points) {
  450                                 put_task_struct(victim);
  451                                 victim = child;
  452                                 victim_points = child_points;
  453                                 get_task_struct(victim);
  454                         }
  455                 }
  456         } while_each_thread(p, t);
  457         read_unlock(&tasklist_lock);
  458 
  459         rcu_read_lock();
  460         p = find_lock_task_mm(victim);
  461         if (!p) {
  462                 rcu_read_unlock();
  463                 put_task_struct(victim);
  464                 return;
  465         } else if (victim != p) {
  466                 get_task_struct(p);
  467                 put_task_struct(victim);
  468                 victim = p;
  469         }
  470 
  471         /* mm cannot safely be dereferenced after task_unlock(victim) */
  472         mm = victim->mm;
  473         pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
  474                 task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
  475                 K(get_mm_counter(victim->mm, MM_ANONPAGES)),
  476                 K(get_mm_counter(victim->mm, MM_FILEPAGES)));
  477         task_unlock(victim);
  478 
  479         /*
  480          * Kill all user processes sharing victim->mm in other thread groups, if
  481          * any.  They don't get access to memory reserves, though, to avoid
  482          * depletion of all memory.  This prevents mm->mmap_sem livelock when an
  483          * oom killed thread cannot exit because it requires the semaphore and
  484          * it is contended by another thread trying to allocate memory itself.
  485          * That thread will now get access to memory reserves since it has a
  486          * pending fatal signal.
  487          */
  488         for_each_process(p)
  489                 if (p->mm == mm && !same_thread_group(p, victim) &&
  490                     !(p->flags & PF_KTHREAD)) {
  491                         if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
  492                                 continue;
  493 
  494                         task_lock(p);   /* Protect ->comm from prctl() */
  495                         pr_err("Kill process %d (%s) sharing same memory\n",
  496                                 task_pid_nr(p), p->comm);
  497                         task_unlock(p);
  498                         do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
  499                 }
  500         rcu_read_unlock();
  501 
  502         set_tsk_thread_flag(victim, TIF_MEMDIE);
  503         do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
  504         put_task_struct(victim);
  505 }
  506 #undef K
  507 
  508 /*
  509  * Determines whether the kernel must panic because of the panic_on_oom sysctl.
  510  */
  511 void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
  512                         int order, const nodemask_t *nodemask)
  513 {
  514         if (likely(!sysctl_panic_on_oom))
  515                 return;
  516         if (sysctl_panic_on_oom != 2) {
  517                 /*
  518                  * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
  519                  * does not panic for cpuset, mempolicy, or memcg allocation
  520                  * failures.
  521                  */
  522                 if (constraint != CONSTRAINT_NONE)
  523                         return;
  524         }
  525         dump_header(NULL, gfp_mask, order, NULL, nodemask);
  526         panic("Out of memory: %s panic_on_oom is enabled\n",
  527                 sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
  528 }
  529 
  530 static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
  531 
  532 int register_oom_notifier(struct notifier_block *nb)
  533 {
  534         return blocking_notifier_chain_register(&oom_notify_list, nb);
  535 }
  536 EXPORT_SYMBOL_GPL(register_oom_notifier);
  537 
  538 int unregister_oom_notifier(struct notifier_block *nb)
  539 {
  540         return blocking_notifier_chain_unregister(&oom_notify_list, nb);
  541 }
  542 EXPORT_SYMBOL_GPL(unregister_oom_notifier);
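/*
 * Minimal client sketch (hypothetical module code, not part of this file):
 * a registered callback runs from blocking_notifier_call_chain() in
 * out_of_memory() below and reports how many pages it freed through the
 * unsigned long pointer passed as the last argument; if anything was freed,
 * the kill is skipped for that invocation.
 *
 *      static int my_oom_notify(struct notifier_block *nb,
 *                               unsigned long unused, void *parm)
 *      {
 *              unsigned long *freed = parm;
 *
 *              *freed += my_driver_release_cached_pages();  // hypothetical helper
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_oom_nb = {
 *              .notifier_call = my_oom_notify,
 *      };
 *      ...
 *      register_oom_notifier(&my_oom_nb);
 */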
  543 
  544 /*
  545  * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
  546  * if a parallel OOM killing is already taking place that includes a zone in
  547  * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
  548  */
  549 int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
  550 {
  551         struct zoneref *z;
  552         struct zone *zone;
  553         int ret = 1;
  554 
  555         spin_lock(&zone_scan_lock);
  556         for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
  557                 if (zone_is_oom_locked(zone)) {
  558                         ret = 0;
  559                         goto out;
  560                 }
  561         }
  562 
  563         for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
  564                 /*
  565                  * Lock each zone in the zonelist under zone_scan_lock so a
  566                  * parallel invocation of try_set_zonelist_oom() doesn't succeed
  567                  * when it shouldn't.
  568                  */
  569                 zone_set_flag(zone, ZONE_OOM_LOCKED);
  570         }
  571 
  572 out:
  573         spin_unlock(&zone_scan_lock);
  574         return ret;
  575 }
  576 
  577 /*
  578  * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
  579  * allocation attempts with zonelists containing them may invoke the OOM killer
  580  * again, if necessary.
  581  */
  582 void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
  583 {
  584         struct zoneref *z;
  585         struct zone *zone;
  586 
  587         spin_lock(&zone_scan_lock);
  588         for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
  589                 zone_clear_flag(zone, ZONE_OOM_LOCKED);
  590         }
  591         spin_unlock(&zone_scan_lock);
  592 }
  593 
  594 /**
  595  * out_of_memory - kill the "best" process when we run out of memory
  596  * @zonelist: zonelist pointer
  597  * @gfp_mask: memory allocation flags
  598  * @order: amount of memory being requested as a power of 2
  599  * @nodemask: nodemask passed to page allocator
  600  * @force_kill: true if a task must be killed, even if others are exiting
  601  *
  602  * If we run out of memory, we can either kill a random task (bad), let
  603  * the system crash (worse), or try to be smart about which process to
  604  * kill. Note that we
  605  * don't have to be perfect here, we just have to be good.
  606  */
  607 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
  608                 int order, nodemask_t *nodemask, bool force_kill)
  609 {
  610         const nodemask_t *mpol_mask;
  611         struct task_struct *p;
  612         unsigned long totalpages;
  613         unsigned long freed = 0;
  614         unsigned int uninitialized_var(points);
  615         enum oom_constraint constraint = CONSTRAINT_NONE;
  616         int killed = 0;
  617 
  618         blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
  619         if (freed > 0)
  620                 /* Got some memory back in the last second. */
  621                 return;
  622 
  623         /*
  624          * If current has a pending SIGKILL or is exiting, then automatically
  625          * select it.  The goal is to allow it to allocate so that it may
  626          * quickly exit and free its memory.
  627          */
  628         if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
  629                 set_thread_flag(TIF_MEMDIE);
  630                 return;
  631         }
  632 
  633         /*
  634          * Check if there were limitations on the allocation (only relevant for
  635          * NUMA) that may require different handling.
  636          */
  637         constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
  638                                                 &totalpages);
  639         mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
  640         check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
  641 
  642         if (sysctl_oom_kill_allocating_task && current->mm &&
  643             !oom_unkillable_task(current, NULL, nodemask) &&
  644             current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
  645                 get_task_struct(current);
  646                 oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL,
  647                                  nodemask,
  648                                  "Out of memory (oom_kill_allocating_task)");
  649                 goto out;
  650         }
  651 
  652         p = select_bad_process(&points, totalpages, mpol_mask, force_kill);
  653         /* Found nothing?!?! Either we hang forever, or we panic. */
  654         if (!p) {
  655                 dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
  656                 panic("Out of memory and no killable processes...\n");
  657         }
  658         if (PTR_ERR(p) != -1UL) {
  659                 oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
  660                                  nodemask, "Out of memory");
  661                 killed = 1;
  662         }
  663 out:
  664         /*
  665          * Give the killed threads a good chance of exiting before trying to
  666          * allocate memory again.
  667          */
  668         if (killed)
  669                 schedule_timeout_killable(1);
  670 }
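/*
 * How the page allocator drives this file (paraphrased sketch of
 * __alloc_pages_may_oom() in mm/page_alloc.c from the same era, not verbatim):
 *
 *      if (!try_set_zonelist_oom(zonelist, gfp_mask))
 *              return NULL;    // another OOM kill is in progress, back off
 *      page = get_page_from_freelist(...);     // one last try, high watermark
 *      if (!page)
 *              out_of_memory(zonelist, gfp_mask, order, nodemask, false);
 *      clear_zonelist_oom(zonelist, gfp_mask);
 */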
  671 
  672 /*
  673  * The pagefault handler calls here because it is out of memory, so kill a
  674  * memory-hogging task.  If any populated zone has ZONE_OOM_LOCKED set, a
  675  * parallel oom killing is already in progress so do nothing.
  676  */
  677 void pagefault_out_of_memory(void)
  678 {
  679         struct zonelist *zonelist = node_zonelist(first_online_node,
  680                                                   GFP_KERNEL);
  681 
  682         if (try_set_zonelist_oom(zonelist, GFP_KERNEL)) {
  683                 out_of_memory(NULL, 0, 0, NULL, false);
  684                 clear_zonelist_oom(zonelist, GFP_KERNEL);
  685         }
  686 }

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.