Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--  mm/oom_kill.c  53
1 file changed, 32 insertions, 21 deletions
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 626303b52f3c..e916168b6e0a 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -32,12 +32,32 @@
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
+#include <linux/freezer.h>
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
static DEFINE_SPINLOCK(zone_scan_lock);
+/*
+ * compare_swap_oom_score_adj() - compare and swap current's oom_score_adj
+ * @old_val: old oom_score_adj for compare
+ * @new_val: new oom_score_adj for swap
+ *
+ * Sets the oom_score_adj value for current to @new_val iff its present value is
+ * @old_val. Usually used to reinstate a previous value to prevent racing with
+ * userspace tuning the value in the interim.
+ */
+void compare_swap_oom_score_adj(int old_val, int new_val)
+{
+ struct sighand_struct *sighand = current->sighand;
+
+ spin_lock_irq(&sighand->siglock);
+ if (current->signal->oom_score_adj == old_val)
+ current->signal->oom_score_adj = new_val;
+ spin_unlock_irq(&sighand->siglock);
+}
+
/**
* test_set_oom_score_adj() - set current's oom_score_adj and return old value
* @new_val: new oom_score_adj value
@@ -53,13 +73,7 @@ int test_set_oom_score_adj(int new_val)
spin_lock_irq(&sighand->siglock);
old_val = current->signal->oom_score_adj;
- if (new_val != old_val) {
- if (new_val == OOM_SCORE_ADJ_MIN)
- atomic_inc(&current->mm->oom_disable_count);
- else if (old_val == OOM_SCORE_ADJ_MIN)
- atomic_dec(&current->mm->oom_disable_count);
- current->signal->oom_score_adj = new_val;
- }
+ current->signal->oom_score_adj = new_val;
spin_unlock_irq(&sighand->siglock);
return old_val;
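
The two helpers above are designed to be paired: a caller that must temporarily become the preferred OOM victim saves the old value with test_set_oom_score_adj() and later restores it with compare_swap_oom_score_adj(), which writes the old value back only if userspace has not tuned oom_score_adj in the meantime. A minimal sketch of the pattern (the caller shown is illustrative, modeled on the swapoff path, and is not part of this diff):

	int old_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);

	/* ... work that may trip the OOM killer ... */

	/* Restore old_adj unless userspace changed the value meanwhile. */
	compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, old_adj);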
@@ -172,16 +186,6 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
return 0;
/*
- * Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
- * so the entire heuristic doesn't need to be executed for something
- * that cannot be killed.
- */
- if (atomic_read(&p->mm->oom_disable_count)) {
- task_unlock(p);
- return 0;
- }
-
- /*
* The memory controller may have a limit of 0 bytes, so avoid a divide
* by zero, if necessary.
*/
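
The divide-by-zero guard this comment refers to sits just below the hunk in the full file; it is presumably the usual clamp of the divisor before the badness score is scaled, along the lines of:

	if (!totalpages)
		totalpages = 1;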
@@ -317,8 +321,11 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
* blocked waiting for another task which itself is waiting
* for memory. Is there a better alternative?
*/
- if (test_tsk_thread_flag(p, TIF_MEMDIE))
+ if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
+ if (unlikely(frozen(p)))
+ thaw_process(p);
return ERR_PTR(-1UL);
+ }
if (!p->mm)
continue;
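
The thaw matters because a task parked in the freezer does not return to process signals, so the SIGKILL already pending on a TIF_MEMDIE victim is never acted on and the OOM killer would keep deferring to a task that can never exit. A restatement of the new branch with explanatory comments (the comments are editorial, not in the patch):

	if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
		/*
		 * A frozen victim sits in the refrigerator and cannot
		 * run to handle its pending SIGKILL; wake it so it can
		 * die and free its memory before we defer to it.
		 */
		if (unlikely(frozen(p)))
			thaw_process(p);
		return ERR_PTR(-1UL);
	}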
@@ -435,7 +442,7 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
task_unlock(p);
/*
- * Kill all processes sharing p->mm in other thread groups, if any.
+ * Kill all user processes sharing p->mm in other thread groups, if any.
* They don't get access to memory reserves or a higher scheduler
* priority, though, to avoid depletion of all memory or task
* starvation. This prevents mm->mmap_sem livelock when an oom killed
@@ -445,7 +452,11 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
* signal.
*/
for_each_process(q)
- if (q->mm == mm && !same_thread_group(q, p)) {
+ if (q->mm == mm && !same_thread_group(q, p) &&
+ !(q->flags & PF_KTHREAD)) {
+ if (q->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+ continue;
+
task_lock(q); /* Protect ->comm from prctl() */
pr_err("Kill process %d (%s) sharing same memory\n",
task_pid_nr(q), q->comm);
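
With oom_disable_count gone, per-process OOM immunity is now enforced at kill time: kernel threads and tasks marked OOM_SCORE_ADJ_MIN that share the victim's mm are simply skipped. The new filter, condensed into a predicate (an illustrative helper, not part of the patch):

	/* Should q be killed alongside victim p for sharing p's mm? */
	static bool should_kill_sharer(struct task_struct *q,
				       struct task_struct *p,
				       struct mm_struct *mm)
	{
		return q->mm == mm && !same_thread_group(q, p) &&
		       !(q->flags & PF_KTHREAD) &&
		       q->signal->oom_score_adj != OOM_SCORE_ADJ_MIN;
	}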
@@ -722,7 +733,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
read_lock(&tasklist_lock);
if (sysctl_oom_kill_allocating_task &&
!oom_unkillable_task(current, NULL, nodemask) &&
- current->mm && !atomic_read(&current->mm->oom_disable_count)) {
+ current->mm) {
/*
* oom_kill_process() needs tasklist_lock held. If it returns
* non-zero, current could not be killed so we must fallback to
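
After this change the fast path only requires that current own an mm; the removed oom_disable_count test is presumably subsumed by the per-task oom_score_adj checks made during the kill itself. For reference, the knob that selects this path is the existing sysctl (unchanged by the patch):

	/* Runtime toggle for this fast path (procfs knob):
	 *   echo 1 > /proc/sys/vm/oom_kill_allocating_task
	 */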