mm, oom: force oom kill on sysrq+f
The oom killer chooses not to kill a thread if:

 - an eligible thread has already been oom killed and has yet to exit,
   and

 - an eligible thread is exiting but has yet to free all of its memory
   and is not the thread currently attempting to allocate memory.

SysRq+F manually invokes the global oom killer to kill a memory-hogging
task.  This is normally done as a last resort to free memory when no
progress is being made, or to test the oom killer itself.

For both uses we always want to kill a thread and never defer, so this
patch causes SysRq+F to always kill an eligible thread; it can be used to
force a kill even if a previously oom killed thread has failed to exit.

Signed-off-by: David Rientjes <rientjes@google.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b76437579d
commit 08ab9b10d4

4 changed files with 13 additions and 10 deletions
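Before the diff, here is a minimal user-space sketch (not the kernel code; the struct and the helper pick_victim() are hypothetical stand-ins for select_bad_process()) of the decision the patch changes: a task that has already been oom killed (TIF_MEMDIE set) normally makes victim selection defer, while force_kill lets SysRq+F keep scanning and always produce a victim.

    #include <stdbool.h>
    #include <stddef.h>

    struct task {
            bool memdie;           /* already oom killed, has yet to exit */
            bool has_mm;           /* still owns an address space */
            unsigned int badness;  /* oom score; higher = preferred victim */
    };

    /* Hypothetical stand-in for select_bad_process(): returns NULL when
     * selection should defer (an earlier victim is still exiting),
     * otherwise the task with the highest badness score. */
    static struct task *pick_victim(struct task *tasks, size_t n,
                                    bool force_kill)
    {
            struct task *chosen = NULL;

            for (size_t i = 0; i < n; i++) {
                    struct task *p = &tasks[i];

                    /* An earlier oom kill has not finished: normally defer,
                     * but SysRq+F passes force_kill and keeps scanning. */
                    if (p->memdie && !force_kill)
                            return NULL;
                    if (!p->has_mm)
                            continue;
                    if (!chosen || p->badness > chosen->badness)
                            chosen = p;
            }
            return chosen;
    }

The actual kernel changes, which thread the new force_kill argument from the SysRq handler down to select_bad_process(), follow.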
drivers/tty/sysrq.c
@@ -346,7 +346,7 @@ static struct sysrq_key_op sysrq_term_op = {
 
 static void moom_callback(struct work_struct *ignored)
 {
-	out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0, NULL);
+	out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0, NULL, true);
 }
 
 static DECLARE_WORK(moom_work, moom_callback);
include/linux/oom.h
@@ -49,7 +49,7 @@ extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
 extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
 
 extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
-		int order, nodemask_t *mask);
+		int order, nodemask_t *mask, bool force_kill);
 extern int register_oom_notifier(struct notifier_block *nb);
 extern int unregister_oom_notifier(struct notifier_block *nb);
 
mm/oom_kill.c
@@ -310,7 +310,7 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
  */
 static struct task_struct *select_bad_process(unsigned int *ppoints,
 		unsigned long totalpages, struct mem_cgroup *memcg,
-		const nodemask_t *nodemask)
+		const nodemask_t *nodemask, bool force_kill)
 {
 	struct task_struct *g, *p;
 	struct task_struct *chosen = NULL;
@@ -336,7 +336,8 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 		if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
 			if (unlikely(frozen(p)))
 				__thaw_task(p);
-			return ERR_PTR(-1UL);
+			if (!force_kill)
+				return ERR_PTR(-1UL);
 		}
 		if (!p->mm)
 			continue;
@@ -354,7 +355,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 			if (p == current) {
 				chosen = p;
 				*ppoints = 1000;
-			} else {
+			} else if (!force_kill) {
 				/*
 				 * If this task is not being ptraced on exit,
 				 * then wait for it to finish before killing
@@ -572,7 +573,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask)
 	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
 	limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT;
 	read_lock(&tasklist_lock);
-	p = select_bad_process(&points, limit, memcg, NULL);
+	p = select_bad_process(&points, limit, memcg, NULL, false);
 	if (p && PTR_ERR(p) != -1UL)
 		oom_kill_process(p, gfp_mask, 0, points, limit, memcg, NULL,
 				 "Memory cgroup out of memory");
@@ -687,6 +688,7 @@ static void clear_system_oom(void)
  * @gfp_mask: memory allocation flags
  * @order: amount of memory being requested as a power of 2
  * @nodemask: nodemask passed to page allocator
+ * @force_kill: true if a task must be killed, even if others are exiting
  *
  * If we run out of memory, we have the choice between either
  * killing a random task (bad), letting the system crash (worse)
@@ -694,7 +696,7 @@ static void clear_system_oom(void)
  * don't have to be perfect here, we just have to be good.
  */
 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
-		int order, nodemask_t *nodemask)
+		int order, nodemask_t *nodemask, bool force_kill)
 {
 	const nodemask_t *mpol_mask;
 	struct task_struct *p;
@@ -738,7 +740,8 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 		goto out;
 	}
 
-	p = select_bad_process(&points, totalpages, NULL, mpol_mask);
+	p = select_bad_process(&points, totalpages, NULL, mpol_mask,
+			       force_kill);
 	/* Found nothing?!?! Either we hang forever, or we panic. */
 	if (!p) {
 		dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
@@ -770,7 +773,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 void pagefault_out_of_memory(void)
 {
 	if (try_set_system_oom()) {
-		out_of_memory(NULL, 0, 0, NULL);
+		out_of_memory(NULL, 0, 0, NULL, false);
 		clear_system_oom();
 	}
 	if (!test_thread_flag(TIF_MEMDIE))
mm/page_alloc.c
@@ -1968,7 +1968,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 		goto out;
 	}
 	/* Exhausted what can be done so it's blamo time */
-	out_of_memory(zonelist, gfp_mask, order, nodemask);
+	out_of_memory(zonelist, gfp_mask, order, nodemask, false);
 
 out:
 	clear_zonelist_oom(zonelist, gfp_mask);
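As a usage note (not part of the patch): the manual oom kill can also be triggered without a keyboard by writing 'f' to /proc/sysrq-trigger as root, which goes through the same SysRq handling as the key combination and ends up in moom_callback() above. A minimal sketch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Writing 'f' to /proc/sysrq-trigger invokes the manual oom
             * kill, the same path as SysRq+F. Requires root and a kernel
             * built with CONFIG_MAGIC_SYSRQ; it will kill a memory-hogging
             * task, so only use it on a system you are prepared to disturb. */
            int fd = open("/proc/sysrq-trigger", O_WRONLY);

            if (fd < 0) {
                    perror("open /proc/sysrq-trigger");
                    return 1;
            }
            if (write(fd, "f", 1) != 1)
                    perror("write");
            close(fd);
            return 0;
    }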