/*
 * kernel/freezer.c - Function to freeze a process
 *
 * Originally from kernel/power/process.c
 */
|
|
|
|
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/suspend.h>
|
2011-05-23 12:51:41 -06:00
|
|
|
#include <linux/export.h>
|
2008-10-18 21:27:19 -06:00
|
|
|
#include <linux/syscalls.h>
|
|
|
|
#include <linux/freezer.h>
|
2011-11-21 13:32:23 -07:00
|
|
|
#include <linux/kthread.h>
|
2008-10-18 21:27:19 -06:00
|
|
|
|
2011-11-21 13:32:25 -07:00
|
|
|
/* total number of freezing conditions in effect */
atomic_t system_freezing_cnt = ATOMIC_INIT(0);
EXPORT_SYMBOL(system_freezing_cnt);

/* indicate whether PM freezing is in effect, protected by pm_mutex */
bool pm_freezing;
/* set when even PF_KTHREAD / nosig tasks must freeze (hibernation path) */
bool pm_nosig_freezing;

/*
 * Temporary export for the deadlock workaround in ata_scsi_hotplug().
 * Remove once the hack becomes unnecessary.
 */
EXPORT_SYMBOL_GPL(pm_freezing);

/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);
|
2008-10-18 21:27:19 -06:00
|
|
|
|
2011-11-21 13:32:25 -07:00
|
|
|
/**
|
|
|
|
* freezing_slow_path - slow path for testing whether a task needs to be frozen
|
|
|
|
* @p: task to be tested
|
|
|
|
*
|
|
|
|
* This function is called by freezing() if system_freezing_cnt isn't zero
|
|
|
|
* and tests whether @p needs to enter and stay in frozen state. Can be
|
|
|
|
* called under any context. The freezers are responsible for ensuring the
|
|
|
|
* target tasks see the updated state.
|
|
|
|
*/
|
|
|
|
bool freezing_slow_path(struct task_struct *p)
|
|
|
|
{
|
2013-07-24 18:41:33 -06:00
|
|
|
if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
|
2011-11-21 13:32:25 -07:00
|
|
|
return false;
|
|
|
|
|
|
|
|
if (pm_nosig_freezing || cgroup_freezing(p))
|
|
|
|
return true;
|
|
|
|
|
2011-11-23 10:28:17 -07:00
|
|
|
if (pm_freezing && !(p->flags & PF_KTHREAD))
|
2011-11-21 13:32:25 -07:00
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(freezing_slow_path);
|
|
|
|
|
2008-10-18 21:27:19 -06:00
|
|
|
/* Refrigerator is place where frozen processes are stored :-). */

/**
 * __refrigerator - sleep until the freezing condition for %current is gone
 * @check_kthr_stop: if %true, also leave when kthread_should_stop() is set
 *
 * Loops in TASK_UNINTERRUPTIBLE sleep, with PF_FROZEN set under
 * freezer_lock, until freezing(current) becomes false (or, for kthreads
 * that request it, until the kthread is asked to stop).
 *
 * Returns %true if the task actually slept frozen at least once.
 */
bool __refrigerator(bool check_kthr_stop)
{
	/* Hmm, should we be allowed to suspend when there are realtime
	   processes around? */
	bool was_frozen = false;
	/* remember the task state we entered with so it can be restored */
	long save = current->state;

	pr_debug("%s entered refrigerator\n", current->comm);

	for (;;) {
		/* must be set before the freezing() check to avoid missed wakeups */
		set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * PF_FROZEN is set and cleared under freezer_lock so that
		 * the freezer/thawer sees a consistent transition.
		 */
		spin_lock_irq(&freezer_lock);
		current->flags |= PF_FROZEN;
		if (!freezing(current) ||
		    (check_kthr_stop && kthread_should_stop()))
			current->flags &= ~PF_FROZEN;
		spin_unlock_irq(&freezer_lock);

		/* leave the loop once the flag was cleared above */
		if (!(current->flags & PF_FROZEN))
			break;
		was_frozen = true;
		schedule();
	}

	pr_debug("%s left refrigerator\n", current->comm);

	/*
	 * Restore saved task state before returning.  The mb'd version
	 * needs to be used; otherwise, it might silently break
	 * synchronization which depends on ordered task state change.
	 */
	set_current_state(save);

	return was_frozen;
}
EXPORT_SYMBOL(__refrigerator);
|
2008-10-18 21:27:19 -06:00
|
|
|
|
|
|
|
/*
 * Wake @p as if a signal had arrived, without queueing a real one, so a
 * userspace task notices the pending freeze request.  Silently does
 * nothing when the task's signal struct can no longer be locked.
 */
static void fake_signal_wake_up(struct task_struct *p)
{
	unsigned long flags;

	if (!lock_task_sighand(p, &flags))
		return;

	signal_wake_up(p, 0);
	unlock_task_sighand(p, &flags);
}
|
|
|
|
|
|
|
|
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	/*
	 * This check can race with freezer_do_not_count, but worst case that
	 * will result in an extra wakeup being sent to the task. It does not
	 * race with freezer_count(), the barriers in freezer_count() and
	 * freezer_should_skip() ensure that either freezer_count() sees
	 * freezing == true in try_to_freeze() and freezes, or
	 * freezer_should_skip() sees !PF_FREEZE_SKIP and freezes the task
	 * normally.
	 */
	if (freezer_should_skip(p))
		return false;

	/* freezing/frozen transitions are only valid under freezer_lock */
	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	/* userspace tasks need a (fake) signal; kthreads just need a wakeup */
	if (!(p->flags & PF_KTHREAD))
		fake_signal_wake_up(p);
	else
		wake_up_state(p, TASK_INTERRUPTIBLE);

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}
|
|
|
|
|
2011-11-21 13:32:23 -07:00
|
|
|
/**
 * __thaw_task - wake @p out of the refrigerator
 * @p: task to thaw
 *
 * Called by the freezers after the freezing condition for @p has been
 * cleared; wakes @p only if it is actually sitting frozen.
 */
void __thaw_task(struct task_struct *p)
{
	unsigned long flags;

	/*
	 * Clear freezing and kick @p if FROZEN.  Clearing is guaranteed to
	 * be visible to @p as waking up implies wmb.  Waking up inside
	 * freezer_lock also prevents wakeups from leaking outside
	 * refrigerator.
	 */
	spin_lock_irqsave(&freezer_lock, flags);
	if (frozen(p))
		wake_up_process(p);
	spin_unlock_irqrestore(&freezer_lock, flags);
}
|
2011-11-21 13:32:25 -07:00
|
|
|
|
|
|
|
/**
 * set_freezable - make %current freezable
 *
 * Mark %current freezable and enter refrigerator if necessary.
 *
 * RETURNS:
 * the result of try_to_freeze() — %true if the task was frozen on the
 * way out of this call.
 */
bool set_freezable(void)
{
	might_sleep();

	/*
	 * Modify flags while holding freezer_lock.  This ensures the
	 * freezer notices that we aren't frozen yet or the freezing
	 * condition is visible to try_to_freeze() below.
	 */
	spin_lock_irq(&freezer_lock);
	current->flags &= ~PF_NOFREEZE;
	spin_unlock_irq(&freezer_lock);

	/* freeze immediately if a freezing condition is already in effect */
	return try_to_freeze();
}
EXPORT_SYMBOL(set_freezable);
|