ACPI and power management fixes for 3.11-rc4
- Revert two cpuidle commits added during the 3.8 development cycle
  that turned out to have introduced a significant performance
  regression, as requested by Jeremy Eder (the restored interval
  detection is sketched after the menu.c hunks below).

- The recent patches that made the freezer less heavy-weight
  introduced a regression causing user-space-driven hibernation using
  the ioctl() interface to block indefinitely when the hibernate
  process executes try_to_freeze().  A fix from Colin Cross addresses
  this by adding a process flag that marks the hibernate/suspend
  process, so the freezer knows to ignore it (a user-space sketch
  follows the kernel/power/process.c hunks below).

- One of the recent cpufreq reverts uncovered a problem in the core
  causing the cpufreq driver module refcount to become negative after
  a system suspend-resume cycle.  Fix from Rafael J Wysocki (the bug
  shape is reduced to a toy after the cpufreq.c hunks below).

- The evaluation of the ACPI battery _BIX method has never worked
  correctly, because the commit that added support for it forgot to
  take the "Revision" field in the return package into account.  As a
  result, reading battery info does not work at all on some systems,
  which is addressed by a fix from Lan Tianyu (illustrated after the
  battery.c hunks below).

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.19 (GNU/Linux)

iQIcBAABAgAGBQJR+6ptAAoJEKhOf7ml8uNsRpIP/0P2HbCFM52/4Rv/Iltnt4fI
9Vo2dyuL7JKP2U8jtHxfhFGg3oMdYQoUIdnpjtKr4O3obhzl4vHwE9vtrRlhHpRZ
SnHGe0W5v0eQOdCbVzdwS1NrJwckkTy1JuybV+PH66T84Usu0QoxE4iNveK2LX23
eJvOgWGBoyEEWJb+1/KJNIcKk77A0Cnc2CCLMN5bmhwH1QGDRZdzSnrjK5fGniF0
akCGq8jJhBaI1xJF/42LgNBiPpAYk42SPuiSOqniKzweUK1P6YzHjArh0qaTBoUj
27HRkZlY6Y8WLFxqQio7zvbbLSdRuwosESofw2kCFkAAEnCc71kw2nbebNr3sCap
MqrmEMcxqT803PiB2RGyS53WNE7mM3NFCPRLOPL+cWeNQhoYzbZ+UiNx4Dw667cr
Ow+egCY+jyAZm5TFqY6Y75lG61UM6oCs6M6iIwiv/BOmJqCmkTjvNBxHWrVcWxin
YhiLJGyt7iAcIaxhy+fCs2j2a7B0Ai62kZ6YLqaEtNBzjuDbm6sr61A6Nu8bpOTU
C7e76AocyfuDpdU99uawDvuazCGWEg+f8eH8C/ij19jF1/Mrlr0x+4x9MmMm9Iz5
ux0uroTteEuswz9aHmY270qdDLIuSGUsmqD05RoaO61U8dVigWw+ZKqUCImrAM7x
4bK1+2eOig794g9vSsen
=7x7r
-----END PGP SIGNATURE-----

Merge tag 'pm+acpi-3.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull ACPI and power management fixes from Rafael Wysocki (the same
four fixes summarized above).

* tag 'pm+acpi-3.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  freezer: set PF_SUSPEND_TASK flag on tasks that call freeze_processes
  ACPI / battery: Fix parsing _BIX return value
  cpufreq: Fix cpufreq driver module refcount balance after suspend/resume
  Revert "cpuidle: Quickly notice prediction failure for repeat mode"
  Revert "cpuidle: Quickly notice prediction failure in general case"
commit 1fe0135b9e
8 changed files with 31 additions and 125 deletions
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -117,6 +117,7 @@ struct acpi_battery {
 	struct acpi_device *device;
 	struct notifier_block pm_nb;
 	unsigned long update_time;
+	int revision;
 	int rate_now;
 	int capacity_now;
 	int voltage_now;
@@ -359,6 +360,7 @@ static struct acpi_offsets info_offsets[] = {
 };
 
 static struct acpi_offsets extended_info_offsets[] = {
+	{offsetof(struct acpi_battery, revision), 0},
 	{offsetof(struct acpi_battery, power_unit), 0},
 	{offsetof(struct acpi_battery, design_capacity), 0},
 	{offsetof(struct acpi_battery, full_charge_capacity), 0},
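Why the one-line table change above is enough: the battery driver walks
the _BIX return package element by element against this offsets table
(in extract_package()), so a table with no slot for the leading
"Revision" field stores every subsequent value one field off.  A
minimal user-space sketch of that failure mode (illustrative only;
simplified types, not the kernel's actual parser):

#include <stdio.h>
#include <stddef.h>

struct battery { int revision, power_unit, design_capacity; };

/* A _BIX-style package: Revision = 0, PowerUnit = 1, DesignCapacity = 5200 */
static const int package[] = { 0, 1, 5200 };

int main(void)
{
        struct battery b = { -1, -1, -1 };
        /* Broken offsets table: no slot for the leading Revision field */
        const size_t broken[] = {
                offsetof(struct battery, power_unit),
                offsetof(struct battery, design_capacity),
        };

        for (size_t i = 0; i < sizeof(broken) / sizeof(broken[0]); i++)
                *(int *)((char *)&b + broken[i]) = package[i];

        /* Prints 0 and 1: the revision landed in power_unit and the
         * real power unit in design_capacity - every field shifted. */
        printf("power_unit=%d design_capacity=%d\n",
               b.power_unit, b.design_capacity);
        return 0;
}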
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1177,14 +1177,11 @@ static int __cpufreq_remove_dev(struct device *dev,
 			 __func__, cpu_dev->id, cpu);
 	}
 
-	if ((cpus == 1) && (cpufreq_driver->target))
-		__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
-
-	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
-	cpufreq_cpu_put(data);
-
 	/* If cpu is last user of policy, free policy */
 	if (cpus == 1) {
+		if (cpufreq_driver->target)
+			__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+
 		lock_policy_rwsem_read(cpu);
 		kobj = &data->kobj;
 		cmp = &data->kobj_unregister;
@@ -1205,10 +1202,14 @@ static int __cpufreq_remove_dev(struct device *dev,
 		free_cpumask_var(data->related_cpus);
 		free_cpumask_var(data->cpus);
 		kfree(data);
-	} else if (cpufreq_driver->target) {
-		__cpufreq_governor(data, CPUFREQ_GOV_START);
-		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+	} else {
+		pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
+		cpufreq_cpu_put(data);
+
+		if (cpufreq_driver->target) {
+			__cpufreq_governor(data, CPUFREQ_GOV_START);
+			__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+		}
 	}
 
 	per_cpu(cpufreq_policy_cpu, cpu) = -1;
 	return 0;
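The shape of the refcount bug fixed above, reduced to a toy
(illustrative only; the real counter is the driver module's usage
count, taken by cpufreq_cpu_get() and dropped by cpufreq_cpu_put()):
before the change, the put ran unconditionally, and the cpus == 1
teardown path released the same reference again, so repeated
suspend-resume cycles drove the count below zero.

#include <stdio.h>

static int refcount;    /* stands in for the module usage count */

static void cpu_get(void) { refcount++; }
static void cpu_put(void) { refcount--; }

/* Old shape: unconditional put, plus a teardown path that releases
 * the same reference again when this CPU was the policy's last user. */
static void remove_dev_buggy(int last_user)
{
        cpu_put();
        if (last_user)
                cpu_put();      /* double drop: count goes negative */
}

/* Fixed shape: exactly one release per acquisition on every path. */
static void remove_dev_fixed(int last_user)
{
        (void)last_user;        /* teardown no longer drops it again */
        cpu_put();
}

int main(void)
{
        cpu_get();
        remove_dev_buggy(1);
        printf("buggy: %d\n", refcount);        /* -1 */

        refcount = 0;
        cpu_get();
        remove_dev_fixed(1);
        printf("fixed: %d\n", refcount);        /* 0 */
        return 0;
}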
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -28,13 +28,6 @@
 #define MAX_INTERESTING 50000
 #define STDDEV_THRESH 400
 
-/* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */
-#define MAX_DEVIATION 60
-
-static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer);
-static DEFINE_PER_CPU(int, hrtimer_status);
-/* menu hrtimer mode */
-enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
 
 /*
  * Concepts and ideas behind the menu governor
@@ -116,13 +109,6 @@ enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
  *
  */
 
-/*
- * The C-state residency is so long that is is worthwhile to exit
- * from the shallow C-state and re-enter into a deeper C-state.
- */
-static unsigned int perfect_cstate_ms __read_mostly = 30;
-module_param(perfect_cstate_ms, uint, 0000);
-
 struct menu_device {
 	int last_state_idx;
 	int needs_update;
@@ -205,52 +191,17 @@ static u64 div_round64(u64 dividend, u32 divisor)
 	return div_u64(dividend + (divisor / 2), divisor);
 }
 
-/* Cancel the hrtimer if it is not triggered yet */
-void menu_hrtimer_cancel(void)
-{
-	int cpu = smp_processor_id();
-	struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
-
-	/* The timer is still not time out*/
-	if (per_cpu(hrtimer_status, cpu)) {
-		hrtimer_cancel(hrtmr);
-		per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
-	}
-}
-EXPORT_SYMBOL_GPL(menu_hrtimer_cancel);
-
-/* Call back for hrtimer is triggered */
-static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
-{
-	int cpu = smp_processor_id();
-	struct menu_device *data = &per_cpu(menu_devices, cpu);
-
-	/* In general case, the expected residency is much larger than
-	 * deepest C-state target residency, but prediction logic still
-	 * predicts a small predicted residency, so the prediction
-	 * history is totally broken if the timer is triggered.
-	 * So reset the correction factor.
-	 */
-	if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
-		data->correction_factor[data->bucket] = RESOLUTION * DECAY;
-
-	per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
-
-	return HRTIMER_NORESTART;
-}
-
 /*
  * Try detecting repeating patterns by keeping track of the last 8
  * intervals, and checking if the standard deviation of that set
  * of points is below a threshold. If it is... then use the
  * average of these 8 points as the estimated value.
  */
-static u32 get_typical_interval(struct menu_device *data)
+static void get_typical_interval(struct menu_device *data)
 {
 	int i = 0, divisor = 0;
 	uint64_t max = 0, avg = 0, stddev = 0;
 	int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */
-	unsigned int ret = 0;
 
 again:
 
@@ -291,16 +242,13 @@ static u32 get_typical_interval(struct menu_device *data)
 	if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
 							|| stddev <= 20) {
 		data->predicted_us = avg;
-		ret = 1;
-		return ret;
+		return;
 
 	} else if ((divisor * 4) > INTERVALS * 3) {
 		/* Exclude the max interval */
 		thresh = max - 1;
 		goto again;
 	}
-
-	return ret;
 }
 
 /**
@@ -315,9 +263,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	int i;
 	int multiplier;
 	struct timespec t;
-	int repeat = 0, low_predicted = 0;
-	int cpu = smp_processor_id();
-	struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
 
 	if (data->needs_update) {
 		menu_update(drv, dev);
@@ -352,7 +297,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
 					 RESOLUTION * DECAY);
 
-	repeat = get_typical_interval(data);
+	get_typical_interval(data);
 
 	/*
 	 * We want to default to C1 (hlt), not to busy polling
@@ -373,10 +318,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 
 		if (s->disabled || su->disable)
 			continue;
-		if (s->target_residency > data->predicted_us) {
-			low_predicted = 1;
+		if (s->target_residency > data->predicted_us)
 			continue;
-		}
 		if (s->exit_latency > latency_req)
 			continue;
 		if (s->exit_latency * multiplier > data->predicted_us)
@@ -386,44 +329,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 		data->exit_us = s->exit_latency;
 	}
 
-	/* not deepest C-state chosen for low predicted residency */
-	if (low_predicted) {
-		unsigned int timer_us = 0;
-		unsigned int perfect_us = 0;
-
-		/*
-		 * Set a timer to detect whether this sleep is much
-		 * longer than repeat mode predicted.  If the timer
-		 * triggers, the code will evaluate whether to put
-		 * the CPU into a deeper C-state.
-		 * The timer is cancelled on CPU wakeup.
-		 */
-		timer_us = 2 * (data->predicted_us + MAX_DEVIATION);
-
-		perfect_us = perfect_cstate_ms * 1000;
-
-		if (repeat && (4 * timer_us < data->expected_us)) {
-			RCU_NONIDLE(hrtimer_start(hrtmr,
-				ns_to_ktime(1000 * timer_us),
-				HRTIMER_MODE_REL_PINNED));
-			/* In repeat case, menu hrtimer is started */
-			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
-		} else if (perfect_us < data->expected_us) {
-			/*
-			 * The next timer is long. This could be because
-			 * we did not make a useful prediction.
-			 * In that case, it makes sense to re-enter
-			 * into a deeper C-state after some time.
-			 */
-			RCU_NONIDLE(hrtimer_start(hrtmr,
-				ns_to_ktime(1000 * timer_us),
-				HRTIMER_MODE_REL_PINNED));
-			/* In general case, menu hrtimer is started */
-			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
-		}
-	}
-
 	return data->last_state_idx;
 }
 
@@ -514,9 +419,6 @@ static int menu_enable_device(struct cpuidle_driver *drv,
 				struct cpuidle_device *dev)
 {
 	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
-	struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu);
-	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	t->function = menu_hrtimer_notify;
 
 	memset(data, 0, sizeof(struct menu_device));
 
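The reverts above return get_typical_interval() to its original role:
look at the last 8 sleep intervals and, if their spread is small,
predict their average.  A standalone sketch of that detection idea
(illustrative only; the kernel's again: loop that iteratively discards
outliers above a threshold is omitted here):

#include <stdio.h>
#include <stdint.h>

#define INTERVALS 8

/* Returns 1 and sets *pred when the last INTERVALS sleep lengths look
 * like a repeating pattern (small spread around the average). */
static int typical_interval(const uint32_t iv[INTERVALS], uint64_t *pred)
{
        uint64_t avg = 0, var = 0;
        int i;

        for (i = 0; i < INTERVALS; i++)
                avg += iv[i];
        avg /= INTERVALS;

        for (i = 0; i < INTERVALS; i++) {
                int64_t d = (int64_t)iv[i] - (int64_t)avg;

                var += (uint64_t)(d * d);
        }
        var /= INTERVALS;

        /* kernel test: avg > 6 * stddev, or stddev <= 20us; comparing
         * squares (avg^2 > 36 * variance, variance <= 400) avoids an
         * integer square root. */
        if (avg * avg > 36 * var || var <= 400) {
                *pred = avg;
                return 1;
        }
        return 0;
}

int main(void)
{
        uint32_t iv[INTERVALS] = { 500, 510, 495, 505, 500, 498, 502, 500 };
        uint64_t pred;

        if (typical_interval(iv, &pred))
                printf("repeating pattern, predict %llu us\n",
                       (unsigned long long)pred);
        return 0;
}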
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1628,6 +1628,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
+#define PF_SUSPEND_TASK	0x80000000	/* this thread called freeze_processes and should not be frozen */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -174,10 +174,4 @@ static inline void tick_nohz_task_switch(struct task_struct *tsk) { }
 #endif
 
 
-# ifdef CONFIG_CPU_IDLE_GOV_MENU
-extern void menu_hrtimer_cancel(void);
-# else
-static inline void menu_hrtimer_cancel(void) {}
-# endif /* CONFIG_CPU_IDLE_GOV_MENU */
-
 #endif
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -33,7 +33,7 @@ static DEFINE_SPINLOCK(freezer_lock);
  */
 bool freezing_slow_path(struct task_struct *p)
 {
-	if (p->flags & PF_NOFREEZE)
+	if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
 		return false;
 
 	if (pm_nosig_freezing || cgroup_freezing(p))
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -109,6 +109,8 @@ static int try_to_freeze_tasks(bool user_only)
 
 /**
  * freeze_processes - Signal user space processes to enter the refrigerator.
+ * The current thread will not be frozen.  The same process that calls
+ * freeze_processes must later call thaw_processes.
  *
  * On success, returns 0.  On failure, -errno and system is fully thawed.
  */
@@ -120,6 +122,9 @@ int freeze_processes(void)
 	if (error)
 		return error;
 
+	/* Make sure this task doesn't get frozen */
+	current->flags |= PF_SUSPEND_TASK;
+
 	if (!pm_freezing)
 		atomic_inc(&system_freezing_cnt);
 
@@ -168,6 +173,7 @@ int freeze_kernel_threads(void)
 void thaw_processes(void)
 {
 	struct task_struct *g, *p;
+	struct task_struct *curr = current;
 
 	if (pm_freezing)
 		atomic_dec(&system_freezing_cnt);
@@ -182,10 +188,15 @@ void thaw_processes(void)
 
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
+		/* No other threads should have PF_SUSPEND_TASK set */
+		WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
 		__thaw_task(p);
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
 
+	WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
+	curr->flags &= ~PF_SUSPEND_TASK;
+
 	usermodehelper_enable();
 
 	schedule();
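For context on the change above: user-space-driven hibernation enters
the kernel through the /dev/snapshot device, and the SNAPSHOT_FREEZE
ioctl runs freeze_processes() on behalf of the caller.  With the
regression, the calling process itself froze the next time it hit
try_to_freeze(), hanging the tool; PF_SUSPEND_TASK now exempts it.  A
minimal sketch of that user-space sequence (illustrative only; error
handling trimmed, needs root and a hibernation-enabled kernel):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/suspend_ioctls.h>

int main(void)
{
        int fd = open("/dev/snapshot", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/snapshot");
                return 1;
        }
        /* The kernel runs freeze_processes() for this ioctl; the fix
         * tags the calling task with PF_SUSPEND_TASK so the freezer
         * skips it instead of blocking it in try_to_freeze(). */
        if (ioctl(fd, SNAPSHOT_FREEZE) < 0)
                perror("SNAPSHOT_FREEZE");
        else
                ioctl(fd, SNAPSHOT_UNFREEZE);
        close(fd);
        return 0;
}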
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -827,13 +827,10 @@ void tick_nohz_irq_exit(void)
 {
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 
-	if (ts->inidle) {
-		/* Cancel the timer because CPU already waken up from the C-states*/
-		menu_hrtimer_cancel();
+	if (ts->inidle)
 		__tick_nohz_idle_enter(ts);
-	} else {
+	else
 		tick_nohz_full_stop_tick(ts);
-	}
 }
 
 /**
@@ -931,8 +928,6 @@ void tick_nohz_idle_exit(void)
 
 	ts->inidle = 0;
 
-	/* Cancel the timer because CPU already waken up from the C-states*/
-	menu_hrtimer_cancel();
-
 	if (ts->idle_active || ts->tick_stopped)
 		now = ktime_get();