Merge branch 'pm-cpuidle'
* pm-cpuidle: (25 commits)
  cpuidle: Change struct menu_device field types
  cpuidle: Add a comment warning about possible overflow
  cpuidle: Fix variable domains in get_typical_interval()
  cpuidle: Fix menu_device->intervals type
  cpuidle: CodingStyle: Break up multiple assignments on single line
  cpuidle: Check called function parameter in get_typical_interval()
  cpuidle: Rearrange code and comments in get_typical_interval()
  cpuidle: Ignore interval prediction result when timer is shorter
  cpuidle-kirkwood.c: simplify use of devm_ioremap_resource()
  cpuidle: kirkwood: Make kirkwood_cpuidle_remove function static
  cpuidle: calxeda: Add missing __iomem annotation
  SH: cpuidle: Add missing parameter for cpuidle_register()
  ARM: ux500: cpuidle: Move ux500 cpuidle driver to drivers/cpuidle
  ARM: ux500: cpuidle: Remove pointless include
  ARM: ux500: cpuidle: Instantiate the driver from platform device
  ARM: davinci: cpuidle: Fix target residency
  cpuidle: Add Kconfig.arm and move calxeda, kirkwood and zynq
  cpuidle: Check if device is already registered
  cpuidle: Introduce __cpuidle_device_init()
  cpuidle: Introduce __cpuidle_unregister_device()
  ...
commit c7878810f2
15 changed files with 256 additions and 163 deletions
@@ -65,7 +65,7 @@ static struct cpuidle_driver davinci_idle_driver = {
 	.states[1] = {
 		.enter			= davinci_enter_idle,
 		.exit_latency		= 10,
-		.target_residency	= 100000,
+		.target_residency	= 10000,
 		.flags			= CPUIDLE_FLAG_TIME_VALID,
 		.name			= "DDR SR",
 		.desc			= "WFI and DDR Self Refresh",
@@ -4,7 +4,6 @@
 obj-y := cpu.o devices.o devices-common.o \
 	id.o usb.o timer.o pm.o
-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
 obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
 obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o
 obj-$(CONFIG_MACH_MOP500) += board-mop500.o board-mop500-sdi.o \
@@ -91,13 +91,11 @@ static struct cpuidle_driver cpuidle_driver = {
 int __init sh_mobile_setup_cpuidle(void)
 {
-	int ret;
-
 	if (sh_mobile_sleep_supported & SUSP_SH_SF)
 		cpuidle_driver.states[1].disabled = false;
 
 	if (sh_mobile_sleep_supported & SUSP_SH_STANDBY)
 		cpuidle_driver.states[2].disabled = false;
 
-	return cpuidle_register(&cpuidle_driver);
+	return cpuidle_register(&cpuidle_driver, NULL);
 }
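For reference, a minimal sketch of the two-argument cpuidle_register() form that the SH, kirkwood and ux500 conversions in this merge rely on; the second argument is the coupled-CPU mask, and drivers without coupled states pass NULL. Everything prefixed example_ is hypothetical and not taken from the patch.

#include <linux/cpuidle.h>
#include <linux/init.h>
#include <linux/module.h>

static int example_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	/* a real driver would drop into a low-power state here */
	return index;
}

static struct cpuidle_driver example_idle_driver = {
	.name = "example_idle",
	.owner = THIS_MODULE,
	.states[0] = {
		.enter			= example_enter,
		.exit_latency		= 1,
		.target_residency	= 1,
		.flags			= CPUIDLE_FLAG_TIME_VALID,
		.name			= "WFI",
		.desc			= "illustrative shallow state",
	},
	.state_count = 1,
};

static int __init example_idle_init(void)
{
	/* no coupled states, so the cpumask argument is NULL */
	return cpuidle_register(&example_idle_driver, NULL);
}
device_initcall(example_idle_init);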
@@ -1,5 +1,6 @@
+menu "CPU Idle"
+
-menuconfig CPU_IDLE
+config CPU_IDLE
 	bool "CPU idle PM support"
 	default y if ACPI || PPC_PSERIES
 	select CPU_IDLE_GOV_LADDER if (!NO_HZ && !NO_HZ_IDLE)

@@ -29,20 +30,13 @@ config CPU_IDLE_GOV_MENU
 	bool "Menu governor (for tickless system)"
 	default y
 
-config CPU_IDLE_CALXEDA
-	bool "CPU Idle Driver for Calxeda processors"
-	depends on ARCH_HIGHBANK
-	select ARM_CPU_SUSPEND
-	help
-	  Select this to enable cpuidle on Calxeda processors.
-
-config CPU_IDLE_ZYNQ
-	bool "CPU Idle Driver for Xilinx Zynq processors"
-	depends on ARCH_ZYNQ
-	help
-	  Select this to enable cpuidle on Xilinx Zynq processors.
+menu "ARM CPU Idle Drivers"
+depends on ARM
+source "drivers/cpuidle/Kconfig.arm"
+endmenu
 
 endif
 
 config ARCH_NEEDS_CPU_IDLE_COUPLED
 	def_bool n
+
+endmenu
drivers/cpuidle/Kconfig.arm (new file, 29 lines)

@@ -0,0 +1,29 @@
+#
+# ARM CPU Idle drivers
+#
+
+config ARM_HIGHBANK_CPUIDLE
+	bool "CPU Idle Driver for Calxeda processors"
+	depends on ARCH_HIGHBANK
+	select ARM_CPU_SUSPEND
+	help
+	  Select this to enable cpuidle on Calxeda processors.
+
+config ARM_KIRKWOOD_CPUIDLE
+	bool "CPU Idle Driver for Marvell Kirkwood SoCs"
+	depends on ARCH_KIRKWOOD
+	help
+	  This adds the CPU Idle driver for Marvell Kirkwood SoCs.
+
+config ARM_ZYNQ_CPUIDLE
+	bool "CPU Idle Driver for Xilinx Zynq processors"
+	depends on ARCH_ZYNQ
+	help
+	  Select this to enable cpuidle on Xilinx Zynq processors.
+
+config ARM_U8500_CPUIDLE
+	bool "Cpu Idle Driver for the ST-E u8500 processors"
+	depends on ARCH_U8500
+	help
+	  Select this to enable cpuidle for ST-E u8500 processors
@@ -5,6 +5,9 @@
 obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
 obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
 
-obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
-obj-$(CONFIG_ARCH_KIRKWOOD) += cpuidle-kirkwood.o
-obj-$(CONFIG_CPU_IDLE_ZYNQ) += cpuidle-zynq.o
+##################################################################################
+# ARM SoC drivers
+obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o
+obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o
+obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o
+obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
@@ -35,7 +35,7 @@
 #include <asm/cp15.h>
 
 extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
-extern void *scu_base_addr;
+extern void __iomem *scu_base_addr;
 
 static noinline void calxeda_idle_restore(void)
 {
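As a side note, a minimal sketch (not from this commit) of what the __iomem annotation above documents: MMIO pointers are only touched through the I/O accessors, and sparse (make C=1) flags any mix-up with plain pointers. The register offset and example_ names are made up.

#include <linux/io.h>

static void __iomem *example_regs;	/* hypothetical mapped register block */

static u32 example_read_status(void)
{
	return readl(example_regs + 0x04);	/* offset is illustrative */
}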
@@ -60,9 +60,6 @@ static int kirkwood_cpuidle_probe(struct platform_device *pdev)
 	struct resource *res;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res == NULL)
-		return -EINVAL;
-
 	ddr_operation_base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(ddr_operation_base))
 		return PTR_ERR(ddr_operation_base);

@@ -70,7 +67,7 @@ static int kirkwood_cpuidle_probe(struct platform_device *pdev)
 	return cpuidle_register(&kirkwood_idle_driver, NULL);
 }
 
-int kirkwood_cpuidle_remove(struct platform_device *pdev)
+static int kirkwood_cpuidle_remove(struct platform_device *pdev)
 {
 	cpuidle_unregister(&kirkwood_idle_driver);
 	return 0;
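The simplification above works because devm_ioremap_resource() validates the resource itself, including a NULL res, and returns an ERR_PTR on failure. A hedged sketch of the resulting probe pattern (example_ names are illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *example_base;

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;

	/* no explicit NULL check needed; devm_ioremap_resource() handles it */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	example_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(example_base))
		return PTR_ERR(example_base);

	return 0;
}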
@@ -16,13 +16,11 @@
 #include <linux/smp.h>
 #include <linux/mfd/dbx500-prcmu.h>
 #include <linux/platform_data/arm-ux500-pm.h>
+#include <linux/platform_device.h>
 
 #include <asm/cpuidle.h>
 #include <asm/proc-fns.h>
 
-#include "db8500-regs.h"
-#include "id.h"
-
 static atomic_t master = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(master_lock);

@@ -113,11 +111,8 @@ static struct cpuidle_driver ux500_idle_driver = {
 	.state_count = 2,
 };
 
-int __init ux500_idle_init(void)
+static int __init dbx500_cpuidle_probe(struct platform_device *pdev)
 {
-	if (!(cpu_is_u8500_family() || cpu_is_ux540_family()))
-		return -ENODEV;
-
 	/* Configure wake up reasons */
 	prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
 			     PRCMU_WAKEUP(ABB));

@@ -125,4 +120,12 @@
 	return cpuidle_register(&ux500_idle_driver, NULL);
 }
 
-device_initcall(ux500_idle_init);
+static struct platform_driver dbx500_cpuidle_plat_driver = {
+	.driver = {
+		.name = "cpuidle-dbx500",
+		.owner = THIS_MODULE,
+	},
+	.probe = dbx500_cpuidle_probe,
+};
+
+module_platform_driver(dbx500_cpuidle_plat_driver);
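The probe above only runs once somebody instantiates a platform device named "cpuidle-dbx500"; in this merge that is the db8500-prcmu MFD cell added further down. A sketch of that parent side, with hypothetical example_ names (the mfd_add_devices() call mirrors what the prcmu driver already does with its existing cell array):

#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct mfd_cell example_cells[] = {
	{
		.name = "cpuidle-dbx500",	/* must match the cpuidle driver's .driver.name */
		.of_compatible = "stericsson,cpuidle-dbx500",
	},
};

static int example_parent_probe(struct platform_device *pdev)
{
	/* creates the "cpuidle-dbx500" child, which triggers dbx500_cpuidle_probe() */
	return mfd_add_devices(&pdev->dev, 0, example_cells,
			       ARRAY_SIZE(example_cells), NULL, 0, NULL);
}

static struct platform_driver example_parent_driver = {
	.driver = { .name = "example-prcmu" },
	.probe = example_parent_probe,
};
module_platform_driver(example_parent_driver);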
@@ -42,8 +42,6 @@ void disable_cpuidle(void)
 	off = 1;
 }
 
-static int __cpuidle_register_device(struct cpuidle_device *dev);
-
 /**
  * cpuidle_play_dead - cpu off-lining
  *

@@ -278,7 +276,7 @@ static void poll_idle_init(struct cpuidle_driver *drv) {}
  */
 int cpuidle_enable_device(struct cpuidle_device *dev)
 {
-	int ret, i;
+	int ret;
 	struct cpuidle_driver *drv;
 
 	if (!dev)

@@ -292,15 +290,12 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
 	if (!drv || !cpuidle_curr_governor)
 		return -EIO;
 
+	if (!dev->registered)
+		return -EINVAL;
+
 	if (!dev->state_count)
 		dev->state_count = drv->state_count;
 
-	if (dev->registered == 0) {
-		ret = __cpuidle_register_device(dev);
-		if (ret)
-			return ret;
-	}
-
 	poll_idle_init(drv);
 
 	ret = cpuidle_add_device_sysfs(dev);

@@ -311,12 +306,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
 	    (ret = cpuidle_curr_governor->enable(drv, dev)))
 		goto fail_sysfs;
 
-	for (i = 0; i < dev->state_count; i++) {
-		dev->states_usage[i].usage = 0;
-		dev->states_usage[i].time = 0;
-	}
-	dev->last_residency = 0;
-
 	smp_wmb();
 
 	dev->enabled = 1;

@@ -360,6 +349,23 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
 
 EXPORT_SYMBOL_GPL(cpuidle_disable_device);
 
+static void __cpuidle_unregister_device(struct cpuidle_device *dev)
+{
+	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+
+	list_del(&dev->device_list);
+	per_cpu(cpuidle_devices, dev->cpu) = NULL;
+	module_put(drv->owner);
+}
+
+static int __cpuidle_device_init(struct cpuidle_device *dev)
+{
+	memset(dev->states_usage, 0, sizeof(dev->states_usage));
+	dev->last_residency = 0;
+
+	return 0;
+}
+
 /**
  * __cpuidle_register_device - internal register function called before register
  *			       and enable routines

@@ -377,24 +383,15 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
 
 	per_cpu(cpuidle_devices, dev->cpu) = dev;
 	list_add(&dev->device_list, &cpuidle_detected_devices);
-	ret = cpuidle_add_sysfs(dev);
-	if (ret)
-		goto err_sysfs;
 
 	ret = cpuidle_coupled_register_device(dev);
-	if (ret)
-		goto err_coupled;
+	if (ret) {
+		__cpuidle_unregister_device(dev);
+		return ret;
+	}
 
 	dev->registered = 1;
 	return 0;
-
-err_coupled:
-	cpuidle_remove_sysfs(dev);
-err_sysfs:
-	list_del(&dev->device_list);
-	per_cpu(cpuidle_devices, dev->cpu) = NULL;
-	module_put(drv->owner);
-	return ret;
 }
 
 /**
@@ -403,25 +400,44 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
  */
 int cpuidle_register_device(struct cpuidle_device *dev)
 {
-	int ret;
+	int ret = -EBUSY;
 
 	if (!dev)
 		return -EINVAL;
 
 	mutex_lock(&cpuidle_lock);
 
-	if ((ret = __cpuidle_register_device(dev))) {
-		mutex_unlock(&cpuidle_lock);
-		return ret;
-	}
+	if (dev->registered)
+		goto out_unlock;
+
+	ret = __cpuidle_device_init(dev);
+	if (ret)
+		goto out_unlock;
+
+	ret = __cpuidle_register_device(dev);
+	if (ret)
+		goto out_unlock;
+
+	ret = cpuidle_add_sysfs(dev);
+	if (ret)
+		goto out_unregister;
+
+	ret = cpuidle_enable_device(dev);
+	if (ret)
+		goto out_sysfs;
 
-	cpuidle_enable_device(dev);
 	cpuidle_install_idle_handler();
 
+out_unlock:
 	mutex_unlock(&cpuidle_lock);
 
-	return 0;
+	return ret;
+
+out_sysfs:
+	cpuidle_remove_sysfs(dev);
+out_unregister:
+	__cpuidle_unregister_device(dev);
+	goto out_unlock;
 }
 
 EXPORT_SYMBOL_GPL(cpuidle_register_device);
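A sketch of the call sequence the reworked cpuidle_register_device() above serves, as a driver that registers per-CPU devices by hand might use it; re-registering an already registered device now simply returns -EBUSY. The example_ names are hypothetical and the error path is abbreviated.

#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct cpuidle_device, example_cpuidle_dev);

static int __init example_register_devices(struct cpuidle_driver *drv)
{
	int cpu, ret;

	ret = cpuidle_register_driver(drv);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct cpuidle_device *dev = &per_cpu(example_cpuidle_dev, cpu);

		dev->cpu = cpu;
		ret = cpuidle_register_device(dev);
		if (ret) {
			/* a full implementation would also unregister the
			 * devices already registered before dropping the driver */
			cpuidle_unregister_driver(drv);
			return ret;
		}
	}
	return 0;
}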
@@ -432,8 +448,6 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
  */
 void cpuidle_unregister_device(struct cpuidle_device *dev)
 {
-	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
-
 	if (dev->registered == 0)
 		return;
 

@@ -442,14 +456,12 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
 	cpuidle_disable_device(dev);
 
 	cpuidle_remove_sysfs(dev);
-	list_del(&dev->device_list);
-	per_cpu(cpuidle_devices, dev->cpu) = NULL;
+
+	__cpuidle_unregister_device(dev);
 
 	cpuidle_coupled_unregister_device(dev);
 
 	cpuidle_resume_and_unlock();
-
-	module_put(drv->owner);
 }
 
 EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
@@ -192,14 +192,4 @@ static int __init init_ladder(void)
 	return cpuidle_register_governor(&ladder_governor);
 }
 
-/**
- * exit_ladder - exits the governor
- */
-static void __exit exit_ladder(void)
-{
-	cpuidle_unregister_governor(&ladder_governor);
-}
-
-MODULE_LICENSE("GPL");
-module_init(init_ladder);
-module_exit(exit_ladder);
+postcore_initcall(init_ladder);
@@ -21,6 +21,15 @@
 #include <linux/math64.h>
 #include <linux/module.h>
 
+/*
+ * Please note when changing the tuning values:
+ * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
+ * a scaling operation multiplication may overflow on 32 bit platforms.
+ * In that case, #define RESOLUTION as ULL to get 64 bit result:
+ * #define RESOLUTION 1024ULL
+ *
+ * The default values do not overflow.
+ */
 #define BUCKETS 12
 #define INTERVALS 8
 #define RESOLUTION 1024
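A quick standalone check of the claim that the default values do not overflow, assuming the governor's in-tree defaults of RESOLUTION 1024 and MAX_INTERESTING 50000 (the latter is not visible in this hunk); plain userspace C, not kernel code:

#include <inttypes.h>
#include <stdio.h>

#define MAX_INTERESTING 50000	/* assumed default from menu.c */
#define RESOLUTION 1024

int main(void)
{
	uint64_t worst = (uint64_t)(MAX_INTERESTING - 1) * RESOLUTION;

	/* 49999 * 1024 = 51198976, comfortably below 2^32 - 1 = 4294967295 */
	printf("worst-case scaling product: %" PRIu64 "\n", worst);
	return 0;
}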
@@ -114,11 +123,11 @@ struct menu_device {
 	int		needs_update;
 
 	unsigned int	expected_us;
-	u64		predicted_us;
+	unsigned int	predicted_us;
 	unsigned int	exit_us;
 	unsigned int	bucket;
-	u64		correction_factor[BUCKETS];
-	u32		intervals[INTERVALS];
+	unsigned int	correction_factor[BUCKETS];
+	unsigned int	intervals[INTERVALS];
 	int		interval_ptr;
 };
@@ -199,16 +208,20 @@ static u64 div_round64(u64 dividend, u32 divisor)
  */
 static void get_typical_interval(struct menu_device *data)
 {
-	int i = 0, divisor = 0;
-	uint64_t max = 0, avg = 0, stddev = 0;
-	int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */
+	int i, divisor;
+	unsigned int max, thresh;
+	uint64_t avg, stddev;
+
+	thresh = UINT_MAX; /* Discard outliers above this value */
 
 again:
 
-	/* first calculate average and standard deviation of the past */
-	max = avg = divisor = stddev = 0;
+	/* First calculate the average of past intervals */
+	max = 0;
+	avg = 0;
+	divisor = 0;
 	for (i = 0; i < INTERVALS; i++) {
-		int64_t value = data->intervals[i];
+		unsigned int value = data->intervals[i];
 		if (value <= thresh) {
 			avg += value;
 			divisor++;

@@ -218,15 +231,38 @@ static void get_typical_interval(struct menu_device *data)
 	}
 	do_div(avg, divisor);
 
+	/* Then try to determine standard deviation */
+	stddev = 0;
 	for (i = 0; i < INTERVALS; i++) {
-		int64_t value = data->intervals[i];
+		unsigned int value = data->intervals[i];
 		if (value <= thresh) {
 			int64_t diff = value - avg;
 			stddev += diff * diff;
 		}
 	}
 	do_div(stddev, divisor);
-	stddev = int_sqrt(stddev);
+	/*
+	 * The typical interval is obtained when standard deviation is small
+	 * or standard deviation is small compared to the average interval.
+	 *
+	 * int_sqrt() formal parameter type is unsigned long. When the
+	 * greatest difference to an outlier exceeds ~65 ms * sqrt(divisor)
+	 * the resulting squared standard deviation exceeds the input domain
+	 * of int_sqrt on platforms where unsigned long is 32 bits in size.
+	 * In such case reject the candidate average.
+	 *
+	 * Use this result only if there is no timer to wake us up sooner.
+	 */
+	if (likely(stddev <= ULONG_MAX)) {
+		stddev = int_sqrt(stddev);
+		if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
+							|| stddev <= 20) {
+			if (data->expected_us > avg)
+				data->predicted_us = avg;
+			return;
+		}
+	}
 
 	/*
 	 * If we have outliers to the upside in our distribution, discard
 	 * those by setting the threshold to exclude these outliers, then

@@ -235,20 +271,12 @@ static void get_typical_interval(struct menu_device *data)
 	 *
 	 * This can deal with workloads that have long pauses interspersed
 	 * with sporadic activity with a bunch of short pauses.
-	 *
-	 * The typical interval is obtained when standard deviation is small
-	 * or standard deviation is small compared to the average interval.
 	 */
-	if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
-							|| stddev <= 20) {
-		data->predicted_us = avg;
+	if ((divisor * 4) <= INTERVALS * 3)
 		return;
-
-	} else if ((divisor * 4) > INTERVALS * 3) {
-		/* Exclude the max interval */
-		thresh = max - 1;
-		goto again;
-	}
+
+	thresh = max - 1;
+	goto again;
 }
 
 /**
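The "~65 ms * sqrt(divisor)" figure in the new comment follows from int_sqrt() taking an unsigned long: on a 32-bit platform the squared deviation divided by divisor must stay below 2^32 - 1, so a single dominant outlier may differ from the average by at most roughly sqrt(2^32) us * sqrt(divisor). A small userspace calculation of that bound, for illustration only:

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double ulong_max_32bit = 4294967295.0;	/* 2^32 - 1 */

	for (int divisor = 1; divisor <= 8; divisor++)
		printf("divisor=%d -> largest tolerated outlier ~ %.0f us\n",
		       divisor, sqrt(ulong_max_32bit * divisor));
	return 0;	/* build with -lm */
}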
@@ -293,8 +321,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	if (data->correction_factor[data->bucket] == 0)
 		data->correction_factor[data->bucket] = RESOLUTION * DECAY;
 
-	/* Make sure to round up for half microseconds */
-	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
+	/*
+	 * Force the result of multiplication to be 64 bits even if both
+	 * operands are 32 bits.
+	 * Make sure to round up for half microseconds.
+	 */
+	data->predicted_us = div_round64((uint64_t)data->expected_us *
+					 data->correction_factor[data->bucket],
 					 RESOLUTION * DECAY);
 
 	get_typical_interval(data);
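What the (uint64_t) cast above buys, shown in isolation: without it the two 32-bit operands are multiplied in 32 bits and the high bits are lost before div_round64() ever sees the value. The numbers below are illustrative.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t expected_us = 5000000;	/* 5 s of expected idle */
	uint32_t factor = 8192;		/* RESOLUTION * DECAY upper bound */

	uint64_t truncated = expected_us * factor;		/* 32-bit multiply, wraps */
	uint64_t widened = (uint64_t)expected_us * factor;	/* full 64-bit product */

	printf("truncated: %" PRIu64 "\nwidened:   %" PRIu64 "\n",
	       truncated, widened);
	return 0;
}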
@@ -360,7 +393,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
 	struct cpuidle_state *target = &drv->states[last_idx];
 	unsigned int measured_us;
-	u64 new_factor;
+	unsigned int new_factor;
 
 	/*
 	 * Ugh, this idle state doesn't support residency measurements, so we

@@ -381,10 +414,9 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 		measured_us -= data->exit_us;
 
 
-	/* update our correction ratio */
-
-	new_factor = data->correction_factor[data->bucket]
-			* (DECAY - 1) / DECAY;
+	/* Update our correction ratio */
+	new_factor = data->correction_factor[data->bucket];
+	new_factor -= new_factor / DECAY;
 
 	if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
 		new_factor += RESOLUTION * measured_us / data->expected_us;
@@ -397,9 +429,11 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 
 	/*
 	 * We don't want 0 as factor; we always want at least
-	 * a tiny bit of estimated time.
+	 * a tiny bit of estimated time. Fortunately, due to rounding,
+	 * new_factor will stay nonzero regardless of measured_us values
+	 * and the compiler can eliminate this test as long as DECAY > 1.
 	 */
-	if (new_factor == 0)
+	if (DECAY == 1 && unlikely(new_factor == 0))
 		new_factor = 1;
 
 	data->correction_factor[data->bucket] = new_factor;
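Why the rounding argument in the new comment holds: with integer division, f - f/DECAY can never reach zero once f is at least 1, so the correction factor only decays towards a small positive value. A standalone demonstration, assuming the in-tree defaults RESOLUTION 1024 and DECAY 8:

#include <stdio.h>

#define RESOLUTION 1024	/* assumed menu.c defaults */
#define DECAY 8

int main(void)
{
	unsigned int f = RESOLUTION * DECAY;	/* the reset value used above */

	/* worst case: many idle periods that contribute nothing new */
	for (int i = 0; i < 120; i++)
		f -= f / DECAY;

	printf("correction factor after 120 empty updates: %u\n", f);	/* stays >= 1 */
	return 0;
}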
@@ -442,14 +476,4 @@ static int __init init_menu(void)
 	return cpuidle_register_governor(&menu_governor);
 }
 
-/**
- * exit_menu - exits the governor
- */
-static void __exit exit_menu(void)
-{
-	cpuidle_unregister_governor(&menu_governor);
-}
-
-MODULE_LICENSE("GPL");
-module_init(init_menu);
-module_exit(exit_menu);
+postcore_initcall(init_menu);
@@ -11,8 +11,10 @@
 #include <linux/sysfs.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
+#include <linux/completion.h>
 #include <linux/capability.h>
 #include <linux/device.h>
+#include <linux/kobject.h>
 
 #include "cpuidle.h"
 
@@ -33,7 +35,8 @@ static ssize_t show_available_governors(struct device *dev,
 
 	mutex_lock(&cpuidle_lock);
 	list_for_each_entry(tmp, &cpuidle_governors, governor_list) {
-		if (i >= (ssize_t) ((PAGE_SIZE/sizeof(char)) - CPUIDLE_NAME_LEN - 2))
+		if (i >= (ssize_t) ((PAGE_SIZE/sizeof(char)) -
+				    CPUIDLE_NAME_LEN - 2))
 			goto out;
 		i += scnprintf(&buf[i], CPUIDLE_NAME_LEN, "%s ", tmp->name);
 	}
@@ -166,13 +169,28 @@ struct cpuidle_attr {
 #define define_one_rw(_name, show, store) \
 	static struct cpuidle_attr attr_##_name = __ATTR(_name, 0644, show, store)
 
-#define kobj_to_cpuidledev(k) container_of(k, struct cpuidle_device, kobj)
 #define attr_to_cpuidleattr(a) container_of(a, struct cpuidle_attr, attr)
-static ssize_t cpuidle_show(struct kobject * kobj, struct attribute * attr ,char * buf)
+
+struct cpuidle_device_kobj {
+	struct cpuidle_device *dev;
+	struct completion kobj_unregister;
+	struct kobject kobj;
+};
+
+static inline struct cpuidle_device *to_cpuidle_device(struct kobject *kobj)
+{
+	struct cpuidle_device_kobj *kdev =
+		container_of(kobj, struct cpuidle_device_kobj, kobj);
+
+	return kdev->dev;
+}
+
+static ssize_t cpuidle_show(struct kobject *kobj, struct attribute *attr,
+			    char *buf)
 {
 	int ret = -EIO;
-	struct cpuidle_device *dev = kobj_to_cpuidledev(kobj);
-	struct cpuidle_attr * cattr = attr_to_cpuidleattr(attr);
+	struct cpuidle_device *dev = to_cpuidle_device(kobj);
+	struct cpuidle_attr *cattr = attr_to_cpuidleattr(attr);
 
 	if (cattr->show) {
 		mutex_lock(&cpuidle_lock);
@@ -182,12 +200,12 @@ static ssize_t cpuidle_show(struct kobject * kobj, struct attribute * attr ,char
 	return ret;
 }
 
-static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
-			     const char * buf, size_t count)
+static ssize_t cpuidle_store(struct kobject *kobj, struct attribute *attr,
+			     const char *buf, size_t count)
 {
 	int ret = -EIO;
-	struct cpuidle_device *dev = kobj_to_cpuidledev(kobj);
-	struct cpuidle_attr * cattr = attr_to_cpuidleattr(attr);
+	struct cpuidle_device *dev = to_cpuidle_device(kobj);
+	struct cpuidle_attr *cattr = attr_to_cpuidleattr(attr);
 
 	if (cattr->store) {
 		mutex_lock(&cpuidle_lock);

@@ -204,9 +222,10 @@ static const struct sysfs_ops cpuidle_sysfs_ops = {
 
 static void cpuidle_sysfs_release(struct kobject *kobj)
 {
-	struct cpuidle_device *dev = kobj_to_cpuidledev(kobj);
+	struct cpuidle_device_kobj *kdev =
+		container_of(kobj, struct cpuidle_device_kobj, kobj);
 
-	complete(&dev->kobj_unregister);
+	complete(&kdev->kobj_unregister);
 }
 
 static struct kobj_type ktype_cpuidle = {
@@ -237,8 +256,8 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
 
 #define define_store_state_ull_function(_name) \
 static ssize_t store_state_##_name(struct cpuidle_state *state, \
-				   struct cpuidle_state_usage *state_usage, \
-				   const char *buf, size_t size) \
+				   struct cpuidle_state_usage *state_usage, \
+				   const char *buf, size_t size) \
 { \
 	unsigned long long value; \
 	int err; \
@@ -256,14 +275,16 @@ static ssize_t store_state_##_name(struct cpuidle_state *state, \
 
 #define define_show_state_ull_function(_name) \
 static ssize_t show_state_##_name(struct cpuidle_state *state, \
-				  struct cpuidle_state_usage *state_usage, char *buf) \
+				  struct cpuidle_state_usage *state_usage, \
+				  char *buf) \
 { \
 	return sprintf(buf, "%llu\n", state_usage->_name);\
 }
 
 #define define_show_state_str_function(_name) \
 static ssize_t show_state_##_name(struct cpuidle_state *state, \
-				  struct cpuidle_state_usage *state_usage, char *buf) \
+				  struct cpuidle_state_usage *state_usage, \
+				  char *buf) \
 { \
 	if (state->_name[0] == '\0')\
 		return sprintf(buf, "<null>\n");\
@@ -309,8 +330,9 @@ struct cpuidle_state_kobj {
 #define kobj_to_state(k) (kobj_to_state_obj(k)->state)
 #define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage)
 #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
-static ssize_t cpuidle_state_show(struct kobject * kobj,
-	struct attribute * attr ,char * buf)
+
+static ssize_t cpuidle_state_show(struct kobject *kobj, struct attribute *attr,
+				  char * buf)
 {
 	int ret = -EIO;
 	struct cpuidle_state *state = kobj_to_state(kobj);

@@ -323,8 +345,8 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
 	return ret;
 }
 
-static ssize_t cpuidle_state_store(struct kobject *kobj,
-	struct attribute *attr, const char *buf, size_t size)
+static ssize_t cpuidle_state_store(struct kobject *kobj, struct attribute *attr,
+				   const char *buf, size_t size)
 {
 	int ret = -EIO;
 	struct cpuidle_state *state = kobj_to_state(kobj);
@@ -371,6 +393,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
 {
 	int i, ret = -ENOMEM;
 	struct cpuidle_state_kobj *kobj;
+	struct cpuidle_device_kobj *kdev = device->kobj_dev;
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
 
 	/* state statistics */

@@ -383,7 +406,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
 	init_completion(&kobj->kobj_unregister);
 
 	ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle,
-				   &device->kobj, "state%d", i);
+				   &kdev->kobj, "state%d", i);
 	if (ret) {
 		kfree(kobj);
 		goto error_state;
@@ -449,8 +472,8 @@ static void cpuidle_driver_sysfs_release(struct kobject *kobj)
 	complete(&driver_kobj->kobj_unregister);
 }
 
-static ssize_t cpuidle_driver_show(struct kobject *kobj, struct attribute * attr,
-				   char * buf)
+static ssize_t cpuidle_driver_show(struct kobject *kobj, struct attribute *attr,
+				   char *buf)
 {
 	int ret = -EIO;
 	struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj);

@@ -500,6 +523,7 @@ static struct kobj_type ktype_driver_cpuidle = {
 static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
 {
 	struct cpuidle_driver_kobj *kdrv;
+	struct cpuidle_device_kobj *kdev = dev->kobj_dev;
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int ret;
 

@@ -511,7 +535,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
 	init_completion(&kdrv->kobj_unregister);
 
 	ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle,
-				   &dev->kobj, "driver");
+				   &kdev->kobj, "driver");
 	if (ret) {
 		kfree(kdrv);
 		return ret;
@@ -580,16 +604,28 @@ void cpuidle_remove_device_sysfs(struct cpuidle_device *device)
  */
 int cpuidle_add_sysfs(struct cpuidle_device *dev)
 {
+	struct cpuidle_device_kobj *kdev;
 	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
 	int error;
 
-	init_completion(&dev->kobj_unregister);
+	kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+	if (!kdev)
+		return -ENOMEM;
+	kdev->dev = dev;
+	dev->kobj_dev = kdev;
 
-	error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
-				     "cpuidle");
-	if (!error)
-		kobject_uevent(&dev->kobj, KOBJ_ADD);
-	return error;
+	init_completion(&kdev->kobj_unregister);
+
+	error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
+				     "cpuidle");
+	if (error) {
+		kfree(kdev);
+		return error;
+	}
+
+	kobject_uevent(&kdev->kobj, KOBJ_ADD);
+
+	return 0;
 }
 
 /**
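The allocation and teardown above follow the usual dynamically-allocated-kobject pattern: the kobject lives in a separately allocated wrapper, the ktype's release callback signals a completion, and the remover waits for it before freeing. A condensed sketch with hypothetical example_ names:

#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/slab.h>

struct example_kobj {
	struct kobject kobj;
	struct completion released;
};

static void example_release(struct kobject *kobj)
{
	struct example_kobj *ek = container_of(kobj, struct example_kobj, kobj);

	complete(&ek->released);
}

static struct kobj_type example_ktype = {
	.release = example_release,
};

static int example_add(struct kobject *parent, struct example_kobj **out)
{
	struct example_kobj *ek = kzalloc(sizeof(*ek), GFP_KERNEL);
	int ret;

	if (!ek)
		return -ENOMEM;
	init_completion(&ek->released);

	ret = kobject_init_and_add(&ek->kobj, &example_ktype, parent, "example");
	if (ret) {
		kfree(ek);
		return ret;
	}
	*out = ek;
	return 0;
}

static void example_remove(struct example_kobj *ek)
{
	kobject_put(&ek->kobj);
	wait_for_completion(&ek->released);
	kfree(ek);
}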
@@ -598,6 +634,9 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
  */
 void cpuidle_remove_sysfs(struct cpuidle_device *dev)
 {
-	kobject_put(&dev->kobj);
-	wait_for_completion(&dev->kobj_unregister);
+	struct cpuidle_device_kobj *kdev = dev->kobj_dev;
+
+	kobject_put(&kdev->kobj);
+	wait_for_completion(&kdev->kobj_unregister);
+	kfree(kdev);
 }
@@ -3093,6 +3093,10 @@ static struct mfd_cell db8500_prcmu_devs[] = {
 		.platform_data = &db8500_cpufreq_table,
 		.pdata_size = sizeof(db8500_cpufreq_table),
 	},
+	{
+		.name = "cpuidle-dbx500",
+		.of_compatible = "stericsson,cpuidle-dbx500",
+	},
 	{
 		.name = "db8500-thermal",
 		.num_resources = ARRAY_SIZE(db8500_thsens_resources),
@@ -13,8 +13,6 @@
 
 #include <linux/percpu.h>
 #include <linux/list.h>
-#include <linux/kobject.h>
-#include <linux/completion.h>
 #include <linux/hrtimer.h>
 
 #define CPUIDLE_STATE_MAX	10

@@ -61,6 +59,10 @@ struct cpuidle_state {
 
 #define CPUIDLE_DRIVER_FLAGS_MASK	(0xFFFF0000)
 
+struct cpuidle_device_kobj;
+struct cpuidle_state_kobj;
+struct cpuidle_driver_kobj;
+
 struct cpuidle_device {
 	unsigned int		registered:1;
 	unsigned int		enabled:1;

@@ -71,9 +73,8 @@ struct cpuidle_device {
 	struct cpuidle_state_usage	states_usage[CPUIDLE_STATE_MAX];
 	struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
 	struct cpuidle_driver_kobj *kobj_driver;
+	struct cpuidle_device_kobj *kobj_dev;
 	struct list_head	device_list;
-	struct kobject		kobj;
-	struct completion	kobj_unregister;
 
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 	int			safe_state_index;