ARM: OMAP2+: clockdomain: convert existing atomic usecounts into spinlock-protected shorts/ints
The atomic usecounts are confusing and no longer needed, since the operations they are attached to should already take place under lock. Replace the atomic counters with simple integers, protected by the enclosing powerdomain spinlock.

Signed-off-by: Paul Walmsley <paul@pwsan.com>
Cc: Kevin Hilman <khilman@deeprootsystems.com>
parent 65958fb6ca
commit 9249387019

7 changed files with 92 additions and 30 deletions
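For readers skimming the diff below, this is the pattern applied throughout: a lock-free atomic_t counter becomes a plain integer that is only touched while the enclosing lock is held. The sketch is illustrative only and is not part of the patch; the foo_* names and the standalone spinlock are hypothetical, whereas the real code reuses the powerdomain's existing lock via pwrdm_lock()/pwrdm_unlock().

    /* Illustrative sketch only -- hypothetical foo_* names, not patch code. */
    #include <linux/atomic.h>
    #include <linux/spinlock.h>
    #include <linux/printk.h>

    /* Before: atomic counter, usable without any lock held. */
    static atomic_t foo_usecount_atomic = ATOMIC_INIT(0);

    static void foo_get_atomic(void)
    {
            if (atomic_inc_return(&foo_usecount_atomic) == 1)
                    pr_debug("first user: enable the underlying resource\n");
    }

    /* After: plain integer, valid only while foo_lock is held. */
    static DEFINE_SPINLOCK(foo_lock);
    static int foo_usecount;        /* protected by foo_lock */

    static void foo_get(void)
    {
            spin_lock(&foo_lock);
            foo_usecount++;
            if (foo_usecount == 1)
                    pr_debug("first user: enable the underlying resource\n");
            spin_unlock(&foo_lock);
    }
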
@@ -210,7 +210,8 @@ static int _clkdm_add_wkdep(struct clockdomain *clkdm1,
 		return ret;
 	}
 
-	if (atomic_inc_return(&cd->wkdep_usecount) == 1) {
+	cd->wkdep_usecount++;
+	if (cd->wkdep_usecount == 1) {
 		pr_debug("clockdomain: hardware will wake up %s when %s wakes up\n",
 			 clkdm1->name, clkdm2->name);
 
@@ -252,7 +253,8 @@ static int _clkdm_del_wkdep(struct clockdomain *clkdm1,
 		return ret;
 	}
 
-	if (atomic_dec_return(&cd->wkdep_usecount) == 0) {
+	cd->wkdep_usecount--;
+	if (cd->wkdep_usecount == 0) {
 		pr_debug("clockdomain: hardware will no longer wake up %s after %s wakes up\n",
 			 clkdm1->name, clkdm2->name);
 
@@ -296,7 +298,8 @@ static int _clkdm_add_sleepdep(struct clockdomain *clkdm1,
 		return ret;
 	}
 
-	if (atomic_inc_return(&cd->sleepdep_usecount) == 1) {
+	cd->sleepdep_usecount++;
+	if (cd->sleepdep_usecount == 1) {
 		pr_debug("clockdomain: will prevent %s from sleeping if %s is active\n",
 			 clkdm1->name, clkdm2->name);
 
@@ -340,7 +343,8 @@ static int _clkdm_del_sleepdep(struct clockdomain *clkdm1,
 		return ret;
 	}
 
-	if (atomic_dec_return(&cd->sleepdep_usecount) == 0) {
+	cd->sleepdep_usecount--;
+	if (cd->sleepdep_usecount == 0) {
 		pr_debug("clockdomain: will no longer prevent %s from sleeping if %s is active\n",
 			 clkdm1->name, clkdm2->name);
 
@@ -567,7 +571,21 @@ struct powerdomain *clkdm_get_pwrdm(struct clockdomain *clkdm)
  */
 int clkdm_add_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
 {
-	return _clkdm_add_wkdep(clkdm1, clkdm2);
+	struct clkdm_dep *cd;
+	int ret;
+
+	if (!clkdm1 || !clkdm2)
+		return -EINVAL;
+
+	cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);
+	if (IS_ERR(cd))
+		return PTR_ERR(cd);
+
+	pwrdm_lock(cd->clkdm->pwrdm.ptr);
+	ret = _clkdm_add_wkdep(clkdm1, clkdm2);
+	pwrdm_unlock(cd->clkdm->pwrdm.ptr);
+
+	return ret;
 }
 
 /**
@@ -582,7 +600,21 @@ int clkdm_add_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
  */
 int clkdm_del_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
 {
-	return _clkdm_del_wkdep(clkdm1, clkdm2);
+	struct clkdm_dep *cd;
+	int ret;
+
+	if (!clkdm1 || !clkdm2)
+		return -EINVAL;
+
+	cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);
+	if (IS_ERR(cd))
+		return PTR_ERR(cd);
+
+	pwrdm_lock(cd->clkdm->pwrdm.ptr);
+	ret = _clkdm_del_wkdep(clkdm1, clkdm2);
+	pwrdm_unlock(cd->clkdm->pwrdm.ptr);
+
+	return ret;
 }
 
 /**
@@ -620,7 +652,7 @@ int clkdm_read_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
 		return ret;
 	}
 
-	/* XXX It's faster to return the atomic wkdep_usecount */
+	/* XXX It's faster to return the wkdep_usecount */
 	return arch_clkdm->clkdm_read_wkdep(clkdm1, clkdm2);
 }
 
@@ -659,7 +691,21 @@ int clkdm_clear_all_wkdeps(struct clockdomain *clkdm)
  */
 int clkdm_add_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
 {
-	return _clkdm_add_sleepdep(clkdm1, clkdm2);
+	struct clkdm_dep *cd;
+	int ret;
+
+	if (!clkdm1 || !clkdm2)
+		return -EINVAL;
+
+	cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);
+	if (IS_ERR(cd))
+		return PTR_ERR(cd);
+
+	pwrdm_lock(cd->clkdm->pwrdm.ptr);
+	ret = _clkdm_add_sleepdep(clkdm1, clkdm2);
+	pwrdm_unlock(cd->clkdm->pwrdm.ptr);
+
+	return ret;
 }
 
 /**
@@ -676,7 +722,21 @@ int clkdm_add_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
  */
 int clkdm_del_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
 {
-	return _clkdm_del_sleepdep(clkdm1, clkdm2);
+	struct clkdm_dep *cd;
+	int ret;
+
+	if (!clkdm1 || !clkdm2)
+		return -EINVAL;
+
+	cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);
+	if (IS_ERR(cd))
+		return PTR_ERR(cd);
+
+	pwrdm_lock(cd->clkdm->pwrdm.ptr);
+	ret = _clkdm_del_sleepdep(clkdm1, clkdm2);
+	pwrdm_unlock(cd->clkdm->pwrdm.ptr);
+
+	return ret;
 }
 
 /**
@@ -716,7 +776,7 @@ int clkdm_read_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2)
 		return ret;
 	}
 
-	/* XXX It's faster to return the atomic sleepdep_usecount */
+	/* XXX It's faster to return the sleepdep_usecount */
 	return arch_clkdm->clkdm_read_sleepdep(clkdm1, clkdm2);
 }
 
@@ -1063,7 +1123,8 @@ static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
 	 * should be called for every clock instance or hwmod that is
 	 * enabled, so the clkdm can be force woken up.
 	 */
-	if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps) {
+	clkdm->usecount++;
+	if (clkdm->usecount > 1 && autodeps) {
 		pwrdm_unlock(clkdm->pwrdm.ptr);
 		return 0;
 	}
@@ -1125,17 +1186,17 @@ int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
 	pwrdm_lock(clkdm->pwrdm.ptr);
 
 	/* corner case: disabling unused clocks */
-	if ((__clk_get_enable_count(clk) == 0) &&
-	    (atomic_read(&clkdm->usecount) == 0))
+	if ((__clk_get_enable_count(clk) == 0) && clkdm->usecount == 0)
 		goto ccd_exit;
 
-	if (atomic_read(&clkdm->usecount) == 0) {
+	if (clkdm->usecount == 0) {
 		pwrdm_unlock(clkdm->pwrdm.ptr);
 		WARN_ON(1); /* underflow */
 		return -ERANGE;
 	}
 
-	if (atomic_dec_return(&clkdm->usecount) > 0) {
+	clkdm->usecount--;
+	if (clkdm->usecount > 0) {
 		pwrdm_unlock(clkdm->pwrdm.ptr);
 		return 0;
 	}
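In the clkdm_clk_disable() hunk above, the underflow check and the decrement are separate statements on a plain integer; that is only safe because the whole sequence runs with the powerdomain lock held. A standalone sketch of the same check-then-decrement guard, with hypothetical baz_* names (the real function contains additional clock bookkeeping and a ccd_exit label that are omitted here):

    /* Sketch of the disable path's underflow guard (hypothetical baz_*). */
    #include <linux/spinlock.h>
    #include <linux/errno.h>
    #include <linux/bug.h>

    static DEFINE_SPINLOCK(baz_lock);
    static int baz_usecount;        /* protected by baz_lock */

    int baz_put(void)
    {
            spin_lock(&baz_lock);

            if (baz_usecount == 0) {
                    spin_unlock(&baz_lock);
                    WARN_ON(1);             /* underflow: unbalanced put */
                    return -ERANGE;
            }

            baz_usecount--;
            if (baz_usecount > 0) {
                    spin_unlock(&baz_lock);
                    return 0;               /* still in use */
            }

            /* last user gone: allow the resource to idle here */
            spin_unlock(&baz_lock);
            return 0;
    }
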
@@ -1213,13 +1274,14 @@ int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh)
 
 	pwrdm_lock(clkdm->pwrdm.ptr);
 
-	if (atomic_read(&clkdm->usecount) == 0) {
+	if (clkdm->usecount == 0) {
 		pwrdm_unlock(clkdm->pwrdm.ptr);
 		WARN_ON(1); /* underflow */
 		return -ERANGE;
 	}
 
-	if (atomic_dec_return(&clkdm->usecount) > 0) {
+	clkdm->usecount--;
+	if (clkdm->usecount > 0) {
 		pwrdm_unlock(clkdm->pwrdm.ptr);
 		return 0;
 	}
@@ -91,8 +91,8 @@ struct clkdm_autodep {
 struct clkdm_dep {
 	const char *clkdm_name;
 	struct clockdomain *clkdm;
-	atomic_t wkdep_usecount;
-	atomic_t sleepdep_usecount;
+	s16 wkdep_usecount;
+	s16 sleepdep_usecount;
 };
 
 /* Possible flags for struct clockdomain._flags */
@@ -136,7 +136,7 @@ struct clockdomain {
 	const u16 clkdm_offs;
 	struct clkdm_dep *wkdep_srcs;
 	struct clkdm_dep *sleepdep_srcs;
-	atomic_t usecount;
+	int usecount;
 	struct list_head node;
 };
 
@@ -186,7 +186,7 @@ static int omap3xxx_clkdm_clear_all_sleepdeps(struct clockdomain *clkdm)
 			continue; /* only happens if data is erroneous */
 
 		mask |= 1 << cd->clkdm->dep_bit;
-		atomic_set(&cd->sleepdep_usecount, 0);
+		cd->sleepdep_usecount = 0;
 	}
 	omap2_cm_clear_mod_reg_bits(mask, clkdm->pwrdm.ptr->prcm_offs,
 				    OMAP3430_CM_SLEEPDEP);
@@ -209,7 +209,7 @@ static int omap3xxx_clkdm_wakeup(struct clockdomain *clkdm)
 
 static void omap3xxx_clkdm_allow_idle(struct clockdomain *clkdm)
 {
-	if (atomic_read(&clkdm->usecount) > 0)
+	if (clkdm->usecount > 0)
 		clkdm_add_autodeps(clkdm);
 
 	omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
@@ -221,7 +221,7 @@ static void omap3xxx_clkdm_deny_idle(struct clockdomain *clkdm)
 	omap3xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
 					clkdm->clktrctrl_mask);
 
-	if (atomic_read(&clkdm->usecount) > 0)
+	if (clkdm->usecount > 0)
 		clkdm_del_autodeps(clkdm);
 }
 
@@ -393,7 +393,7 @@ static int omap4_clkdm_clear_all_wkup_sleep_deps(struct clockdomain *clkdm)
 			continue; /* only happens if data is erroneous */
 
 		mask |= 1 << cd->clkdm->dep_bit;
-		atomic_set(&cd->wkdep_usecount, 0);
+		cd->wkdep_usecount = 0;
 	}
 
 	omap4_cminst_clear_inst_reg_bits(mask, clkdm->prcm_partition,
@@ -83,10 +83,8 @@ static int clkdm_dbg_show_counter(struct clockdomain *clkdm, void *user)
 	    strncmp(clkdm->name, "dpll", 4) == 0)
 		return 0;
 
-	seq_printf(s, "%s->%s (%d)", clkdm->name,
-		   clkdm->pwrdm.ptr->name,
-		   atomic_read(&clkdm->usecount));
-	seq_printf(s, "\n");
+	seq_printf(s, "%s->%s (%d)\n", clkdm->name, clkdm->pwrdm.ptr->name,
+		   clkdm->usecount);
 
 	return 0;
 }
@@ -110,11 +110,12 @@ static void __init omap2_init_processor_devices(void)
 
 int __init omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused)
 {
+	/* XXX The usecount test is racy */
 	if ((clkdm->flags & CLKDM_CAN_ENABLE_AUTO) &&
 	    !(clkdm->flags & CLKDM_MISSING_IDLE_REPORTING))
 		clkdm_allow_idle(clkdm);
 	else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
-		 atomic_read(&clkdm->usecount) == 0)
+		 clkdm->usecount == 0)
 		clkdm_sleep(clkdm);
 	return 0;
 }
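The new "XXX The usecount test is racy" comment above flags that omap_pm_clkdms_setup() reads clkdm->usecount without holding the powerdomain lock, so the value can change between the test and clkdm_sleep(). The sketch below, with a hypothetical qux_* counter, contrasts such an unlocked "hint" read with a decision made under the lock; it is illustrative only, not a proposed fix for the OMAP code.

    /* Hypothetical qux_* sketch -- not part of the patch. */
    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_SPINLOCK(qux_lock);
    static int qux_usecount;        /* protected by qux_lock */

    /* Racy: the counter can change right after it is read. */
    static bool qux_seems_unused(void)
    {
            return qux_usecount == 0;
    }

    /* Race-free: the decision is made while the lock is held. */
    static bool qux_idle_if_unused(void)
    {
            bool idled = false;

            spin_lock(&qux_lock);
            if (qux_usecount == 0) {
                    /* safe to act on "unused" here */
                    idled = true;
            }
            spin_unlock(&qux_lock);

            return idled;
    }
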
@@ -210,6 +210,7 @@ int omap2_clkdm_read_wkdep(struct clockdomain *clkdm1,
 				     PM_WKDEP, (1 << clkdm2->dep_bit));
 }
 
+/* XXX Caller must hold the clkdm's powerdomain lock */
 int omap2_clkdm_clear_all_wkdeps(struct clockdomain *clkdm)
 {
 	struct clkdm_dep *cd;
@@ -221,7 +222,7 @@ int omap2_clkdm_clear_all_wkdeps(struct clockdomain *clkdm)
 
 		/* PRM accesses are slow, so minimize them */
 		mask |= 1 << cd->clkdm->dep_bit;
-		atomic_set(&cd->wkdep_usecount, 0);
+		cd->wkdep_usecount = 0;
 	}
 
 	omap2_prm_clear_mod_reg_bits(mask, clkdm->pwrdm.ptr->prcm_offs,