ARM: OMAP: clockdomain: Fix locking on _clkdm_clk_hwmod_enable / disable
Previously the code acquired the spinlock only after increasing / decreasing the usecount value, which is wrong. This leaves a small window where a task switch may occur between the check of the usecount and the actual wakeup / sleep of the domain. Fix this by moving the spinlock acquisition before the usecount access. The usecount is left as an atomic_t so that the value can still be read conveniently through atomic_read.

Signed-off-by: Tero Kristo <t-kristo@ti.com>
Acked-by: Paul Walmsley <paul@pwsan.com>
Signed-off-by: Tony Lindgren <tony@atomide.com>
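For context, here is a minimal sketch of the pre-patch _clkdm_clk_hwmod_enable() flow, reconstructed from the hunks below, with comments marking the unprotected window the message describes. The surrounding declarations (struct clockdomain, arch_clkdm, autodeps) are assumed from the rest of clockdomain.c and are not part of this patch:

static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
{
        unsigned long flags;

        if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_enable)
                return -EINVAL;

        /* usecount goes 0 -> 1 here, with no lock held yet ... */
        if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps)
                return 0;

        /*
         * ... so a task preempted in this window has not yet woken the
         * domain, while a second caller already sees usecount > 1 and
         * (with autodeps set) returns early, assuming the domain is awake.
         */
        spin_lock_irqsave(&clkdm->lock, flags);
        arch_clkdm->clkdm_clk_enable(clkdm);
        pwrdm_state_switch(clkdm->pwrdm.ptr);
        spin_unlock_irqrestore(&clkdm->lock, flags);

        return 0;
}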
parent 49c58e8202
commit 64e29fd5ed
1 changed file with 11 additions and 4 deletions
@@ -925,15 +925,18 @@ static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
         if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_enable)
                 return -EINVAL;
 
+        spin_lock_irqsave(&clkdm->lock, flags);
+
         /*
          * For arch's with no autodeps, clkcm_clk_enable
          * should be called for every clock instance or hwmod that is
          * enabled, so the clkdm can be force woken up.
          */
-        if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps)
+        if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps) {
+                spin_unlock_irqrestore(&clkdm->lock, flags);
                 return 0;
+        }
 
-        spin_lock_irqsave(&clkdm->lock, flags);
         arch_clkdm->clkdm_clk_enable(clkdm);
         pwrdm_state_switch(clkdm->pwrdm.ptr);
         spin_unlock_irqrestore(&clkdm->lock, flags);
@@ -950,15 +953,19 @@ static int _clkdm_clk_hwmod_disable(struct clockdomain *clkdm)
         if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
                 return -EINVAL;
 
+        spin_lock_irqsave(&clkdm->lock, flags);
+
         if (atomic_read(&clkdm->usecount) == 0) {
+                spin_unlock_irqrestore(&clkdm->lock, flags);
                 WARN_ON(1); /* underflow */
                 return -ERANGE;
         }
 
-        if (atomic_dec_return(&clkdm->usecount) > 0)
+        if (atomic_dec_return(&clkdm->usecount) > 0) {
+                spin_unlock_irqrestore(&clkdm->lock, flags);
                 return 0;
+        }
 
-        spin_lock_irqsave(&clkdm->lock, flags);
         arch_clkdm->clkdm_clk_disable(clkdm);
         pwrdm_state_switch(clkdm->pwrdm.ptr);
         spin_unlock_irqrestore(&clkdm->lock, flags);
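As a footnote to the commit message's point about keeping usecount as an atomic_t: a hypothetical illustration of the kind of lockless read this allows (not part of the patch; the pr_debug call and the clkdm->name field are assumed from the kernel and the clockdomain code):

        /* Hypothetical diagnostic read, not part of this patch: sampling the
         * usecount with atomic_read() requires no clkdm->lock. */
        pr_debug("%s: usecount %d\n", clkdm->name, atomic_read(&clkdm->usecount));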