Merge "mhi: core: add mhi_device_get_sync_atomic() to wait until M0"

Authored by qctecmdr on 2020-06-10 23:48:03 -07:00; committed by Gerrit - the friendly Code Review server
commit 757f1d1466
3 changed files with 109 additions and 25 deletions

View file

@@ -751,6 +751,8 @@ struct mhi_bus {
 /* default MHI timeout */
 #define MHI_TIMEOUT_MS (1000)
+#define MHI_FORCE_WAKE_DELAY_US (100)
+
 extern struct mhi_bus mhi_bus;
 struct mhi_controller *find_mhi_controller_by_name(const char *name);
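For context: the new MHI_FORCE_WAKE_DELAY_US define sets the polling granularity of the busy-wait loop in mhi_device_get_sync_atomic(), added later in this commit. As a rough standalone sketch (illustration only, not code from the commit), the countdown bounds the number of pm_state polls like this:

	#include <stdio.h>

	#define MHI_FORCE_WAKE_DELAY_US (100)

	int main(void)
	{
		/* Example budget a client might pass as timeout_us */
		int timeout_us = 25000;
		int polls = 0;

		/* Models the loop in mhi_device_get_sync_atomic(): the
		 * kernel code calls udelay(MHI_FORCE_WAKE_DELAY_US) per
		 * iteration and re-checks pm_state; here we only count
		 * the iterations. */
		while (timeout_us > 0) {
			timeout_us -= MHI_FORCE_WAKE_DELAY_US;
			polls++;
		}

		printf("max pm_state polls before -ETIMEDOUT: %d\n", polls); /* 250 */
		return 0;
	}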

View file

@@ -405,35 +405,42 @@ void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
 	enum MHI_PM_STATE state;
 
 	write_lock_irq(&mhi_cntrl->pm_lock);
+
+	/* Just check if we are racing with device_wake assertion */
+	if (atomic_read(&mhi_cntrl->dev_wake))
+		MHI_VERB("M2 transition request post dev_wake:%d\n",
+			 atomic_read(&mhi_cntrl->dev_wake));
+
 	/* if it fails, means we transition to M3 */
 	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
-	if (state == MHI_PM_M2) {
-		MHI_VERB("Entered M2 State\n");
-		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
-		mhi_cntrl->dev_state = MHI_STATE_M2;
-		mhi_cntrl->M2++;
-
+	if (state != MHI_PM_M2) {
+		/* Nothing to be done, handle M3 transition later */
 		write_unlock_irq(&mhi_cntrl->pm_lock);
-		wake_up_all(&mhi_cntrl->state_event);
-
-		/* transfer pending, exit M2 immediately */
-		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
-			     atomic_read(&mhi_cntrl->dev_wake))) {
-			MHI_VERB(
-				"Exiting M2 Immediately, pending_pkts:%d dev_wake:%d\n",
-				atomic_read(&mhi_cntrl->pending_pkts),
-				atomic_read(&mhi_cntrl->dev_wake));
-			read_lock_bh(&mhi_cntrl->pm_lock);
-			mhi_cntrl->wake_get(mhi_cntrl, true);
-			mhi_cntrl->wake_put(mhi_cntrl, true);
-			read_unlock_bh(&mhi_cntrl->pm_lock);
-		} else {
-			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
-					     MHI_CB_IDLE);
-		}
-	} else {
-		write_unlock_irq(&mhi_cntrl->pm_lock);
+		return;
 	}
+
+	MHI_VERB("Entered M2 State\n");
+	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
+	mhi_cntrl->dev_state = MHI_STATE_M2;
+	mhi_cntrl->M2++;
+
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+	wake_up_all(&mhi_cntrl->state_event);
+
+	/* transfer pending, exit M2 immediately */
+	if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
+		     atomic_read(&mhi_cntrl->dev_wake))) {
+		MHI_VERB("Exiting M2 Immediately, pending_pkts:%d dev_wake:%d\n",
+			 atomic_read(&mhi_cntrl->pending_pkts),
+			 atomic_read(&mhi_cntrl->dev_wake));
+		read_lock_bh(&mhi_cntrl->pm_lock);
+		mhi_cntrl->wake_get(mhi_cntrl, true);
+		mhi_cntrl->wake_put(mhi_cntrl, true);
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+		return;
+	}
+
+	mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, MHI_CB_IDLE);
 }
 
 int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
@@ -1631,6 +1638,57 @@ int mhi_device_get_sync(struct mhi_device *mhi_dev, int vote)
 }
 EXPORT_SYMBOL(mhi_device_get_sync);
 
+int mhi_device_get_sync_atomic(struct mhi_device *mhi_dev, int timeout_us)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+		return -EIO;
+	}
+
+	mhi_cntrl->wake_get(mhi_cntrl, true);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	atomic_inc(&mhi_dev->dev_vote);
+	pm_wakeup_hard_event(&mhi_cntrl->mhi_dev->dev);
+	mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+
+	/* Return if client doesn't want us to wait */
+	if (!timeout_us) {
+		if (mhi_cntrl->pm_state != MHI_PM_M0)
+			MHI_ERR("Return without waiting for M0\n");
+
+		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+		return 0;
+	}
+
+	while (mhi_cntrl->pm_state != MHI_PM_M0 &&
+	       !MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) &&
+	       timeout_us > 0) {
+		udelay(MHI_FORCE_WAKE_DELAY_US);
+		timeout_us -= MHI_FORCE_WAKE_DELAY_US;
+	}
+
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || timeout_us <= 0) {
+		MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n",
+			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		read_lock_bh(&mhi_cntrl->pm_lock);
+		mhi_cntrl->wake_put(mhi_cntrl, false);
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+		atomic_dec(&mhi_dev->dev_vote);
+		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+		return -ETIMEDOUT;
+	}
+
+	mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+	return 0;
+}
+EXPORT_SYMBOL(mhi_device_get_sync_atomic);
+
 void mhi_device_put(struct mhi_device *mhi_dev, int vote)
 {
 	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
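For readers of this change, a hypothetical caller might pair the new API with mhi_device_put() roughly as follows. This is a sketch, not code from the commit; my_client_xfer() is a placeholder, and MHI_VOTE_DEVICE is assumed to be the device-vote flag matching the dev_vote taken by mhi_device_get_sync_atomic():

	/* Hypothetical client sketch: grab an M0 vote from atomic context. */
	static int my_client_urgent_xfer(struct mhi_device *mhi_dev)
	{
		int ret;

		/* Safe in atomic context: the wait polls with udelay() and
		 * never sleeps. Wait up to 1000 us for the device to reach
		 * M0. */
		ret = mhi_device_get_sync_atomic(mhi_dev, 1000);
		if (ret)
			return ret; /* -EIO (error state) or -ETIMEDOUT */

		ret = my_client_xfer(mhi_dev); /* device access that needs M0 */

		/* Drop the vote so the device may re-enter low power states */
		mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);

		return ret;
	}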

View file

@@ -609,6 +609,30 @@ void mhi_device_get(struct mhi_device *mhi_dev, int vote);
  */
 int mhi_device_get_sync(struct mhi_device *mhi_dev, int vote);
 
+/**
+ * mhi_device_get_sync_atomic - Asserts device_wake and moves device to M0
+ * @mhi_dev: Device associated with the channels
+ * @timeout_us: timeout, in microseconds
+ *
+ * The device_wake is asserted to keep the device in M0 or bring it to M0.
+ * If the device is not in M0 state, then this function will wait for the
+ * device to move to M0, until @timeout_us elapses.
+ * However, if the device's M1 state-change event races with this function,
+ * then there is a possibility of the device moving from M0 to M2 and back
+ * to M0. That can't be avoided as the host must transition the device from
+ * M1 to M2 as per the spec.
+ * Clients can ignore that transition after this function returns, as the
+ * device is expected to move immediately from M2 back to M0 while wake is
+ * asserted, and it won't enter a low power state.
+ *
+ * Returns:
+ * 0 if the operation was successful (however, an M0 -> M2 -> M0 transition
+ * is possible later, as mentioned above)
+ * -ETIMEDOUT if the device failed to move to M0 before @timeout_us elapsed
+ * -EIO if the MHI state is one of the ERROR states
+ */
+int mhi_device_get_sync_atomic(struct mhi_device *mhi_dev, int timeout_us);
+
 /**
  * mhi_device_put - re-enable low power modes
  * @mhi_dev: Device associated with the channels