drm/atomic: Prepare drm_modeset_lock infrastructure for interruptible waiting, v2.
When we want to make drm_atomic_commit interruptible, there are a lot of places that call the lock function, which we don't have control over. Rather than trying to convert every single one, it's easier to toggle interruptible waiting per acquire_ctx. If drm_modeset_acquire_init is called with DRM_MODESET_ACQUIRE_INTERRUPTIBLE, then we will perform interruptible waits in drm_modeset_lock and drm_modeset_backoff. Changes since v1: - Fix locking example in drm_modeset_lock.c to be compatible with interruptible waiting (xexaxo) and make it default. Uninterruptible waiting shouldn't happen except in corner cases, but the example will still apply if the flag is removed. - Add drm_modeset_lock_single_interruptible() to documentation. - Fix dead link to removed drm_modeset_lock_interruptible() in drm_modeset_lock(). Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch> #v1 Cc: Emil Velikov <emil.l.velikov@gmail.com> Link: https://patchwork.freedesktop.org/patch/msgid/20170912133749.6532-2-maarten.lankhorst@linux.intel.com Reviewed-by: Emil Velikov <emil.l.velikov@gmail.com>
This commit is contained in:
parent
9ab12e88a0
commit
6f8bcc744a
3 changed files with 57 additions and 53 deletions
|
@@ -155,7 +155,7 @@ static int crtc_crc_open(struct inode *inode, struct file *filep)
|
|||
int ret = 0;
|
||||
|
||||
if (drm_drv_uses_atomic_modeset(crtc->dev)) {
|
||||
ret = drm_modeset_lock_interruptible(&crtc->mutex, NULL);
|
||||
ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
|
|
@@ -39,23 +39,28 @@
|
|||
*
|
||||
* The basic usage pattern is to::
|
||||
*
|
||||
* drm_modeset_acquire_init(&ctx)
|
||||
* drm_modeset_acquire_init(ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
|
||||
* retry:
|
||||
* foreach (lock in random_ordered_set_of_locks) {
|
||||
* ret = drm_modeset_lock(lock, &ctx)
|
||||
* ret = drm_modeset_lock(lock, ctx)
|
||||
* if (ret == -EDEADLK) {
|
||||
* drm_modeset_backoff(&ctx);
|
||||
* goto retry;
|
||||
* ret = drm_modeset_backoff(ctx);
|
||||
* if (!ret)
|
||||
* goto retry;
|
||||
* }
|
||||
* if (ret)
|
||||
* goto out;
|
||||
* }
|
||||
* ... do stuff ...
|
||||
* drm_modeset_drop_locks(&ctx);
|
||||
* drm_modeset_acquire_fini(&ctx);
|
||||
* out:
|
||||
* drm_modeset_drop_locks(ctx);
|
||||
* drm_modeset_acquire_fini(ctx);
|
||||
*
|
||||
* If all that is needed is a single modeset lock, then the &struct
|
||||
* drm_modeset_acquire_ctx is not needed and the locking can be simplified
|
||||
* by passing a NULL instead of ctx in the drm_modeset_lock()
|
||||
* call and, when done, by calling drm_modeset_unlock().
|
||||
* by passing a NULL instead of ctx in the drm_modeset_lock() call or
|
||||
* calling drm_modeset_lock_single_interruptible(). To unlock afterwards
|
||||
* call drm_modeset_unlock().
|
||||
*
|
||||
* On top of these per-object locks using &ww_mutex there's also an overall
|
||||
* &drm_mode_config.mutex, for protecting everything else. Mostly this means
|
||||
|
@@ -178,7 +183,11 @@ EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
|
|||
/**
|
||||
* drm_modeset_acquire_init - initialize acquire context
|
||||
* @ctx: the acquire context
|
||||
* @flags: for future
|
||||
* @flags: 0 or %DRM_MODESET_ACQUIRE_INTERRUPTIBLE
|
||||
*
|
||||
* When passing %DRM_MODESET_ACQUIRE_INTERRUPTIBLE to @flags,
|
||||
* all calls to drm_modeset_lock() will perform an interruptible
|
||||
* wait.
|
||||
*/
|
||||
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
|
||||
uint32_t flags)
|
||||
|
@@ -186,6 +195,9 @@ void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
|
|||
memset(ctx, 0, sizeof(*ctx));
|
||||
ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
|
||||
INIT_LIST_HEAD(&ctx->locked);
|
||||
|
||||
if (flags & DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
|
||||
ctx->interruptible = true;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_modeset_acquire_init);
|
||||
|
||||
|
@@ -261,8 +273,19 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
|
||||
bool interruptible)
|
||||
/**
|
||||
* drm_modeset_backoff - deadlock avoidance backoff
|
||||
* @ctx: the acquire context
|
||||
*
|
||||
* If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
|
||||
* you must call this function to drop all currently held locks and
|
||||
* block until the contended lock becomes available.
|
||||
*
|
||||
* This function returns 0 on success, or -ERESTARTSYS if this context
|
||||
* is initialized with %DRM_MODESET_ACQUIRE_INTERRUPTIBLE and the
|
||||
* wait has been interrupted.
|
||||
*/
|
||||
int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
|
||||
{
|
||||
struct drm_modeset_lock *contended = ctx->contended;
|
||||
|
||||
|
@@ -273,35 +296,10 @@ static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
|
|||
|
||||
drm_modeset_drop_locks(ctx);
|
||||
|
||||
return modeset_lock(contended, ctx, interruptible, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_modeset_backoff - deadlock avoidance backoff
|
||||
* @ctx: the acquire context
|
||||
*
|
||||
* If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
|
||||
* you must call this function to drop all currently held locks and
|
||||
* block until the contended lock becomes available.
|
||||
*/
|
||||
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
|
||||
{
|
||||
modeset_backoff(ctx, false);
|
||||
return modeset_lock(contended, ctx, ctx->interruptible, true);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_modeset_backoff);
|
||||
|
||||
/**
|
||||
* drm_modeset_backoff_interruptible - deadlock avoidance backoff
|
||||
* @ctx: the acquire context
|
||||
*
|
||||
* Interruptible version of drm_modeset_backoff()
|
||||
*/
|
||||
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
|
||||
{
|
||||
return modeset_backoff(ctx, true);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
|
||||
|
||||
/**
|
||||
* drm_modeset_lock_init - initialize lock
|
||||
* @lock: lock to init
|
||||
|
@@ -324,14 +322,18 @@ EXPORT_SYMBOL(drm_modeset_lock_init);
|
|||
* deadlock scenario has been detected and it is an error to attempt
|
||||
* to take any more locks without first calling drm_modeset_backoff().
|
||||
*
|
||||
* If the @ctx is not NULL and initialized with
|
||||
* %DRM_MODESET_ACQUIRE_INTERRUPTIBLE, this function will fail with
|
||||
* -ERESTARTSYS when interrupted.
|
||||
*
|
||||
* If @ctx is NULL then the function call behaves like a normal,
|
||||
* non-nesting mutex_lock() call.
|
||||
* uninterruptible non-nesting mutex_lock() call.
|
||||
*/
|
||||
int drm_modeset_lock(struct drm_modeset_lock *lock,
|
||||
struct drm_modeset_acquire_ctx *ctx)
|
||||
{
|
||||
if (ctx)
|
||||
return modeset_lock(lock, ctx, false, false);
|
||||
return modeset_lock(lock, ctx, ctx->interruptible, false);
|
||||
|
||||
ww_mutex_lock(&lock->mutex, NULL);
|
||||
return 0;
|
||||
|
@@ -339,21 +341,19 @@ int drm_modeset_lock(struct drm_modeset_lock *lock,
|
|||
EXPORT_SYMBOL(drm_modeset_lock);
|
||||
|
||||
/**
|
||||
* drm_modeset_lock_interruptible - take modeset lock
|
||||
* drm_modeset_lock_single_interruptible - take a single modeset lock
|
||||
* @lock: lock to take
|
||||
* @ctx: acquire ctx
|
||||
*
|
||||
* Interruptible version of drm_modeset_lock()
|
||||
* This function behaves as drm_modeset_lock() with a NULL context,
|
||||
* but performs interruptible waits.
|
||||
*
|
||||
* This function returns 0 on success, or -ERESTARTSYS when interrupted.
|
||||
*/
|
||||
int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
|
||||
struct drm_modeset_acquire_ctx *ctx)
|
||||
int drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock)
|
||||
{
|
||||
if (ctx)
|
||||
return modeset_lock(lock, ctx, true, false);
|
||||
|
||||
return ww_mutex_lock_interruptible(&lock->mutex, NULL);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_modeset_lock_interruptible);
|
||||
EXPORT_SYMBOL(drm_modeset_lock_single_interruptible);
|
||||
|
||||
/**
|
||||
* drm_modeset_unlock - drop modeset lock
|
||||
|
|
|
@@ -34,6 +34,7 @@ struct drm_modeset_lock;
|
|||
* @contended: used internally for -EDEADLK handling
|
||||
* @locked: list of held locks
|
||||
* @trylock_only: trylock mode used in atomic contexts/panic notifiers
|
||||
* @interruptible: whether interruptible locking should be used.
|
||||
*
|
||||
* Each thread competing for a set of locks must use one acquire
|
||||
* ctx. And if any lock fxn returns -EDEADLK, it must backoff and
|
||||
|
@@ -59,6 +60,9 @@ struct drm_modeset_acquire_ctx {
|
|||
* Trylock mode, use only for panic handlers!
|
||||
*/
|
||||
bool trylock_only;
|
||||
|
||||
/* Perform interruptible waits on this context. */
|
||||
bool interruptible;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@@ -82,12 +86,13 @@ struct drm_modeset_lock {
|
|||
struct list_head head;
|
||||
};
|
||||
|
||||
#define DRM_MODESET_ACQUIRE_INTERRUPTIBLE BIT(0)
|
||||
|
||||
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
|
||||
uint32_t flags);
|
||||
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx);
|
||||
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx);
|
||||
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx);
|
||||
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx);
|
||||
int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx);
|
||||
|
||||
void drm_modeset_lock_init(struct drm_modeset_lock *lock);
|
||||
|
||||
|
@@ -111,8 +116,7 @@ static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock)
|
|||
|
||||
int drm_modeset_lock(struct drm_modeset_lock *lock,
|
||||
struct drm_modeset_acquire_ctx *ctx);
|
||||
int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
|
||||
struct drm_modeset_acquire_ctx *ctx);
|
||||
int __must_check drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock);
|
||||
void drm_modeset_unlock(struct drm_modeset_lock *lock);
|
||||
|
||||
struct drm_device;
|
||||
|
|
Loading…
Reference in a new issue