drm/i915: Move the irq_counter inside the spinlock
Rather than have multiple locked instructions inside the notify_ring() irq handler, move them inside the spinlock and reduce their intrinsic locking.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180627201304.15817-3-chris@chris-wilson.co.uk
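As a rough illustration of the idea (a minimal userspace sketch, not the driver's code; struct demo_engine and the demo_* names are made up), the interrupt handler already takes the per-engine breadcrumbs.irq_lock to wake waiters, so the counter can become a plain integer bumped under that lock instead of paying for a separate atomic_inc on every interrupt:

/*
 * Minimal userspace sketch of the pattern (hypothetical names, not the
 * kernel code): the irq handler already holds a per-engine spinlock, so
 * the interrupt counter becomes a plain integer incremented under it.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct demo_engine {
        pthread_spinlock_t irq_lock;    /* stands in for breadcrumbs.irq_lock */
        unsigned int irq_count;         /* was: atomic_t irq_count */
};

/* Before: an extra atomic RMW on every interrupt, outside the lock. */
static void demo_notify_before(struct demo_engine *e, atomic_uint *irq_count)
{
        atomic_fetch_add_explicit(irq_count, 1, memory_order_relaxed);

        pthread_spin_lock(&e->irq_lock);
        /* ... wake any waiter registered on this engine ... */
        pthread_spin_unlock(&e->irq_lock);
}

/* After: the increment rides inside the spinlock we take anyway. */
static void demo_notify_after(struct demo_engine *e)
{
        pthread_spin_lock(&e->irq_lock);
        /* ... wake any waiter registered on this engine ... */
        e->irq_count++;                 /* plain store, serialised by irq_lock */
        pthread_spin_unlock(&e->irq_lock);
}

int main(void)
{
        struct demo_engine e = { .irq_count = 0 };
        atomic_uint old_count = 0;

        pthread_spin_init(&e.irq_lock, PTHREAD_PROCESS_PRIVATE);
        demo_notify_before(&e, &old_count);
        demo_notify_after(&e);
        printf("old-style count %u, new-style count %u\n",
               atomic_load(&old_count), e.irq_count);
        pthread_spin_destroy(&e.irq_lock);
        return 0;
}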
commit 78796877c3
parent 69dc4d003e

4 changed files with 12 additions and 9 deletions
drivers/gpu/drm/i915/i915_irq.c

@@ -1162,8 +1162,6 @@ static void notify_ring(struct intel_engine_cs *engine)
 	if (unlikely(!engine->breadcrumbs.irq_armed))
 		return;
 
-	atomic_inc(&engine->irq_count);
-
 	rcu_read_lock();
 
 	spin_lock(&engine->breadcrumbs.irq_lock);
@@ -1198,6 +1196,8 @@ static void notify_ring(struct intel_engine_cs *engine)
 				tsk = wait->tsk;
 			}
 		}
+
+		engine->breadcrumbs.irq_count++;
 	} else {
 		if (engine->breadcrumbs.irq_armed)
 			__intel_engine_disarm_breadcrumbs(engine);
drivers/gpu/drm/i915/i915_request.c

@@ -1196,7 +1196,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
 	 * takes to sleep on a request, on the order of a microsecond.
 	 */
 
-	irq = atomic_read(&engine->irq_count);
+	irq = READ_ONCE(engine->breadcrumbs.irq_count);
 	timeout_us += local_clock_us(&cpu);
 	do {
 		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
@@ -1208,7 +1208,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
 		 * assume we won't see one in the near future but require
 		 * the engine->seqno_barrier() to fixup coherency.
 		 */
-		if (atomic_read(&engine->irq_count) != irq)
+		if (READ_ONCE(engine->breadcrumbs.irq_count) != irq)
			break;
 
 		if (signal_pending_state(state, current))
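For the spin-wait hunks above, a sketch of the reader side (again hypothetical, not the kernel code; DEMO_READ_ONCE and demo_spin_for_irq are made-up names): the spinner never takes the writer's lock, so it snapshots the counter with a READ_ONCE-style volatile load and abandons the spin as soon as a later sample differs, meaning an interrupt arrived while it was polling:

/* Sketch of the lockless reader side (hypothetical names, not the driver's code). */
#define DEMO_READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

/*
 * Poll an interrupt counter without taking the writer's lock.  Returns 0 as
 * soon as a later sample differs from the initial snapshot (an interrupt
 * arrived while spinning), or 1 once the spin budget is exhausted.
 */
static int demo_spin_for_irq(const unsigned int *irq_count, unsigned int spins)
{
        unsigned int irq = DEMO_READ_ONCE(*irq_count);  /* snapshot */

        while (spins--) {
                /* ... check whether the awaited seqno has already passed ... */

                if (DEMO_READ_ONCE(*irq_count) != irq)
                        return 0;       /* an interrupt fired; stop spinning */
        }

        return 1;                       /* spun out without seeing an interrupt */
}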
drivers/gpu/drm/i915/intel_breadcrumbs.c

@@ -98,12 +98,14 @@ static void intel_breadcrumbs_hangcheck(struct timer_list *t)
 	struct intel_engine_cs *engine =
 		from_timer(engine, t, breadcrumbs.hangcheck);
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+	unsigned int irq_count;
 
 	if (!b->irq_armed)
 		return;
 
-	if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
-		b->hangcheck_interrupts = atomic_read(&engine->irq_count);
+	irq_count = READ_ONCE(b->irq_count);
+	if (b->hangcheck_interrupts != irq_count) {
+		b->hangcheck_interrupts = irq_count;
 		mod_timer(&b->hangcheck, wait_timeout());
 		return;
 	}
@@ -272,13 +274,14 @@ static bool use_fake_irq(const struct intel_breadcrumbs *b)
 	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
 		return false;
 
-	/* Only start with the heavy weight fake irq timer if we have not
+	/*
+	 * Only start with the heavy weight fake irq timer if we have not
 	 * seen any interrupts since enabling it the first time. If the
 	 * interrupts are still arriving, it means we made a mistake in our
 	 * engine->seqno_barrier(), a timing error that should be transient
 	 * and unlikely to reoccur.
 	 */
-	return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
+	return READ_ONCE(b->irq_count) == b->hangcheck_interrupts;
 }
 
 static void enable_fake_irq(struct intel_breadcrumbs *b)
drivers/gpu/drm/i915/intel_ringbuffer.h

@@ -345,7 +345,6 @@ struct intel_engine_cs {
 	struct drm_i915_gem_object *default_state;
 	void *pinned_default_state;
 
-	atomic_t irq_count;
 	unsigned long irq_posted;
 #define ENGINE_IRQ_BREADCRUMB 0
 #define ENGINE_IRQ_EXECLIST 1
@@ -380,6 +379,7 @@ struct intel_engine_cs {
 
 		unsigned int hangcheck_interrupts;
 		unsigned int irq_enabled;
+		unsigned int irq_count;
 
 		bool irq_armed : 1;
 		I915_SELFTEST_DECLARE(bool mock : 1);