drm/i915: paper over missed irq issues with force wake voodoo
Two things seem to do the trick on my ivb machine here: - prevent the gt from powering down while waiting for seqno notification interrupts by grabbing the force_wake in get_irq (and dropping it in put_irq again). - ordering writes from the ring's CS by reading a CS register, ACTHD seems to work. Only the blt & bsd rings on ivb seem to be massively affected by this, but for paranoia do this dance also on the render ring and on snb (i.e. all gpus with forcewake). Tested with Eric's glCopyPixels loop which without this patch scores a missed irq every few seconds. This patch needs my forcewake rework to use a spinlock instead of dev->struct_mutex. After crawling through docs a lot I've found the following nugget: Internal doc "SNB GT PM Programming Guide", Section 4.3.1: "GT does not generate interrupts while in RC6 (by design)" So it looks like rc6 and irq generation are indeed related. v2: Improve the comment per Eugeni Dodonov's suggestion. v3: Add the documentation snippet. Also restrict the w/a to ivb only for -fixes, as suggested by Keith Packard. Cc: stable@kernel.org Cc: Eric Anholt <eric@anholt.net> Cc: Kenneth Graunke <kenneth@whitecape.org> Cc: Eugeni Dodonov <eugeni.dodonov@intel.com> Tested-by: Eugeni Dodonov <eugeni.dodonov@intel.com> Reviewed-by: Eugeni Dodonov <eugeni.dodonov@intel.com> Signed-Off-by: Daniel Vetter <daniel.vetter@ffwll.ch> Signed-off-by: Keith Packard <keithp@keithp.com>
This commit is contained in:
parent
c937504e2b
commit
4cd53c0c8b
1 changed files with 25 additions and 2 deletions
|
@ -635,6 +635,19 @@ render_ring_add_request(struct intel_ring_buffer *ring,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static u32
|
||||||
|
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
|
||||||
|
{
|
||||||
|
struct drm_device *dev = ring->dev;
|
||||||
|
|
||||||
|
/* Workaround to force correct ordering between irq and seqno writes on
|
||||||
|
* ivb (and maybe also on snb) by reading from a CS register (like
|
||||||
|
* ACTHD) before reading the status page. */
|
||||||
|
if (IS_GEN7(dev))
|
||||||
|
intel_ring_get_active_head(ring);
|
||||||
|
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
|
||||||
|
}
|
||||||
|
|
||||||
static u32
|
static u32
|
||||||
ring_get_seqno(struct intel_ring_buffer *ring)
|
ring_get_seqno(struct intel_ring_buffer *ring)
|
||||||
{
|
{
|
||||||
|
@ -811,6 +824,12 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
|
||||||
if (!dev->irq_enabled)
|
if (!dev->irq_enabled)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
|
/* It looks like we need to prevent the gt from suspending while waiting
|
||||||
|
* for a notify irq, otherwise irqs seem to get lost on at least the
|
||||||
|
* blt/bsd rings on ivb. */
|
||||||
|
if (IS_GEN7(dev))
|
||||||
|
gen6_gt_force_wake_get(dev_priv);
|
||||||
|
|
||||||
spin_lock(&ring->irq_lock);
|
spin_lock(&ring->irq_lock);
|
||||||
if (ring->irq_refcount++ == 0) {
|
if (ring->irq_refcount++ == 0) {
|
||||||
ring->irq_mask &= ~rflag;
|
ring->irq_mask &= ~rflag;
|
||||||
|
@ -835,6 +854,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
|
||||||
ironlake_disable_irq(dev_priv, gflag);
|
ironlake_disable_irq(dev_priv, gflag);
|
||||||
}
|
}
|
||||||
spin_unlock(&ring->irq_lock);
|
spin_unlock(&ring->irq_lock);
|
||||||
|
|
||||||
|
if (IS_GEN7(dev))
|
||||||
|
gen6_gt_force_wake_put(dev_priv);
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool
|
static bool
|
||||||
|
@ -1341,7 +1363,7 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
|
||||||
.write_tail = gen6_bsd_ring_write_tail,
|
.write_tail = gen6_bsd_ring_write_tail,
|
||||||
.flush = gen6_ring_flush,
|
.flush = gen6_ring_flush,
|
||||||
.add_request = gen6_add_request,
|
.add_request = gen6_add_request,
|
||||||
.get_seqno = ring_get_seqno,
|
.get_seqno = gen6_ring_get_seqno,
|
||||||
.irq_get = gen6_bsd_ring_get_irq,
|
.irq_get = gen6_bsd_ring_get_irq,
|
||||||
.irq_put = gen6_bsd_ring_put_irq,
|
.irq_put = gen6_bsd_ring_put_irq,
|
||||||
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
|
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
|
||||||
|
@ -1476,7 +1498,7 @@ static const struct intel_ring_buffer gen6_blt_ring = {
|
||||||
.write_tail = ring_write_tail,
|
.write_tail = ring_write_tail,
|
||||||
.flush = blt_ring_flush,
|
.flush = blt_ring_flush,
|
||||||
.add_request = gen6_add_request,
|
.add_request = gen6_add_request,
|
||||||
.get_seqno = ring_get_seqno,
|
.get_seqno = gen6_ring_get_seqno,
|
||||||
.irq_get = blt_ring_get_irq,
|
.irq_get = blt_ring_get_irq,
|
||||||
.irq_put = blt_ring_put_irq,
|
.irq_put = blt_ring_put_irq,
|
||||||
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
|
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
|
||||||
|
@ -1499,6 +1521,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
|
||||||
ring->flush = gen6_render_ring_flush;
|
ring->flush = gen6_render_ring_flush;
|
||||||
ring->irq_get = gen6_render_ring_get_irq;
|
ring->irq_get = gen6_render_ring_get_irq;
|
||||||
ring->irq_put = gen6_render_ring_put_irq;
|
ring->irq_put = gen6_render_ring_put_irq;
|
||||||
|
ring->get_seqno = gen6_ring_get_seqno;
|
||||||
} else if (IS_GEN5(dev)) {
|
} else if (IS_GEN5(dev)) {
|
||||||
ring->add_request = pc_render_add_request;
|
ring->add_request = pc_render_add_request;
|
||||||
ring->get_seqno = pc_render_get_seqno;
|
ring->get_seqno = pc_render_get_seqno;
|
||||||
|
|
Loading…
Reference in a new issue