drm/i915/ringbuffer: Make IRQ refcnting atomic
In order to enforce the correct memory barriers for irq get/put, we need to
perform the actual counting using atomic operations.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
commit b13c2b96bf
parent 8d5203ca62

4 changed files with 56 additions and 46 deletions
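The change turns ring->irq_get() into a bool __must_check hook and makes the per-ring IRQ reference count an atomic_t, so, per the commit message above, the counting itself supplies the required memory barriers. Callers must now check the return value and only balance a successful get with ring->irq_put(). A minimal sketch of that caller-side pattern, mirroring the wait paths changed below (example_wait() is a hypothetical helper named only for illustration, not code from this patch):

static int example_wait(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret = -ENODEV;		/* reported when the IRQ cannot be enabled */

	if (ring->irq_get(ring)) {	/* first get atomically enables the ring IRQ */
		ret = wait_event_interruptible(ring->irq_queue,
				i915_seqno_passed(ring->get_seqno(ring), seqno));
		ring->irq_put(ring);	/* last put disables the ring IRQ again */
	}

	return ret;
}

The hunks below cover the GEM request-wait paths (i915_do_wait_request, i915_gem_ring_throttle), the legacy wait/trace IRQ paths (i915_trace_irq_get, i915_wait_irq), the per-ring get/put implementations in intel_ringbuffer.c, and struct intel_ring_buffer in intel_ringbuffer.h.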
@@ -2000,17 +2000,19 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 		trace_i915_gem_request_wait_begin(dev, seqno);
 
 		ring->waiting_seqno = seqno;
-		ring->irq_get(ring);
-		if (interruptible)
-			ret = wait_event_interruptible(ring->irq_queue,
-				i915_seqno_passed(ring->get_seqno(ring), seqno)
-				|| atomic_read(&dev_priv->mm.wedged));
-		else
-			wait_event(ring->irq_queue,
-				i915_seqno_passed(ring->get_seqno(ring), seqno)
-				|| atomic_read(&dev_priv->mm.wedged));
+		ret = -ENODEV;
+		if (ring->irq_get(ring)) {
+			if (interruptible)
+				ret = wait_event_interruptible(ring->irq_queue,
+					i915_seqno_passed(ring->get_seqno(ring), seqno)
+					|| atomic_read(&dev_priv->mm.wedged));
+			else
+				wait_event(ring->irq_queue,
+					i915_seqno_passed(ring->get_seqno(ring), seqno)
+					|| atomic_read(&dev_priv->mm.wedged));
 
-		ring->irq_put(ring);
+			ring->irq_put(ring);
+		}
 		ring->waiting_seqno = 0;
 
 		trace_i915_gem_request_wait_end(dev, seqno);
@@ -3157,14 +3159,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 		 * generation is designed to be run atomically and so is
 		 * lockless.
 		 */
-		ring->irq_get(ring);
-		ret = wait_event_interruptible(ring->irq_queue,
-				i915_seqno_passed(ring->get_seqno(ring), seqno)
-				|| atomic_read(&dev_priv->mm.wedged));
-		ring->irq_put(ring);
+		if (ring->irq_get(ring)) {
+			ret = wait_event_interruptible(ring->irq_queue,
+					i915_seqno_passed(ring->get_seqno(ring), seqno)
+					|| atomic_read(&dev_priv->mm.wedged));
+			ring->irq_put(ring);
 
-		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
-			ret = -EIO;
+			if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+				ret = -EIO;
+		}
 	}
 
 	if (ret == 0)
@@ -1186,10 +1186,9 @@ void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
-	if (dev_priv->trace_irq_seqno == 0)
-		ring->irq_get(ring);
-
-	dev_priv->trace_irq_seqno = seqno;
+	if (dev_priv->trace_irq_seqno == 0 &&
+	    ring->irq_get(ring))
+		dev_priv->trace_irq_seqno = seqno;
 }
 
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -1211,10 +1210,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-	ring->irq_get(ring);
-	DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
-		    READ_BREADCRUMB(dev_priv) >= irq_nr);
-	ring->irq_put(ring);
+	ret = -ENODEV;
+	if (ring->irq_get(ring)) {
+		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+			    READ_BREADCRUMB(dev_priv) >= irq_nr);
+		ring->irq_put(ring);
+	}
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -327,25 +327,28 @@ ring_get_seqno(struct intel_ring_buffer *ring)
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
-static void
+static bool
 render_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 
-	if (dev->irq_enabled && ++ring->irq_refcount == 1) {
+	if (!dev->irq_enabled)
+		return false;
+
+	if (atomic_inc_return(&ring->irq_refcount) == 1) {
 		drm_i915_private_t *dev_priv = dev->dev_private;
 		unsigned long irqflags;
 
 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
 		if (HAS_PCH_SPLIT(dev))
 			ironlake_enable_graphics_irq(dev_priv,
						     GT_USER_INTERRUPT);
 		else
 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
 
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+
+	return true;
 }
 
 static void
@@ -353,8 +356,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 
-	BUG_ON(dev->irq_enabled && ring->irq_refcount == 0);
-	if (dev->irq_enabled && --ring->irq_refcount == 0) {
+	if (atomic_dec_and_test(&ring->irq_refcount)) {
 		drm_i915_private_t *dev_priv = dev->dev_private;
 		unsigned long irqflags;
 
@@ -417,12 +419,15 @@ ring_add_request(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static void
+static bool
 ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 {
 	struct drm_device *dev = ring->dev;
 
-	if (dev->irq_enabled && ++ring->irq_refcount == 1) {
+	if (!dev->irq_enabled)
+		return false;
+
+	if (atomic_inc_return(&ring->irq_refcount) == 1) {
 		drm_i915_private_t *dev_priv = dev->dev_private;
 		unsigned long irqflags;
 
@@ -430,6 +435,8 @@ ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 		ironlake_enable_graphics_irq(dev_priv, flag);
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+
+	return true;
 }
 
 static void
@@ -437,7 +444,7 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
 {
 	struct drm_device *dev = ring->dev;
 
-	if (dev->irq_enabled && --ring->irq_refcount == 0) {
+	if (atomic_dec_and_test(&ring->irq_refcount)) {
 		drm_i915_private_t *dev_priv = dev->dev_private;
 		unsigned long irqflags;
 
@@ -447,16 +454,15 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
 	}
 }
 
-
-static void
+static bool
 bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
+	return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
 }
 static void
 bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
 }
 
 static int
@@ -846,16 +852,16 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static void
+static bool
 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+	return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
 }
 
 static void
 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
 }
 
 /* ring buffer for Video Codec for Gen6+ */
@@ -876,16 +882,16 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 
 /* Blitter support (SandyBridge+) */
 
-static void
+static bool
 blt_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
+	return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
 }
 
 static void
 blt_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
 }
 
@@ -54,8 +54,8 @@ struct intel_ring_buffer {
 	u32		irq_seqno;		/* last seq seem at irq time */
 	u32		waiting_seqno;
 	u32		sync_seqno[I915_NUM_RINGS-1];
-	u32		irq_refcount;
-	void		(*irq_get)(struct intel_ring_buffer *ring);
+	atomic_t	irq_refcount;
+	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
 	void		(*irq_put)(struct intel_ring_buffer *ring);
 
 	int		(*init)(struct intel_ring_buffer *ring);