drm/nouveau: Simplify tile region handling.
Instead of emptying the caches to avoid a race with the PFIFO puller,
go straight ahead and try to recover from it when it happens. Also,
kill pfifo->cache_flush and tile->lock, we don't need them anymore.

Signed-off-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 4b5c152a79
commit 9f56b1265d

6 changed files with 36 additions and 60 deletions
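In outline: the puller is simply stopped with a masked register write, and if it was caught mid-lookup the driver waits for it to idle, acknowledges any hash failure, and invalidates the last translated instance. A condensed sketch of the new disable path (paraphrased from the nv04_fifo.c hunk below — the function name is made up for illustration, not the verbatim driver code):

/* Sketch of the recovery-based disable path; see the real hunk below. */
static bool cache_pull_disable_sketch(struct drm_device *dev)
{
	/* Clear the pull-enable bit; nv_mask() returns the old value. */
	int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, 0);

	/* Wait for the puller to finish any in-flight handle lookup. */
	if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
		     NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
		NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");

	/* If the lookup failed, ack the pending CACHE_ERROR interrupt. */
	if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
	    NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nv_wr32(dev, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	/* Invalidate the most recently translated instance so a stale
	 * offset is never handed to PGRAPH on restart. */
	nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);

	return pull & 1;	/* previous pull state */
}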
drivers/gpu/drm/nouveau/nouveau_drv.h

@@ -296,7 +296,6 @@ struct nouveau_fifo_engine {
 	void (*disable)(struct drm_device *);
 	void (*enable)(struct drm_device *);
 	bool (*reassign)(struct drm_device *, bool enable);
-	bool (*cache_flush)(struct drm_device *dev);
 	bool (*cache_pull)(struct drm_device *dev, bool enable);
 
 	int (*channel_id)(struct drm_device *);
@@ -569,10 +568,7 @@ struct drm_nouveau_private {
 	} gart_info;
 
 	/* nv10-nv40 tiling regions */
-	struct {
-		struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
-		spinlock_t lock;
-	} tile;
+	struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR];
 
 	/* VRAM/fb configuration */
 	uint64_t vram_size;
@@ -917,7 +913,6 @@ extern int nv04_fifo_init(struct drm_device *);
 extern void nv04_fifo_disable(struct drm_device *);
 extern void nv04_fifo_enable(struct drm_device *);
 extern bool nv04_fifo_reassign(struct drm_device *, bool);
-extern bool nv04_fifo_cache_flush(struct drm_device *);
 extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
 extern int  nv04_fifo_channel_id(struct drm_device *);
 extern int  nv04_fifo_create_context(struct nouveau_channel *);
@@ -955,7 +950,6 @@ extern void nvc0_fifo_takedown(struct drm_device *);
 extern void nvc0_fifo_disable(struct drm_device *);
 extern void nvc0_fifo_enable(struct drm_device *);
 extern bool nvc0_fifo_reassign(struct drm_device *, bool);
-extern bool nvc0_fifo_cache_flush(struct drm_device *);
 extern bool nvc0_fifo_cache_pull(struct drm_device *, bool);
 extern int  nvc0_fifo_channel_id(struct drm_device *);
 extern int  nvc0_fifo_create_context(struct nouveau_channel *);
drivers/gpu/drm/nouveau/nouveau_mem.c

@@ -47,18 +47,14 @@ nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+	struct nouveau_tile_reg *tile = &dev_priv->tile[i];
 
 	tile->addr = addr;
 	tile->size = size;
 	tile->used = !!pitch;
 	nouveau_fence_unref((void **)&tile->fence);
 
-	if (!pfifo->cache_flush(dev))
-		return;
-
 	pfifo->reassign(dev, false);
-	pfifo->cache_flush(dev);
 	pfifo->cache_pull(dev, false);
 
 	nouveau_wait_for_idle(dev);
@@ -76,34 +72,36 @@ nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
-	int i;
+	struct nouveau_tile_reg *found = NULL;
+	unsigned long i, flags;
 
-	spin_lock(&dev_priv->tile.lock);
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
 	for (i = 0; i < pfb->num_tiles; i++) {
-		if (tile[i].used)
+		struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+
+		if (tile->used)
 			/* Tile region in use. */
 			continue;
 
-		if (tile[i].fence &&
-		    !nouveau_fence_signalled(tile[i].fence, NULL))
+		if (tile->fence &&
+		    !nouveau_fence_signalled(tile->fence, NULL))
 			/* Pending tile region. */
 			continue;
 
-		if (max(tile[i].addr, addr) <
-		    min(tile[i].addr + tile[i].size, addr + size))
+		if (max(tile->addr, addr) <
+		    min(tile->addr + tile->size, addr + size))
 			/* Kill an intersecting tile region. */
 			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
 
 		if (pitch && !found) {
 			/* Free tile region. */
 			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
-			found = &tile[i];
+			found = tile;
 		}
 	}
 
-	spin_unlock(&dev_priv->tile.lock);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
 	return found;
 }
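The tile-region scan now nests under dev_priv->context_switch_lock using the IRQ-saving lock variant, presumably because the same lock is also taken from the PFIFO interrupt path. The generic kernel pattern, as an illustration only (not part of the patch):

/* Generic spin_lock_irqsave() usage (illustrative). */
unsigned long flags;

spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
/* ... critical section also reachable from the IRQ handler ... */
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);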
@@ -568,8 +566,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
 		return ret;
 	}
 
-	spin_lock_init(&dev_priv->tile.lock);
-
 	dev_priv->fb_available_size = dev_priv->vram_size;
 	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
 	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
drivers/gpu/drm/nouveau/nouveau_reg.h

@@ -551,6 +551,8 @@
 #define NV10_PFIFO_CACHE1_DMA_SUBROUTINE                   0x0000324C
 #define NV03_PFIFO_CACHE1_PULL0                            0x00003240
 #define NV04_PFIFO_CACHE1_PULL0                            0x00003250
+#    define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED            0x00000010
+#    define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY              0x00001000
 #define NV03_PFIFO_CACHE1_PULL1                            0x00003250
 #define NV04_PFIFO_CACHE1_PULL1                            0x00003254
 #define NV04_PFIFO_CACHE1_HASH                             0x00003258
drivers/gpu/drm/nouveau/nouveau_state.c

@@ -79,7 +79,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
-		engine->fifo.cache_flush	= nv04_fifo_cache_flush;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv04_fifo_channel_id;
 		engine->fifo.create_context	= nv04_fifo_create_context;
@@ -131,7 +130,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
-		engine->fifo.cache_flush	= nv04_fifo_cache_flush;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv10_fifo_create_context;
@@ -183,7 +181,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
-		engine->fifo.cache_flush	= nv04_fifo_cache_flush;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv10_fifo_create_context;
@@ -235,7 +232,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
-		engine->fifo.cache_flush	= nv04_fifo_cache_flush;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv10_fifo_create_context;
@@ -288,7 +284,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
-		engine->fifo.cache_flush	= nv04_fifo_cache_flush;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv40_fifo_create_context;
drivers/gpu/drm/nouveau/nv04_fifo.c

@@ -70,38 +70,33 @@ nv04_fifo_reassign(struct drm_device *dev, bool enable)
 	return (reassign == 1);
 }
 
 bool
-nv04_fifo_cache_flush(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
-	uint64_t start = ptimer->read(dev);
-
-	do {
-		if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) ==
-		    nv_rd32(dev, NV03_PFIFO_CACHE1_PUT))
-			return true;
-
-	} while (ptimer->read(dev) - start < 100000000);
-
-	NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n");
-
-	return false;
-}
-
-bool
 nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
 {
-	uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0);
+	int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);
 
-	if (enable) {
-		nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1);
-	} else {
-		nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1);
+	if (!enable) {
+		/* In some cases the PFIFO puller may be left in an
+		 * inconsistent state if you try to stop it when it's
+		 * busy translating handles. Sometimes you get a
+		 * PFIFO_CACHE_ERROR, sometimes it just fails silently
+		 * sending incorrect instance offsets to PGRAPH after
+		 * it's started up again. To avoid the latter we
+		 * invalidate the most recently calculated instance.
+		 */
+		if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
+			     NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
+			NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
+
+		if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
+		    NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
+			nv_wr32(dev, NV03_PFIFO_INTR_0,
+				NV_PFIFO_INTR_CACHE_ERROR);
+
 		nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
 	}
 
-	return !!(pull & 1);
+	return pull & 1;
 }
 
 int
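The rewritten nv04_fifo_cache_pull() relies on nv_mask() being a read-modify-write register helper that returns the register's previous value, which is how the function can still report the old pull state. Its approximate shape, from memory of nouveau's helpers of that era (an assumption — check nouveau_drv.h):

/* Assumed shape of nouveau's nv_mask() helper, not part of this patch:
 * read the register, update the masked bits, return the old value. */
static inline u32 nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val)
{
	u32 tmp = nv_rd32(dev, reg);
	nv_wr32(dev, reg, (tmp & ~mask) | val);
	return tmp;
}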
drivers/gpu/drm/nouveau/nvc0_fifo.c

@@ -42,12 +42,6 @@ nvc0_fifo_reassign(struct drm_device *dev, bool enable)
 	return false;
 }
 
-bool
-nvc0_fifo_cache_flush(struct drm_device *dev)
-{
-	return true;
-}
-
 bool
 nvc0_fifo_cache_pull(struct drm_device *dev, bool enable)
 {