drm/nouveau: Rework tile region handling.
The point is to share more code between the PFB/PGRAPH tile region hooks, and to give the hardware-specific functions a chance to allocate per-region resources.

Signed-off-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent e419cf0954
commit a5cf68b04b

10 changed files with 219 additions and 158 deletions
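Before the diff itself, here is a minimal, self-contained sketch (not the kernel source) of the hook split this patch introduces: the single per-engine set_region_tiling() callback is replaced by init/set/free hooks on PFB plus a set hook on PGRAPH, and the generic code drives them. The hook names mirror the diff below; the stub types and the standalone update_tile_region() helper are illustrative only.

#include <stdint.h>
#include <stdbool.h>

struct drm_device;                      /* opaque in this sketch */

/* Stand-in for the reworked nouveau_fb_engine hook table. */
struct fb_tile_hooks {
	void (*init_tile_region)(struct drm_device *dev, int i,
				 uint32_t addr, uint32_t size,
				 uint32_t pitch, uint32_t flags);
	void (*set_tile_region)(struct drm_device *dev, int i);
	void (*free_tile_region)(struct drm_device *dev, int i);
};

/*
 * Mirrors the flow of nv10_mem_update_tile_region() in the diff: free any
 * per-region state the chipset code allocated, re-initialise it if the
 * region is being (re)enabled, then program the hardware.
 */
static void
update_tile_region(struct drm_device *dev, const struct fb_tile_hooks *pfb,
		   int i, bool was_tiled, uint32_t addr, uint32_t size,
		   uint32_t pitch, uint32_t flags)
{
	if (was_tiled)
		pfb->free_tile_region(dev, i);
	if (pitch)
		pfb->init_tile_region(dev, i, addr, size, pitch, flags);
	pfb->set_tile_region(dev, i);
}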
drivers/gpu/drm/nouveau/nouveau_bo.c

@@ -46,9 +46,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	if (unlikely(nvbo->gem))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
-	if (nvbo->tile)
-		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
-
+	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
 	kfree(nvbo);
 }
 
@@ -792,7 +790,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 
 	} else if (dev_priv->card_type >= NV_10) {
 		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
-						nvbo->tile_mode);
+						nvbo->tile_mode,
+						nvbo->tile_flags);
 	}
 
 	return 0;
@@ -808,9 +807,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 
 	if (dev_priv->card_type >= NV_10 &&
 	    dev_priv->card_type < NV_50) {
-		if (*old_tile)
-			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
-
+		nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
 		*old_tile = new_tile;
 	}
 }
drivers/gpu/drm/nouveau/nouveau_drv.h

@@ -66,10 +66,11 @@ struct nouveau_grctx;
 #define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
 
 struct nouveau_tile_reg {
-	struct nouveau_fence *fence;
-	uint32_t addr;
-	uint32_t size;
 	bool used;
+	uint32_t addr;
+	uint32_t limit;
+	uint32_t pitch;
+	struct nouveau_fence *fence;
 };
 
 struct nouveau_bo {
@@ -309,8 +310,11 @@ struct nouveau_fb_engine {
 	int (*init)(struct drm_device *dev);
 	void (*takedown)(struct drm_device *dev);
 
-	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
-				  uint32_t size, uint32_t pitch);
+	void (*init_tile_region)(struct drm_device *dev, int i,
+				 uint32_t addr, uint32_t size,
+				 uint32_t pitch, uint32_t flags);
+	void (*set_tile_region)(struct drm_device *dev, int i);
+	void (*free_tile_region)(struct drm_device *dev, int i);
 };
 
 struct nouveau_fifo_engine {
@@ -356,8 +360,7 @@ struct nouveau_pgraph_engine {
 	int (*unload_context)(struct drm_device *);
 	void (*tlb_flush)(struct drm_device *dev);
 
-	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
-				  uint32_t size, uint32_t pitch);
+	void (*set_tile_region)(struct drm_device *dev, int i);
 };
 
 struct nouveau_display_engine {
@@ -668,7 +671,10 @@ struct drm_nouveau_private {
 	} gart_info;
 
 	/* nv10-nv40 tiling regions */
-	struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR];
+	struct {
+		struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
+		spinlock_t lock;
+	} tile;
 
 	/* VRAM/fb configuration */
 	uint64_t vram_size;
@@ -798,13 +804,12 @@ extern void nouveau_mem_gart_fini(struct drm_device *);
 extern int nouveau_mem_init_agp(struct drm_device *);
 extern int nouveau_mem_reset_agp(struct drm_device *);
 extern void nouveau_mem_close(struct drm_device *);
-extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
-						    uint32_t addr,
-						    uint32_t size,
-						    uint32_t pitch);
-extern void nv10_mem_expire_tiling(struct drm_device *dev,
-				   struct nouveau_tile_reg *tile,
-				   struct nouveau_fence *fence);
+extern struct nouveau_tile_reg *nv10_mem_set_tiling(
+	struct drm_device *dev, uint32_t addr, uint32_t size,
+	uint32_t pitch, uint32_t flags);
+extern void nv10_mem_put_tile_region(struct drm_device *dev,
+				     struct nouveau_tile_reg *tile,
+				     struct nouveau_fence *fence);
 extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
 				   uint32_t size, uint32_t flags,
 				   uint64_t phys);
@@ -1011,18 +1016,25 @@ extern void nv04_fb_takedown(struct drm_device *);
 /* nv10_fb.c */
 extern int nv10_fb_init(struct drm_device *);
 extern void nv10_fb_takedown(struct drm_device *);
-extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
-				      uint32_t, uint32_t);
+extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
+				     uint32_t addr, uint32_t size,
+				     uint32_t pitch, uint32_t flags);
+extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
+extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);
 
 /* nv30_fb.c */
 extern int nv30_fb_init(struct drm_device *);
 extern void nv30_fb_takedown(struct drm_device *);
+extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
+				     uint32_t addr, uint32_t size,
+				     uint32_t pitch, uint32_t flags);
+extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);
 
 /* nv40_fb.c */
 extern int nv40_fb_init(struct drm_device *);
 extern void nv40_fb_takedown(struct drm_device *);
-extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
-				      uint32_t, uint32_t);
+extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
 
 /* nv50_fb.c */
 extern int nv50_fb_init(struct drm_device *);
 extern void nv50_fb_takedown(struct drm_device *);
@@ -1102,8 +1114,7 @@ extern void nv10_graph_destroy_context(struct nouveau_channel *);
 extern int nv10_graph_load_context(struct nouveau_channel *);
 extern int nv10_graph_unload_context(struct drm_device *);
 extern void nv10_graph_context_switch(struct drm_device *);
-extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
-					 uint32_t, uint32_t);
+extern void nv10_graph_set_tile_region(struct drm_device *dev, int i);
 
 /* nv20_graph.c */
 extern int nv20_graph_create_context(struct nouveau_channel *);
@@ -1113,8 +1124,7 @@ extern int nv20_graph_unload_context(struct drm_device *);
 extern int nv20_graph_init(struct drm_device *);
 extern void nv20_graph_takedown(struct drm_device *);
 extern int nv30_graph_init(struct drm_device *);
-extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
-					 uint32_t, uint32_t);
+extern void nv20_graph_set_tile_region(struct drm_device *dev, int i);
 
 /* nv40_graph.c */
 extern int nv40_graph_init(struct drm_device *);
@@ -1125,8 +1135,7 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *);
 extern int nv40_graph_load_context(struct nouveau_channel *);
 extern int nv40_graph_unload_context(struct drm_device *);
 extern void nv40_grctx_init(struct nouveau_grctx *);
-extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
-					 uint32_t, uint32_t);
+extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);
 
 /* nv50_graph.c */
 extern int nv50_graph_init(struct drm_device *);
drivers/gpu/drm/nouveau/nouveau_mem.c

@@ -42,85 +42,106 @@
  */
 
 static void
-nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			   uint32_t size, uint32_t pitch)
+nv10_mem_update_tile_region(struct drm_device *dev,
+			    struct nouveau_tile_reg *tile, uint32_t addr,
+			    uint32_t size, uint32_t pitch, uint32_t flags)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+	int i = tile - dev_priv->tile.reg;
+	unsigned long save;
 
-	tile->addr = addr;
-	tile->size = size;
-	tile->used = !!pitch;
 	nouveau_fence_unref(&tile->fence);
 
+	if (tile->pitch)
+		pfb->free_tile_region(dev, i);
+
+	if (pitch)
+		pfb->init_tile_region(dev, i, addr, size, pitch, flags);
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
 	pfifo->reassign(dev, false);
 	pfifo->cache_pull(dev, false);
 
 	nouveau_wait_for_idle(dev);
 
-	pgraph->set_region_tiling(dev, i, addr, size, pitch);
-	pfb->set_region_tiling(dev, i, addr, size, pitch);
+	pfb->set_tile_region(dev, i);
+	pgraph->set_tile_region(dev, i);
 
 	pfifo->cache_pull(dev, true);
 	pfifo->reassign(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
+}
+
+static struct nouveau_tile_reg *
+nv10_mem_get_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	spin_lock(&dev_priv->tile.lock);
+
+	if (!tile->used &&
+	    (!tile->fence || nouveau_fence_signalled(tile->fence)))
+		tile->used = true;
+	else
+		tile = NULL;
+
+	spin_unlock(&dev_priv->tile.lock);
+	return tile;
+}
+
+void
+nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
+			 struct nouveau_fence *fence)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (tile) {
+		spin_lock(&dev_priv->tile.lock);
+		if (fence) {
+			/* Mark it as pending. */
+			tile->fence = fence;
+			nouveau_fence_ref(fence);
+		}
+
+		tile->used = false;
+		spin_unlock(&dev_priv->tile.lock);
+	}
 }
 
 struct nouveau_tile_reg *
 nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
-		    uint32_t pitch)
+		    uint32_t pitch, uint32_t flags)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nouveau_tile_reg *found = NULL;
-	unsigned long i, flags;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	struct nouveau_tile_reg *tile, *found = NULL;
+	int i;
 
 	for (i = 0; i < pfb->num_tiles; i++) {
-		struct nouveau_tile_reg *tile = &dev_priv->tile[i];
-
-		if (tile->used)
-			/* Tile region in use. */
-			continue;
-
-		if (tile->fence &&
-		    !nouveau_fence_signalled(tile->fence))
-			/* Pending tile region. */
-			continue;
-
-		if (max(tile->addr, addr) <
-		    min(tile->addr + tile->size, addr + size))
-			/* Kill an intersecting tile region. */
-			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
+		tile = nv10_mem_get_tile_region(dev, i);
 
 		if (pitch && !found) {
-			/* Free tile region. */
-			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
 			found = tile;
+			continue;
+
+		} else if (tile && tile->pitch) {
+			/* Kill an unused tile region. */
+			nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
 		}
+
+		nv10_mem_put_tile_region(dev, tile, NULL);
 	}
 
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
+	if (found)
+		nv10_mem_update_tile_region(dev, found, addr, size,
+					    pitch, flags);
 	return found;
 }
 
-void
-nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
-		       struct nouveau_fence *fence)
-{
-	if (fence) {
-		/* Mark it as pending. */
-		tile->fence = fence;
-		nouveau_fence_ref(fence);
-	}
-
-	tile->used = false;
-}
-
 /*
  * NV50 VM helpers
  */
drivers/gpu/drm/nouveau/nouveau_state.c

@@ -118,7 +118,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown = nv04_timer_takedown;
 		engine->fb.init = nv10_fb_init;
 		engine->fb.takedown = nv10_fb_takedown;
-		engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
+		engine->fb.init_tile_region = nv10_fb_init_tile_region;
+		engine->fb.set_tile_region = nv10_fb_set_tile_region;
+		engine->fb.free_tile_region = nv10_fb_free_tile_region;
 		engine->graph.init = nv10_graph_init;
 		engine->graph.takedown = nv10_graph_takedown;
 		engine->graph.channel = nv10_graph_channel;
@@ -127,7 +129,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->graph.fifo_access = nv04_graph_fifo_access;
 		engine->graph.load_context = nv10_graph_load_context;
 		engine->graph.unload_context = nv10_graph_unload_context;
-		engine->graph.set_region_tiling = nv10_graph_set_region_tiling;
+		engine->graph.set_tile_region = nv10_graph_set_tile_region;
 		engine->fifo.channels = 32;
 		engine->fifo.init = nv10_fifo_init;
 		engine->fifo.takedown = nouveau_stub_takedown;
@@ -173,7 +175,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown = nv04_timer_takedown;
 		engine->fb.init = nv10_fb_init;
 		engine->fb.takedown = nv10_fb_takedown;
-		engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
+		engine->fb.init_tile_region = nv10_fb_init_tile_region;
+		engine->fb.set_tile_region = nv10_fb_set_tile_region;
+		engine->fb.free_tile_region = nv10_fb_free_tile_region;
 		engine->graph.init = nv20_graph_init;
 		engine->graph.takedown = nv20_graph_takedown;
 		engine->graph.channel = nv10_graph_channel;
@@ -182,7 +186,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->graph.fifo_access = nv04_graph_fifo_access;
 		engine->graph.load_context = nv20_graph_load_context;
 		engine->graph.unload_context = nv20_graph_unload_context;
-		engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
+		engine->graph.set_tile_region = nv20_graph_set_tile_region;
 		engine->fifo.channels = 32;
 		engine->fifo.init = nv10_fifo_init;
 		engine->fifo.takedown = nouveau_stub_takedown;
@@ -228,7 +232,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown = nv04_timer_takedown;
 		engine->fb.init = nv30_fb_init;
 		engine->fb.takedown = nv30_fb_takedown;
-		engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
+		engine->fb.init_tile_region = nv30_fb_init_tile_region;
+		engine->fb.set_tile_region = nv10_fb_set_tile_region;
+		engine->fb.free_tile_region = nv30_fb_free_tile_region;
 		engine->graph.init = nv30_graph_init;
 		engine->graph.takedown = nv20_graph_takedown;
 		engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -237,7 +243,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->graph.destroy_context = nv20_graph_destroy_context;
 		engine->graph.load_context = nv20_graph_load_context;
 		engine->graph.unload_context = nv20_graph_unload_context;
-		engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
+		engine->graph.set_tile_region = nv20_graph_set_tile_region;
 		engine->fifo.channels = 32;
 		engine->fifo.init = nv10_fifo_init;
 		engine->fifo.takedown = nouveau_stub_takedown;
@@ -286,7 +292,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown = nv04_timer_takedown;
 		engine->fb.init = nv40_fb_init;
 		engine->fb.takedown = nv40_fb_takedown;
-		engine->fb.set_region_tiling = nv40_fb_set_region_tiling;
+		engine->fb.init_tile_region = nv30_fb_init_tile_region;
+		engine->fb.set_tile_region = nv40_fb_set_tile_region;
+		engine->fb.free_tile_region = nv30_fb_free_tile_region;
 		engine->graph.init = nv40_graph_init;
 		engine->graph.takedown = nv40_graph_takedown;
 		engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -295,7 +303,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->graph.destroy_context = nv40_graph_destroy_context;
 		engine->graph.load_context = nv40_graph_load_context;
 		engine->graph.unload_context = nv40_graph_unload_context;
-		engine->graph.set_region_tiling = nv40_graph_set_region_tiling;
+		engine->graph.set_tile_region = nv40_graph_set_tile_region;
 		engine->fifo.channels = 32;
 		engine->fifo.init = nv40_fifo_init;
 		engine->fifo.takedown = nouveau_stub_takedown;
@@ -596,6 +604,7 @@ nouveau_card_init(struct drm_device *dev)
 		goto out;
 	engine = &dev_priv->engine;
 	spin_lock_init(&dev_priv->channels.lock);
+	spin_lock_init(&dev_priv->tile.lock);
 	spin_lock_init(&dev_priv->context_switch_lock);
 
 	/* Make the CRTCs and I2C buses accessible */
drivers/gpu/drm/nouveau/nv10_fb.c

@@ -4,22 +4,40 @@
 #include "nouveau_drm.h"
 
 void
-nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			  uint32_t size, uint32_t pitch)
+nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+			 uint32_t size, uint32_t pitch, uint32_t flags)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t limit = max(1u, addr + size) - 1;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	if (pitch) {
-		if (dev_priv->card_type >= NV_20)
-			addr |= 1;
-		else
-			addr |= 1 << 31;
-	}
+	tile->addr = addr;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
 
-	nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
-	nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
-	nv_wr32(dev, NV10_PFB_TILE(i), addr);
+	if (dev_priv->card_type == NV_20)
+		tile->addr |= 1;
+	else
+		tile->addr |= 1 << 31;
+}
+
+void
+nv10_fb_free_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	tile->addr = tile->limit = tile->pitch = 0;
+}
+
+void
+nv10_fb_set_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
 }
 
 int
@@ -33,7 +51,7 @@ nv10_fb_init(struct drm_device *dev)
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_region_tiling(dev, i, 0, 0, 0);
+		pfb->set_tile_region(dev, i);
 
 	return 0;
 }
drivers/gpu/drm/nouveau/nv10_graph.c

@@ -899,17 +899,14 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan)
 }
 
 void
-nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv10_graph_set_tile_region(struct drm_device *dev, int i)
 {
-	uint32_t limit = max(1u, addr + size) - 1;
-
-	if (pitch)
-		addr |= 1 << 31;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
-	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
-	nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
+	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
 }
 
 int nv10_graph_init(struct drm_device *dev)
@@ -949,7 +946,7 @@ int nv10_graph_init(struct drm_device *dev)
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv10_graph_set_tile_region(dev, i);
 
 	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
 	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
drivers/gpu/drm/nouveau/nv20_graph.c

@@ -511,24 +511,21 @@ nv20_graph_rdi(struct drm_device *dev)
 }
 
 void
-nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv20_graph_set_tile_region(struct drm_device *dev, int i)
 {
-	uint32_t limit = max(1u, addr + size) - 1;
-
-	if (pitch)
-		addr |= 1;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
-	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
-	nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
 
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);
 }
 
 int
@@ -612,7 +609,7 @@ nv20_graph_init(struct drm_device *dev)
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv20_graph_set_tile_region(dev, i);
 
 	for (i = 0; i < 8; i++) {
 		nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
@@ -751,7 +748,7 @@ nv30_graph_init(struct drm_device *dev)
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv20_graph_set_tile_region(dev, i);
 
 	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
 	nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
drivers/gpu/drm/nouveau/nv30_fb.c

@@ -29,6 +29,27 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 
+void
+nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+			 uint32_t size, uint32_t pitch, uint32_t flags)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	tile->addr = addr | 1;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+}
+
+void
+nv30_fb_free_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	tile->addr = tile->limit = tile->pitch = 0;
+}
+
 static int
 calc_bias(struct drm_device *dev, int k, int i, int j)
 {
@@ -65,7 +86,7 @@ nv30_fb_init(struct drm_device *dev)
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_region_tiling(dev, i, 0, 0, 0);
+		pfb->set_tile_region(dev, i);
 
 	/* Init the memory timing regs at 0x10037c/0x1003ac */
 	if (dev_priv->chipset == 0x30 ||
drivers/gpu/drm/nouveau/nv40_fb.c

@@ -4,26 +4,22 @@
 #include "nouveau_drm.h"
 
 void
-nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			  uint32_t size, uint32_t pitch)
+nv40_fb_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t limit = max(1u, addr + size) - 1;
-
-	if (pitch)
-		addr |= 1;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
 	switch (dev_priv->chipset) {
 	case 0x40:
-		nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
-		nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
-		nv_wr32(dev, NV10_PFB_TILE(i), addr);
+		nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
 		break;
 
 	default:
-		nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
-		nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
-		nv_wr32(dev, NV40_PFB_TILE(i), addr);
+		nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV40_PFB_TILE(i), tile->addr);
 		break;
 	}
 }
@@ -64,7 +60,7 @@ nv40_fb_init(struct drm_device *dev)
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_region_tiling(dev, i, 0, 0, 0);
+		pfb->set_tile_region(dev, i);
 
 	return 0;
 }
drivers/gpu/drm/nouveau/nv40_graph.c

@@ -192,43 +192,39 @@ nv40_graph_unload_context(struct drm_device *dev)
 }
 
 void
-nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv40_graph_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t limit = max(1u, addr + size) - 1;
-
-	if (pitch)
-		addr |= 1;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
 	switch (dev_priv->chipset) {
 	case 0x44:
 	case 0x4a:
 	case 0x4e:
-		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
-		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
-		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
 		break;
 
 	case 0x46:
 	case 0x47:
 	case 0x49:
 	case 0x4b:
-		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
-		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
-		nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
-		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
-		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
-		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
+		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
 		break;
 
 	default:
-		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
-		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
-		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
-		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
-		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
-		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
 		break;
 	}
 }
@@ -369,7 +365,7 @@ nv40_graph_init(struct drm_device *dev)
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv40_graph_set_tile_region(dev, i);
 
 	/* begin RAM config */
 	vramsz = pci_resource_len(dev->pdev, 0) - 1;