drm/nouveau: rework gpu-specific instmem interfaces
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

parent dc1e5c0dbf
commit e41115d0ad

6 changed files with 284 additions and 257 deletions
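In short: the four old per-GPU hooks populate/clear/bind/unbind (populate also took a u32 *size it could round up) become get/put/map/unmap, and the per-object state they shared (im_pramin, im_backing, im_bound) collapses into one GPU-private pointer, gpuobj->node. A condensed sketch of the reworked hook table, assembled from the hunks below (unrelated members omitted):

    struct nouveau_instmem_engine {
        /* allocate/free backing storage for a global gpuobj; sets vinst */
        int  (*get)(struct nouveau_gpuobj *, u32 size, u32 align);
        void (*put)(struct nouveau_gpuobj *);
        /* map/unmap that storage into the PRAMIN aperture; sets pinst */
        int  (*map)(struct nouveau_gpuobj *);
        void (*unmap)(struct nouveau_gpuobj *);
        void (*flush)(struct drm_device *);
    };

Note the hooks now take the object itself rather than (dev, gpuobj) pairs; each implementation recovers the device from gpuobj->dev.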
drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -146,15 +146,16 @@ enum nouveau_flags {
 #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
 #define NVOBJ_FLAG_ZERO_FREE (1 << 2)
 
+#define NVOBJ_CINST_GLOBAL 0xdeadbeef
+
 struct nouveau_gpuobj {
     struct drm_device *dev;
     struct kref refcount;
     struct list_head list;
 
-    struct drm_mm_node *im_pramin;
-    struct nouveau_bo *im_backing;
+    void *node;
     u32 *suspend;
-    int im_bound;
 
     uint32_t flags;
@@ -288,11 +289,11 @@ struct nouveau_instmem_engine {
     int (*suspend)(struct drm_device *dev);
     void (*resume)(struct drm_device *dev);
 
-    int (*populate)(struct drm_device *, struct nouveau_gpuobj *,
-                    u32 *size, u32 align);
-    void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
-    int (*bind)(struct drm_device *, struct nouveau_gpuobj *);
-    int (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
+    int (*get)(struct nouveau_gpuobj *, u32 size, u32 align);
+    void (*put)(struct nouveau_gpuobj *);
+    int (*map)(struct nouveau_gpuobj *);
+    void (*unmap)(struct nouveau_gpuobj *);
     void (*flush)(struct drm_device *);
 };
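The hooks are meant to be driven in pairs, as the nouveau_object.c hunks below do: get() allocates the backing storage, map() makes it reachable through the aperture, and teardown runs in reverse. A condensed sketch of that calling order (error handling trimmed; see nouveau_gpuobj_new()/nouveau_gpuobj_del() below for the real flow):

    ret = instmem->get(gpuobj, size, align);   /* backing storage, sets vinst */
    if (ret == 0 && dev_priv->ramin_available)
        ret = instmem->map(gpuobj);            /* aperture mapping, sets pinst */

    /* ... use the object ... */

    instmem->unmap(gpuobj);                    /* reverse order on free */
    instmem->put(gpuobj);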
@@ -1182,11 +1183,10 @@ extern int nv04_instmem_init(struct drm_device *);
 extern void nv04_instmem_takedown(struct drm_device *);
 extern int nv04_instmem_suspend(struct drm_device *);
 extern void nv04_instmem_resume(struct drm_device *);
-extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-                                 u32 *size, u32 align);
-extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv04_instmem_put(struct nouveau_gpuobj *);
+extern int nv04_instmem_map(struct nouveau_gpuobj *);
+extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
 extern void nv04_instmem_flush(struct drm_device *);
 
 /* nv50_instmem.c */
@@ -1194,11 +1194,10 @@ extern int nv50_instmem_init(struct drm_device *);
 extern void nv50_instmem_takedown(struct drm_device *);
 extern int nv50_instmem_suspend(struct drm_device *);
 extern void nv50_instmem_resume(struct drm_device *);
-extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-                                 u32 *size, u32 align);
-extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv50_instmem_put(struct nouveau_gpuobj *);
+extern int nv50_instmem_map(struct nouveau_gpuobj *);
+extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
 extern void nv50_instmem_flush(struct drm_device *);
 extern void nv84_instmem_flush(struct drm_device *);
 extern void nv50_vm_flush(struct drm_device *, int engine);
@@ -1208,11 +1207,10 @@ extern int nvc0_instmem_init(struct drm_device *);
 extern void nvc0_instmem_takedown(struct drm_device *);
 extern int nvc0_instmem_suspend(struct drm_device *);
 extern void nvc0_instmem_resume(struct drm_device *);
-extern int nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-                                 u32 *size, u32 align);
-extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nvc0_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nvc0_instmem_put(struct nouveau_gpuobj *);
+extern int nvc0_instmem_map(struct nouveau_gpuobj *);
+extern void nvc0_instmem_unmap(struct nouveau_gpuobj *);
 extern void nvc0_instmem_flush(struct drm_device *);
 
 /* nv04_mc.c */
drivers/gpu/drm/nouveau/nouveau_object.c
@@ -168,17 +168,14 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
                    struct nouveau_gpuobj **gpuobj_ret)
 {
     struct drm_nouveau_private *dev_priv = dev->dev_private;
-    struct nouveau_engine *engine = &dev_priv->engine;
+    struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
     struct nouveau_gpuobj *gpuobj;
     struct drm_mm_node *ramin = NULL;
-    int ret;
+    int ret, i;
 
     NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
              chan ? chan->id : -1, size, align, flags);
 
-    if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
-        return -EINVAL;
-
     gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
     if (!gpuobj)
         return -ENOMEM;
@@ -193,88 +190,45 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
     spin_unlock(&dev_priv->ramin_lock);
 
     if (chan) {
-        NV_DEBUG(dev, "channel heap\n");
-
         ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
         if (ramin)
             ramin = drm_mm_get_block(ramin, size, align);
-
         if (!ramin) {
             nouveau_gpuobj_ref(NULL, &gpuobj);
             return -ENOMEM;
         }
-    } else {
-        NV_DEBUG(dev, "global heap\n");
-
-        /* allocate backing pages, sets vinst */
-        ret = engine->instmem.populate(dev, gpuobj, &size, align);
-        if (ret) {
-            nouveau_gpuobj_ref(NULL, &gpuobj);
-            return ret;
-        }
-
-        /* try and get aperture space */
-        do {
-            if (drm_mm_pre_get(&dev_priv->ramin_heap))
-                return -ENOMEM;
-
-            spin_lock(&dev_priv->ramin_lock);
-            ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
-                                       align, 0);
-            if (ramin == NULL) {
-                spin_unlock(&dev_priv->ramin_lock);
-                nouveau_gpuobj_ref(NULL, &gpuobj);
-                return -ENOMEM;
-            }
-
-            ramin = drm_mm_get_block_atomic(ramin, size, align);
-            spin_unlock(&dev_priv->ramin_lock);
-        } while (ramin == NULL);
-
-        /* on nv50 it's ok to fail, we have a fallback path */
-        if (!ramin && dev_priv->card_type < NV_50) {
-            nouveau_gpuobj_ref(NULL, &gpuobj);
-            return -ENOMEM;
-        }
-    }
 
-    /* if we got a chunk of the aperture, map pages into it */
-    gpuobj->im_pramin = ramin;
-    if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
-        ret = engine->instmem.bind(dev, gpuobj);
-        if (ret) {
-            nouveau_gpuobj_ref(NULL, &gpuobj);
-            return ret;
-        }
-    }
-
-    /* calculate the various different addresses for the object */
-    if (chan) {
         gpuobj->pinst = chan->ramin->pinst;
         if (gpuobj->pinst != ~0)
-            gpuobj->pinst += gpuobj->im_pramin->start;
+            gpuobj->pinst += ramin->start;
 
-        if (dev_priv->card_type < NV_50) {
+        if (dev_priv->card_type < NV_50)
             gpuobj->cinst = gpuobj->pinst;
-        } else {
-            gpuobj->cinst = gpuobj->im_pramin->start;
-            gpuobj->vinst = gpuobj->im_pramin->start +
-                            chan->ramin->vinst;
-        }
-    } else {
-        if (gpuobj->im_pramin)
-            gpuobj->pinst = gpuobj->im_pramin->start;
         else
+            gpuobj->cinst = ramin->start;
+
+        gpuobj->vinst = ramin->start + chan->ramin->vinst;
+        gpuobj->node = ramin;
+    } else {
+        ret = instmem->get(gpuobj, size, align);
+        if (ret) {
+            nouveau_gpuobj_ref(NULL, &gpuobj);
+            return ret;
+        }
+
+        ret = -ENOSYS;
+        if (dev_priv->ramin_available)
+            ret = instmem->map(gpuobj);
+        if (ret)
             gpuobj->pinst = ~0;
-        gpuobj->cinst = 0xdeadbeef;
+
+        gpuobj->cinst = NVOBJ_CINST_GLOBAL;
     }
 
     if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
-        int i;
-
         for (i = 0; i < gpuobj->size; i += 4)
             nv_wo32(gpuobj, i, 0);
-        engine->instmem.flush(dev);
+        instmem->flush(dev);
     }
 
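Condensed, nouveau_gpuobj_new() now has exactly two allocation paths: channel-owned objects suballocate PRAMIN out of the channel's own heap, and everything else goes through the new per-GPU hooks. A sketch of the shape, with locking, error paths and the drm_mm search trimmed (NVOBJ_CINST_GLOBAL tags objects that have no channel-relative address):

    if (chan) {
        /* carve the object out of the channel's PRAMIN suballocation heap */
        ramin = drm_mm_get_block(ramin, size, align);
        gpuobj->node = ramin;
        gpuobj->vinst = ramin->start + chan->ramin->vinst;
    } else {
        /* global object: backing storage via get(), aperture space via map() */
        ret = instmem->get(gpuobj, size, align);
        if (dev_priv->ramin_available)
            ret = instmem->map(gpuobj);   /* failure just leaves pinst = ~0 */
        gpuobj->cinst = NVOBJ_CINST_GLOBAL;
    }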
@@ -326,26 +280,34 @@ nouveau_gpuobj_del(struct kref *ref)
         container_of(ref, struct nouveau_gpuobj, refcount);
     struct drm_device *dev = gpuobj->dev;
     struct drm_nouveau_private *dev_priv = dev->dev_private;
-    struct nouveau_engine *engine = &dev_priv->engine;
+    struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
     int i;
 
     NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
 
-    if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
+    if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
         for (i = 0; i < gpuobj->size; i += 4)
             nv_wo32(gpuobj, i, 0);
-        engine->instmem.flush(dev);
+        instmem->flush(dev);
     }
 
     if (gpuobj->dtor)
         gpuobj->dtor(dev, gpuobj);
 
-    if (gpuobj->im_backing)
-        engine->instmem.clear(dev, gpuobj);
+    if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
+        if (gpuobj->node) {
+            instmem->unmap(gpuobj);
+            instmem->put(gpuobj);
+        }
+    } else {
+        if (gpuobj->node) {
+            spin_lock(&dev_priv->ramin_lock);
+            drm_mm_put_block(gpuobj->node);
+            spin_unlock(&dev_priv->ramin_lock);
+        }
+    }
 
     spin_lock(&dev_priv->ramin_lock);
-    if (gpuobj->im_pramin)
-        drm_mm_put_block(gpuobj->im_pramin);
     list_del(&gpuobj->list);
     spin_unlock(&dev_priv->ramin_lock);
 
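The destructor mirrors this: it no longer inspects im_backing or im_pramin, it branches on cinst. Objects tagged NVOBJ_CINST_GLOBAL came from instmem->get() and are torn down through the hooks in reverse order of setup, since unmap() clears the aperture PTEs that still reference the VRAM put() is about to release; channel suballocations just hand their drm_mm node back. Condensed from the hunk above (the real code keeps the gpuobj->node null checks):

    if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
        instmem->unmap(gpuobj);    /* drop the aperture mapping first */
        instmem->put(gpuobj);      /* then free the backing VRAM */
    } else {
        spin_lock(&dev_priv->ramin_lock);
        drm_mm_put_block(gpuobj->node);   /* return node to the channel heap */
        spin_unlock(&dev_priv->ramin_lock);
    }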
@@ -385,7 +347,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
     kref_init(&gpuobj->refcount);
     gpuobj->size = size;
     gpuobj->pinst = pinst;
-    gpuobj->cinst = 0xdeadbeef;
+    gpuobj->cinst = NVOBJ_CINST_GLOBAL;
     gpuobj->vinst = vinst;
 
     if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
@@ -935,7 +897,7 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
     int i;
 
     list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
-        if (gpuobj->cinst != 0xdeadbeef)
+        if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
             continue;
 
         gpuobj->suspend = vmalloc(gpuobj->size);
drivers/gpu/drm/nouveau/nouveau_state.c
@@ -53,10 +53,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
         engine->instmem.takedown = nv04_instmem_takedown;
         engine->instmem.suspend = nv04_instmem_suspend;
         engine->instmem.resume = nv04_instmem_resume;
-        engine->instmem.populate = nv04_instmem_populate;
-        engine->instmem.clear = nv04_instmem_clear;
-        engine->instmem.bind = nv04_instmem_bind;
-        engine->instmem.unbind = nv04_instmem_unbind;
+        engine->instmem.get = nv04_instmem_get;
+        engine->instmem.put = nv04_instmem_put;
+        engine->instmem.map = nv04_instmem_map;
+        engine->instmem.unmap = nv04_instmem_unmap;
         engine->instmem.flush = nv04_instmem_flush;
         engine->mc.init = nv04_mc_init;
         engine->mc.takedown = nv04_mc_takedown;
@@ -106,10 +106,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
         engine->instmem.takedown = nv04_instmem_takedown;
         engine->instmem.suspend = nv04_instmem_suspend;
         engine->instmem.resume = nv04_instmem_resume;
-        engine->instmem.populate = nv04_instmem_populate;
-        engine->instmem.clear = nv04_instmem_clear;
-        engine->instmem.bind = nv04_instmem_bind;
-        engine->instmem.unbind = nv04_instmem_unbind;
+        engine->instmem.get = nv04_instmem_get;
+        engine->instmem.put = nv04_instmem_put;
+        engine->instmem.map = nv04_instmem_map;
+        engine->instmem.unmap = nv04_instmem_unmap;
         engine->instmem.flush = nv04_instmem_flush;
         engine->mc.init = nv04_mc_init;
         engine->mc.takedown = nv04_mc_takedown;
@@ -163,10 +163,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
         engine->instmem.takedown = nv04_instmem_takedown;
         engine->instmem.suspend = nv04_instmem_suspend;
         engine->instmem.resume = nv04_instmem_resume;
-        engine->instmem.populate = nv04_instmem_populate;
-        engine->instmem.clear = nv04_instmem_clear;
-        engine->instmem.bind = nv04_instmem_bind;
-        engine->instmem.unbind = nv04_instmem_unbind;
+        engine->instmem.get = nv04_instmem_get;
+        engine->instmem.put = nv04_instmem_put;
+        engine->instmem.map = nv04_instmem_map;
+        engine->instmem.unmap = nv04_instmem_unmap;
         engine->instmem.flush = nv04_instmem_flush;
         engine->mc.init = nv04_mc_init;
         engine->mc.takedown = nv04_mc_takedown;
@@ -220,10 +220,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
         engine->instmem.takedown = nv04_instmem_takedown;
         engine->instmem.suspend = nv04_instmem_suspend;
         engine->instmem.resume = nv04_instmem_resume;
-        engine->instmem.populate = nv04_instmem_populate;
-        engine->instmem.clear = nv04_instmem_clear;
-        engine->instmem.bind = nv04_instmem_bind;
-        engine->instmem.unbind = nv04_instmem_unbind;
+        engine->instmem.get = nv04_instmem_get;
+        engine->instmem.put = nv04_instmem_put;
+        engine->instmem.map = nv04_instmem_map;
+        engine->instmem.unmap = nv04_instmem_unmap;
         engine->instmem.flush = nv04_instmem_flush;
         engine->mc.init = nv04_mc_init;
         engine->mc.takedown = nv04_mc_takedown;
@@ -280,10 +280,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
         engine->instmem.takedown = nv04_instmem_takedown;
         engine->instmem.suspend = nv04_instmem_suspend;
         engine->instmem.resume = nv04_instmem_resume;
-        engine->instmem.populate = nv04_instmem_populate;
-        engine->instmem.clear = nv04_instmem_clear;
-        engine->instmem.bind = nv04_instmem_bind;
-        engine->instmem.unbind = nv04_instmem_unbind;
+        engine->instmem.get = nv04_instmem_get;
+        engine->instmem.put = nv04_instmem_put;
+        engine->instmem.map = nv04_instmem_map;
+        engine->instmem.unmap = nv04_instmem_unmap;
         engine->instmem.flush = nv04_instmem_flush;
         engine->mc.init = nv40_mc_init;
         engine->mc.takedown = nv40_mc_takedown;
@@ -343,10 +343,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
         engine->instmem.takedown = nv50_instmem_takedown;
         engine->instmem.suspend = nv50_instmem_suspend;
         engine->instmem.resume = nv50_instmem_resume;
-        engine->instmem.populate = nv50_instmem_populate;
-        engine->instmem.clear = nv50_instmem_clear;
-        engine->instmem.bind = nv50_instmem_bind;
-        engine->instmem.unbind = nv50_instmem_unbind;
+        engine->instmem.get = nv50_instmem_get;
+        engine->instmem.put = nv50_instmem_put;
+        engine->instmem.map = nv50_instmem_map;
+        engine->instmem.unmap = nv50_instmem_unmap;
         if (dev_priv->chipset == 0x50)
             engine->instmem.flush = nv50_instmem_flush;
         else
@@ -449,10 +449,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
         engine->instmem.takedown = nvc0_instmem_takedown;
         engine->instmem.suspend = nvc0_instmem_suspend;
         engine->instmem.resume = nvc0_instmem_resume;
-        engine->instmem.populate = nvc0_instmem_populate;
-        engine->instmem.clear = nvc0_instmem_clear;
-        engine->instmem.bind = nvc0_instmem_bind;
-        engine->instmem.unbind = nvc0_instmem_unbind;
+        engine->instmem.get = nvc0_instmem_get;
+        engine->instmem.put = nvc0_instmem_put;
+        engine->instmem.map = nvc0_instmem_map;
+        engine->instmem.unmap = nvc0_instmem_unmap;
         engine->instmem.flush = nvc0_instmem_flush;
         engine->mc.init = nv50_mc_init;
         engine->mc.takedown = nv50_mc_takedown;
drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -97,35 +97,6 @@ nv04_instmem_takedown(struct drm_device *dev)
     nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);
 }
 
-int
-nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-                      u32 *size, u32 align)
-{
-    return 0;
-}
-
-void
-nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-}
-
-int
-nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-    return 0;
-}
-
-int
-nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-    return 0;
-}
-
-void
-nv04_instmem_flush(struct drm_device *dev)
-{
-}
-
 int
 nv04_instmem_suspend(struct drm_device *dev)
 {
@@ -137,3 +108,56 @@ nv04_instmem_resume(struct drm_device *dev)
 {
 }
 
+int
+nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
+{
+    struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+    struct drm_mm_node *ramin = NULL;
+
+    do {
+        if (drm_mm_pre_get(&dev_priv->ramin_heap))
+            return -ENOMEM;
+
+        spin_lock(&dev_priv->ramin_lock);
+        ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
+        if (ramin == NULL) {
+            spin_unlock(&dev_priv->ramin_lock);
+            return -ENOMEM;
+        }
+
+        ramin = drm_mm_get_block_atomic(ramin, size, align);
+        spin_unlock(&dev_priv->ramin_lock);
+    } while (ramin == NULL);
+
+    gpuobj->node = ramin;
+    gpuobj->vinst = ramin->start;
+    return 0;
+}
+
+void
+nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
+{
+    struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+
+    spin_lock(&dev_priv->ramin_lock);
+    drm_mm_put_block(gpuobj->node);
+    gpuobj->node = NULL;
+    spin_unlock(&dev_priv->ramin_lock);
+}
+
+int
+nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
+{
+    gpuobj->pinst = gpuobj->vinst;
+    return 0;
+}
+
+void
+nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
+{
+}
+
+void
+nv04_instmem_flush(struct drm_device *dev)
+{
+}
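nv04_instmem_get() uses the stock drm_mm idiom for allocating while a spinlock is needed: drm_mm_pre_get() tops up the allocator's preallocated node cache in sleepable context, the search plus drm_mm_get_block_atomic() then run under ramin_lock, and the sequence retries when the atomic split returns NULL because a racing thread drained the cache. The same loop reappears in the nv50 and nvc0 map() hooks below. Annotated copy of the loop from the hunk above:

    do {
        /* may sleep: refill drm_mm's preallocated node cache */
        if (drm_mm_pre_get(&dev_priv->ramin_heap))
            return -ENOMEM;

        spin_lock(&dev_priv->ramin_lock);
        ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
        if (ramin == NULL) {
            /* no free range at all: hard failure, not a race */
            spin_unlock(&dev_priv->ramin_lock);
            return -ENOMEM;
        }

        /* non-sleeping split of the found hole; NULL means the node
         * cache ran dry under us, so loop back and refill it */
        ramin = drm_mm_get_block_atomic(ramin, size, align);
        spin_unlock(&dev_priv->ramin_lock);
    } while (ramin == NULL);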
drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -157,10 +157,7 @@ nv50_instmem_init(struct drm_device *dev)
     nv_wo32(priv->pramin_bar, 0x10, 0x00000000);
     nv_wo32(priv->pramin_bar, 0x14, 0x00000000);
 
-    /* map channel into PRAMIN, gpuobj didn't do it for us */
-    ret = nv50_instmem_bind(dev, chan->ramin);
-    if (ret)
-        return ret;
+    nv50_instmem_map(chan->ramin);
 
     /* poke regs... */
     nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
@@ -305,72 +302,91 @@ nv50_instmem_resume(struct drm_device *dev)
     dev_priv->ramin_available = true;
 }
 
+struct nv50_gpuobj_node {
+    struct nouveau_bo *vram;
+    struct drm_mm_node *ramin;
+    u32 align;
+};
+
 int
-nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-                      u32 *size, u32 align)
+nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
 {
+    struct drm_device *dev = gpuobj->dev;
+    struct nv50_gpuobj_node *node = NULL;
     int ret;
 
-    if (gpuobj->im_backing)
-        return -EINVAL;
+    node = kzalloc(sizeof(*node), GFP_KERNEL);
+    if (!node)
+        return -ENOMEM;
+    node->align = align;
 
-    *size = ALIGN(*size, 4096);
-    if (*size == 0)
-        return -EINVAL;
-
-    ret = nouveau_bo_new(dev, NULL, *size, align, TTM_PL_FLAG_VRAM,
-                         0, 0x0000, true, false, &gpuobj->im_backing);
+    ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM,
+                         0, 0x0000, true, false, &node->vram);
     if (ret) {
         NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
         return ret;
     }
 
-    ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
+    ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM);
     if (ret) {
         NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
-        nouveau_bo_ref(NULL, &gpuobj->im_backing);
+        nouveau_bo_ref(NULL, &node->vram);
         return ret;
     }
 
-    gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
+    gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT;
+    gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT;
+    gpuobj->node = node;
     return 0;
 }
 
 void
-nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
 {
-    struct drm_nouveau_private *dev_priv = dev->dev_private;
+    struct nv50_gpuobj_node *node;
 
-    if (gpuobj && gpuobj->im_backing) {
-        if (gpuobj->im_bound)
-            dev_priv->engine.instmem.unbind(dev, gpuobj);
-        nouveau_bo_unpin(gpuobj->im_backing);
-        nouveau_bo_ref(NULL, &gpuobj->im_backing);
-        gpuobj->im_backing = NULL;
-    }
+    node = gpuobj->node;
+    gpuobj->node = NULL;
+
+    nouveau_bo_unpin(node->vram);
+    nouveau_bo_ref(NULL, &node->vram);
+    kfree(node);
 }
 
 int
-nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
 {
-    struct drm_nouveau_private *dev_priv = dev->dev_private;
+    struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
     struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-    struct nouveau_gpuobj *pramin_pt = priv->pramin_pt;
-    uint32_t pte, pte_end;
-    uint64_t vram;
+    struct nv50_gpuobj_node *node = gpuobj->node;
+    struct drm_device *dev = gpuobj->dev;
+    struct drm_mm_node *ramin = NULL;
+    u32 pte, pte_end;
+    u64 vram;
 
-    if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
-        return -EINVAL;
+    do {
+        if (drm_mm_pre_get(&dev_priv->ramin_heap))
+            return -ENOMEM;
+
+        spin_lock(&dev_priv->ramin_lock);
+        ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size,
+                                   node->align, 0);
+        if (ramin == NULL) {
+            spin_unlock(&dev_priv->ramin_lock);
+            return -ENOMEM;
+        }
 
-    NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
-             gpuobj->im_pramin->start, gpuobj->im_pramin->size);
+        ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align);
+        spin_unlock(&dev_priv->ramin_lock);
+    } while (ramin == NULL);
 
-    pte = (gpuobj->im_pramin->start >> 12) << 1;
-    pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
+    pte = (ramin->start >> 12) << 1;
+    pte_end = ((ramin->size >> 12) << 1) + pte;
     vram = gpuobj->vinst;
 
     NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
-             gpuobj->im_pramin->start, pte, pte_end);
+             ramin->start, pte, pte_end);
     NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
 
     vram |= 1;
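On nv50 the two halves of the old populate/bind pair are now cleanly separated: get() allocates and pins the backing VRAM and records its address in gpuobj->vinst, while map() finds room in the PRAMIN aperture and points the aperture page table at that VRAM, recording the aperture offset in gpuobj->pinst. Each 4KiB page takes one 8-byte PTE, which is what the shift-by-one on the page index is doing; annotated from the map() body above (the low bit set on vram appears to be the PTE valid bit):

    pte = (ramin->start >> 12) << 1;            /* page index -> 32-bit word index */
    pte_end = ((ramin->size >> 12) << 1) + pte;
    vram = gpuobj->vinst | 1;                   /* target address, valid bit set */

    while (pte < pte_end) {
        nv_wo32(priv->pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
        nv_wo32(priv->pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
        vram += 0x1000;                         /* next 4KiB page */
        pte += 2;                               /* next 64-bit PTE */
    }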
@@ -380,8 +396,8 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
     }
 
     while (pte < pte_end) {
-        nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
-        nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
+        nv_wo32(priv->pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
+        nv_wo32(priv->pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
         vram += 0x1000;
         pte += 2;
     }
@@ -389,36 +405,36 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 
     nv50_vm_flush(dev, 6);
 
-    gpuobj->im_bound = 1;
+    node->ramin = ramin;
+    gpuobj->pinst = ramin->start;
     return 0;
 }
 
-int
-nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+void
+nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 {
-    struct drm_nouveau_private *dev_priv = dev->dev_private;
+    struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
     struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-    uint32_t pte, pte_end;
+    struct nv50_gpuobj_node *node = gpuobj->node;
+    u32 pte, pte_end;
 
-    if (gpuobj->im_bound == 0)
-        return -EINVAL;
+    if (!node->ramin || !dev_priv->ramin_available)
+        return;
 
-    /* can happen during late takedown */
-    if (unlikely(!dev_priv->ramin_available))
-        return 0;
-
-    pte = (gpuobj->im_pramin->start >> 12) << 1;
-    pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
+    pte = (node->ramin->start >> 12) << 1;
+    pte_end = ((node->ramin->size >> 12) << 1) + pte;
 
     while (pte < pte_end) {
         nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000);
         nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000);
         pte += 2;
     }
-    dev_priv->engine.instmem.flush(dev);
+    dev_priv->engine.instmem.flush(gpuobj->dev);
 
-    gpuobj->im_bound = 0;
-    return 0;
+    spin_lock(&dev_priv->ramin_lock);
+    drm_mm_put_block(node->ramin);
+    node->ramin = NULL;
+    spin_unlock(&dev_priv->ramin_lock);
 }
 
 void
drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -26,67 +26,89 @@
 
 #include "nouveau_drv.h"
 
+struct nvc0_gpuobj_node {
+    struct nouveau_bo *vram;
+    struct drm_mm_node *ramin;
+    u32 align;
+};
+
 int
-nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-                      u32 *size, u32 align)
+nvc0_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
 {
+    struct drm_device *dev = gpuobj->dev;
+    struct nvc0_gpuobj_node *node = NULL;
     int ret;
 
-    *size = ALIGN(*size, 4096);
-    if (*size == 0)
-        return -EINVAL;
+    node = kzalloc(sizeof(*node), GFP_KERNEL);
+    if (!node)
+        return -ENOMEM;
+    node->align = align;
 
-    ret = nouveau_bo_new(dev, NULL, *size, align, TTM_PL_FLAG_VRAM,
-                         0, 0x0000, true, false, &gpuobj->im_backing);
+    ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM,
+                         0, 0x0000, true, false, &node->vram);
     if (ret) {
         NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
         return ret;
     }
 
-    ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
+    ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM);
     if (ret) {
         NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
-        nouveau_bo_ref(NULL, &gpuobj->im_backing);
+        nouveau_bo_ref(NULL, &node->vram);
         return ret;
     }
 
-    gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
+    gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT;
+    gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT;
+    gpuobj->node = node;
     return 0;
 }
 
 void
-nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nvc0_instmem_put(struct nouveau_gpuobj *gpuobj)
 {
-    struct drm_nouveau_private *dev_priv = dev->dev_private;
+    struct nvc0_gpuobj_node *node;
 
-    if (gpuobj && gpuobj->im_backing) {
-        if (gpuobj->im_bound)
-            dev_priv->engine.instmem.unbind(dev, gpuobj);
-        nouveau_bo_unpin(gpuobj->im_backing);
-        nouveau_bo_ref(NULL, &gpuobj->im_backing);
-        gpuobj->im_backing = NULL;
-    }
+    node = gpuobj->node;
+    gpuobj->node = NULL;
+
+    nouveau_bo_unpin(node->vram);
+    nouveau_bo_ref(NULL, &node->vram);
+    kfree(node);
 }
 
 int
-nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nvc0_instmem_map(struct nouveau_gpuobj *gpuobj)
 {
-    struct drm_nouveau_private *dev_priv = dev->dev_private;
-    uint32_t pte, pte_end;
-    uint64_t vram;
+    struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+    struct nvc0_gpuobj_node *node = gpuobj->node;
+    struct drm_device *dev = gpuobj->dev;
+    struct drm_mm_node *ramin = NULL;
+    u32 pte, pte_end;
+    u64 vram;
 
-    if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
-        return -EINVAL;
+    do {
+        if (drm_mm_pre_get(&dev_priv->ramin_heap))
+            return -ENOMEM;
+
+        spin_lock(&dev_priv->ramin_lock);
+        ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size,
+                                   node->align, 0);
+        if (ramin == NULL) {
+            spin_unlock(&dev_priv->ramin_lock);
+            return -ENOMEM;
+        }
 
-    NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
-             gpuobj->im_pramin->start, gpuobj->im_pramin->size);
+        ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align);
+        spin_unlock(&dev_priv->ramin_lock);
+    } while (ramin == NULL);
 
-    pte = gpuobj->im_pramin->start >> 12;
-    pte_end = (gpuobj->im_pramin->size >> 12) + pte;
+    pte = (ramin->start >> 12) << 1;
+    pte_end = ((ramin->size >> 12) << 1) + pte;
     vram = gpuobj->vinst;
 
     NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
-             gpuobj->im_pramin->start, pte, pte_end);
+             ramin->start, pte, pte_end);
     NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
 
     while (pte < pte_end) {
@@ -103,30 +125,35 @@ nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
         nv_wr32(dev, 0x100cbc, 0x80000005);
     }
 
-    gpuobj->im_bound = 1;
+    node->ramin = ramin;
+    gpuobj->pinst = ramin->start;
     return 0;
 }
 
-int
-nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+void
+nvc0_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 {
-    struct drm_nouveau_private *dev_priv = dev->dev_private;
-    uint32_t pte, pte_end;
+    struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+    struct nvc0_gpuobj_node *node = gpuobj->node;
+    u32 pte, pte_end;
 
-    if (gpuobj->im_bound == 0)
-        return -EINVAL;
+    if (!node->ramin || !dev_priv->ramin_available)
+        return;
+
+    pte = (node->ramin->start >> 12) << 1;
+    pte_end = ((node->ramin->size >> 12) << 1) + pte;
 
-    pte = gpuobj->im_pramin->start >> 12;
-    pte_end = (gpuobj->im_pramin->size >> 12) + pte;
     while (pte < pte_end) {
-        nv_wr32(dev, 0x702000 + (pte * 8), 0);
-        nv_wr32(dev, 0x702004 + (pte * 8), 0);
+        nv_wr32(gpuobj->dev, 0x702000 + (pte * 8), 0);
+        nv_wr32(gpuobj->dev, 0x702004 + (pte * 8), 0);
         pte++;
     }
-    dev_priv->engine.instmem.flush(dev);
+    dev_priv->engine.instmem.flush(gpuobj->dev);
 
-    gpuobj->im_bound = 0;
-    return 0;
+    spin_lock(&dev_priv->ramin_lock);
+    drm_mm_put_block(node->ramin);
+    node->ramin = NULL;
+    spin_unlock(&dev_priv->ramin_lock);
 }
 
 void
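nvc0 keeps the same one-PTE-per-4KiB-page layout as nv50, but its aperture page table is not reachable through a gpuobj, so unmap() pokes the entries directly through a register window at 0x702000 with nv_wr32() (map() presumably writes entries the same way, with the 0x100cbc write visible in the hunk above acting as the flush). Side by side, from the hunks above:

    /* nv50: PTEs live in a page-table gpuobj, written via the aperture */
    nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000);
    nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000);

    /* nvc0: PTEs cleared through a fixed register window */
    nv_wr32(gpuobj->dev, 0x702000 + (pte * 8), 0);
    nv_wr32(gpuobj->dev, 0x702004 + (pte * 8), 0);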