drm/nouveau: delay busy bo vma removal until fence signals
As opposed to an explicit wait, this allows userspace to avoid stalling on buffer deletion.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
This commit is contained in:
parent
780194b1b9
commit
c4c7044ffc
4 changed files with 108 additions and 15 deletions
drivers/gpu/drm/nouveau
|
@ -1550,13 +1550,8 @@ void
|
|||
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
|
||||
{
|
||||
if (vma->node) {
|
||||
if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
|
||||
spin_lock(&nvbo->bo.bdev->fence_lock);
|
||||
ttm_bo_wait(&nvbo->bo, false, false, false);
|
||||
spin_unlock(&nvbo->bo.bdev->fence_lock);
|
||||
if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
|
||||
nouveau_vm_unmap(vma);
|
||||
}
|
||||
|
||||
nouveau_vm_put(vma);
|
||||
list_del(&vma->head);
|
||||
}
|
||||
|
|
|
@ -35,15 +35,34 @@
|
|||
|
||||
#include <engine/fifo.h>
|
||||
|
||||
struct fence_work {
|
||||
struct work_struct base;
|
||||
struct list_head head;
|
||||
void (*func)(void *);
|
||||
void *data;
|
||||
};
|
||||
|
||||
static void
|
||||
nouveau_fence_signal(struct nouveau_fence *fence)
|
||||
{
|
||||
struct fence_work *work, *temp;
|
||||
|
||||
list_for_each_entry_safe(work, temp, &fence->work, head) {
|
||||
schedule_work(&work->base);
|
||||
list_del(&work->head);
|
||||
}
|
||||
|
||||
fence->channel = NULL;
|
||||
list_del(&fence->head);
|
||||
}
|
||||
|
||||
void
|
||||
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
|
||||
{
|
||||
struct nouveau_fence *fence, *fnext;
|
||||
spin_lock(&fctx->lock);
|
||||
list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
|
||||
fence->channel = NULL;
|
||||
list_del(&fence->head);
|
||||
nouveau_fence_unref(&fence);
|
||||
nouveau_fence_signal(fence);
|
||||
}
|
||||
spin_unlock(&fctx->lock);
|
||||
}
|
||||
|
@ -56,6 +75,50 @@ nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
|
|||
spin_lock_init(&fctx->lock);
|
||||
}
|
||||
|
||||
static void
|
||||
nouveau_fence_work_handler(struct work_struct *kwork)
|
||||
{
|
||||
struct fence_work *work = container_of(kwork, typeof(*work), base);
|
||||
work->func(work->data);
|
||||
kfree(work);
|
||||
}
|
||||
|
||||
void
|
||||
nouveau_fence_work(struct nouveau_fence *fence,
|
||||
void (*func)(void *), void *data)
|
||||
{
|
||||
struct nouveau_channel *chan = fence->channel;
|
||||
struct nouveau_fence_chan *fctx;
|
||||
struct fence_work *work = NULL;
|
||||
|
||||
if (nouveau_fence_done(fence)) {
|
||||
func(data);
|
||||
return;
|
||||
}
|
||||
|
||||
fctx = chan->fence;
|
||||
work = kmalloc(sizeof(*work), GFP_KERNEL);
|
||||
if (!work) {
|
||||
WARN_ON(nouveau_fence_wait(fence, false, false));
|
||||
func(data);
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock(&fctx->lock);
|
||||
if (!fence->channel) {
|
||||
spin_unlock(&fctx->lock);
|
||||
kfree(work);
|
||||
func(data);
|
||||
return;
|
||||
}
|
||||
|
||||
INIT_WORK(&work->base, nouveau_fence_work_handler);
|
||||
work->func = func;
|
||||
work->data = data;
|
||||
list_add(&work->head, &fence->work);
|
||||
spin_unlock(&fctx->lock);
|
||||
}
|
||||
|
||||
static void
|
||||
nouveau_fence_update(struct nouveau_channel *chan)
|
||||
{
|
||||
|
@ -67,8 +130,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
|
|||
if (fctx->read(chan) < fence->sequence)
|
||||
break;
|
||||
|
||||
fence->channel = NULL;
|
||||
list_del(&fence->head);
|
||||
nouveau_fence_signal(fence);
|
||||
nouveau_fence_unref(&fence);
|
||||
}
|
||||
spin_unlock(&fctx->lock);
|
||||
|
@ -265,6 +327,7 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
|
|||
if (!fence)
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_LIST_HEAD(&fence->work);
|
||||
fence->sysmem = sysmem;
|
||||
kref_init(&fence->kref);
|
||||
|
||||
|
|
|
@ -5,6 +5,7 @@ struct nouveau_drm;
|
|||
|
||||
struct nouveau_fence {
|
||||
struct list_head head;
|
||||
struct list_head work;
|
||||
struct kref kref;
|
||||
|
||||
bool sysmem;
|
||||
|
@ -22,6 +23,7 @@ void nouveau_fence_unref(struct nouveau_fence **);
|
|||
|
||||
int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
|
||||
bool nouveau_fence_done(struct nouveau_fence *);
|
||||
void nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *);
|
||||
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
|
||||
int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
|
||||
|
||||
|
|
|
@ -101,6 +101,41 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
nouveau_gem_object_delete(void *data)
|
||||
{
|
||||
struct nouveau_vma *vma = data;
|
||||
nouveau_vm_unmap(vma);
|
||||
nouveau_vm_put(vma);
|
||||
kfree(vma);
|
||||
}
|
||||
|
||||
static void
|
||||
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
|
||||
{
|
||||
const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
|
||||
struct nouveau_fence *fence = NULL;
|
||||
|
||||
list_del(&vma->head);
|
||||
|
||||
if (mapped) {
|
||||
spin_lock(&nvbo->bo.bdev->fence_lock);
|
||||
if (nvbo->bo.sync_obj)
|
||||
fence = nouveau_fence_ref(nvbo->bo.sync_obj);
|
||||
spin_unlock(&nvbo->bo.bdev->fence_lock);
|
||||
}
|
||||
|
||||
if (fence) {
|
||||
nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
|
||||
} else {
|
||||
if (mapped)
|
||||
nouveau_vm_unmap(vma);
|
||||
nouveau_vm_put(vma);
|
||||
kfree(vma);
|
||||
}
|
||||
nouveau_fence_unref(&fence);
|
||||
}
|
||||
|
||||
void
|
||||
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
|
||||
{
|
||||
|
@ -118,10 +153,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
|
|||
|
||||
vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
|
||||
if (vma) {
|
||||
if (--vma->refcount == 0) {
|
||||
nouveau_bo_vma_del(nvbo, vma);
|
||||
kfree(vma);
|
||||
}
|
||||
if (--vma->refcount == 0)
|
||||
nouveau_gem_object_unmap(nvbo, vma);
|
||||
}
|
||||
ttm_bo_unreserve(&nvbo->bo);
|
||||
}
|
||||
|
|
Loading…
Add table
Reference in a new issue