dma-buf: use reservation objects

This allows reservation objects to be used in dma-buf. It is required for
implementing polling support on the fences that belong to a dma-buf.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: Mauro Carvalho Chehab <m.chehab@samsung.com> #drivers/media/v4l2-core/
Acked-by: Thomas Hellstrom <thellstrom@vmware.com> #drivers/gpu/drm/ttm
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Vincent Stehlé <vincent.stehle@laposte.net> #drivers/gpu/drm/armada/
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

parent 606b23ad60
commit 3aac4502fd
17 changed files with 65 additions and 14 deletions
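For exporters, the visible change is the extra reservation-object argument on dma_buf_export(). A minimal sketch of the two call patterns after this patch (the exporter-side names below are illustrative, not taken from this commit):

	/* Pass NULL: the dma-buf core embeds and manages a reservation
	 * object of its own (hypothetical exporter, for illustration only). */
	dmabuf = dma_buf_export(my_buf, &my_dmabuf_ops, my_buf->size,
				O_RDWR, NULL);

	/* Or hand over the reservation object that already guards the
	 * backing buffer, so importers and the driver see the same fences. */
	dmabuf = dma_buf_export(my_buf, &my_dmabuf_ops, my_buf->size,
				O_RDWR, my_buf->resv);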
@@ -25,10 +25,12 @@
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/dma-buf.h>
+#include <linux/fence.h>
 #include <linux/anon_inodes.h>
 #include <linux/export.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/reservation.h>
 
 static inline int is_dma_buf_file(struct file *);
 
@@ -56,6 +58,9 @@ static int dma_buf_release(struct inode *inode, struct file *file)
 	list_del(&dmabuf->list_node);
 	mutex_unlock(&db_list.lock);
 
+	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
+		reservation_object_fini(dmabuf->resv);
+
 	kfree(dmabuf);
 	return 0;
 }
@@ -128,6 +133,7 @@ static inline int is_dma_buf_file(struct file *file)
  * @size:	[in]	Size of the buffer
  * @flags:	[in]	mode flags for the file.
  * @exp_name:	[in]	name of the exporting module - useful for debugging.
+ * @resv:	[in]	reservation-object, NULL to allocate default one.
  *
  * Returns, on success, a newly created dma_buf object, which wraps the
  * supplied private data and operations for dma_buf_ops. On either missing
@@ -135,10 +141,17 @@ static inline int is_dma_buf_file(struct file *file)
  *
  */
 struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
-				size_t size, int flags, const char *exp_name)
+				size_t size, int flags, const char *exp_name,
+				struct reservation_object *resv)
 {
 	struct dma_buf *dmabuf;
 	struct file *file;
+	size_t alloc_size = sizeof(struct dma_buf);
+	if (!resv)
+		alloc_size += sizeof(struct reservation_object);
+	else
+		/* prevent &dma_buf[1] == dma_buf->resv */
+		alloc_size += 1;
 
 	if (WARN_ON(!priv || !ops
 		  || !ops->map_dma_buf
@@ -150,7 +163,7 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
 		return ERR_PTR(-EINVAL);
 	}
 
-	dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL);
+	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
 	if (dmabuf == NULL)
 		return ERR_PTR(-ENOMEM);
 
@@ -158,6 +171,11 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
 	dmabuf->ops = ops;
 	dmabuf->size = size;
 	dmabuf->exp_name = exp_name;
+	if (!resv) {
+		resv = (struct reservation_object *)&dmabuf[1];
+		reservation_object_init(resv);
+	}
+	dmabuf->resv = resv;
 
 	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
 	if (IS_ERR(file)) {
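The allocation trick above is worth spelling out: when resv is NULL the reservation object is tail-allocated in the same kzalloc() as the struct dma_buf, and dma_buf_release() later detects that case by comparing dmabuf->resv against &dmabuf[1]; the extra `alloc_size += 1` in the else branch only ensures that a caller-supplied object can never sit exactly at that address. A self-contained user-space sketch of the same pattern, using hypothetical stand-in types rather than the kernel structures:

	#include <stdio.h>
	#include <stdlib.h>

	struct resv { int initialized; };                 /* stand-in for reservation_object */
	struct buf  { size_t size; struct resv *resv; };  /* stand-in for dma_buf */

	static struct buf *buf_create(size_t size, struct resv *resv)
	{
		size_t alloc_size = sizeof(struct buf);

		if (!resv)
			alloc_size += sizeof(struct resv); /* room for an embedded resv */
		else
			alloc_size += 1; /* &buf[1] now lies inside this allocation */

		struct buf *b = calloc(1, alloc_size);
		if (!b)
			return NULL;

		if (!resv) {
			resv = (struct resv *)&b[1];       /* use the tail of the allocation */
			resv->initialized = 1;
		}
		b->size = size;
		b->resv = resv;
		return b;
	}

	static void buf_release(struct buf *b)
	{
		/* Tear down the reservation object only if we embedded it ourselves. */
		if (b->resv == (struct resv *)&b[1])
			b->resv->initialized = 0;
		free(b);
	}

	int main(void)
	{
		struct resv external = { .initialized = 1 };
		struct buf *a = buf_create(4096, NULL);       /* embedded resv */
		struct buf *c = buf_create(4096, &external);  /* caller-owned resv */

		if (!a || !c)
			return 1;
		printf("a embeds its resv: %d\n", a->resv == (struct resv *)&a[1]);
		printf("c embeds its resv: %d\n", c->resv == (struct resv *)&c[1]);

		buf_release(a);
		buf_release(c);
		return 0;
	}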
@@ -539,7 +539,7 @@ armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
 	int flags)
 {
 	return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
-		O_RDWR);
+		O_RDWR, NULL);
 }
 
 struct drm_gem_object *
@@ -336,7 +336,13 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
 struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
 				     struct drm_gem_object *obj, int flags)
 {
-	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
+	struct reservation_object *robj = NULL;
+
+	if (dev->driver->gem_prime_res_obj)
+		robj = dev->driver->gem_prime_res_obj(obj);
+
+	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
+			      flags, robj);
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
 
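The drm_gem_prime_export() change above introduces an optional gem_prime_res_obj hook: a driver that already keeps a reservation object on its buffer objects can expose it to the exported dma-buf, while drivers without the hook fall back to NULL and get an embedded one as before. The nouveau and radeon hunks further down add exactly such hooks; a hedged sketch of the same pattern for a hypothetical TTM-backed driver (names are illustrative, not part of this patch):

	/* Hypothetical driver callback: return the reservation object that
	 * already protects the TTM buffer object, so the exported dma-buf
	 * and the driver serialize on the same fences. */
	static struct reservation_object *
	foo_gem_prime_res_obj(struct drm_gem_object *obj)
	{
		struct foo_bo *bo = to_foo_bo(obj);  /* driver-specific container_of() */

		return bo->tbo.resv;                 /* struct ttm_buffer_object::resv */
	}

	/* ...wired up in the driver's struct drm_driver:
	 *	.gem_prime_res_obj = foo_gem_prime_res_obj,
	 */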
@@ -187,7 +187,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
 
 	return dma_buf_export(obj, &exynos_dmabuf_ops,
-				exynos_gem_obj->base.size, flags);
+				exynos_gem_obj->base.size, flags, NULL);
 }
 
 struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
@@ -237,7 +237,8 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 		return ERR_PTR(ret);
 	}
 
-	return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
+	return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags,
+			      NULL);
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -844,6 +844,7 @@ driver = {
 	.gem_prime_export = drm_gem_prime_export,
 	.gem_prime_import = drm_gem_prime_import,
 	.gem_prime_pin = nouveau_gem_prime_pin,
+	.gem_prime_res_obj = nouveau_gem_prime_res_obj,
 	.gem_prime_unpin = nouveau_gem_prime_unpin,
 	.gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
 	.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
@@ -35,6 +35,7 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
 				  struct drm_file *);
 
 extern int nouveau_gem_prime_pin(struct drm_gem_object *);
+struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *);
 extern void nouveau_gem_prime_unpin(struct drm_gem_object *);
 extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
 extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
@@ -102,3 +102,10 @@ void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
 
 	nouveau_bo_unpin(nvbo);
 }
+
+struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
+
+	return nvbo->bo.resv;
+}
@@ -171,7 +171,7 @@ static struct dma_buf_ops omap_dmabuf_ops = {
 struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
 		struct drm_gem_object *obj, int flags)
 {
-	return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags);
+	return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags, NULL);
 }
 
 struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
@@ -132,6 +132,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
 							struct sg_table *sg);
 int radeon_gem_prime_pin(struct drm_gem_object *obj);
 void radeon_gem_prime_unpin(struct drm_gem_object *obj);
+struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *);
 void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
 void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -566,6 +567,7 @@ static struct drm_driver kms_driver = {
 	.gem_prime_import = drm_gem_prime_import,
 	.gem_prime_pin = radeon_gem_prime_pin,
 	.gem_prime_unpin = radeon_gem_prime_unpin,
+	.gem_prime_res_obj = radeon_gem_prime_res_obj,
 	.gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
 	.gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
 	.gem_prime_vmap = radeon_gem_prime_vmap,
@@ -103,3 +103,11 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj)
 	radeon_bo_unpin(bo);
 	radeon_bo_unreserve(bo);
 }
+
+
+struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+	struct radeon_bo *bo = gem_to_radeon_bo(obj);
+
+	return bo->tbo.resv;
+}
@@ -420,7 +420,7 @@ struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
 				       int flags)
 {
 	return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
-			      flags);
+			      flags, NULL);
 }
 
 struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
@@ -695,7 +695,7 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
 	}
 
 	dma_buf = dma_buf_export(prime, &tdev->ops,
-				 prime->size, flags);
+				 prime->size, flags, NULL);
 	if (IS_ERR(dma_buf)) {
 		ret = PTR_ERR(dma_buf);
 		ttm_mem_global_free(tdev->mem_glob,
@@ -404,7 +404,7 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
 	if (WARN_ON(!buf->sgt_base))
 		return NULL;
 
-	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags);
+	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags, NULL);
 	if (IS_ERR(dbuf))
 		return NULL;
 
@@ -1120,7 +1120,8 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client,
 	ion_buffer_get(buffer);
 	mutex_unlock(&client->lock);
 
-	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
+	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR,
+				NULL);
 	if (IS_ERR(dmabuf)) {
 		ion_buffer_put(buffer);
 		return dmabuf;
@@ -83,6 +83,7 @@ struct drm_device;
 
 struct device_node;
 struct videomode;
+struct reservation_object;
 
 #include <drm/drm_os_linux.h>
 #include <drm/drm_hashtab.h>
@@ -923,6 +924,8 @@ struct drm_driver {
 	/* low-level interface used by drm_gem_prime_{import,export} */
 	int (*gem_prime_pin)(struct drm_gem_object *obj);
 	void (*gem_prime_unpin)(struct drm_gem_object *obj);
+	struct reservation_object * (*gem_prime_res_obj)(
+				struct drm_gem_object *obj);
 	struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
 	struct drm_gem_object *(*gem_prime_import_sg_table)(
 				struct drm_device *dev, size_t size,
@@ -115,6 +115,7 @@ struct dma_buf_ops {
  * @exp_name: name of the exporter; useful for debugging.
  * @list_node: node for dma_buf accounting and debugging.
  * @priv: exporter specific private data for this buffer object.
+ * @resv: reservation object linked to this dma-buf
  */
 struct dma_buf {
 	size_t size;
@@ -128,6 +129,7 @@ struct dma_buf {
 	const char *exp_name;
 	struct list_head list_node;
 	void *priv;
+	struct reservation_object *resv;
 };
 
 /**
@@ -168,10 +170,11 @@ void dma_buf_detach(struct dma_buf *dmabuf,
 			struct dma_buf_attachment *dmabuf_attach);
 
 struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
-			       size_t size, int flags, const char *);
+			       size_t size, int flags, const char *,
+			       struct reservation_object *);
 
-#define dma_buf_export(priv, ops, size, flags)	\
-	dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME)
+#define dma_buf_export(priv, ops, size, flags, resv)	\
+	dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv)
 
 int dma_buf_fd(struct dma_buf *dmabuf, int flags);
 struct dma_buf *dma_buf_get(int fd);