drm: rename dev->count_lock to dev->buf_lock
Since that's really all it protects — the legacy horror stories in drm_bufs.c. Since I don't want to waste any more time on this I didn't bother to actually look at what it protects in there, but it's at least contained now. v2: Move the spurious hunk to the right patch (Thierry). Cc: Thierry Reding <thierry.reding@gmail.com> Reviewed-by: Thierry Reding <treding@nvidia.com> Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
This commit is contained in:
parent
fc8fd40eb2
commit
2177a2182f
3 changed files with 18 additions and 18 deletions
|
@ -656,13 +656,13 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
|
||||||
DRM_DEBUG("zone invalid\n");
|
DRM_DEBUG("zone invalid\n");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
spin_lock(&dev->count_lock);
|
spin_lock(&dev->buf_lock);
|
||||||
if (dev->buf_use) {
|
if (dev->buf_use) {
|
||||||
spin_unlock(&dev->count_lock);
|
spin_unlock(&dev->buf_lock);
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
}
|
}
|
||||||
atomic_inc(&dev->buf_alloc);
|
atomic_inc(&dev->buf_alloc);
|
||||||
spin_unlock(&dev->count_lock);
|
spin_unlock(&dev->buf_lock);
|
||||||
|
|
||||||
mutex_lock(&dev->struct_mutex);
|
mutex_lock(&dev->struct_mutex);
|
||||||
entry = &dma->bufs[order];
|
entry = &dma->bufs[order];
|
||||||
|
@ -805,13 +805,13 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
|
||||||
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
|
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
|
||||||
total = PAGE_SIZE << page_order;
|
total = PAGE_SIZE << page_order;
|
||||||
|
|
||||||
spin_lock(&dev->count_lock);
|
spin_lock(&dev->buf_lock);
|
||||||
if (dev->buf_use) {
|
if (dev->buf_use) {
|
||||||
spin_unlock(&dev->count_lock);
|
spin_unlock(&dev->buf_lock);
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
}
|
}
|
||||||
atomic_inc(&dev->buf_alloc);
|
atomic_inc(&dev->buf_alloc);
|
||||||
spin_unlock(&dev->count_lock);
|
spin_unlock(&dev->buf_lock);
|
||||||
|
|
||||||
mutex_lock(&dev->struct_mutex);
|
mutex_lock(&dev->struct_mutex);
|
||||||
entry = &dma->bufs[order];
|
entry = &dma->bufs[order];
|
||||||
|
@ -1015,13 +1015,13 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
|
||||||
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
|
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
spin_lock(&dev->count_lock);
|
spin_lock(&dev->buf_lock);
|
||||||
if (dev->buf_use) {
|
if (dev->buf_use) {
|
||||||
spin_unlock(&dev->count_lock);
|
spin_unlock(&dev->buf_lock);
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
}
|
}
|
||||||
atomic_inc(&dev->buf_alloc);
|
atomic_inc(&dev->buf_alloc);
|
||||||
spin_unlock(&dev->count_lock);
|
spin_unlock(&dev->buf_lock);
|
||||||
|
|
||||||
mutex_lock(&dev->struct_mutex);
|
mutex_lock(&dev->struct_mutex);
|
||||||
entry = &dma->bufs[order];
|
entry = &dma->bufs[order];
|
||||||
|
@ -1175,7 +1175,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
|
||||||
* \param arg pointer to a drm_buf_info structure.
|
* \param arg pointer to a drm_buf_info structure.
|
||||||
* \return zero on success or a negative number on failure.
|
* \return zero on success or a negative number on failure.
|
||||||
*
|
*
|
||||||
* Increments drm_device::buf_use while holding the drm_device::count_lock
|
* Increments drm_device::buf_use while holding the drm_device::buf_lock
|
||||||
 * lock, preventing allocation of more buffers after this call. Information
|
 * lock, preventing allocation of more buffers after this call. Information
|
||||||
* about each requested buffer is then copied into user space.
|
* about each requested buffer is then copied into user space.
|
||||||
*/
|
*/
|
||||||
|
@ -1196,13 +1196,13 @@ int drm_infobufs(struct drm_device *dev, void *data,
|
||||||
if (!dma)
|
if (!dma)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
spin_lock(&dev->count_lock);
|
spin_lock(&dev->buf_lock);
|
||||||
if (atomic_read(&dev->buf_alloc)) {
|
if (atomic_read(&dev->buf_alloc)) {
|
||||||
spin_unlock(&dev->count_lock);
|
spin_unlock(&dev->buf_lock);
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
}
|
}
|
||||||
++dev->buf_use; /* Can't allocate more after this call */
|
++dev->buf_use; /* Can't allocate more after this call */
|
||||||
spin_unlock(&dev->count_lock);
|
spin_unlock(&dev->buf_lock);
|
||||||
|
|
||||||
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
|
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
|
||||||
if (dma->bufs[i].buf_count)
|
if (dma->bufs[i].buf_count)
|
||||||
|
@ -1381,13 +1381,13 @@ int drm_mapbufs(struct drm_device *dev, void *data,
|
||||||
if (!dma)
|
if (!dma)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
spin_lock(&dev->count_lock);
|
spin_lock(&dev->buf_lock);
|
||||||
if (atomic_read(&dev->buf_alloc)) {
|
if (atomic_read(&dev->buf_alloc)) {
|
||||||
spin_unlock(&dev->count_lock);
|
spin_unlock(&dev->buf_lock);
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
}
|
}
|
||||||
dev->buf_use++; /* Can't allocate more after this call */
|
dev->buf_use++; /* Can't allocate more after this call */
|
||||||
spin_unlock(&dev->count_lock);
|
spin_unlock(&dev->buf_lock);
|
||||||
|
|
||||||
if (request->count >= dma->buf_count) {
|
if (request->count >= dma->buf_count) {
|
||||||
if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
|
if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
|
||||||
|
|
|
@ -569,7 +569,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
|
||||||
INIT_LIST_HEAD(&dev->maplist);
|
INIT_LIST_HEAD(&dev->maplist);
|
||||||
INIT_LIST_HEAD(&dev->vblank_event_list);
|
INIT_LIST_HEAD(&dev->vblank_event_list);
|
||||||
|
|
||||||
spin_lock_init(&dev->count_lock);
|
spin_lock_init(&dev->buf_lock);
|
||||||
spin_lock_init(&dev->event_lock);
|
spin_lock_init(&dev->event_lock);
|
||||||
mutex_init(&dev->struct_mutex);
|
mutex_init(&dev->struct_mutex);
|
||||||
mutex_init(&dev->ctxlist_mutex);
|
mutex_init(&dev->ctxlist_mutex);
|
||||||
|
|
|
@ -1069,7 +1069,6 @@ struct drm_device {
|
||||||
|
|
||||||
/** \name Locks */
|
/** \name Locks */
|
||||||
/*@{ */
|
/*@{ */
|
||||||
spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */
|
|
||||||
struct mutex struct_mutex; /**< For others */
|
struct mutex struct_mutex; /**< For others */
|
||||||
struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */
|
struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */
|
||||||
/*@} */
|
/*@} */
|
||||||
|
@ -1077,6 +1076,7 @@ struct drm_device {
|
||||||
/** \name Usage Counters */
|
/** \name Usage Counters */
|
||||||
/*@{ */
|
/*@{ */
|
||||||
int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
|
int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
|
||||||
|
spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
|
||||||
int buf_use; /**< Buffers in use -- cannot alloc */
|
int buf_use; /**< Buffers in use -- cannot alloc */
|
||||||
atomic_t buf_alloc; /**< Buffer allocation in progress */
|
atomic_t buf_alloc; /**< Buffer allocation in progress */
|
||||||
/*@} */
|
/*@} */
|
||||||
|
|
Loading…
Reference in a new issue