drm: mark drm_buf and drm_map as legacy
Move internal declarations to drm_legacy.h and add the drm_legacy_*() prefix to all legacy functions.

[airlied: add a bit of an explanation to drm_legacy.h]

Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
Reviewed-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 9f50bd8905
commit 9fc5cde7fb
12 changed files with 136 additions and 133 deletions
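The rename is purely mechanical: call sites keep their arguments and only gain the drm_legacy_ prefix. As a rough, hypothetical sketch (the example_map_registers() helper below is not part of this patch; only drm_legacy_addmap() and the _DRM_REGISTERS/_DRM_READ_ONLY flags come from the diff), a legacy driver mapping its register aperture after this change would look like:

#include <drm/drmP.h>	/* drm_legacy_addmap() is declared here after this patch */

/* Hypothetical helper, for illustration only. */
static int example_map_registers(struct drm_device *dev,
				 resource_size_t mmio_base,
				 unsigned int mmio_size,
				 struct drm_local_map **mmio)
{
	/* Previously spelled drm_addmap(); the argument list is unchanged. */
	return drm_legacy_addmap(dev, mmio_base, mmio_size,
				 _DRM_REGISTERS, _DRM_READ_ONLY, mmio);
}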
@@ -1,18 +1,13 @@
-/**
- * \file drm_bufs.c
- * Generic buffer template
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
 /*
- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ * Legacy: Generic DRM Buffer Management
  *
  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
  *
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Gareth Hughes <gareth@valinux.com>
+ *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
@@ -39,6 +34,7 @@
 #include <linux/export.h>
 #include <asm/shmparam.h>
 #include <drm/drmP.h>
+#include "drm_legacy.h"
 
 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						    struct drm_local_map *map)
@@ -365,9 +361,9 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 	return 0;
 }
 
-int drm_addmap(struct drm_device * dev, resource_size_t offset,
-	       unsigned int size, enum drm_map_type type,
-	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
+int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
+		      unsigned int size, enum drm_map_type type,
+		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
 {
 	struct drm_map_list *list;
 	int rc;
@@ -377,8 +373,7 @@ int drm_addmap(struct drm_device * dev, resource_size_t offset,
 	*map_ptr = list->map;
 	return rc;
 }
-
-EXPORT_SYMBOL(drm_addmap);
+EXPORT_SYMBOL(drm_legacy_addmap);
 
 /**
  * Ioctl to specify a range of memory that is available for mapping by a
@@ -391,8 +386,8 @@ EXPORT_SYMBOL(drm_addmap);
  * \return zero on success or a negative value on error.
  *
  */
-int drm_addmap_ioctl(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv)
+int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
 {
 	struct drm_map *map = data;
 	struct drm_map_list *maplist;
@@ -429,9 +424,9 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
  * its being used, and free any associate resource (such as MTRR's) if it's not
  * being on use.
  *
- * \sa drm_addmap
+ * \sa drm_legacy_addmap
  */
-int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
+int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 {
 	struct drm_map_list *r_list = NULL, *list_t;
 	drm_dma_handle_t dmah;
@@ -485,19 +480,19 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 
 	return 0;
 }
-EXPORT_SYMBOL(drm_rmmap_locked);
+EXPORT_SYMBOL(drm_legacy_rmmap_locked);
 
-int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
+int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
 {
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = drm_rmmap_locked(dev, map);
+	ret = drm_legacy_rmmap_locked(dev, map);
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
 }
-EXPORT_SYMBOL(drm_rmmap);
+EXPORT_SYMBOL(drm_legacy_rmmap);
 
 /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
  * the last close of the device, and this is necessary for cleanup when things
@@ -514,8 +509,8 @@ EXPORT_SYMBOL(drm_rmmap);
  * \param arg pointer to a struct drm_map structure.
  * \return zero on success or a negative value on error.
  */
-int drm_rmmap_ioctl(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
+int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
 {
 	struct drm_map *request = data;
 	struct drm_local_map *map = NULL;
@@ -546,7 +541,7 @@ int drm_rmmap_ioctl(struct drm_device *dev, void *data,
 		return 0;
 	}
 
-	ret = drm_rmmap_locked(dev, map);
+	ret = drm_legacy_rmmap_locked(dev, map);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -599,7 +594,8 @@ static void drm_cleanup_buf_error(struct drm_device * dev,
  * reallocates the buffer list of the same size order to accommodate the new
  * buffers.
  */
-int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
+int drm_legacy_addbufs_agp(struct drm_device *dev,
+			   struct drm_buf_desc *request)
 {
 	struct drm_device_dma *dma = dev->dma;
 	struct drm_buf_entry *entry;
@@ -759,10 +755,11 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 	atomic_dec(&dev->buf_alloc);
 	return 0;
 }
-EXPORT_SYMBOL(drm_addbufs_agp);
+EXPORT_SYMBOL(drm_legacy_addbufs_agp);
 #endif /* __OS_HAS_AGP */
 
-int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
+int drm_legacy_addbufs_pci(struct drm_device *dev,
+			   struct drm_buf_desc *request)
 {
 	struct drm_device_dma *dma = dev->dma;
 	int count;
@@ -964,9 +961,10 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 	return 0;
 
 }
-EXPORT_SYMBOL(drm_addbufs_pci);
+EXPORT_SYMBOL(drm_legacy_addbufs_pci);
 
-static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
+static int drm_legacy_addbufs_sg(struct drm_device *dev,
+				 struct drm_buf_desc *request)
 {
 	struct drm_device_dma *dma = dev->dma;
 	struct drm_buf_entry *entry;
@@ -1135,8 +1133,8 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
  * PCI memory respectively.
  */
-int drm_addbufs(struct drm_device *dev, void *data,
-		struct drm_file *file_priv)
+int drm_legacy_addbufs(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
 {
 	struct drm_buf_desc *request = data;
 	int ret;
@@ -1149,15 +1147,15 @@ int drm_addbufs(struct drm_device *dev, void *data,
 
 #if __OS_HAS_AGP
 	if (request->flags & _DRM_AGP_BUFFER)
-		ret = drm_addbufs_agp(dev, request);
+		ret = drm_legacy_addbufs_agp(dev, request);
 	else
 #endif
 	if (request->flags & _DRM_SG_BUFFER)
-		ret = drm_addbufs_sg(dev, request);
+		ret = drm_legacy_addbufs_sg(dev, request);
 	else if (request->flags & _DRM_FB_BUFFER)
 		ret = -EINVAL;
 	else
-		ret = drm_addbufs_pci(dev, request);
+		ret = drm_legacy_addbufs_pci(dev, request);
 
 	return ret;
 }
@@ -1179,8 +1177,8 @@ int drm_addbufs(struct drm_device *dev, void *data,
  * lock, preventing of allocating more buffers after this call. Information
  * about each requested buffer is then copied into user space.
  */
-int drm_infobufs(struct drm_device *dev, void *data,
-		 struct drm_file *file_priv)
+int drm_legacy_infobufs(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
 {
 	struct drm_device_dma *dma = dev->dma;
 	struct drm_buf_info *request = data;
@@ -1260,8 +1258,8 @@ int drm_infobufs(struct drm_device *dev, void *data,
  *
  * \note This ioctl is deprecated and mostly never used.
  */
-int drm_markbufs(struct drm_device *dev, void *data,
-		 struct drm_file *file_priv)
+int drm_legacy_markbufs(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
 {
 	struct drm_device_dma *dma = dev->dma;
 	struct drm_buf_desc *request = data;
@@ -1307,8 +1305,8 @@ int drm_markbufs(struct drm_device *dev, void *data,
  * Calls free_buffer() for each used buffer.
  * This function is primarily used for debugging.
  */
-int drm_freebufs(struct drm_device *dev, void *data,
-		 struct drm_file *file_priv)
+int drm_legacy_freebufs(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
 {
 	struct drm_device_dma *dma = dev->dma;
 	struct drm_buf_free *request = data;
@@ -1360,8 +1358,8 @@ int drm_freebufs(struct drm_device *dev, void *data,
  * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls
  * drm_mmap_dma().
  */
-int drm_mapbufs(struct drm_device *dev, void *data,
-		struct drm_file *file_priv)
+int drm_legacy_mapbufs(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
 {
 	struct drm_device_dma *dma = dev->dma;
 	int retcode = 0;
@@ -1448,7 +1446,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
 	return retcode;
 }
 
-int drm_dma_ioctl(struct drm_device *dev, void *data,
+int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
 		  struct drm_file *file_priv)
 {
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -1460,7 +1458,7 @@ int drm_dma_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-struct drm_local_map *drm_getsarea(struct drm_device *dev)
+struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
 {
 	struct drm_map_list *entry;
 
@@ -1472,4 +1470,4 @@ struct drm_local_map *drm_getsarea(struct drm_device *dev)
 	}
 	return NULL;
 }
-EXPORT_SYMBOL(drm_getsarea);
+EXPORT_SYMBOL(drm_legacy_getsarea);
@@ -143,7 +143,7 @@ static void drm_master_destroy(struct kref *kref)
 
 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
 		if (r_list->master == master) {
-			drm_rmmap_locked(dev, r_list->map);
+			drm_legacy_rmmap_locked(dev, r_list->map);
 			r_list = NULL;
 		}
 	}
@@ -779,7 +779,7 @@ void drm_dev_unregister(struct drm_device *dev)
 	drm_vblank_cleanup(dev);
 
 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
-		drm_rmmap(dev, r_list->map);
+		drm_legacy_rmmap(dev, r_list->map);
 
 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
@@ -62,8 +62,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
@@ -87,12 +87,12 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 
 	DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
@@ -23,6 +23,11 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+/*
+ * This file contains legacy interfaces that modern drm drivers
+ * should no longer be using. They cannot be removed as legacy
+ * drivers use them, and removing them are API breaks.
+ */
 struct drm_device;
 struct drm_file;
 
@@ -48,4 +53,19 @@ int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
 int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
 int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
 
+/*
+ * Generic Buffer Management
+ */
+
+#define DRM_MAP_HASH_OFFSET 0x10000000
+
+int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+
 #endif /* __DRM_LEGACY_H__ */
@@ -196,7 +196,7 @@ static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret;
 
-	master_priv->sarea = drm_getsarea(dev);
+	master_priv->sarea = drm_legacy_getsarea(dev);
 	if (master_priv->sarea) {
 		master_priv->sarea_priv = (drm_i915_sarea_t *)
 			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
@@ -502,31 +502,31 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
 		return err;
 	}
 
-	/* Make drm_addbufs happy by not trying to create a mapping for less
-	 * than a page.
+	/* Make drm_legacy_addbufs happy by not trying to create a mapping for
+	 * less than a page.
 	 */
 	if (warp_size < PAGE_SIZE)
 		warp_size = PAGE_SIZE;
 
 	offset = 0;
-	err = drm_addmap(dev, offset, warp_size,
-			 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
+	err = drm_legacy_addmap(dev, offset, warp_size,
+				_DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
 	if (err) {
 		DRM_ERROR("Unable to map WARP microcode: %d\n", err);
 		return err;
 	}
 
 	offset += warp_size;
-	err = drm_addmap(dev, offset, dma_bs->primary_size,
-			 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
+	err = drm_legacy_addmap(dev, offset, dma_bs->primary_size,
+				_DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
 	if (err) {
 		DRM_ERROR("Unable to map primary DMA region: %d\n", err);
 		return err;
 	}
 
 	offset += dma_bs->primary_size;
-	err = drm_addmap(dev, offset, secondary_size,
-			 _DRM_AGP, 0, &dev->agp_buffer_map);
+	err = drm_legacy_addmap(dev, offset, secondary_size,
+				_DRM_AGP, 0, &dev->agp_buffer_map);
 	if (err) {
 		DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
 		return err;
@@ -538,7 +538,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
 	req.flags = _DRM_AGP_BUFFER;
 	req.agp_start = offset;
 
-	err = drm_addbufs_agp(dev, &req);
+	err = drm_legacy_addbufs_agp(dev, &req);
 	if (err) {
 		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
 		return err;
@@ -559,8 +559,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
 	}
 
 	offset += secondary_size;
-	err = drm_addmap(dev, offset, agp_size - offset,
-			 _DRM_AGP, 0, &dev_priv->agp_textures);
+	err = drm_legacy_addmap(dev, offset, agp_size - offset,
+				_DRM_AGP, 0, &dev_priv->agp_textures);
 	if (err) {
 		DRM_ERROR("Unable to map AGP texture region %d\n", err);
 		return err;
@@ -602,7 +602,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
  *
  * \todo
  * Determine whether the maximum address passed to drm_pci_alloc is correct.
- * The same goes for drm_addbufs_pci.
+ * The same goes for drm_legacy_addbufs_pci.
  *
  * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
  */
@@ -622,15 +622,15 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
 		return -EFAULT;
 	}
 
-	/* Make drm_addbufs happy by not trying to create a mapping for less
-	 * than a page.
+	/* Make drm_legacy_addbufs happy by not trying to create a mapping for
+	 * less than a page.
 	 */
 	if (warp_size < PAGE_SIZE)
 		warp_size = PAGE_SIZE;
 
 	/* The proper alignment is 0x100 for this mapping */
-	err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
-			 _DRM_READ_ONLY, &dev_priv->warp);
+	err = drm_legacy_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
+				_DRM_READ_ONLY, &dev_priv->warp);
 	if (err != 0) {
 		DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
 			  err);
@@ -645,8 +645,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
 	for (primary_size = dma_bs->primary_size; primary_size != 0;
 	     primary_size >>= 1) {
 		/* The proper alignment for this mapping is 0x04 */
-		err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
-				 _DRM_READ_ONLY, &dev_priv->primary);
+		err = drm_legacy_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
+					_DRM_READ_ONLY, &dev_priv->primary);
 		if (!err)
 			break;
 	}
@@ -669,7 +669,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
 		req.count = bin_count;
 		req.size = dma_bs->secondary_bin_size;
 
-		err = drm_addbufs_pci(dev, &req);
+		err = drm_legacy_addbufs_pci(dev, &req);
 		if (!err)
 			break;
 	}
@@ -708,15 +708,16 @@ static int mga_do_dma_bootstrap(struct drm_device *dev,
 	/* The first steps are the same for both PCI and AGP based DMA. Map
 	 * the cards MMIO registers and map a status page.
 	 */
-	err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
-			 _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
+	err = drm_legacy_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
+				_DRM_REGISTERS, _DRM_READ_ONLY,
+				&dev_priv->mmio);
 	if (err) {
 		DRM_ERROR("Unable to map MMIO region: %d\n", err);
 		return err;
 	}
 
-	err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
-			 _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
+	err = drm_legacy_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
+				_DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
 			 &dev_priv->status);
 	if (err) {
 		DRM_ERROR("Unable to map status region: %d\n", err);
@@ -809,7 +810,7 @@ static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init)
 	dev_priv->texture_offset = init->texture_offset[0];
 	dev_priv->texture_size = init->texture_size[0];
 
-	dev_priv->sarea = drm_getsarea(dev);
+	dev_priv->sarea = drm_legacy_getsarea(dev);
 	if (!dev_priv->sarea) {
 		DRM_ERROR("failed to find sarea!\n");
 		return -EINVAL;
@@ -452,7 +452,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
 	dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
 					 (dev_priv->span_offset >> 5));
 
-	dev_priv->sarea = drm_getsarea(dev);
+	dev_priv->sarea = drm_legacy_getsarea(dev);
 	if (!dev_priv->sarea) {
 		DRM_ERROR("could not find sarea!\n");
 		dev->dev_private = (void *)dev_priv;
@@ -2052,7 +2052,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
 	dev_priv->buffers_offset = init->buffers_offset;
 	dev_priv->gart_textures_offset = init->gart_textures_offset;
 
-	master_priv->sarea = drm_getsarea(dev);
+	master_priv->sarea = drm_legacy_getsarea(dev);
 	if (!master_priv->sarea) {
 		DRM_ERROR("could not find sarea!\n");
 		r600_do_cleanup_cp(dev);
@@ -1298,7 +1298,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
 	dev_priv->buffers_offset = init->buffers_offset;
 	dev_priv->gart_textures_offset = init->gart_textures_offset;
 
-	master_priv->sarea = drm_getsarea(dev);
+	master_priv->sarea = drm_legacy_getsarea(dev);
 	if (!master_priv->sarea) {
 		DRM_ERROR("could not find sarea!\n");
 		radeon_do_cleanup_cp(dev);
@@ -2106,9 +2106,9 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
 	else
 		dev_priv->flags |= RADEON_IS_PCI;
 
-	ret = drm_addmap(dev, pci_resource_start(dev->pdev, 2),
-			 pci_resource_len(dev->pdev, 2), _DRM_REGISTERS,
-			 _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
+	ret = drm_legacy_addmap(dev, pci_resource_start(dev->pdev, 2),
+				pci_resource_len(dev->pdev, 2), _DRM_REGISTERS,
+				_DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
 	if (ret != 0)
 		return ret;
 
@@ -2135,8 +2135,8 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
 
 	/* prebuild the SAREA */
 	sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
-	ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK,
-			 &master_priv->sarea);
+	ret = drm_legacy_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK,
+				&master_priv->sarea);
 	if (ret) {
 		DRM_ERROR("SAREA setup failed\n");
 		kfree(master_priv);
@@ -2162,7 +2162,7 @@ void radeon_master_destroy(struct drm_device *dev, struct drm_master *master)
 
 	master_priv->sarea_priv = NULL;
 	if (master_priv->sarea)
-		drm_rmmap_locked(dev, master_priv->sarea);
+		drm_legacy_rmmap_locked(dev, master_priv->sarea);
 
 	kfree(master_priv);
 
@@ -2181,9 +2181,9 @@ int radeon_driver_firstopen(struct drm_device *dev)
 	dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
 
 	dev_priv->fb_aper_offset = pci_resource_start(dev->pdev, 0);
-	ret = drm_addmap(dev, dev_priv->fb_aper_offset,
-			 pci_resource_len(dev->pdev, 0), _DRM_FRAME_BUFFER,
-			 _DRM_WRITE_COMBINING, &map);
+	ret = drm_legacy_addmap(dev, dev_priv->fb_aper_offset,
+				pci_resource_len(dev->pdev, 0),
+				_DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, &map);
 	if (ret != 0)
 		return ret;
 
@@ -2196,7 +2196,7 @@ int radeon_driver_unload(struct drm_device *dev)
 
 	DRM_DEBUG("\n");
 
-	drm_rmmap(dev, dev_priv->mmio);
+	drm_legacy_rmmap(dev, dev_priv->mmio);
 
 	kfree(dev_priv);
 
@@ -556,7 +556,7 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
 /*
  * Initialize mappings. On Savage4 and SavageIX the alignment
  * and size of the aperture is not suitable for automatic MTRR setup
- * in drm_addmap. Therefore we add them manually before the maps are
+ * in drm_legacy_addmap. Therefore we add them manually before the maps are
  * initialized, and tear them down on last close.
  */
 int savage_driver_firstopen(struct drm_device *dev)
@@ -624,19 +624,20 @@ int savage_driver_firstopen(struct drm_device *dev)
 		/* Automatic MTRR setup will do the right thing. */
 	}
 
-	ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
-			 _DRM_READ_ONLY, &dev_priv->mmio);
+	ret = drm_legacy_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE,
+				_DRM_REGISTERS, _DRM_READ_ONLY,
+				&dev_priv->mmio);
 	if (ret)
 		return ret;
 
-	ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
-			 _DRM_WRITE_COMBINING, &dev_priv->fb);
+	ret = drm_legacy_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
+				_DRM_WRITE_COMBINING, &dev_priv->fb);
 	if (ret)
 		return ret;
 
-	ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
-			 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
-			 &dev_priv->aperture);
+	ret = drm_legacy_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
+				_DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
+				&dev_priv->aperture);
 	return ret;
 }
 
@@ -698,7 +699,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
 	dev_priv->texture_offset = init->texture_offset;
 	dev_priv->texture_size = init->texture_size;
 
-	dev_priv->sarea = drm_getsarea(dev);
+	dev_priv->sarea = drm_legacy_getsarea(dev);
 	if (!dev_priv->sarea) {
 		DRM_ERROR("could not find sarea!\n");
 		savage_do_cleanup_bci(dev);
@@ -31,7 +31,7 @@ static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init)
 
 	DRM_DEBUG("\n");
 
-	dev_priv->sarea = drm_getsarea(dev);
+	dev_priv->sarea = drm_legacy_getsarea(dev);
 	if (!dev_priv->sarea) {
 		DRM_ERROR("could not find sarea!\n");
 		dev->dev_private = (void *)dev_priv;
@@ -153,8 +153,6 @@ int drm_err(const char *func, const char *format, ...);
 
 #define DRM_MAGIC_HASH_ORDER  4  /**< Size of key hash table. Must be power of 2. */
 
-#define DRM_MAP_HASH_OFFSET 0x10000000
-
 /*@}*/
 
 /***********************************************************************/
@@ -1243,31 +1241,6 @@ extern void drm_idlelock_release(struct drm_lock_data *lock_data);
 
 extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);
 
-/* Buffer management support (drm_bufs.h) */
-extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
-extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
-extern int drm_addmap(struct drm_device *dev, resource_size_t offset,
-		      unsigned int size, enum drm_map_type type,
-		      enum drm_map_flags flags, struct drm_local_map **map_ptr);
-extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
-			    struct drm_file *file_priv);
-extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map);
-extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map);
-extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
-			   struct drm_file *file_priv);
-extern int drm_addbufs(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv);
-extern int drm_infobufs(struct drm_device *dev, void *data,
-			struct drm_file *file_priv);
-extern int drm_markbufs(struct drm_device *dev, void *data,
-			struct drm_file *file_priv);
-extern int drm_freebufs(struct drm_device *dev, void *data,
-			struct drm_file *file_priv);
-extern int drm_mapbufs(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv);
-extern int drm_dma_ioctl(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv);
-
 /* DMA support (drm_dma.h) */
 extern int drm_legacy_dma_setup(struct drm_device *dev);
 extern void drm_legacy_dma_takedown(struct drm_device *dev);
@@ -1354,8 +1327,6 @@ extern unsigned int drm_timestamp_monotonic;
 
 extern struct class *drm_class;
 
-extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
-
 /* Debugfs support */
 #if defined(CONFIG_DEBUG_FS)
 extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
@@ -1466,6 +1437,18 @@ extern int drm_pci_set_unique(struct drm_device *dev,
 			      struct drm_master *master,
 			      struct drm_unique *u);
 
+/* Legacy Support */
+
+int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
+		      unsigned int size, enum drm_map_type type,
+		      enum drm_map_flags flags, struct drm_local_map **map_p);
+int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
+int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
+struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
+
+int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
+int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
+
 /* sysfs support (drm_sysfs.c) */
 struct drm_sysfs_class;
 extern struct class *drm_sysfs_create(struct module *owner, char *name);