drm: major update from CVS for radeon and core

This patch pulls in a lot of changes from CVS to the main core DRM
and updates the radeon driver to 1.21.0, which adds support for r300
texrect and the radeon card type ioctl.

Signed-off-by: Dave Airlie <airlied@linux.ie>
Dave Airlie, 2006-01-02 21:32:48 +11:00 (committed by Dave Airlie)
parent b0cae664eb
commit d985c10881
12 changed files with 366 additions and 381 deletions
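
On the new card type query: later in this patch, radeon_drm.h gains RADEON_PARAM_CARD_TYPE plus the RADEON_CARD_PCI/AGP/PCIE values, and radeon_cp_getparam() learns to report them. As a rough illustration only (not part of the patch), a userspace client could query it along these lines, assuming libdrm's drmCommandWriteRead() helper and the drm_radeon_getparam_t layout (an int param plus a result pointer):

/*
 * Illustrative userspace sketch, not part of this patch: query the card
 * type parameter added by interface 1.21.  Assumes libdrm's
 * drmCommandWriteRead() and the drm_radeon_getparam_t layout from the
 * updated radeon_drm.h.
 */
#include <xf86drm.h>
#include "radeon_drm.h"

static int radeon_query_card_type(int fd)
{
        drm_radeon_getparam_t gp;
        int value = 0;

        gp.param = RADEON_PARAM_CARD_TYPE;      /* new in 1.21 */
        gp.value = &value;

        if (drmCommandWriteRead(fd, DRM_RADEON_GETPARAM, &gp, sizeof(gp)))
                return -1;      /* older kernels reject the parameter */

        /* RADEON_CARD_PCI, RADEON_CARD_AGP or RADEON_CARD_PCIE */
        return value;
}

Interfaces older than 1.21 reject the parameter with EINVAL (the getparam default case), so callers would need a fallback.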


@ -574,12 +574,11 @@ struct drm_driver {
void (*irq_postinstall) (struct drm_device * dev);
void (*irq_uninstall) (struct drm_device * dev);
void (*reclaim_buffers) (struct drm_device * dev, struct file * filp);
void (*reclaim_buffers_locked) (struct drm_device *drv,
void (*reclaim_buffers_locked) (struct drm_device *dev,
struct file *filp);
unsigned long (*get_map_ofs) (drm_map_t * map);
unsigned long (*get_reg_ofs) (struct drm_device * dev);
void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
int (*version) (drm_version_t * version);
int major;
int minor;
@ -774,10 +773,6 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
/** \name Internal function definitions */
/*@{*/
/* Misc. support (drm_init.h) */
extern int drm_flags;
extern void drm_parse_options(char *s);
/* Driver support (drm_drv.h) */
extern int drm_init(struct drm_driver *driver);
extern void drm_exit(struct drm_driver *driver);
@ -831,6 +826,8 @@ extern int drm_getstats(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_setversion(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_noop(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Context IOCTL support (drm_context.h) */
extern int drm_resctx(struct inode *inode, struct file *filp,
@ -867,10 +864,6 @@ extern int drm_rmdraw(struct inode *inode, struct file *filp,
extern int drm_getmagic(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_authmagic(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Placeholder for ioctls past */
extern int drm_noop(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* Locking IOCTL support (drm_lock.h) */
@ -885,6 +878,7 @@ extern int drm_lock_free(drm_device_t * dev,
/* Buffer management support (drm_bufs.h) */
extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request);
extern int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request);
extern int drm_addmap(drm_device_t * dev, unsigned int offset,
unsigned int size, drm_map_type_t type,
drm_map_flags_t flags, drm_local_map_t ** map_ptr);
@ -920,8 +914,8 @@ extern void drm_core_reclaim_buffers(drm_device_t * dev, struct file *filp);
/* IRQ support (drm_irq.h) */
extern int drm_control(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_irq_uninstall(drm_device_t * dev);
extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
extern int drm_irq_uninstall(drm_device_t * dev);
extern void drm_driver_irq_preinstall(drm_device_t * dev);
extern void drm_driver_irq_postinstall(drm_device_t * dev);
extern void drm_driver_irq_uninstall(drm_device_t * dev);


@ -1,5 +1,5 @@
/**
* \file drm_agpsupport.h
* \file drm_agpsupport.c
* DRM support for AGP/GART backend
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
@ -91,7 +91,7 @@ int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
/**
* Acquire the AGP device.
*
* \param dev DRM device that is to acquire AGP
* \param dev DRM device that is to acquire AGP.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device hasn't been acquired before and calls
@ -134,7 +134,7 @@ int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
/**
* Release the AGP device.
*
* \param dev DRM device that is to release AGP
* \param dev DRM device that is to release AGP.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been acquired and calls \c agp_backend_release.
@ -147,7 +147,6 @@ int drm_agp_release(drm_device_t * dev)
dev->agp->acquired = 0;
return 0;
}
EXPORT_SYMBOL(drm_agp_release);
int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
@ -447,6 +446,9 @@ int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
*
* \return pointer to a drm_agp_head structure.
*
* Gets the drm_agp_t structure which is made available by the agpgart module
* via the inter_module_* functions. Creates and initializes a drm_agp_head
* structure.
*/
drm_agp_head_t *drm_agp_init(drm_device_t * dev)
{


@ -40,7 +40,6 @@ unsigned long drm_get_resource_start(drm_device_t * dev, unsigned int resource)
{
return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);
unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
@ -301,6 +300,9 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
return -EFAULT;
}
if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
return -EPERM;
err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
&maplist);
@ -384,7 +386,6 @@ int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);
int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
@ -397,7 +398,6 @@ int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
return ret;
}
EXPORT_SYMBOL(drm_rmmap);
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
@ -548,7 +548,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
DRM_DEBUG("agp_offset: %lu\n", agp_offset);
DRM_DEBUG("agp_offset: %lx\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
@ -649,6 +649,8 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
}
dma->buf_count += entry->buf_count;
dma->seg_count += entry->seg_count;
dma->page_count += byte_count >> PAGE_SHIFT;
dma->byte_count += byte_count;
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
@ -664,7 +666,6 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
atomic_dec(&dev->buf_alloc);
return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif /* __OS_HAS_AGP */
@ -689,9 +690,13 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
return -EINVAL;
if (!dma)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
count = request->count;
order = drm_order(request->size);
size = 1 << order;
@ -882,7 +887,6 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);
static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
@ -908,6 +912,9 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
if (!dma)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
count = request->count;
order = drm_order(request->size);
size = 1 << order;
@ -1026,6 +1033,8 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
}
dma->buf_count += entry->buf_count;
dma->seg_count += entry->seg_count;
dma->page_count += byte_count >> PAGE_SHIFT;
dma->byte_count += byte_count;
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
@ -1042,7 +1051,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
return 0;
}
static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
{
drm_device_dma_t *dma = dev->dma;
drm_buf_entry_t *entry;
@ -1065,6 +1074,9 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
if (!dma)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
count = request->count;
order = drm_order(request->size);
size = 1 << order;
@ -1181,6 +1193,8 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
}
dma->buf_count += entry->buf_count;
dma->seg_count += entry->seg_count;
dma->page_count += byte_count >> PAGE_SHIFT;
dma->byte_count += byte_count;
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
@ -1196,6 +1210,8 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
atomic_dec(&dev->buf_alloc);
return 0;
}
EXPORT_SYMBOL(drm_addbufs_fb);
/**
* Add buffers for DMA transfers (ioctl).
@ -1577,5 +1593,6 @@ int drm_order(unsigned long size)
return order;
}
EXPORT_SYMBOL(drm_order);


@ -35,6 +35,7 @@
*/
#include "drmP.h"
#include "drm_sarea.h"
#include <linux/poll.h>
static int drm_open_helper(struct inode *inode, struct file *filp,
@ -42,6 +43,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
static int drm_setup(drm_device_t * dev)
{
drm_local_map_t *map;
int i;
int ret;
@ -51,6 +53,11 @@ static int drm_setup(drm_device_t * dev)
return ret;
}
/* prebuild the SAREA */
i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
if (i != 0)
return i;
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
dev->buf_use = 0;
@ -152,9 +159,167 @@ int drm_open(struct inode *inode, struct file *filp)
return retcode;
}
EXPORT_SYMBOL(drm_open);
/**
* File \c open operation.
*
* \param inode device inode.
* \param filp file pointer.
*
* Puts the dev->fops corresponding to the device minor number into
* \p filp, call the \c open method, and restore the file operations.
*/
int drm_stub_open(struct inode *inode, struct file *filp)
{
drm_device_t *dev = NULL;
int minor = iminor(inode);
int err = -ENODEV;
struct file_operations *old_fops;
DRM_DEBUG("\n");
if (!((minor >= 0) && (minor < drm_cards_limit)))
return -ENODEV;
if (!drm_heads[minor])
return -ENODEV;
if (!(dev = drm_heads[minor]->dev))
return -ENODEV;
old_fops = filp->f_op;
filp->f_op = fops_get(&dev->driver->fops);
if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
fops_put(filp->f_op);
filp->f_op = fops_get(old_fops);
}
fops_put(old_fops);
return err;
}
/**
* Check whether DRI will run on this CPU.
*
* \return non-zero if the DRI will run on this CPU, or zero otherwise.
*/
static int drm_cpu_valid(void)
{
#if defined(__i386__)
if (boot_cpu_data.x86 == 3)
return 0; /* No cmpxchg on a 386 */
#endif
#if defined(__sparc__) && !defined(__sparc_v9__)
return 0; /* No cmpxchg before v9 sparc. */
#endif
return 1;
}
/**
* Called whenever a process opens /dev/drm.
*
* \param inode device inode.
* \param filp file pointer.
* \param dev device.
* \return zero on success or a negative number on failure.
*
* Creates and initializes a drm_file structure for the file private data in \p
* filp and add it into the double linked list in \p dev.
*/
static int drm_open_helper(struct inode *inode, struct file *filp,
drm_device_t * dev)
{
int minor = iminor(inode);
drm_file_t *priv;
int ret;
if (filp->f_flags & O_EXCL)
return -EBUSY; /* No exclusive opens */
if (!drm_cpu_valid())
return -EINVAL;
DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
if (!priv)
return -ENOMEM;
memset(priv, 0, sizeof(*priv));
filp->private_data = priv;
priv->uid = current->euid;
priv->pid = current->pid;
priv->minor = minor;
priv->head = drm_heads[minor];
priv->ioctl_count = 0;
/* for compatibility root is always authenticated */
priv->authenticated = capable(CAP_SYS_ADMIN);
priv->lock_count = 0;
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
goto out_free;
}
down(&dev->struct_sem);
if (!dev->file_last) {
priv->next = NULL;
priv->prev = NULL;
dev->file_first = priv;
dev->file_last = priv;
/* first opener automatically becomes master */
priv->master = 1;
} else {
priv->next = NULL;
priv->prev = dev->file_last;
dev->file_last->next = priv;
dev->file_last = priv;
}
up(&dev->struct_sem);
#ifdef __alpha__
/*
* Default the hose
*/
if (!dev->hose) {
struct pci_dev *pci_dev;
pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
if (pci_dev) {
dev->hose = pci_dev->sysdata;
pci_dev_put(pci_dev);
}
if (!dev->hose) {
struct pci_bus *b = pci_bus_b(pci_root_buses.next);
if (b)
dev->hose = b->sysdata;
}
}
#endif
return 0;
out_free:
drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
filp->private_data = NULL;
return ret;
}
/** No-op. */
int drm_fasync(int fd, struct file *filp, int on)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->head->dev;
int retcode;
DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
(long)old_encode_dev(priv->head->device));
retcode = fasync_helper(fd, filp, on, &dev->buf_async);
if (retcode < 0)
return retcode;
return 0;
}
EXPORT_SYMBOL(drm_fasync);
/**
* Release file.
*
@ -291,7 +456,6 @@ int drm_release(struct inode *inode, struct file *filp)
if (dev->driver->postclose)
dev->driver->postclose(dev, priv);
drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
/* ========================================================
@ -318,132 +482,11 @@ int drm_release(struct inode *inode, struct file *filp)
return retcode;
}
EXPORT_SYMBOL(drm_release);
/**
* Check whether DRI will run on this CPU.
*
* \return non-zero if the DRI will run on this CPU, or zero otherwise.
*/
static int drm_cpu_valid(void)
{
#if defined(__i386__)
if (boot_cpu_data.x86 == 3)
return 0; /* No cmpxchg on a 386 */
#endif
#if defined(__sparc__) && !defined(__sparc_v9__)
return 0; /* No cmpxchg before v9 sparc. */
#endif
return 1;
}
/**
* Called whenever a process opens /dev/drm.
*
* \param inode device inode.
* \param filp file pointer.
* \param dev device.
* \return zero on success or a negative number on failure.
*
* Creates and initializes a drm_file structure for the file private data in \p
* filp and add it into the double linked list in \p dev.
*/
static int drm_open_helper(struct inode *inode, struct file *filp,
drm_device_t * dev)
{
int minor = iminor(inode);
drm_file_t *priv;
int ret;
if (filp->f_flags & O_EXCL)
return -EBUSY; /* No exclusive opens */
if (!drm_cpu_valid())
return -EINVAL;
DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
if (!priv)
return -ENOMEM;
memset(priv, 0, sizeof(*priv));
filp->private_data = priv;
priv->uid = current->euid;
priv->pid = current->pid;
priv->minor = minor;
priv->head = drm_heads[minor];
priv->ioctl_count = 0;
priv->authenticated = capable(CAP_SYS_ADMIN);
priv->lock_count = 0;
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
goto out_free;
}
down(&dev->struct_sem);
if (!dev->file_last) {
priv->next = NULL;
priv->prev = NULL;
dev->file_first = priv;
dev->file_last = priv;
} else {
priv->next = NULL;
priv->prev = dev->file_last;
dev->file_last->next = priv;
dev->file_last = priv;
}
up(&dev->struct_sem);
#ifdef __alpha__
/*
* Default the hose
*/
if (!dev->hose) {
struct pci_dev *pci_dev;
pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
if (pci_dev) {
dev->hose = pci_dev->sysdata;
pci_dev_put(pci_dev);
}
if (!dev->hose) {
struct pci_bus *b = pci_bus_b(pci_root_buses.next);
if (b)
dev->hose = b->sysdata;
}
}
#endif
return 0;
out_free:
drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
filp->private_data = NULL;
return ret;
}
/** No-op. */
int drm_fasync(int fd, struct file *filp, int on)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->head->dev;
int retcode;
DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
(long)old_encode_dev(priv->head->device));
retcode = fasync_helper(fd, filp, on, &dev->buf_async);
if (retcode < 0)
return retcode;
return 0;
}
EXPORT_SYMBOL(drm_fasync);
/** No-op. */
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
return 0;
}
EXPORT_SYMBOL(drm_poll);


@ -104,6 +104,9 @@ int drm_lock(struct inode *inode, struct file *filp,
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->lock.lock_queue, &entry);
DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
if (ret) return ret;
sigemptyset(&dev->sigmask);
sigaddset(&dev->sigmask, SIGSTOP);
sigaddset(&dev->sigmask, SIGTSTP);
@ -116,21 +119,20 @@ int drm_lock(struct inode *inode, struct file *filp,
if (dev->driver->dma_ready && (lock.flags & _DRM_LOCK_READY))
dev->driver->dma_ready(dev);
if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT))
return dev->driver->dma_quiescent(dev);
/* dev->driver->kernel_context_switch isn't used by any of the x86
* drivers but is used by the Sparc driver.
*/
if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) {
if (dev->driver->dma_quiescent(dev)) {
DRM_DEBUG( "%d waiting for DMA quiescent\n", lock.context);
return DRM_ERR(EBUSY);
}
}
if (dev->driver->kernel_context_switch &&
dev->last_context != lock.context) {
dev->driver->kernel_context_switch(dev, dev->last_context,
lock.context);
}
DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
return ret;
return 0;
}
/**


@ -128,43 +128,6 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
return retcode;
}
/**
* File \c open operation.
*
* \param inode device inode.
* \param filp file pointer.
*
* Puts the dev->fops corresponding to the device minor number into
* \p filp, call the \c open method, and restore the file operations.
*/
int drm_stub_open(struct inode *inode, struct file *filp)
{
drm_device_t *dev = NULL;
int minor = iminor(inode);
int err = -ENODEV;
struct file_operations *old_fops;
DRM_DEBUG("\n");
if (!((minor >= 0) && (minor < drm_cards_limit)))
return -ENODEV;
if (!drm_heads[minor])
return -ENODEV;
if (!(dev = drm_heads[minor]->dev))
return -ENODEV;
old_fops = filp->f_op;
filp->f_op = fops_get(&dev->driver->fops);
if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
fops_put(filp->f_op);
filp->f_op = fops_get(old_fops);
}
fops_put(old_fops);
return err;
}
/**
* Get a secondary minor number.


@ -216,6 +216,7 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_TX_UNK1_0, 16);
ADD_RANGE(R300_TX_SIZE_0, 16);
ADD_RANGE(R300_TX_FORMAT_0, 16);
ADD_RANGE(R300_TX_PITCH_0, 16);
/* Texture offset is dangerous and needs more checking */
ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
ADD_RANGE(R300_TX_UNK4_0, 16);
@ -659,7 +660,8 @@ static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
*/
int r300_do_cp_cmdbuf(drm_device_t *dev,
DRMFILE filp,
drm_file_t * filp_priv, drm_radeon_kcmd_buffer_t * cmdbuf)
drm_file_t *filp_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;


@ -797,6 +797,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_TX_FORMAT_YUV_MODE 0x00800000
#define R300_TX_PITCH_0 0x4500
#define R300_TX_OFFSET_0 0x4540
/* BEGIN: Guess from R200 */
# define R300_TXO_ENDIAN_NO_SWAP (0 << 0)


@ -1125,7 +1125,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
| (dev_priv->fb_location >> 16));
#if __OS_HAS_AGP
if (!dev_priv->is_pci) {
if (dev_priv->flags & CHIP_IS_AGP) {
RADEON_WRITE(RADEON_MC_AGP_LOCATION,
(((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) |
@ -1152,7 +1152,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
dev_priv->ring.tail = cur_read_ptr;
#if __OS_HAS_AGP
if (!dev_priv->is_pci) {
if (dev_priv->flags & CHIP_IS_AGP) {
/* set RADEON_AGP_BASE here instead of relying on X from user space */
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
@ -1278,13 +1278,15 @@ static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
/* Enable or disable PCI GART on the chip */
static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
{
u32 tmp = RADEON_READ(RADEON_AIC_CNTL);
u32 tmp;
if (dev_priv->flags & CHIP_IS_PCIE) {
radeon_set_pciegart(dev_priv, on);
return;
}
tmp = RADEON_READ(RADEON_AIC_CNTL);
if (on) {
RADEON_WRITE(RADEON_AIC_CNTL,
tmp | RADEON_PCIGART_TRANSLATE_EN);
@ -1311,14 +1313,18 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
{
drm_radeon_private_t *dev_priv = dev->dev_private;;
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
dev_priv->is_pci = init->is_pci;
if (init->is_pci && (dev_priv->flags & CHIP_IS_AGP))
{
DRM_DEBUG("Forcing AGP card to PCI mode\n");
dev_priv->flags &= ~CHIP_IS_AGP;
}
if (dev_priv->is_pci && !dev->sg) {
if ((!(dev_priv->flags & CHIP_IS_AGP)) && !dev->sg) {
DRM_ERROR("PCI GART memory not allocated!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
@ -1327,7 +1333,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
if (dev_priv->usec_timeout < 1 ||
dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
DRM_DEBUG("TIMEOUT problem!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
@ -1353,7 +1358,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
(init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
@ -1416,8 +1420,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
DRM_GETSAREA();
dev_priv->fb_offset = init->fb_offset;
dev_priv->mmio_offset = init->mmio_offset;
dev_priv->ring_offset = init->ring_offset;
dev_priv->ring_rptr_offset = init->ring_rptr_offset;
dev_priv->buffers_offset = init->buffers_offset;
@ -1425,29 +1427,19 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio) {
DRM_ERROR("could not find mmio region!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
if (!dev_priv->cp_ring) {
DRM_ERROR("could not find cp ring region!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
if (!dev_priv->ring_rptr) {
DRM_ERROR("could not find ring read pointer!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
@ -1455,7 +1447,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) {
DRM_ERROR("could not find dma buffer region!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
@ -1465,7 +1456,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
drm_core_findmap(dev, init->gart_textures_offset);
if (!dev_priv->gart_textures) {
DRM_ERROR("could not find GART texture region!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
@ -1476,7 +1466,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
init->sarea_priv_offset);
#if __OS_HAS_AGP
if (!dev_priv->is_pci) {
if (dev_priv->flags & CHIP_IS_AGP) {
drm_core_ioremap(dev_priv->cp_ring, dev);
drm_core_ioremap(dev_priv->ring_rptr, dev);
drm_core_ioremap(dev->agp_buffer_map, dev);
@ -1484,7 +1474,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
!dev_priv->ring_rptr->handle ||
!dev->agp_buffer_map->handle) {
DRM_ERROR("could not find ioremap agp regions!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
@ -1525,7 +1514,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
+ RADEON_READ(RADEON_CONFIG_APER_SIZE);
#if __OS_HAS_AGP
if (!dev_priv->is_pci)
if (dev_priv->flags & CHIP_IS_AGP)
dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
- dev->agp->base
+ dev_priv->gart_vm_start);
@ -1551,7 +1540,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
#if __OS_HAS_AGP
if (!dev_priv->is_pci) {
if (dev_priv->flags & CHIP_IS_AGP) {
/* Turn off PCI GART */
radeon_set_pcigart(dev_priv, 0);
} else
@ -1593,7 +1582,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
DRM_ERROR("failed to init PCI GART!\n");
dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(ENOMEM);
}
@ -1607,8 +1595,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev_priv->last_buf = 0;
dev->dev_private = (void *)dev_priv;
radeon_do_engine_reset(dev);
return 0;
@ -1627,11 +1613,15 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
drm_irq_uninstall(dev);
#if __OS_HAS_AGP
if (!dev_priv->is_pci) {
if (dev_priv->cp_ring != NULL)
if (dev_priv->flags & CHIP_IS_AGP) {
if (dev_priv->cp_ring != NULL) {
drm_core_ioremapfree(dev_priv->cp_ring, dev);
if (dev_priv->ring_rptr != NULL)
dev_priv->cp_ring = NULL;
}
if (dev_priv->ring_rptr != NULL) {
drm_core_ioremapfree(dev_priv->ring_rptr, dev);
dev_priv->ring_rptr = NULL;
}
if (dev->agp_buffer_map != NULL) {
drm_core_ioremapfree(dev->agp_buffer_map, dev);
dev->agp_buffer_map = NULL;
@ -1639,16 +1629,20 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
} else
#endif
{
if (dev_priv->gart_info.bus_addr)
if (dev_priv->gart_info.bus_addr) {
/* Turn off PCI GART */
radeon_set_pcigart(dev_priv, 0);
if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
DRM_ERROR("failed to cleanup PCI GART!\n");
}
if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) {
if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
{
drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
dev_priv->gart_info.addr = 0;
}
}
/* only clear to the start of flags */
memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
@ -1674,7 +1668,7 @@ static int radeon_do_resume_cp(drm_device_t * dev)
DRM_DEBUG("Starting radeon_do_resume_cp()\n");
#if __OS_HAS_AGP
if (!dev_priv->is_pci) {
if (dev_priv->flags & CHIP_IS_AGP) {
/* Turn off PCI GART */
radeon_set_pcigart(dev_priv, 0);
} else
@ -2138,7 +2132,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->flags |= CHIP_IS_PCIE;
DRM_DEBUG("%s card detected\n",
((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI"));
((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : (((dev_priv->flags & CHIP_IS_PCIE) ? "PCIE" : "PCI"))));
return ret;
}
@ -2171,7 +2165,6 @@ int radeon_driver_unload(struct drm_device *dev)
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
dev->dev_private = NULL;


@ -624,6 +624,11 @@ typedef struct drm_radeon_indirect {
int discard;
} drm_radeon_indirect_t;
/* enum for card type parameters */
#define RADEON_CARD_PCI 0
#define RADEON_CARD_AGP 1
#define RADEON_CARD_PCIE 2
/* 1.3: An ioctl to get parameters that aren't available to the 3d
* client any other way.
*/
@ -640,6 +645,7 @@ typedef struct drm_radeon_indirect {
#define RADEON_PARAM_SAREA_HANDLE 9
#define RADEON_PARAM_GART_TEX_HANDLE 10
#define RADEON_PARAM_SCRATCH_OFFSET 11
#define RADEON_PARAM_CARD_TYPE 12
typedef struct drm_radeon_getparam {
int param;


@ -38,7 +38,7 @@
#define DRIVER_NAME "radeon"
#define DRIVER_DESC "ATI Radeon"
#define DRIVER_DATE "20050911"
#define DRIVER_DATE "20051229"
/* Interface history:
*
@ -73,7 +73,7 @@
* 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color
* and GL_EXT_blend_[func|equation]_separate on r200
* 1.12- Add R300 CP microcode support - this just loads the CP on r300
* (No 3D support yet - just microcode loading)
* (No 3D support yet - just microcode loading).
* 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
* - Add hyperz support, add hyperz flags to clear ioctl.
* 1.14- Add support for color tiling
@ -88,14 +88,13 @@
* R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR
* (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6)
* 1.19- Add support for gart table in FB memory and PCIE r300
* 1.20- Add support for r300 texrect
* 1.21- Add support for card type getparam
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 19
#define DRIVER_MINOR 21
#define DRIVER_PATCHLEVEL 0
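
Since the card type getparam only exists from interface 1.21 onwards, a client would normally gate its use on the version the kernel reports. A minimal sketch, assuming libdrm's drmGetVersion()/drmFreeVersion() (illustrative, not part of this patch):

/*
 * Illustrative only: check whether the running radeon DRM exposes the
 * 1.21 card type getparam.  Assumes libdrm's drmGetVersion() helpers.
 */
#include <xf86drm.h>

static int radeon_has_card_type_param(int fd)
{
        drmVersionPtr ver = drmGetVersion(fd);
        int ok;

        if (!ver)
                return 0;
        ok = ver->version_major > 1 ||
             (ver->version_major == 1 && ver->version_minor >= 21);
        drmFreeVersion(ver);
        return ok;
}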
#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
/*
* Radeon chip families
*/
@ -138,6 +137,9 @@ enum radeon_chip_flags {
CHIP_IS_PCIE = 0x00200000UL,
};
#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
typedef struct drm_radeon_freelist {
unsigned int age;
drm_buf_t *buf;
@ -214,8 +216,6 @@ typedef struct drm_radeon_private {
int microcode_version;
int is_pci;
struct {
u32 boxes;
int freelist_timeouts;
@ -247,8 +247,6 @@ typedef struct drm_radeon_private {
drm_radeon_depth_clear_t depth_clear;
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long ring_offset;
unsigned long ring_rptr_offset;
unsigned long buffers_offset;
@ -362,6 +360,8 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
*/
#define RADEON_AGP_COMMAND 0x0f60
#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */
# define RADEON_AGP_ENABLE (1<<8)
#define RADEON_AUX_SCISSOR_CNTL 0x26f0
# define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24)
# define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25)
@ -377,6 +377,7 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
# define RADEON_PLL_WR_EN (1 << 7)
#define RADEON_CLOCK_CNTL_INDEX 0x0008
#define RADEON_CONFIG_APER_SIZE 0x0108
#define RADEON_CONFIG_MEMSIZE 0x00f8
#define RADEON_CRTC_OFFSET 0x0224
#define RADEON_CRTC_OFFSET_CNTL 0x0228
# define RADEON_CRTC_TILE_EN (1 << 15)
@ -648,6 +649,8 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
#define RADEON_WAIT_UNTIL 0x1720
# define RADEON_WAIT_CRTC_PFLIP (1 << 0)
# define RADEON_WAIT_2D_IDLE (1 << 14)
# define RADEON_WAIT_3D_IDLE (1 << 15)
# define RADEON_WAIT_2D_IDLECLEAN (1 << 16)
# define RADEON_WAIT_3D_IDLECLEAN (1 << 17)
# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18)
@ -1102,7 +1105,6 @@ do { \
write = 0; \
_tab += _i; \
} \
\
while (_size > 0) { \
*(ring + write) = *_tab++; \
write++; \


@ -1,5 +1,5 @@
/* radeon_state.c -- State support for Radeon -*- linux-c -*-
*
/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
/*
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* All Rights Reserved.
*
@ -72,10 +72,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case RADEON_EMIT_PP_MISC:
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
&data[(RADEON_RB3D_DEPTHOFFSET
-
RADEON_PP_MISC) /
4])) {
&data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
DRM_ERROR("Invalid depth buffer offset\n");
return DRM_ERR(EINVAL);
}
@ -83,10 +80,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case RADEON_EMIT_PP_CNTL:
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
&data[(RADEON_RB3D_COLOROFFSET
-
RADEON_PP_CNTL) /
4])) {
&data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
DRM_ERROR("Invalid colour buffer offset\n");
return DRM_ERR(EINVAL);
}
@ -109,10 +103,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case RADEON_EMIT_PP_TXFILTER_1:
case RADEON_EMIT_PP_TXFILTER_2:
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
&data[(RADEON_PP_TXOFFSET_0
-
RADEON_PP_TXFILTER_0) /
4])) {
&data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
DRM_ERROR("Invalid R100 texture offset\n");
return DRM_ERR(EINVAL);
}
@ -126,8 +117,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case R200_EMIT_PP_CUBIC_OFFSETS_5:{
int i;
for (i = 0; i < 5; i++) {
if (radeon_check_and_fixup_offset
(dev_priv, filp_priv, &data[i])) {
if (radeon_check_and_fixup_offset(dev_priv,
filp_priv,
&data[i])) {
DRM_ERROR
("Invalid R200 cubic texture offset\n");
return DRM_ERR(EINVAL);
@ -240,7 +232,8 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
dev_priv,
drm_file_t *filp_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
drm_radeon_kcmd_buffer_t *
cmdbuf,
unsigned int *cmdsz)
{
u32 *cmd = (u32 *) cmdbuf->buf;
@ -555,7 +548,8 @@ static struct {
{R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
{R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
{R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
"R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
{R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
{R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
{R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
@ -985,7 +979,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
* rendering a quad into just those buffers. Thus, we have to
* make sure the 3D engine is configured correctly.
*/
if ((dev_priv->microcode_version == UCODE_R200) &&
else if ((dev_priv->microcode_version == UCODE_R200) &&
(flags & (RADEON_DEPTH | RADEON_STENCIL))) {
int tempPP_CNTL;
@ -1637,6 +1631,14 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
(u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
dwords = size / 4;
#define RADEON_COPY_MT(_buf, _data, _width) \
do { \
if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
return DRM_ERR(EFAULT); \
} \
} while(0)
if (microtile) {
/* texture micro tiling in use, minimum texture width is thus 16 bytes.
however, we cannot use blitter directly for texture width < 64 bytes,
@ -1648,46 +1650,19 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
from user space. */
if (tex->height == 1) {
if (tex_width >= 64 || tex_width <= 16) {
if (DRM_COPY_FROM_USER(buffer, data,
tex_width *
sizeof(u32))) {
DRM_ERROR
("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data,
tex_width * sizeof(u32));
} else if (tex_width == 32) {
if (DRM_COPY_FROM_USER
(buffer, data, 16)) {
DRM_ERROR
("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
if (DRM_COPY_FROM_USER
(buffer + 8, data + 16, 16)) {
DRM_ERROR
("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data, 16);
RADEON_COPY_MT(buffer + 8,
data + 16, 16);
}
} else if (tex_width >= 64 || tex_width == 16) {
if (DRM_COPY_FROM_USER(buffer, data,
dwords * sizeof(u32))) {
DRM_ERROR("EFAULT on data, %d dwords\n",
dwords);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data,
dwords * sizeof(u32));
} else if (tex_width < 16) {
for (i = 0; i < tex->height; i++) {
if (DRM_COPY_FROM_USER
(buffer, data, tex_width)) {
DRM_ERROR
("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data, tex_width);
buffer += 4;
data += tex_width;
}
@ -1695,37 +1670,13 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
/* TODO: make sure this works when not fitting in one buffer
(i.e. 32bytes x 2048...) */
for (i = 0; i < tex->height; i += 2) {
if (DRM_COPY_FROM_USER
(buffer, data, 16)) {
DRM_ERROR
("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data, 16);
data += 16;
if (DRM_COPY_FROM_USER
(buffer + 8, data, 16)) {
DRM_ERROR
("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer + 8, data, 16);
data += 16;
if (DRM_COPY_FROM_USER
(buffer + 4, data, 16)) {
DRM_ERROR
("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer + 4, data, 16);
data += 16;
if (DRM_COPY_FROM_USER
(buffer + 12, data, 16)) {
DRM_ERROR
("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer + 12, data, 16);
data += 16;
buffer += 16;
}
@ -1735,31 +1686,22 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
/* Texture image width is larger than the minimum, so we
* can upload it directly.
*/
if (DRM_COPY_FROM_USER(buffer, data,
dwords * sizeof(u32))) {
DRM_ERROR("EFAULT on data, %d dwords\n",
dwords);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data,
dwords * sizeof(u32));
} else {
/* Texture image width is less than the minimum, so we
* need to pad out each image scanline to the minimum
* width.
*/
for (i = 0; i < tex->height; i++) {
if (DRM_COPY_FROM_USER
(buffer, data, tex_width)) {
DRM_ERROR
("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data, tex_width);
buffer += 8;
data += tex_width;
}
}
}
#undef RADEON_COPY_MT
buf->filp = filp;
buf->used = size;
offset = dev_priv->gart_buffers_offset + buf->offset;
@ -2158,6 +2100,11 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
LOCK_TEST_WITH_RETURN(dev, filp);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data,
@ -2818,7 +2765,8 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER);
if (kbuf == NULL)
return DRM_ERR(ENOMEM);
if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, cmdbuf.bufsz)) {
if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf,
cmdbuf.bufsz)) {
drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
return DRM_ERR(EFAULT);
}
@ -2981,7 +2929,7 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
value = dev_priv->gart_vm_start;
break;
case RADEON_PARAM_REGISTER_HANDLE:
value = dev_priv->mmio_offset;
value = dev_priv->mmio->offset;
break;
case RADEON_PARAM_STATUS_HANDLE:
value = dev_priv->ring_rptr_offset;
@ -3004,6 +2952,15 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
case RADEON_PARAM_GART_TEX_HANDLE:
value = dev_priv->gart_textures_offset;
break;
case RADEON_PARAM_CARD_TYPE:
if (dev_priv->flags & CHIP_IS_PCIE)
value = RADEON_CARD_PCIE;
else if (dev_priv->flags & CHIP_IS_AGP)
value = RADEON_CARD_AGP;
else
value = RADEON_CARD_PCI;
break;
default:
return DRM_ERR(EINVAL);
}
@ -3066,6 +3023,7 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
/* When a client dies:
* - Check for and clean up flipped page state
* - Free any alloced GART memory.
* - Free any alloced radeon surfaces.
*
* DRM infrastructure takes care of reclaiming dma buffers.
*/
@ -3092,6 +3050,7 @@ int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_radeon_driver_file_fields *radeon_priv;
DRM_DEBUG("\n");
radeon_priv =
(struct drm_radeon_driver_file_fields *)
drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
@ -3100,6 +3059,7 @@ int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
return -ENOMEM;
filp_priv->driver_priv = radeon_priv;
if (dev_priv)
radeon_priv->radeon_fb_delta = dev_priv->fb_location;
else