Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "Mostly quiet now:

  i915: fixing userspace visible issues, all stable marked

  radeon: one more pll fix, two crashers, one suspend/resume regression"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon: Resume fbcon last
  drm/radeon: only allocate necessary size for vm bo list
  drm/radeon: don't allow RADEON_GEM_DOMAIN_CPU for command submission
  drm/radeon: avoid crash if VM command submission isn't available
  drm/radeon: lower the ref * post PLL maximum once more
  drm/i915: Prevent negative relocation deltas from wrapping
  drm/i915: Only copy back the modified fields to userspace from execbuffer
  drm/i915: Fix dynamic allocation of physical handles
commit 80e0679469

12 changed files with 298 additions and 304 deletions
@@ -1833,7 +1833,6 @@ int i915_driver_unload(struct drm_device *dev)
 		flush_workqueue(dev_priv->wq);
 
 		mutex_lock(&dev->struct_mutex);
-		i915_gem_free_all_phys_object(dev);
 		i915_gem_cleanup_ringbuffer(dev);
 		i915_gem_context_fini(dev);
 		WARN_ON(dev_priv->mm.aliasing_ppgtt);
@@ -242,18 +242,6 @@ struct intel_ddi_plls {
 #define WATCH_LISTS	0
 #define WATCH_GTT	0
 
-#define I915_GEM_PHYS_CURSOR_0 1
-#define I915_GEM_PHYS_CURSOR_1 2
-#define I915_GEM_PHYS_OVERLAY_REGS 3
-#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
-
-struct drm_i915_gem_phys_object {
-	int id;
-	struct page **page_list;
-	drm_dma_handle_t *handle;
-	struct drm_i915_gem_object *cur_obj;
-};
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -1187,9 +1175,6 @@ struct i915_gem_mm {
 	/** Bit 6 swizzling required for Y tiling */
 	uint32_t bit_6_swizzle_y;
 
-	/* storage for physical objects */
-	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-
 	/* accounting, useful for userland debugging */
 	spinlock_t object_stat_lock;
 	size_t object_memory;
@@ -1769,7 +1754,7 @@ struct drm_i915_gem_object {
 	struct drm_file *pin_filp;
 
 	/** for phy allocated objects */
-	struct drm_i915_gem_phys_object *phys_obj;
+	drm_dma_handle_t *phys_handle;
 };
 
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -2204,10 +2189,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
 #define PIN_MAPPABLE 0x1
 #define PIN_NONBLOCK 0x2
 #define PIN_GLOBAL 0x4
+#define PIN_OFFSET_BIAS 0x8
+#define PIN_OFFSET_MASK (~4095)
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm,
				     uint32_t alignment,
-				     unsigned flags);
+				     uint64_t flags);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
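Note on the two new defines above: the placement bias travels in the same (now 64-bit) flags word as the pin flags, with the page-aligned offset carried in the PIN_OFFSET_MASK bits. A minimal sketch of how a caller would encode it, using the values from this hunk (the caller itself is hypothetical; the real in-tree user is the execbuffer change further down):

	uint64_t flags = PIN_GLOBAL | PIN_OFFSET_BIAS |
			 ((256 * 1024) & PIN_OFFSET_MASK);	/* bind at or above 256 KiB */
	/* the bind code later recovers the bias as: flags & PIN_OFFSET_MASK */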
@@ -2334,13 +2321,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
-int i915_gem_attach_phys_object(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				int id,
-				int align);
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj);
-void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+				int align);
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
@@ -2465,6 +2447,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
+					  unsigned long start,
+					  unsigned long end,
					  unsigned flags);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
@@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
-static int i915_gem_phys_pwrite(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				struct drm_i915_gem_pwrite *args,
-				struct drm_file *file);
 
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
@@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+{
+	drm_dma_handle_t *phys = obj->phys_handle;
+
+	if (!phys)
+		return;
+
+	if (obj->madv == I915_MADV_WILLNEED) {
+		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+		char *vaddr = phys->vaddr;
+		int i;
+
+		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+			struct page *page = shmem_read_mapping_page(mapping, i);
+			if (!IS_ERR(page)) {
+				char *dst = kmap_atomic(page);
+				memcpy(dst, vaddr, PAGE_SIZE);
+				drm_clflush_virt_range(dst, PAGE_SIZE);
+				kunmap_atomic(dst);
+
+				set_page_dirty(page);
+				mark_page_accessed(page);
+				page_cache_release(page);
+			}
+			vaddr += PAGE_SIZE;
+		}
+		i915_gem_chipset_flush(obj->base.dev);
+	}
+
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+	drm_pci_free(obj->base.dev, phys);
+	obj->phys_handle = NULL;
+}
+
+int
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+			    int align)
+{
+	drm_dma_handle_t *phys;
+	struct address_space *mapping;
+	char *vaddr;
+	int i;
+
+	if (obj->phys_handle) {
+		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
+			return -EBUSY;
+
+		return 0;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
+	if (obj->base.filp == NULL)
+		return -EINVAL;
+
+	/* create a new object */
+	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+	if (!phys)
+		return -ENOMEM;
+
+	vaddr = phys->vaddr;
+#ifdef CONFIG_X86
+	set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
+#endif
+	mapping = file_inode(obj->base.filp)->i_mapping;
+	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+		struct page *page;
+		char *src;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page)) {
+#ifdef CONFIG_X86
+			set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+			drm_pci_free(obj->base.dev, phys);
+			return PTR_ERR(page);
+		}
+
+		src = kmap_atomic(page);
+		memcpy(vaddr, src, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+
+		vaddr += PAGE_SIZE;
+	}
+
+	obj->phys_handle = phys;
+	return 0;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pwrite *args,
+		     struct drm_file *file_priv)
+{
+	struct drm_device *dev = obj->base.dev;
+	void *vaddr = obj->phys_handle->vaddr + args->offset;
+	char __user *user_data = to_user_ptr(args->data_ptr);
+
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
+
+	i915_gem_chipset_flush(dev);
+	return 0;
+}
+
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
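For orientation, the display hunks later in this commit consume the new helpers roughly like this (a sketch assembled from those hunks, not additional new API):

	/* attach a physically contiguous backing store, then use its bus address */
	ret = i915_gem_object_attach_phys(obj, align);
	if (ret == 0)
		addr = obj->phys_handle->busaddr;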
@@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
-	if (obj->phys_obj) {
-		ret = i915_gem_phys_pwrite(dev, obj, args, file);
+	if (obj->phys_handle) {
+		ret = i915_gem_phys_pwrite(obj, args, file);
 		goto out;
 	}
 
@@ -3208,12 +3326,14 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
-			   unsigned flags)
+			   uint64_t flags)
 {
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 size, fence_size, fence_alignment, unfenced_alignment;
-	size_t gtt_max =
+	unsigned long start =
+		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+	unsigned long end =
		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
	struct i915_vma *vma;
	int ret;
@@ -3242,11 +3362,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
	/* If the object is bigger than the entire aperture, reject it early
	 * before evicting everything in a vain attempt to find space.
	 */
-	if (obj->base.size > gtt_max) {
-		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+	if (obj->base.size > end) {
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
			  obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
-			  gtt_max);
+			  end);
		return ERR_PTR(-E2BIG);
	}
 
@@ -3263,12 +3383,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 search_free:
	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
						  size, alignment,
-						  obj->cache_level, 0, gtt_max,
+						  obj->cache_level,
+						  start, end,
						  DRM_MM_SEARCH_DEFAULT,
						  DRM_MM_CREATE_DEFAULT);
	if (ret) {
		ret = i915_gem_evict_something(dev, vm, size, alignment,
-					       obj->cache_level, flags);
+					       obj->cache_level,
+					       start, end,
+					       flags);
		if (ret == 0)
			goto search_free;
 
@@ -3828,11 +3951,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
	return ret;
 }
 
+static bool
+i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (alignment &&
+	    vma->node.start & (alignment - 1))
+		return true;
+
+	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+		return true;
+
+	if (flags & PIN_OFFSET_BIAS &&
+	    vma->node.start < (flags & PIN_OFFSET_MASK))
+		return true;
+
+	return false;
+}
+
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    uint32_t alignment,
-		    unsigned flags)
+		    uint64_t flags)
 {
	struct i915_vma *vma;
	int ret;
@@ -3845,15 +3987,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
	if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
		return -EBUSY;
 
-	if ((alignment &&
-	     vma->node.start & (alignment - 1)) ||
-	    (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
+	if (i915_vma_misplaced(vma, alignment, flags)) {
		WARN(vma->pin_count,
		     "bo is already pinned with incorrect alignment:"
		     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
		     " obj->map_and_fenceable=%d\n",
		     i915_gem_obj_offset(obj, vm), alignment,
-		     flags & PIN_MAPPABLE,
+		     !!(flags & PIN_MAPPABLE),
		     obj->map_and_fenceable);
		ret = i915_vma_unbind(vma);
		if (ret)
@@ -4163,9 +4303,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
	trace_i915_gem_object_destroy(obj);
 
-	if (obj->phys_obj)
-		i915_gem_detach_phys_object(dev, obj);
-
	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
		int ret;
 
@@ -4183,6 +4320,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
		}
	}
 
+	i915_gem_object_detach_phys(obj);
+
	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing. */
	if (obj->stolen)
@@ -4646,190 +4785,6 @@ i915_gem_load(struct drm_device *dev)
	register_shrinker(&dev_priv->mm.inactive_shrinker);
 }
 
-/*
- * Create a physically contiguous memory object for this object
- * e.g. for cursor + overlay regs
- */
-static int i915_gem_init_phys_object(struct drm_device *dev,
-				     int id, int size, int align)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-	int ret;
-
-	if (dev_priv->mm.phys_objs[id - 1] || !size)
-		return 0;
-
-	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
-	if (!phys_obj)
-		return -ENOMEM;
-
-	phys_obj->id = id;
-
-	phys_obj->handle = drm_pci_alloc(dev, size, align);
-	if (!phys_obj->handle) {
-		ret = -ENOMEM;
-		goto kfree_obj;
-	}
-#ifdef CONFIG_X86
-	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-
-	dev_priv->mm.phys_objs[id - 1] = phys_obj;
-
-	return 0;
-kfree_obj:
-	kfree(phys_obj);
-	return ret;
-}
-
-static void i915_gem_free_phys_object(struct drm_device *dev, int id)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-
-	if (!dev_priv->mm.phys_objs[id - 1])
-		return;
-
-	phys_obj = dev_priv->mm.phys_objs[id - 1];
-	if (phys_obj->cur_obj) {
-		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
-	}
-
-#ifdef CONFIG_X86
-	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-	drm_pci_free(dev, phys_obj->handle);
-	kfree(phys_obj);
-	dev_priv->mm.phys_objs[id - 1] = NULL;
-}
-
-void i915_gem_free_all_phys_object(struct drm_device *dev)
-{
-	int i;
-
-	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
-		i915_gem_free_phys_object(dev, i);
-}
-
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	char *vaddr;
-	int i;
-	int page_count;
-
-	if (!obj->phys_obj)
-		return;
-	vaddr = obj->phys_obj->handle->vaddr;
-
-	page_count = obj->base.size / PAGE_SIZE;
-	for (i = 0; i < page_count; i++) {
-		struct page *page = shmem_read_mapping_page(mapping, i);
-		if (!IS_ERR(page)) {
-			char *dst = kmap_atomic(page);
-			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
-			kunmap_atomic(dst);
-
-			drm_clflush_pages(&page, 1);
-
-			set_page_dirty(page);
-			mark_page_accessed(page);
-			page_cache_release(page);
-		}
-	}
-	i915_gem_chipset_flush(dev);
-
-	obj->phys_obj->cur_obj = NULL;
-	obj->phys_obj = NULL;
-}
-
-int
-i915_gem_attach_phys_object(struct drm_device *dev,
-			    struct drm_i915_gem_object *obj,
-			    int id,
-			    int align)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-	int page_count;
-	int i;
-
-	if (id > I915_MAX_PHYS_OBJECT)
-		return -EINVAL;
-
-	if (obj->phys_obj) {
-		if (obj->phys_obj->id == id)
-			return 0;
-		i915_gem_detach_phys_object(dev, obj);
-	}
-
-	/* create a new object */
-	if (!dev_priv->mm.phys_objs[id - 1]) {
-		ret = i915_gem_init_phys_object(dev, id,
-						obj->base.size, align);
-		if (ret) {
-			DRM_ERROR("failed to init phys object %d size: %zu\n",
-				  id, obj->base.size);
-			return ret;
-		}
-	}
-
-	/* bind to the object */
-	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
-	obj->phys_obj->cur_obj = obj;
-
-	page_count = obj->base.size / PAGE_SIZE;
-
-	for (i = 0; i < page_count; i++) {
-		struct page *page;
-		char *dst, *src;
-
-		page = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-
-		src = kmap_atomic(page);
-		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(src);
-
-		mark_page_accessed(page);
-		page_cache_release(page);
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_phys_pwrite(struct drm_device *dev,
-		     struct drm_i915_gem_object *obj,
-		     struct drm_i915_gem_pwrite *args,
-		     struct drm_file *file_priv)
-{
-	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
-	char __user *user_data = to_user_ptr(args->data_ptr);
-
-	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-		unsigned long unwritten;
-
-		/* The physical object once assigned is fixed for the lifetime
-		 * of the obj, so we can safely drop the lock and continue
-		 * to access vaddr.
-		 */
-		mutex_unlock(&dev->struct_mutex);
-		unwritten = copy_from_user(vaddr, user_data, args->size);
-		mutex_lock(&dev->struct_mutex);
-		if (unwritten)
-			return -EFAULT;
-	}
-
-	i915_gem_chipset_flush(dev);
-	return 0;
-}
-
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
	struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 int
 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
+			 unsigned long start, unsigned long end,
			 unsigned flags)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct i915_vma *vma;
	int ret = 0;
@@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
	 */
 
	INIT_LIST_HEAD(&unwind_list);
-	if (flags & PIN_MAPPABLE) {
-		BUG_ON(!i915_is_ggtt(vm));
+	if (start != 0 || end != vm->total) {
		drm_mm_init_scan_with_range(&vm->mm, min_size,
-					    alignment, cache_level, 0,
-					    dev_priv->gtt.mappable_end);
+					    alignment, cache_level,
+					    start, end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
@@ -35,6 +35,9 @@
 
 #define __EXEC_OBJECT_HAS_PIN (1<<31)
 #define __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+
+#define BATCH_OFFSET_BIAS (256*1024)
 
 struct eb_vmas {
	struct list_head vmas;
@@ -545,7 +548,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence;
-	unsigned flags;
+	uint64_t flags;
	int ret;
 
	flags = 0;
@@ -559,6 +562,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
 
	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
	if (ret)
@@ -592,6 +597,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
	return 0;
 }
 
+static bool
+eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+{
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+	bool need_fence, need_mappable;
+
+	need_fence =
+		has_fenced_gpu_access &&
+		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		obj->tiling_mode != I915_TILING_NONE;
+	need_mappable = need_fence || need_reloc_mappable(vma);
+
+	WARN_ON((need_mappable || need_fence) &&
+	       !i915_is_ggtt(vma->vm));
+
+	if (entry->alignment &&
+	    vma->node.start & (entry->alignment - 1))
+		return true;
+
+	if (need_mappable && !obj->map_and_fenceable)
+		return true;
+
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+	    vma->node.start < BATCH_OFFSET_BIAS)
+		return true;
+
+	return false;
+}
+
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct list_head *vmas,
@@ -653,26 +688,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 
	/* Unbind any ill-fitting objects or pin. */
	list_for_each_entry(vma, vmas, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-		bool need_fence, need_mappable;
-
-		obj = vma->obj;
-
		if (!drm_mm_node_allocated(&vma->node))
			continue;
 
-		need_fence =
-			has_fenced_gpu_access &&
-			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-			obj->tiling_mode != I915_TILING_NONE;
-		need_mappable = need_fence || need_reloc_mappable(vma);
-
-		WARN_ON((need_mappable || need_fence) &&
-		       !i915_is_ggtt(vma->vm));
-
-		if ((entry->alignment &&
-		     vma->node.start & (entry->alignment - 1)) ||
-		    (need_mappable && !obj->map_and_fenceable))
+		if (eb_vma_misplaced(vma, has_fenced_gpu_access))
			ret = i915_vma_unbind(vma);
		else
			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -773,9 +792,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
-			if (copy_to_user(&user_relocs[j].presumed_offset,
+			if (__copy_to_user(&user_relocs[j].presumed_offset,
					 &invalid_offset,
					 sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
|
@ -999,6 +1018,25 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static struct drm_i915_gem_object *
|
||||||
|
eb_get_batch(struct eb_vmas *eb)
|
||||||
|
{
|
||||||
|
struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* SNA is doing fancy tricks with compressing batch buffers, which leads
|
||||||
|
* to negative relocation deltas. Usually that works out ok since the
|
||||||
|
* relocate address is still positive, except when the batch is placed
|
||||||
|
* very low in the GTT. Ensure this doesn't happen.
|
||||||
|
*
|
||||||
|
* Note that actual hangs have only been observed on gen7, but for
|
||||||
|
* paranoia do it everywhere.
|
||||||
|
*/
|
||||||
|
vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
|
||||||
|
|
||||||
|
return vma->obj;
|
||||||
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
||||||
struct drm_file *file,
|
struct drm_file *file,
|
||||||
|
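As a hedged worked example of the wrap the comment above describes (numbers are illustrative, not from the patch):

	/* batch placed at 16 KiB, referenced with a -64 KiB delta:
	 *   16K - 64K wraps below zero
	 * batch forced to sit at or above BATCH_OFFSET_BIAS (256 KiB):
	 *   256K - 64K = 192K, still a valid positive offset
	 */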
@@ -1153,7 +1191,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		goto err;
 
	/* take note of the batch buffer before we might reorder the lists */
-	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
+	batch_obj = eb_get_batch(eb);
 
	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1355,18 +1393,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
+		struct drm_i915_gem_exec_object __user *user_exec_list =
+			to_user_ptr(args->buffers_ptr);
+
		/* Copy the new buffer offsets back to the user's exec list. */
-		for (i = 0; i < args->buffer_count; i++)
-			exec_list[i].offset = exec2_list[i].offset;
-		/* ... and back out to userspace */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user (%d)\n",
+					  args->buffer_count, ret);
+				break;
+			}
		}
	}
 
@@ -1412,14 +1453,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec2_list,
-				   sizeof(*exec2_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		struct drm_i915_gem_exec_object2 *user_exec_list =
+			to_user_ptr(args->buffers_ptr);
+		int i;
+
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user\n",
+					  args->buffer_count);
+				break;
+			}
		}
	}
 
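Why the per-field copy in the two hunks above matters: the kernel's copy of each exec entry also carries state it sets for its own use (for instance the __EXEC_OBJECT_NEEDS_BIAS bit set in eb_get_batch earlier), so writing the whole array back would expose that and overwrite fields userspace still owns. A sketch of the old versus new copy-back for one entry, condensed from the hunks themselves:

	/* old: whole-struct copy, overwrites every field of every entry */
	copy_to_user(to_user_ptr(args->buffers_ptr), exec2_list,
		     sizeof(*exec2_list) * args->buffer_count);
	/* new: only the field the kernel legitimately changed */
	__copy_to_user(&user_exec_list[i].offset, &exec2_list[i].offset,
		       sizeof(user_exec_list[i].offset));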
@@ -1089,7 +1089,9 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
	if (ret == -ENOSPC && !retried) {
		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
-					       I915_CACHE_NONE, 0);
+					       I915_CACHE_NONE,
+					       0, dev_priv->gtt.base.total,
+					       0);
		if (ret)
			return ret;
 
@@ -7825,14 +7825,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
		addr = i915_gem_obj_ggtt_offset(obj);
	} else {
		int align = IS_I830(dev) ? 16 * 1024 : 256;
-		ret = i915_gem_attach_phys_object(dev, obj,
-						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
-						  align);
+		ret = i915_gem_object_attach_phys(obj, align);
		if (ret) {
			DRM_DEBUG_KMS("failed to attach phys object\n");
			goto fail_locked;
		}
-		addr = obj->phys_obj->handle->busaddr;
+		addr = obj->phys_handle->busaddr;
	}
 
	if (IS_GEN2(dev))
@@ -7840,10 +7838,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
 finish:
	if (intel_crtc->cursor_bo) {
-		if (INTEL_INFO(dev)->cursor_needs_physical) {
-			if (intel_crtc->cursor_bo != obj)
-				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
-		} else
+		if (!INTEL_INFO(dev)->cursor_needs_physical)
			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
	}
@@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
	struct overlay_registers __iomem *regs;
 
	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
+		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
	else
		regs = io_mapping_map_wc(dev_priv->gtt.mappable,
					 i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev)
	overlay->reg_bo = reg_bo;
 
	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
-		ret = i915_gem_attach_phys_object(dev, reg_bo,
-						  I915_GEM_PHYS_OVERLAY_REGS,
-						  PAGE_SIZE);
+		ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
		if (ret) {
			DRM_ERROR("failed to attach phys overlay regs\n");
			goto out_free_bo;
		}
-		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
+		overlay->flip_addr = reg_bo->phys_handle->busaddr;
	} else {
		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
		if (ret) {
@@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
		/* Cast to make sparse happy, but it's wc memory anyway, so
		 * equivalent to the wc io mapping on X86. */
		regs = (struct overlay_registers __iomem *)
-			overlay->reg_bo->phys_obj->handle->vaddr;
+			overlay->reg_bo->phys_handle->vaddr;
	else
		regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
	error->dovsta = I915_READ(DOVSTA);
	error->isr = I915_READ(ISR);
	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-		error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
+		error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
	else
		error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
 
@@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
		uint32_t domain = r->write_domain ?
			r->write_domain : r->read_domains;
 
+		if (domain & RADEON_GEM_DOMAIN_CPU) {
+			DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
+				  "for command submission\n");
+			return -EINVAL;
+		}
+
		p->relocs[i].domain = domain;
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			domain |= RADEON_GEM_DOMAIN_GTT;
@@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
			return -EINVAL;
 
		/* we only support VM on some SI+ rings */
-		if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) &&
-		    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
-			DRM_ERROR("Ring %d requires VM!\n", p->ring);
-			return -EINVAL;
+		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
+			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
+				DRM_ERROR("Ring %d requires VM!\n", p->ring);
+				return -EINVAL;
+			}
+		} else {
+			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
+				DRM_ERROR("VM not supported on ring %d!\n",
+					  p->ring);
+				return -EINVAL;
+			}
		}
	}
 
@@ -1533,11 +1533,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 
	radeon_restore_bios_scratch_regs(rdev);
 
-	if (fbcon) {
-		radeon_fbdev_set_suspend(rdev, 0);
-		console_unlock();
-	}
-
	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
@@ -1562,6 +1557,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
	}
 
	drm_kms_helper_poll_enable(dev);
+
+	if (fbcon) {
+		radeon_fbdev_set_suspend(rdev, 0);
+		console_unlock();
+	}
+
	return 0;
 }
@@ -862,7 +862,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
			       unsigned *fb_div, unsigned *ref_div)
 {
	/* limit reference * post divider to a maximum */
-	ref_div_max = min(128 / post_div, ref_div_max);
+	ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
 
	/* get matching reference and feedback divider */
	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
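A quick worked check of the tightened clamp (illustrative numbers): for post_div = 4 the cap drops from 128 / 4 = 32 to min(100 / 4, ref_div_max) = 25, and for a post_div large enough that 100 / post_div truncates to 0, the outer max(..., 1u) keeps ref_div_max at 1, so the min()/max() expression on the next line is never handed a zero maximum.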
@@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
					  struct list_head *head)
 {
	struct radeon_cs_reloc *list;
-	unsigned i, idx, size;
+	unsigned i, idx;
 
-	size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc);
-	list = kmalloc(size, GFP_KERNEL);
+	list = kmalloc_array(vm->max_pde_used + 1,
+			     sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (!list)
		return NULL;
 
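The kmalloc_array() form above sizes the list by the page directories actually in use (vm->max_pde_used + 1) rather than the worst case, and adds an overflow check on the product. A sketch of the equivalence, using the same arguments as the hunk:

	/* kmalloc_array(n, size, flags) behaves like kmalloc(n * size, flags)
	 * plus a check that n * size does not overflow */
	list = kmalloc((vm->max_pde_used + 1) * sizeof(struct radeon_cs_reloc),
		       GFP_KERNEL);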