Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull DRM fixes from Dave Airlie:
 "I was going to leave this until post -rc1 but sysfs fixes broke
  hotplug in userspace, so I had to fix it harder, otherwise a set of
  pulls from intel, radeon and vmware,

  The vmware/ttm changes are bit larger but since its early and they are
  unlikely to break anything else I put them in, it lets vmware work
  with dri3"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (36 commits)
  drm/sysfs: fix hotplug regression since lifetime changes
  drm/exynos: g2d: fix memory leak to userptr
  drm/i915: Fix gen3 self-refresh watermarks
  drm/ttm: Remove set_need_resched from the ttm fault handler
  drm/ttm: Don't move non-existing data
  drm/radeon: hook up backlight functions for CI and KV family.
  drm/i915: Replicate BIOS eDP bpp clamping hack for hsw
  drm/i915: Do not enable package C8 on unsupported hardware
  drm/i915: Hold pc8 lock around toggling pc8.gpu_idle
  drm/i915: encoder->get_config is no longer optional
  drm/i915/tv: add ->get_config callback
  drm/radeon/cik: Add macrotile mode array query
  drm/radeon/cik: Return backend map information to userspace
  drm/vmwgfx: Make vmwgfx dma buffers prime aware
  drm/vmwgfx: Make surfaces prime-aware
  drm/vmwgfx: Hook up the prime ioctls
  drm/ttm: Add a minimal prime implementation for ttm base objects
  drm/vmwgfx: Fix false lockdep warning
  drm/ttm: Allow execbuf util reserves without ticket
  drm/i915: restore the early forcewake cleanup
  ...
Linus Torvalds 2013-11-22 10:56:11 -08:00
commit aecde27c4f
54 changed files with 1060 additions and 327 deletions


@ -330,8 +330,8 @@ static struct pci_device_id intel_stolen_ids[] __initdata = {
INTEL_I915GM_IDS(gen3_stolen_size),
INTEL_I945G_IDS(gen3_stolen_size),
INTEL_I945GM_IDS(gen3_stolen_size),
INTEL_VLV_M_IDS(gen3_stolen_size),
INTEL_VLV_D_IDS(gen3_stolen_size),
INTEL_VLV_M_IDS(gen6_stolen_size),
INTEL_VLV_D_IDS(gen6_stolen_size),
INTEL_PINEVIEW_IDS(gen3_stolen_size),
INTEL_I965G_IDS(gen3_stolen_size),
INTEL_G33_IDS(gen3_stolen_size),


@ -489,6 +489,11 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_sysfs_hotplug_event);
static void drm_sysfs_release(struct device *dev)
{
kfree(dev);
}
/**
* drm_sysfs_device_add - adds a class device to sysfs for a character driver
* @dev: DRM device to be added
@ -501,6 +506,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
int drm_sysfs_device_add(struct drm_minor *minor)
{
char *minor_str;
int r;
if (minor->type == DRM_MINOR_CONTROL)
minor_str = "controlD%d";
@ -509,14 +515,34 @@ int drm_sysfs_device_add(struct drm_minor *minor)
else
minor_str = "card%d";
minor->kdev = device_create(drm_class, minor->dev->dev,
MKDEV(DRM_MAJOR, minor->index),
minor, minor_str, minor->index);
if (IS_ERR(minor->kdev)) {
DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev));
return PTR_ERR(minor->kdev);
minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL);
if (!minor->dev) {
r = -ENOMEM;
goto error;
}
device_initialize(minor->kdev);
minor->kdev->devt = MKDEV(DRM_MAJOR, minor->index);
minor->kdev->class = drm_class;
minor->kdev->type = &drm_sysfs_device_minor;
minor->kdev->parent = minor->dev->dev;
minor->kdev->release = drm_sysfs_release;
dev_set_drvdata(minor->kdev, minor);
r = dev_set_name(minor->kdev, minor_str, minor->index);
if (r < 0)
goto error;
r = device_add(minor->kdev);
if (r < 0)
goto error;
return 0;
error:
DRM_ERROR("device create failed %d\n", r);
put_device(minor->kdev);
return r;
}
/**
@ -529,7 +555,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
void drm_sysfs_device_remove(struct drm_minor *minor)
{
if (minor->kdev)
device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index));
device_unregister(minor->kdev);
minor->kdev = NULL;
}
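
The drm_sysfs hunk above moves from device_create() to a manually initialized struct device with its own release callback, so the kdev is only freed once the last reference is dropped instead of disappearing under userspace during hotplug. A minimal sketch of that register/teardown pattern, with hypothetical my_* names standing in for the drm-specific ones:

#include <linux/device.h>
#include <linux/slab.h>

/* Release callback: frees the struct device once the last reference is gone. */
static void my_kdev_release(struct device *dev)
{
	kfree(dev);
}

static int my_minor_register(struct class *cls, struct device *parent,
			     dev_t devt, void *drvdata,
			     const char *name_fmt, int index)
{
	struct device *kdev;
	int r;

	kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
	if (!kdev)
		return -ENOMEM;

	device_initialize(kdev);          /* refcount is live from here on */
	kdev->devt = devt;
	kdev->class = cls;
	kdev->parent = parent;
	kdev->release = my_kdev_release;
	dev_set_drvdata(kdev, drvdata);

	r = dev_set_name(kdev, name_fmt, index);
	if (r < 0)
		goto error;

	r = device_add(kdev);             /* makes the device visible in sysfs */
	if (r < 0)
		goto error;

	return 0;

error:
	put_device(kdev);                 /* drops the ref; release() does the kfree */
	return r;
}

/* Teardown: device_unregister() is device_del() plus put_device(). */
static void my_minor_unregister(struct device *kdev)
{
	device_unregister(kdev);
}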


@ -383,6 +383,8 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
g2d_userptr->npages,
g2d_userptr->vma);
exynos_gem_put_vma(g2d_userptr->vma);
if (!g2d_userptr->out_of_list)
list_del_init(&g2d_userptr->list);


@ -1816,6 +1816,7 @@ struct drm_i915_file_private {
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00


@ -790,7 +790,12 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* Default to using SSC */
dev_priv->vbt.lvds_use_ssc = 1;
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
/*
* Core/SandyBridge/IvyBridge use alternative (120MHz) reference
* clock for LVDS.
*/
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev,
!HAS_PCH_SPLIT(dev));
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
for (port = PORT_A; port < I915_MAX_PORTS; port++) {


@ -1406,6 +1406,26 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
default:
break;
}
if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
/*
* This is a big fat ugly hack.
*
* Some machines in UEFI boot mode provide us a VBT that has 18
* bpp and 1.62 GHz link bandwidth for eDP, which for reasons
* unknown we fail to light up. Yet the same BIOS boots up with
* 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
* max, not what it tells us to use.
*
* Note: This will still be broken if the eDP panel is not lit
* up by the BIOS, and thus we can't get the mode at module
* load.
*/
DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
}
}
static void intel_ddi_destroy(struct drm_encoder *encoder)


@ -6518,6 +6518,9 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
{
if (!HAS_PC8(dev_priv->dev))
return;
mutex_lock(&dev_priv->pc8.lock);
__hsw_enable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
@ -6525,6 +6528,9 @@ void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
if (!HAS_PC8(dev_priv->dev))
return;
mutex_lock(&dev_priv->pc8.lock);
__hsw_disable_package_c8(dev_priv);
mutex_unlock(&dev_priv->pc8.lock);
@ -6562,6 +6568,9 @@ static void hsw_update_package_c8(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
bool allow;
if (!HAS_PC8(dev_priv->dev))
return;
if (!i915_enable_pc8)
return;
@ -6585,18 +6594,28 @@ static void hsw_update_package_c8(struct drm_device *dev)
static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
{
if (!HAS_PC8(dev_priv->dev))
return;
mutex_lock(&dev_priv->pc8.lock);
if (!dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = true;
hsw_enable_package_c8(dev_priv);
__hsw_enable_package_c8(dev_priv);
}
mutex_unlock(&dev_priv->pc8.lock);
}
static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
{
if (!HAS_PC8(dev_priv->dev))
return;
mutex_lock(&dev_priv->pc8.lock);
if (dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = false;
hsw_disable_package_c8(dev_priv);
__hsw_disable_package_c8(dev_priv);
}
mutex_unlock(&dev_priv->pc8.lock);
}
#define for_each_power_domain(domain, mask) \
@ -7184,7 +7203,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
POSTING_READ(CURCNTR(pipe));
I915_WRITE(CURBASE(pipe), base);
POSTING_READ(CURBASE(pipe));
}
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
@ -7213,7 +7234,9 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
POSTING_READ(CURCNTR_IVB(pipe));
I915_WRITE(CURBASE_IVB(pipe), base);
POSTING_READ(CURBASE_IVB(pipe));
}
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
@ -9248,8 +9271,7 @@ check_crtc_state(struct drm_device *dev)
enum pipe pipe;
if (encoder->base.crtc != &crtc->base)
continue;
if (encoder->get_config &&
encoder->get_hw_state(encoder, &pipe))
if (encoder->get_hw_state(encoder, &pipe))
encoder->get_config(encoder, &pipe_config);
}
@ -10909,8 +10931,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (encoder->get_hw_state(encoder, &pipe)) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
encoder->base.crtc = &crtc->base;
if (encoder->get_config)
encoder->get_config(encoder, &crtc->config);
encoder->get_config(encoder, &crtc->config);
} else {
encoder->base.crtc = NULL;
}


@ -1774,7 +1774,7 @@ static void intel_disable_dp(struct intel_encoder *encoder)
* ensure that we have vdd while we switch off the panel. */
ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */


@ -1625,7 +1625,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
&to_intel_crtc(enabled)->config.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->htotal;
int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
int pixel_size = enabled->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
@ -3888,7 +3888,7 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
if (IS_IVYBRIDGE(dev))
I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
else
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);


@ -902,6 +902,13 @@ intel_tv_mode_valid(struct drm_connector *connector,
}
static void
intel_tv_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static bool
intel_tv_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
@ -1621,6 +1628,7 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_ENCODER_TVDAC);
intel_encoder->compute_config = intel_tv_compute_config;
intel_encoder->get_config = intel_tv_get_config;
intel_encoder->mode_set = intel_tv_mode_set;
intel_encoder->enable = intel_enable_tv;
intel_encoder->disable = intel_disable_tv;


@ -217,6 +217,19 @@ static void gen6_force_wake_work(struct work_struct *work)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_VALLEYVIEW(dev)) {
vlv_force_wake_reset(dev_priv);
} else if (INTEL_INFO(dev)->gen >= 6) {
__gen6_gt_force_wake_reset(dev_priv);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
}
void intel_uncore_early_sanitize(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -234,19 +247,8 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
dev_priv->ellc_size = 128;
DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
}
}
static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_VALLEYVIEW(dev)) {
vlv_force_wake_reset(dev_priv);
} else if (INTEL_INFO(dev)->gen >= 6) {
__gen6_gt_force_wake_reset(dev_priv);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
intel_uncore_forcewake_reset(dev);
}
void intel_uncore_sanitize(struct drm_device *dev)


@ -56,8 +56,10 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
return -EINVAL;
}
args.ucRegIndex = buf[0];
if (num > 1)
memcpy(&out, &buf[1], num - 1);
if (num > 1) {
num--;
memcpy(&out, &buf[1], num);
}
args.lpI2CDataOut = cpu_to_le16(out);
} else {
if (num > ATOM_MAX_HW_I2C_READ) {


@ -1560,17 +1560,17 @@ u32 cik_get_xclk(struct radeon_device *rdev)
* cik_mm_rdoorbell - read a doorbell dword
*
* @rdev: radeon_device pointer
* @offset: byte offset into the aperture
* @index: doorbell index
*
* Returns the value in the doorbell aperture at the
* requested offset (CIK).
* requested doorbell index (CIK).
*/
u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index)
{
if (offset < rdev->doorbell.size) {
return readl(((void __iomem *)rdev->doorbell.ptr) + offset);
if (index < rdev->doorbell.num_doorbells) {
return readl(rdev->doorbell.ptr + index);
} else {
DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset);
DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
return 0;
}
}
@ -1579,18 +1579,18 @@ u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
* cik_mm_wdoorbell - write a doorbell dword
*
* @rdev: radeon_device pointer
* @offset: byte offset into the aperture
* @index: doorbell index
* @v: value to write
*
* Writes @v to the doorbell aperture at the
* requested offset (CIK).
* requested doorbell index (CIK).
*/
void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v)
void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v)
{
if (offset < rdev->doorbell.size) {
writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset);
if (index < rdev->doorbell.num_doorbells) {
writel(v, rdev->doorbell.ptr + index);
} else {
DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset);
DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
}
}
@ -2427,6 +2427,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 4) {
@ -2773,6 +2774,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 2) {
@ -2990,6 +2992,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else
@ -3556,17 +3559,24 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, 0);
}
void cik_semaphore_ring_emit(struct radeon_device *rdev,
bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
/* TODO: figure out why semaphore cause lockups */
#if 0
uint64_t addr = semaphore->gpu_addr;
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
return true;
#else
return false;
#endif
}
/**
@ -3609,13 +3619,8 @@ int cik_copy_cpdma(struct radeon_device *rdev,
return r;
}
if (radeon_fence_need_sync(*fence, ring->idx)) {
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
ring->idx);
radeon_fence_note_sync(*fence, ring->idx);
} else {
radeon_semaphore_free(rdev, &sem, NULL);
}
radeon_semaphore_sync_to(sem, *fence);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
@ -4052,7 +4057,7 @@ void cik_compute_ring_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
WDOORBELL32(ring->doorbell_offset, ring->wptr);
WDOORBELL32(ring->doorbell_index, ring->wptr);
}
/**
@ -4393,10 +4398,6 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
return r;
}
/* doorbell offset */
rdev->ring[idx].doorbell_offset =
(rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0;
/* init the mqd struct */
memset(buf, 0, sizeof(struct bonaire_mqd));
@ -4508,7 +4509,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
mqd->queue_state.cp_hqd_pq_doorbell_control |=
DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4);
DOORBELL_OFFSET(rdev->ring[idx].doorbell_index);
mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
mqd->queue_state.cp_hqd_pq_doorbell_control &=
~(DOORBELL_SOURCE | DOORBELL_HIT);
@ -7839,14 +7840,14 @@ int cik_init(struct radeon_device *rdev)
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
r = radeon_doorbell_get(rdev, &ring->doorbell_index);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
r = radeon_doorbell_get(rdev, &ring->doorbell_index);
if (r)
return r;


@ -130,7 +130,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
* Add a DMA semaphore packet to the ring wait on or signal
* other rings (CIK).
*/
void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@ -141,6 +141,8 @@ void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
radeon_ring_write(ring, addr & 0xfffffff8);
radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
return true;
}
/**
@ -443,13 +445,8 @@ int cik_copy_dma(struct radeon_device *rdev,
return r;
}
if (radeon_fence_need_sync(*fence, ring->idx)) {
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
ring->idx);
radeon_fence_note_sync(*fence, ring->idx);
} else {
radeon_semaphore_free(rdev, &sem, NULL);
}
radeon_semaphore_sync_to(sem, *fence);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;


@ -299,7 +299,9 @@ void cypress_program_response_times(struct radeon_device *rdev)
static int cypress_pcie_performance_request(struct radeon_device *rdev,
u8 perf_req, bool advertise)
{
#if defined(CONFIG_ACPI)
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
#endif
u32 tmp;
udelay(10);


@ -131,13 +131,8 @@ int evergreen_copy_dma(struct radeon_device *rdev,
return r;
}
if (radeon_fence_need_sync(*fence, ring->idx)) {
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
ring->idx);
radeon_fence_note_sync(*fence, ring->idx);
} else {
radeon_semaphore_free(rdev, &sem, NULL);
}
radeon_semaphore_sync_to(sem, *fence);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;


@ -3445,9 +3445,9 @@ static int ni_enable_smc_cac(struct radeon_device *rdev,
static int ni_pcie_performance_request(struct radeon_device *rdev,
u8 perf_req, bool advertise)
{
#if defined(CONFIG_ACPI)
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
#if defined(CONFIG_ACPI)
if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
(perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
if (eg_pi->pcie_performance_request_registered == false)


@ -869,13 +869,14 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
void r100_semaphore_ring_emit(struct radeon_device *rdev,
bool r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
/* Unused on older asics, since we don't have semaphores or multiple rings */
BUG();
return false;
}
int r100_copy_blit(struct radeon_device *rdev,


@ -2650,7 +2650,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
}
}
void r600_semaphore_ring_emit(struct radeon_device *rdev,
bool r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@ -2664,6 +2664,8 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
return true;
}
/**
@ -2706,13 +2708,8 @@ int r600_copy_cpdma(struct radeon_device *rdev,
return r;
}
if (radeon_fence_need_sync(*fence, ring->idx)) {
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
ring->idx);
radeon_fence_note_sync(*fence, ring->idx);
} else {
radeon_semaphore_free(rdev, &sem, NULL);
}
radeon_semaphore_sync_to(sem, *fence);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);


@ -311,7 +311,7 @@ void r600_dma_fence_ring_emit(struct radeon_device *rdev,
* Add a DMA semaphore packet to the ring wait on or signal
* other rings (r6xx-SI).
*/
void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@ -322,6 +322,8 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
radeon_ring_write(ring, addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
return true;
}
/**
@ -462,13 +464,8 @@ int r600_copy_dma(struct radeon_device *rdev,
return r;
}
if (radeon_fence_need_sync(*fence, ring->idx)) {
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
ring->idx);
radeon_fence_note_sync(*fence, ring->idx);
} else {
radeon_semaphore_free(rdev, &sem, NULL);
}
radeon_semaphore_sync_to(sem, *fence);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;


@ -348,6 +348,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, i
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_locked(struct radeon_fence *fence);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
@ -548,17 +549,20 @@ struct radeon_semaphore {
struct radeon_sa_bo *sa_bo;
signed waiters;
uint64_t gpu_addr;
struct radeon_fence *sync_to[RADEON_NUM_RINGS];
};
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore);
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
struct radeon_fence *fence);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int signaler, int waiter);
int waiting_ring);
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence);
@ -645,13 +649,15 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
/*
* GPU doorbell structures, functions & helpers
*/
#define RADEON_MAX_DOORBELLS 1024 /* Reserve at most 1024 doorbell slots for radeon-owned rings. */
struct radeon_doorbell {
u32 num_pages;
bool free[1024];
/* doorbell mmio */
resource_size_t base;
resource_size_t size;
void __iomem *ptr;
resource_size_t base;
resource_size_t size;
u32 __iomem *ptr;
u32 num_doorbells; /* Number of doorbells actually reserved for radeon. */
unsigned long used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)];
};
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
@ -765,7 +771,6 @@ struct radeon_ib {
struct radeon_fence *fence;
struct radeon_vm *vm;
bool is_const_ib;
struct radeon_fence *sync_to[RADEON_NUM_RINGS];
struct radeon_semaphore *semaphore;
};
@ -799,8 +804,7 @@ struct radeon_ring {
u32 pipe;
u32 queue;
struct radeon_bo *mqd_obj;
u32 doorbell_page_num;
u32 doorbell_offset;
u32 doorbell_index;
unsigned wptr_offs;
};
@ -921,7 +925,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
@ -1638,7 +1641,7 @@ struct radeon_asic_ring {
/* command emmit functions */
void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@ -1979,6 +1982,7 @@ struct cik_asic {
unsigned tile_config;
uint32_t tile_mode_array[32];
uint32_t macrotile_mode_array[16];
};
union radeon_asic_config {
@ -2239,8 +2243,8 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset);
void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
/*
* Cast helper
@ -2303,8 +2307,8 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
#define RDOORBELL32(offset) cik_mm_rdoorbell(rdev, (offset))
#define WDOORBELL32(offset, v) cik_mm_wdoorbell(rdev, (offset), (v))
#define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index))
#define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v))
/*
* Indirect registers accessor


@ -2015,6 +2015,8 @@ static struct radeon_asic ci_asic = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
},
@ -2114,6 +2116,8 @@ static struct radeon_asic kv_asic = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
},


@ -80,7 +80,7 @@ int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void r100_semaphore_ring_emit(struct radeon_device *rdev,
bool r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
@ -313,13 +313,13 @@ int r600_cs_parse(struct radeon_cs_parser *p);
int r600_dma_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void r600_semaphore_ring_emit(struct radeon_device *rdev,
bool r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
@ -566,10 +566,6 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
*/
void cayman_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
@ -697,7 +693,7 @@ void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
@ -717,7 +713,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void cik_fence_compute_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void cik_semaphore_ring_emit(struct radeon_device *rdev,
bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
@ -807,7 +803,7 @@ void uvd_v1_0_stop(struct radeon_device *rdev);
int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
@ -819,7 +815,7 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
/* uvd v3.1 */
void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);


@ -159,7 +159,8 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
if (!p->relocs[i].robj)
continue;
radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj);
radeon_semaphore_sync_to(p->ib.semaphore,
p->relocs[i].robj->tbo.sync_obj);
}
}
@ -411,9 +412,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
goto out;
}
radeon_cs_sync_rings(parser);
radeon_ib_sync_to(&parser->ib, vm->fence);
radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id(
rdev, vm, parser->ring));
radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
radeon_semaphore_sync_to(parser->ib.semaphore,
radeon_vm_grab_id(rdev, vm, parser->ring));
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {


@ -251,28 +251,23 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
*/
int radeon_doorbell_init(struct radeon_device *rdev)
{
int i;
/* doorbell bar mapping */
rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
/* limit to 4 MB for now */
if (rdev->doorbell.size > (4 * 1024 * 1024))
rdev->doorbell.size = 4 * 1024 * 1024;
rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
if (rdev->doorbell.num_doorbells == 0)
return -EINVAL;
rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size);
rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
if (rdev->doorbell.ptr == NULL) {
return -ENOMEM;
}
DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE;
memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
for (i = 0; i < rdev->doorbell.num_pages; i++) {
rdev->doorbell.free[i] = true;
}
return 0;
}
@ -290,40 +285,38 @@ void radeon_doorbell_fini(struct radeon_device *rdev)
}
/**
* radeon_doorbell_get - Allocate a doorbell page
* radeon_doorbell_get - Allocate a doorbell entry
*
* @rdev: radeon_device pointer
* @doorbell: doorbell page number
* @doorbell: doorbell index
*
* Allocate a doorbell page for use by the driver (all asics).
* Allocate a doorbell for use by the driver (all asics).
* Returns 0 on success or -EINVAL on failure.
*/
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
int i;
for (i = 0; i < rdev->doorbell.num_pages; i++) {
if (rdev->doorbell.free[i]) {
rdev->doorbell.free[i] = false;
*doorbell = i;
return 0;
}
unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
if (offset < rdev->doorbell.num_doorbells) {
__set_bit(offset, rdev->doorbell.used);
*doorbell = offset;
return 0;
} else {
return -EINVAL;
}
return -EINVAL;
}
/**
* radeon_doorbell_free - Free a doorbell page
* radeon_doorbell_free - Free a doorbell entry
*
* @rdev: radeon_device pointer
* @doorbell: doorbell page number
* @doorbell: doorbell index
*
* Free a doorbell page allocated for use by the driver (all asics)
* Free a doorbell allocated for use by the driver (all asics)
*/
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
if (doorbell < rdev->doorbell.num_pages)
rdev->doorbell.free[doorbell] = true;
if (doorbell < rdev->doorbell.num_doorbells)
__clear_bit(doorbell, rdev->doorbell.used);
}
/*


@ -76,9 +76,10 @@
* 2.32.0 - new info request for rings working
* 2.33.0 - Add SI tiling mode array query
* 2.34.0 - Add CIK tiling mode array query
* 2.35.0 - Add CIK macrotile mode array query
*/
#define KMS_DRIVER_MAJOR 2
#define KMS_DRIVER_MINOR 34
#define KMS_DRIVER_MINOR 35
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);


@ -471,6 +471,36 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
return 0;
}
/**
* radeon_fence_wait_locked - wait for a fence to signal
*
* @fence: radeon fence object
*
* Wait for the requested fence to signal (all asics).
* Returns 0 if the fence has passed, error for all other cases.
*/
int radeon_fence_wait_locked(struct radeon_fence *fence)
{
uint64_t seq[RADEON_NUM_RINGS] = {};
int r;
if (fence == NULL) {
WARN(1, "Querying an invalid fence : %p !\n", fence);
return -EINVAL;
}
seq[fence->ring] = fence->seq;
if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
return 0;
r = radeon_fence_wait_seq(fence->rdev, seq, false, false);
if (r)
return r;
fence->seq = RADEON_FENCE_SIGNALED_SEQ;
return 0;
}
/**
* radeon_fence_wait_next_locked - wait for the next fence to signal
*


@ -651,7 +651,7 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
0, pd_entries, 0, 0);
radeon_ib_sync_to(&ib, vm->fence);
radeon_semaphore_sync_to(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_ib_free(rdev, &ib);
@ -1209,6 +1209,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
return -ENOMEM;
r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
if (r)
return r;
ib.length_dw = 0;
r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
@ -1220,7 +1222,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
addr, radeon_vm_page_flags(bo_va->flags));
radeon_ib_sync_to(&ib, vm->fence);
radeon_semaphore_sync_to(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_ib_free(rdev, &ib);


@ -340,7 +340,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
break;
case RADEON_INFO_BACKEND_MAP:
if (rdev->family >= CHIP_BONAIRE)
return -EINVAL;
*value = rdev->config.cik.backend_map;
else if (rdev->family >= CHIP_TAHITI)
*value = rdev->config.si.backend_map;
else if (rdev->family >= CHIP_CAYMAN)
@ -449,6 +449,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
if (rdev->family >= CHIP_BONAIRE) {
value = rdev->config.cik.macrotile_mode_array;
value_size = sizeof(uint32_t)*16;
} else {
DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
return -EINVAL;
}
break;
case RADEON_INFO_SI_CP_DMA_COMPUTE:
*value = 1;
break;
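
For illustration only, a hypothetical userspace sketch of issuing the new query through the existing radeon info ioctl; it assumes a radeon_drm.h that already carries the RADEON_INFO_CIK_MACROTILE_MODE_ARRAY define added by this series, and query_macrotile_modes is an invented name:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>   /* assumed install path for the updated header */

static int query_macrotile_modes(int drm_fd, uint32_t modes[16])
{
	struct drm_radeon_info info;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_CIK_MACROTILE_MODE_ARRAY;
	/* The kernel copy_to_user()s 16 dwords to the pointer passed in value. */
	info.value = (uint64_t)(uintptr_t)modes;

	return ioctl(drm_fd, DRM_IOCTL_RADEON_INFO, &info);
}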


@ -422,6 +422,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
/* Pin framebuffer & get tilling informations */
obj = radeon_fb->obj;
rbo = gem_to_radeon_bo(obj);
retry:
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@ -430,6 +431,33 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
&base);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
/* On old GPUs like RN50 with little VRAM, pinning can fail because
 * the current fb is taking all the space needed. So instead of
 * unpinning the old buffer after pinning the new one, first unpin
 * the old one and then retry pinning the new one.
 *
 * As only the master can set a mode, only the master can pin, and
 * it is unlikely the master client will race with itself, especially
 * on those old GPUs with a single crtc.
 *
 * We don't shut down the display controller because the new buffer
 * will end up in the same spot.
 */
if (!atomic && fb && fb != crtc->fb) {
struct radeon_bo *old_rbo;
unsigned long nsize, osize;
old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
osize = radeon_bo_size(old_rbo);
nsize = radeon_bo_size(rbo);
if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
radeon_bo_unpin(old_rbo);
radeon_bo_unreserve(old_rbo);
fb = NULL;
goto retry;
}
}
return -EINVAL;
}
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);


@ -1252,7 +1252,6 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_RS780:
case CHIP_RS880:
case CHIP_CAYMAN:
case CHIP_ARUBA:
case CHIP_BONAIRE:
case CHIP_KABINI:
case CHIP_KAVERI:
@ -1284,6 +1283,7 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
case CHIP_ARUBA:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:


@ -61,7 +61,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size)
{
int i, r;
int r;
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
if (r) {
@ -87,8 +87,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
}
ib->is_const_ib = false;
for (i = 0; i < RADEON_NUM_RINGS; ++i)
ib->sync_to[i] = NULL;
return 0;
}
@ -108,25 +106,6 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_fence_unref(&ib->fence);
}
/**
* radeon_ib_sync_to - sync to fence before executing the IB
*
* @ib: IB object to add fence to
* @fence: fence to sync to
*
* Sync to the fence before executing the IB
*/
void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
{
struct radeon_fence *other;
if (!fence)
return;
other = ib->sync_to[fence->ring];
ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
}
/**
* radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
*
@ -151,8 +130,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
bool need_sync = false;
int i, r = 0;
int r = 0;
if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */
@ -166,19 +144,15 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
struct radeon_fence *fence = ib->sync_to[i];
if (radeon_fence_need_sync(fence, ib->ring)) {
need_sync = true;
radeon_semaphore_sync_rings(rdev, ib->semaphore,
fence->ring, ib->ring);
radeon_fence_note_sync(fence, ib->ring);
}
}
/* immediately free semaphore when we don't need to sync */
if (!need_sync) {
radeon_semaphore_free(rdev, &ib->semaphore, NULL);
/* sync with other rings */
r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
if (r) {
dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
radeon_ring_unlock_undo(rdev, ring);
return r;
}
/* if we can't remember our last VM flush then flush now! */
/* XXX figure out why we have to flush for every IB */
if (ib->vm /*&& !ib->vm->last_flush*/) {


@ -29,12 +29,12 @@
*/
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_trace.h"
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
int r;
int i, r;
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
@ -50,55 +50,122 @@ int radeon_semaphore_create(struct radeon_device *rdev,
(*semaphore)->waiters = 0;
(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
for (i = 0; i < RADEON_NUM_RINGS; ++i)
(*semaphore)->sync_to[i] = NULL;
return 0;
}
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
struct radeon_semaphore *semaphore)
{
--semaphore->waiters;
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
struct radeon_ring *ring = &rdev->ring[ridx];
trace_radeon_semaphore_signale(ridx, semaphore);
if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
--semaphore->waiters;
/* for debugging lockup only, used by sysfs debug files */
ring->last_semaphore_signal_addr = semaphore->gpu_addr;
return true;
}
return false;
}
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
struct radeon_semaphore *semaphore)
{
++semaphore->waiters;
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
struct radeon_ring *ring = &rdev->ring[ridx];
trace_radeon_semaphore_wait(ridx, semaphore);
if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
++semaphore->waiters;
/* for debugging lockup only, used by sysfs debug files */
ring->last_semaphore_wait_addr = semaphore->gpu_addr;
return true;
}
return false;
}
/* caller must hold ring lock */
/**
* radeon_semaphore_sync_to - use the semaphore to sync to a fence
*
* @semaphore: semaphore object to add fence to
* @fence: fence to sync to
*
* Sync to the fence using this semaphore object
*/
void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
struct radeon_fence *fence)
{
struct radeon_fence *other;
if (!fence)
return;
other = semaphore->sync_to[fence->ring];
semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
}
/**
* radeon_semaphore_sync_rings - sync ring to all registered fences
*
* @rdev: radeon_device pointer
* @semaphore: semaphore object to use for sync
* @ring: ring that needs sync
*
* Ensure that all registered fences are signaled before letting
* the ring continue. The caller must hold the ring lock.
*/
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int signaler, int waiter)
int ring)
{
int r;
int i, r;
/* no need to signal and wait on the same ring */
if (signaler == waiter) {
return 0;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
struct radeon_fence *fence = semaphore->sync_to[i];
/* check if we really need to sync */
if (!radeon_fence_need_sync(fence, ring))
continue;
/* prevent GPU deadlocks */
if (!rdev->ring[i].ready) {
dev_err(rdev->dev, "Syncing to a disabled ring!");
return -EINVAL;
}
/* allocate enough space for sync command */
r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
if (r) {
return r;
}
/* emit the signal semaphore */
if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
/* signaling wasn't successful wait manually */
radeon_ring_undo(&rdev->ring[i]);
radeon_fence_wait_locked(fence);
continue;
}
/* we assume caller has already allocated space on waiters ring */
if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
/* waiting wasn't successful wait manually */
radeon_ring_undo(&rdev->ring[i]);
radeon_fence_wait_locked(fence);
continue;
}
radeon_ring_commit(rdev, &rdev->ring[i]);
radeon_fence_note_sync(fence, ring);
}
/* prevent GPU deadlocks */
if (!rdev->ring[signaler].ready) {
dev_err(rdev->dev, "Trying to sync to a disabled ring!");
return -EINVAL;
}
r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8);
if (r) {
return r;
}
radeon_semaphore_emit_signal(rdev, signaler, semaphore);
radeon_ring_commit(rdev, &rdev->ring[signaler]);
/* we assume caller has already allocated space on waiters ring */
radeon_semaphore_emit_wait(rdev, waiter, semaphore);
/* for debugging lockup only, used by sysfs debug files */
rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
return 0;
}


@ -111,6 +111,42 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
TP_ARGS(dev, seqno)
);
DECLARE_EVENT_CLASS(radeon_semaphore_request,
TP_PROTO(int ring, struct radeon_semaphore *sem),
TP_ARGS(ring, sem),
TP_STRUCT__entry(
__field(int, ring)
__field(signed, waiters)
__field(uint64_t, gpu_addr)
),
TP_fast_assign(
__entry->ring = ring;
__entry->waiters = sem->waiters;
__entry->gpu_addr = sem->gpu_addr;
),
TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
__entry->waiters, __entry->gpu_addr)
);
DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_signale,
TP_PROTO(int ring, struct radeon_semaphore *sem),
TP_ARGS(ring, sem)
);
DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait,
TP_PROTO(int ring, struct radeon_semaphore *sem),
TP_ARGS(ring, sem)
);
#endif
/* This part must be outside protection */


@ -66,13 +66,8 @@ int rv770_copy_dma(struct radeon_device *rdev,
return r;
}
if (radeon_fence_need_sync(*fence, ring->idx)) {
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
ring->idx);
radeon_fence_note_sync(*fence, ring->idx);
} else {
radeon_semaphore_free(rdev, &sem, NULL);
}
radeon_semaphore_sync_to(sem, *fence);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;


@ -195,13 +195,8 @@ int si_copy_dma(struct radeon_device *rdev,
return r;
}
if (radeon_fence_need_sync(*fence, ring->idx)) {
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
ring->idx);
radeon_fence_note_sync(*fence, ring->idx);
} else {
radeon_semaphore_free(rdev, &sem, NULL);
}
radeon_semaphore_sync_to(sem, *fence);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;


@ -1873,9 +1873,9 @@ int trinity_dpm_init(struct radeon_device *rdev)
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
pi->enable_gfx_clock_gating = true;
pi->enable_mg_clock_gating = true;
pi->enable_gfx_dynamic_mgpg = true; /* ??? */
pi->override_dynamic_mgpg = true;
pi->enable_mg_clock_gating = false;
pi->enable_gfx_dynamic_mgpg = false;
pi->override_dynamic_mgpg = false;
pi->enable_auto_thermal_throttling = true;
pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */
pi->uvd_dpm = true; /* ??? */


@ -357,7 +357,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
*
* Emit a semaphore command (either wait or signal) to the UVD ring.
*/
void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@ -372,6 +372,8 @@ void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, emit_wait ? 1 : 0);
return true;
}
/**


@ -37,7 +37,7 @@
*
* Emit a semaphore command (either wait or signal) to the UVD ring.
*/
void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@ -52,4 +52,6 @@ void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
return true;
}


@ -151,7 +151,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
atomic_dec(&bo->glob->bo_count);
if (bo->resv == &bo->ttm_resv)
reservation_object_fini(&bo->ttm_resv);
mutex_destroy(&bo->wu_mutex);
if (bo->destroy)
bo->destroy(bo);
else {
@ -1123,6 +1123,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
INIT_LIST_HEAD(&bo->io_reserve_lru);
mutex_init(&bo->wu_mutex);
bo->bdev = bdev;
bo->glob = bdev->glob;
bo->type = type;
@ -1704,3 +1705,35 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
/**
* ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
* unreserved
*
* @bo: Pointer to buffer
*/
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
int ret;
/*
* In the absence of a wait_unlocked API,
* use the bo::wu_mutex to avoid triggering livelocks due to
* concurrent use of this function. Note that this use of
* bo::wu_mutex can go away if we change locking order to
* mmap_sem -> bo::reserve.
*/
ret = mutex_lock_interruptible(&bo->wu_mutex);
if (unlikely(ret != 0))
return -ERESTARTSYS;
if (!ww_mutex_is_locked(&bo->resv->lock))
goto out_unlock;
ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL);
if (unlikely(ret != 0))
goto out_unlock;
ww_mutex_unlock(&bo->resv->lock);
out_unlock:
mutex_unlock(&bo->wu_mutex);
return ret;
}


@ -350,10 +350,13 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
goto out2;
/*
* Move nonexistent data. NOP.
* Don't move nonexistent data. Clear destination instead.
*/
if (old_iomap == NULL && ttm == NULL)
if (old_iomap == NULL &&
(ttm == NULL || ttm->state == tt_unpopulated)) {
memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
goto out2;
}
/*
* TTM might be null for moves within the same region.


@ -107,13 +107,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/*
* Work around locking order reversal in fault / nopfn
* between mmap_sem and bo_reserve: Perform a trylock operation
* for reserve, and if it fails, retry the fault after scheduling.
* for reserve, and if it fails, retry the fault after waiting
* for the buffer to become unreserved.
*/
ret = ttm_bo_reserve(bo, true, true, false, 0);
ret = ttm_bo_reserve(bo, true, true, false, NULL);
if (unlikely(ret != 0)) {
if (ret == -EBUSY)
set_need_resched();
if (ret != -EBUSY)
return VM_FAULT_NOPAGE;
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
up_read(&vma->vm_mm->mmap_sem);
(void) ttm_bo_wait_unreserved(bo);
}
return VM_FAULT_RETRY;
}
/*
* If we'd want to change locking order to
* mmap_sem -> bo::reserve, we'd use a blocking reserve here
* instead of retrying the fault...
*/
return VM_FAULT_NOPAGE;
}
@ -123,7 +138,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
case 0:
break;
case -EBUSY:
set_need_resched();
case -ERESTARTSYS:
retval = VM_FAULT_NOPAGE;
goto out_unlock;


@ -32,8 +32,7 @@
#include <linux/sched.h>
#include <linux/module.h>
static void ttm_eu_backoff_reservation_locked(struct list_head *list,
struct ww_acquire_ctx *ticket)
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
struct ttm_validate_buffer *entry;
@ -93,8 +92,9 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
spin_lock(&glob->lru_lock);
ttm_eu_backoff_reservation_locked(list, ticket);
ww_acquire_fini(ticket);
ttm_eu_backoff_reservation_locked(list);
if (ticket)
ww_acquire_fini(ticket);
spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@ -130,7 +130,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
ww_acquire_init(ticket, &reservation_ww_class);
if (ticket)
ww_acquire_init(ticket, &reservation_ww_class);
retry:
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
@ -139,16 +140,17 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
if (entry->reserved)
continue;
ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
ticket);
if (ret == -EDEADLK) {
/* uh oh, we lost out, drop every reservation and try
* to only reserve this buffer, then start over if
* this succeeds.
*/
BUG_ON(ticket == NULL);
spin_lock(&glob->lru_lock);
ttm_eu_backoff_reservation_locked(list, ticket);
ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
@ -175,7 +177,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
}
}
ww_acquire_done(ticket);
if (ticket)
ww_acquire_done(ticket);
spin_lock(&glob->lru_lock);
ttm_eu_del_from_lru_locked(list);
spin_unlock(&glob->lru_lock);
@ -184,12 +187,14 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
err:
spin_lock(&glob->lru_lock);
ttm_eu_backoff_reservation_locked(list, ticket);
ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
err_fini:
ww_acquire_done(ticket);
ww_acquire_fini(ticket);
if (ticket) {
ww_acquire_done(ticket);
ww_acquire_fini(ticket);
}
return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@ -224,7 +229,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
}
spin_unlock(&bdev->fence_lock);
spin_unlock(&glob->lru_lock);
ww_acquire_fini(ticket);
if (ticket)
ww_acquire_fini(ticket);
list_for_each_entry(entry, list, head) {
if (entry->old_sync_obj)
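
A hedged sketch of what the NULL-ticket mode above permits for a caller such as vmwgfx: reserving a one-entry validation list without a ww_acquire_ctx. The two-argument helper signatures are assumed from this kernel's ttm_execbuf_util.h, and the my_* / sync_obj names are placeholders:

#include <drm/ttm/ttm_execbuf_util.h>
#include <linux/list.h>
#include <linux/string.h>

static int my_reserve_without_ticket(struct ttm_buffer_object *bo,
				     void *sync_obj)
{
	struct ttm_validate_buffer val;
	struct list_head list;
	int ret;

	INIT_LIST_HEAD(&list);
	memset(&val, 0, sizeof(val));
	val.bo = bo;
	list_add(&val.head, &list);

	/* With a NULL ticket the helper reserves without blocking on
	 * contended buffers, since ww_mutex deadlock avoidance isn't
	 * available; failures simply propagate to the caller. */
	ret = ttm_eu_reserve_buffers(NULL, &list);
	if (ret)
		return ret;

	/* ... validate the buffer and queue work referencing it ... */

	/* Fence and unreserve; the NULL ticket is accepted here as well. */
	ttm_eu_fence_buffer_objects(NULL, &list, sync_obj);
	return 0;
}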


@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -26,6 +26,12 @@
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*
* While no substantial code is shared, the prime code is inspired by
* drm_prime.c, with
* Authors:
* Dave Airlie <airlied@redhat.com>
* Rob Clark <rob.clark@linaro.org>
*/
/** @file ttm_ref_object.c
*
@ -34,6 +40,7 @@
* and release on file close.
*/
/**
* struct ttm_object_file
*
@ -84,6 +91,9 @@ struct ttm_object_device {
struct drm_open_hash object_hash;
atomic_t object_count;
struct ttm_mem_global *mem_glob;
struct dma_buf_ops ops;
void (*dmabuf_release)(struct dma_buf *dma_buf);
size_t dma_buf_size;
};
/**
@ -116,6 +126,8 @@ struct ttm_ref_object {
struct ttm_object_file *tfile;
};
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
@ -416,9 +428,10 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
}
EXPORT_SYMBOL(ttm_object_file_init);
struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
*mem_glob,
unsigned int hash_order)
struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
unsigned int hash_order,
const struct dma_buf_ops *ops)
{
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
int ret;
@ -430,10 +443,17 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0);
ret = drm_ht_create(&tdev->object_hash, hash_order);
if (ret != 0)
goto out_no_object_hash;
if (likely(ret == 0))
return tdev;
tdev->ops = *ops;
tdev->dmabuf_release = tdev->ops.release;
tdev->ops.release = ttm_prime_dmabuf_release;
tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
ttm_round_pot(sizeof(struct file));
return tdev;
out_no_object_hash:
kfree(tdev);
return NULL;
}
@ -452,3 +472,225 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);
/**
* get_dma_buf_unless_doomed - get a dma_buf reference if possible.
*
* @dmabuf: Non-refcounted pointer to a struct dma-buf.
*
* Obtain a file reference from a lookup structure that doesn't refcount
* the file, but synchronizes with its release method to make sure it has
* not been freed yet. See for example kref_get_unless_zero documentation.
* Returns true if refcounting succeeds, false otherwise.
*
* Nobody really wants this as a public API yet, so let it mature here
* for some time...
*/
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
}
/**
* ttm_prime_refcount_release - refcount release method for a prime object.
*
* @p_base: Pointer to ttm_base_object pointer.
*
* This is a wrapper that calls the refcount_release function of the
* underlying object. At the same time it cleans up the prime object.
* This function is called when all references to the base object we
* derive from are gone.
*/
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct ttm_prime_object *prime;
*p_base = NULL;
prime = container_of(base, struct ttm_prime_object, base);
BUG_ON(prime->dma_buf != NULL);
mutex_destroy(&prime->mutex);
if (prime->refcount_release)
prime->refcount_release(&base);
}
/**
* ttm_prime_dmabuf_release - Release method for the dma-bufs we export
*
* @dma_buf: The struct dma_buf being released.
*
* This function first calls the dma_buf release method the driver
* provides. Then it cleans up our dma_buf pointer used for lookup,
* and finally releases the reference the dma_buf has on our base
* object.
*/
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
struct ttm_prime_object *prime =
(struct ttm_prime_object *) dma_buf->priv;
struct ttm_base_object *base = &prime->base;
struct ttm_object_device *tdev = base->tfile->tdev;
if (tdev->dmabuf_release)
tdev->dmabuf_release(dma_buf);
mutex_lock(&prime->mutex);
if (prime->dma_buf == dma_buf)
prime->dma_buf = NULL;
mutex_unlock(&prime->mutex);
ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
ttm_base_object_unref(&base);
}
/**
* ttm_prime_fd_to_handle - Get a base object handle from a prime fd
*
* @tfile: A struct ttm_object_file identifying the caller.
* @fd: The prime / dmabuf fd.
* @handle: The returned handle.
*
* This function returns a handle to an object that previously exported
* a dma-buf. Note that we don't handle imports yet, because we simply
* have no consumers of that implementation.
*/
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
int fd, u32 *handle)
{
struct ttm_object_device *tdev = tfile->tdev;
struct dma_buf *dma_buf;
struct ttm_prime_object *prime;
struct ttm_base_object *base;
int ret;
dma_buf = dma_buf_get(fd);
if (IS_ERR(dma_buf))
return PTR_ERR(dma_buf);
if (dma_buf->ops != &tdev->ops) {
/* drop the reference taken by dma_buf_get() */
dma_buf_put(dma_buf);
return -ENOSYS;
}
prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base;
*handle = base->hash.key;
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
dma_buf_put(dma_buf);
return ret;
}
EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
/**
* ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
*
* @tfile: Struct ttm_object_file identifying the caller.
* @handle: Handle to the object we're exporting from.
* @flags: flags for dma-buf creation. We just pass them on.
* @prime_fd: The returned file descriptor.
*
*/
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
uint32_t handle, uint32_t flags,
int *prime_fd)
{
struct ttm_object_device *tdev = tfile->tdev;
struct ttm_base_object *base;
struct dma_buf *dma_buf;
struct ttm_prime_object *prime;
int ret;
base = ttm_base_object_lookup(tfile, handle);
if (unlikely(base == NULL ||
base->object_type != ttm_prime_type)) {
ret = -ENOENT;
goto out_unref;
}
prime = container_of(base, struct ttm_prime_object, base);
if (unlikely(!base->shareable)) {
ret = -EPERM;
goto out_unref;
}
ret = mutex_lock_interruptible(&prime->mutex);
if (unlikely(ret != 0)) {
ret = -ERESTARTSYS;
goto out_unref;
}
dma_buf = prime->dma_buf;
if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
/*
* Need to create a new dma_buf, with memory accounting.
*/
ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
false, true);
if (unlikely(ret != 0)) {
mutex_unlock(&prime->mutex);
goto out_unref;
}
dma_buf = dma_buf_export(prime, &tdev->ops,
prime->size, flags);
if (IS_ERR(dma_buf)) {
ret = PTR_ERR(dma_buf);
ttm_mem_global_free(tdev->mem_glob,
tdev->dma_buf_size);
mutex_unlock(&prime->mutex);
goto out_unref;
}
/*
* dma_buf has taken the base object reference
*/
base = NULL;
prime->dma_buf = dma_buf;
}
mutex_unlock(&prime->mutex);
ret = dma_buf_fd(dma_buf, flags);
if (ret >= 0) {
*prime_fd = ret;
ret = 0;
} else
dma_buf_put(dma_buf);
out_unref:
if (base)
ttm_base_object_unref(&base);
return ret;
}
EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
/**
* ttm_prime_object_init - Initialize a ttm_prime_object
*
* @tfile: struct ttm_object_file identifying the caller
* @size: The size of the dma_bufs we export.
* @prime: The object to be initialized.
* @shareable: See ttm_base_object_init
* @type: See ttm_base_object_init
* @refcount_release: See ttm_base_object_init
* @ref_obj_release: See ttm_base_object_init
*
* Initializes an object which is compatible with the drm_prime model
* for data sharing between processes and devices.
*/
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
struct ttm_prime_object *prime, bool shareable,
enum ttm_object_type type,
void (*refcount_release) (struct ttm_base_object **),
void (*ref_obj_release) (struct ttm_base_object *,
enum ttm_ref_type ref_type))
{
mutex_init(&prime->mutex);
prime->size = PAGE_ALIGN(size);
prime->real_type = type;
prime->dma_buf = NULL;
prime->refcount_release = refcount_release;
return ttm_base_object_init(tfile, &prime->base, shareable,
ttm_prime_type,
ttm_prime_refcount_release,
ref_obj_release);
}
EXPORT_SYMBOL(ttm_prime_object_init);
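
To make the new helpers concrete, below is a condensed driver-side sketch (hypothetical, not part of this patch; the my_* names are invented and the vmwgfx hunks further down are the real consumer). The driver hands its dma_buf_ops to ttm_object_device_init(), embeds a struct ttm_prime_object in its user-visible buffer object, and turns handles into dma-buf fds with ttm_prime_handle_to_fd():

#include <linux/dma-buf.h>
#include <drm/ttm/ttm_object.h>

/* Hypothetical driver-side wiring; all my_* names are invented. */
struct my_device {
	struct ttm_object_device *tdev;
};

struct my_user_bo {
	struct ttm_prime_object prime;	/* embedded for prime export */
	/* driver-private members ... */
};

static void my_bo_release(struct ttm_base_object **p_base)
{
	/* free the driver object once the last reference is gone */
}

static int my_device_init(struct my_device *mdev,
			  struct ttm_mem_global *mem_glob,
			  const struct dma_buf_ops *ops)
{
	/* The object device wraps the driver's dma_buf release hook
	 * with ttm_prime_dmabuf_release() behind the scenes. */
	mdev->tdev = ttm_object_device_init(mem_glob, 12, ops);
	return mdev->tdev ? 0 : -ENOMEM;
}

static int my_export_bo(struct ttm_object_file *tfile, struct my_user_bo *bo,
			size_t size, uint32_t flags, int *prime_fd)
{
	int ret;

	ret = ttm_prime_object_init(tfile, size, &bo->prime, true,
				    ttm_buffer_type, &my_bo_release, NULL);
	if (unlikely(ret != 0))
		return ret;

	/* The dma_buf itself is created lazily, with memory accounting. */
	return ttm_prime_handle_to_fd(tfile, bo->prime.base.hash.key,
				      flags, prime_fd);
}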

View file

@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
vmwgfx_surface.o
vmwgfx_surface.o vmwgfx_prime.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o

View file

@ -677,7 +677,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
dev_priv->tdev = ttm_object_device_init
(dev_priv->mem_global_ref.object, 12);
(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
if (unlikely(dev_priv->tdev == NULL)) {
DRM_ERROR("Unable to initialize TTM object management.\n");
@ -1210,7 +1210,7 @@ static const struct file_operations vmwgfx_driver_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_MODESET,
DRIVER_MODESET | DRIVER_PRIME,
.load = vmw_driver_load,
.unload = vmw_driver_unload,
.lastclose = vmw_lastclose,
@ -1235,6 +1235,9 @@ static struct drm_driver driver = {
.dumb_map_offset = vmw_dumb_map_offset,
.dumb_destroy = vmw_dumb_destroy,
.prime_fd_to_handle = vmw_prime_fd_to_handle,
.prime_handle_to_fd = vmw_prime_handle_to_fd,
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,

View file

@ -818,6 +818,20 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
/**
* Prime - vmwgfx_prime.c
*/
extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv,
int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t handle, uint32_t flags,
int *prime_fd);
/**
* Inline helper functions
*/

View file

@ -0,0 +1,137 @@
/**************************************************************************
*
* Copyright © 2013 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors:
* Thomas Hellstrom <thellstrom@vmware.com>
*
*/
#include "vmwgfx_drv.h"
#include <linux/dma-buf.h>
#include <drm/ttm/ttm_object.h>
/*
* DMA-BUF attach- and mapping methods. No need to implement
* these until we have other virtual devices use them.
*/
static int vmw_prime_map_attach(struct dma_buf *dma_buf,
struct device *target_dev,
struct dma_buf_attachment *attach)
{
return -ENOSYS;
}
static void vmw_prime_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
}
static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
return ERR_PTR(-ENOSYS);
}
static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgb,
enum dma_data_direction dir)
{
}
static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf)
{
return NULL;
}
static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
}
static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
unsigned long page_num)
{
return NULL;
}
static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
unsigned long page_num, void *addr)
{
}
static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
unsigned long page_num)
{
return NULL;
}
static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf,
unsigned long page_num, void *addr)
{
}
static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf,
struct vm_area_struct *vma)
{
WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n");
return -ENOSYS;
}
const struct dma_buf_ops vmw_prime_dmabuf_ops = {
.attach = vmw_prime_map_attach,
.detach = vmw_prime_map_detach,
.map_dma_buf = vmw_prime_map_dma_buf,
.unmap_dma_buf = vmw_prime_unmap_dma_buf,
.release = NULL,
.kmap = vmw_prime_dmabuf_kmap,
.kmap_atomic = vmw_prime_dmabuf_kmap_atomic,
.kunmap = vmw_prime_dmabuf_kunmap,
.kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
.mmap = vmw_prime_dmabuf_mmap,
.vmap = vmw_prime_dmabuf_vmap,
.vunmap = vmw_prime_dmabuf_vunmap,
};
int vmw_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv,
int fd, u32 *handle)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
return ttm_prime_fd_to_handle(tfile, fd, handle);
}
int vmw_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t handle, uint32_t flags,
int *prime_fd)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
}
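
These two wrappers are what the generic DRM prime ioctls end up calling once DRIVER_PRIME is set in vmwgfx_drv.c above. For orientation only, a hypothetical userspace round trip through the standard libdrm helpers might look like this (sketch, not part of the patch):

/* Userspace sketch; requires libdrm for drmPrimeHandleToFD()/FDToHandle(). */
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>
#include <xf86drm.h>

static int prime_roundtrip(int drm_fd, uint32_t handle)
{
	uint32_t handle2;
	int prime_fd, ret;

	/* vmw_prime_handle_to_fd() -> ttm_prime_handle_to_fd() */
	ret = drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, &prime_fd);
	if (ret)
		return ret;

	/* vmw_prime_fd_to_handle() -> ttm_prime_fd_to_handle(); works only
	 * because the fd was exported by the same ttm_object_device. */
	ret = drmPrimeFDToHandle(drm_fd, prime_fd, &handle2);
	close(prime_fd);
	return ret;
}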

View file

@ -35,7 +35,7 @@
#define VMW_RES_EVICT_ERR_COUNT 10
struct vmw_user_dma_buffer {
struct ttm_base_object base;
struct ttm_prime_object prime;
struct vmw_dma_buffer dma;
};
@ -297,7 +297,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
if (unlikely(base == NULL))
return -EINVAL;
if (unlikely(base->object_type != converter->object_type))
if (unlikely(ttm_base_object_type(base) != converter->object_type))
goto out_bad_resource;
res = converter->base_obj_to_res(base);
@ -387,7 +387,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
ttm_base_object_kfree(vmw_user_bo, base);
ttm_prime_object_kfree(vmw_user_bo, prime);
}
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
@ -401,7 +401,8 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
if (unlikely(base == NULL))
return;
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
prime.base);
bo = &vmw_user_bo->dma.base;
ttm_bo_unref(&bo);
}
@ -442,18 +443,19 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
return ret;
tmp = ttm_bo_reference(&user_bo->dma.base);
ret = ttm_base_object_init(tfile,
&user_bo->base,
shareable,
ttm_buffer_type,
&vmw_user_dmabuf_release, NULL);
ret = ttm_prime_object_init(tfile,
size,
&user_bo->prime,
shareable,
ttm_buffer_type,
&vmw_user_dmabuf_release, NULL);
if (unlikely(ret != 0)) {
ttm_bo_unref(&tmp);
goto out_no_base_object;
}
*p_dma_buf = &user_bo->dma;
*handle = user_bo->base.hash.key;
*handle = user_bo->prime.base.hash.key;
out_no_base_object:
return ret;
@ -475,8 +477,8 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
return -EPERM;
vmw_user_bo = vmw_user_dma_buffer(bo);
return (vmw_user_bo->base.tfile == tfile ||
vmw_user_bo->base.shareable) ? 0 : -EPERM;
return (vmw_user_bo->prime.base.tfile == tfile ||
vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
@ -538,14 +540,15 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
return -ESRCH;
}
if (unlikely(base->object_type != ttm_buffer_type)) {
if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
ttm_base_object_unref(&base);
printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return -EINVAL;
}
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
prime.base);
(void)ttm_bo_reference(&vmw_user_bo->dma.base);
ttm_base_object_unref(&base);
*out = &vmw_user_bo->dma;
@ -562,7 +565,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
return -EINVAL;
user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
return ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_USAGE, NULL);
}
/*
@ -807,15 +811,16 @@ int vmw_dumb_create(struct drm_file *file_priv,
goto out_no_dmabuf;
tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
&vmw_user_bo->base,
false,
ttm_buffer_type,
&vmw_user_dmabuf_release, NULL);
ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile,
args->size,
&vmw_user_bo->prime,
false,
ttm_buffer_type,
&vmw_user_dmabuf_release, NULL);
if (unlikely(ret != 0))
goto out_no_base_object;
args->handle = vmw_user_bo->base.hash.key;
args->handle = vmw_user_bo->prime.base.hash.key;
out_no_base_object:
ttm_bo_unref(&tmp);
@ -994,7 +999,6 @@ void vmw_resource_unreserve(struct vmw_resource *res,
*/
static int
vmw_resource_check_buffer(struct vmw_resource *res,
struct ww_acquire_ctx *ticket,
bool interruptible,
struct ttm_validate_buffer *val_buf)
{
@ -1011,7 +1015,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
INIT_LIST_HEAD(&val_list);
val_buf->bo = ttm_bo_reference(&res->backup->base);
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list);
ret = ttm_eu_reserve_buffers(NULL, &val_list);
if (unlikely(ret != 0))
goto out_no_reserve;
@ -1029,7 +1033,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
return 0;
out_no_validate:
ttm_eu_backoff_reservation(ticket, &val_list);
ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
ttm_bo_unref(&val_buf->bo);
if (backup_dirty)
@ -1074,8 +1078,7 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
* @val_buf: Backup buffer information.
*/
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
struct ttm_validate_buffer *val_buf)
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
struct list_head val_list;
@ -1084,7 +1087,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
ttm_eu_backoff_reservation(ticket, &val_list);
ttm_eu_backoff_reservation(NULL, &val_list);
ttm_bo_unref(&val_buf->bo);
}
@ -1099,14 +1102,12 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
struct ttm_validate_buffer val_buf;
const struct vmw_res_func *func = res->func;
struct ww_acquire_ctx ticket;
int ret;
BUG_ON(!func->may_evict);
val_buf.bo = NULL;
ret = vmw_resource_check_buffer(res, &ticket, interruptible,
&val_buf);
ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
if (unlikely(ret != 0))
return ret;
@ -1121,7 +1122,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
res->backup_dirty = true;
res->res_dirty = false;
out_no_unbind:
vmw_resource_backoff_reservation(&ticket, &val_buf);
vmw_resource_backoff_reservation(&val_buf);
return ret;
}

View file

@ -38,7 +38,7 @@
* @size: TTM accounting size for the surface.
*/
struct vmw_user_surface {
struct ttm_base_object base;
struct ttm_prime_object prime;
struct vmw_surface srf;
uint32_t size;
uint32_t backup_handle;
@ -580,7 +580,8 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
return &(container_of(base, struct vmw_user_surface, base)->srf.res);
return &(container_of(base, struct vmw_user_surface,
prime.base)->srf.res);
}
/**
@ -599,7 +600,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
kfree(srf->offsets);
kfree(srf->sizes);
kfree(srf->snooper.image);
ttm_base_object_kfree(user_srf, base);
ttm_prime_object_kfree(user_srf, prime);
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
@ -616,7 +617,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
struct ttm_base_object *base = *p_base;
struct vmw_user_surface *user_srf =
container_of(base, struct vmw_user_surface, base);
container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
*p_base = NULL;
@ -790,8 +791,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
srf->snooper.crtc = NULL;
user_srf->base.shareable = false;
user_srf->base.tfile = NULL;
user_srf->prime.base.shareable = false;
user_srf->prime.base.tfile = NULL;
/**
* From this point, the generic resource management functions
@ -803,9 +804,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
tmp = vmw_resource_reference(&srf->res);
ret = ttm_base_object_init(tfile, &user_srf->base,
req->shareable, VMW_RES_SURFACE,
&vmw_user_surface_base_release, NULL);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->shareable, VMW_RES_SURFACE,
&vmw_user_surface_base_release, NULL);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
@ -813,7 +814,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
rep->sid = user_srf->base.hash.key;
rep->sid = user_srf->prime.base.hash.key;
vmw_resource_unreference(&res);
ttm_read_unlock(&vmaster->lock);
@ -823,7 +824,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
out_no_offsets:
kfree(srf->sizes);
out_no_sizes:
ttm_base_object_kfree(user_srf, base);
ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
@ -859,13 +860,14 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
if (unlikely(base->object_type != VMW_RES_SURFACE))
if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
goto out_bad_resource;
user_srf = container_of(base, struct vmw_user_surface, base);
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
TTM_REF_USAGE, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_no_reference;

View file

@ -169,6 +169,7 @@ struct ttm_tt;
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* @cur_placement: Hint of current placement.
* @wu_mutex: Wait unreserved mutex.
*
* Base class for TTM buffer object, that deals with data placement and CPU
* mappings. GPU mappings are really up to the driver, but for simpler GPUs
@ -250,6 +251,7 @@ struct ttm_buffer_object {
struct reservation_object *resv;
struct reservation_object ttm_resv;
struct mutex wu_mutex;
};
/**
@ -702,5 +704,5 @@ extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
size_t count, loff_t *f_pos, bool write);
extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
#endif

View file

@ -70,7 +70,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
/**
* function ttm_eu_reserve_buffers
*
* @ticket: [out] ww_acquire_ctx returned by call.
* @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only
* non-blocking reserves should be tried.
* @list: thread private list of ttm_validate_buffer structs.
*
* Tries to reserve bos pointed to by the list entries for validation.
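
The NULL-ticket mode described here is what the vmwgfx eviction path above now relies on. As a rough sketch (not part of the patch), a caller that only needs one buffer and can live with a failed non-blocking reserve can skip the ww_acquire_ctx entirely:

#include <drm/ttm/ttm_execbuf_util.h>

/* Sketch of a ticket-less, single-buffer reserve, mirroring what
 * vmw_resource_check_buffer() does above. */
static int reserve_single_bo(struct ttm_buffer_object *bo,
			     struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	int ret;

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(bo);
	list_add_tail(&val_buf->head, &val_list);

	/* NULL ticket: only non-blocking reserves are attempted, so there
	 * is no ww_acquire_ctx to set up or to back off from. */
	ret = ttm_eu_reserve_buffers(NULL, &val_list);
	if (unlikely(ret != 0))
		ttm_bo_unref(&val_buf->bo);

	return ret;
}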

View file

@ -41,6 +41,7 @@
#include <drm/drm_hashtab.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/dma-buf.h>
#include <ttm/ttm_memory.h>
/**
@ -77,6 +78,7 @@ enum ttm_object_type {
ttm_fence_type,
ttm_buffer_type,
ttm_lock_type,
ttm_prime_type,
ttm_driver_type0 = 256,
ttm_driver_type1,
ttm_driver_type2,
@ -132,6 +134,30 @@ struct ttm_base_object {
enum ttm_ref_type ref_type);
};
/**
* struct ttm_prime_object - Modified base object that is prime-aware
*
* @base: struct ttm_base_object that we derive from
* @mutex: Mutex protecting the @dma_buf member.
* @size: Size of the dma_buf associated with this object
* @real_type: Type of the underlying object. Needed since we're setting
* the value of @base::object_type to ttm_prime_type.
* @dma_buf: Non ref-counted pointer to a struct dma_buf created from this
* object.
* @refcount_release: The underlying object's release method. Needed since
* we set @base::refcount_release to our own release method.
*/
struct ttm_prime_object {
struct ttm_base_object base;
struct mutex mutex;
size_t size;
enum ttm_object_type real_type;
struct dma_buf *dma_buf;
void (*refcount_release) (struct ttm_base_object **);
};
/**
* ttm_base_object_init
*
@ -248,14 +274,18 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
/**
* ttm_object device init - initialize a struct ttm_object_device
*
* @mem_glob: struct ttm_mem_global for memory accounting.
* @hash_order: Order of hash table used to hash the base objects.
* @ops: DMA buf ops for prime objects of this device.
*
* This function is typically called on device initialization to prepare
* data structures needed for ttm base and ref objects.
*/
extern struct ttm_object_device *ttm_object_device_init
(struct ttm_mem_global *mem_glob, unsigned int hash_order);
extern struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
unsigned int hash_order,
const struct dma_buf_ops *ops);
/**
* ttm_object_device_release - release data held by a ttm_object_device
@ -272,4 +302,31 @@ extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
#define ttm_base_object_kfree(__object, __base)\
kfree_rcu(__object, __base.rhead)
extern int ttm_prime_object_init(struct ttm_object_file *tfile,
size_t size,
struct ttm_prime_object *prime,
bool shareable,
enum ttm_object_type type,
void (*refcount_release)
(struct ttm_base_object **),
void (*ref_obj_release)
(struct ttm_base_object *,
enum ttm_ref_type ref_type));
static inline enum ttm_object_type
ttm_base_object_type(struct ttm_base_object *base)
{
return (base->object_type == ttm_prime_type) ?
container_of(base, struct ttm_prime_object, base)->real_type :
base->object_type;
}
extern int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
int fd, u32 *handle);
extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
uint32_t handle, uint32_t flags,
int *prime_fd);
#define ttm_prime_object_kfree(__obj, __prime) \
kfree_rcu(__obj, __prime.base.rhead)
#endif
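
Since prime-aware objects register under ttm_prime_type and keep their real type in @real_type, handle lookups must compare against ttm_base_object_type() instead of reading base->object_type directly; that is exactly the switch made in the vmwgfx hunks above. A hypothetical lookup helper (my_* names invented for illustration):

#include <drm/ttm/ttm_object.h>

/* Hypothetical driver object embedding the prime object. */
struct my_user_object {
	struct ttm_prime_object prime;
	/* driver payload ... */
};

static int my_object_add_ref(struct ttm_object_file *tfile, uint32_t handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
	struct my_user_object *uobj;
	int ret;

	if (unlikely(base == NULL))
		return -ESRCH;

	/* base->object_type reads back ttm_prime_type here, so compare
	 * against the real, underlying type instead. */
	if (ttm_base_object_type(base) != ttm_buffer_type) {
		ttm_base_object_unref(&base);
		return -EINVAL;
	}

	/* Reach the driver object through the embedded prime object. */
	uobj = container_of(base, struct my_user_object, prime.base);

	ret = ttm_ref_object_add(tfile, &uobj->prime.base,
				 TTM_REF_USAGE, NULL);
	ttm_base_object_unref(&base);
	return ret;
}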

View file

@ -981,6 +981,8 @@ struct drm_radeon_cs {
#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16
/* query if CP DMA is supported on the compute ring */
#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
/* CIK macrotile mode array */
#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18
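
The new query is presumably consumed like the existing SI tile-mode-array query: userspace passes a buffer pointer in the value field and the kernel copies the macrotile mode array into it. A hedged userspace sketch using libdrm (the 16-entry array size and header paths are assumptions, not taken from this diff):

/* Userspace sketch using libdrm. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>

static int query_macrotile_modes(int fd, uint32_t modes[16])
{
	struct drm_radeon_info info;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_CIK_MACROTILE_MODE_ARRAY;
	info.value = (uintptr_t)modes;	/* kernel copies the array here */

	return drmCommandWriteRead(fd, DRM_RADEON_INFO,
				   &info, sizeof(info));
}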
struct drm_radeon_info {