Merge remote branch 'intel/drm-intel-next' of ../drm-next into drm-core-next
* 'intel/drm-intel-next' of ../drm-next: (63 commits)
  drm/i915: Move gpu_write_list to per-ring
  drm/i915: Invalidate the to-ring, flush the old-ring when updating domains
  drm/i915/ringbuffer: Write the value passed in to the tail register
  agp/intel: Restore valid PTE bit for Sandybridge after bdd3072
  drm/i915: Fix flushing regression from 9af90d19f
  drm/i915/sdvo: Remove unused encoding member
  i915: enable AVI infoframe for intel_hdmi.c [v4]
  drm/i915: Fix current fb blocking for page flip
  drm/i915: IS_IRONLAKE is synonymous with gen == 5
  drm/i915: Enable SandyBridge blitter ring
  drm/i915/ringbuffer: Remove broken intel_fill_struct()
  drm/i915/ringbuffer: Fix emit batch buffer regression from 8187a2b
  drm/i915: Copy the updated reloc->presumed_offset back to the user
  drm/i915: Track objects in global active list (as well as per-ring)
  drm/i915: Simplify most HAS_BSD() checks
  drm/i915: cache the last object lookup during pin_and_relocate()
  drm/i915: Do interrupible mutex lock first to avoid locking for unreference
  drivers: gpu: drm: i915: Fix a typo.
  agp/intel: Also add B43.1 to list of supported devices
  drm/i915: rearrange mutex acquisition for pread
  ...
commit e3ce8a0b27
29 changed files with 1988 additions and 1375 deletions
@@ -895,6 +895,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
	ID(PCI_DEVICE_ID_INTEL_G45_HB),
	ID(PCI_DEVICE_ID_INTEL_G41_HB),
	ID(PCI_DEVICE_ID_INTEL_B43_HB),
+	ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),

@@ -1211,13 +1211,13 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
	u32 pte_flags;

	if (type_mask == AGP_USER_UNCACHED_MEMORY)
-		pte_flags = GEN6_PTE_UNCACHED;
+		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-		pte_flags = GEN6_PTE_LLC;
+		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	} else { /* set 'normal'/'cached' to LLC by default */
-		pte_flags = GEN6_PTE_LLC_MLC;
+		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	}

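For readability, the flag selection after this patch can be restated outside the diff. This is only a condensed sketch of the hunk above, not a drop-in replacement for the driver's function; the helper name is illustrative, and GEN6_PTE_*, I810_PTE_VALID and the AGP_USER_* type masks are the macros already used in the hunk.

/* Sketch: how gen6_write_entry() picks PTE flags after this patch.
 * Every value now carries I810_PTE_VALID, which the patch restores. */
static u32 gen6_pte_flags_for(unsigned int type_mask, int gfdt)
{
	u32 pte_flags;

	if (type_mask == AGP_USER_UNCACHED_MEMORY)
		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
	else	/* 'normal'/'cached' defaults to LLC+MLC */
		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;

	/* GFDT only applies to the two cached flavours, as in the hunk */
	if (gfdt && type_mask != AGP_USER_UNCACHED_MEMORY)
		pte_flags |= GEN6_PTE_GFDT;

	return pte_flags;
}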
@@ -1267,7 +1267,35 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
}

#define HDMI_IDENTIFIER 0x000C03
#define AUDIO_BLOCK 0x01
#define VENDOR_BLOCK 0x03
#define EDID_BASIC_AUDIO (1 << 6)

/**
 * Search EDID for CEA extension block.
 */
static u8 *drm_find_cea_extension(struct edid *edid)
{
	u8 *edid_ext = NULL;
	int i;

	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return NULL;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return NULL;

	return edid_ext;
}

/**
 * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
 * @edid: monitor EDID information

@@ -1277,24 +1305,13 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 */
bool drm_detect_hdmi_monitor(struct edid *edid)
{
-	char *edid_ext = NULL;
+	u8 *edid_ext;
	int i, hdmi_id;
	int start_offset, end_offset;
	bool is_hdmi = false;

-	/* No EDID or EDID extensions */
-	if (edid == NULL || edid->extensions == 0)
-		goto end;
-
-	/* Find CEA extension */
-	for (i = 0; i < edid->extensions; i++) {
-		edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
-		/* This block is CEA extension */
-		if (edid_ext[0] == 0x02)
-			break;
-	}
-
-	if (i == edid->extensions)
+	edid_ext = drm_find_cea_extension(edid);
+	if (!edid_ext)
		goto end;

	/* Data block offset in CEA extension block */

@@ -1324,6 +1341,53 @@ bool drm_detect_hdmi_monitor(struct edid *edid)
}
EXPORT_SYMBOL(drm_detect_hdmi_monitor);

/**
 * drm_detect_monitor_audio - check monitor audio capability
 *
 * Monitor should have CEA extension block.
 * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic
 * audio' only. If there is any audio extension block and supported
 * audio format, assume at least 'basic audio' support, even if 'basic
 * audio' is not defined in EDID.
 *
 */
bool drm_detect_monitor_audio(struct edid *edid)
{
	u8 *edid_ext;
	int i, j;
	bool has_audio = false;
	int start_offset, end_offset;

	edid_ext = drm_find_cea_extension(edid);
	if (!edid_ext)
		goto end;

	has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);

	if (has_audio) {
		DRM_DEBUG_KMS("Monitor has basic audio support\n");
		goto end;
	}

	/* Data block offset in CEA extension block */
	start_offset = 4;
	end_offset = edid_ext[2];

	for (i = start_offset; i < end_offset;
			i += ((edid_ext[i] & 0x1f) + 1)) {
		if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
			has_audio = true;
			for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
				DRM_DEBUG_KMS("CEA audio format %d\n",
					      (edid_ext[i + j] >> 3) & 0xf);
			goto end;
		}
	}
end:
	return has_audio;
}
EXPORT_SYMBOL(drm_detect_monitor_audio);

/**
 * drm_add_edid_modes - add modes from EDID data, if available
 * @connector: connector we're probing

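The two exported helpers above, drm_detect_hdmi_monitor() and the newly added drm_detect_monitor_audio(), are meant to be called by drivers after reading a sink's EDID. A minimal usage sketch follows; apart from those two helpers and drm_get_edid(), the function and variable names here are illustrative only, and error handling is kept to the bare minimum.

#include <linux/slab.h>
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_edid.h"

/* Illustrative consumer: probe a connector's sink for HDMI and audio support. */
static void example_probe_sink_caps(struct drm_connector *connector,
				    struct i2c_adapter *ddc)
{
	struct edid *edid = drm_get_edid(connector, ddc);

	if (edid == NULL)
		return;

	if (drm_detect_hdmi_monitor(edid))	/* CEA extension + HDMI VSDB found */
		DRM_DEBUG_KMS("sink is HDMI\n");
	if (drm_detect_monitor_audio(edid))	/* basic-audio flag or audio data block */
		DRM_DEBUG_KMS("sink supports audio\n");

	kfree(edid);
}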
@@ -35,6 +35,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \

i915-$(CONFIG_COMPAT)   += i915_ioc32.o

+i915-$(CONFIG_ACPI)	+= intel_acpi.o
+
obj-$(CONFIG_DRM_I915)  += i915.o

CFLAGS_i915_trace_points.o := -I$(src)

@@ -41,8 +41,7 @@
#if defined(CONFIG_DEBUG_FS)

enum {
-	RENDER_LIST,
-	BSD_LIST,
+	ACTIVE_LIST,
	FLUSHING_LIST,
	INACTIVE_LIST,
	PINNED_LIST,

@@ -72,7 +71,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
	B(is_pineview);
	B(is_broadwater);
	B(is_crestline);
-	B(is_ironlake);
	B(has_fbc);
	B(has_rc6);
	B(has_pipe_cxsr);

@@ -81,6 +79,8 @@ static int i915_capabilities(struct seq_file *m, void *data)
	B(has_overlay);
	B(overlay_needs_physical);
	B(supports_tv);
+	B(has_bsd_ring);
+	B(has_blt_ring);
#undef B

	return 0;

@@ -125,6 +125,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	if (obj->gtt_space != NULL)
		seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+	if (obj->ring != NULL)
+		seq_printf(m, " (%s)", obj->ring->name);
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)

@@ -143,13 +145,9 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
		return ret;

	switch (list) {
-	case RENDER_LIST:
-		seq_printf(m, "Render:\n");
-		head = &dev_priv->render_ring.active_list;
-		break;
-	case BSD_LIST:
-		seq_printf(m, "BSD:\n");
-		head = &dev_priv->bsd_ring.active_list;
+	case ACTIVE_LIST:
+		seq_printf(m, "Active:\n");
+		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		seq_printf(m, "Inactive:\n");

@@ -173,7 +171,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
	}

	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj_priv, head, list) {
+	list_for_each_entry(obj_priv, head, mm_list) {
		seq_printf(m, " ");
		describe_obj(m, obj_priv);
		seq_printf(m, "\n");

@@ -460,8 +458,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
	if (ret)
		return ret;

-	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
-			    list) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
		obj = &obj_priv->base;
		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
			seq_printf(m, "--- gtt_offset = 0x%08x\n",

@@ -797,7 +794,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

-	if (IS_IRONLAKE(dev))
+	if (IS_GEN5(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;

@@ -1020,8 +1017,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
-	{"i915_gem_render_active", i915_gem_object_list_info, 0, (void *) RENDER_LIST},
-	{"i915_gem_bsd_active", i915_gem_object_list_info, 0, (void *) BSD_LIST},
+	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},

@@ -132,8 +132,8 @@ static int i915_dma_cleanup(struct drm_device * dev)

	mutex_lock(&dev->struct_mutex);
	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-	if (HAS_BSD(dev))
-		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */

@@ -499,7 +499,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
	}

-	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+	if (IS_G4X(dev) || IS_GEN5(dev)) {
		BEGIN_LP_RING(2);
		OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
		OUT_RING(MI_NOOP);

@@ -764,6 +764,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
	case I915_PARAM_HAS_BSD:
		value = HAS_BSD(dev);
		break;
+	case I915_PARAM_HAS_BLT:
+		value = HAS_BLT(dev);
+		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);

@@ -1199,9 +1202,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
	/* Basic memrange allocator for stolen space (aka mm.vram) */
	drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);

-	/* We're off and running w/KMS */
-	dev_priv->mm.suspended = 0;
-
	/* Let GEM Manage from end of prealloc space to end of aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.

@@ -1235,7 +1235,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
	 */
	dev_priv->allow_batchbuffer = 1;

-	ret = intel_init_bios(dev);
+	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

@@ -1244,6 +1244,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
	if (ret)
		goto cleanup_ringbuffer;

+	intel_register_dsm_handler();
+
	ret = vga_switcheroo_register_client(dev->pdev,
					     i915_switcheroo_set_state,
					     i915_switcheroo_can_switch);

@@ -1269,6 +1271,10 @@ static int i915_load_modeset_init(struct drm_device *dev,
		goto cleanup_irq;

	drm_kms_helper_poll_init(dev);

+	/* We're off and running w/KMS */
+	dev_priv->mm.suspended = 0;
+
	return 0;

cleanup_irq:

@@ -1989,7 +1995,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

@@ -1999,6 +2005,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

+	/* Make sure the bios did its job and set up vital registers */
+	intel_setup_bios(dev);
+
	i915_gem_load(dev);

	/* Init HWS */

@@ -2010,7 +2019,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
-	else if (IS_IRONLAKE(dev))
+	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the

@@ -2063,9 +2072,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	dev_priv->mchdev_lock = &mchdev_lock;
	spin_unlock(&mchdev_lock);

-	/* XXX Prevent module unload due to memory corruption bugs. */
-	__module_get(THIS_MODULE);
-
	return 0;

out_workqueue_free:

@@ -2134,9 +2140,6 @@ int i915_driver_unload(struct drm_device *dev)
	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

-	if (dev_priv->regs != NULL)
-		iounmap(dev_priv->regs);
-
	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {

@@ -2153,8 +2156,14 @@ int i915_driver_unload(struct drm_device *dev)
		drm_mm_takedown(&dev_priv->mm.vram);

		intel_cleanup_overlay(dev);

+		if (!I915_NEED_GFX_HWS(dev))
+			i915_free_hws(dev);
	}

+	if (dev_priv->regs != NULL)
+		iounmap(dev_priv->regs);
+
	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

@@ -143,13 +143,13 @@ static const struct intel_device_info intel_pineview_info = {
};

static const struct intel_device_info intel_ironlake_d_info = {
-	.gen = 5, .is_ironlake = 1,
+	.gen = 5,
	.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_ironlake_m_info = {
-	.gen = 5, .is_ironlake = 1, .is_mobile = 1,
+	.gen = 5, .is_mobile = 1,
	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

@@ -158,12 +158,14 @@ static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
};

static const struct pci_device_id pciidlist[] = {		/* aka */

@@ -206,7 +206,6 @@ struct intel_device_info {
	u8 is_pineview : 1;
	u8 is_broadwater : 1;
	u8 is_crestline : 1;
-	u8 is_ironlake : 1;
	u8 has_fbc : 1;
	u8 has_rc6 : 1;
	u8 has_pipe_cxsr : 1;

@@ -216,6 +215,7 @@ struct intel_device_info {
	u8 overlay_needs_physical : 1;
	u8 supports_tv : 1;
	u8 has_bsd_ring : 1;
+	u8 has_blt_ring : 1;
};

enum no_fbc_reason {

@@ -255,6 +255,7 @@ typedef struct drm_i915_private {
	struct pci_dev *bridge_dev;
	struct intel_ring_buffer render_ring;
	struct intel_ring_buffer bsd_ring;
+	struct intel_ring_buffer blt_ring;
	uint32_t next_seqno;

	drm_dma_handle_t *status_page_dmah;

@@ -339,17 +340,18 @@ typedef struct drm_i915_private {
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	int lvds_ssc_freq;

	struct {
-		u8 rate:4;
-		u8 lanes:4;
-		u8 preemphasis:4;
-		u8 vswing:4;
+		int rate;
+		int lanes;
+		int preemphasis;
+		int vswing;

-		u8 initialized:1;
-		u8 support:1;
-		u8 bpp:6;
+		bool initialized;
+		bool support;
+		int bpp;
+		struct edp_power_seq pps;
	} edp;
	bool no_aux_handshake;

	struct notifier_block lid_notifier;

@@ -546,6 +548,17 @@ typedef struct drm_i915_private {
		 */
		struct list_head shrink_list;

+		/**
+		 * List of objects currently involved in rendering.
+		 *
+		 * Includes buffers having the contents of their GPU caches
+		 * flushed, not necessarily primitives. last_rendering_seqno
+		 * represents when the rendering involved will be completed.
+		 *
+		 * A reference is held on the buffer while on this list.
+		 */
+		struct list_head active_list;
+
		/**
		 * List of objects which are not in the ringbuffer but which
		 * still have a write_domain which needs to be flushed before

@@ -557,15 +570,6 @@ typedef struct drm_i915_private {
		 */
		struct list_head flushing_list;

-		/**
-		 * List of objects currently pending a GPU write flush.
-		 *
-		 * All elements on this list will belong to either the
-		 * active_list or flushing_list, last_rendering_seqno can
-		 * be used to differentiate between the two elements.
-		 */
-		struct list_head gpu_write_list;
-
		/**
		 * LRU list of objects which are not in the ringbuffer and
		 * are ready to unbind, but are still in the GTT.

@@ -713,7 +717,8 @@ struct drm_i915_gem_object {
	struct drm_mm_node *gtt_space;

	/** This object's place on the active/flushing/inactive lists */
-	struct list_head list;
+	struct list_head ring_list;
+	struct list_head mm_list;
	/** This object's place on GPU write list */
	struct list_head gpu_write_list;
	/** This object's place on eviction list */

@@ -1136,6 +1141,15 @@ static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
#endif

+/* intel_acpi.c */
+#ifdef CONFIG_ACPI
+extern void intel_register_dsm_handler(void);
+extern void intel_unregister_dsm_handler(void);
+#else
+static inline void intel_register_dsm_handler(void) { return; }
+static inline void intel_unregister_dsm_handler(void) { return; }
+#endif /* CONFIG_ACPI */
+
/* modesetting */
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);

@@ -1268,7 +1282,6 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
-#define IS_IRONLAKE(dev)	(INTEL_INFO(dev)->is_ironlake)
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)

#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)

@@ -1278,6 +1291,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)

#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

#define HAS_OVERLAY(dev) 		(INTEL_INFO(dev)->has_overlay)

@@ -1289,8 +1303,8 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
-#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)

@@ -1302,9 +1316,8 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
#define I915_HAS_FBC(dev)		(INTEL_INFO(dev)->has_fbc)
#define I915_HAS_RC6(dev)		(INTEL_INFO(dev)->has_rc6)

-#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) ||	\
-			    IS_GEN6(dev))
-#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
+#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))

#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)

File diff suppressed because it is too large
@@ -31,49 +31,6 @@
#include "i915_drv.h"
#include "i915_drm.h"

-static struct drm_i915_gem_object *
-i915_gem_next_active_object(struct drm_device *dev,
-			    struct list_head **render_iter,
-			    struct list_head **bsd_iter)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
-
-	if (*render_iter != &dev_priv->render_ring.active_list)
-		render_obj = list_entry(*render_iter,
-					struct drm_i915_gem_object,
-					list);
-
-	if (HAS_BSD(dev)) {
-		if (*bsd_iter != &dev_priv->bsd_ring.active_list)
-			bsd_obj = list_entry(*bsd_iter,
-					     struct drm_i915_gem_object,
-					     list);
-
-		if (render_obj == NULL) {
-			*bsd_iter = (*bsd_iter)->next;
-			return bsd_obj;
-		}
-
-		if (bsd_obj == NULL) {
-			*render_iter = (*render_iter)->next;
-			return render_obj;
-		}
-
-		/* XXX can we handle seqno wrapping? */
-		if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
-			*render_iter = (*render_iter)->next;
-			return render_obj;
-		} else {
-			*bsd_iter = (*bsd_iter)->next;
-			return bsd_obj;
-		}
-	} else {
-		*render_iter = (*render_iter)->next;
-		return render_obj;
-	}
-}
-
static bool
mark_free(struct drm_i915_gem_object *obj_priv,
	  struct list_head *unwind)

@@ -83,18 +40,12 @@ mark_free(struct drm_i915_gem_object *obj_priv,
	return drm_mm_scan_add_block(obj_priv->gtt_space);
}

-#define i915_for_each_active_object(OBJ, R, B) \
-	*(R) = dev_priv->render_ring.active_list.next; \
-	*(B) = dev_priv->bsd_ring.active_list.next; \
-	while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
-
int
i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct drm_i915_gem_object *obj_priv;
-	struct list_head *render_iter, *bsd_iter;
	int ret = 0;

	i915_gem_retire_requests(dev);

@@ -131,13 +82,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
	drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

	/* First see if there is a large enough contiguous idle region... */
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}

	/* Now merge in the soon-to-be-expired objects... */
-	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
		/* Does the object require an outstanding flush? */
		if (obj_priv->base.write_domain || obj_priv->pin_count)
			continue;

@@ -147,14 +98,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
	}

	/* Finally add anything with a pending flush (in order of retirement) */
-	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
		if (obj_priv->pin_count)
			continue;

		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}
-	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
		if (! obj_priv->base.write_domain || obj_priv->pin_count)
			continue;

@@ -215,8 +166,8 @@ i915_gem_evict_everything(struct drm_device *dev)
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list) &&
-		       (!HAS_BSD(dev)
-			|| list_empty(&dev_priv->bsd_ring.active_list)));
+		       list_empty(&dev_priv->bsd_ring.active_list) &&
+		       list_empty(&dev_priv->blt_ring.active_list));
	if (lists_empty)
		return -ENOSPC;

@@ -234,8 +185,8 @@ i915_gem_evict_everything(struct drm_device *dev)
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list) &&
-		       (!HAS_BSD(dev)
-			|| list_empty(&dev_priv->bsd_ring.active_list)));
+		       list_empty(&dev_priv->bsd_ring.active_list) &&
+		       list_empty(&dev_priv->blt_ring.active_list));
	BUG_ON(!lists_empty);

	return 0;

@@ -253,7 +204,7 @@ i915_gem_evict_inactive(struct drm_device *dev)

		obj = &list_first_entry(&dev_priv->mm.inactive_list,
					struct drm_i915_gem_object,
-					list)->base;
+					mm_list)->base;

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {

@@ -92,7 +92,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

-	if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+	if (IS_GEN5(dev) || IS_GEN6(dev)) {
		/* On Ironlake whatever DRAM config, GPU always do
		 * same swizzling setup.
		 */

@@ -293,13 +293,26 @@ static void i915_handle_rps_change(struct drm_device *dev)
	return;
}

+static void notify_ring(struct drm_device *dev,
+			struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 seqno = ring->get_seqno(dev, ring);
+	ring->irq_gem_seqno = seqno;
+	trace_i915_gem_request_complete(dev, seqno);
+	wake_up_all(&ring->irq_queue);
+	dev_priv->hangcheck_count = 0;
+	mod_timer(&dev_priv->hangcheck_timer,
+		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+}
+
static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir;
+	u32 hotplug_mask;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
	u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;

	if (IS_GEN6(dev))

@@ -317,6 +330,11 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
		goto done;

+	if (HAS_PCH_CPT(dev))
+		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
+	else
+		hotplug_mask = SDE_HOTPLUG_MASK;
+
	ret = IRQ_HANDLED;

	if (dev->primary->master) {

@@ -326,17 +344,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
			READ_BREADCRUMB(dev_priv);
	}

-	if (gt_iir & GT_PIPE_NOTIFY) {
-		u32 seqno = render_ring->get_seqno(dev, render_ring);
-		render_ring->irq_gem_seqno = seqno;
-		trace_i915_gem_request_complete(dev, seqno);
-		wake_up_all(&dev_priv->render_ring.irq_queue);
-		dev_priv->hangcheck_count = 0;
-		mod_timer(&dev_priv->hangcheck_timer,
-			  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
-	}
+	if (gt_iir & GT_PIPE_NOTIFY)
+		notify_ring(dev, &dev_priv->render_ring);
	if (gt_iir & bsd_usr_interrupt)
-		wake_up_all(&dev_priv->bsd_ring.irq_queue);
+		notify_ring(dev, &dev_priv->bsd_ring);
+	if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
+		notify_ring(dev, &dev_priv->blt_ring);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

@@ -358,10 +371,8 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
-	if ((de_iir & DE_PCH_EVENT) &&
-	    (pch_iir & SDE_HOTPLUG_MASK)) {
+	if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-	}

	if (de_iir & DE_PCU_EVENT) {
		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

@@ -604,9 +615,7 @@ static void i915_capture_error_state(struct drm_device *dev)
	batchbuffer[0] = NULL;
	batchbuffer[1] = NULL;
	count = 0;
-	list_for_each_entry(obj_priv,
-			&dev_priv->render_ring.active_list, list) {
-
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
		struct drm_gem_object *obj = &obj_priv->base;

		if (batchbuffer[0] == NULL &&

@@ -623,7 +632,7 @@ static void i915_capture_error_state(struct drm_device *dev)
	}
	/* Scan the other lists for completeness for those bizarre errors. */
	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
			struct drm_gem_object *obj = &obj_priv->base;

			if (batchbuffer[0] == NULL &&

@@ -641,7 +650,7 @@ static void i915_capture_error_state(struct drm_device *dev)
		}
	}
	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+		list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
			struct drm_gem_object *obj = &obj_priv->base;

			if (batchbuffer[0] == NULL &&

@@ -660,7 +669,7 @@ static void i915_capture_error_state(struct drm_device *dev)
	}

	/* We need to copy these to an anonymous buffer as the simplest
-	 * method to avoid being overwritten by userpace.
+	 * method to avoid being overwritten by userspace.
	 */
	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
	if (batchbuffer[1] != batchbuffer[0])

@@ -682,8 +691,7 @@ static void i915_capture_error_state(struct drm_device *dev)

	if (error->active_bo) {
		int i = 0;
-		list_for_each_entry(obj_priv,
-				&dev_priv->render_ring.active_list, list) {
+		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
			struct drm_gem_object *obj = &obj_priv->base;

			error->active_bo[i].size = obj->size;

@@ -880,6 +888,8 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
		wake_up_all(&dev_priv->render_ring.irq_queue);
		if (HAS_BSD(dev))
			wake_up_all(&dev_priv->bsd_ring.irq_queue);
+		if (HAS_BLT(dev))
+			wake_up_all(&dev_priv->blt_ring.irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);

@@ -940,7 +950,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE;
-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

	atomic_inc(&dev_priv->irq_received);

@@ -1017,18 +1026,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
				READ_BREADCRUMB(dev_priv);
		}

-		if (iir & I915_USER_INTERRUPT) {
-			u32 seqno = render_ring->get_seqno(dev, render_ring);
-			render_ring->irq_gem_seqno = seqno;
-			trace_i915_gem_request_complete(dev, seqno);
-			wake_up_all(&dev_priv->render_ring.irq_queue);
-			dev_priv->hangcheck_count = 0;
-			mod_timer(&dev_priv->hangcheck_timer,
-				  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
-		}
-
+		if (iir & I915_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->render_ring);
		if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
-			wake_up_all(&dev_priv->bsd_ring.irq_queue);
+			notify_ring(dev, &dev_priv->bsd_ring);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 0);

@@ -1357,6 +1358,12 @@ void i915_hangcheck_elapsed(unsigned long data)
		missed_wakeup = true;
	}

+	if (dev_priv->blt_ring.waiting_gem_seqno &&
+	    waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
+		wake_up_all(&dev_priv->blt_ring.irq_queue);
+		missed_wakeup = true;
+	}
+
	if (missed_wakeup)
		DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
	return;

@@ -1431,8 +1438,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
-	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
-			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+	u32 hotplug_mask;

	dev_priv->irq_mask_reg = ~display_mask;
	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;

@@ -1443,8 +1449,12 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
	(void) I915_READ(DEIER);

-	if (IS_GEN6(dev))
-		render_mask = GT_PIPE_NOTIFY | GT_GEN6_BSD_USER_INTERRUPT;
+	if (IS_GEN6(dev)) {
+		render_mask =
+			GT_PIPE_NOTIFY |
+			GT_GEN6_BSD_USER_INTERRUPT |
+			GT_BLT_USER_INTERRUPT;
+	}

	dev_priv->gt_irq_mask_reg = ~render_mask;
	dev_priv->gt_irq_enable_reg = render_mask;

@@ -1454,11 +1464,20 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
	if (IS_GEN6(dev)) {
		I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
		I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
+		I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
	}

	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
	(void) I915_READ(GTIER);

+	if (HAS_PCH_CPT(dev)) {
+		hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT  |
+			       SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ;
+	} else {
+		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
+			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+	}
+
	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
	dev_priv->pch_irq_enable_reg = hotplug_mask;

@@ -1515,9 +1534,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
	u32 error_mask;

	DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);

	if (HAS_BSD(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
+	if (HAS_BLT(dev))
+		DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

@@ -263,6 +263,7 @@
#define RENDER_RING_BASE	0x02000
#define BSD_RING_BASE		0x04000
#define GEN6_BSD_RING_BASE	0x12000
+#define BLT_RING_BASE		0x22000
#define RING_TAIL(base)		((base)+0x30)
#define RING_HEAD(base)		((base)+0x34)
#define RING_START(base)	((base)+0x38)

@@ -661,13 +662,6 @@
#define LVDS			0x61180
#define LVDS_ON			(1<<31)

-#define ADPA			0x61100
-#define ADPA_DPMS_MASK		(~(3<<10))
-#define ADPA_DPMS_ON		(0<<10)
-#define ADPA_DPMS_SUSPEND	(1<<10)
-#define ADPA_DPMS_STANDBY	(2<<10)
-#define ADPA_DPMS_OFF		(3<<10)
-
/* Scratch pad debug 0 reg:
 */
#define DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000

@@ -1200,6 +1194,7 @@
#define ADPA_DPMS_STANDBY	(2<<10)
#define ADPA_DPMS_OFF		(3<<10)


/* Hotplug control (945+ only) */
#define PORT_HOTPLUG_EN		0x61110
#define HDMIB_HOTPLUG_INT_EN	(1 << 29)

@@ -1358,6 +1353,22 @@
#define LVDS_B0B3_POWER_DOWN	(0 << 2)
#define LVDS_B0B3_POWER_UP	(3 << 2)

+/* Video Data Island Packet control */
+#define VIDEO_DIP_DATA		0x61178
+#define VIDEO_DIP_CTL		0x61170
+#define VIDEO_DIP_ENABLE	(1 << 31)
+#define VIDEO_DIP_PORT_B	(1 << 29)
+#define VIDEO_DIP_PORT_C	(2 << 29)
+#define VIDEO_DIP_ENABLE_AVI	(1 << 21)
+#define VIDEO_DIP_ENABLE_VENDOR	(2 << 21)
+#define VIDEO_DIP_ENABLE_SPD	(8 << 21)
+#define VIDEO_DIP_SELECT_AVI	(0 << 19)
+#define VIDEO_DIP_SELECT_VENDOR	(1 << 19)
+#define VIDEO_DIP_SELECT_SPD	(3 << 19)
+#define VIDEO_DIP_FREQ_ONCE	(0 << 16)
+#define VIDEO_DIP_FREQ_VSYNC	(1 << 16)
+#define VIDEO_DIP_FREQ_2VSYNC	(2 << 16)
+
/* Panel power sequencing */
#define PP_STATUS	0x61200
#define PP_ON		(1 << 31)

@@ -1373,6 +1384,9 @@
#define PP_SEQUENCE_ON		(1 << 28)
#define PP_SEQUENCE_OFF		(2 << 28)
#define PP_SEQUENCE_MASK	0x30000000
+#define PP_CYCLE_DELAY_ACTIVE	(1 << 27)
+#define PP_SEQUENCE_STATE_ON_IDLE	(1 << 3)
+#define PP_SEQUENCE_STATE_MASK	0x0000000f
#define PP_CONTROL	0x61204
#define POWER_TARGET_ON	(1 << 0)
#define PP_ON_DELAYS	0x61208

@@ -2564,6 +2578,7 @@
#define GT_USER_INTERRUPT	(1 << 0)
#define GT_BSD_USER_INTERRUPT	(1 << 5)
#define GT_GEN6_BSD_USER_INTERRUPT	(1 << 12)
+#define GT_BLT_USER_INTERRUPT	(1 << 22)

#define GTISR   0x44010
#define GTIMR   0x44014

@@ -2598,6 +2613,10 @@
#define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
#define SDE_PORTC_HOTPLUG_CPT	(1 << 22)
#define SDE_PORTB_HOTPLUG_CPT	(1 << 21)
+#define SDE_HOTPLUG_MASK_CPT	(SDE_CRT_HOTPLUG_CPT |		\
+				 SDE_PORTD_HOTPLUG_CPT |	\
+				 SDE_PORTC_HOTPLUG_CPT |	\
+				 SDE_PORTB_HOTPLUG_CPT)

#define SDEISR  0xc4000
#define SDEIMR  0xc4004

@@ -2779,6 +2798,10 @@
#define FDI_RXA_CHICKEN		0xc200c
#define FDI_RXB_CHICKEN		0xc2010
+#define FDI_RX_PHASE_SYNC_POINTER_ENABLE	(1)
+#define FDI_RX_CHICKEN(pipe)	_PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN)

#define SOUTH_DSPCLK_GATE_D	0xc2020
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE	(1<<29)

/* CPU: FDI_TX */
#define FDI_TXA_CTL		0x60100

drivers/gpu/drm/i915/intel_acpi.c (new file, 286 lines)
@@ -0,0 +1,286 @@
/*
 * Intel ACPI functions
 *
 * _DSM related code stolen from nouveau_acpi.c.
 */
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/vga_switcheroo.h>
#include <acpi/acpi_drivers.h>

#include "drmP.h"

#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */

#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */
#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */

static struct intel_dsm_priv {
	acpi_handle dhandle;
} intel_dsm_priv;

static const u8 intel_dsm_guid[] = {
	0xd3, 0x73, 0xd8, 0x7e,
	0xd0, 0xc2,
	0x4f, 0x4e,
	0xa8, 0x54,
	0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
};

static int intel_dsm(acpi_handle handle, int func, int arg)
{
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input;
	union acpi_object params[4];
	union acpi_object *obj;
	u32 result;
	int ret = 0;

	input.count = 4;
	input.pointer = params;
	params[0].type = ACPI_TYPE_BUFFER;
	params[0].buffer.length = sizeof(intel_dsm_guid);
	params[0].buffer.pointer = (char *)intel_dsm_guid;
	params[1].type = ACPI_TYPE_INTEGER;
	params[1].integer.value = INTEL_DSM_REVISION_ID;
	params[2].type = ACPI_TYPE_INTEGER;
	params[2].integer.value = func;
	params[3].type = ACPI_TYPE_INTEGER;
	params[3].integer.value = arg;

	ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
		return ret;
	}

	obj = (union acpi_object *)output.pointer;

	result = 0;
	switch (obj->type) {
	case ACPI_TYPE_INTEGER:
		result = obj->integer.value;
		break;

	case ACPI_TYPE_BUFFER:
		if (obj->buffer.length == 4) {
			result =(obj->buffer.pointer[0] |
				(obj->buffer.pointer[1] <<  8) |
				(obj->buffer.pointer[2] << 16) |
				(obj->buffer.pointer[3] << 24));
			break;
		}
	default:
		ret = -EINVAL;
		break;
	}
	if (result == 0x80000002)
		ret = -ENODEV;

	kfree(output.pointer);
	return ret;
}

static char *intel_dsm_port_name(u8 id)
{
	switch (id) {
	case 0:
		return "Reserved";
	case 1:
		return "Analog VGA";
	case 2:
		return "LVDS";
	case 3:
		return "Reserved";
	case 4:
		return "HDMI/DVI_B";
	case 5:
		return "HDMI/DVI_C";
	case 6:
		return "HDMI/DVI_D";
	case 7:
		return "DisplayPort_A";
	case 8:
		return "DisplayPort_B";
	case 9:
		return "DisplayPort_C";
	case 0xa:
		return "DisplayPort_D";
	case 0xb:
	case 0xc:
	case 0xd:
		return "Reserved";
	case 0xe:
		return "WiDi";
	default:
		return "bad type";
	}
}

static char *intel_dsm_mux_type(u8 type)
{
	switch (type) {
	case 0:
		return "unknown";
	case 1:
		return "No MUX, iGPU only";
	case 2:
		return "No MUX, dGPU only";
	case 3:
		return "MUXed between iGPU and dGPU";
	default:
		return "bad type";
	}
}

static void intel_dsm_platform_mux_info(void)
{
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input;
	union acpi_object params[4];
	union acpi_object *pkg;
	int i, ret;

	input.count = 4;
	input.pointer = params;
	params[0].type = ACPI_TYPE_BUFFER;
	params[0].buffer.length = sizeof(intel_dsm_guid);
	params[0].buffer.pointer = (char *)intel_dsm_guid;
	params[1].type = ACPI_TYPE_INTEGER;
	params[1].integer.value = INTEL_DSM_REVISION_ID;
	params[2].type = ACPI_TYPE_INTEGER;
	params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
	params[3].type = ACPI_TYPE_INTEGER;
	params[3].integer.value = 0;

	ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
				   &output);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
		goto out;
	}

	pkg = (union acpi_object *)output.pointer;

	if (pkg->type == ACPI_TYPE_PACKAGE) {
		union acpi_object *connector_count = &pkg->package.elements[0];
		DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
			  (unsigned long long)connector_count->integer.value);
		for (i = 1; i < pkg->package.count; i++) {
			union acpi_object *obj = &pkg->package.elements[i];
			union acpi_object *connector_id =
				&obj->package.elements[0];
			union acpi_object *info = &obj->package.elements[1];
			DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
				  (unsigned long long)connector_id->integer.value);
			DRM_DEBUG_DRIVER("  port id: %s\n",
			       intel_dsm_port_name(info->buffer.pointer[0]));
			DRM_DEBUG_DRIVER("  display mux info: %s\n",
			       intel_dsm_mux_type(info->buffer.pointer[1]));
			DRM_DEBUG_DRIVER("  aux/dc mux info: %s\n",
			       intel_dsm_mux_type(info->buffer.pointer[2]));
			DRM_DEBUG_DRIVER("  hpd mux info: %s\n",
			       intel_dsm_mux_type(info->buffer.pointer[3]));
		}
	} else {
		DRM_ERROR("MUX INFO call failed\n");
	}

out:
	kfree(output.pointer);
}

static int intel_dsm_switchto(enum vga_switcheroo_client_id id)
{
	return 0;
}

static int intel_dsm_power_state(enum vga_switcheroo_client_id id,
				 enum vga_switcheroo_state state)
{
	return 0;
}

static int intel_dsm_init(void)
{
	return 0;
}

static int intel_dsm_get_client_id(struct pci_dev *pdev)
{
	if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
		return VGA_SWITCHEROO_IGD;
	else
		return VGA_SWITCHEROO_DIS;
}

static struct vga_switcheroo_handler intel_dsm_handler = {
	.switchto = intel_dsm_switchto,
	.power_state = intel_dsm_power_state,
	.init = intel_dsm_init,
	.get_client_id = intel_dsm_get_client_id,
};

static bool intel_dsm_pci_probe(struct pci_dev *pdev)
{
	acpi_handle dhandle, intel_handle;
	acpi_status status;
	int ret;

	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
	if (!dhandle)
		return false;

	status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
	if (ACPI_FAILURE(status)) {
		DRM_DEBUG_KMS("no _DSM method for intel device\n");
		return false;
	}

	ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
	if (ret < 0) {
		DRM_ERROR("failed to get supported _DSM functions\n");
		return false;
	}

	intel_dsm_priv.dhandle = dhandle;

	intel_dsm_platform_mux_info();
	return true;
}

static bool intel_dsm_detect(void)
{
	char acpi_method_name[255] = { 0 };
	struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
	struct pci_dev *pdev = NULL;
	bool has_dsm = false;
	int vga_count = 0;

	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
		vga_count++;
		has_dsm |= intel_dsm_pci_probe(pdev);
	}

	if (vga_count == 2 && has_dsm) {
		acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
		DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n",
				 acpi_method_name);
		return true;
	}

	return false;
}

void intel_register_dsm_handler(void)
{
	if (!intel_dsm_detect())
		return;

	vga_switcheroo_register_handler(&intel_dsm_handler);
}

void intel_unregister_dsm_handler(void)
{
	vga_switcheroo_unregister_handler();
}

@@ -24,6 +24,7 @@
 *    Eric Anholt <eric@anholt.net>
 *
 */
+#include <drm/drm_dp_helper.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"

@@ -264,10 +265,10 @@ parse_general_features(struct drm_i915_private *dev_priv,
		dev_priv->lvds_use_ssc = general->enable_ssc;

		if (dev_priv->lvds_use_ssc) {
-			if (IS_I85X(dev_priv->dev))
+			if (IS_I85X(dev))
				dev_priv->lvds_ssc_freq =
					general->ssc_freq ? 66 : 48;
-			else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
+			else if (IS_GEN5(dev) || IS_GEN6(dev))
				dev_priv->lvds_ssc_freq =
					general->ssc_freq ? 100 : 120;
			else

@@ -413,6 +414,8 @@ static void
parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
	struct bdb_edp *edp;
+	struct edp_power_seq *edp_pps;
+	struct edp_link_params *edp_link_params;

	edp = find_section(bdb, BDB_EDP);
	if (!edp) {

@@ -437,19 +440,54 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
		break;
	}

-	dev_priv->edp.rate = edp->link_params[panel_type].rate;
-	dev_priv->edp.lanes = edp->link_params[panel_type].lanes;
-	dev_priv->edp.preemphasis = edp->link_params[panel_type].preemphasis;
-	dev_priv->edp.vswing = edp->link_params[panel_type].vswing;
+	/* Get the eDP sequencing and link info */
+	edp_pps = &edp->power_seqs[panel_type];
+	edp_link_params = &edp->link_params[panel_type];

-	DRM_DEBUG_KMS("eDP vBIOS settings: bpp=%d, rate=%d, lanes=%d, preemphasis=%d, vswing=%d\n",
-		      dev_priv->edp.bpp,
-		      dev_priv->edp.rate,
-		      dev_priv->edp.lanes,
-		      dev_priv->edp.preemphasis,
-		      dev_priv->edp.vswing);
+	dev_priv->edp.pps = *edp_pps;

	dev_priv->edp.initialized = true;
+	dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+		DP_LINK_BW_1_62;
+	switch (edp_link_params->lanes) {
+	case 0:
+		dev_priv->edp.lanes = 1;
+		break;
+	case 1:
+		dev_priv->edp.lanes = 2;
+		break;
+	case 3:
+	default:
+		dev_priv->edp.lanes = 4;
+		break;
+	}
+	switch (edp_link_params->preemphasis) {
+	case 0:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+		break;
+	case 1:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+		break;
+	case 2:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+		break;
+	case 3:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+		break;
+	}
+	switch (edp_link_params->vswing) {
+	case 0:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+		break;
+	case 1:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+		break;
+	case 2:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+		break;
+	case 3:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+		break;
+	}
}

static void

@@ -539,7 +577,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
}

/**
- * intel_init_bios - initialize VBIOS settings & find VBT
+ * intel_parse_bios - find VBT and initialize settings from the BIOS
 * @dev: DRM device
 *
 * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers

@@ -548,7 +586,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 * Returns 0 on success, nonzero on failure.
 */
bool
-intel_init_bios(struct drm_device *dev)
+intel_parse_bios(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pdev = dev->pdev;

@@ -609,3 +647,20 @@ intel_parse_bios(struct drm_device *dev)

	return 0;
}

+/* Ensure that vital registers have been initialised, even if the BIOS
+ * is absent or just failing to do its job.
+ */
+void intel_setup_bios(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Set the Panel Power On/Off timings if uninitialized. */
+	if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
+		/* Set T2 to 40ms and T5 to 200ms */
+		I915_WRITE(PP_ON_DELAYS, 0x019007d0);
+
+		/* Set T3 to 35ms and Tx to 200ms */
+		I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
+	}
+}

@@ -467,7 +467,8 @@ struct bdb_edp {
	struct edp_link_params link_params[16];
} __attribute__ ((packed));

-bool intel_init_bios(struct drm_device *dev);
+void intel_setup_bios(struct drm_device *dev);
+bool intel_parse_bios(struct drm_device *dev);

/*
 * Driver<->VBIOS interaction occurs through scratch bits in

@@ -191,7 +191,8 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
		DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");

	if (turn_off_dac) {
-		I915_WRITE(PCH_ADPA, temp);
+		/* Make sure hotplug is enabled */
+		I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE);
		(void)I915_READ(PCH_ADPA);
	}

@@ -345,8 +345,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
if (IS_GEN5(dev)) {
struct drm_i915_private *dev_priv = dev->dev_private;
return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
} else
return 27;
}
static const intel_limit_t intel_limits_i8xx_dvo = {

@@ -932,10 +935,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
/* return directly when it is eDP */
if (HAS_eDP)
return true;
if (target < 200000) {
clock.n = 1;
clock.p1 = 2;

@@ -1719,6 +1718,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
/* Ironlake workaround, enable clock pointer after FDI enable*/
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
temp = I915_READ(reg);

@@ -1764,6 +1766,28 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
DRM_ERROR("FDI train 2 fail!\n");
DRM_DEBUG_KMS("FDI train done\n");
/* enable normal train */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
I915_WRITE(reg, temp);
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_NORMAL_CPT;
} else {
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_NONE;
}
I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
/* wait one idle pattern time */
POSTING_READ(reg);
udelay(1000);
}
static const int const snb_b_fdi_train_param [] = {

@@ -2002,8 +2026,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
|| HAS_eDP || intel_pch_has_edp(crtc))) {
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.

@@ -2022,7 +2045,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if ((temp & PIPECONF_ENABLE) == 0) {
I915_WRITE(reg, temp | PIPECONF_ENABLE);
POSTING_READ(reg);
udelay(100);
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
/* configure and enable CPU plane */

@@ -2067,28 +2090,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
/* enable normal train */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
I915_WRITE(reg, temp);
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_NORMAL_CPT;
} else {
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_NONE;
}
I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
/* wait one idle pattern time */
POSTING_READ(reg);
udelay(100);
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {

@@ -2134,7 +2135,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
I915_WRITE(reg, temp | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder\n");
DRM_ERROR("failed to enable transcoder %d\n", pipe);
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);

@@ -2174,9 +2175,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
temp = I915_READ(reg);
if (temp & PIPECONF_ENABLE) {
I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
POSTING_READ(reg);
/* wait for cpu pipe off, pipe state */
if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 50))
DRM_ERROR("failed to turn off cpu pipe\n");
intel_wait_for_pipe_off(dev, intel_crtc->pipe);
}
/* Disable PF */

@@ -2198,6 +2199,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
I915_WRITE(FDI_RX_CHICKEN(pipe),
I915_READ(FDI_RX_CHICKEN(pipe) &
~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
/* still set train pattern 1 */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -3623,7 +3629,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
refclk / 1000);
} else if (!IS_GEN2(dev)) {
refclk = 96000;
if (HAS_PCH_SPLIT(dev))
if (HAS_PCH_SPLIT(dev) &&
(!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)))
refclk = 120000; /* 120Mhz refclk */
} else {
refclk = 48000;

@@ -3685,16 +3692,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* FDI link */
if (HAS_PCH_SPLIT(dev)) {
int lane = 0, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N
/* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
if (has_edp_encoder) {
if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) {
target_clock = mode->clock;
intel_edp_link_config(has_edp_encoder,
&lane, &link_bw);
} else {
/* DP over FDI requires target mode clock
/* [e]DP over FDI requires target mode clock
instead of link clock */
if (is_dp)
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;

@@ -3718,7 +3725,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
temp |= PIPE_8BPC;
else
temp |= PIPE_6BPC;
} else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
} else if (has_edp_encoder) {
switch (dev_priv->edp.bpp/3) {
case 8:
temp |= PIPE_8BPC;

@@ -3794,13 +3801,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
}
temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
/* Enable CPU source on CPU attached eDP */
if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
if (dev_priv->lvds_use_ssc)
temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
else
temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else {
temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
/* Enable SSC on PCH eDP if needed */
if (dev_priv->lvds_use_ssc) {
DRM_ERROR("enabling SSC on PCH\n");
temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
}
}
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
}
}

@@ -3835,7 +3854,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
dpll |= DPLL_DVO_HIGH_SPEED;
}
if (is_dp)
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */

@@ -3934,7 +3953,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll_reg = DPLL(pipe);
}
if (!has_edp_encoder) {
/* PCH eDP needs FDI, but CPU eDP does not */
if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);

@@ -4011,9 +4031,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
if (is_dp)
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
else if (HAS_PCH_SPLIT(dev)) {
} else if (HAS_PCH_SPLIT(dev)) {
/* For non-DP output, clear any trans DP clock recovery setting.*/
if (pipe == 0) {
I915_WRITE(TRANSA_DATA_M1, 0);

@@ -4028,7 +4048,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
if (!has_edp_encoder) {
if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll);

@@ -4122,29 +4142,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
if (has_edp_encoder) {
if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
} else {
/* enable FDI RX PLL too */
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
POSTING_READ(reg);
udelay(200);
/* enable FDI TX PLL too */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
/* enable FDI RX PCDCLK */
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
I915_WRITE(reg, temp | FDI_PCDCLK);
POSTING_READ(reg);
udelay(200);
}
}

@@ -4153,7 +4152,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
intel_wait_for_vblank(dev, pipe);
if (IS_IRONLAKE(dev)) {
if (IS_GEN5(dev)) {
/* enable address swizzle for tiling buffer */
temp = I915_READ(DISP_ARB_CTL);
I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
@@ -4992,11 +4991,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
spin_unlock_irqrestore(&dev->event_lock, flags);
obj_priv = to_intel_bo(work->pending_flip_obj);
/* Initial scanout buffer will have a 0 pending flip count */
if ((atomic_read(&obj_priv->pending_flip) == 0) ||
atomic_dec_and_test(&obj_priv->pending_flip))
obj_priv = to_intel_bo(work->old_fb_obj);
atomic_clear_mask(1 << intel_crtc->plane,
&obj_priv->pending_flip.counter);
if (atomic_read(&obj_priv->pending_flip) == 0)
wake_up(&dev_priv->pending_flip_queue);
schedule_work(&work->work);

@@ -5092,9 +5090,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret)
goto cleanup_objs;
obj_priv = to_intel_bo(obj);
atomic_inc(&obj_priv->pending_flip);
/* Block clients from rendering to the new back buffer until
* the flip occurs and the object is no longer visible.
*/
atomic_add(1 << intel_crtc->plane,
&to_intel_bo(work->old_fb_obj)->pending_flip);
work->pending_flip_obj = obj;
obj_priv = to_intel_bo(obj);
if (IS_GEN3(dev) || IS_GEN2(dev)) {
u32 flip_mask;

@@ -5736,7 +5739,7 @@ void intel_init_clock_gating(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
if (IS_IRONLAKE(dev)) {
if (IS_GEN5(dev)) {
/* Required for FBC */
dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
/* Required for CxSR */

@@ -5749,6 +5752,13 @@ void intel_init_clock_gating(struct drm_device *dev)
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
/*
* According to the spec the following bits should be set in
* order to enable memory self-refresh

@@ -5756,7 +5766,7 @@ void intel_init_clock_gating(struct drm_device *dev)
* The bit 5 of 0x42020
* The bit 15 of 0x45000
*/
if (IS_IRONLAKE(dev)) {
if (IS_GEN5(dev)) {
I915_WRITE(ILK_DISPLAY_CHICKEN2,
(I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));

@@ -5932,7 +5942,7 @@ static void intel_init_display(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
if (IS_IRONLAKE(dev)) {
if (IS_GEN5(dev)) {
if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
dev_priv->display.update_wm = ironlake_update_wm;
else {

@@ -6131,6 +6141,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
drm_kms_helper_poll_fini(dev);
mutex_lock(&dev->struct_mutex);
intel_unregister_dsm_handler();
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
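The page-flip hunks above replace a single pending-flip counter with one bit per display plane packed into the old framebuffer object's atomic counter, so flips on different planes away from the same buffer can complete independently. A minimal stand-alone sketch of that bookkeeping pattern, using C11 atomics rather than the kernel's atomic_t helpers; all names here are illustrative, not the driver's:

#include <stdatomic.h>
#include <stdbool.h>

/* One bit per plane: set when a flip away from the buffer is queued,
 * cleared when that plane's flip completes. */
static atomic_uint pending_flip;

static void queue_flip(unsigned int plane)
{
	atomic_fetch_or(&pending_flip, 1u << plane);
}

/* Returns true once no plane has a flip outstanding, i.e. waiters may run. */
static bool complete_flip(unsigned int plane)
{
	unsigned int old = atomic_fetch_and(&pending_flip, ~(1u << plane));
	return (old & ~(1u << plane)) == 0;
}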
@@ -42,15 +42,13 @@
#define DP_LINK_CONFIGURATION_SIZE 9
#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP)
#define IS_PCH_eDP(i) ((i)->is_pch_edp)
struct intel_dp {
struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
int force_audio;
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;

@@ -60,8 +58,35 @@ struct intel_dp {
bool is_pch_edp;
uint8_t train_set[4];
uint8_t link_status[DP_LINK_STATUS_SIZE];
struct drm_property *force_audio_property;
};
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
*
* If a CPU or PCH DP output is attached to an eDP panel, this function
* will return true, and false otherwise.
*/
static bool is_edp(struct intel_dp *intel_dp)
{
return intel_dp->base.type == INTEL_OUTPUT_EDP;
}
/**
* is_pch_edp - is the port on the PCH and attached to an eDP panel?
* @intel_dp: DP struct
*
* Returns true if the given DP struct corresponds to a PCH DP port attached
* to an eDP panel, false otherwise. Helpful for determining whether we
* may need FDI resources for a given DP output or not.
*/
static bool is_pch_edp(struct intel_dp *intel_dp)
{
return intel_dp->is_pch_edp;
}
static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_dp, base.base);

@@ -73,6 +98,25 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
struct intel_dp, base);
}
/**
* intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
* @encoder: DRM encoder
*
* Return true if @encoder corresponds to a PCH attached eDP panel. Needed
* by intel_display.c.
*/
bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp;
if (!encoder)
return false;
intel_dp = enc_to_intel_dp(encoder);
return is_pch_edp(intel_dp);
}
static void intel_dp_start_link_train(struct intel_dp *intel_dp);
static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
static void intel_dp_link_down(struct intel_dp *intel_dp);

@@ -138,7 +182,7 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
if (is_edp(intel_dp))
return (pixel_clock * dev_priv->edp.bpp + 7) / 8;
else
return pixel_clock * 3;

@@ -160,8 +204,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
dev_priv->panel_fixed_mode) {
if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
return MODE_PANEL;

@@ -171,7 +214,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
/* only refuse the mode on non eDP since we have seen some wierd eDP panels
which are outside spec tolerances but somehow work by magic */
if (!IS_eDP(intel_dp) &&
if (!is_edp(intel_dp) &&
(intel_dp_link_required(connector->dev, intel_dp, mode->clock)
> intel_dp_max_data_rate(max_link_clock, max_lanes)))
return MODE_CLOCK_HIGH;

@@ -258,7 +301,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* Note that PCH attached eDP panels should use a 125MHz input
* clock divider.
*/
if (IS_eDP(intel_dp) && !IS_PCH_eDP(intel_dp)) {
if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
if (IS_GEN6(dev))
aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
else

@@ -530,8 +573,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
dev_priv->panel_fixed_mode) {
if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
mode, adjusted_mode);

@@ -542,6 +584,17 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
mode->clock = dev_priv->panel_fixed_mode->clock;
}
/* Just use VBT values for eDP */
if (is_edp(intel_dp)) {
intel_dp->lane_count = dev_priv->edp.lanes;
intel_dp->link_bw = dev_priv->edp.rate;
adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n",
intel_dp->link_bw, intel_dp->lane_count,
adjusted_mode->clock);
return true;
}
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);

@@ -560,19 +613,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
/* okay we failed just pick the highest */
intel_dp->lane_count = max_lane_count;
intel_dp->link_bw = bws[max_clock];
adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
"count %d clock %d\n",
intel_dp->link_bw, intel_dp->lane_count,
adjusted_mode->clock);
return true;
}
return false;
}
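The intel_dp_mode_fixup changes above keep the same selection rule for external DP: walk lane counts and link rates from cheapest to fastest and take the first combination whose payload capacity covers the mode. A rough stand-alone sketch of that rule; the 162/270 MHz symbol clocks, the 4-lane maximum and 3 bytes per pixel are assumptions matching the non-eDP path, and the helper names are made up:

#include <stdbool.h>

/* Usable DP payload rate in kB/s: one byte per symbol per lane, 8b/10b coded. */
static int dp_max_data_rate(int link_clock_khz, int lanes)
{
	return (link_clock_khz * lanes * 8) / 10;
}

/* Pick the smallest (lanes, link clock) pair that can carry the mode. */
static bool pick_dp_link_config(int pixel_clock_khz, int *lanes_out, int *clock_out)
{
	static const int link_clocks_khz[] = { 162000, 270000 };
	int required = pixel_clock_khz * 3;	/* 24bpp framebuffer */
	int lanes, i;

	for (lanes = 1; lanes <= 4; lanes <<= 1)
		for (i = 0; i < 2; i++)
			if (dp_max_data_rate(link_clocks_khz[i], lanes) >= required) {
				*lanes_out = lanes;
				*clock_out = link_clocks_khz[i];
				return true;
			}
	return false;
}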
@@ -609,25 +649,6 @@ intel_dp_compute_m_n(int bpp,
intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
bool intel_pch_has_edp(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
struct intel_dp *intel_dp;
if (encoder->crtc != crtc)
continue;
intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
return intel_dp->is_pch_edp;
}
return false;
}
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)

@@ -652,8 +673,10 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = intel_dp->lane_count;
if (IS_PCH_eDP(intel_dp))
bpp = dev_priv->edp.bpp;
break;
} else if (is_edp(intel_dp)) {
lane_count = dev_priv->edp.lanes;
bpp = dev_priv->edp.bpp;
break;
}
}

@@ -720,7 +743,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
intel_dp->DP |= DP_SYNC_VS_HIGH;
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
else
intel_dp->DP |= DP_LINK_TRAIN_OFF;

@@ -755,7 +778,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
intel_dp->DP |= DP_PIPEB_SELECT;
if (IS_eDP(intel_dp)) {
if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
/* don't miss out required setting for eDP */
intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)

@@ -766,10 +789,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
/* Returns true if the panel was already on when called */
static bool ironlake_edp_panel_on (struct drm_device *dev)
static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
if (I915_READ(PCH_PP_STATUS) & PP_ON)
return true;

@@ -781,19 +805,20 @@ static bool ironlake_edp_panel_on (struct drm_device *dev)
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
pp |= POWER_TARGET_ON;
pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
/* Ouch. We need to wait here for some panels, like Dell e6510
* https://bugs.freedesktop.org/show_bug.cgi?id=29278i
*/
msleep(300);
if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000))
if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
5000))
DRM_ERROR("panel on wait timed out: 0x%08x\n",
I915_READ(PCH_PP_STATUS));
pp &= ~(PANEL_UNLOCK_REGS);
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);

@@ -804,7 +829,8 @@ static bool ironlake_edp_panel_on (struct drm_device *dev)
static void ironlake_edp_panel_off (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
pp = I915_READ(PCH_PP_CONTROL);

@@ -815,12 +841,12 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
pp &= ~POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000))
if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
DRM_ERROR("panel off wait timed out: 0x%08x\n",
I915_READ(PCH_PP_STATUS));
/* Make sure VDD is enabled so DP AUX will work */
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);

@@ -831,36 +857,19 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
msleep(300);
}
static void ironlake_edp_panel_vdd_on(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
pp = I915_READ(PCH_PP_CONTROL);
pp |= EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
msleep(300);
}
static void ironlake_edp_panel_vdd_off(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
pp = I915_READ(PCH_PP_CONTROL);
pp &= ~EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
msleep(300);
}
static void ironlake_edp_backlight_on (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
DRM_DEBUG_KMS("\n");
/*
* If we enable the backlight right away following a panel power
* on, we may see slight flicker as the panel syncs with the eDP
* link. So delay a bit to make sure the image is solid before
* allowing it to appear.
*/
msleep(300);
pp = I915_READ(PCH_PP_CONTROL);
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);

@@ -885,8 +894,10 @@ static void ironlake_edp_pll_on(struct drm_encoder *encoder)
DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_ENABLE;
dpa_ctl |= DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
POSTING_READ(DP_A);
udelay(200);
}
static void ironlake_edp_pll_off(struct drm_encoder *encoder)

@@ -896,7 +907,7 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
u32 dpa_ctl;
dpa_ctl = I915_READ(DP_A);
dpa_ctl |= DP_PLL_ENABLE;
dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
POSTING_READ(DP_A);
udelay(200);
@@ -906,17 +917,16 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
ironlake_edp_panel_off(dev);
if (is_edp(intel_dp)) {
ironlake_edp_backlight_off(dev);
ironlake_edp_panel_vdd_on(dev);
ironlake_edp_pll_on(encoder);
ironlake_edp_panel_on(intel_dp);
if (!is_pch_edp(intel_dp))
ironlake_edp_pll_on(encoder);
else
ironlake_edp_pll_off(encoder);
}
if (dp_reg & DP_PORT_EN)
intel_dp_link_down(intel_dp);
intel_dp_link_down(intel_dp);
}
static void intel_dp_commit(struct drm_encoder *encoder)

@@ -926,14 +936,13 @@ static void intel_dp_commit(struct drm_encoder *encoder)
intel_dp_start_link_train(intel_dp);
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_panel_on(dev);
if (is_edp(intel_dp))
ironlake_edp_panel_on(intel_dp);
intel_dp_complete_link_train(intel_dp);
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
if (is_edp(intel_dp))
ironlake_edp_backlight_on(dev);
intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
}
static void

@@ -945,23 +954,22 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
if (is_edp(intel_dp))
ironlake_edp_backlight_off(dev);
intel_dp_link_down(intel_dp);
if (is_edp(intel_dp))
ironlake_edp_panel_off(dev);
}
if (dp_reg & DP_PORT_EN)
intel_dp_link_down(intel_dp);
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
ironlake_edp_pll_off(encoder);
} else {
if (is_edp(intel_dp))
ironlake_edp_panel_on(intel_dp);
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_start_link_train(intel_dp);
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_panel_on(dev);
intel_dp_complete_link_train(intel_dp);
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_backlight_on(dev);
}
if (is_edp(intel_dp))
ironlake_edp_backlight_on(dev);
}
intel_dp->dpms_mode = mode;
}

@@ -1079,11 +1087,21 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
}
static uint32_t
intel_dp_signal_levels(uint8_t train_set, int lane_count)
intel_dp_signal_levels(struct intel_dp *intel_dp)
{
uint32_t signal_levels = 0;
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t signal_levels = 0;
u8 train_set = intel_dp->train_set[0];
u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
if (is_edp(intel_dp)) {
vswing = dev_priv->edp.vswing;
preemphasis = dev_priv->edp.preemphasis;
}
switch (vswing) {
case DP_TRAIN_VOLTAGE_SWING_400:
default:
signal_levels |= DP_VOLTAGE_0_4;

@@ -1098,7 +1116,7 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
signal_levels |= DP_VOLTAGE_1_2;
break;
}
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
switch (preemphasis) {
case DP_TRAIN_PRE_EMPHASIS_0:
default:
signal_levels |= DP_PRE_EMPHASIS_0;

@@ -1184,6 +1202,18 @@ intel_channel_eq_ok(struct intel_dp *intel_dp)
return true;
}
static bool
intel_dp_aux_handshake_required(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (is_edp(intel_dp) && dev_priv->no_aux_handshake)
return false;
return true;
}
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,

@@ -1196,6 +1226,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg);
if (!intel_dp_aux_handshake_required(intel_dp))
return true;
intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET,
dp_train_pat);

@@ -1228,13 +1261,14 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
POSTING_READ(intel_dp->output_reg);
intel_wait_for_vblank(dev, intel_crtc->pipe);
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
intel_dp->link_configuration,
DP_LINK_CONFIGURATION_SIZE);
if (intel_dp_aux_handshake_required(intel_dp))
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
intel_dp->link_configuration,
DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;

@@ -1245,15 +1279,15 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
if (IS_GEN6(dev) && is_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
signal_levels = intel_dp_signal_levels(intel_dp);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1263,33 +1297,37 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
break;
/* Set training pattern 1 */
udelay(100);
if (!intel_dp_get_link_status(intel_dp))
udelay(500);
if (intel_dp_aux_handshake_required(intel_dp)) {
break;
} else {
if (!intel_dp_get_link_status(intel_dp))
break;
if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
clock_recovery = true;
break;
if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
clock_recovery = true;
break;
}
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count)
break;
/* Check to see if we've tried the same voltage 5 times */
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++tries;
if (tries == 5)
break;
} else
tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new intel_dp->train_set as requested by target */
intel_get_adjust_train(intel_dp);
}
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count)
break;
/* Check to see if we've tried the same voltage 5 times */
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++tries;
if (tries == 5)
break;
} else
tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new intel_dp->train_set as requested by target */
intel_get_adjust_train(intel_dp);
}
intel_dp->DP = DP;

@@ -1312,15 +1350,15 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
if (IS_GEN6(dev) && is_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
signal_levels = intel_dp_signal_levels(intel_dp);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;

@@ -1330,25 +1368,29 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP_TRAINING_PATTERN_2))
break;
udelay(400);
if (!intel_dp_get_link_status(intel_dp))
break;
udelay(500);
if (intel_channel_eq_ok(intel_dp)) {
channel_eq = true;
if (!intel_dp_aux_handshake_required(intel_dp)) {
break;
} else {
if (!intel_dp_get_link_status(intel_dp))
break;
if (intel_channel_eq_ok(intel_dp)) {
channel_eq = true;
break;
}
/* Try 5 times */
if (tries > 5)
break;
/* Compute new intel_dp->train_set as requested by target */
intel_get_adjust_train(intel_dp);
++tries;
}
/* Try 5 times */
if (tries > 5)
break;
/* Compute new intel_dp->train_set as requested by target */
intel_get_adjust_train(intel_dp);
++tries;
}
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;

@@ -1368,14 +1410,14 @@ intel_dp_link_down(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("\n");
if (IS_eDP(intel_dp)) {
if (is_edp(intel_dp)) {
DP &= ~DP_PLL_ENABLE;
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
udelay(100);
}
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) {
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
} else {

@@ -1386,7 +1428,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);
if (IS_eDP(intel_dp))
if (is_edp(intel_dp))
DP |= DP_LINK_TRAIN_OFF;
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);

@@ -1419,48 +1461,34 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
}
static enum drm_connector_status
ironlake_dp_detect(struct drm_connector *connector)
ironlake_dp_detect(struct intel_dp *intel_dp)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
enum drm_connector_status status;
/* Panel needs power for AUX to work */
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_panel_vdd_on(connector->dev);
/* Can't disconnect eDP */
if (is_edp(intel_dp))
return connector_status_connected;
status = connector_status_disconnected;
if (intel_dp_aux_native_read(intel_dp,
0x000, intel_dp->dpcd,
sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
{
sizeof (intel_dp->dpcd))
== sizeof(intel_dp->dpcd)) {
if (intel_dp->dpcd[0] != 0)
status = connector_status_connected;
}
DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_panel_vdd_off(connector->dev);
return status;
}
/**
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
*
* \return true if DP port is connected.
* \return false if DP port is disconnected.
*/
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
g4x_dp_detect(struct intel_dp *intel_dp)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t temp, bit;
enum drm_connector_status status;
intel_dp->has_audio = false;
if (HAS_PCH_SPLIT(dev))
return ironlake_dp_detect(connector);
uint32_t temp, bit;
switch (intel_dp->output_reg) {
case DP_B:
@@ -1482,14 +1510,51 @@ intel_dp_detect(struct drm_connector *connector, bool force)
return connector_status_disconnected;
status = connector_status_disconnected;
if (intel_dp_aux_native_read(intel_dp,
0x000, intel_dp->dpcd,
if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
{
if (intel_dp->dpcd[0] != 0)
status = connector_status_connected;
}
return status;
return bit;
}
/**
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
*
* \return true if DP port is connected.
* \return false if DP port is disconnected.
*/
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_device *dev = intel_dp->base.base.dev;
enum drm_connector_status status;
struct edid *edid = NULL;
intel_dp->has_audio = false;
if (HAS_PCH_SPLIT(dev))
status = ironlake_dp_detect(intel_dp);
else
status = g4x_dp_detect(intel_dp);
if (status != connector_status_connected)
return status;
if (intel_dp->force_audio) {
intel_dp->has_audio = intel_dp->force_audio > 0;
} else {
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
intel_dp->has_audio = drm_detect_monitor_audio(edid);
connector->display_info.raw_edid = NULL;
kfree(edid);
}
}
return connector_status_connected;
}
static int intel_dp_get_modes(struct drm_connector *connector)

@@ -1504,8 +1569,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
ret = intel_ddc_get_modes(connector, &intel_dp->adapter);
if (ret) {
if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
!dev_priv->panel_fixed_mode) {
if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) {
struct drm_display_mode *newmode;
list_for_each_entry(newmode, &connector->probed_modes,
head) {

@@ -1521,7 +1585,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
}
/* if eDP has no EDID, try to use fixed panel mode from VBT */
if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
if (is_edp(intel_dp)) {
if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);

@@ -1532,6 +1596,46 @@ static int intel_dp_get_modes(struct drm_connector *connector)
return 0;
}
static int
intel_dp_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
int ret;
ret = drm_connector_property_set_value(connector, property, val);
if (ret)
return ret;
if (property == intel_dp->force_audio_property) {
if (val == intel_dp->force_audio)
return 0;
intel_dp->force_audio = val;
if (val > 0 && intel_dp->has_audio)
return 0;
if (val < 0 && !intel_dp->has_audio)
return 0;
intel_dp->has_audio = val > 0;
goto done;
}
return -EINVAL;
done:
if (intel_dp->base.base.crtc) {
struct drm_crtc *crtc = intel_dp->base.base.crtc;
drm_crtc_helper_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y,
crtc->fb);
}
return 0;
}
static void
intel_dp_destroy (struct drm_connector *connector)
{

@@ -1561,6 +1665,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
.destroy = intel_dp_destroy,
};

@@ -1625,6 +1730,20 @@ bool intel_dpd_is_edp(struct drm_device *dev)
return false;
}
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
intel_dp->force_audio_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
if (intel_dp->force_audio_property) {
intel_dp->force_audio_property->values[0] = -1;
intel_dp->force_audio_property->values[1] = 1;
drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
}
}
void
intel_dp_init(struct drm_device *dev, int output_reg)
{

@@ -1651,7 +1770,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
if (intel_dpd_is_edp(dev))
intel_dp->is_pch_edp = true;
if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
if (output_reg == DP_A || is_pch_edp(intel_dp)) {
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else {

@@ -1672,7 +1791,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
if (IS_eDP(intel_dp))
if (is_edp(intel_dp))
intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);

@@ -1717,9 +1836,29 @@ intel_dp_init(struct drm_device *dev, int output_reg)
intel_dp_i2c_init(intel_dp, intel_connector, name);
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
int ret;
bool was_on;
was_on = ironlake_edp_panel_on(intel_dp);
ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
intel_dp->dpcd,
sizeof(intel_dp->dpcd));
if (ret == sizeof(intel_dp->dpcd)) {
if (intel_dp->dpcd[0] >= 0x11)
dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
} else {
DRM_ERROR("failed to retrieve link info\n");
}
if (!was_on)
ironlake_edp_panel_off(dev);
}
intel_encoder->hot_plug = intel_dp_hot_plug;
if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
if (is_edp(intel_dp)) {
/* initialize panel mode from VBT if available for eDP */
if (dev_priv->lfp_lvds_vbt_mode) {
dev_priv->panel_fixed_mode =

@@ -1731,6 +1870,8 @@ intel_dp_init(struct drm_device *dev, int output_reg)
}
}
intel_dp_add_properties(intel_dp, connector);
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
@@ -178,6 +178,38 @@ struct intel_crtc {
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define DIP_TYPE_AVI 0x82
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
struct dip_infoframe {
uint8_t type; /* HB0 */
uint8_t ver; /* HB1 */
uint8_t len; /* HB2 - body len, not including checksum */
uint8_t ecc; /* Header ECC */
uint8_t checksum; /* PB0 */
union {
struct {
/* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
uint8_t Y_A_B_S;
/* PB2 - C 7:6, M 5:4, R 3:0 */
uint8_t C_M_R;
/* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
uint8_t ITC_EC_Q_SC;
/* PB4 - VIC 6:0 */
uint8_t VIC;
/* PB5 - PR 3:0 */
uint8_t PR;
/* PB6 to PB13 */
uint16_t top_bar_end;
uint16_t bottom_bar_start;
uint16_t left_bar_end;
uint16_t right_bar_start;
} avi;
uint8_t payload[27];
} __attribute__ ((packed)) body;
} __attribute__((packed));
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{

@@ -200,6 +232,7 @@ extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);

@@ -209,9 +242,9 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern bool intel_pch_has_edp(struct drm_crtc *crtc);
extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
@@ -225,7 +225,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,
drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj) {
drm_gem_object_unreference(ifb->obj);
drm_gem_object_unreference_unlocked(ifb->obj);
ifb->obj = NULL;
}
}
@@ -42,6 +42,9 @@ struct intel_hdmi {
u32 sdvox_reg;
int ddc_bus;
bool has_hdmi_sink;
bool has_audio;
int force_audio;
struct drm_property *force_audio_property;
};
static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)

@@ -55,6 +58,60 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
struct intel_hdmi, base);
}
void intel_dip_infoframe_csum(struct dip_infoframe *avi_if)
{
uint8_t *data = (uint8_t *)avi_if;
uint8_t sum = 0;
unsigned i;
avi_if->checksum = 0;
avi_if->ecc = 0;
for (i = 0; i < sizeof(*avi_if); i++)
sum += data[i];
avi_if->checksum = 0x100 - sum;
}
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
.ver = DIP_VERSION_AVI,
.len = DIP_LEN_AVI,
};
uint32_t *data = (uint32_t *)&avi_if;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 port;
unsigned i;
if (!intel_hdmi->has_hdmi_sink)
return;
/* XXX first guess at handling video port, is this corrent? */
if (intel_hdmi->sdvox_reg == SDVOB)
port = VIDEO_DIP_PORT_B;
else if (intel_hdmi->sdvox_reg == SDVOC)
port = VIDEO_DIP_PORT_C;
else
return;
I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC);
intel_dip_infoframe_csum(&avi_if);
for (i = 0; i < sizeof(avi_if); i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC |
VIDEO_DIP_ENABLE_AVI);
}
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)

@@ -72,10 +129,13 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
if (intel_hdmi->has_hdmi_sink) {
/* Required on CPT */
if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
sdvox |= HDMI_MODE_SELECT;
if (intel_hdmi->has_audio) {
sdvox |= SDVO_AUDIO_ENABLE;
if (HAS_PCH_CPT(dev))
sdvox |= HDMI_MODE_SELECT;
sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
}
if (intel_crtc->pipe == 1) {

@@ -87,6 +147,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
intel_hdmi_set_avi_infoframe(encoder);
}
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)

@@ -154,6 +216,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status = connector_status_disconnected;
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
edid = drm_get_edid(connector,
&dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);

@@ -161,11 +224,17 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
}
connector->display_info.raw_edid = NULL;
kfree(edid);
}
if (status == connector_status_connected) {
if (intel_hdmi->force_audio)
intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
}
return status;
}

@@ -182,6 +251,46 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
&dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
}
static int
intel_hdmi_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
int ret;
ret = drm_connector_property_set_value(connector, property, val);
if (ret)
return ret;
if (property == intel_hdmi->force_audio_property) {
if (val == intel_hdmi->force_audio)
return 0;
intel_hdmi->force_audio = val;
if (val > 0 && intel_hdmi->has_audio)
return 0;
if (val < 0 && !intel_hdmi->has_audio)
return 0;
intel_hdmi->has_audio = val > 0;
goto done;
}
return -EINVAL;
done:
if (intel_hdmi->base.base.crtc) {
struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
drm_crtc_helper_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y,
crtc->fb);
}
return 0;
}
static void intel_hdmi_destroy(struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);

@@ -201,6 +310,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = intel_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_hdmi_set_property,
.destroy = intel_hdmi_destroy,
};

@@ -214,6 +324,20 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
.destroy = intel_encoder_destroy,
};
static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
intel_hdmi->force_audio_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
if (intel_hdmi->force_audio_property) {
intel_hdmi->force_audio_property->values[0] = -1;
intel_hdmi->force_audio_property->values[1] = 1;
drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
}
}
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;

@@ -275,6 +399,8 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
intel_hdmi_add_properties(intel_hdmi, connector);
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
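The intel_dip_infoframe_csum function added above implements the standard HDMI infoframe checksum rule: the checksum byte is chosen so that every header and body byte of the packet sums to zero modulo 256. A stand-alone sketch of the same rule over a flat byte buffer (simplified; the driver operates on its packed struct dip_infoframe instead, and the buffer layout here is only illustrative):

#include <stdint.h>
#include <stddef.h>

/* Set frame[checksum_index] so that all bytes of the frame sum to 0 (mod 256). */
static void infoframe_checksum(uint8_t *frame, size_t len, size_t checksum_index)
{
	uint8_t sum = 0;
	size_t i;

	frame[checksum_index] = 0;
	for (i = 0; i < len; i++)
		sum += frame[i];
	frame[checksum_index] = (uint8_t)(0x100 - sum);
}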
@@ -155,6 +155,7 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
GPIOC,
GPIOD,
GPIOE,
0,
GPIOF,
};
struct intel_gpio *gpio;
@@ -119,12 +119,12 @@ render_ring_flush(struct drm_device *dev,
        }
}

static void ring_set_tail(struct drm_device *dev,
        struct intel_ring_buffer *ring,
        u32 value)
static void ring_write_tail(struct drm_device *dev,
        struct intel_ring_buffer *ring,
        u32 value)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        I915_WRITE_TAIL(ring, ring->tail);
        I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct drm_device *dev,

@@ -148,7 +148,7 @@ static int init_ring_common(struct drm_device *dev,
        /* Stop the ring if it's running. */
        I915_WRITE_CTL(ring, 0);
        I915_WRITE_HEAD(ring, 0);
        ring->set_tail(dev, ring, 0);
        ring->write_tail(dev, ring, 0);

        /* Initialize the ring. */
        I915_WRITE_START(ring, obj_priv->gtt_offset);

@@ -383,9 +383,9 @@ static int init_bsd_ring(struct drm_device *dev,
}

static u32
bsd_ring_add_request(struct drm_device *dev,
        struct intel_ring_buffer *ring,
        u32 flush_domains)
ring_add_request(struct drm_device *dev,
        struct intel_ring_buffer *ring,
        u32 flush_domains)
{
        u32 seqno;

@@ -418,18 +418,18 @@ bsd_ring_put_user_irq(struct drm_device *dev,
}

static u32
bsd_ring_get_seqno(struct drm_device *dev,
        struct intel_ring_buffer *ring)
ring_status_page_get_seqno(struct drm_device *dev,
        struct intel_ring_buffer *ring)
{
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static int
bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
        struct intel_ring_buffer *ring,
        struct drm_i915_gem_execbuffer2 *exec,
        struct drm_clip_rect *cliprects,
        uint64_t exec_offset)
ring_dispatch_gem_execbuffer(struct drm_device *dev,
        struct intel_ring_buffer *ring,
        struct drm_i915_gem_execbuffer2 *exec,
        struct drm_clip_rect *cliprects,
        uint64_t exec_offset)
{
        uint32_t exec_start;
        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;

@@ -441,7 +441,6 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
        return 0;
}

static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
        struct intel_ring_buffer *ring,

@@ -476,7 +475,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                intel_ring_emit(dev, ring, exec_start + exec_len - 4);
                intel_ring_emit(dev, ring, 0);
        } else {
                intel_ring_begin(dev, ring, 4);
                intel_ring_begin(dev, ring, 2);
                if (INTEL_INFO(dev)->gen >= 4) {
                        intel_ring_emit(dev, ring,
                                        MI_BATCH_BUFFER_START | (2 << 6)

@@ -492,7 +491,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                intel_ring_advance(dev, ring);
        }

        if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
        if (IS_G4X(dev) || IS_GEN5(dev)) {
                intel_ring_begin(dev, ring, 2);
                intel_ring_emit(dev, ring, MI_FLUSH |
                                MI_NO_WRITE_FLUSH |

@@ -581,6 +580,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        INIT_LIST_HEAD(&ring->gpu_write_list);

        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(dev, ring);

@@ -707,7 +707,7 @@ int intel_wait_ring_buffer(struct drm_device *dev,
                        master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
                }

                yield();
                msleep(1);
        } while (!time_after(jiffies, end));
        trace_i915_ring_wait_end (dev);
        return -EBUSY;

@@ -730,22 +730,7 @@ void intel_ring_advance(struct drm_device *dev,
        struct intel_ring_buffer *ring)
{
        ring->tail &= ring->size - 1;
        ring->set_tail(dev, ring, ring->tail);
}

void intel_fill_struct(struct drm_device *dev,
        struct intel_ring_buffer *ring,
        void *data,
        unsigned int len)
{
        unsigned int *virt = ring->virtual_start + ring->tail;
        BUG_ON((len&~(4-1)) != 0);
        intel_ring_begin(dev, ring, len/4);
        memcpy(virt, data, len);
        ring->tail += len;
        ring->tail &= ring->size - 1;
        ring->space -= len;
        intel_ring_advance(dev, ring);
        ring->write_tail(dev, ring, ring->tail);
}

static const struct intel_ring_buffer render_ring = {

@@ -754,7 +739,7 @@ static const struct intel_ring_buffer render_ring = {
        .mmio_base = RENDER_RING_BASE,
        .size = 32 * PAGE_SIZE,
        .init = init_render_ring,
        .set_tail = ring_set_tail,
        .write_tail = ring_write_tail,
        .flush = render_ring_flush,
        .add_request = render_ring_add_request,
        .get_seqno = render_ring_get_seqno,

@@ -771,19 +756,19 @@ static const struct intel_ring_buffer bsd_ring = {
        .mmio_base = BSD_RING_BASE,
        .size = 32 * PAGE_SIZE,
        .init = init_bsd_ring,
        .set_tail = ring_set_tail,
        .write_tail = ring_write_tail,
        .flush = bsd_ring_flush,
        .add_request = bsd_ring_add_request,
        .get_seqno = bsd_ring_get_seqno,
        .add_request = ring_add_request,
        .get_seqno = ring_status_page_get_seqno,
        .user_irq_get = bsd_ring_get_user_irq,
        .user_irq_put = bsd_ring_put_user_irq,
        .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
        .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
};

static void gen6_bsd_ring_set_tail(struct drm_device *dev,
        struct intel_ring_buffer *ring,
        u32 value)
static void gen6_bsd_ring_write_tail(struct drm_device *dev,
        struct intel_ring_buffer *ring,
        u32 value)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

@@ -804,10 +789,10 @@ static void gen6_bsd_ring_set_tail(struct drm_device *dev,
                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static void gen6_bsd_ring_flush(struct drm_device *dev,
        struct intel_ring_buffer *ring,
        u32 invalidate_domains,
        u32 flush_domains)
static void gen6_ring_flush(struct drm_device *dev,
        struct intel_ring_buffer *ring,
        u32 invalidate_domains,
        u32 flush_domains)
{
        intel_ring_begin(dev, ring, 4);
        intel_ring_emit(dev, ring, MI_FLUSH_DW);

@@ -818,11 +803,11 @@ static void gen6_bsd_ring_flush(struct drm_device *dev,
}

static int
gen6_bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
        struct intel_ring_buffer *ring,
        struct drm_i915_gem_execbuffer2 *exec,
        struct drm_clip_rect *cliprects,
        uint64_t exec_offset)
gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
        struct intel_ring_buffer *ring,
        struct drm_i915_gem_execbuffer2 *exec,
        struct drm_clip_rect *cliprects,
        uint64_t exec_offset)
{
        uint32_t exec_start;

@@ -845,13 +830,43 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
        .mmio_base = GEN6_BSD_RING_BASE,
        .size = 32 * PAGE_SIZE,
        .init = init_bsd_ring,
        .set_tail = gen6_bsd_ring_set_tail,
        .flush = gen6_bsd_ring_flush,
        .add_request = bsd_ring_add_request,
        .get_seqno = bsd_ring_get_seqno,
        .write_tail = gen6_bsd_ring_write_tail,
        .flush = gen6_ring_flush,
        .add_request = ring_add_request,
        .get_seqno = ring_status_page_get_seqno,
        .user_irq_get = bsd_ring_get_user_irq,
        .user_irq_put = bsd_ring_put_user_irq,
        .dispatch_gem_execbuffer = gen6_bsd_ring_dispatch_gem_execbuffer,
        .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
};

/* Blitter support (SandyBridge+) */

static void
blt_ring_get_user_irq(struct drm_device *dev,
        struct intel_ring_buffer *ring)
{
        /* do nothing */
}
static void
blt_ring_put_user_irq(struct drm_device *dev,
        struct intel_ring_buffer *ring)
{
        /* do nothing */
}

static const struct intel_ring_buffer gen6_blt_ring = {
        .name = "blt ring",
        .id = RING_BLT,
        .mmio_base = BLT_RING_BASE,
        .size = 32 * PAGE_SIZE,
        .init = init_ring_common,
        .write_tail = ring_write_tail,
        .flush = gen6_ring_flush,
        .add_request = ring_add_request,
        .get_seqno = ring_status_page_get_seqno,
        .user_irq_get = blt_ring_get_user_irq,
        .user_irq_put = blt_ring_put_user_irq,
        .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
};

int intel_init_render_ring_buffer(struct drm_device *dev)

@@ -881,3 +896,12 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)

        return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        dev_priv->blt_ring = gen6_blt_ring;

        return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
}
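In the ring code above, intel_ring_emit() advances ring->tail by 4 per dword and intel_ring_advance() masks the tail with size - 1 before passing it to the renamed write_tail() hook; the mask only works because the ring size (32 * PAGE_SIZE) is a power of two. A stand-alone toy model of that wrap arithmetic, purely illustrative and not the driver code:

#include <assert.h>
#include <stdint.h>

#define RING_SIZE (32 * 4096)           /* power of two, like 32 * PAGE_SIZE */

struct toy_ring {
        uint32_t buf[RING_SIZE / 4];
        uint32_t tail;                  /* byte offset of the next free dword */
        uint32_t hw_tail;               /* what write_tail() last told the hardware */
};

static void toy_emit(struct toy_ring *ring, uint32_t dword)
{
        ring->buf[ring->tail / 4] = dword;
        ring->tail += 4;                /* like intel_ring_emit() */
}

static void toy_advance(struct toy_ring *ring)
{
        ring->tail &= RING_SIZE - 1;    /* wrap, like intel_ring_advance() */
        ring->hw_tail = ring->tail;     /* stand-in for ring->write_tail() */
}

int main(void)
{
        static struct toy_ring ring = { .tail = RING_SIZE - 4 };

        toy_emit(&ring, 0);             /* last dword before the end of the ring */
        toy_advance(&ring);
        assert(ring.hw_tail == 0);      /* the tail wrapped back to the start */
        return 0;
}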
@@ -22,6 +22,7 @@ struct intel_ring_buffer {
        enum intel_ring_id {
                RING_RENDER = 0x1,
                RING_BSD = 0x2,
                RING_BLT = 0x4,
        } id;
        u32 mmio_base;
        unsigned long size;

@@ -45,9 +46,9 @@ struct intel_ring_buffer {
        int (*init)(struct drm_device *dev,
                struct intel_ring_buffer *ring);

        void (*set_tail)(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                u32 value);
        void (*write_tail)(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                u32 value);
        void (*flush)(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                u32 invalidate_domains,

@@ -81,6 +82,15 @@ struct intel_ring_buffer {
         */
        struct list_head request_list;

        /**
         * List of objects currently pending a GPU write flush.
         *
         * All elements on this list will belong to either the
         * active_list or flushing_list, last_rendering_seqno can
         * be used to differentiate between the two elements.
         */
        struct list_head gpu_write_list;

        /**
         * Do we have some not yet emitted requests outstanding?
         */

@@ -116,10 +126,6 @@ static inline void intel_ring_emit(struct drm_device *dev,
        ring->tail += 4;
}

void intel_fill_struct(struct drm_device *dev,
        struct intel_ring_buffer *ring,
        void *data,
        unsigned int len);
void intel_ring_advance(struct drm_device *dev,
        struct intel_ring_buffer *ring);

@@ -128,6 +134,7 @@ u32 intel_ring_get_seqno(struct drm_device *dev,

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct drm_device *dev,
        struct intel_ring_buffer *ring);
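The ring ids in the header above are powers of two (0x1, 0x2, 0x4), so a set of rings can be carried around as a plain bitmask. A small illustrative sketch under that assumption; the mask helper below is not something this patch introduces:

#include <assert.h>

/* Same values as enum intel_ring_id in intel_ringbuffer.h. */
enum toy_ring_id {
        TOY_RING_RENDER = 0x1,
        TOY_RING_BSD = 0x2,
        TOY_RING_BLT = 0x4,
};

/* Count how many rings are present in a mask of ring-id bits. */
static int ring_count(unsigned int mask)
{
        int n = 0;

        while (mask) {
                n += mask & 1;
                mask >>= 1;
        }
        return n;
}

int main(void)
{
        assert(ring_count(TOY_RING_RENDER | TOY_RING_BLT) == 2);
        return 0;
}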
@@ -107,6 +107,7 @@ struct intel_sdvo {
         * This is set if we treat the device as HDMI, instead of DVI.
         */
        bool is_hdmi;
        bool has_audio;

        /**
         * This is set if we detect output of sdvo device as LVDS and

@@ -119,12 +120,6 @@ struct intel_sdvo {
         */
        struct drm_display_mode *sdvo_lvds_fixed_mode;

        /*
         * supported encoding mode, used to determine whether HDMI is
         * supported
         */
        struct intel_sdvo_encode encode;

        /* DDC bus used by this SDVO encoder */
        uint8_t ddc_bus;

@@ -138,11 +133,15 @@ struct intel_sdvo_connector {
        /* Mark the type of connector */
        uint16_t output_flag;

        int force_audio;

        /* This contains all current supported TV format */
        u8 tv_format_supported[TV_FORMAT_NUM];
        int format_supported_num;
        struct drm_property *tv_format;

        struct drm_property *force_audio_property;

        /* add the property for the SDVO-TV */
        struct drm_property *left;
        struct drm_property *right;

@@ -794,17 +793,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
                mode->flags |= DRM_MODE_FLAG_PVSYNC;
}

static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo,
        struct intel_sdvo_encode *encode)
static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
{
        if (intel_sdvo_get_value(intel_sdvo,
                SDVO_CMD_GET_SUPP_ENCODE,
                encode, sizeof(*encode)))
                return true;
        struct intel_sdvo_encode encode;

        /* non-support means DVI */
        memset(encode, 0, sizeof(*encode));
        return false;
        return intel_sdvo_get_value(intel_sdvo,
                SDVO_CMD_GET_SUPP_ENCODE,
                &encode, sizeof(encode));
}

static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,

@@ -849,115 +844,33 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
}
#endif

static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo,
        int index,
        uint8_t *data, int8_t size, uint8_t tx_rate)
{
        uint8_t set_buf_index[2];

        set_buf_index[0] = index;
        set_buf_index[1] = 0;

        if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
                set_buf_index, 2))
                return false;

        for (; size > 0; size -= 8) {
                if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8))
                        return false;

                data += 8;
        }

        return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
}

static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
{
        uint8_t csum = 0;
        int i;

        for (i = 0; i < size; i++)
                csum += data[i];

        return 0x100 - csum;
}

#define DIP_TYPE_AVI 0x82
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13

struct dip_infoframe {
        uint8_t type;
        uint8_t version;
        uint8_t len;
        uint8_t checksum;
        union {
                struct {
                        /* Packet Byte #1 */
                        uint8_t S:2;
                        uint8_t B:2;
                        uint8_t A:1;
                        uint8_t Y:2;
                        uint8_t rsvd1:1;
                        /* Packet Byte #2 */
                        uint8_t R:4;
                        uint8_t M:2;
                        uint8_t C:2;
                        /* Packet Byte #3 */
                        uint8_t SC:2;
                        uint8_t Q:2;
                        uint8_t EC:3;
                        uint8_t ITC:1;
                        /* Packet Byte #4 */
                        uint8_t VIC:7;
                        uint8_t rsvd2:1;
                        /* Packet Byte #5 */
                        uint8_t PR:4;
                        uint8_t rsvd3:4;
                        /* Packet Byte #6~13 */
                        uint16_t top_bar_end;
                        uint16_t bottom_bar_start;
                        uint16_t left_bar_end;
                        uint16_t right_bar_start;
                } avi;
                struct {
                        /* Packet Byte #1 */
                        uint8_t channel_count:3;
                        uint8_t rsvd1:1;
                        uint8_t coding_type:4;
                        /* Packet Byte #2 */
                        uint8_t sample_size:2; /* SS0, SS1 */
                        uint8_t sample_frequency:3;
                        uint8_t rsvd2:3;
                        /* Packet Byte #3 */
                        uint8_t coding_type_private:5;
                        uint8_t rsvd3:3;
                        /* Packet Byte #4 */
                        uint8_t channel_allocation;
                        /* Packet Byte #5 */
                        uint8_t rsvd4:3;
                        uint8_t level_shift:4;
                        uint8_t downmix_inhibit:1;
                } audio;
                uint8_t payload[28];
        } __attribute__ ((packed)) u;
} __attribute__((packed));

static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
        struct drm_display_mode * mode)
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
{
        struct dip_infoframe avi_if = {
                .type = DIP_TYPE_AVI,
                .version = DIP_VERSION_AVI,
                .ver = DIP_VERSION_AVI,
                .len = DIP_LEN_AVI,
        };
        uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
        uint8_t set_buf_index[2] = { 1, 0 };
        uint64_t *data = (uint64_t *)&avi_if;
        unsigned i;

        avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
                4 + avi_if.len);
        return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if,
                4 + avi_if.len,
                SDVO_HBUF_TX_VSYNC);
        intel_dip_infoframe_csum(&avi_if);

        if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
                set_buf_index, 2))
                return false;

        for (i = 0; i < sizeof(avi_if); i += 8) {
                if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA,
                        data, 8))
                        return false;
                data++;
        }

        return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE,
                &tx_rate, 1);
}

static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)

@@ -1111,7 +1024,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
                return;

        if (intel_sdvo->is_hdmi &&
            !intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
            !intel_sdvo_set_avi_infoframe(intel_sdvo))
                return;

        if (intel_sdvo->is_tv &&

@@ -1150,7 +1063,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
        }
        if (intel_crtc->pipe == 1)
                sdvox |= SDVO_PIPE_B_SELECT;
        if (intel_sdvo->is_hdmi)
        if (intel_sdvo->has_audio)
                sdvox |= SDVO_AUDIO_ENABLE;

        if (INTEL_INFO(dev)->gen >= 4) {

@@ -1476,11 +1389,18 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
                if (edid->input & DRM_EDID_INPUT_DIGITAL) {
                        status = connector_status_connected;
                        intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
                        intel_sdvo->has_audio = drm_detect_monitor_audio(edid);
                }
                connector->display_info.raw_edid = NULL;
                kfree(edid);
        }

        if (status == connector_status_connected) {
                struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
                if (intel_sdvo_connector->force_audio)
                        intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0;
        }

        return status;
}

@@ -1787,6 +1707,21 @@ intel_sdvo_set_property(struct drm_connector *connector,
        if (ret)
                return ret;

        if (property == intel_sdvo_connector->force_audio_property) {
                if (val == intel_sdvo_connector->force_audio)
                        return 0;

                intel_sdvo_connector->force_audio = val;

                if (val > 0 && intel_sdvo->has_audio)
                        return 0;
                if (val < 0 && !intel_sdvo->has_audio)
                        return 0;

                intel_sdvo->has_audio = val > 0;
                goto done;
        }

#define CHECK_PROPERTY(name, NAME) \
        if (intel_sdvo_connector->name == property) { \
                if (intel_sdvo_connector->cur_##name == temp_value) return 0; \

@@ -2013,12 +1948,22 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
}

static bool
intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
{
        return intel_sdvo_set_target_output(intel_sdvo,
                device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
                intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
                &intel_sdvo->is_hdmi, 1);
        int is_hdmi;

        if (!intel_sdvo_check_supp_encode(intel_sdvo))
                return false;

        if (!intel_sdvo_set_target_output(intel_sdvo,
                device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
                return false;

        is_hdmi = 0;
        if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
                return false;

        return !!is_hdmi;
}

static u8

@@ -2078,6 +2023,21 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
        drm_sysfs_connector_add(&connector->base.base);
}

static void
intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
{
        struct drm_device *dev = connector->base.base.dev;

        connector->force_audio_property =
                drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
        if (connector->force_audio_property) {
                connector->force_audio_property->values[0] = -1;
                connector->force_audio_property->values[1] = 1;
                drm_connector_attach_property(&connector->base.base,
                        connector->force_audio_property, 0);
        }
}

static bool
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{

@@ -2104,20 +2064,21 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
        encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
        connector->connector_type = DRM_MODE_CONNECTOR_DVID;

        if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode)
            && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device)
            && intel_sdvo->is_hdmi) {
        if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
                /* enable hdmi encoding mode if supported */
                intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
                intel_sdvo_set_colorimetry(intel_sdvo,
                        SDVO_COLORIMETRY_RGB256);
                connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
                intel_sdvo->is_hdmi = true;
        }
        intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
                (1 << INTEL_ANALOG_CLONE_BIT));

        intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);

        intel_sdvo_add_hdmi_properties(intel_sdvo_connector);

        return true;
}
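The removed intel_sdvo_calc_hbuf_csum() above and its replacement intel_dip_infoframe_csum() both follow the usual HDMI infoframe rule: the checksum byte is chosen so that every byte of the header and payload sums to zero modulo 256. A stand-alone sketch of that arithmetic (illustrative only, not the kernel helper):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Pick the checksum byte so that the sum of every byte in the packet
 * (header + checksum slot + payload) is 0 modulo 256. */
static uint8_t infoframe_csum(const uint8_t *data, size_t size)
{
        uint8_t sum = 0;
        size_t i;

        for (i = 0; i < size; i++)
                sum += data[i];
        return (uint8_t)(0x100 - sum);
}

int main(void)
{
        uint8_t frame[17] = { 0x82, 0x02, 0x0d };  /* AVI header: type, version, length */
        uint8_t sum = 0;
        size_t i;

        /* frame[3] is the checksum slot and starts at zero, so it does not
         * perturb the sum; fill it with the computed checksum. */
        frame[3] = infoframe_csum(frame, sizeof(frame));

        for (i = 0; i < sizeof(frame); i++)
                sum += frame[i];
        assert(sum == 0);                          /* whole packet sums to 0 mod 256 */
        return 0;
}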
@@ -763,6 +763,7 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
        void *data, struct drm_file *file_priv);
extern bool drm_detect_hdmi_monitor(struct edid *edid);
extern bool drm_detect_monitor_audio(struct edid *edid);
extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
        void *data, struct drm_file *file_priv);
extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
@@ -23,6 +23,9 @@
#ifndef _DRM_DP_HELPER_H_
#define _DRM_DP_HELPER_H_

#include <linux/types.h>
#include <linux/i2c.h>

/* From the VESA DisplayPort spec */

#define AUX_NATIVE_WRITE 0x8
@@ -286,6 +286,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_PAGEFLIPPING 8
#define I915_PARAM_HAS_EXECBUF2 9
#define I915_PARAM_HAS_BSD 10
#define I915_PARAM_HAS_BLT 11

typedef struct drm_i915_getparam {
        int param;

@@ -627,8 +628,11 @@ struct drm_i915_gem_execbuffer2 {
        __u32 num_cliprects;
        /** This is a struct drm_clip_rect *cliprects */
        __u64 cliprects_ptr;
#define I915_EXEC_RING_MASK (7<<0)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (1<<1)
#define I915_EXEC_BSD (2<<0)
#define I915_EXEC_BLT (3<<0)
        __u64 flags;
        __u64 rsvd1;
        __u64 rsvd2;
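Bits 2:0 of the execbuffer2 flags now form a ring-select field rather than independent flag bits, which is why I915_EXEC_BSD changes from (1<<1) to (2<<0) and I915_EXEC_BLT arrives as (3<<0). A sketch of how such a field would be decoded (illustrative; the in-kernel dispatch path is not part of this hunk):

#include <stdio.h>

#define I915_EXEC_RING_MASK (7<<0)
#define I915_EXEC_DEFAULT   (0<<0)
#define I915_EXEC_RENDER    (1<<0)
#define I915_EXEC_BSD       (2<<0)
#define I915_EXEC_BLT       (3<<0)

/* Map the ring-select field of the execbuffer2 flags to a name. */
static const char *ring_name(unsigned long long flags)
{
        switch (flags & I915_EXEC_RING_MASK) {
        case I915_EXEC_DEFAULT:
        case I915_EXEC_RENDER:
                return "render";
        case I915_EXEC_BSD:
                return "bsd";
        case I915_EXEC_BLT:
                return "blt";
        default:
                return "invalid";
        }
}

int main(void)
{
        printf("%s\n", ring_name(I915_EXEC_BLT));  /* prints "blt" */
        return 0;
}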