Merge branch 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Daniel writes:
A few leftover fixes for 3.8:
- VIC support for hdmi infoframes with the associated drm helper, fixes some black TVs (Paulo Zanoni)
- Modeset state check (and fixup if the BIOS messed with the hw) for lid-open. modeset-rework fallout. Somehow the original reporter went awol, so this stalled for way too long until we've found a new victim^Wreporter with broken BIOS.
- seqno wrap fixes from Mika and Chris.
- Some minor fixes all over from various people.
- Another race fix in the pageflip vs. unpin code from Chris.
- hsw vga resume support and a few more fdi link fixes (only used for vga on hsw) from Paulo.
- Regression fix for DMAR from Zhenyu Wang - I've scavenged memory from my DMAR for a while and it broke right away :(
- Regression fix from Takashi Iwai for ivb lvds - some w/a needs to be (partially) moved back into place. Note that these are regressions in -next.
- One more fix for ivb 3 pipe support - it now actually seems to work.

* 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel: (25 commits)
  drm/i915: Fix missed needs_dmar setting
  drm/i915: Fix shifted screen on top of LVDS on IVY laptop
  drm/i915: disable cpt phase pointer fdi rx workaround
  drm/i915: set the LPT FDI RX polarity reversal bit when needed
  drm/i915: add lpt_init_pch_refclk
  drm/i915: add support for mPHY destination on intel_sbi_{read, write}
  drm/i915: reject modes the LPT FDI receiver can't handle
  drm/i915: fix hsw_fdi_link_train "retry" code
  drm/i915: Close race between processing unpin task and queueing the flip
  drm/i915: fixup l3 parity sysfs access check
  drm/i915: Clear the existing watermarks for g4x when modifying the cursor sr
  drm/i915: do not access BLC_PWM_CTL2 on pre-gen4 hardware
  drm/i915: Don't allow ring tail to reach the same cacheline as head
  drm/i915: Decouple the object from the unbound list before freeing pages
  drm/i915: Set sync_seqno properly after seqno wrap
  drm/i915: Include the last semaphore sync point in the error-state
  drm/i915: Rearrange code to only have a single method for waiting upon the ring
  drm/i915: Simplify flushing activity on the ring
  drm/i915: Preallocate next seqno before touching the ring
  drm/i915: force restore on lid open
  ...
commit 55bde6b144
24 changed files with 591 additions and 280 deletions
@@ -2079,3 +2079,22 @@ int drm_add_modes_noedid(struct drm_connector *connector,
	return num_modes;
}
EXPORT_SYMBOL(drm_add_modes_noedid);

/**
 * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
 * @mode: mode
 *
 * RETURNS:
 * The VIC number, 0 in case it's not a CEA-861 mode.
 */
uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
{
	uint8_t i;

	for (i = 0; i < drm_num_cea_modes; i++)
		if (drm_mode_equal(mode, &edid_cea_modes[i]))
			return i + 1;

	return 0;
}
EXPORT_SYMBOL(drm_mode_cea_vic);
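The new helper is consumed further down this page by the HDMI code (`avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);`). A minimal sketch of the lookup semantics, for an encoder that wants to know whether a mode carries a CEA video identification code; the wrapper function and debug message here are illustrative, not part of the patch:

```c
#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/* Illustrative only: look up the CEA-861 VIC for the mode an encoder is
 * about to program.  A return value of 0 means "not a CEA-861 mode", which
 * is also the value an AVI infoframe uses for "unknown video code". */
static u8 example_avi_vic_for_mode(const struct drm_display_mode *adjusted_mode)
{
	u8 vic = drm_mode_cea_vic(adjusted_mode);

	if (vic == 0)
		DRM_DEBUG_KMS("mode is not a CEA-861 mode, VIC left at 0\n");

	return vic;
}
```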
@@ -317,7 +317,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (!work->pending) {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
@@ -328,7 +328,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
				seq_printf(m, "Stall check enabled, ");
			else
				seq_printf(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", work->pending);
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
@@ -655,10 +655,12 @@ static void i915_ring_error_state(struct seq_file *m,
	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
		seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
		seq_printf(m, " SYNC_0: 0x%08x\n",
			   error->semaphore_mboxes[ring][0]);
		seq_printf(m, " SYNC_1: 0x%08x\n",
			   error->semaphore_mboxes[ring][1]);
		seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   error->semaphore_mboxes[ring][0],
			   error->semaphore_seqno[ring][0]);
		seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   error->semaphore_mboxes[ring][1],
			   error->semaphore_seqno[ring][1]);
	}
	seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
	seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
@@ -141,7 +141,7 @@ void i915_kernel_lost_context(struct drm_device * dev)

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
	if (ring->space < 0)
		ring->space += ring->size;

@@ -592,10 +592,8 @@ static int i915_dispatch_flip(struct drm_device * dev)

static int i915_quiescent(struct drm_device *dev)
{
	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

	i915_kernel_lost_context(dev);
	return intel_wait_ring_idle(ring);
	return intel_ring_idle(LP_RING(dev->dev_private));
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -1045,7 +1043,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);
	struct intel_ring_buffer *ring;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;
@@ -1065,6 +1063,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring = LP_RING(dev_priv);
	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->dri1.gfx_hws_cpu_addr =
@@ -554,8 +554,7 @@ static int __i915_drm_thaw(struct drm_device *dev)

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
			ironlake_init_pch_refclk(dev);
		intel_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;
@@ -564,7 +563,7 @@ static int __i915_drm_thaw(struct drm_device *dev)
		mutex_unlock(&dev->struct_mutex);

		intel_modeset_init_hw(dev);
		intel_modeset_setup_hw_state(dev);
		intel_modeset_setup_hw_state(dev, false);
		drm_irq_install(dev);
	}

@@ -197,6 +197,7 @@ struct drm_i915_error_state {
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
@@ -381,6 +382,11 @@ enum intel_pch {
	PCH_LPT, /* Lynxpoint PCH */
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
@@ -909,6 +915,8 @@ typedef struct drm_i915_private {
	bool hw_contexts_disabled;
	uint32_t hw_context_size;

	bool fdi_rx_polarity_reversed;

	struct i915_suspend_saved_registers regfile;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
@@ -1417,8 +1425,7 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
				    struct intel_ring_buffer *ring,
				    u32 seqno);
				    struct intel_ring_buffer *ring);

int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
@@ -1436,7 +1443,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
	return (int32_t)(seq1 - seq2) >= 0;
}

u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);

int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
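`i915_seqno_passed()`, visible in the hunk above, is the wraparound-safe comparison the rest of the seqno-wrap fixes lean on: interpreting the unsigned difference as a signed 32-bit value keeps the ordering correct across a wrap, as long as two live seqnos are never more than 2^31 apart. A standalone demonstration (not driver code):

```c
#include <stdint.h>
#include <stdio.h>

/* Same trick as i915_seqno_passed(): seq1 has "passed" seq2 when the signed
 * interpretation of (seq1 - seq2) is non-negative, even across a 32-bit wrap. */
static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	/* Ordinary case: 10 has passed 5. */
	printf("%d\n", seqno_passed(10, 5));			/* 1 */
	/* Wrapped case: 3 was issued after 0xfffffffe, so it has passed it. */
	printf("%d\n", seqno_passed(3, 0xfffffffeu));		/* 1 */
	/* Reverse direction: 0xfffffffe has not passed 3. */
	printf("%d\n", seqno_passed(0xfffffffeu, 3));		/* 0 */
	return 0;
}
```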
@@ -1652,11 +1659,12 @@ extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
					 bool force_restore);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void ironlake_init_pch_refclk(struct drm_device *dev);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
@@ -1696,10 +1696,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
	if (obj->pages_pin_count)
		return -EBUSY;

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	list_del(&obj->gtt_list);

	ops->put_pages(obj);
	obj->pages = NULL;

	list_del(&obj->gtt_list);
	if (i915_gem_object_is_purgeable(obj))
		i915_gem_object_truncate(obj);

@@ -1857,11 +1861,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)

void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring,
			       u32 seqno)
			       struct intel_ring_buffer *ring)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno = intel_ring_get_seqno(ring);

	BUG_ON(ring == NULL);
	obj->ring = ring;
@@ -1922,26 +1926,54 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
	WARN_ON(i915_verify_lists(dev));
}

static u32
i915_gem_get_seqno(struct drm_device *dev)
static int
i915_gem_handle_seqno_wrap(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno = dev_priv->next_seqno;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, j;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;
	/* The hardware uses various monotonic 32-bit counters, if we
	 * detect that they will wraparound we need to idle the GPU
	 * and reset those counters.
	 */
	ret = 0;
	for_each_ring(ring, dev_priv, i) {
		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
			ret |= ring->sync_seqno[j] != 0;
	}
	if (ret == 0)
		return ret;

	return seqno;
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev);
	for_each_ring(ring, dev_priv, i) {
		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
			ring->sync_seqno[j] = 0;
	}

	return 0;
}

u32
i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
	if (ring->outstanding_lazy_request == 0)
		ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
	struct drm_i915_private *dev_priv = dev->dev_private;

	return ring->outstanding_lazy_request;
	/* reserve 0 for non-seqno */
	if (dev_priv->next_seqno == 0) {
		int ret = i915_gem_handle_seqno_wrap(dev);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->next_seqno++;
	return 0;
}

int
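Why wrapping can no longer be handled by simply skipping zero: the inter-ring semaphores latch absolute seqno values and, per the comment this series removes from the execbuffer path below, the GPU "can not handle its semaphore value wrapping". `i915_gem_handle_seqno_wrap()` therefore idles the GPU and zeroes every `ring->sync_seqno[]` before small values are reused. A toy illustration of the hazard, assuming for illustration a plain unsigned greater-or-equal check on the hardware side:

```c
#include <stdint.h>
#include <stdio.h>

/* Modeled hardware semaphore wait: releases once the signalled seqno is >=
 * the awaited value.  Unlike i915_seqno_passed() above, this comparison is
 * not wrap-safe, which is why the driver idles the GPU and clears the
 * per-ring sync_seqno bookkeeping before next_seqno wraps. */
static int hw_semaphore_released(uint32_t signalled, uint32_t awaited)
{
	return signalled >= awaited;
}

int main(void)
{
	uint32_t awaited = 0xfffffff0u;		/* waiter armed just before the wrap */

	printf("%d\n", hw_semaphore_released(5, awaited));		/* 0: stuck forever */
	printf("%d\n", hw_semaphore_released(0xfffffff1u, awaited));	/* 1: pre-wrap value works */
	return 0;
}
```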
@@ -1952,7 +1984,6 @@ i915_add_request(struct intel_ring_buffer *ring,
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_request *request;
	u32 request_ring_position;
	u32 seqno;
	int was_empty;
	int ret;

@@ -1971,7 +2002,6 @@ i915_add_request(struct intel_ring_buffer *ring,
	if (request == NULL)
		return -ENOMEM;

	seqno = i915_gem_next_request_seqno(ring);

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
@@ -1980,15 +2010,13 @@ i915_add_request(struct intel_ring_buffer *ring,
	 */
	request_ring_position = intel_ring_get_tail(ring);

	ret = ring->add_request(ring, &seqno);
	ret = ring->add_request(ring);
	if (ret) {
		kfree(request);
		return ret;
	}

	trace_i915_gem_request_add(ring, seqno);

	request->seqno = seqno;
	request->seqno = intel_ring_get_seqno(ring);
	request->ring = ring;
	request->tail = request_ring_position;
	request->emitted_jiffies = jiffies;
@@ -2006,6 +2034,7 @@ i915_add_request(struct intel_ring_buffer *ring,
		spin_unlock(&file_priv->mm.lock);
	}

	trace_i915_gem_request_add(ring, request->seqno);
	ring->outstanding_lazy_request = 0;

	if (!dev_priv->mm.suspended) {
@@ -2022,7 +2051,7 @@ i915_add_request(struct intel_ring_buffer *ring,
	}

	if (out_seqno)
		*out_seqno = seqno;
		*out_seqno = request->seqno;
	return 0;
}

@@ -2120,7 +2149,6 @@ void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
	uint32_t seqno;
	int i;

	if (list_empty(&ring->request_list))
		return;
@@ -2129,10 +2157,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)

	seqno = ring->get_seqno(ring, true);

	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
		if (seqno >= ring->sync_seqno[i])
			ring->sync_seqno[i] = 0;

	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

@@ -2377,7 +2401,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,

	ret = to->sync_to(to, from, seqno);
	if (!ret)
		from->sync_seqno[idx] = seqno;
		/* We use last_read_seqno because sync_to()
		 * might have just caused seqno wrap under
		 * the radar.
		 */
		from->sync_seqno[idx] = obj->last_read_seqno;

	return ret;
}
@@ -2460,14 +2488,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
	return 0;
}

static int i915_ring_idle(struct intel_ring_buffer *ring)
{
	if (list_empty(&ring->active_list))
		return 0;

	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}

int i915_gpu_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2480,7 +2500,7 @@ int i915_gpu_idle(struct drm_device *dev)
		if (ret)
			return ret;

		ret = i915_ring_idle(ring);
		ret = intel_ring_idle(ring);
		if (ret)
			return ret;
	}
@ -410,9 +410,8 @@ static int do_switch(struct i915_hw_context *to)
|
|||
* MI_SET_CONTEXT instead of when the next seqno has completed.
|
||||
*/
|
||||
if (from_obj != NULL) {
|
||||
u32 seqno = i915_gem_next_request_seqno(ring);
|
||||
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
|
||||
i915_gem_object_move_to_active(from_obj, ring, seqno);
|
||||
i915_gem_object_move_to_active(from_obj, ring);
|
||||
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
|
||||
* whole damn pipeline, we don't need to explicitly mark the
|
||||
* object dirty. The only exception is that the context must be
|
||||
|
|
|
@ -713,8 +713,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
|
|||
|
||||
static void
|
||||
i915_gem_execbuffer_move_to_active(struct list_head *objects,
|
||||
struct intel_ring_buffer *ring,
|
||||
u32 seqno)
|
||||
struct intel_ring_buffer *ring)
|
||||
{
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
||||
|
@ -726,10 +725,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
|
|||
obj->base.write_domain = obj->base.pending_write_domain;
|
||||
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
|
||||
|
||||
i915_gem_object_move_to_active(obj, ring, seqno);
|
||||
i915_gem_object_move_to_active(obj, ring);
|
||||
if (obj->base.write_domain) {
|
||||
obj->dirty = 1;
|
||||
obj->last_write_seqno = seqno;
|
||||
obj->last_write_seqno = intel_ring_get_seqno(ring);
|
||||
if (obj->pin_count) /* check for potential scanout */
|
||||
intel_mark_fb_busy(obj);
|
||||
}
|
||||
|
@ -789,7 +788,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
struct intel_ring_buffer *ring;
|
||||
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
|
||||
u32 exec_start, exec_len;
|
||||
u32 seqno;
|
||||
u32 mask;
|
||||
u32 flags;
|
||||
int ret, mode, i;
|
||||
|
@ -994,22 +992,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
if (ret)
|
||||
goto err;
|
||||
|
||||
seqno = i915_gem_next_request_seqno(ring);
|
||||
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
|
||||
if (seqno < ring->sync_seqno[i]) {
|
||||
/* The GPU can not handle its semaphore value wrapping,
|
||||
* so every billion or so execbuffers, we need to stall
|
||||
* the GPU in order to reset the counters.
|
||||
*/
|
||||
ret = i915_gpu_idle(dev);
|
||||
if (ret)
|
||||
goto err;
|
||||
i915_gem_retire_requests(dev);
|
||||
|
||||
BUG_ON(ring->sync_seqno[i]);
|
||||
}
|
||||
}
|
||||
|
||||
ret = i915_switch_context(ring, file, ctx_id);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
@ -1035,8 +1017,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
goto err;
|
||||
}
|
||||
|
||||
trace_i915_gem_ring_dispatch(ring, seqno, flags);
|
||||
|
||||
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
|
||||
exec_len = args->batch_len;
|
||||
if (cliprects) {
|
||||
|
@ -1060,7 +1040,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
goto err;
|
||||
}
|
||||
|
||||
i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
|
||||
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
|
||||
|
||||
i915_gem_execbuffer_move_to_active(&objects, ring);
|
||||
i915_gem_execbuffer_retire_commands(dev, file, ring);
|
||||
|
||||
err:
|
||||
|
|
|
@ -639,6 +639,10 @@ int i915_gem_gtt_init(struct drm_device *dev)
|
|||
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
|
||||
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
|
||||
|
||||
#ifdef CONFIG_INTEL_IOMMU
|
||||
dev_priv->mm.gtt->needs_dmar = 1;
|
||||
#endif
|
||||
|
||||
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
|
||||
gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
|
||||
dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
|
||||
|
|
|
@ -1120,6 +1120,8 @@ static void i915_record_ring_state(struct drm_device *dev,
|
|||
= I915_READ(RING_SYNC_0(ring->mmio_base));
|
||||
error->semaphore_mboxes[ring->id][1]
|
||||
= I915_READ(RING_SYNC_1(ring->mmio_base));
|
||||
error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
|
||||
error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
|
||||
}
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 4) {
|
||||
|
@ -1464,7 +1466,9 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
|
|||
spin_lock_irqsave(&dev->event_lock, flags);
|
||||
work = intel_crtc->unpin_work;
|
||||
|
||||
if (work == NULL || work->pending || !work->enable_stall_check) {
|
||||
if (work == NULL ||
|
||||
atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
|
||||
!work->enable_stall_check) {
|
||||
/* Either the pending flip IRQ arrived, or we're too early. Don't check */
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
return;
|
||||
|
|
|
@ -3843,7 +3843,9 @@
|
|||
#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
|
||||
#define FDI_BC_BIFURCATION_SELECT (1 << 12)
|
||||
#define SOUTH_CHICKEN2 0xc2004
|
||||
#define DPLS_EDP_PPS_FIX_DIS (1<<0)
|
||||
#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
|
||||
#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
|
||||
#define DPLS_EDP_PPS_FIX_DIS (1<<0)
|
||||
|
||||
#define _FDI_RXA_CHICKEN 0xc200c
|
||||
#define _FDI_RXB_CHICKEN 0xc2010
|
||||
|
@ -3915,6 +3917,7 @@
|
|||
#define FDI_FS_ERRC_ENABLE (1<<27)
|
||||
#define FDI_FE_ERRC_ENABLE (1<<26)
|
||||
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
|
||||
#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
|
||||
#define FDI_8BPC (0<<16)
|
||||
#define FDI_10BPC (1<<16)
|
||||
#define FDI_6BPC (2<<16)
|
||||
|
@ -4534,6 +4537,10 @@
|
|||
#define SBI_ADDR 0xC6000
|
||||
#define SBI_DATA 0xC6004
|
||||
#define SBI_CTL_STAT 0xC6008
|
||||
#define SBI_CTL_DEST_ICLK (0x0<<16)
|
||||
#define SBI_CTL_DEST_MPHY (0x1<<16)
|
||||
#define SBI_CTL_OP_IORD (0x2<<8)
|
||||
#define SBI_CTL_OP_IOWR (0x3<<8)
|
||||
#define SBI_CTL_OP_CRRD (0x6<<8)
|
||||
#define SBI_CTL_OP_CRWR (0x7<<8)
|
||||
#define SBI_RESPONSE_FAIL (0x1<<1)
|
||||
|
@ -4551,10 +4558,12 @@
|
|||
#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
|
||||
#define SBI_SSCCTL 0x020c
|
||||
#define SBI_SSCCTL6 0x060C
|
||||
#define SBI_SSCCTL_PATHALT (1<<3)
|
||||
#define SBI_SSCCTL_DISABLE (1<<0)
|
||||
#define SBI_SSCAUXDIV6 0x0610
|
||||
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
|
||||
#define SBI_DBUFF0 0x2a00
|
||||
#define SBI_DBUFF0_ENABLE (1<<0)
|
||||
|
||||
/* LPT PIXCLK_GATE */
|
||||
#define PIXCLK_GATE 0xC6020
|
||||
|
|
|
@ -97,7 +97,7 @@ static struct attribute_group rc6_attr_group = {
|
|||
|
||||
static int l3_access_valid(struct drm_device *dev, loff_t offset)
|
||||
{
|
||||
if (!IS_IVYBRIDGE(dev))
|
||||
if (!HAS_L3_GPU_CACHE(dev))
|
||||
return -EPERM;
|
||||
|
||||
if (offset % 4 != 0)
|
||||
|
|
|
@ -198,6 +198,11 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
|
|||
if (mode->clock > max_clock)
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
|
||||
if (HAS_PCH_LPT(dev) &&
|
||||
(ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
return MODE_OK;
|
||||
}
|
||||
|
||||
|
@ -793,4 +798,12 @@ void intel_crt_init(struct drm_device *dev)
|
|||
crt->force_hotplug_required = 0;
|
||||
|
||||
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
|
||||
|
||||
/*
|
||||
* TODO: find a proper way to discover whether we need to set the
|
||||
* polarity reversal bit or not, instead of relying on the BIOS.
|
||||
*/
|
||||
if (HAS_PCH_LPT(dev))
|
||||
dev_priv->fdi_rx_polarity_reversed =
|
||||
!!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
|
||||
}
|
||||
|
|
|
@ -138,6 +138,19 @@ static const long hsw_ddi_buf_ctl_values[] = {
|
|||
DDI_BUF_EMP_800MV_3_5DB_HSW
|
||||
};
|
||||
|
||||
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
|
||||
enum port port)
|
||||
{
|
||||
uint32_t reg = DDI_BUF_CTL(port);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 8; i++) {
|
||||
udelay(1);
|
||||
if (I915_READ(reg) & DDI_BUF_IS_IDLE)
|
||||
return;
|
||||
}
|
||||
DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
|
||||
}
|
||||
|
||||
/* Starting with Haswell, different DDI ports can work in FDI mode for
|
||||
* connection to the PCH-located connectors. For this, it is necessary to train
|
||||
|
@ -167,6 +180,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
|
|||
/* Enable the PCH Receiver FDI PLL */
|
||||
rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
|
||||
((intel_crtc->fdi_lanes - 1) << 19);
|
||||
if (dev_priv->fdi_rx_polarity_reversed)
|
||||
rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
|
||||
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
|
||||
POSTING_READ(_FDI_RXA_CTL);
|
||||
udelay(220);
|
||||
|
@ -231,18 +246,30 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
|
|||
return;
|
||||
}
|
||||
|
||||
temp = I915_READ(DDI_BUF_CTL(PORT_E));
|
||||
temp &= ~DDI_BUF_CTL_ENABLE;
|
||||
I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
|
||||
POSTING_READ(DDI_BUF_CTL(PORT_E));
|
||||
|
||||
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
|
||||
I915_WRITE(DP_TP_CTL(PORT_E),
|
||||
I915_READ(DP_TP_CTL(PORT_E)) & ~DP_TP_CTL_ENABLE);
|
||||
temp = I915_READ(DP_TP_CTL(PORT_E));
|
||||
temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
|
||||
temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
|
||||
I915_WRITE(DP_TP_CTL(PORT_E), temp);
|
||||
POSTING_READ(DP_TP_CTL(PORT_E));
|
||||
|
||||
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
|
||||
|
||||
rx_ctl_val &= ~FDI_RX_ENABLE;
|
||||
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
|
||||
POSTING_READ(_FDI_RXA_CTL);
|
||||
|
||||
/* Reset FDI_RX_MISC pwrdn lanes */
|
||||
temp = I915_READ(_FDI_RXA_MISC);
|
||||
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
|
||||
temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
|
||||
I915_WRITE(_FDI_RXA_MISC, temp);
|
||||
POSTING_READ(_FDI_RXA_MISC);
|
||||
}
|
||||
|
||||
DRM_ERROR("FDI link training failed!\n");
|
||||
|
@ -1222,20 +1249,6 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
|
|||
}
|
||||
}
|
||||
|
||||
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
|
||||
enum port port)
|
||||
{
|
||||
uint32_t reg = DDI_BUF_CTL(port);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 8; i++) {
|
||||
udelay(1);
|
||||
if (I915_READ(reg) & DDI_BUF_IS_IDLE)
|
||||
return;
|
||||
}
|
||||
DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
|
||||
}
|
||||
|
||||
static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
|
||||
{
|
||||
struct drm_encoder *encoder = &intel_encoder->base;
|
||||
|
|
|
@ -1506,24 +1506,26 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
|
|||
|
||||
/* SBI access */
|
||||
static void
|
||||
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
|
||||
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
|
||||
enum intel_sbi_destination destination)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 tmp;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
|
||||
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
|
||||
100)) {
|
||||
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
|
||||
DRM_ERROR("timeout waiting for SBI to become ready\n");
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
I915_WRITE(SBI_ADDR,
|
||||
(reg << 16));
|
||||
I915_WRITE(SBI_DATA,
|
||||
value);
|
||||
I915_WRITE(SBI_CTL_STAT,
|
||||
SBI_BUSY |
|
||||
SBI_CTL_OP_CRWR);
|
||||
I915_WRITE(SBI_ADDR, (reg << 16));
|
||||
I915_WRITE(SBI_DATA, value);
|
||||
|
||||
if (destination == SBI_ICLK)
|
||||
tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
|
||||
else
|
||||
tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
|
||||
I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
|
||||
|
||||
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
|
||||
100)) {
|
||||
|
@ -1536,23 +1538,25 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
|
|||
}
|
||||
|
||||
static u32
|
||||
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
|
||||
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
|
||||
enum intel_sbi_destination destination)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 value = 0;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
|
||||
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
|
||||
100)) {
|
||||
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
|
||||
DRM_ERROR("timeout waiting for SBI to become ready\n");
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
I915_WRITE(SBI_ADDR,
|
||||
(reg << 16));
|
||||
I915_WRITE(SBI_CTL_STAT,
|
||||
SBI_BUSY |
|
||||
SBI_CTL_OP_CRRD);
|
||||
I915_WRITE(SBI_ADDR, (reg << 16));
|
||||
|
||||
if (destination == SBI_ICLK)
|
||||
value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
|
||||
else
|
||||
value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
|
||||
I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
|
||||
|
||||
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
|
||||
100)) {
|
||||
|
@ -2424,18 +2428,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
|
|||
FDI_FE_ERRC_ENABLE);
|
||||
}
|
||||
|
||||
static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 flags = I915_READ(SOUTH_CHICKEN1);
|
||||
|
||||
flags |= FDI_PHASE_SYNC_OVR(pipe);
|
||||
I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
|
||||
flags |= FDI_PHASE_SYNC_EN(pipe);
|
||||
I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
|
||||
POSTING_READ(SOUTH_CHICKEN1);
|
||||
}
|
||||
|
||||
static void ivb_modeset_global_resources(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
@ -2610,8 +2602,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
|
|||
POSTING_READ(reg);
|
||||
udelay(150);
|
||||
|
||||
cpt_phase_pointer_enable(dev, pipe);
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
reg = FDI_TX_CTL(pipe);
|
||||
temp = I915_READ(reg);
|
||||
|
@ -2744,8 +2734,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
|
|||
POSTING_READ(reg);
|
||||
udelay(150);
|
||||
|
||||
cpt_phase_pointer_enable(dev, pipe);
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
reg = FDI_TX_CTL(pipe);
|
||||
temp = I915_READ(reg);
|
||||
|
@ -2884,17 +2872,6 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
|
|||
udelay(100);
|
||||
}
|
||||
|
||||
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 flags = I915_READ(SOUTH_CHICKEN1);
|
||||
|
||||
flags &= ~(FDI_PHASE_SYNC_EN(pipe));
|
||||
I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
|
||||
flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
|
||||
I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
|
||||
POSTING_READ(SOUTH_CHICKEN1);
|
||||
}
|
||||
static void ironlake_fdi_disable(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
|
@ -2921,8 +2898,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
|
|||
/* Ironlake workaround, disable clock pointer after downing FDI */
|
||||
if (HAS_PCH_IBX(dev)) {
|
||||
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
|
||||
} else if (HAS_PCH_CPT(dev)) {
|
||||
cpt_phase_pointer_disable(dev, pipe);
|
||||
}
|
||||
|
||||
/* still set train pattern 1 */
|
||||
|
@ -3024,8 +2999,9 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
|
|||
|
||||
/* Disable SSCCTL */
|
||||
intel_sbi_write(dev_priv, SBI_SSCCTL6,
|
||||
intel_sbi_read(dev_priv, SBI_SSCCTL6) |
|
||||
SBI_SSCCTL_DISABLE);
|
||||
intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
|
||||
SBI_SSCCTL_DISABLE,
|
||||
SBI_ICLK);
|
||||
|
||||
/* 20MHz is a corner case which is out of range for the 7-bit divisor */
|
||||
if (crtc->mode.clock == 20000) {
|
||||
|
@ -3066,33 +3042,25 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
|
|||
phaseinc);
|
||||
|
||||
/* Program SSCDIVINTPHASE6 */
|
||||
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
|
||||
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
|
||||
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
|
||||
temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
|
||||
temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
|
||||
temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
|
||||
temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
|
||||
temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
|
||||
|
||||
intel_sbi_write(dev_priv,
|
||||
SBI_SSCDIVINTPHASE6,
|
||||
temp);
|
||||
intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
|
||||
|
||||
/* Program SSCAUXDIV */
|
||||
temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
|
||||
temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
|
||||
temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
|
||||
temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
|
||||
intel_sbi_write(dev_priv,
|
||||
SBI_SSCAUXDIV6,
|
||||
temp);
|
||||
|
||||
intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
|
||||
|
||||
/* Enable modulator and associated divider */
|
||||
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
|
||||
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
|
||||
temp &= ~SBI_SSCCTL_DISABLE;
|
||||
intel_sbi_write(dev_priv,
|
||||
SBI_SSCCTL6,
|
||||
temp);
|
||||
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
|
||||
|
||||
/* Wait for initialization time */
|
||||
udelay(24);
|
||||
|
@ -4878,10 +4846,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
|
|||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize reference clocks when the driver loads
|
||||
*/
|
||||
void ironlake_init_pch_refclk(struct drm_device *dev)
|
||||
static void ironlake_init_pch_refclk(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
|
@ -4995,6 +4960,182 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
|
|||
}
|
||||
}
|
||||
|
||||
/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
|
||||
static void lpt_init_pch_refclk(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
struct intel_encoder *encoder;
|
||||
bool has_vga = false;
|
||||
bool is_sdv = false;
|
||||
u32 tmp;
|
||||
|
||||
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
|
||||
switch (encoder->type) {
|
||||
case INTEL_OUTPUT_ANALOG:
|
||||
has_vga = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!has_vga)
|
||||
return;
|
||||
|
||||
/* XXX: Rip out SDV support once Haswell ships for real. */
|
||||
if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
|
||||
is_sdv = true;
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
|
||||
tmp &= ~SBI_SSCCTL_DISABLE;
|
||||
tmp |= SBI_SSCCTL_PATHALT;
|
||||
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
|
||||
|
||||
udelay(24);
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
|
||||
tmp &= ~SBI_SSCCTL_PATHALT;
|
||||
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
|
||||
|
||||
if (!is_sdv) {
|
||||
tmp = I915_READ(SOUTH_CHICKEN2);
|
||||
tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
|
||||
I915_WRITE(SOUTH_CHICKEN2, tmp);
|
||||
|
||||
if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
|
||||
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
|
||||
DRM_ERROR("FDI mPHY reset assert timeout\n");
|
||||
|
||||
tmp = I915_READ(SOUTH_CHICKEN2);
|
||||
tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
|
||||
I915_WRITE(SOUTH_CHICKEN2, tmp);
|
||||
|
||||
if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
|
||||
FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
|
||||
100))
|
||||
DRM_ERROR("FDI mPHY reset de-assert timeout\n");
|
||||
}
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
|
||||
tmp &= ~(0xFF << 24);
|
||||
tmp |= (0x12 << 24);
|
||||
intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
|
||||
|
||||
if (!is_sdv) {
|
||||
tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
|
||||
tmp &= ~(0x3 << 6);
|
||||
tmp |= (1 << 6) | (1 << 0);
|
||||
intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
|
||||
}
|
||||
|
||||
if (is_sdv) {
|
||||
tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
|
||||
tmp |= 0x7FFF;
|
||||
intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
|
||||
}
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
|
||||
tmp |= (1 << 11);
|
||||
intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
|
||||
tmp |= (1 << 11);
|
||||
intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
|
||||
|
||||
if (is_sdv) {
|
||||
tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
|
||||
tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
|
||||
intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
|
||||
tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
|
||||
intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
|
||||
tmp |= (0x3F << 8);
|
||||
intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
|
||||
tmp |= (0x3F << 8);
|
||||
intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
|
||||
}
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
|
||||
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
|
||||
intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
|
||||
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
|
||||
intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
|
||||
|
||||
if (!is_sdv) {
|
||||
tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
|
||||
tmp &= ~(7 << 13);
|
||||
tmp |= (5 << 13);
|
||||
intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
|
||||
tmp &= ~(7 << 13);
|
||||
tmp |= (5 << 13);
|
||||
intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
|
||||
}
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
|
||||
tmp &= ~0xFF;
|
||||
tmp |= 0x1C;
|
||||
intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
|
||||
tmp &= ~0xFF;
|
||||
tmp |= 0x1C;
|
||||
intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
|
||||
tmp &= ~(0xFF << 16);
|
||||
tmp |= (0x1C << 16);
|
||||
intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
|
||||
tmp &= ~(0xFF << 16);
|
||||
tmp |= (0x1C << 16);
|
||||
intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
|
||||
|
||||
if (!is_sdv) {
|
||||
tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
|
||||
tmp |= (1 << 27);
|
||||
intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
|
||||
tmp |= (1 << 27);
|
||||
intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
|
||||
tmp &= ~(0xF << 28);
|
||||
tmp |= (4 << 28);
|
||||
intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
|
||||
|
||||
tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
|
||||
tmp &= ~(0xF << 28);
|
||||
tmp |= (4 << 28);
|
||||
intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
|
||||
}
|
||||
|
||||
/* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
|
||||
tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
|
||||
tmp |= SBI_DBUFF0_ENABLE;
|
||||
intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize reference clocks when the driver loads
|
||||
*/
|
||||
void intel_init_pch_refclk(struct drm_device *dev)
|
||||
{
|
||||
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
|
||||
ironlake_init_pch_refclk(dev);
|
||||
else if (HAS_PCH_LPT(dev))
|
||||
lpt_init_pch_refclk(dev);
|
||||
}
|
||||
|
||||
static int ironlake_get_refclk(struct drm_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
|
@ -5239,6 +5380,17 @@ static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
|
|||
}
|
||||
}
|
||||
|
||||
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
|
||||
{
|
||||
/*
|
||||
* Account for spread spectrum to avoid
|
||||
* oversubscribing the link. Max center spread
|
||||
* is 2.5%; use 5% for safety's sake.
|
||||
*/
|
||||
u32 bps = target_clock * bpp * 21 / 20;
|
||||
return bps / (link_bw * 8) + 1;
|
||||
}
|
||||
|
||||
static void ironlake_set_m_n(struct drm_crtc *crtc,
|
||||
struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
|
@ -5292,15 +5444,9 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
|
|||
else
|
||||
target_clock = adjusted_mode->clock;
|
||||
|
||||
if (!lane) {
|
||||
/*
|
||||
* Account for spread spectrum to avoid
|
||||
* oversubscribing the link. Max center spread
|
||||
* is 2.5%; use 5% for safety's sake.
|
||||
*/
|
||||
u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
|
||||
lane = bps / (link_bw * 8) + 1;
|
||||
}
|
||||
if (!lane)
|
||||
lane = ironlake_get_lanes_required(target_clock, link_bw,
|
||||
intel_crtc->bpp);
|
||||
|
||||
intel_crtc->fdi_lanes = lane;
|
||||
|
||||
|
@ -6940,11 +7086,18 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
|
|||
|
||||
spin_lock_irqsave(&dev->event_lock, flags);
|
||||
work = intel_crtc->unpin_work;
|
||||
if (work == NULL || !work->pending) {
|
||||
|
||||
/* Ensure we don't miss a work->pending update ... */
|
||||
smp_rmb();
|
||||
|
||||
if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
/* and that the unpin work is consistent wrt ->pending. */
|
||||
smp_rmb();
|
||||
|
||||
intel_crtc->unpin_work = NULL;
|
||||
|
||||
if (work->event)
|
||||
|
@ -6988,16 +7141,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
|
|||
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
|
||||
unsigned long flags;
|
||||
|
||||
/* NB: An MMIO update of the plane base pointer will also
|
||||
* generate a page-flip completion irq, i.e. every modeset
|
||||
* is also accompanied by a spurious intel_prepare_page_flip().
|
||||
*/
|
||||
spin_lock_irqsave(&dev->event_lock, flags);
|
||||
if (intel_crtc->unpin_work) {
|
||||
if ((++intel_crtc->unpin_work->pending) > 1)
|
||||
DRM_ERROR("Prepared flip multiple times\n");
|
||||
} else {
|
||||
DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
|
||||
}
|
||||
if (intel_crtc->unpin_work)
|
||||
atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
}
|
||||
|
||||
inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
|
||||
{
|
||||
/* Ensure that the work item is consistent when activating it ... */
|
||||
smp_wmb();
|
||||
atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
|
||||
/* and that it is marked active as soon as the irq could fire. */
|
||||
smp_wmb();
|
||||
}
|
||||
|
||||
static int intel_gen2_queue_flip(struct drm_device *dev,
|
||||
struct drm_crtc *crtc,
|
||||
struct drm_framebuffer *fb,
|
||||
|
@ -7031,6 +7193,8 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
|
|||
intel_ring_emit(ring, fb->pitches[0]);
|
||||
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
|
||||
intel_ring_emit(ring, 0); /* aux display base address, unused */
|
||||
|
||||
intel_mark_page_flip_active(intel_crtc);
|
||||
intel_ring_advance(ring);
|
||||
return 0;
|
||||
|
||||
|
@ -7071,6 +7235,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
|
|||
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
|
||||
intel_mark_page_flip_active(intel_crtc);
|
||||
intel_ring_advance(ring);
|
||||
return 0;
|
||||
|
||||
|
@ -7117,6 +7282,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
|
|||
pf = 0;
|
||||
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
|
||||
intel_ring_emit(ring, pf | pipesrc);
|
||||
|
||||
intel_mark_page_flip_active(intel_crtc);
|
||||
intel_ring_advance(ring);
|
||||
return 0;
|
||||
|
||||
|
@ -7159,6 +7326,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
|
|||
pf = 0;
|
||||
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
|
||||
intel_ring_emit(ring, pf | pipesrc);
|
||||
|
||||
intel_mark_page_flip_active(intel_crtc);
|
||||
intel_ring_advance(ring);
|
||||
return 0;
|
||||
|
||||
|
@ -7213,6 +7382,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
|
|||
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
|
||||
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
|
||||
intel_ring_emit(ring, (MI_NOOP));
|
||||
|
||||
intel_mark_page_flip_active(intel_crtc);
|
||||
intel_ring_advance(ring);
|
||||
return 0;
|
||||
|
||||
|
@ -8394,8 +8565,7 @@ static void intel_setup_outputs(struct drm_device *dev)
|
|||
intel_encoder_clones(encoder);
|
||||
}
|
||||
|
||||
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
|
||||
ironlake_init_pch_refclk(dev);
|
||||
intel_init_pch_refclk(dev);
|
||||
|
||||
drm_helper_move_panel_connectors_to_head(dev);
|
||||
}
|
||||
|
@ -8999,7 +9169,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
|
|||
|
||||
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
|
||||
* and i915 state tracking structures. */
|
||||
void intel_modeset_setup_hw_state(struct drm_device *dev)
|
||||
void intel_modeset_setup_hw_state(struct drm_device *dev,
|
||||
bool force_restore)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
enum pipe pipe;
|
||||
|
@ -9098,7 +9269,15 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
|
|||
intel_sanitize_crtc(crtc);
|
||||
}
|
||||
|
||||
intel_modeset_update_staged_output_state(dev);
|
||||
if (force_restore) {
|
||||
for_each_pipe(pipe) {
|
||||
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
|
||||
intel_set_mode(&crtc->base, &crtc->base.mode,
|
||||
crtc->base.x, crtc->base.y, crtc->base.fb);
|
||||
}
|
||||
} else {
|
||||
intel_modeset_update_staged_output_state(dev);
|
||||
}
|
||||
|
||||
intel_modeset_check_state(dev);
|
||||
|
||||
|
@ -9111,7 +9290,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
|
|||
|
||||
intel_setup_overlay(dev);
|
||||
|
||||
intel_modeset_setup_hw_state(dev);
|
||||
intel_modeset_setup_hw_state(dev, false);
|
||||
}
|
||||
|
||||
void intel_modeset_cleanup(struct drm_device *dev)
|
||||
|
|
|
@@ -401,7 +401,10 @@ struct intel_unpin_work {
	struct drm_i915_gem_object *old_fb_obj;
	struct drm_i915_gem_object *pending_flip_obj;
	struct drm_pending_vblank_event *event;
	int pending;
	atomic_t pending;
#define INTEL_FLIP_INACTIVE 0
#define INTEL_FLIP_PENDING 1
#define INTEL_FLIP_COMPLETE 2
	bool enable_stall_check;
};

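The race fix visible here and in the intel_display.c hunks replaces the plain `int pending` with an atomic three-state field: the flip stays INTEL_FLIP_INACTIVE while merely queued, `intel_mark_page_flip_active()` moves it to INTEL_FLIP_PENDING just before the ring is advanced, and the flip-pending interrupt promotes it to INTEL_FLIP_COMPLETE only via `atomic_inc_not_zero()`, so an interrupt that arrives before the flip was armed can no longer be mistaken for completion. A toy model of that ordering (illustrative C11 code, not the driver's):

```c
#include <stdatomic.h>
#include <stdio.h>

enum { FLIP_INACTIVE, FLIP_PENDING, FLIP_COMPLETE };

/* The IRQ handler may only advance the state if the flip was actually armed
 * (non-zero), mirroring atomic_inc_not_zero() in intel_prepare_page_flip(). */
static int irq_complete_flip(atomic_int *state)
{
	int cur = atomic_load(state);

	while (cur != FLIP_INACTIVE) {
		if (atomic_compare_exchange_weak(state, &cur, cur + 1))
			return 1;	/* PENDING -> COMPLETE */
	}
	return 0;			/* spurious IRQ before the flip was armed */
}

int main(void)
{
	atomic_int state = FLIP_INACTIVE;

	printf("%d\n", irq_complete_flip(&state));	/* 0: too early, ignored */
	atomic_store(&state, FLIP_PENDING);		/* ring emits the flip */
	printf("%d\n", irq_complete_flip(&state));	/* 1: now COMPLETE */
	return 0;
}
```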
@@ -556,6 +559,7 @@ intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
			     enum pipe pipe);
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);

struct intel_load_detect_pipe {
	struct drm_framebuffer *release_fb;
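The new `ironlake_get_lanes_required()` helper (its body appears in the intel_display.c portion of this diff: a 5% spread-spectrum margin on the payload, divided by the per-lane link bandwidth) is what lets `intel_crt_mode_valid()` reject modes the two-lane LPT FDI receiver cannot carry. A small worked example using the same 270000 (kHz) link rate and 24 bpp that the LPT check passes in:

```c
#include <stdio.h>

/* Same arithmetic as ironlake_get_lanes_required(): 5% margin for spread
 * spectrum, then payload bits divided by per-lane link capacity. */
static int lanes_required(int target_clock, int link_bw, int bpp)
{
	unsigned int bps = target_clock * bpp * 21 / 20;

	return bps / (link_bw * 8) + 1;
}

int main(void)
{
	/* 1920x1080@60 class mode, ~148.5 MHz pixel clock (kHz, as the driver passes it): */
	printf("%d lanes\n", lanes_required(148500, 270000, 24));	/* 2: fits on LPT */
	/* 2560x1600@60 class mode, ~268.5 MHz: */
	printf("%d lanes\n", lanes_required(268500, 270000, 24));	/* 4: rejected on LPT */
	return 0;
}
```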
@@ -340,6 +340,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;

	avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);

	intel_set_infoframe(encoder, &avi_if);
}

@@ -532,7 +532,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
	dev_priv->modeset_on_lid = 0;

	mutex_lock(&dev->mode_config.mutex);
	intel_modeset_check_state(dev);
	intel_modeset_setup_hw_state(dev, true);
	mutex_unlock(&dev->mode_config.mutex);

	return NOTIFY_OK;
@ -130,8 +130,9 @@ static int is_backlight_combination_mode(struct drm_device *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
|
||||
static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 val;
|
||||
|
||||
/* Restore the CTL value if it lost, e.g. GPU reset */
|
||||
|
@ -141,21 +142,22 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
|
|||
if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
|
||||
dev_priv->regfile.saveBLC_PWM_CTL2 = val;
|
||||
} else if (val == 0) {
|
||||
I915_WRITE(BLC_PWM_PCH_CTL2,
|
||||
dev_priv->regfile.saveBLC_PWM_CTL2);
|
||||
val = dev_priv->regfile.saveBLC_PWM_CTL2;
|
||||
I915_WRITE(BLC_PWM_PCH_CTL2, val);
|
||||
}
|
||||
} else {
|
||||
val = I915_READ(BLC_PWM_CTL);
|
||||
if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
|
||||
dev_priv->regfile.saveBLC_PWM_CTL = val;
|
||||
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
|
||||
if (INTEL_INFO(dev)->gen >= 4)
|
||||
dev_priv->regfile.saveBLC_PWM_CTL2 =
|
||||
I915_READ(BLC_PWM_CTL2);
|
||||
} else if (val == 0) {
|
||||
I915_WRITE(BLC_PWM_CTL,
|
||||
dev_priv->regfile.saveBLC_PWM_CTL);
|
||||
I915_WRITE(BLC_PWM_CTL2,
|
||||
dev_priv->regfile.saveBLC_PWM_CTL2);
|
||||
val = dev_priv->regfile.saveBLC_PWM_CTL;
|
||||
I915_WRITE(BLC_PWM_CTL, val);
|
||||
if (INTEL_INFO(dev)->gen >= 4)
|
||||
I915_WRITE(BLC_PWM_CTL2,
|
||||
dev_priv->regfile.saveBLC_PWM_CTL2);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -164,10 +166,9 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
|
|||
|
||||
static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 max;
|
||||
|
||||
max = i915_read_blc_pwm_ctl(dev_priv);
|
||||
max = i915_read_blc_pwm_ctl(dev);
|
||||
|
||||
if (HAS_PCH_SPLIT(dev)) {
|
||||
max >>= 16;
|
||||
|
|
|
@ -1325,10 +1325,11 @@ static void valleyview_update_wm(struct drm_device *dev)
|
|||
(planeb_wm << DSPFW_PLANEB_SHIFT) |
|
||||
planea_wm);
|
||||
I915_WRITE(DSPFW2,
|
||||
(I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
|
||||
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
|
||||
(cursora_wm << DSPFW_CURSORA_SHIFT));
|
||||
I915_WRITE(DSPFW3,
|
||||
(I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
|
||||
(I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
|
||||
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
|
||||
}
|
||||
|
||||
static void g4x_update_wm(struct drm_device *dev)
|
||||
|
@ -1374,11 +1375,11 @@ static void g4x_update_wm(struct drm_device *dev)
|
|||
(planeb_wm << DSPFW_PLANEB_SHIFT) |
|
||||
planea_wm);
|
||||
I915_WRITE(DSPFW2,
|
||||
(I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
|
||||
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
|
||||
(cursora_wm << DSPFW_CURSORA_SHIFT));
|
||||
/* HPLL off in SR has some issues on G4x... disable it */
|
||||
I915_WRITE(DSPFW3,
|
||||
(I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
|
||||
(I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
|
||||
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
|
||||
}
|
||||
|
||||
|
@ -2647,6 +2648,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
|
|||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
|
||||
bool was_interruptible;
|
||||
int ret;
|
||||
|
||||
/* rc6 disabled by default due to repeated reports of hanging during
|
||||
|
@ -2661,6 +2663,9 @@ static void ironlake_enable_rc6(struct drm_device *dev)
|
|||
if (ret)
|
||||
return;
|
||||
|
||||
was_interruptible = dev_priv->mm.interruptible;
|
||||
dev_priv->mm.interruptible = false;
|
||||
|
||||
/*
|
||||
* GPU can automatically power down the render unit if given a page
|
||||
* to save state.
|
||||
|
@ -2668,6 +2673,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
|
|||
ret = intel_ring_begin(ring, 6);
|
||||
if (ret) {
|
||||
ironlake_teardown_rc6(dev);
|
||||
dev_priv->mm.interruptible = was_interruptible;
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -2688,7 +2694,8 @@ static void ironlake_enable_rc6(struct drm_device *dev)
|
|||
* does an implicit flush, combined with MI_FLUSH above, it should be
|
||||
* safe to assume that renderctx is valid
|
||||
*/
|
||||
ret = intel_wait_ring_idle(ring);
|
||||
ret = intel_ring_idle(ring);
|
||||
dev_priv->mm.interruptible = was_interruptible;
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to enable ironlake power power savings\n");
|
||||
ironlake_teardown_rc6(dev);
|
||||
|
@ -3440,6 +3447,11 @@ static void cpt_init_clock_gating(struct drm_device *dev)
|
|||
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
|
||||
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
|
||||
DPLS_EDP_PPS_FIX_DIS);
|
||||
/* The below fixes the weird display corruption, a few pixels shifted
|
||||
* downward, on (only) LVDS of some HP laptops with IVY.
|
||||
*/
|
||||
for_each_pipe(pipe)
|
||||
I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
|
||||
/* WADP0ClockGatingDisable */
|
||||
for_each_pipe(pipe) {
|
||||
I915_WRITE(TRANS_CHICKEN1(pipe),
|
||||
|
|
|
@@ -45,7 +45,7 @@ struct pipe_control {

static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += ring->size;
	return space;
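This is the "don't let tail reach the same cacheline as head" fix: instead of a bare 8-byte gap, free space is computed against `I915_RING_FREE_SPACE`, a larger reserved margin so the hardware never sees head and tail land on the same cacheline while commands are in flight. A standalone model of the computation; the 64-byte margin below is an assumption for illustration, the real constant lives in intel_ringbuffer.h and is not shown in this hunk:

```c
#include <stdio.h>

#define RING_SIZE	(32 * 4096)	/* 32 pages, as in intel_init_ring_buffer() */
#define FREE_SPACE_GAP	64		/* assumed cacheline-sized reserve */

/* Free bytes between tail (where the CPU writes) and head (where the GPU
 * reads), keeping FREE_SPACE_GAP in reserve so tail never catches head. */
static int ring_free_space(unsigned int head, unsigned int tail)
{
	int space = head - (tail + FREE_SPACE_GAP);

	if (space < 0)
		space += RING_SIZE;
	return space;
}

int main(void)
{
	printf("%d\n", ring_free_space(4096, 256));	/* plenty of room */
	printf("%d\n", ring_free_space(256, 4096));	/* wrapped case */
	printf("%d\n", ring_free_space(128, 64));	/* 0: writer must wait */
	return 0;
}
```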
@ -555,12 +555,11 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
|
|||
|
||||
static void
|
||||
update_mboxes(struct intel_ring_buffer *ring,
|
||||
u32 seqno,
|
||||
u32 mmio_offset)
|
||||
u32 mmio_offset)
|
||||
{
|
||||
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
|
||||
intel_ring_emit(ring, mmio_offset);
|
||||
intel_ring_emit(ring, seqno);
|
||||
intel_ring_emit(ring, ring->outstanding_lazy_request);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -573,8 +572,7 @@ update_mboxes(struct intel_ring_buffer *ring,
|
|||
* This acts like a signal in the canonical semaphore.
|
||||
*/
|
||||
static int
|
||||
gen6_add_request(struct intel_ring_buffer *ring,
|
||||
u32 *seqno)
|
||||
gen6_add_request(struct intel_ring_buffer *ring)
|
||||
{
|
||||
u32 mbox1_reg;
|
||||
u32 mbox2_reg;
|
||||
|
@ -587,13 +585,11 @@ gen6_add_request(struct intel_ring_buffer *ring,
|
|||
mbox1_reg = ring->signal_mbox[0];
|
||||
mbox2_reg = ring->signal_mbox[1];
|
||||
|
||||
*seqno = i915_gem_next_request_seqno(ring);
|
||||
|
||||
update_mboxes(ring, *seqno, mbox1_reg);
|
||||
update_mboxes(ring, *seqno, mbox2_reg);
|
||||
update_mboxes(ring, mbox1_reg);
|
||||
update_mboxes(ring, mbox2_reg);
|
||||
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
|
||||
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
|
||||
intel_ring_emit(ring, *seqno);
|
||||
intel_ring_emit(ring, ring->outstanding_lazy_request);
|
||||
intel_ring_emit(ring, MI_USER_INTERRUPT);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
|
@ -650,10 +646,8 @@ do { \
|
|||
} while (0)
|
||||
|
||||
static int
|
||||
pc_render_add_request(struct intel_ring_buffer *ring,
|
||||
u32 *result)
|
||||
pc_render_add_request(struct intel_ring_buffer *ring)
|
||||
{
|
||||
u32 seqno = i915_gem_next_request_seqno(ring);
|
||||
struct pipe_control *pc = ring->private;
|
||||
u32 scratch_addr = pc->gtt_offset + 128;
|
||||
int ret;
|
||||
|
@ -674,7 +668,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
|
|||
PIPE_CONTROL_WRITE_FLUSH |
|
||||
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
|
||||
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
|
||||
intel_ring_emit(ring, seqno);
|
||||
intel_ring_emit(ring, ring->outstanding_lazy_request);
|
||||
intel_ring_emit(ring, 0);
|
||||
PIPE_CONTROL_FLUSH(ring, scratch_addr);
|
||||
scratch_addr += 128; /* write to separate cachelines */
|
||||
|
@ -693,11 +687,10 @@ pc_render_add_request(struct intel_ring_buffer *ring,
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
 	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);

-	*result = seqno;
 	return 0;
 }
@ -885,25 +878,20 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
 }

 static int
-i9xx_add_request(struct intel_ring_buffer *ring,
-		 u32 *result)
+i9xx_add_request(struct intel_ring_buffer *ring)
 {
-	u32 seqno;
 	int ret;

 	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;

-	seqno = i915_gem_next_request_seqno(ring);
-
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_advance(ring);

-	*result = seqno;
 	return 0;
 }
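Note: since every ->add_request() implementation above now emits ring->outstanding_lazy_request instead of handing a fresh seqno back through a pointer, the caller picks the value up afterwards. A minimal sketch of that caller-side pattern, loosely modelled on i915_add_request(); the function name is made up, error handling and request bookkeeping are omitted, and it is not meant to compile against the real driver:

/* Hypothetical sketch of the caller side after this change. */
static int sketch_add_request(struct intel_ring_buffer *ring,
			      struct drm_i915_gem_request *request)
{
	int ret;

	/* The breadcrumb uses the seqno that intel_ring_begin()
	 * preallocated into ring->outstanding_lazy_request. */
	ret = ring->add_request(ring);
	if (ret)
		return ret;

	/* The request is tagged with that same seqno... */
	request->seqno = intel_ring_get_seqno(ring);

	/* ...and the lazy request is consumed, so the next
	 * intel_ring_begin() will reserve a new one. */
	ring->outstanding_lazy_request = 0;
	return 0;
}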
@ -1110,6 +1098,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	ring->size = 32 * PAGE_SIZE;
+	memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));

 	init_waitqueue_head(&ring->irq_queue);
@ -1186,7 +1175,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)

 	/* Disable the ring buffer. The ring must be idle at this point */
 	dev_priv = ring->dev->dev_private;
-	ret = intel_wait_ring_idle(ring);
+	ret = intel_ring_idle(ring);
 	if (ret)
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  ring->name, ret);
@ -1205,28 +1194,6 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 	cleanup_status_page(ring);
 }

-static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
-{
-	uint32_t __iomem *virt;
-	int rem = ring->size - ring->tail;
-
-	if (ring->space < rem) {
-		int ret = intel_wait_ring_buffer(ring, rem);
-		if (ret)
-			return ret;
-	}
-
-	virt = ring->virtual_start + ring->tail;
-	rem /= 4;
-	while (rem--)
-		iowrite32(MI_NOOP, virt++);
-
-	ring->tail = 0;
-	ring->space = ring_space(ring);
-
-	return 0;
-}
-
 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
 	int ret;
@ -1260,7 +1227,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 		if (request->tail == -1)
 			continue;

-		space = request->tail - (ring->tail + 8);
+		space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
 		if (space < 0)
 			space += ring->size;
 		if (space >= n) {
@ -1295,7 +1262,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 	return 0;
 }

-int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
+static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@ -1338,6 +1305,60 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 	return -EBUSY;
 }

+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+{
+	uint32_t __iomem *virt;
+	int rem = ring->size - ring->tail;
+
+	if (ring->space < rem) {
+		int ret = ring_wait_for_space(ring, rem);
+		if (ret)
+			return ret;
+	}
+
+	virt = ring->virtual_start + ring->tail;
+	rem /= 4;
+	while (rem--)
+		iowrite32(MI_NOOP, virt++);
+
+	ring->tail = 0;
+	ring->space = ring_space(ring);
+
+	return 0;
+}
+
+int intel_ring_idle(struct intel_ring_buffer *ring)
+{
+	u32 seqno;
+	int ret;
+
+	/* We need to add any requests required to flush the objects and ring */
+	if (ring->outstanding_lazy_request) {
+		ret = i915_add_request(ring, NULL, NULL);
+		if (ret)
+			return ret;
+	}
+
+	/* Wait upon the last request to be completed */
+	if (list_empty(&ring->request_list))
+		return 0;
+
+	seqno = list_entry(ring->request_list.prev,
+			   struct drm_i915_gem_request,
+			   list)->seqno;
+
+	return i915_wait_seqno(ring, seqno);
+}
+
+static int
+intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+{
+	if (ring->outstanding_lazy_request)
+		return 0;
+
+	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+}
+
 int intel_ring_begin(struct intel_ring_buffer *ring,
 		     int num_dwords)
 {
@ -1349,6 +1370,11 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;

+	/* Preallocate the olr before touching the ring */
+	ret = intel_ring_alloc_seqno(ring);
+	if (ret)
+		return ret;
+
 	if (unlikely(ring->tail + n > ring->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);
 		if (unlikely(ret))
@ -1356,7 +1382,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	}

 	if (unlikely(ring->space < n)) {
-		ret = intel_wait_ring_buffer(ring, n);
+		ret = ring_wait_for_space(ring, n);
 		if (unlikely(ret))
 			return ret;
 	}

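Note: the five lines added to intel_ring_begin() above are the heart of the seqno-preallocation fix. Reserving a seqno may itself have to wait for the GPU (for example when the 32-bit counter wraps and the rings must be idled), and that must not happen after dwords have already been reserved or written. A rough, hypothetical sketch of the resulting ordering, with the wedged-GPU check and space accounting glossed over; it is illustrative only, not the driver function:

/* Sketch of the ordering only; names other than the called helpers
 * are made up for illustration. */
static int sketch_ring_begin(struct intel_ring_buffer *ring, int num_dwords)
{
	int n = 4 * num_dwords;
	int ret;

	/* 1. Reserve the seqno first: on a seqno wrap this may have to
	 *    wait for the GPU, which is only safe while nothing has been
	 *    emitted yet. */
	ret = intel_ring_alloc_seqno(ring);
	if (ret)
		return ret;

	/* 2. Only then touch the ring: pad out to the start if the
	 *    request would run past the end... */
	if (ring->tail + n > ring->effective_size) {
		ret = intel_wrap_ring_buffer(ring);
		if (ret)
			return ret;
	}

	/* 3. ...and wait until at least n bytes are free. */
	if (ring->space < n) {
		ret = ring_wait_for_space(ring, n);
		if (ret)
			return ret;
	}

	return 0;
}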
@ -1,6 +1,17 @@
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_

+/*
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
+ * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
+ * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
+ *
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
+ * cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+#define I915_RING_FREE_SPACE 64
+
 struct intel_hw_status_page {
 	u32 *page_addr;
 	unsigned int gfx_addr;
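Note: the quoted BSpec restriction is why the cushion grows from the old ad-hoc 8 bytes to a full 64-byte cacheline. If the reported free space always stops one cacheline short of the head, the tail can never land on the head's cacheline and the restricted case cannot occur. A small stand-alone worked example of the arithmetic used by ring_space(); ordinary user-space C, the constants mirror the driver but the program itself is only illustrative:

#include <stdio.h>

#define RING_SIZE	(32 * 4096)	/* 32 pages, as in intel_init_ring_buffer() */
#define FREE_SPACE	64		/* I915_RING_FREE_SPACE: one cacheline */

/* Free bytes between tail and head, keeping a one-cacheline cushion. */
static int ring_space(unsigned int head, unsigned int tail)
{
	int space = head - (tail + FREE_SPACE);

	if (space < 0)
		space += RING_SIZE;
	return space;
}

int main(void)
{
	/* Empty ring (head == tail): the cushion is permanently reserved,
	 * so the maximum reported free space is RING_SIZE - 64. */
	printf("%d\n", ring_space(4096, 4096));	/* prints 131008 */

	/* Tail exactly one cacheline behind head: reported as completely
	 * full, so the tail never advances onto the head's cacheline. */
	printf("%d\n", ring_space(4096, 4032));	/* prints 0 */
	return 0;
}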
@ -70,8 +81,7 @@ struct intel_ring_buffer {
 	int __must_check (*flush)(struct intel_ring_buffer *ring,
 				  u32 invalidate_domains,
 				  u32 flush_domains);
-	int		(*add_request)(struct intel_ring_buffer *ring,
-				       u32 *seqno);
+	int		(*add_request)(struct intel_ring_buffer *ring);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last
@ -188,24 +198,16 @@ intel_read_status_page(struct intel_ring_buffer *ring,

 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

-int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
-static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
-{
-	return intel_wait_ring_buffer(ring, ring->size - 8);
-}
-
 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);

 static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 				   u32 data)
 {
 	iowrite32(data, ring->virtual_start + ring->tail);
 	ring->tail += 4;
 }
 void intel_ring_advance(struct intel_ring_buffer *ring);
+int __must_check intel_ring_idle(struct intel_ring_buffer *ring);

-u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
 int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
 int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
@ -221,6 +223,12 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
 	return ring->tail;
 }

+static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+	BUG_ON(ring->outstanding_lazy_request == 0);
+	return ring->outstanding_lazy_request;
+}
+
 static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
 {
 	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))

@ -509,7 +509,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
 static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 				     void *response, int response_len)
 {
-	u8 retry = 5;
+	u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
 	u8 status;
 	int i;
@ -522,14 +522,27 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 	 * command to be complete.
 	 *
 	 * Check 5 times in case the hardware failed to read the docs.
+	 *
+	 * Also beware that the first response by many devices is to
+	 * reply PENDING and stall for time. TVs are notorious for
+	 * requiring longer than specified to complete their replies.
+	 * Originally (in the DDX long ago), the delay was only ever 15ms
+	 * with an additional delay of 30ms applied for TVs added later after
+	 * many experiments. To accommodate both sets of delays, we do a
+	 * sequence of slow checks if the device is falling behind and fails
+	 * to reply within 5*15µs.
 	 */
 	if (!intel_sdvo_read_byte(intel_sdvo,
 				  SDVO_I2C_CMD_STATUS,
 				  &status))
 		goto log_fail;

-	while (status == SDVO_CMD_STATUS_PENDING && retry--) {
-		udelay(15);
+	while (status == SDVO_CMD_STATUS_PENDING && --retry) {
+		if (retry < 10)
+			msleep(15);
+		else
+			udelay(15);
+
 		if (!intel_sdvo_read_byte(intel_sdvo,
 					  SDVO_I2C_CMD_STATUS,
 					  &status))
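Note: the retry split keeps the common case fast while giving slow TV encoders real headroom: a handful of 15 µs busy-waits followed by a series of 15 ms sleeps, for a worst case on the order of 150 ms instead of the old ~75 µs. A stand-alone sketch of the same back-off pattern; plain user-space C with usleep() standing in for udelay()/msleep(), and the status codes and read_status() helper are placeholders, not the SDVO interface:

#include <stdio.h>
#include <unistd.h>

#define STATUS_PENDING	1
#define STATUS_DONE	2

/* Placeholder for the hardware status read: pretends the device
 * stays busy for the first few polls. */
static int read_status(int *calls)
{
	return ++(*calls) < 8 ? STATUS_PENDING : STATUS_DONE;
}

int main(void)
{
	int retry = 15;	/* 5 quick checks, followed by 10 long checks */
	int calls = 0;
	int status = read_status(&calls);

	while (status == STATUS_PENDING && --retry) {
		if (retry < 10)
			usleep(15 * 1000);	/* slow path, like msleep(15) */
		else
			usleep(15);		/* fast path, like udelay(15) */

		status = read_status(&calls);
	}

	printf("status %d after %d reads, %d retries left\n",
	       status, calls, retry);
	return 0;
}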
@ -1535,15 +1548,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 	enum drm_connector_status ret;

-	if (!intel_sdvo_write_cmd(intel_sdvo,
-				  SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
-		return connector_status_unknown;
-
-	/* add 30ms delay when the output type might be TV */
-	if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
-		msleep(30);
-
-	if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
+	if (!intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_ATTACHED_DISPLAYS,
+				  &response, 2))
 		return connector_status_unknown;

 	DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",

@ -1047,6 +1047,7 @@ extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
 					int GTF_2C, int GTF_K, int GTF_2J);
 extern int drm_add_modes_noedid(struct drm_connector *connector,
 				int hdisplay, int vdisplay);
+extern uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode);

 extern int drm_edid_header_is_valid(const u8 *raw_edid);
 extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
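Note: with the prototype exported here, an HDMI encoder can look the VIC up while it builds its AVI infoframe. A hedged sketch of the intended call pattern; the struct and field name below are placeholders for illustration, not the actual i915 infoframe code:

#include <stdint.h>

/* Placeholder standing in for a driver's AVI infoframe structure. */
struct avi_infoframe_sketch {
	uint8_t vic;
};

/* Sketch: record the CEA-861 VIC of the mode being set, if it has one. */
static void sketch_set_avi_vic(struct avi_infoframe_sketch *frame,
			       const struct drm_display_mode *mode)
{
	/* drm_mode_cea_vic() yields 0 for modes without a CEA VIC, which
	 * conveniently matches "no VIC" in the infoframe. */
	frame->vic = drm_mode_cea_vic(mode);
}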