Merge tag 'drm-intel-next-2014-11-21-fixed' of git://anongit.freedesktop.org/drm-intel into drm-next

drm-intel-next-2014-11-21:
- infoframe tracking (for fastboot) from Jesse
- start of the dri1/ums support removal
- vlv forcewake timeout fixes (Imre)
- bunch of patches to polish the rps code (Imre) and improve it on bdw (Tom
  O'Rourke)
- on-demand pinning for execlist contexts (see the sketch after this list)
- vlv/chv backlight improvements (Ville)
- gen8+ render ctx w/a work from various people
- skl edp programming (Satheeshakrishna et al.)
- psr docbook (Rodrigo)
- piles of little fixes and improvements all over, as usual

* tag 'drm-intel-next-2014-11-21-fixed' of git://anongit.freedesktop.org/drm-intel: (117 commits)
  drm/i915: Don't pin LRC in GGTT when dumping in debugfs
  drm/i915: Update DRIVER_DATE to 20141121
  drm/i915/g4x: fix g4x infoframe readout
  drm/i915: Only call mod_timer() if not already pending
  drm/i915: Don't rely upon encoder->type for infoframe hw state readout
  drm/i915: remove the IRQs enabled WARN from intel_disable_gt_powersave
  drm/i915: Use ggtt error obj capture helper for gen8 semaphores
  drm/i915: vlv: increase timeout when setting idle GPU freq
  drm/i915: vlv: fix cdclk setting during modeset while suspended
  drm/i915: Dump hdmi pipe_config state
  drm/i915: Gen9 shadowed registers
  drm/i915/skl: Gen9 multi-engine forcewake
  drm/i915: Read power well status before other registers for drpc info
  drm/i915: Pin tiled objects for L-shaped configs
  drm/i915: Update ring freq for full gpu freq range
  drm/i915: change initial rps frequency for gen8
  drm/i915: Keep min freq above floor on HSW/BDW
  drm/i915: Use efficient frequency for HSW/BDW
  drm/i915: Can i915_gem_init_ioctl
  drm/i915: Sanitize ->lastclose
  ...
Committed by Dave Airlie on 2014-12-03 08:25:59 +10:00 (commit 26045b53c9).
40 changed files with 3020 additions and 2478 deletions.

@ -3919,6 +3919,11 @@ int num_ioctls;</synopsis>
<title>High Definition Audio</title>
!Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
!Idrivers/gpu/drm/i915/intel_audio.c
</sect2>
<sect2>
<title>Panel Self Refresh PSR (PSR/SRD)</title>
!Pdrivers/gpu/drm/i915/intel_psr.c Panel Self Refresh (PSR/SRD)
!Idrivers/gpu/drm/i915/intel_psr.c
</sect2>
<sect2>
<title>DPIO</title>
@ -4029,6 +4034,27 @@ int num_ioctls;</synopsis>
!Idrivers/gpu/drm/i915/intel_lrc.c
</sect2>
</sect1>
<sect1>
<title> Tracing </title>
<para>
This section covers all things related to the tracepoints implemented in
the i915 driver.
</para>
<sect2>
<title> i915_ppgtt_create and i915_ppgtt_release </title>
!Pdrivers/gpu/drm/i915/i915_trace.h i915_ppgtt_create and i915_ppgtt_release tracepoints
</sect2>
<sect2>
<title> i915_context_create and i915_context_free </title>
!Pdrivers/gpu/drm/i915/i915_trace.h i915_context_create and i915_context_free tracepoints
</sect2>
<sect2>
<title> switch_mm </title>
!Pdrivers/gpu/drm/i915/i915_trace.h switch_mm tracepoint
</sect2>
</sect1>
</chapter>
!Cdrivers/gpu/drm/i915/i915_irq.c
</part>
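
The tracing chapter above pulls its documentation from i915_trace.h. Tracepoints of this kind are declared with the kernel's TRACE_EVENT() macro; the following is a minimal sketch of that shape (the name and field list are illustrative, not the actual i915 definitions):

#include <linux/tracepoint.h>

TRACE_EVENT(i915_ppgtt_create_sketch,
	TP_PROTO(void *vm),
	TP_ARGS(vm),

	TP_STRUCT__entry(
		__field(void *, vm)
	),

	TP_fast_assign(
		__entry->vm = vm;
	),

	TP_printk("vm=%p", __entry->vm)
);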


@ -3145,9 +3145,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
}
}
eld[5] |= sad_count << 4;
eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
eld[DRM_ELD_BASELINE_ELD_LEN] =
DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
drm_eld_size(eld), sad_count);
}
EXPORT_SYMBOL(drm_edid_to_eld);
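
The hunk above replaces the open-coded baseline-block length with a helper plus DIV_ROUND_UP(). Assuming drm_eld_calc_baseline_block_size() yields the same 20 + mnl + 3 * sad_count byte count the old expression used, the two forms are equivalent, which a standalone check confirms:

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	for (int mnl = 0; mnl <= 32; mnl++)
		for (int sad_count = 0; sad_count <= 15; sad_count++) {
			int bytes = 20 + mnl + sad_count * 3;
			/* old: (bytes + 3) / 4 == new: DIV_ROUND_UP(bytes, 4) */
			assert((bytes + 3) / 4 == DIV_ROUND_UP(bytes, 4));
		}
	return 0;
}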


@ -51,6 +51,7 @@ i915-y += intel_audio.o \
intel_frontbuffer.o \
intel_modes.o \
intel_overlay.o \
intel_psr.o \
intel_sideband.o \
intel_sprite.o
i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o


@ -413,6 +413,8 @@ static const u32 gen7_render_regs[] = {
REG64(PS_INVOCATION_COUNT),
REG64(PS_DEPTH_COUNT),
OACONTROL, /* Only allowed for LRI and SRM. See below. */
REG64(MI_PREDICATE_SRC0),
REG64(MI_PREDICATE_SRC1),
GEN7_3DPRIM_END_OFFSET,
GEN7_3DPRIM_START_VERTEX,
GEN7_3DPRIM_VERTEX_COUNT,
@ -1072,6 +1074,8 @@ int i915_cmd_parser_get_version(void)
*
* 1. Initial version. Checks batches and reports violations, but leaves
* hardware parsing enabled (so does not allow new use cases).
* 2. Allow access to the MI_PREDICATE_SRC0 and
* MI_PREDICATE_SRC1 registers.
*/
return 1;
return 2;
}
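
Userspace can key off this version bump to learn whether LRI writes to MI_PREDICATE_SRC0/SRC1 will pass the parser. A minimal sketch using the stock GETPARAM ioctl (error handling trimmed; assumes an i915_drm.h that defines I915_PARAM_CMD_PARSER_VERSION and the usual header paths):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	int version = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CMD_PARSER_VERSION,
		.value = &version,
	};

	if (fd >= 0 && ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("cmd parser version %d (>= 2 allows MI_PREDICATE_SRC*)\n",
		       version);
	return 0;
}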


@ -1240,11 +1240,12 @@ static int vlv_drpc_info(struct seq_file *m)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rpmodectl1, rcctl1;
u32 rpmodectl1, rcctl1, pw_status;
unsigned fw_rendercount = 0, fw_mediacount = 0;
intel_runtime_pm_get(dev_priv);
pw_status = I915_READ(VLV_GTLC_PW_STATUS);
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
@ -1263,11 +1264,9 @@ static int vlv_drpc_info(struct seq_file *m)
yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
GEN6_RC_CTL_EI_MODE(1))));
seq_printf(m, "Render Power Well: %s\n",
(I915_READ(VLV_GTLC_PW_STATUS) &
VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
(pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
seq_printf(m, "Media Power Well: %s\n",
(I915_READ(VLV_GTLC_PW_STATUS) &
VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
(pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
seq_printf(m, "Render RC6 residency since boot: %u\n",
I915_READ(VLV_GT_RENDER_RC6));
@ -1773,6 +1772,50 @@ static int i915_context_status(struct seq_file *m, void *unused)
return 0;
}
static void i915_dump_lrc_obj(struct seq_file *m,
struct intel_engine_cs *ring,
struct drm_i915_gem_object *ctx_obj)
{
struct page *page;
uint32_t *reg_state;
int j;
unsigned long ggtt_offset = 0;
if (ctx_obj == NULL) {
seq_printf(m, "Context on %s with no gem object\n",
ring->name);
return;
}
seq_printf(m, "CONTEXT: %s %u\n", ring->name,
intel_execlists_ctx_id(ctx_obj));
if (!i915_gem_obj_ggtt_bound(ctx_obj))
seq_puts(m, "\tNot bound in GGTT\n");
else
ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
if (i915_gem_object_get_pages(ctx_obj)) {
seq_puts(m, "\tFailed to get pages for context object\n");
return;
}
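/* Page 0 of an execlist context image is the per-process HW status
 * page; the register state starts at page 1, hence the page-1 kmap
 * and the +4096 in the GGTT offsets printed below. */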
page = i915_gem_object_get_page(ctx_obj, 1);
if (!WARN_ON(page == NULL)) {
reg_state = kmap_atomic(page);
for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
ggtt_offset + 4096 + (j * 4),
reg_state[j], reg_state[j + 1],
reg_state[j + 2], reg_state[j + 3]);
}
kunmap_atomic(reg_state);
}
seq_putc(m, '\n');
}
static int i915_dump_lrc(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@ -1793,29 +1836,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
list_for_each_entry(ctx, &dev_priv->context_list, link) {
for_each_ring(ring, dev_priv, i) {
struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
if (ring->default_context == ctx)
continue;
if (ctx_obj) {
struct page *page = i915_gem_object_get_page(ctx_obj, 1);
uint32_t *reg_state = kmap_atomic(page);
int j;
seq_printf(m, "CONTEXT: %s %u\n", ring->name,
intel_execlists_ctx_id(ctx_obj));
for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
reg_state[j], reg_state[j + 1],
reg_state[j + 2], reg_state[j + 3]);
}
kunmap_atomic(reg_state);
seq_putc(m, '\n');
}
if (ring->default_context != ctx)
i915_dump_lrc_obj(m, ring,
ctx->engine[i].state);
}
}
@ -1975,6 +1998,8 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
if (IS_GEN3(dev) || IS_GEN4(dev)) {
seq_printf(m, "DDC = 0x%08x\n",
I915_READ(DCC));
seq_printf(m, "DDC2 = 0x%08x\n",
I915_READ(DCC2));
seq_printf(m, "C0DRB3 = 0x%04x\n",
I915_READ16(C0DRB3));
seq_printf(m, "C1DRB3 = 0x%04x\n",
@ -1997,6 +2022,10 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
I915_READ(DISP_ARB_CTL));
}
if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
seq_puts(m, "L-shaped memory detected\n");
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);

File diff suppressed because it is too large.


@ -584,6 +584,8 @@ static int i915_drm_suspend(struct drm_device *dev)
return error;
}
intel_suspend_gt_powersave(dev);
/*
* Disable CRTCs directly since we want to preserve sw state
* for _thaw. Also, power gate the CRTC power wells.
@ -595,15 +597,11 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_dp_mst_suspend(dev);
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
intel_runtime_pm_disable_interrupts(dev_priv);
intel_hpd_cancel_work(dev_priv);
intel_suspend_encoders(dev_priv);
intel_suspend_gt_powersave(dev);
intel_suspend_hw(dev);
}
@ -703,12 +701,10 @@ static int i915_drm_resume(struct drm_device *dev)
intel_modeset_init_hw(dev);
{
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
}
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
intel_dp_mst_resume(dev);
drm_modeset_lock_all(dev);
@ -856,10 +852,7 @@ int i915_reset(struct drm_device *dev)
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->ums.mm_suspended) {
dev_priv->ums.mm_suspended = 0;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
dev_priv->gpu_error.reload_in_reset = true;
@ -1395,9 +1388,8 @@ static int intel_runtime_suspend(struct device *device)
i915_gem_release_all_mmaps(dev_priv);
mutex_unlock(&dev->struct_mutex);
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
intel_runtime_pm_disable_interrupts(dev_priv);
intel_suspend_gt_powersave(dev);
intel_runtime_pm_disable_interrupts(dev_priv);
ret = intel_suspend_complete(dev_priv);
if (ret) {
@ -1578,8 +1570,6 @@ static struct drm_driver driver = {
.resume = i915_resume_legacy,
.device_is_agp = i915_driver_device_is_agp,
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,


@ -55,7 +55,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20141107"
#define DRIVER_DATE "20141121"
#undef WARN_ON
#define WARN_ON(x) WARN(x, "WARN_ON(" #x ")")
@ -213,10 +213,15 @@ enum intel_dpll_id {
/* real shared dpll ids must be >= 0 */
DPLL_ID_PCH_PLL_A = 0,
DPLL_ID_PCH_PLL_B = 1,
/* hsw/bdw */
DPLL_ID_WRPLL1 = 0,
DPLL_ID_WRPLL2 = 1,
/* skl */
DPLL_ID_SKL_DPLL1 = 0,
DPLL_ID_SKL_DPLL2 = 1,
DPLL_ID_SKL_DPLL3 = 2,
};
#define I915_NUM_PLLS 2
#define I915_NUM_PLLS 3
struct intel_dpll_hw_state {
/* i9xx, pch plls */
@ -227,6 +232,17 @@ struct intel_dpll_hw_state {
/* hsw, bdw */
uint32_t wrpll;
/* skl */
/*
* DPLL_CTRL1 has 6 bits for each DPLL. We store those in the
* lower part of ctrl1 and they get shifted into position when writing
* the register. This allows us to easily compare the state to share
* the DPLL.
*/
uint32_t ctrl1;
/* HDMI only, 0 when used for DP */
uint32_t cfgcr1, cfgcr2;
};
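/*
 * A minimal standalone sketch of the shift-into-place scheme described
 * above; the macros mirror the DPLL_CTRL1_* and DPLL_CRTL1_* defines
 * added to i915_reg.h later in this diff (illustrative, not driver code):
 */

#include <stdint.h>
#include <stdio.h>

#define DPLL_CTRL1_OVERRIDE(id)			(1 << ((id) * 6))
#define DPLL_CRTL1_LINK_RATE(linkrate, id)	((linkrate) << ((id) * 6 + 1))
#define DPLL_CRTL1_LINK_RATE_1350		1

int main(void)
{
	/* Per-PLL state is stored right-justified, i.e. encoded as id 0. */
	uint32_t ctrl1 = DPLL_CTRL1_OVERRIDE(0) |
			 DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 0);
	int id = 2; /* e.g. SKL_DPLL2 */
	uint32_t reg = 0;

	/* Shift the 6-bit field into position for this DPLL id. */
	reg = (reg & ~(0x3fu << (id * 6))) | (ctrl1 << (id * 6));
	printf("DPLL_CTRL1 = 0x%08x\n", reg);
	return 0;
}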
struct intel_shared_dpll_config {
@ -256,6 +272,11 @@ struct intel_shared_dpll {
struct intel_dpll_hw_state *hw_state);
};
#define SKL_DPLL0 0
#define SKL_DPLL1 1
#define SKL_DPLL2 2
#define SKL_DPLL3 3
/* Used by dp and fdi links */
struct intel_link_m_n {
uint32_t tu;
@ -306,12 +327,6 @@ struct intel_opregion {
struct intel_overlay;
struct intel_overlay_error_state;
struct drm_local_map;
struct drm_i915_master_private {
struct drm_local_map *sarea;
struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
@ -510,7 +525,7 @@ struct drm_i915_display_funcs {
/* display clock increase/decrease */
/* pll clock increase/decrease */
int (*setup_backlight)(struct intel_connector *connector);
int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe);
uint32_t (*get_backlight)(struct intel_connector *connector);
void (*set_backlight)(struct intel_connector *connector,
uint32_t level);
@ -664,6 +679,7 @@ struct intel_context {
struct {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
int unpin_count;
} engine[I915_NUM_RINGS];
struct list_head link;
@ -748,6 +764,7 @@ enum intel_sbi_destination {
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
struct intel_fbdev;
struct intel_fbc_work;
@ -799,7 +816,6 @@ struct i915_suspend_saved_registers {
u32 saveBLC_HIST_CTL;
u32 saveBLC_PWM_CTL;
u32 saveBLC_PWM_CTL2;
u32 saveBLC_HIST_CTL_B;
u32 saveBLC_CPU_PWM_CTL;
u32 saveBLC_CPU_PWM_CTL2;
u32 saveFPB0;
@ -978,8 +994,12 @@ struct intel_rps_ei {
};
struct intel_gen6_power_mgmt {
/* work and pm_iir are protected by dev_priv->irq_lock */
/*
* work, interrupts_enabled and pm_iir are protected by
* dev_priv->irq_lock
*/
struct work_struct work;
bool interrupts_enabled;
u32 pm_iir;
/* Frequencies are stored in potentially platform dependent multiples.
@ -1102,31 +1122,6 @@ struct i915_power_domains {
struct i915_power_well *power_wells;
};
struct i915_dri1_state {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
unsigned int cpp;
int back_offset;
int front_offset;
int current_page;
int page_flipping;
uint32_t counter;
};
struct i915_ums_state {
/**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
*
* This is set between LeaveVT and EnterVT. It needs to be
* replaced with a semaphore. It also needs to be
* transitioned away from for kernel modesetting.
*/
int mm_suspended;
};
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info[MAX_L3_SLICES];
@ -1762,12 +1757,6 @@ struct drm_i915_private {
uint32_t bios_vgacntr;
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
/* Old ums support infrastructure, same warning applies. */
struct i915_ums_state ums;
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
@ -1957,10 +1946,10 @@ struct drm_i915_gem_object {
unsigned long user_pin_count;
struct drm_file *pin_filp;
/** for phy allocated objects */
struct drm_dma_handle *phys_handle;
union {
/** for phy allocated objects */
struct drm_dma_handle *phys_handle;
struct i915_gem_userptr {
uintptr_t ptr;
unsigned read_only :1;
@ -2326,8 +2315,6 @@ struct i915_params {
extern struct i915_params i915 __read_mostly;
/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@ -2341,9 +2328,6 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *box,
int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@ -2395,8 +2379,6 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
ibx_display_interrupt_update((dev_priv), (bits), 0)
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
@ -2443,10 +2425,6 @@ int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
@ -2489,7 +2467,6 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
int *needs_clflush);
@ -2956,8 +2933,8 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);


@ -159,33 +159,6 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
return i915_gem_obj_bound_any(obj) && !obj->active;
}
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_init *args = data;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
if (args->gtt_start >= args->gtt_end ||
(args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
return -EINVAL;
/* GEM with user mode setting was never supported on ilk and later. */
if (INTEL_INFO(dev)->gen >= 5)
return -ENODEV;
mutex_lock(&dev->struct_mutex);
i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
args->gtt_end);
dev_priv->gtt.mappable_end = args->gtt_end;
mutex_unlock(&dev->struct_mutex);
return 0;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@ -208,40 +181,137 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
drm_dma_handle_t *phys = obj->phys_handle;
struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
char *vaddr = obj->phys_handle->vaddr;
struct sg_table *st;
struct scatterlist *sg;
int i;
if (!phys)
return;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
return -EINVAL;
if (obj->madv == I915_MADV_WILLNEED) {
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
char *src;
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
return PTR_ERR(page);
src = kmap_atomic(page);
memcpy(vaddr, src, PAGE_SIZE);
drm_clflush_virt_range(vaddr, PAGE_SIZE);
kunmap_atomic(src);
page_cache_release(page);
vaddr += PAGE_SIZE;
}
i915_gem_chipset_flush(obj->base.dev);
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
return -ENOMEM;
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
return -ENOMEM;
}
sg = st->sgl;
sg->offset = 0;
sg->length = obj->base.size;
sg_dma_address(sg) = obj->phys_handle->busaddr;
sg_dma_len(sg) = obj->base.size;
obj->pages = st;
obj->has_dma_mapping = true;
return 0;
}
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
int ret;
BUG_ON(obj->madv == __I915_MADV_PURGED);
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret) {
/* In the event of a disaster, abandon all caches and
* hope for the best.
*/
WARN_ON(ret != -EIO);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
if (obj->dirty) {
struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
char *vaddr = phys->vaddr;
char *vaddr = obj->phys_handle->vaddr;
int i;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page = shmem_read_mapping_page(mapping, i);
if (!IS_ERR(page)) {
char *dst = kmap_atomic(page);
memcpy(dst, vaddr, PAGE_SIZE);
drm_clflush_virt_range(dst, PAGE_SIZE);
kunmap_atomic(dst);
struct page *page;
char *dst;
set_page_dirty(page);
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
continue;
dst = kmap_atomic(page);
drm_clflush_virt_range(vaddr, PAGE_SIZE);
memcpy(dst, vaddr, PAGE_SIZE);
kunmap_atomic(dst);
set_page_dirty(page);
if (obj->madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
page_cache_release(page);
}
page_cache_release(page);
vaddr += PAGE_SIZE;
}
i915_gem_chipset_flush(obj->base.dev);
obj->dirty = 0;
}
#ifdef CONFIG_X86
set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
#endif
drm_pci_free(obj->base.dev, phys);
obj->phys_handle = NULL;
sg_free_table(obj->pages);
kfree(obj->pages);
obj->has_dma_mapping = false;
}
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
drm_pci_free(obj->base.dev, obj->phys_handle);
}
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
.get_pages = i915_gem_object_get_pages_phys,
.put_pages = i915_gem_object_put_pages_phys,
.release = i915_gem_object_release_phys,
};
static int
drop_pages(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma, *next;
int ret;
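/* Take a temporary reference so that unbinding the object's last VMA
 * cannot free it while we are still dropping its pages below. */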
drm_gem_object_reference(&obj->base);
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
if (i915_vma_unbind(vma))
break;
ret = i915_gem_object_put_pages(obj);
drm_gem_object_unreference(&obj->base);
return ret;
}
int
@ -249,9 +319,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align)
{
drm_dma_handle_t *phys;
struct address_space *mapping;
char *vaddr;
int i;
int ret;
if (obj->phys_handle) {
if ((unsigned long)obj->phys_handle->vaddr & (align -1))
@ -266,41 +334,19 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (obj->base.filp == NULL)
return -EINVAL;
ret = drop_pages(obj);
if (ret)
return ret;
/* create a new object */
phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
if (!phys)
return -ENOMEM;
vaddr = phys->vaddr;
#ifdef CONFIG_X86
set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
#endif
mapping = file_inode(obj->base.filp)->i_mapping;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
char *src;
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
#ifdef CONFIG_X86
set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
#endif
drm_pci_free(obj->base.dev, phys);
return PTR_ERR(page);
}
src = kmap_atomic(page);
memcpy(vaddr, src, PAGE_SIZE);
kunmap_atomic(src);
mark_page_accessed(page);
page_cache_release(page);
vaddr += PAGE_SIZE;
}
obj->phys_handle = phys;
return 0;
obj->ops = &i915_gem_phys_ops;
return i915_gem_object_get_pages(obj);
}
static int
@ -311,6 +357,14 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = to_user_ptr(args->data_ptr);
int ret;
/* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
@ -326,6 +380,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
return -EFAULT;
}
drm_clflush_virt_range(vaddr, args->size);
i915_gem_chipset_flush(dev);
return 0;
}
@ -1048,11 +1103,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
if (obj->phys_handle) {
ret = i915_gem_phys_pwrite(obj, args, file);
goto out;
}
if (obj->tiling_mode == I915_TILING_NONE &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
cpu_write_needs_clflush(obj)) {
@ -1062,8 +1112,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* textures). Fallback to the shmem path in that case. */
}
if (ret == -EFAULT || ret == -ENOSPC)
ret = i915_gem_shmem_pwrite(dev, obj, args, file);
if (ret == -EFAULT || ret == -ENOSPC) {
if (obj->phys_handle)
ret = i915_gem_phys_pwrite(obj, args, file);
else
ret = i915_gem_shmem_pwrite(dev, obj, args, file);
}
out:
drm_gem_object_unreference(&obj->base);
@ -2140,6 +2194,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
if (obj->tiling_mode != I915_TILING_NONE &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
i915_gem_object_pin_pages(obj);
return 0;
err_pages:
@ -2438,15 +2496,13 @@ int __i915_add_request(struct intel_engine_cs *ring,
ring->outstanding_lazy_seqno = 0;
ring->preallocated_lazy_request = NULL;
if (!dev_priv->ums.mm_suspended) {
i915_queue_hangcheck(ring->dev);
i915_queue_hangcheck(ring->dev);
cancel_delayed_work_sync(&dev_priv->mm.idle_work);
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
}
cancel_delayed_work_sync(&dev_priv->mm.idle_work);
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
if (out_seqno)
*out_seqno = request->seqno;
@ -2513,12 +2569,18 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
struct intel_context *ctx = request->ctx;
list_del(&request->list);
i915_gem_request_remove_from_client(request);
if (request->ctx)
i915_gem_context_unreference(request->ctx);
if (i915.enable_execlists && ctx) {
struct intel_engine_cs *ring = request->ring;
if (ctx != ring->default_context)
intel_lr_context_unpin(ring, ctx);
i915_gem_context_unreference(ctx);
}
kfree(request);
}
@ -2572,6 +2634,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_object_move_to_inactive(obj);
}
/*
* Clear the execlists queue up before freeing the requests, as those
* are the ones that keep the context and ringbuffer backing objects
* pinned in place.
*/
while (!list_empty(&ring->execlist_queue)) {
struct intel_ctx_submit_request *submit_req;
submit_req = list_first_entry(&ring->execlist_queue,
struct intel_ctx_submit_request,
execlist_link);
list_del(&submit_req->execlist_link);
intel_runtime_pm_put(dev_priv);
i915_gem_context_unreference(submit_req->ctx);
kfree(submit_req);
}
/*
* We must free the requests after all the corresponding objects have
* been moved off active lists. Which is the same order as the normal
@ -2589,18 +2668,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_free_request(request);
}
while (!list_empty(&ring->execlist_queue)) {
struct intel_ctx_submit_request *submit_req;
submit_req = list_first_entry(&ring->execlist_queue,
struct intel_ctx_submit_request,
execlist_link);
list_del(&submit_req->execlist_link);
intel_runtime_pm_put(dev_priv);
i915_gem_context_unreference(submit_req->ctx);
kfree(submit_req);
}
/* These may not have been flush before the reset, do so now */
kfree(ring->preallocated_lazy_request);
ring->preallocated_lazy_request = NULL;
@ -2737,6 +2804,15 @@ i915_gem_retire_requests(struct drm_device *dev)
for_each_ring(ring, dev_priv, i) {
i915_gem_retire_requests_ring(ring);
idle &= list_empty(&ring->request_list);
if (i915.enable_execlists) {
unsigned long flags;
spin_lock_irqsave(&ring->execlist_lock, flags);
idle &= list_empty(&ring->execlist_queue);
spin_unlock_irqrestore(&ring->execlist_lock, flags);
intel_execlists_retire_requests(ring);
}
}
if (idle)
@ -3527,7 +3603,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
*/
if (obj->stolen)
if (obj->stolen || obj->phys_handle)
return false;
/* If the GPU is snooping the contents of the CPU cache,
@ -4320,6 +4396,7 @@ int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
int ret;
@ -4347,6 +4424,15 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
goto out;
}
if (obj->pages &&
obj->tiling_mode != I915_TILING_NONE &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->madv == I915_MADV_WILLNEED)
i915_gem_object_unpin_pages(obj);
if (args->madv == I915_MADV_WILLNEED)
i915_gem_object_pin_pages(obj);
}
if (obj->madv != __I915_MADV_PURGED)
obj->madv = args->madv;
@ -4489,8 +4575,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
}
}
i915_gem_object_detach_phys(obj);
/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
* before progressing. */
if (obj->stolen)
@ -4498,6 +4582,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
WARN_ON(obj->frontbuffer_bits);
if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
obj->tiling_mode != I915_TILING_NONE)
i915_gem_object_unpin_pages(obj);
if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
if (discard_backing_storage(obj))
@ -4570,9 +4659,6 @@ i915_gem_suspend(struct drm_device *dev)
int ret = 0;
mutex_lock(&dev->struct_mutex);
if (dev_priv->ums.mm_suspended)
goto err;
ret = i915_gpu_idle(dev);
if (ret)
goto err;
@ -4583,15 +4669,7 @@ i915_gem_suspend(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
i915_kernel_lost_context(dev);
i915_gem_stop_ringbuffers(dev);
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
* And not confound ums.mm_suspended!
*/
dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
DRIVER_MODESET);
mutex_unlock(&dev->struct_mutex);
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
@ -4882,9 +4960,6 @@ int i915_gem_init(struct drm_device *dev)
}
mutex_unlock(&dev->struct_mutex);
/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
dev_priv->dri1.allow_batchbuffer = 1;
return ret;
}
@ -4899,74 +4974,6 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
dev_priv->gt.cleanup_ring(ring);
}
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
if (i915_reset_in_progress(&dev_priv->gpu_error)) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
atomic_set(&dev_priv->gpu_error.reset_counter, 0);
}
mutex_lock(&dev->struct_mutex);
dev_priv->ums.mm_suspended = 0;
ret = i915_gem_init_hw(dev);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
ret = drm_irq_install(dev, dev->pdev->irq);
if (ret)
goto cleanup_ringbuffer;
mutex_unlock(&dev->struct_mutex);
return 0;
cleanup_ringbuffer:
i915_gem_cleanup_ringbuffer(dev);
dev_priv->ums.mm_suspended = 1;
mutex_unlock(&dev->struct_mutex);
return ret;
}
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
mutex_lock(&dev->struct_mutex);
drm_irq_uninstall(dev);
mutex_unlock(&dev->struct_mutex);
return i915_gem_suspend(dev);
}
void
i915_gem_lastclose(struct drm_device *dev)
{
int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
ret = i915_gem_suspend(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
}
static void
init_ring_lists(struct intel_engine_cs *ring)
{


@ -88,6 +88,7 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
/* This is a HW constraint. The value below is the largest known requirement
* I've seen in a spec to date, and that was a workaround for a non-shipping
@ -137,6 +138,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
struct intel_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
trace_i915_context_free(ctx);
if (i915.enable_execlists)
intel_lr_context_free(ctx);
@ -274,6 +277,8 @@ i915_gem_create_context(struct drm_device *dev,
ctx->ppgtt = ppgtt;
}
trace_i915_context_create(ctx);
return ctx;
err_unpin:
@ -549,6 +554,7 @@ static int do_switch(struct intel_engine_cs *ring,
from = ring->last_context;
if (to->ppgtt) {
trace_switch_mm(ring, to);
ret = to->ppgtt->switch_mm(to->ppgtt, ring);
if (ret)
goto unpin_out;
@ -629,7 +635,7 @@ static int do_switch(struct intel_engine_cs *ring,
if (uninitialized) {
if (ring->init_context) {
ret = ring->init_context(ring);
ret = ring->init_context(ring, to);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}


@ -1023,6 +1023,47 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return 0;
}
static int
i915_emit_box(struct intel_engine_cs *ring,
struct drm_clip_rect *box,
int DR1, int DR4)
{
int ret;
if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
box->y2 <= 0 || box->x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
box->x1, box->y1, box->x2, box->y2);
return -EINVAL;
}
if (INTEL_INFO(ring->dev)->gen >= 4) {
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
intel_ring_emit(ring, DR4);
} else {
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
intel_ring_emit(ring, DR1);
intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
intel_ring_emit(ring, DR4);
intel_ring_emit(ring, 0);
}
intel_ring_advance(ring);
return 0;
}
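/* The DRAWRECT payload above packs each corner as (x & 0xffff) | y << 16
 * and makes the max corner inclusive by subtracting one; a quick
 * standalone check of that packing (illustrative only): */

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t x1 = 0, y1 = 0, x2 = 640, y2 = 480;
	uint32_t rect_min = (x1 & 0xffff) | (y1 << 16);
	uint32_t rect_max = ((x2 - 1) & 0xffff) | ((y2 - 1) << 16);

	assert(rect_min == 0x00000000);
	assert(rect_max == ((479u << 16) | 639u));
	return 0;
}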
int
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
@ -1151,7 +1192,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(dev, &cliprects[i],
ret = i915_emit_box(ring, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto error;
@ -1300,12 +1341,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto pre_mutex_err;
if (dev_priv->ums.mm_suspended) {
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
goto pre_mutex_err;
}
ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);


@ -43,7 +43,12 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
if (IS_GEN8(dev))
has_full_ppgtt = false; /* XXX why? */
if (enable_ppgtt == 0 || !has_aliasing_ppgtt)
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
* execlists, the sole mechanism available to submit work.
*/
if (INTEL_INFO(dev)->gen < 9 &&
(enable_ppgtt == 0 || !has_aliasing_ppgtt))
return 0;
if (enable_ppgtt == 1)
@ -164,9 +169,6 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
/* Mark the page as writeable. Other platforms don't have a
* setting for read-only/writable, so this matches that behavior.
*/
if (!(flags & PTE_READ_ONLY))
pte |= BYT_PTE_WRITEABLE;
@ -1174,6 +1176,8 @@ i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
ppgtt->file_priv = fpriv;
trace_i915_ppgtt_create(&ppgtt->base);
return ppgtt;
}
@ -1182,6 +1186,8 @@ void i915_ppgtt_release(struct kref *kref)
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
trace_i915_ppgtt_release(&ppgtt->base);
/* vmas should already be unbound */
WARN_ON(!list_empty(&ppgtt->base.active_list));
WARN_ON(!list_empty(&ppgtt->base.inactive_list));
@ -1658,10 +1664,10 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
}
}
int i915_gem_setup_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end)
static int i915_gem_setup_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end)
{
/* Let GEM Manage all of the aperture.
*
@ -1952,9 +1958,17 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
* Only the snoop bit has meaning for CHV, the rest is
* ignored.
*
* Note that the hardware enforces snooping for all page
* table accesses. The snoop bit is actually ignored for
* PDEs.
* The hardware will never snoop for certain types of accesses:
* - CPU GTT (GMADR->GGTT->no snoop->memory)
* - PPGTT page tables
* - some other special cycles
*
* As with BDW, we also need to consider the following for GT accesses:
* "For GGTT, there is NO pat_sel[2:0] from the entry,
* so RTL will always use the value corresponding to
* pat_sel = 000".
* Which means we must set the snoop bit in PAT entry 0
* in order to keep the global status page working.
*/
pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
GEN8_PPAT(1, 0) |


@ -274,8 +274,6 @@ struct i915_hw_ppgtt {
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
int i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
unsigned long mappable_end, unsigned long end);
void i915_global_gtt_cleanup(struct drm_device *dev);


@ -178,6 +178,15 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
break;
}
/* check for L-shaped memory aka modified enhanced addressing */
if (IS_GEN4(dev)) {
uint32_t ddc2 = I915_READ(DCC2);
if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
}
if (dcc == 0xffffffff) {
DRM_ERROR("Couldn't read from MCHBAR. "
"Disabling tiling.\n");
@ -380,6 +389,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
ret = i915_gem_object_ggtt_unbind(obj);
if (ret == 0) {
if (obj->pages &&
obj->madv == I915_MADV_WILLNEED &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (args->tiling_mode == I915_TILING_NONE)
i915_gem_object_unpin_pages(obj);
if (obj->tiling_mode == I915_TILING_NONE)
i915_gem_object_pin_pages(obj);
}
obj->fence_dirty =
obj->last_fenced_seqno ||
obj->fence_reg != I915_FENCE_REG_NONE;
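
The set_tiling hunk above keeps the quirk's page pinning balanced across tiling transitions: with QUIRK_PIN_SWIZZLED_PAGES set, a WILLNEED object's pages stay pinned exactly while the object is tiled. A compact sketch of that invariant (hypothetical helper, not driver code):

#include <stdio.h>

#define TILING_NONE	0
#define TILING_X	1

/* Pin-count delta for a tiling transition under QUIRK_PIN_SWIZZLED_PAGES,
 * assuming the object has pages and madv == WILLNEED. */
static int quirk_pin_delta(int old_tiling, int new_tiling)
{
	int delta = 0;

	if (new_tiling == TILING_NONE)
		delta -= 1;	/* tiled -> linear: unpin */
	if (old_tiling == TILING_NONE)
		delta += 1;	/* linear -> tiled: pin */
	return delta;
}

int main(void)
{
	printf("%+d\n", quirk_pin_delta(TILING_NONE, TILING_X));	/* +1 */
	printf("%+d\n", quirk_pin_delta(TILING_X, TILING_NONE));	/* -1 */
	printf("%+d\n", quirk_pin_delta(TILING_X, TILING_X));		/* +0 */
	return 0;
}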


@ -242,11 +242,15 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
struct drm_i915_error_ring *ring)
struct drm_i915_error_state *error,
int ring_idx)
{
struct drm_i915_error_ring *ring = &error->ring[ring_idx];
if (!ring->valid)
return;
err_printf(m, "%s command stream:\n", ring_str(ring_idx));
err_printf(m, " HEAD: 0x%08x\n", ring->head);
err_printf(m, " TAIL: 0x%08x\n", ring->tail);
err_printf(m, " CTL: 0x%08x\n", ring->ctl);
@ -388,10 +392,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (INTEL_INFO(dev)->gen == 7)
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
err_printf(m, "%s command stream:\n", ring_str(i));
i915_ring_error_state(m, dev, &error->ring[i]);
}
for (i = 0; i < ARRAY_SIZE(error->ring); i++)
i915_ring_error_state(m, dev, error, i);
for (i = 0; i < error->vm_count; i++) {
err_printf(m, "vm[%d]\n", i);
@ -807,9 +809,8 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
if (!error->semaphore_obj)
error->semaphore_obj =
i915_error_object_create(dev_priv,
dev_priv->semaphore_obj,
&dev_priv->gtt.base);
i915_error_ggtt_object_create(dev_priv,
dev_priv->semaphore_obj);
for_each_ring(to, dev_priv, i) {
int idx;


@ -138,6 +138,8 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
POSTING_READ(type##IMR); \
} while (0)
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
@ -200,6 +202,21 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
ilk_update_gt_irq(dev_priv, mask, 0);
}
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}
static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}
static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}
/**
* snb_update_pm_irq - update GEN6_PMIMR
* @dev_priv: driver private
@ -223,8 +240,8 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->pm_irq_mask) {
dev_priv->pm_irq_mask = new_val;
I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
POSTING_READ(GEN6_PMIMR);
I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
POSTING_READ(gen6_pm_imr(dev_priv));
}
}
@ -238,44 +255,50 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
snb_update_pm_irq(dev_priv, mask, 0);
}
/**
* bdw_update_pm_irq - update GT interrupt 2
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*
* Copied from the snb function, updated with relevant register offsets
*/
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
void gen6_reset_rps_interrupts(struct drm_device *dev)
{
uint32_t new_val;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t reg = gen6_pm_iir(dev_priv);
assert_spin_locked(&dev_priv->irq_lock);
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
new_val = dev_priv->pm_irq_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != dev_priv->pm_irq_mask) {
dev_priv->pm_irq_mask = new_val;
I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
POSTING_READ(GEN8_GT_IMR(2));
}
spin_lock_irq(&dev_priv->irq_lock);
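/* IIR is double buffered: the second write flushes an event that may
 * have been latched while the first copy was still set (assumption;
 * this mirrors the doubled VLV_IIR clears elsewhere in this file). */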
I915_WRITE(reg, dev_priv->pm_rps_events);
I915_WRITE(reg, dev_priv->pm_rps_events);
POSTING_READ(reg);
spin_unlock_irq(&dev_priv->irq_lock);
}
void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
void gen6_enable_rps_interrupts(struct drm_device *dev)
{
bdw_update_pm_irq(dev_priv, mask, mask);
struct drm_i915_private *dev_priv = dev->dev_private;
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
dev_priv->rps.interrupts_enabled = true;
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
void gen6_disable_rps_interrupts(struct drm_device *dev)
{
bdw_update_pm_irq(dev_priv, mask, 0);
struct drm_i915_private *dev_priv = dev->dev_private;
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.interrupts_enabled = false;
spin_unlock_irq(&dev_priv->irq_lock);
cancel_work_sync(&dev_priv->rps.work);
I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
~dev_priv->pm_rps_events);
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
}
/**
@ -980,7 +1003,6 @@ static void notify_ring(struct drm_device *dev,
trace_i915_gem_request_complete(ring);
wake_up_all(&ring->irq_queue);
i915_queue_hangcheck(dev);
}
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
@ -1116,14 +1138,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
int new_delay, adj;
spin_lock_irq(&dev_priv->irq_lock);
/* Speed up work cancelation during disabling rps interrupts. */
if (!dev_priv->rps.interrupts_enabled) {
spin_unlock_irq(&dev_priv->irq_lock);
return;
}
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
if (INTEL_INFO(dev_priv->dev)->gen >= 8)
gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
else {
/* Make sure not to corrupt PMIMR state used by ringbuffer */
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
}
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
@ -1325,19 +1348,6 @@ static void snb_gt_irq_handler(struct drm_device *dev,
ivybridge_parity_error_irq_handler(dev, gt_iir);
}
static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
if ((pm_iir & dev_priv->pm_rps_events) == 0)
return;
spin_lock(&dev_priv->irq_lock);
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
spin_unlock(&dev_priv->irq_lock);
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 master_ctl)
@ -1399,7 +1409,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
I915_WRITE(GEN8_GT_IIR(2),
tmp & dev_priv->pm_rps_events);
ret = IRQ_HANDLED;
gen8_rps_irq_handler(dev_priv, tmp);
gen6_rps_irq_handler(dev_priv, tmp);
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
}
@ -1699,15 +1709,24 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
* the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
/* TODO: RPS on GEN9+ is not supported yet. */
if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
"GEN9+: unexpected RPS IRQ\n"))
return;
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
if (dev_priv->rps.interrupts_enabled) {
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
spin_unlock(&dev_priv->irq_lock);
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
if (INTEL_INFO(dev_priv)->gen >= 8)
return;
if (HAS_VEBOX(dev_priv->dev)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
@ -2222,6 +2241,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
irqreturn_t ret = IRQ_NONE;
uint32_t tmp = 0;
enum pipe pipe;
u32 aux_mask = GEN8_AUX_CHANNEL_A;
if (IS_GEN9(dev))
aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
master_ctl = I915_READ(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
@ -2254,7 +2278,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
if (tmp) {
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
ret = IRQ_HANDLED;
if (tmp & GEN8_AUX_CHANNEL_A)
if (tmp & aux_mask)
dp_aux_irq_handler(dev);
else
DRM_ERROR("Unexpected DE Port interrupt\n");
@ -3036,10 +3061,15 @@ static void i915_hangcheck_elapsed(unsigned long data)
void i915_queue_hangcheck(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;
if (!i915.enable_hangcheck)
return;
mod_timer(&dev_priv->gpu_error.hangcheck_timer,
/* Don't continually defer the hangcheck, but make sure it is active */
if (timer_pending(timer))
return;
mod_timer(timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
@ -3488,11 +3518,14 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
uint32_t de_pipe_enables;
int pipe;
u32 aux_en = GEN8_AUX_CHANNEL_A;
if (IS_GEN9(dev_priv))
if (IS_GEN9(dev_priv)) {
de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
else
aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
} else
de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
@ -3510,7 +3543,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->de_irq_mask[pipe],
de_pipe_enables);
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
}
static int gen8_irq_postinstall(struct drm_device *dev)
@ -3533,34 +3566,8 @@ static int gen8_irq_postinstall(struct drm_device *dev)
static int cherryview_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
int pipe;
/*
* Leave vblank interrupts masked initially. enable/disable will
* toggle them based on usage.
*/
dev_priv->irq_mask = ~enable_mask;
for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
for_each_pipe(dev_priv, pipe)
i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
spin_unlock_irq(&dev_priv->irq_lock);
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IER, enable_mask);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
POSTING_READ(VLV_IMR);
vlv_display_irq_postinstall(dev_priv);
gen8_gt_irq_postinstall(dev_priv);
@ -3580,6 +3587,20 @@ static void gen8_irq_uninstall(struct drm_device *dev)
gen8_irq_reset(dev);
}
static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
{
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
valleyview_display_irqs_uninstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
vlv_display_irq_reset(dev_priv);
dev_priv->irq_mask = 0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -3593,22 +3614,12 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xffffffff);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
valleyview_display_irqs_uninstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
vlv_display_irq_reset(dev_priv);
dev_priv->irq_mask = 0;
vlv_display_irq_uninstall(dev_priv);
}
static void cherryview_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
if (!dev_priv)
return;
@ -3620,13 +3631,7 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_PCU_);
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
GEN5_IRQ_RESET(VLV_);
vlv_display_irq_uninstall(dev_priv);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
@ -3760,8 +3765,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
I915_WRITE16(IIR, iir & ~flip_mask);
new_iir = I915_READ16(IIR); /* Flush posted writes */
i915_update_dri1_breadcrumb(dev);
if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
@ -3998,8 +4001,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
iir = new_iir;
} while (iir & ~flip_mask);
i915_update_dri1_breadcrumb(dev);
return ret;
}
@ -4227,8 +4228,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
iir = new_iir;
}
i915_update_dri1_breadcrumb(dev);
return ret;
}


@ -248,6 +248,16 @@
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
/* SKL ones */
#define MI_DISPLAY_FLIP_SKL_PLANE_1_A (0 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_1_B (1 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_1_C (2 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_2_A (4 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_2_B (5 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_2_C (6 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_3_A (7 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_3_B (8 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_3_C (9 << 8)
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
@ -314,6 +324,8 @@
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
#define MI_PREDICATE_SRC0 (0x2400)
#define MI_PREDICATE_SRC1 (0x2408)
#define MI_PREDICATE_RESULT_2 (0x2214)
#define LOWER_SLICE_ENABLED (1<<0)
@ -564,6 +576,7 @@ enum punit_power_well {
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
#define GPLLENABLE (1<<4)
#define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_REG_CZ_TIMESTAMP 0xce
@ -2030,6 +2043,8 @@ enum punit_power_well {
#define DCC_ADDRESSING_MODE_MASK (3 << 0)
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
#define DCC2 0x10204
#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20)
/* Pineview MCH register contains DDR3 setting */
#define CSHRDDR3CTL 0x101a8
@ -2313,7 +2328,6 @@ enum punit_power_well {
#define GEN6_GT_THREAD_STATUS_REG 0x13805c
#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
@ -4904,6 +4918,18 @@ enum punit_power_well {
#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
#define _PSA_CTL 0x68180
#define _PSB_CTL 0x68980
#define PS_ENABLE (1<<31)
#define _PSA_WIN_SZ 0x68174
#define _PSB_WIN_SZ 0x68974
#define _PSA_WIN_POS 0x68170
#define _PSB_WIN_POS 0x68970
#define PS_CTL(pipe) _PIPE(pipe, _PSA_CTL, _PSB_CTL)
#define PS_WIN_SZ(pipe) _PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
#define PS_WIN_POS(pipe) _PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
/* legacy palette */
#define _LGC_PALETTE_A 0x4a000
#define _LGC_PALETTE_B 0x4a800
@ -5048,6 +5074,9 @@ enum punit_power_well {
#define GEN8_DE_PORT_IIR 0x44448
#define GEN8_DE_PORT_IER 0x4444c
#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
#define GEN9_AUX_CHANNEL_D (1 << 27)
#define GEN9_AUX_CHANNEL_C (1 << 26)
#define GEN9_AUX_CHANNEL_B (1 << 25)
#define GEN8_AUX_CHANNEL_A (1 << 0)
#define GEN8_DE_MISC_ISR 0x44460
@ -5131,6 +5160,7 @@ enum punit_power_well {
/* GEN8 chicken */
#define HDC_CHICKEN0 0x7300
#define HDC_FORCE_NON_COHERENT (1<<4)
#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
/* WaCatErrorRejectionIssue */
@ -6010,11 +6040,12 @@ enum punit_power_well {
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define DISPLAY_IPS_CONTROL 0x19
#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
#define GEN6_PCODE_DATA1 0x13812C
#define GEN9_PCODE_DATA1 0x13812C
#define GEN9_PCODE_READ_MEM_LATENCY 0x6
#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
@ -6427,6 +6458,83 @@ enum punit_power_well {
#define LCPLL_CD_SOURCE_FCLK (1<<21)
#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
/*
* SKL Clocks
*/
/* CDCLK_CTL */
#define CDCLK_CTL 0x46000
#define CDCLK_FREQ_SEL_MASK (3<<26)
#define CDCLK_FREQ_450_432 (0<<26)
#define CDCLK_FREQ_540 (1<<26)
#define CDCLK_FREQ_337_308 (2<<26)
#define CDCLK_FREQ_675_617 (3<<26)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
/* LCPLL_CTL */
#define LCPLL1_CTL 0x46010
#define LCPLL2_CTL 0x46014
#define LCPLL_PLL_ENABLE (1<<31)
/* DPLL control1 */
#define DPLL_CTRL1 0x6C058
#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
#define DPLL_CRTL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
#define DPLL_CRTL1_LINK_RATE_SHIFT(id) ((id)*6+1)
#define DPLL_CRTL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6))
#define DPLL_CRTL1_LINK_RATE_2700 0
#define DPLL_CRTL1_LINK_RATE_1350 1
#define DPLL_CRTL1_LINK_RATE_810 2
#define DPLL_CRTL1_LINK_RATE_1620 3
#define DPLL_CRTL1_LINK_RATE_1080 4
#define DPLL_CRTL1_LINK_RATE_2160 5
/* DPLL control2 */
#define DPLL_CTRL2 0x6C05C
#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<((port)+15))
#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1))
#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1)
#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk)<<((port)*3+1))
#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3))
/* DPLL Status */
#define DPLL_STATUS 0x6C060
#define DPLL_LOCK(id) (1<<((id)*8))
/* DPLL cfg */
#define DPLL1_CFGCR1 0x6C040
#define DPLL2_CFGCR1 0x6C048
#define DPLL3_CFGCR1 0x6C050
#define DPLL_CFGCR1_FREQ_ENABLE (1<<31)
#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
#define DPLL_CFGCR1_DCO_FRACTION(x) ((x)<<9)
#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
#define DPLL1_CFGCR2 0x6C044
#define DPLL2_CFGCR2 0x6C04C
#define DPLL3_CFGCR2 0x6C054
#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8)
#define DPLL_CFGCR2_QDIV_RATIO(x) ((x)<<8)
#define DPLL_CFGCR2_QDIV_MODE(x) ((x)<<7)
#define DPLL_CFGCR2_KDIV_MASK (3<<5)
#define DPLL_CFGCR2_KDIV(x) ((x)<<5)
#define DPLL_CFGCR2_KDIV_5 (0<<5)
#define DPLL_CFGCR2_KDIV_2 (1<<5)
#define DPLL_CFGCR2_KDIV_3 (2<<5)
#define DPLL_CFGCR2_KDIV_1 (3<<5)
#define DPLL_CFGCR2_PDIV_MASK (7<<2)
#define DPLL_CFGCR2_PDIV(x) ((x)<<2)
#define DPLL_CFGCR2_PDIV_1 (0<<2)
#define DPLL_CFGCR2_PDIV_2 (1<<2)
#define DPLL_CFGCR2_PDIV_3 (2<<2)
#define DPLL_CFGCR2_PDIV_7 (4<<2)
#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
#define GET_CFG_CR1_REG(id) (DPLL1_CFGCR1 + ((id) - SKL_DPLL1) * 8)
#define GET_CFG_CR2_REG(id) (DPLL1_CFGCR2 + ((id) - SKL_DPLL1) * 8)
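As an aside on the encoding above: each DPLL id owns a contiguous 6-bit field in DPLL_CTRL1 (override bit, three link-rate bits, SSC, HDMI mode), which is why all the shifts are (id)*6 based. A minimal standalone sketch (editorial illustration, not part of the patch) that reuses two of the macros to show where the field for DPLL 2 lands:

#include <stdio.h>

#define DPLL_CTRL1_HDMI_MODE(id) (1 << ((id) * 6 + 5))
#define DPLL_CTRL1_OVERRIDE(id) (1 << ((id) * 6))

int main(void)
{
	/* For DPLL id 2 the 6-bit field starts at bit 12. */
	printf("override=0x%08x hdmi_mode=0x%08x\n",
	       DPLL_CTRL1_OVERRIDE(2), DPLL_CTRL1_HDMI_MODE(2));
	/* prints: override=0x00001000 hdmi_mode=0x00020000 */
	return 0;
}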
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)

View file

@@ -203,34 +203,19 @@ static void i915_save_display(struct drm_device *dev)
i915_save_display_reg(dev);
/* LVDS state */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
dev_priv->regfile.saveLVDS = I915_READ(LVDS);
/* Panel power sequencer */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
dev_priv->regfile.saveBLC_HIST_CTL =
I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
dev_priv->regfile.saveBLC_HIST_CTL_B =
I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
} else {
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
if (IS_MOBILE(dev) && !IS_I830(dev))
dev_priv->regfile.saveLVDS = I915_READ(LVDS);
}
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
} else {
} else if (!IS_VALLEYVIEW(dev)) {
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
@@ -259,29 +244,19 @@ static void i915_restore_display(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
mask = ~LVDS_PORT_EN;
/* LVDS state */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
/* Panel power sequencer */
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
I915_WRITE(RSTDBYCTL,
dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
} else if (IS_VALLEYVIEW(dev)) {
I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
dev_priv->regfile.saveBLC_HIST_CTL);
I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
dev_priv->regfile.saveBLC_HIST_CTL);
} else {
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
} else if (!IS_VALLEYVIEW(dev)) {
I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
@@ -368,6 +343,8 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
I915_WRITE(RSTDBYCTL,
dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
} else {
I915_WRITE(IER, dev_priv->regfile.saveIER);
I915_WRITE(IMR, dev_priv->regfile.saveIMR);

View file

@@ -587,6 +587,110 @@ TRACE_EVENT(intel_gpu_freq_change,
TP_printk("new_freq=%u", __entry->freq)
);
/**
* DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
*
* With full ppgtt enabled, each process using drm will allocate at least one
* translation table. These traces make it possible to keep track of the
* allocation and lifetime of the tables; this can be used during
* testing/debug to verify that we are not leaking ppgtts.
* These traces identify the ppgtt through the vm pointer, which is also printed
* by the i915_vma_bind and i915_vma_unbind tracepoints.
*/
DECLARE_EVENT_CLASS(i915_ppgtt,
TP_PROTO(struct i915_address_space *vm),
TP_ARGS(vm),
TP_STRUCT__entry(
__field(struct i915_address_space *, vm)
__field(u32, dev)
),
TP_fast_assign(
__entry->vm = vm;
__entry->dev = vm->dev->primary->index;
),
TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
)
DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
TP_PROTO(struct i915_address_space *vm),
TP_ARGS(vm)
);
DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
TP_PROTO(struct i915_address_space *vm),
TP_ARGS(vm)
);
/**
* DOC: i915_context_create and i915_context_free tracepoints
*
* These tracepoints are used to track creation and deletion of contexts.
* If full ppgtt is enabled, they also print the address of the vm assigned to
* the context.
*/
DECLARE_EVENT_CLASS(i915_context,
TP_PROTO(struct intel_context *ctx),
TP_ARGS(ctx),
TP_STRUCT__entry(
__field(u32, dev)
__field(struct intel_context *, ctx)
__field(struct i915_address_space *, vm)
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
__entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
__entry->dev, __entry->ctx, __entry->vm)
)
DEFINE_EVENT(i915_context, i915_context_create,
TP_PROTO(struct intel_context *ctx),
TP_ARGS(ctx)
);
DEFINE_EVENT(i915_context, i915_context_free,
TP_PROTO(struct intel_context *ctx),
TP_ARGS(ctx)
);
/**
* DOC: switch_mm tracepoint
*
* This tracepoint allows tracking of the mm switch, which is an important point
* in the lifetime of the vm in the legacy submission path. This tracepoint is
* called only if full ppgtt is enabled.
*/
TRACE_EVENT(switch_mm,
TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
TP_ARGS(ring, to),
TP_STRUCT__entry(
__field(u32, ring)
__field(struct intel_context *, to)
__field(struct i915_address_space *, vm)
__field(u32, dev)
),
TP_fast_assign(
__entry->ring = ring->id;
__entry->to = to;
__entry->vm = to->ppgtt ? &to->ppgtt->base : NULL;
__entry->dev = ring->dev->primary->index;
),
TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
__entry->dev, __entry->ring, __entry->to, __entry->vm)
);
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
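As a usage illustration for the three DOC blocks above, here is a minimal userspace sketch (editorial; it assumes tracefs is mounted at the conventional /sys/kernel/debug/tracing and that the kernel exposes these events; the helper name is hypothetical) that enables the new tracepoints:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Enable one i915 trace event; the path layout is the standard tracefs one. */
static void enable_event(const char *event)
{
	char path[128];
	int fd;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/events/i915/%s/enable", event);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
}

int main(void)
{
	enable_event("i915_ppgtt_create");
	enable_event("i915_ppgtt_release");
	enable_event("i915_context_create");
	enable_event("i915_context_free");
	enable_event("switch_mm");
	/* The records then show up in /sys/kernel/debug/tracing/trace. */
	return 0;
}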

View file

@@ -270,6 +270,12 @@ void i915_save_display_reg(struct drm_device *dev)
}
/* FIXME: regfile.save TV & SDVO state */
/* Panel fitter */
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
}
/* Backlight */
if (INTEL_INFO(dev)->gen <= 4)
pci_read_config_byte(dev->pdev, PCI_LBPC,
@@ -284,6 +290,7 @@ void i915_save_display_reg(struct drm_device *dev)
dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
if (INTEL_INFO(dev)->gen >= 4)
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
}
return;
@@ -313,6 +320,13 @@ void i915_restore_display_reg(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
}
/* Panel fitter */
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
}
/* Display port ratios (must be done before clock is set) */

View file

@@ -107,7 +107,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
tmp &= ~bits_elda;
I915_WRITE(reg_elda, tmp);
for (i = 0; i < eld[2]; i++)
for (i = 0; i < drm_eld_size(eld) / 4; i++)
if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
return false;
@@ -162,7 +162,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
len = (tmp >> 9) & 0x1f; /* ELD buffer size */
I915_WRITE(G4X_AUD_CNTL_ST, tmp);
len = min_t(int, eld[2], len);
len = min(drm_eld_size(eld) / 4, len);
DRM_DEBUG_DRIVER("ELD size %d\n", len);
for (i = 0; i < len; i++)
I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
@@ -194,6 +194,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
/* Invalidate ELD */
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~AUDIO_ELD_VALID(pipe);
tmp &= ~AUDIO_OUTPUT_ENABLE(pipe);
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
}
@@ -209,7 +210,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
int len, i;
DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
pipe_name(pipe), eld[2]);
pipe_name(pipe), drm_eld_size(eld));
/* Enable audio presence detect, invalidate ELD */
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
@@ -230,8 +231,8 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
I915_WRITE(HSW_AUD_DIP_ELD_CTRL(pipe), tmp);
/* Up to 84 bytes of hw ELD buffer */
len = min_t(int, eld[2], 21);
for (i = 0; i < len; i++)
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i));
/* ELD valid */
@@ -320,7 +321,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
int aud_cntrl_st2;
DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
port_name(port), pipe_name(pipe), eld[2]);
port_name(port), pipe_name(pipe), drm_eld_size(eld));
/*
* FIXME: We're supposed to wait for vblank here, but we have vblanks
@@ -364,8 +365,8 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
I915_WRITE(aud_cntl_st, tmp);
/* Up to 84 bytes of hw ELD buffer */
len = min_t(int, eld[2], 21);
for (i = 0; i < len; i++)
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
/* ELD valid */

View file

@@ -670,6 +670,111 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
return (refclk * n * 100) / (p * r);
}
static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
uint32_t dpll)
{
uint32_t cfgcr1_reg, cfgcr2_reg;
uint32_t cfgcr1_val, cfgcr2_val;
uint32_t p0, p1, p2, dco_freq;
cfgcr1_reg = GET_CFG_CR1_REG(dpll);
cfgcr2_reg = GET_CFG_CR2_REG(dpll);
cfgcr1_val = I915_READ(cfgcr1_reg);
cfgcr2_val = I915_READ(cfgcr2_reg);
p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK;
p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK;
if (cfgcr2_val & DPLL_CFGCR2_QDIV_MODE(1))
p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
else
p1 = 1;
switch (p0) {
case DPLL_CFGCR2_PDIV_1:
p0 = 1;
break;
case DPLL_CFGCR2_PDIV_2:
p0 = 2;
break;
case DPLL_CFGCR2_PDIV_3:
p0 = 3;
break;
case DPLL_CFGCR2_PDIV_7:
p0 = 7;
break;
}
switch (p2) {
case DPLL_CFGCR2_KDIV_5:
p2 = 5;
break;
case DPLL_CFGCR2_KDIV_2:
p2 = 2;
break;
case DPLL_CFGCR2_KDIV_3:
p2 = 3;
break;
case DPLL_CFGCR2_KDIV_1:
p2 = 1;
break;
}
dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000;
dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
1000) / 0x8000;
return dco_freq / (p0 * p1 * p2 * 5);
}
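To make the readout math above concrete, here is a standalone sketch with hypothetical register contents: dco_integer = 375 and dco_fraction = 0 give a 9.0 GHz DCO, and with p0 = 1, p1 = 2, p2 = 3 the function would report a 300 MHz pixel clock (the AFE clock being 5x the pixel clock):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical CFGCR1/CFGCR2 decode results, as in the function above. */
	uint32_t dco_integer = 375, dco_fraction = 0;
	uint32_t p0 = 1, p1 = 2, p2 = 3;
	uint64_t dco_khz;

	/* DCO = integer * 24 MHz + (fraction / 2^15) * 24 MHz, kept in kHz. */
	dco_khz = (uint64_t)dco_integer * 24 * 1000 +
		  ((uint64_t)dco_fraction * 24 * 1000) / 0x8000;
	printf("pixel clock = %llu kHz\n",
	       (unsigned long long)(dco_khz / (p0 * p1 * p2 * 5)));
	/* prints: pixel clock = 300000 kHz */
	return 0;
}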
static void skl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int link_clock = 0;
uint32_t dpll_ctl1, dpll;
dpll = pipe_config->ddi_pll_sel;
dpll_ctl1 = I915_READ(DPLL_CTRL1);
if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
link_clock = skl_calc_wrpll_link(dev_priv, dpll);
} else {
link_clock = dpll_ctl1 & DPLL_CRTL1_LINK_RATE_MASK(dpll);
link_clock >>= DPLL_CRTL1_LINK_RATE_SHIFT(dpll);
switch (link_clock) {
case DPLL_CRTL1_LINK_RATE_810:
link_clock = 81000;
break;
case DPLL_CRTL1_LINK_RATE_1350:
link_clock = 135000;
break;
case DPLL_CRTL1_LINK_RATE_2700:
link_clock = 270000;
break;
default:
WARN(1, "Unsupported link rate\n");
break;
}
link_clock *= 2;
}
pipe_config->port_clock = link_clock;
if (pipe_config->has_dp_encoder)
pipe_config->adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
else
pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void hsw_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
@@ -828,6 +933,228 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
return true;
}
struct skl_wrpll_params {
uint32_t dco_fraction;
uint32_t dco_integer;
uint32_t qdiv_ratio;
uint32_t qdiv_mode;
uint32_t kdiv;
uint32_t pdiv;
uint32_t central_freq;
};
static void
skl_ddi_calculate_wrpll(int clock /* in Hz */,
struct skl_wrpll_params *wrpll_params)
{
uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
uint64_t dco_central_freq[3] = {8400000000ULL,
9000000000ULL,
9600000000ULL};
uint32_t min_dco_deviation = 400;
uint32_t min_dco_index = 3;
uint32_t P0[4] = {1, 2, 3, 7};
uint32_t P2[4] = {1, 2, 3, 5};
bool found = false;
uint32_t candidate_p = 0;
uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0};
uint32_t candidate_p2[3] = {0};
uint32_t dco_central_freq_deviation[3];
uint32_t i, P1, k, dco_count;
bool retry_with_odd = false;
uint64_t dco_freq;
/* Determine P0, P1 or P2 */
for (dco_count = 0; dco_count < 3; dco_count++) {
found = false;
candidate_p =
div64_u64(dco_central_freq[dco_count], afe_clock);
if (retry_with_odd == false)
candidate_p = (candidate_p % 2 == 0 ?
candidate_p : candidate_p + 1);
for (P1 = 1; P1 < candidate_p; P1++) {
for (i = 0; i < 4; i++) {
if (!(P0[i] != 1 || P1 == 1))
continue;
for (k = 0; k < 4; k++) {
if (P1 != 1 && P2[k] != 2)
continue;
if (candidate_p == P0[i] * P1 * P2[k]) {
/* Found possible P0, P1, P2 */
found = true;
candidate_p0[dco_count] = P0[i];
candidate_p1[dco_count] = P1;
candidate_p2[dco_count] = P2[k];
goto found;
}
}
}
}
found:
if (found) {
dco_central_freq_deviation[dco_count] =
div64_u64(10000 *
abs_diff((candidate_p * afe_clock),
dco_central_freq[dco_count]),
dco_central_freq[dco_count]);
if (dco_central_freq_deviation[dco_count] <
min_dco_deviation) {
min_dco_deviation =
dco_central_freq_deviation[dco_count];
min_dco_index = dco_count;
}
}
if (min_dco_index > 2 && dco_count == 2) {
retry_with_odd = true;
dco_count = 0;
}
}
if (min_dco_index > 2) {
WARN(1, "No valid values found for the given pixel clock\n");
} else {
wrpll_params->central_freq = dco_central_freq[min_dco_index];
switch (dco_central_freq[min_dco_index]) {
case 9600000000ULL:
wrpll_params->central_freq = 0;
break;
case 9000000000ULL:
wrpll_params->central_freq = 1;
break;
case 8400000000ULL:
wrpll_params->central_freq = 3;
}
switch (candidate_p0[min_dco_index]) {
case 1:
wrpll_params->pdiv = 0;
break;
case 2:
wrpll_params->pdiv = 1;
break;
case 3:
wrpll_params->pdiv = 2;
break;
case 7:
wrpll_params->pdiv = 4;
break;
default:
WARN(1, "Incorrect PDiv\n");
}
switch (candidate_p2[min_dco_index]) {
case 5:
wrpll_params->kdiv = 0;
break;
case 2:
wrpll_params->kdiv = 1;
break;
case 3:
wrpll_params->kdiv = 2;
break;
case 1:
wrpll_params->kdiv = 3;
break;
default:
WARN(1, "Incorrect KDiv\n");
}
wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
wrpll_params->qdiv_mode =
(wrpll_params->qdiv_ratio == 1) ? 0 : 1;
dco_freq = candidate_p0[min_dco_index] *
candidate_p1[min_dco_index] *
candidate_p2[min_dco_index] * afe_clock;
/*
* Intermediate values are in Hz.
* Divide by MHz to match BSpec
*/
wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
wrpll_params->dco_fraction =
div_u64(((div_u64(dco_freq, 24) -
wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
}
}
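Worked example for the search above (editorial, using a hypothetical 148.5 MHz pixel clock): the AFE clock is 742.5 MHz, so for the 9.0 GHz DCO central frequency the even divider candidate is 12, which the loops factor as P0 = 3, P1 = 2, P2 = 2 (deviation 100, well under the 400 limit). The resulting DCO is 8.91 GHz, giving dco_integer = 371 and dco_fraction = 8192:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t afe = 148500000ULL * 5;	/* Hz, AFE is 5x pixel clock */
	uint64_t dco = 3 * 2 * 2 * afe;		/* P0*P1*P2 * AFE = 8.91 GHz */
	uint32_t dco_integer = dco / 24000000;	/* 371 */
	uint32_t dco_fraction =
		((dco / 24 - (uint64_t)dco_integer * 1000000) * 0x8000) /
		1000000;			/* 8192, i.e. 0.25 * 2^15 */

	printf("dco_integer=%u dco_fraction=%u\n", dco_integer, dco_fraction);
	return 0;
}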
static bool
skl_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_encoder *intel_encoder,
int clock)
{
struct intel_shared_dpll *pll;
uint32_t ctrl1, cfgcr1, cfgcr2;
/*
* See comment in intel_dpll_hw_state to understand why we always use 0
* as the DPLL id in this function.
*/
ctrl1 = DPLL_CTRL1_OVERRIDE(0);
if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
struct skl_wrpll_params wrpll_params = { 0, };
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params);
cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
wrpll_params.dco_integer;
cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
struct drm_encoder *encoder = &intel_encoder->base;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
switch (intel_dp->link_bw) {
case DP_LINK_BW_1_62:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, 0);
break;
case DP_LINK_BW_2_7:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 0);
break;
case DP_LINK_BW_5_4:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, 0);
break;
}
cfgcr1 = cfgcr2 = 0;
} else /* eDP */
return true;
intel_crtc->new_config->dpll_hw_state.ctrl1 = ctrl1;
intel_crtc->new_config->dpll_hw_state.cfgcr1 = cfgcr1;
intel_crtc->new_config->dpll_hw_state.cfgcr2 = cfgcr2;
pll = intel_get_shared_dpll(intel_crtc);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
return false;
}
/* shared DPLL id 0 is DPLL 1 */
intel_crtc->new_config->ddi_pll_sel = pll->id + 1;
return true;
}
/*
* Tries to find a *shared* PLL for the CRTC and store it in
@@ -838,11 +1165,15 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
*/
bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(intel_crtc);
int clock = intel_crtc->new_config->port_clock;
return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
if (IS_SKYLAKE(dev))
return skl_ddi_pll_select(intel_crtc, intel_encoder, clock);
else
return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
}
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
@@ -1134,7 +1465,8 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
@@ -1144,8 +1476,42 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_edp_panel_on(intel_dp);
}
WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
if (IS_SKYLAKE(dev)) {
uint32_t dpll = crtc->config.ddi_pll_sel;
uint32_t val;
/*
* DPLL0 is used for eDP and is the only "private" DPLL (as
* opposed to shared) on SKL
*/
if (type == INTEL_OUTPUT_EDP) {
WARN_ON(dpll != SKL_DPLL0);
val = I915_READ(DPLL_CTRL1);
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
DPLL_CTRL1_SSC(dpll) |
DPLL_CRTL1_LINK_RATE_MASK(dpll));
val |= crtc->config.dpll_hw_state.ctrl1 << (dpll * 6);
I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1);
}
/* DDI -> PLL mapping */
val = I915_READ(DPLL_CTRL2);
val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
I915_WRITE(DPLL_CTRL2, val);
} else {
WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
}
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1155,7 +1521,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
if (port != PORT_A)
if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
intel_dp_stop_link_train(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -1169,7 +1535,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t val;
@@ -1197,7 +1564,11 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
intel_edp_panel_off(intel_dp);
}
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
if (IS_SKYLAKE(dev))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
else
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
}
static void intel_enable_ddi(struct intel_encoder *intel_encoder)
@@ -1224,11 +1595,11 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (port == PORT_A)
if (port == PORT_A && INTEL_INFO(dev)->gen < 9)
intel_dp_stop_link_train(intel_dp);
intel_edp_backlight_on(intel_dp);
intel_edp_psr_enable(intel_dp);
intel_psr_enable(intel_dp);
}
if (intel_crtc->config.has_audio) {
@@ -1254,11 +1625,59 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_psr_disable(intel_dp);
intel_psr_disable(intel_dp);
intel_edp_backlight_off(intel_dp);
}
}
static int skl_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
uint32_t cdctl = I915_READ(CDCLK_CTL);
uint32_t linkrate;
if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
WARN(1, "LCPLL1 not enabled\n");
return 24000; /* 24MHz is the cd freq with NSSC ref */
}
if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
return 540000;
linkrate = (I915_READ(DPLL_CTRL1) &
DPLL_CRTL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
if (linkrate == DPLL_CRTL1_LINK_RATE_2160 ||
linkrate == DPLL_CRTL1_LINK_RATE_1080) {
/* vco 8640 */
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
return 432000;
case CDCLK_FREQ_337_308:
return 308570;
case CDCLK_FREQ_675_617:
return 617140;
default:
WARN(1, "Unknown cd freq selection\n");
}
} else {
/* vco 8100 */
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
return 450000;
case CDCLK_FREQ_337_308:
return 337500;
case CDCLK_FREQ_675_617:
return 675000;
default:
WARN(1, "Unknown cd freq selection\n");
}
}
/* error case, do as if DPLL0 isn't enabled */
return 24000;
}
static int bdw_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
uint32_t lcpll = I915_READ(LCPLL_CTL);
@@ -1300,6 +1719,9 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
if (IS_SKYLAKE(dev))
return skl_get_cdclk_freq(dev_priv);
if (IS_BROADWELL(dev))
return bdw_get_cdclk_freq(dev_priv);
@@ -1361,26 +1783,156 @@ static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
}
}
static const char * const skl_ddi_pll_names[] = {
"DPLL 1",
"DPLL 2",
"DPLL 3",
};
struct skl_dpll_regs {
u32 ctl, cfgcr1, cfgcr2;
};
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[3] = {
{
/* DPLL 1 */
.ctl = LCPLL2_CTL,
.cfgcr1 = DPLL1_CFGCR1,
.cfgcr2 = DPLL1_CFGCR2,
},
{
/* DPLL 2 */
.ctl = WRPLL_CTL1,
.cfgcr1 = DPLL2_CFGCR1,
.cfgcr2 = DPLL2_CFGCR2,
},
{
/* DPLL 3 */
.ctl = WRPLL_CTL2,
.cfgcr1 = DPLL3_CFGCR1,
.cfgcr2 = DPLL3_CFGCR2,
},
};
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
uint32_t val;
unsigned int dpll;
const struct skl_dpll_regs *regs = skl_dpll_regs;
/* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
dpll = pll->id + 1;
val = I915_READ(DPLL_CTRL1);
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
DPLL_CRTL1_LINK_RATE_MASK(dpll));
val |= pll->config.hw_state.ctrl1 << (dpll * 6);
I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1);
I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
POSTING_READ(regs[pll->id].cfgcr1);
POSTING_READ(regs[pll->id].cfgcr2);
/* the enable bit is always bit 31 */
I915_WRITE(regs[pll->id].ctl,
I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(dpll), 5))
DRM_ERROR("DPLL %d not locked\n", dpll);
}
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const struct skl_dpll_regs *regs = skl_dpll_regs;
/* the enable bit is always bit 31 */
I915_WRITE(regs[pll->id].ctl,
I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
POSTING_READ(regs[pll->id].ctl);
}
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
uint32_t val;
unsigned int dpll;
const struct skl_dpll_regs *regs = skl_dpll_regs;
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
/* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
dpll = pll->id + 1;
val = I915_READ(regs[pll->id].ctl);
if (!(val & LCPLL_PLL_ENABLE))
return false;
val = I915_READ(DPLL_CTRL1);
hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
/* avoid reading back stale values if HDMI mode is not enabled */
if (val & DPLL_CTRL1_HDMI_MODE(dpll)) {
hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
}
return true;
}
static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
{
int i;
dev_priv->num_shared_dpll = 3;
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
dev_priv->shared_dplls[i].id = i;
dev_priv->shared_dplls[i].name = skl_ddi_pll_names[i];
dev_priv->shared_dplls[i].disable = skl_ddi_pll_disable;
dev_priv->shared_dplls[i].enable = skl_ddi_pll_enable;
dev_priv->shared_dplls[i].get_hw_state =
skl_ddi_pll_get_hw_state;
}
}
void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
hsw_shared_dplls_init(dev_priv);
/* The LCPLL register should be turned on by the BIOS. For now let's
* just check its state and print errors in case something is wrong.
* Don't even try to turn it on.
*/
if (IS_SKYLAKE(dev))
skl_shared_dplls_init(dev_priv);
else
hsw_shared_dplls_init(dev_priv);
DRM_DEBUG_KMS("CDCLK running at %dKHz\n",
intel_ddi_get_cdclk_freq(dev_priv));
if (val & LCPLL_CD_SOURCE_FCLK)
DRM_ERROR("CDCLK source is not LCPLL\n");
if (IS_SKYLAKE(dev)) {
if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
DRM_ERROR("LCPLL1 is disabled\n");
} else {
/*
* The LCPLL register should be turned on by the BIOS. For now
* let's just check its state and print errors in case
* something is wrong. Don't even try to turn it on.
*/
if (val & LCPLL_PLL_DISABLE)
DRM_ERROR("LCPLL is disabled\n");
if (val & LCPLL_CD_SOURCE_FCLK)
DRM_ERROR("CDCLK source is not LCPLL\n");
if (val & LCPLL_PLL_DISABLE)
DRM_ERROR("LCPLL is disabled\n");
}
}
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
@@ -1475,7 +2027,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct intel_hdmi *intel_hdmi;
u32 temp, flags = 0;
struct drm_device *dev = dev_priv->dev;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
@@ -1509,6 +2063,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
pipe_config->has_hdmi_sink = true;
intel_hdmi = enc_to_intel_hdmi(&encoder->base);
if (intel_hdmi->infoframe_enabled(&encoder->base))
pipe_config->has_infoframe = true;
break;
case TRANS_DDI_MODE_SELECT_DVI:
case TRANS_DDI_MODE_SELECT_FDI:
break;
@@ -1547,7 +2106,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
}
hsw_ddi_clock_get(encoder, pipe_config);
if (INTEL_INFO(dev)->gen <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
else
skl_ddi_clock_get(encoder, pipe_config);
}
static void intel_ddi_destroy(struct drm_encoder *encoder)

View file

@@ -2931,8 +2931,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
intel_update_pipe_size(intel_crtc);
dev_priv->display.update_primary_plane(crtc, fb, x, y);
if (intel_crtc->active)
@@ -4005,6 +4003,19 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
}
}
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
if (crtc->config.pch_pfit.enabled) {
I915_WRITE(PS_CTL(pipe), PS_ENABLE);
I915_WRITE(PS_WIN_POS(pipe), crtc->config.pch_pfit.pos);
I915_WRITE(PS_WIN_SZ(pipe), crtc->config.pch_pfit.size);
}
}
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -4388,7 +4399,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_ddi_enable_pipe_clock(intel_crtc);
ironlake_pfit_enable(intel_crtc);
if (IS_SKYLAKE(dev))
skylake_pfit_enable(intel_crtc);
else
ironlake_pfit_enable(intel_crtc);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
@@ -4422,6 +4436,21 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_crtc_enable_planes(crtc);
}
static void skylake_pfit_disable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
/* To avoid upsetting the power well on Haswell, only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
if (crtc->config.pch_pfit.enabled) {
I915_WRITE(PS_CTL(pipe), 0);
I915_WRITE(PS_WIN_POS(pipe), 0);
I915_WRITE(PS_WIN_SZ(pipe), 0);
}
}
static void ironlake_pfit_disable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -4534,7 +4563,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
ironlake_pfit_disable(intel_crtc);
if (IS_SKYLAKE(dev))
skylake_pfit_disable(intel_crtc);
else
ironlake_pfit_disable(intel_crtc);
intel_ddi_disable_pipe_clock(intel_crtc);
@@ -4907,10 +4939,23 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
if (req_cdclk != dev_priv->vlv_cdclk_freq) {
/*
* FIXME: We can end up here with all power domains off, yet
* with a CDCLK frequency other than the minimum. To account
* for this take the PIPE-A power domain, which covers the HW
* blocks needed for the following programming. This can be
* removed once it's guaranteed that we get here either with
* the minimum CDCLK set, or the required power domains
* enabled.
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
if (IS_CHERRYVIEW(dev))
cherryview_set_cdclk(dev, req_cdclk);
else
valleyview_set_cdclk(dev, req_cdclk);
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
}
@@ -5153,36 +5198,6 @@ static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
static void intel_crtc_update_sarea(struct drm_crtc *crtc,
bool enabled)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
if (!dev->primary->master)
return;
master_priv = dev->primary->master->driver_priv;
if (!master_priv->sarea_priv)
return;
switch (pipe) {
case 0:
master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
break;
case 1:
master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
break;
default:
DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
break;
}
}
/* Master function to enable/disable CRTC and corresponding power wells */
void intel_crtc_control(struct drm_crtc *crtc, bool enable)
{
@@ -5226,8 +5241,6 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
enable |= intel_encoder->connectors_active;
intel_crtc_control(crtc, enable);
intel_crtc_update_sarea(crtc, enable);
}
static void intel_crtc_disable(struct drm_crtc *crtc)
@@ -5242,7 +5255,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
WARN_ON(!crtc->enabled);
dev_priv->display.crtc_disable(crtc);
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
if (crtc->primary->fb) {
@@ -7549,6 +7561,22 @@ static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
&pipe_config->fdi_m_n, NULL);
}
static void skylake_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
tmp = I915_READ(PS_CTL(crtc->pipe));
if (tmp & PS_ENABLE) {
pipe_config->pch_pfit.enabled = true;
pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
}
}
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
@@ -7962,6 +7990,28 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
return 0;
}
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_config *pipe_config)
{
u32 temp;
temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
switch (pipe_config->ddi_pll_sel) {
case SKL_DPLL1:
pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
break;
case SKL_DPLL2:
pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
break;
case SKL_DPLL3:
pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
break;
}
}
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_config *pipe_config)
@@ -7991,7 +8041,10 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
haswell_get_ddi_pll(dev_priv, port, pipe_config);
if (IS_SKYLAKE(dev))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
else
haswell_get_ddi_pll(dev_priv, port, pipe_config);
if (pipe_config->shared_dpll >= 0) {
pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
@@ -8067,8 +8120,12 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_is_enabled(dev_priv, pfit_domain))
ironlake_get_pfit_config(crtc, pipe_config);
if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
if (IS_SKYLAKE(dev))
skylake_get_pfit_config(crtc, pipe_config);
else
ironlake_get_pfit_config(crtc, pipe_config);
}
if (IS_HASWELL(dev))
pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
@@ -8292,7 +8349,7 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
unsigned old_width;
@@ -8421,7 +8478,7 @@ __intel_framebuffer_create(struct drm_device *dev,
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
drm_gem_object_unreference_unlocked(&obj->base);
drm_gem_object_unreference(&obj->base);
return ERR_PTR(-ENOMEM);
}
@@ -8431,7 +8488,7 @@ __intel_framebuffer_create(struct drm_device *dev,
return &intel_fb->base;
err:
drm_gem_object_unreference_unlocked(&obj->base);
drm_gem_object_unreference(&obj->base);
kfree(intel_fb);
return ERR_PTR(ret);
@@ -9465,6 +9522,69 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
return 0;
}
static int intel_gen9_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring,
uint32_t flags)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane = 0, stride;
int ret;
switch (intel_crtc->pipe) {
case PIPE_A:
plane = MI_DISPLAY_FLIP_SKL_PLANE_1_A;
break;
case PIPE_B:
plane = MI_DISPLAY_FLIP_SKL_PLANE_1_B;
break;
case PIPE_C:
plane = MI_DISPLAY_FLIP_SKL_PLANE_1_C;
break;
default:
WARN_ONCE(1, "unknown plane in flip command\n");
return -ENODEV;
}
switch (obj->tiling_mode) {
case I915_TILING_NONE:
stride = fb->pitches[0] >> 6;
break;
case I915_TILING_X:
stride = fb->pitches[0] >> 9;
break;
default:
WARN_ONCE(1, "unknown tiling in flip command\n");
return -ENODEV;
}
ret = intel_ring_begin(ring, 10);
if (ret)
return ret;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, DERRMR);
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane);
intel_ring_emit(ring, stride << 6 | obj->tiling_mode);
intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
intel_mark_page_flip_active(intel_crtc);
__intel_ring_advance(ring);
return 0;
}
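The two shifts above encode the framebuffer pitch in the units MI_DISPLAY_FLIP expects on SKL, apparently 64-byte granularity for linear and 512-byte granularity for X-tiled surfaces. A quick standalone check (editorial, with a hypothetical 1920-wide XRGB framebuffer):

#include <assert.h>

int main(void)
{
	unsigned int pitch = 1920 * 4;	/* bytes per scanline, linear XRGB */

	assert((pitch >> 6) == 120);	/* linear: pitch / 64 */
	assert((pitch >> 9) == 15);	/* X-tiled: pitch / 512 */
	return 0;
}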
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -9904,6 +10024,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->dp_m2_n2.link_n,
pipe_config->dp_m2_n2.tu);
DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
pipe_config->has_audio,
pipe_config->has_infoframe);
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->requested_mode);
DRM_DEBUG_KMS("adjusted mode:\n");
@@ -10372,6 +10496,7 @@ intel_pipe_config_compare(struct drm_device *dev,
if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
IS_VALLEYVIEW(dev))
PIPE_CONF_CHECK_I(limited_color_range);
PIPE_CONF_CHECK_I(has_infoframe);
PIPE_CONF_CHECK_I(has_audio);
@@ -10428,6 +10553,9 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
@@ -10751,45 +10879,60 @@ static void update_scanline_offset(struct intel_crtc *crtc)
crtc->scanline_offset = 1;
}
static struct intel_crtc_config *
intel_modeset_compute_config(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_framebuffer *fb,
unsigned *modeset_pipes,
unsigned *prepare_pipes,
unsigned *disable_pipes)
{
struct intel_crtc_config *pipe_config = NULL;
intel_modeset_affected_pipes(crtc, modeset_pipes,
prepare_pipes, disable_pipes);
if ((*modeset_pipes) == 0)
goto out;
/*
* Note this needs changes when we start tracking multiple modes
* and crtcs. At that point we'll need to compute the whole config
* (i.e. one pipe_config for each crtc) rather than just the one
* for this crtc.
*/
pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
if (IS_ERR(pipe_config))
goto out;
intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
"[modeset]");
to_intel_crtc(crtc)->new_config = pipe_config;
out:
return pipe_config;
}
static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
int x, int y, struct drm_framebuffer *fb,
struct intel_crtc_config *pipe_config,
unsigned modeset_pipes,
unsigned prepare_pipes,
unsigned disable_pipes)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *saved_mode;
struct intel_crtc_config *pipe_config = NULL;
struct intel_crtc *intel_crtc;
unsigned disable_pipes, prepare_pipes, modeset_pipes;
int ret = 0;
saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
if (!saved_mode)
return -ENOMEM;
intel_modeset_affected_pipes(crtc, &modeset_pipes,
&prepare_pipes, &disable_pipes);
*saved_mode = crtc->mode;
/* Hack: Because we don't (yet) support global modeset on multiple
* crtcs, we don't keep track of the new mode for more than one crtc.
* Hence simply check whether any bit is set in modeset_pipes in all the
* pieces of code that are not yet converted to deal with multiple crtcs
* changing their mode at the same time. */
if (modeset_pipes) {
pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
if (IS_ERR(pipe_config)) {
ret = PTR_ERR(pipe_config);
pipe_config = NULL;
goto out;
}
intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
"[modeset]");
to_intel_crtc(crtc)->new_config = pipe_config;
}
/*
* See if the config requires any additional preparation, e.g.
* to adjust global state with pipes off. We need to do this
@@ -10830,6 +10973,10 @@ static int __intel_set_mode(struct drm_crtc *crtc,
/* crtc->mode is already used by the ->mode_set callbacks, hence we need
* to set it here already despite that we pass it down the callchain.
*
* Note we'll need to fix this up when we start tracking multiple
* pipes; here we assume a single modeset_pipe and only track the
* single crtc and mode.
*/
if (modeset_pipes) {
crtc->mode = *mode;
@@ -10891,24 +11038,48 @@ static int __intel_set_mode(struct drm_crtc *crtc,
if (ret && crtc->enabled)
crtc->mode = *saved_mode;
out:
kfree(pipe_config);
kfree(saved_mode);
return ret;
}
static int intel_set_mode_pipes(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb,
struct intel_crtc_config *pipe_config,
unsigned modeset_pipes,
unsigned prepare_pipes,
unsigned disable_pipes)
{
int ret;
ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
prepare_pipes, disable_pipes);
if (ret == 0)
intel_modeset_check_state(crtc->dev);
return ret;
}
static int intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
{
int ret;
struct intel_crtc_config *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
ret = __intel_set_mode(crtc, mode, x, y, fb);
pipe_config = intel_modeset_compute_config(crtc, mode, fb,
&modeset_pipes,
&prepare_pipes,
&disable_pipes);
if (ret == 0)
intel_modeset_check_state(crtc->dev);
if (IS_ERR(pipe_config))
return PTR_ERR(pipe_config);
return ret;
return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
modeset_pipes, prepare_pipes,
disable_pipes);
}
void intel_crtc_restore_mode(struct drm_crtc *crtc)
@@ -11239,6 +11410,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
struct drm_device *dev;
struct drm_mode_set save_set;
struct intel_set_config *config;
struct intel_crtc_config *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
int ret;
BUG_ON(!set);
@@ -11284,9 +11457,36 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
if (ret)
goto fail;
pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
set->fb,
&modeset_pipes,
&prepare_pipes,
&disable_pipes);
if (IS_ERR(pipe_config)) {
ret = PTR_ERR(pipe_config);
goto fail;
} else if (pipe_config) {
if (to_intel_crtc(set->crtc)->new_config->has_audio !=
to_intel_crtc(set->crtc)->config.has_audio)
config->mode_changed = true;
/* Force mode sets for any infoframe stuff */
if (to_intel_crtc(set->crtc)->new_config->has_infoframe ||
to_intel_crtc(set->crtc)->config.has_infoframe)
config->mode_changed = true;
}
/* set_mode will free it in the mode_changed case */
if (!config->mode_changed)
kfree(pipe_config);
intel_update_pipe_size(to_intel_crtc(set->crtc));
if (config->mode_changed) {
ret = intel_set_mode(set->crtc, set->mode,
set->x, set->y, set->fb);
ret = intel_set_mode_pipes(set->crtc, set->mode,
set->x, set->y, set->fb, pipe_config,
modeset_pipes, prepare_pipes,
disable_pipes);
} else if (config->fb_changed) {
struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
@@ -11559,8 +11759,8 @@ intel_commit_primary_plane(struct drm_plane *plane,
struct drm_rect *src = &state->src;
crtc->primary->fb = fb;
crtc->x = src->x1;
crtc->y = src->y1;
crtc->x = src->x1 >> 16;
crtc->y = src->y1 >> 16;
intel_plane->crtc_x = state->orig_dst.x1;
intel_plane->crtc_y = state->orig_dst.y1;
@@ -12009,7 +12209,7 @@ enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
if (!encoder)
if (!encoder || WARN_ON(!encoder->crtc))
return INVALID_PIPE;
return to_intel_crtc(encoder->crtc)->pipe;
@@ -12244,7 +12444,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
intel_edp_psr_init(dev);
intel_psr_init(dev);
for_each_intel_encoder(dev, encoder) {
encoder->base.possible_crtcs = encoder->crtc_mask;
@@ -12558,6 +12758,9 @@ static void intel_init_display(struct drm_device *dev)
case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
dev_priv->display.queue_flip = intel_gen7_queue_flip;
break;
case 9:
dev_priv->display.queue_flip = intel_gen9_queue_flip;
break;
}
intel_panel_init_backlight_funcs(dev);
@@ -13262,8 +13465,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_crtc *crtc =
dev_priv->pipe_to_crtc_mapping[pipe];
__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
crtc->primary->fb);
intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
crtc->primary->fb);
}
} else {
intel_modeset_update_staged_output_state(dev);
@@ -13274,6 +13477,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
void intel_modeset_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
@@ -13281,6 +13485,16 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_init_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
/*
* There may be no VBT; and if the BIOS enabled SSC we can
* just keep using it to avoid unnecessary flicker. Whereas if the
* BIOS isn't using it, don't assume it will work even if the VBT
* indicates as much.
*/
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE);
intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
@@ -13306,6 +13520,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
}
}
mutex_unlock(&dev->struct_mutex);
intel_backlight_register(dev);
}
void intel_connector_unregister(struct intel_connector *intel_connector)
@@ -13321,9 +13537,13 @@ void intel_modeset_cleanup(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
intel_disable_gt_powersave(dev);
intel_backlight_unregister(dev);
/*
* Interrupts and polling as the first thing to avoid creating havoc.
* Too much stuff here (turning of rps, connectors, ...) would
* Too much stuff here (turning off connectors, ...) would
* experience fancy races otherwise.
*/
intel_irq_uninstall(dev_priv);
@@ -13340,8 +13560,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_disable_fbc(dev);
intel_disable_gt_powersave(dev);
ironlake_teardown_rc6(dev);
mutex_unlock(&dev->struct_mutex);

View file

@@ -227,8 +227,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
static uint32_t
pack_aux(const uint8_t *src, int src_bytes)
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
int i;
uint32_t v = 0;
@@ -240,8 +239,7 @@ pack_aux(const uint8_t *src, int src_bytes)
return v;
}
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
int i;
if (dst_bytes > 4)
@@ -863,7 +861,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
I915_WRITE(ch_data + i,
pack_aux(send + i, send_bytes - i));
intel_dp_pack_aux(send + i,
send_bytes - i));
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl, send_ctl);
@@ -917,8 +916,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
recv_bytes = recv_size;
for (i = 0; i < recv_bytes; i += 4)
unpack_aux(I915_READ(ch_data + i),
recv + i, recv_bytes - i);
intel_dp_unpack_aux(I915_READ(ch_data + i),
recv + i, recv_bytes - i);
ret = recv_bytes;
out:
@@ -1074,6 +1073,33 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
intel_connector_unregister(intel_connector);
}
static void
skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
{
u32 ctrl1;
pipe_config->ddi_pll_sel = SKL_DPLL0;
pipe_config->dpll_hw_state.cfgcr1 = 0;
pipe_config->dpll_hw_state.cfgcr2 = 0;
ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
switch (link_bw) {
case DP_LINK_BW_1_62:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
SKL_DPLL0);
break;
case DP_LINK_BW_2_7:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
SKL_DPLL0);
break;
case DP_LINK_BW_5_4:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
SKL_DPLL0);
break;
}
pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
{
@@ -1251,7 +1277,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
&pipe_config->dp_m2_n2);
}
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
if (IS_SKYLAKE(dev) && is_edp(intel_dp))
skl_edp_set_pll_config(pipe_config, intel_dp->link_bw);
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
else
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -2067,385 +2095,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
}
static bool is_edp_psr(struct intel_dp *intel_dp)
{
return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!HAS_PSR(dev))
return false;
return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
}
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
struct edp_vsc_psr *vsc_psr)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
uint32_t *data = (uint32_t *) vsc_psr;
unsigned int i;
/* As per BSpec (Pipe Video Data Island Packet), we need to disable
the video DIP before programming the video DIP data buffer
registers for the DIP being updated. */
I915_WRITE(ctl_reg, 0);
POSTING_READ(ctl_reg);
for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
if (i < sizeof(struct edp_vsc_psr))
I915_WRITE(data_reg + i, *data++);
else
I915_WRITE(data_reg + i, 0);
}
I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
POSTING_READ(ctl_reg);
}
static void intel_edp_psr_setup_vsc(struct intel_dp *intel_dp)
{
struct edp_vsc_psr psr_vsc;
/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
psr_vsc.sdp_header.HB1 = 0x7;
psr_vsc.sdp_header.HB2 = 0x2;
psr_vsc.sdp_header.HB3 = 0x8;
intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
}
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
int precharge = 0x3;
bool only_standby = false;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
[2] = DP_SET_POWER & 0xff,
[3] = 1 - 1,
[4] = DP_SET_POWER_D0,
};
int i;
BUILD_BUG_ON(sizeof(aux_msg) > 20);
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;
/* Enable PSR in sink */
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
else
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
/* Setup AUX registers */
for (i = 0; i < sizeof(aux_msg); i += 4)
I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
I915_WRITE(EDP_PSR_AUX_CTL(dev),
DP_AUX_CH_CTL_TIME_OUT_400us |
(sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
uint32_t idle_frames = 1;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
bool only_standby = false;
if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
val |= EDP_PSR_LINK_STANDBY;
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
} else
val |= EDP_PSR_LINK_DISABLE;
I915_WRITE(EDP_PSR_CTL(dev), val |
(IS_BROADWELL(dev) ? 0 : link_entry_time) |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
}
static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
lockdep_assert_held(&dev_priv->psr.lock);
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
dev_priv->psr.source_ok = false;
if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
return false;
}
if (!i915.enable_psr) {
DRM_DEBUG_KMS("PSR disable by flag\n");
return false;
}
/* Below limitations aren't valid for Broadwell */
if (IS_BROADWELL(dev))
goto out;
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
return false;
}
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
}
out:
dev_priv->psr.source_ok = true;
return true;
}
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
/* Enable/Re-enable PSR on the host */
intel_edp_psr_enable_source(intel_dp);
dev_priv->psr.active = true;
}
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
if (!HAS_PSR(dev)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
return;
}
if (!is_edp_psr(intel_dp)) {
DRM_DEBUG_KMS("PSR not supported by this panel\n");
return;
}
mutex_lock(&dev_priv->psr.lock);
if (dev_priv->psr.enabled) {
DRM_DEBUG_KMS("PSR already in use\n");
goto unlock;
}
if (!intel_edp_psr_match_conditions(intel_dp))
goto unlock;
dev_priv->psr.busy_frontbuffer_bits = 0;
intel_edp_psr_setup_vsc(intel_dp);
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
/* Enable PSR on the panel */
intel_edp_psr_enable_sink(intel_dp);
dev_priv->psr.enabled = intel_dp;
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
void intel_edp_psr_disable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL(dev),
I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
/* Wait till PSR is idle */
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
dev_priv->psr.active = false;
} else {
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
}
dev_priv->psr.enabled = NULL;
mutex_unlock(&dev_priv->psr.lock);
cancel_delayed_work_sync(&dev_priv->psr.work);
}
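The disable path polls EDP_PSR_STATUS_CTL through _wait_for(COND, timeout_ms, poll_ms). A rough userspace model of that polling contract — the real macro additionally works in jiffies and can busy-wait for sub-millisecond intervals, which this sketch glosses over:

#include <stdbool.h>
#include <time.h>

/* returns 0 on success, -1 on timeout (the kernel returns -ETIMEDOUT) */
static int wait_for_sketch(bool (*cond)(void), int timeout_ms, int poll_ms)
{
	struct timespec ts = { poll_ms / 1000, (long)(poll_ms % 1000) * 1000000L };
	int waited_ms = 0;

	while (!cond()) {
		if (waited_ms >= timeout_ms)
			return -1;
		nanosleep(&ts, NULL);
		waited_ms += poll_ms;
	}
	return 0;
}

So the call above gives PSR up to 2000 ms to reach the idle state, checking every 10 ms.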
static void intel_edp_psr_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), psr.work.work);
struct intel_dp *intel_dp = dev_priv->psr.enabled;
/* We have to make sure PSR is ready for re-enable,
* otherwise it stays disabled until the next full enable/disable cycle.
* PSR might take some time to get fully disabled
* and be ready for re-enable.
*/
if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
return;
}
mutex_lock(&dev_priv->psr.lock);
intel_dp = dev_priv->psr.enabled;
if (!intel_dp)
goto unlock;
/*
* The delayed work can race with an invalidate, hence we need to
* recheck. Since psr_flush first clears this and then reschedules, we
* won't ever miss a flush when bailing out here.
*/
if (dev_priv->psr.busy_frontbuffer_bits)
goto unlock;
intel_edp_psr_do_enable(intel_dp);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
static void intel_edp_psr_do_exit(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->psr.active) {
u32 val = I915_READ(EDP_PSR_CTL(dev));
WARN_ON(!(val & EDP_PSR_ENABLE));
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
dev_priv->psr.active = false;
}
}
void intel_edp_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
intel_edp_psr_do_exit(dev);
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
mutex_unlock(&dev_priv->psr.lock);
}
void intel_edp_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
/*
* On Haswell, sprite plane updates don't result in a PSR invalidating
* signal in the hardware, which means we need to manually fake this in
* software for all flushes, not just when we've seen a preceding
* invalidation through frontbuffer rendering.
*/
if (IS_HASWELL(dev) &&
(frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
intel_edp_psr_do_exit(dev);
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(100));
mutex_unlock(&dev_priv->psr.lock);
}
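Invalidate and flush together implement a small busy-bit protocol: rendering marks the affected frontbuffer bits busy and kicks PSR out immediately, while a flush clears them and only schedules the delayed re-enable once nothing is left busy. A stripped-down model of that bookkeeping — all names are hypothetical, and the psr.lock/worker machinery is elided:

#include <stdbool.h>
#include <stdio.h>

static unsigned int busy_bits;
static bool psr_active;

static void psr_invalidate_sketch(unsigned int bits)
{
	psr_active = false;		/* exit now: the frontbuffer is being written */
	busy_bits |= bits;
}

static void psr_flush_sketch(unsigned int bits)
{
	busy_bits &= ~bits;
	if (!psr_active && !busy_bits)
		printf("schedule delayed PSR re-enable\n");
}

int main(void)
{
	psr_invalidate_sketch(0x1);
	psr_flush_sketch(0x1);		/* idle again: re-enable is scheduled */
	return 0;
}

The 100 ms delay before re-enabling is what the worker's recheck of busy_frontbuffer_bits protects against: a new invalidate may land in that window.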
void intel_edp_psr_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
mutex_init(&dev_priv->psr.lock);
}
static void intel_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@ -4052,8 +3701,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
if (attempts == 0) {
DRM_ERROR("Panel is unable to calculate CRC after 6 vblanks\n");
return -EIO;
DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
return -ETIMEDOUT;
}
if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
@ -5117,7 +4766,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
* hard to tell without seeing the users of this function in this code.
* Check locking and ordering once that lands.
*/
if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
return;
}
@ -5233,6 +4882,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
bool has_dpcd;
struct drm_display_mode *scan;
struct edid *edid;
enum pipe pipe = INVALID_PIPE;
intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
@ -5301,11 +4951,30 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (IS_VALLEYVIEW(dev)) {
intel_dp->edp_notifier.notifier_call = edp_notify_handler;
register_reboot_notifier(&intel_dp->edp_notifier);
/*
* Figure out the current pipe for the initial backlight setup.
* If the current pipe isn't valid, try the PPS pipe, and if that
* fails just assume pipe A.
*/
if (IS_CHERRYVIEW(dev))
pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
else
pipe = PORT_TO_PIPE(intel_dp->DP);
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = intel_dp->pps_pipe;
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = PIPE_A;
DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
pipe_name(pipe));
}
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_connector->panel.backlight_power = intel_edp_backlight_power;
intel_panel_setup_backlight(connector);
intel_panel_setup_backlight(connector, pipe);
return true;
}

View file

@ -292,6 +292,9 @@ struct intel_crtc_config {
* between pch encoders and cpu encoders. */
bool has_pch_encoder;
/* Are we sending infoframes on the attached port */
bool has_infoframe;
/* CPU Transcoder for the pipe. Currently this can only differ from the
* pipe on Haswell (where we have a special eDP transcoder). */
enum transcoder cpu_transcoder;
@ -340,7 +343,10 @@ struct intel_crtc_config {
/* Selected dpll when shared or DPLL_ID_PRIVATE. */
enum intel_dpll_id shared_dpll;
/* PORT_CLK_SEL for DDI ports. */
/*
* - PORT_CLK_SEL for DDI ports on HSW/BDW.
* - enum skl_dpll on SKL
*/
uint32_t ddi_pll_sel;
/* Actual register state of the dpll, for shared dpll cross-checking. */
@ -552,6 +558,7 @@ struct intel_hdmi {
void (*set_infoframes)(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode);
bool (*infoframe_enabled)(struct drm_encoder *encoder);
};
struct intel_dp_mst_encoder;
@ -784,8 +791,9 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_device *dev);
void gen6_enable_rps_interrupts(struct drm_device *dev);
void gen6_disable_rps_interrupts(struct drm_device *dev);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
@ -992,21 +1000,16 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
void intel_edp_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_edp_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_edp_psr_init(struct drm_device *dev);
void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
int intel_dp_max_link_bw(struct intel_dp *intel_dp);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
@ -1096,7 +1099,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc,
int fitting_mode);
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
u32 level, u32 max);
int intel_panel_setup_backlight(struct drm_connector *connector);
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
void intel_panel_destroy_backlight(struct drm_connector *connector);
@ -1106,6 +1109,19 @@ extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_device *dev,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector);
void intel_backlight_register(struct drm_device *dev);
void intel_backlight_unregister(struct drm_device *dev);
/* intel_psr.c */
bool intel_psr_is_enabled(struct drm_device *dev);
void intel_psr_enable(struct intel_dp *intel_dp);
void intel_psr_disable(struct intel_dp *intel_dp);
void intel_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_psr_init(struct drm_device *dev);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);

View file

@ -156,7 +156,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
intel_psr_invalidate(dev, obj->frontbuffer_bits);
}
/**
@ -182,7 +182,7 @@ void intel_frontbuffer_flush(struct drm_device *dev,
intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
intel_edp_psr_flush(dev, frontbuffer_bits);
intel_psr_flush(dev, frontbuffer_bits);
/*
* FIXME: Unconditional fbc flushing here is a rather gross hack and

View file

@ -166,6 +166,19 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(VIDEO_DIP_CTL);
}
static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
u32 val = I915_READ(VIDEO_DIP_CTL);
if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
return val & VIDEO_DIP_ENABLE;
return false;
}
static void ibx_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
@ -204,6 +217,17 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
return val & VIDEO_DIP_ENABLE;
}
static void cpt_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
@ -245,6 +269,17 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
return val & VIDEO_DIP_ENABLE;
}
static void vlv_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
@ -283,6 +318,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
return val & VIDEO_DIP_ENABLE;
}
static void hsw_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
@ -320,6 +366,18 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(ctl_reg);
}
static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
u32 val = I915_READ(ctl_reg);
return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
VIDEO_DIP_ENABLE_VS_HSW);
}
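Every platform variant answers the same question — are infoframes currently being emitted? — so intel_hdmi keeps them behind a per-platform function pointer that intel_hdmi_get_config() can call without knowing the register layout. A bare-bones illustration of that dispatch, with stand-in types and names rather than the driver's:

#include <stdbool.h>
#include <stdio.h>

struct encoder;				/* opaque to the caller */

struct infoframe_hooks {
	bool (*infoframe_enabled)(struct encoder *enc);
};

static bool g4x_enabled_stub(struct encoder *enc)
{
	(void)enc;
	return true;			/* stands in for the VIDEO_DIP_ENABLE test */
}

int main(void)
{
	struct infoframe_hooks hooks = { .infoframe_enabled = g4x_enabled_stub };

	printf("has_infoframe = %d\n", hooks.infoframe_enabled(NULL));
	return 0;
}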
/*
* The data we write to the DIP data buffer registers is 1 byte bigger than the
* HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
@ -724,6 +782,9 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
if (intel_hdmi->infoframe_enabled(&encoder->base))
pipe_config->has_infoframe = true;
if (tmp & SDVO_AUDIO_ENABLE)
pipe_config->has_audio = true;
@ -925,6 +986,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
if (pipe_config->has_hdmi_sink)
pipe_config->has_infoframe = true;
if (intel_hdmi->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
if (pipe_config->has_hdmi_sink &&
@ -1619,18 +1683,23 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
} else if (IS_G4X(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
} else if (HAS_DDI(dev)) {
intel_hdmi->write_infoframe = hsw_write_infoframe;
intel_hdmi->set_infoframes = hsw_set_infoframes;
intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
} else if (HAS_PCH_IBX(dev)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
intel_hdmi->set_infoframes = ibx_set_infoframes;
intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
} else {
intel_hdmi->write_infoframe = cpt_write_infoframe;
intel_hdmi->set_infoframes = cpt_set_infoframes;
intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
}
if (HAS_DDI(dev))

View file

@ -136,11 +136,10 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_ALIGN 4096
#define RING_EXECLIST_QFULL (1 << 0x2)
#define RING_EXECLIST1_VALID (1 << 0x3)
#define RING_EXECLIST0_VALID (1 << 0x4)
@ -204,6 +203,9 @@ enum {
};
#define GEN8_CTX_ID_SHIFT 32
static int intel_lr_context_pin(struct intel_engine_cs *ring,
struct intel_context *ctx);
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
* @dev: DRM device.
@ -219,6 +221,9 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
{
WARN_ON(i915.enable_ppgtt == -1);
if (INTEL_INFO(dev)->gen >= 9)
return 1;
if (enable_execlists == 0)
return 0;
@ -275,7 +280,8 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
struct drm_i915_gem_object *ctx_obj0,
struct drm_i915_gem_object *ctx_obj1)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t temp = 0;
uint32_t desc[4];
unsigned long flags;
@ -300,13 +306,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
* Instead, we do the runtime_pm_get/put when creating/destroying requests.
*/
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
if (IS_CHERRYVIEW(dev_priv->dev)) {
if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
if (dev_priv->uncore.fw_rendercount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_RENDER);
if (dev_priv->uncore.fw_mediacount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_MEDIA);
if (INTEL_INFO(dev)->gen >= 9) {
if (dev_priv->uncore.fw_blittercount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_BLITTER);
}
} else {
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
@ -325,13 +336,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
/* Release Force Wakeup (see the big comment above). */
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
if (IS_CHERRYVIEW(dev_priv->dev)) {
if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
if (--dev_priv->uncore.fw_rendercount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_RENDER);
if (--dev_priv->uncore.fw_mediacount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_MEDIA);
if (INTEL_INFO(dev)->gen >= 9) {
if (--dev_priv->uncore.fw_blittercount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_BLITTER);
}
} else {
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
@ -341,7 +357,9 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
}
static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
struct drm_i915_gem_object *ring_obj,
u32 tail)
{
struct page *page;
uint32_t *reg_state;
@ -350,6 +368,7 @@ static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
reg_state = kmap_atomic(page);
reg_state[CTX_RING_TAIL+1] = tail;
reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
kunmap_atomic(reg_state);
@ -360,21 +379,25 @@ static void execlists_submit_contexts(struct intel_engine_cs *ring,
struct intel_context *to0, u32 tail0,
struct intel_context *to1, u32 tail1)
{
struct drm_i915_gem_object *ctx_obj0;
struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
struct drm_i915_gem_object *ctx_obj1 = NULL;
struct intel_ringbuffer *ringbuf1 = NULL;
ctx_obj0 = to0->engine[ring->id].state;
BUG_ON(!ctx_obj0);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
execlists_ctx_write_tail(ctx_obj0, tail0);
execlists_update_context(ctx_obj0, ringbuf0->obj, tail0);
if (to1) {
ringbuf1 = to1->engine[ring->id].ringbuf;
ctx_obj1 = to1->engine[ring->id].state;
BUG_ON(!ctx_obj1);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
execlists_ctx_write_tail(ctx_obj1, tail1);
execlists_update_context(ctx_obj1, ringbuf1->obj, tail1);
}
execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
@ -384,7 +407,6 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
{
struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
assert_spin_locked(&ring->execlist_lock);
@ -401,7 +423,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
list_del(&req0->execlist_link);
queue_work(dev_priv->wq, &req0->work);
list_add_tail(&req0->execlist_link,
&ring->execlist_retired_req_list);
req0 = cursor;
} else {
req1 = cursor;
@ -423,7 +446,6 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
static bool execlists_check_remove_request(struct intel_engine_cs *ring,
u32 request_id)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_ctx_submit_request *head_req;
assert_spin_locked(&ring->execlist_lock);
@ -441,7 +463,8 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
if (--head_req->elsp_submitted <= 0) {
list_del(&head_req->execlist_link);
queue_work(dev_priv->wq, &head_req->work);
list_add_tail(&head_req->execlist_link,
&ring->execlist_retired_req_list);
return true;
}
}
@ -510,22 +533,6 @@ void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
((u32)ring->next_context_status_buffer & 0x07) << 8);
}
static void execlists_free_request_task(struct work_struct *work)
{
struct intel_ctx_submit_request *req =
container_of(work, struct intel_ctx_submit_request, work);
struct drm_device *dev = req->ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_runtime_pm_put(dev_priv);
mutex_lock(&dev->struct_mutex);
i915_gem_context_unreference(req->ctx);
mutex_unlock(&dev->struct_mutex);
kfree(req);
}
static int execlists_context_queue(struct intel_engine_cs *ring,
struct intel_context *to,
u32 tail)
@ -540,9 +547,12 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
return -ENOMEM;
req->ctx = to;
i915_gem_context_reference(req->ctx);
if (to != ring->default_context)
intel_lr_context_pin(ring, to);
req->ring = ring;
req->tail = tail;
INIT_WORK(&req->work, execlists_free_request_task);
intel_runtime_pm_get(dev_priv);
@ -561,9 +571,10 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
if (to == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
"More than 2 already-submitted reqs queued\n");
list_del(&tail_req->execlist_link);
queue_work(dev_priv->wq, &tail_req->work);
list_add_tail(&tail_req->execlist_link,
&ring->execlist_retired_req_list);
}
}
@ -731,6 +742,36 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
return 0;
}
void intel_execlists_retire_requests(struct intel_engine_cs *ring)
{
struct intel_ctx_submit_request *req, *tmp;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
unsigned long flags;
struct list_head retired_list;
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
if (list_empty(&ring->execlist_retired_req_list))
return;
INIT_LIST_HEAD(&retired_list);
spin_lock_irqsave(&ring->execlist_lock, flags);
list_replace_init(&ring->execlist_retired_req_list, &retired_list);
spin_unlock_irqrestore(&ring->execlist_lock, flags);
list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
struct intel_context *ctx = req->ctx;
struct drm_i915_gem_object *ctx_obj =
ctx->engine[ring->id].state;
if (ctx_obj && (ctx != ring->default_context))
intel_lr_context_unpin(ring, ctx);
intel_runtime_pm_put(dev_priv);
i915_gem_context_unreference(req->ctx);
list_del(&req->execlist_link);
kfree(req);
}
}
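The retire path is a textbook splice-under-lock: list_replace_init() steals the entire retired list in O(1) while holding the spinlock, and the heavyweight unpin/unreference/kfree work then runs with the lock dropped. A pthreads approximation of the same pattern (names illustrative):

#include <pthread.h>
#include <stdlib.h>

struct req {
	struct req *next;
};

static struct req *retired_list;	/* protected by list_lock */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void retire_requests_sketch(void)
{
	struct req *local, *r;

	pthread_mutex_lock(&list_lock);
	local = retired_list;		/* the list_replace_init() analogue */
	retired_list = NULL;
	pthread_mutex_unlock(&list_lock);

	while ((r = local) != NULL) {	/* teardown runs outside the lock */
		local = r->next;
		free(r);
	}
}

int main(void)
{
	retire_requests_sketch();	/* empty list: a harmless no-op */
	return 0;
}

Keeping the teardown outside the spinlock matters because unpin/unreference can sleep, which is forbidden while a spinlock is held.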
void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@ -791,9 +832,55 @@ void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
execlists_context_queue(ring, ctx, ringbuf->tail);
}
static int intel_lr_context_pin(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
int ret = 0;
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
if (ctx->engine[ring->id].unpin_count++ == 0) {
ret = i915_gem_obj_ggtt_pin(ctx_obj,
GEN8_LR_CONTEXT_ALIGN, 0);
if (ret)
goto reset_unpin_count;
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret)
goto unpin_ctx_obj;
}
return ret;
unpin_ctx_obj:
i915_gem_object_ggtt_unpin(ctx_obj);
reset_unpin_count:
ctx->engine[ring->id].unpin_count = 0;
return ret;
}
void intel_lr_context_unpin(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
if (ctx_obj) {
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
if (--ctx->engine[ring->id].unpin_count == 0) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
}
}
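intel_lr_context_pin/unpin is a refcount guard: only the 0 -> 1 transition does the real GGTT pin and ringbuffer map, and only the 1 -> 0 transition undoes them, so nested users stay cheap. Reduced to its skeleton, with hypothetical stub backends:

#include <stdio.h>

static int pin_count;

static int backend_pin(void)    { printf("pin\n");   return 0; }
static void backend_unpin(void) { printf("unpin\n"); }

static int ctx_pin_sketch(void)
{
	if (pin_count++ == 0)
		return backend_pin();	/* first user does the real work */
	return 0;
}

static void ctx_unpin_sketch(void)
{
	if (--pin_count == 0)
		backend_unpin();	/* last user tears it down */
}

int main(void)
{
	ctx_pin_sketch();
	ctx_pin_sketch();		/* nested: no second pin */
	ctx_unpin_sketch();
	ctx_unpin_sketch();		/* only this call prints "unpin" */
	return 0;
}

One wrinkle the sketch omits: on failure the kernel version resets unpin_count to 0 rather than leaving it half-incremented.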
static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
int ret;
if (ring->outstanding_lazy_seqno)
return 0;
@ -804,6 +891,14 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
if (request == NULL)
return -ENOMEM;
if (ctx != ring->default_context) {
ret = intel_lr_context_pin(ring, ctx);
if (ret) {
kfree(request);
return ret;
}
}
/* Hold a reference to the context this request belongs to
* (we will need it when the time comes to emit/retire the
* request).
@ -989,6 +1084,44 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
return 0;
}
static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
int ret, i;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
if (WARN_ON(w->count == 0))
return 0;
ring->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(ringbuf);
if (ret)
return ret;
ret = intel_logical_ring_begin(ringbuf, w->count * 2 + 2);
if (ret)
return ret;
intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
intel_logical_ring_emit(ringbuf, w->reg[i].addr);
intel_logical_ring_emit(ringbuf, w->reg[i].value);
}
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);
ring->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(ringbuf);
if (ret)
return ret;
return 0;
}
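The intel_logical_ring_begin(ringbuf, w->count * 2 + 2) budget falls directly out of the command layout: one MI_LOAD_REGISTER_IMM header, an address/value dword pair per workaround register, and a trailing MI_NOOP. A toy emitter that makes the arithmetic concrete — the header encoding below is a placeholder, not the real MI opcode:

#include <stdint.h>
#include <stdio.h>

#define LRI_HEADER(n)	(0x22000000u | (2u * (n) - 1))	/* placeholder encoding */
#define NOOP		0x00000000u

int main(void)
{
	const struct { uint32_t addr, value; } w[] = { { 0x7004, 0x1 }, { 0x7008, 0x2 } };
	const unsigned int count = sizeof(w) / sizeof(w[0]);
	uint32_t cs[2 * 2 + 2];
	unsigned int i, n = 0;

	cs[n++] = LRI_HEADER(count);
	for (i = 0; i < count; i++) {
		cs[n++] = w[i].addr;	/* register offset */
		cs[n++] = w[i].value;	/* value to load */
	}
	cs[n++] = NOOP;

	printf("header %#x, emitted %u dwords (count * 2 + 2 = %u)\n",
	       cs[0], n, count * 2 + 2);
	return 0;
}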
static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
@ -1032,7 +1165,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
return ret;
return init_workarounds_ring(ring);
}
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
@ -1248,6 +1381,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
init_waitqueue_head(&ring->irq_queue);
INIT_LIST_HEAD(&ring->execlist_queue);
INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock);
ring->next_context_status_buffer = 0;
@ -1282,6 +1416,7 @@ static int logical_render_ring_init(struct drm_device *dev)
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
ring->init = gen8_init_render_ring;
ring->init_context = intel_logical_ring_workarounds_emit;
ring->cleanup = intel_fini_pipe_control;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
@ -1495,7 +1630,6 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *ring_obj = ringbuf->obj;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
struct page *page;
uint32_t *reg_state;
@ -1541,7 +1675,9 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
reg_state[CTX_RING_TAIL+1] = 0;
reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
/* Ring buffer start address is not known until the buffer is pinned.
* It is written to the context image in execlists_update_context()
*/
reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
reg_state[CTX_RING_BUFFER_CONTROL+1] =
((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
@ -1617,12 +1753,18 @@ void intel_lr_context_free(struct intel_context *ctx)
for (i = 0; i < I915_NUM_RINGS; i++) {
struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
if (ctx_obj) {
struct intel_ringbuffer *ringbuf =
ctx->engine[i].ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
if (ctx == ring->default_context) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
intel_destroy_ringbuffer_obj(ringbuf);
kfree(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
drm_gem_object_unreference(&ctx_obj->base);
}
}
@ -1632,11 +1774,14 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
int ret = 0;
WARN_ON(INTEL_INFO(ring->dev)->gen != 8);
WARN_ON(INTEL_INFO(ring->dev)->gen < 8);
switch (ring->id) {
case RCS:
ret = GEN8_LR_CONTEXT_RENDER_SIZE;
if (INTEL_INFO(ring->dev)->gen >= 9)
ret = GEN9_LR_CONTEXT_RENDER_SIZE;
else
ret = GEN8_LR_CONTEXT_RENDER_SIZE;
break;
case VCS:
case BCS:
@ -1649,7 +1794,7 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
return ret;
}
static int lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
struct drm_i915_gem_object *default_ctx_obj)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@ -1659,15 +1804,11 @@ static int lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
ring->status_page.page_addr =
kmap(sg_page(default_ctx_obj->pages->sgl));
if (ring->status_page.page_addr == NULL)
return -ENOMEM;
ring->status_page.obj = default_ctx_obj;
I915_WRITE(RING_HWS_PGA(ring->mmio_base),
(u32)ring->status_page.gfx_addr);
POSTING_READ(RING_HWS_PGA(ring->mmio_base));
return 0;
}
/**
@ -1686,6 +1827,7 @@ static int lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
const bool is_global_default_ctx = (ctx == ring->default_context);
struct drm_device *dev = ring->dev;
struct drm_i915_gem_object *ctx_obj;
uint32_t context_size;
@ -1705,21 +1847,22 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
return ret;
}
ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
if (ret) {
DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
drm_gem_object_unreference(&ctx_obj->base);
return ret;
if (is_global_default_ctx) {
ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
if (ret) {
DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
ret);
drm_gem_object_unreference(&ctx_obj->base);
return ret;
}
}
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
if (!ringbuf) {
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
ring->name);
i915_gem_object_ggtt_unpin(ctx_obj);
drm_gem_object_unreference(&ctx_obj->base);
ret = -ENOMEM;
return ret;
goto error_unpin_ctx;
}
ringbuf->ring = ring;
@ -1732,43 +1875,51 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
ringbuf->space = ringbuf->size;
ringbuf->last_retired_head = -1;
/* TODO: For now we put this in the mappable region so that we can reuse
* the existing ringbuffer code which ioremaps it. When we start
* creating many contexts, this will no longer work and we must switch
* to a kmapish interface.
*/
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
if (ringbuf->obj == NULL) {
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_DEBUG_DRIVER(
"Failed to allocate ringbuffer obj %s: %d\n",
ring->name, ret);
goto error;
goto error_free_rbuf;
}
if (is_global_default_ctx) {
ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR(
"Failed to pin and map ringbuffer %s: %d\n",
ring->name, ret);
goto error_destroy_rbuf;
}
}
}
ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
if (ret) {
DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
intel_destroy_ringbuffer_obj(ringbuf);
goto error;
}
ctx->engine[ring->id].ringbuf = ringbuf;
ctx->engine[ring->id].state = ctx_obj;
if (ctx == ring->default_context) {
ret = lrc_setup_hardware_status_page(ring, ctx_obj);
if (ret) {
DRM_ERROR("Failed to setup hardware status page\n");
goto error;
}
}
if (ctx == ring->default_context)
lrc_setup_hardware_status_page(ring, ctx_obj);
if (ring->id == RCS && !ctx->rcs_initialized) {
if (ring->init_context) {
ret = ring->init_context(ring, ctx);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
ret = intel_lr_context_render_state_init(ring, ctx);
if (ret) {
DRM_ERROR("Init render state failed: %d\n", ret);
ctx->engine[ring->id].ringbuf = NULL;
ctx->engine[ring->id].state = NULL;
intel_destroy_ringbuffer_obj(ringbuf);
goto error;
}
ctx->rcs_initialized = true;
@ -1777,8 +1928,15 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
return 0;
error:
if (is_global_default_ctx)
intel_unpin_ringbuffer_obj(ringbuf);
error_destroy_rbuf:
intel_destroy_ringbuffer_obj(ringbuf);
error_free_rbuf:
kfree(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
error_unpin_ctx:
if (is_global_default_ctx)
i915_gem_object_ggtt_unpin(ctx_obj);
drm_gem_object_unreference(&ctx_obj->base);
return ret;
}

View file

@ -24,6 +24,8 @@
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_
#define GEN8_LR_CONTEXT_ALIGN 4096
/* Execlists regs */
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
@ -67,6 +69,8 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring);
void intel_lr_context_unpin(struct intel_engine_cs *ring,
struct intel_context *ctx);
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@ -104,11 +108,11 @@ struct intel_ctx_submit_request {
u32 tail;
struct list_head execlist_link;
struct work_struct work;
int elsp_submitted;
};
void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);
#endif /* _INTEL_LRC_H_ */

View file

@ -1116,7 +1116,7 @@ void intel_lvds_init(struct drm_device *dev)
drm_connector_register(connector);
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector);
intel_panel_setup_backlight(connector, INVALID_PIPE);
return;

View file

@ -521,6 +521,9 @@ static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return 0;
return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
@ -536,12 +539,15 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
struct intel_panel *panel = &connector->panel;
u32 val = 0;
mutex_lock(&dev_priv->backlight_lock);
val = dev_priv->display.get_backlight(connector);
val = intel_panel_compute_brightness(connector, val);
if (panel->backlight.enabled) {
val = dev_priv->display.get_backlight(connector);
val = intel_panel_compute_brightness(connector, val);
}
mutex_unlock(&dev_priv->backlight_lock);
@ -602,6 +608,9 @@ static void vlv_set_backlight(struct intel_connector *connector, u32 level)
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 tmp;
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return;
tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
}
@ -625,10 +634,9 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 hw_level;
if (!panel->backlight.present || pipe == INVALID_PIPE)
if (!panel->backlight.present)
return;
mutex_lock(&dev_priv->backlight_lock);
@ -656,6 +664,12 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 hw_level;
/*
* INVALID_PIPE may occur during driver init because
* connection_mutex isn't held across the entire backlight
* setup + modeset readout, and the BIOS can issue the
* requests at any time.
*/
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
@ -717,6 +731,9 @@ static void vlv_disable_backlight(struct intel_connector *connector)
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 tmp;
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return;
intel_panel_actually_set_backlight(connector, 0);
tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
@ -728,9 +745,8 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
if (!panel->backlight.present || pipe == INVALID_PIPE)
if (!panel->backlight.present)
return;
/*
@ -906,6 +922,9 @@ static void vlv_enable_backlight(struct intel_connector *connector)
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 ctl, ctl2;
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return;
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
DRM_DEBUG_KMS("backlight already enabled\n");
@ -934,7 +953,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
if (!panel->backlight.present || pipe == INVALID_PIPE)
if (!panel->backlight.present)
return;
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
@ -1026,6 +1045,9 @@ static int intel_backlight_device_register(struct intel_connector *connector)
if (WARN_ON(panel->backlight.device))
return -ENODEV;
if (!panel->backlight.present)
return 0;
WARN_ON(panel->backlight.max == 0);
memset(&props, 0, sizeof(props));
@ -1061,6 +1083,10 @@ static int intel_backlight_device_register(struct intel_connector *connector)
panel->backlight.device = NULL;
return -ENODEV;
}
DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n",
connector->base.name);
return 0;
}
@ -1115,7 +1141,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
return scale(min, 0, 255, 0, panel->backlight.max);
}
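get_backlight_min_vbt() maps the VBT's 0..255 minimum-brightness byte onto the hardware PWM range via scale(). A condensed version of that linear remap — i915's real helper also rounds to nearest, which this integer-division sketch skips:

#include <stdint.h>
#include <stdio.h>

static uint32_t scale_sketch(uint32_t val, uint32_t smin, uint32_t smax,
			     uint32_t dmin, uint32_t dmax)
{
	/* caller guarantees smin <= val <= smax */
	return dmin + (uint32_t)((uint64_t)(val - smin) * (dmax - dmin) /
				 (smax - smin));
}

int main(void)
{
	/* a VBT minimum of 25/255 on a PWM whose max happens to be 2550 -> 250 */
	printf("%u\n", (unsigned int)scale_sketch(25, 0, 255, 0, 2550));
	return 0;
}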
static int bdw_setup_backlight(struct intel_connector *connector)
static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -1141,7 +1167,7 @@ static int bdw_setup_backlight(struct intel_connector *connector)
return 0;
}
static int pch_setup_backlight(struct intel_connector *connector)
static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -1168,7 +1194,7 @@ static int pch_setup_backlight(struct intel_connector *connector)
return 0;
}
static int i9xx_setup_backlight(struct intel_connector *connector)
static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -1200,7 +1226,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
return 0;
}
static int i965_setup_backlight(struct intel_connector *connector)
static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -1230,37 +1256,40 @@ static int i965_setup_backlight(struct intel_connector *connector)
return 0;
}
static int vlv_setup_backlight(struct intel_connector *connector)
static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe;
enum pipe p;
u32 ctl, ctl2, val;
for_each_pipe(dev_priv, pipe) {
u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
for_each_pipe(dev_priv, p) {
u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(p));
/* Skip if the modulation freq is already set */
if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
continue;
cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
I915_WRITE(VLV_BLC_PWM_CTL(p), (0xf42 << 16) |
cur_val);
}
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return -ENODEV;
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
panel->backlight.max = ctl >> 16;
if (!panel->backlight.max)
return -ENODEV;
panel->backlight.min = get_backlight_min_vbt(connector);
val = _vlv_get_backlight(dev, PIPE_A);
val = _vlv_get_backlight(dev, pipe);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
@ -1269,7 +1298,7 @@ static int vlv_setup_backlight(struct intel_connector *connector)
return 0;
}
int intel_panel_setup_backlight(struct drm_connector *connector)
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -1288,7 +1317,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
/* set level and max in panel struct */
mutex_lock(&dev_priv->backlight_lock);
ret = dev_priv->display.setup_backlight(intel_connector);
ret = dev_priv->display.setup_backlight(intel_connector, pipe);
mutex_unlock(&dev_priv->backlight_lock);
if (ret) {
@ -1297,15 +1326,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
return ret;
}
intel_backlight_device_register(intel_connector);
panel->backlight.present = true;
DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, "
"sysfs interface %sregistered\n",
DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
connector->name,
panel->backlight.enabled ? "enabled" : "disabled",
panel->backlight.level, panel->backlight.max,
panel->backlight.device ? "" : "not ");
panel->backlight.level, panel->backlight.max);
return 0;
}
@ -1316,7 +1342,6 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
struct intel_panel *panel = &intel_connector->panel;
panel->backlight.present = false;
intel_backlight_device_unregister(intel_connector);
}
/* Set up chip specific backlight functions */
@ -1379,3 +1404,19 @@ void intel_panel_fini(struct intel_panel *panel)
drm_mode_destroy(intel_connector->base.dev,
panel->downclock_mode);
}
void intel_backlight_register(struct drm_device *dev)
{
struct intel_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
intel_backlight_device_register(connector);
}
void intel_backlight_unregister(struct drm_device *dev)
{
struct intel_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
intel_backlight_device_unregister(connector);
}

View file

@ -4451,7 +4451,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
dev_priv->rps.min_freq_softlimit);
if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
& GENFREQSTATUS) == 0, 5))
& GENFREQSTATUS) == 0, 100))
DRM_ERROR("timed out waiting for Punit\n");
vlv_force_gfx_clock(dev_priv, false);
@ -4504,14 +4504,8 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
"Odd GPU freq value\n"))
val &= ~1;
if (val != dev_priv->rps.cur_freq) {
DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
dev_priv->rps.cur_freq,
vlv_gpu_freq(dev_priv, val), val);
if (val != dev_priv->rps.cur_freq)
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
}
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
@ -4519,26 +4513,6 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}
static void gen8_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
~dev_priv->pm_rps_events);
/* Complete PM interrupt masking here doesn't race with the rps work
* item again unmasking PM interrupts because that is using a different
* register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
* leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
* gen8_enable_rps will clean up. */
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
}
static void gen9_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -4546,36 +4520,12 @@ static void gen9_disable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, 0);
}
static void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
~dev_priv->pm_rps_events);
/* Complete PM interrupt masking here doesn't race with the rps work
* item again unmasking PM interrupts because that is using a different
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
}
static void gen6_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
if (IS_BROADWELL(dev))
gen8_disable_rps_interrupts(dev);
else
gen6_disable_rps_interrupts(dev);
}
static void cherryview_disable_rps(struct drm_device *dev)
@ -4583,8 +4533,6 @@ static void cherryview_disable_rps(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RC_CONTROL, 0);
gen8_disable_rps_interrupts(dev);
}
static void valleyview_disable_rps(struct drm_device *dev)
@ -4598,8 +4546,6 @@ static void valleyview_disable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, 0);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
gen6_disable_rps_interrupts(dev);
}
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
@ -4663,47 +4609,46 @@ int intel_enable_rc6(const struct drm_device *dev)
return i915.enable_rc6;
}
static void gen8_enable_rps_interrupts(struct drm_device *dev)
static void gen6_init_rps_frequencies(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t rp_state_cap;
u32 ddcc_status = 0;
int ret;
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
static void gen6_enable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
{
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
/* All of these values are in units of 50MHz */
dev_priv->rps.cur_freq = 0;
/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
/* XXX: only BYT has a special efficient freq */
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
/* hw_max = RP0 until we check for overclocking */
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
ret = sandybridge_pcode_read(dev_priv,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status);
if (0 == ret)
dev_priv->rps.efficient_freq =
(ddcc_status >> 8) & 0xff;
}
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
if (dev_priv->rps.min_freq_softlimit == 0)
dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
if (dev_priv->rps.min_freq_softlimit == 0) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
dev_priv->rps.min_freq_softlimit =
/* max(RPe, 450 MHz) */
max(dev_priv->rps.efficient_freq, (u8) 9);
else
dev_priv->rps.min_freq_softlimit =
dev_priv->rps.min_freq;
}
}
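The magic (u8) 9 above is plain unit conversion: these RPS fields are programmed in 50 MHz steps (GT_FREQUENCY_MULTIPLIER in i915), so a 450 MHz floor is 450 / 50 = 9 units, and max(RPe, 450 MHz) keeps the soft minimum at whichever is higher. Spelled out:

#include <stdio.h>

#define GT_FREQUENCY_MULTIPLIER 50	/* MHz per RPS step on SNB..BDW */

static int mhz_to_rps_units(int mhz)
{
	return mhz / GT_FREQUENCY_MULTIPLIER;
}

int main(void)
{
	printf("450 MHz floor = %d RPS units\n", mhz_to_rps_units(450));	/* 9 */
	return 0;
}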
static void gen9_enable_rps(struct drm_device *dev)
@ -4749,7 +4694,7 @@ static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
uint32_t rc6_mask = 0, rp_state_cap;
uint32_t rc6_mask = 0;
int unused;
/* 1a: Software RC state - RC0 */
@ -4762,8 +4707,8 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
parse_rp_state_cap(dev_priv, rp_state_cap);
/* Initialize rps frequencies */
gen6_init_rps_frequencies(dev);
/* 2b: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@ -4821,9 +4766,8 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 6: Ring frequency + overclocking (our driver does this later) */
gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
gen8_enable_rps_interrupts(dev);
dev_priv->rps.power = HIGH_POWER; /* force a reset */
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
@ -4832,7 +4776,6 @@ static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
u32 rp_state_cap;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
@ -4856,9 +4799,8 @@ static void gen6_enable_rps(struct drm_device *dev)
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
parse_rp_state_cap(dev_priv, rp_state_cap);
/* Initialize rps frequencies */
gen6_init_rps_frequencies(dev);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@ -4921,8 +4863,6 @@ static void gen6_enable_rps(struct drm_device *dev)
dev_priv->rps.power = HIGH_POWER; /* force a reset */
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
gen6_enable_rps_interrupts(dev);
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
if (IS_GEN6(dev) && ret) {
@ -4975,9 +4915,9 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
gpu_freq--) {
int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
int diff = dev_priv->rps.max_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
if (INTEL_INFO(dev)->gen >= 8) {
@ -5132,12 +5072,15 @@ static void cherryview_setup_pctx(struct drm_device *dev)
pcbr = I915_READ(VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
paddr = (dev_priv->mm.stolen_base +
(gtt->stolen_size - pctx_size));
pctx_paddr = (paddr & (~4095));
I915_WRITE(VLV_PCBR, pctx_paddr);
}
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}
static void valleyview_setup_pctx(struct drm_device *dev)
@ -5163,6 +5106,8 @@ static void valleyview_setup_pctx(struct drm_device *dev)
goto out;
}
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
/*
* From the Gunit register HAS:
* The Gfx driver is expected to program this register and ensure
@ -5181,6 +5126,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
I915_WRITE(VLV_PCBR, pctx_paddr);
out:
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
dev_priv->vlv_pctx = pctx;
}
@ -5217,7 +5163,7 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
dev_priv->mem_freq = 1333;
break;
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
@ -5259,7 +5205,10 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
mutex_lock(&dev_priv->rps.hw_lock);
val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
mutex_lock(&dev_priv->dpio_lock);
val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
mutex_unlock(&dev_priv->dpio_lock);
switch ((val >> 2) & 0x7) {
case 0:
case 1:
@ -5283,7 +5232,7 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
dev_priv->mem_freq = 1600;
break;
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
@ -5369,8 +5318,6 @@ static void cherryview_enable_rps(struct drm_device *dev)
/* For now we assume BIOS is allocating and populating the PCBR */
pcbr = I915_READ(VLV_PCBR);
DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
/* 3: Enable RC6 */
if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
(pcbr >> VLV_PCBR_ADDR_SHIFT))
@ -5400,7 +5347,10 @@ static void cherryview_enable_rps(struct drm_device *dev)
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
/* RPS code assumes GPLL is used */
WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@ -5414,8 +5364,6 @@ static void cherryview_enable_rps(struct drm_device *dev)
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
gen8_enable_rps_interrupts(dev);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
@ -5480,7 +5428,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
/* RPS code assumes GPLL is used */
WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@ -5494,8 +5445,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
gen6_enable_rps_interrupts(dev);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
@ -6254,12 +6203,17 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Interrupts should be disabled already to avoid re-arming. */
WARN_ON(intel_irqs_enabled(dev_priv));
if (INTEL_INFO(dev)->gen < 6)
return;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
cancel_work_sync(&dev_priv->rps.work);
/*
* TODO: disable RPS interrupts on GEN9+ too once RPS support
* is added for it.
*/
if (INTEL_INFO(dev)->gen < 9)
gen6_disable_rps_interrupts(dev);
/* Force GPU to min freq during suspend */
gen6_rps_idle(dev_priv);
@ -6269,9 +6223,6 @@ void intel_disable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Interrupts should be disabled already to avoid re-arming. */
WARN_ON(intel_irqs_enabled(dev_priv));
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
@ -6287,6 +6238,7 @@ void intel_disable_gt_powersave(struct drm_device *dev)
valleyview_disable_rps(dev);
else
gen6_disable_rps(dev);
dev_priv->rps.enabled = false;
mutex_unlock(&dev_priv->rps.hw_lock);
}
@ -6301,6 +6253,13 @@ static void intel_gen6_powersave_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
/*
* TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
* added for it.
*/
if (INTEL_INFO(dev)->gen < 9)
gen6_reset_rps_interrupts(dev);
if (IS_CHERRYVIEW(dev)) {
cherryview_enable_rps(dev);
} else if (IS_VALLEYVIEW(dev)) {
@ -6315,6 +6274,10 @@ static void intel_gen6_powersave_work(struct work_struct *work)
__gen6_update_ring_freq(dev);
}
dev_priv->rps.enabled = true;
if (INTEL_INFO(dev)->gen < 9)
gen6_enable_rps_interrupts(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
@ -6953,18 +6916,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
/* WaDisableSDEUnitClockGating:chv */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaDisableGunitClockGating:chv (pre-production hw) */
I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
GINT_DIS);
/* WaDisableFfDopClockGating:chv (pre-production hw) */
I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
/* WaDisableDopClockGating:chv (pre-production hw) */
I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_init_clock_gating(struct drm_device *dev)
@ -7135,7 +7086,7 @@ void intel_init_pm(struct drm_device *dev)
i915_ironlake_get_mem_freq(dev);
/* For FIFO watermark updates */
if (IS_GEN9(dev)) {
if (INTEL_INFO(dev)->gen >= 9) {
skl_setup_wm_latency(dev);
dev_priv->display.init_clock_gating = gen9_init_clock_gating;
@ -7222,7 +7173,7 @@ void intel_init_pm(struct drm_device *dev)
}
}
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@ -7232,8 +7183,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
}
I915_WRITE(GEN6_PCODE_DATA, *val);
if (INTEL_INFO(dev_priv)->gen >= 9)
I915_WRITE(GEN9_PCODE_DATA1, 0);
I915_WRITE(GEN6_PCODE_DATA1, 0);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
@ -7248,7 +7198,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
return 0;
}
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@ -7271,99 +7221,66 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
return 0;
}
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
static int vlv_gpu_freq_div(unsigned int czclk_freq)
{
int div;
/* 4 x czclk */
switch (dev_priv->mem_freq) {
case 800:
div = 10;
break;
case 1066:
div = 12;
break;
case 1333:
div = 16;
break;
switch (czclk_freq) {
case 200:
return 10;
case 267:
return 12;
case 320:
case 333:
return 16;
case 400:
return 20;
default:
return -1;
}
}
return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
div = vlv_gpu_freq_div(czclk_freq);
if (div < 0)
return div;
return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
}
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
int mul;
int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
/* 4 x czclk */
switch (dev_priv->mem_freq) {
case 800:
mul = 10;
break;
case 1066:
mul = 12;
break;
case 1333:
mul = 16;
break;
default:
return -1;
}
mul = vlv_gpu_freq_div(czclk_freq);
if (mul < 0)
return mul;
return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
}
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int div, freq;
int div, czclk_freq = dev_priv->rps.cz_freq;
switch (dev_priv->rps.cz_freq) {
case 200:
div = 5;
break;
case 267:
div = 6;
break;
case 320:
case 333:
case 400:
div = 8;
break;
default:
return -1;
}
div = vlv_gpu_freq_div(czclk_freq) / 2;
if (div < 0)
return div;
freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
return freq;
return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
}
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
int mul, opcode;
int mul, czclk_freq = dev_priv->rps.cz_freq;
switch (dev_priv->rps.cz_freq) {
case 200:
mul = 5;
break;
case 267:
mul = 6;
break;
case 320:
case 333:
case 400:
mul = 8;
break;
default:
return -1;
}
mul = vlv_gpu_freq_div(czclk_freq) / 2;
if (mul < 0)
return mul;
/* CHV needs even values */
opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
return opcode;
return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
}
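
For reference, the opcode <-> MHz mapping above is linear in the CZ clock. A standalone sketch, not driver code, assuming a 333 MHz CZ clock (for which vlv_gpu_freq_div() returns 16) and mirroring the kernel's rounding for non-negative operands, shows the round trip:

/* Standalone sketch of the VLV opcode <-> MHz conversion above.
 * Assumes czclk = 333 MHz, i.e. a divider of 16 per vlv_gpu_freq_div(). */
#include <stdio.h>

/* Same result as the kernel macro for non-negative operands. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

static int sketch_gpu_freq(int czclk, int div, int opcode)
{
	return DIV_ROUND_CLOSEST(czclk * (opcode + 6 - 0xbd), div);
}

static int sketch_freq_opcode(int czclk, int mul, int mhz)
{
	return DIV_ROUND_CLOSEST(mul * mhz, czclk) + 0xbd - 6;
}

int main(void)
{
	int czclk = 333, div = 16, opcode = 0xd2;	/* arbitrary example */
	int mhz = sketch_gpu_freq(czclk, div, opcode);

	/* Prints: opcode 0xd2 -> 562 MHz -> opcode 0xd2 */
	printf("opcode 0x%x -> %d MHz -> opcode 0x%x\n",
	       opcode, mhz, sketch_freq_opcode(czclk, div, mhz));
	return 0;
}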
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)

View file

@ -0,0 +1,481 @@
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/**
* DOC: Panel Self Refresh (PSR/SRD)
*
* Since Haswell the display controller supports Panel Self-Refresh on
* display panels which have a remote frame buffer (RFB) implemented
* according to the PSR spec in eDP 1.3. The PSR feature allows the display
* to go to lower standby states when the system is idle but the display is
* on, as it eliminates display refresh requests to DDR memory completely,
* as long as the frame buffer for that display is unchanged.
*
* Panel Self Refresh must be supported by both Hardware (source) and
* Panel (sink).
*
* PSR saves power by caching the framebuffer in the panel RFB, which allows us
* to power down the link and memory controller. For DSI panels the same idea
* is called "manual mode".
*
* The implementation uses the hardware-based PSR support which automatically
* enters/exits self-refresh mode. The hardware takes care of sending the
* required DP aux message and could even retrain the link (that part isn't
* enabled yet though). The hardware also keeps track of any frontbuffer
* changes to know when to exit self-refresh mode again. Unfortunately that
* part doesn't work too well, which is why the i915 PSR support uses the
* software frontbuffer tracking to make sure it doesn't miss a screen
* update. For this integration intel_psr_invalidate() and intel_psr_flush()
* get called by the frontbuffer tracking code. Note that because of locking
* issues the self-refresh re-enable code is done from a work queue, which
* must be correctly synchronized/cancelled when shutting down the pipe.
*/
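
A minimal sketch of that integration from the caller's side (simplified and hypothetical; the real hooks live in the frontbuffer tracking code, and INTEL_FRONTBUFFER_PRIMARY() is the driver's plane-tracking macro):

/* Hypothetical caller: bracket a CPU frontbuffer write with the two
 * PSR hooks, in the same order the frontbuffer tracking code uses. */
static void sketch_cpu_frontbuffer_write(struct drm_device *dev,
					 enum pipe pipe)
{
	unsigned frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);

	/* About to dirty the frontbuffer: force a PSR exit and mark the
	 * buffer busy so the delayed work won't re-enable PSR yet. */
	intel_psr_invalidate(dev, frontbuffer_bits);

	/* ... CPU rendering to the frontbuffer happens here ... */

	/* Rendering has reached memory: clear the busy bit; PSR is
	 * re-enabled from the delayed work once nothing is dirty. */
	intel_psr_flush(dev, frontbuffer_bits);
}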
#include <drm/drmP.h>
#include "intel_drv.h"
#include "i915_drv.h"
static bool is_edp_psr(struct intel_dp *intel_dp)
{
return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
bool intel_psr_is_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!HAS_PSR(dev))
return false;
return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
}
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
struct edp_vsc_psr *vsc_psr)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
uint32_t *data = (uint32_t *) vsc_psr;
unsigned int i;
/* As per BSpec (Pipe Video Data Island Packet), we need to disable
the video DIP being updated before programming the video DIP data
buffer registers for that DIP. */
I915_WRITE(ctl_reg, 0);
POSTING_READ(ctl_reg);
for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
if (i < sizeof(struct edp_vsc_psr))
I915_WRITE(data_reg + i, *data++);
else
I915_WRITE(data_reg + i, 0);
}
I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
POSTING_READ(ctl_reg);
}
static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
{
struct edp_vsc_psr psr_vsc;
/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
psr_vsc.sdp_header.HB1 = 0x7;
psr_vsc.sdp_header.HB2 = 0x2;
psr_vsc.sdp_header.HB3 = 0x8;
intel_psr_write_vsc(intel_dp, &psr_vsc);
}
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
int precharge = 0x3;
bool only_standby = false;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
[2] = DP_SET_POWER & 0xff,
[3] = 1 - 1,
[4] = DP_SET_POWER_D0,
};
int i;
BUILD_BUG_ON(sizeof(aux_msg) > 20);
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;
/* Enable PSR in sink */
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
else
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
/* Setup AUX registers */
for (i = 0; i < sizeof(aux_msg); i += 4)
I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
I915_WRITE(EDP_PSR_AUX_CTL(dev),
DP_AUX_CH_CTL_TIME_OUT_400us |
(sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
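
The loop above hands intel_dp_pack_aux() up to four message bytes per 32-bit AUX data register. An illustrative re-implementation of that packing (the real helper lives elsewhere in the driver):

/* Sketch of the AUX byte packing used above: up to four message bytes
 * per 32-bit register, most significant byte first, zero-padded. */
static uint32_t sketch_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t)src[i]) << ((3 - i) * 8);

	return v;
}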
static void intel_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
uint32_t idle_frames = 1;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
bool only_standby = false;
if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
val |= EDP_PSR_LINK_STANDBY;
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
} else
val |= EDP_PSR_LINK_DISABLE;
I915_WRITE(EDP_PSR_CTL(dev), val |
(IS_BROADWELL(dev) ? 0 : link_entry_time) |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
}
static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
lockdep_assert_held(&dev_priv->psr.lock);
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
dev_priv->psr.source_ok = false;
if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
return false;
}
if (!i915.enable_psr) {
DRM_DEBUG_KMS("PSR disable by flag\n");
return false;
}
/* The limitations below don't apply to Broadwell */
if (IS_BROADWELL(dev))
goto out;
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
return false;
}
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
}
out:
dev_priv->psr.source_ok = true;
return true;
}
static void intel_psr_do_enable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
/* Enable/Re-enable PSR on the host */
intel_psr_enable_source(intel_dp);
dev_priv->psr.active = true;
}
/**
* intel_psr_enable - Enable PSR
* @intel_dp: Intel DP
*
* This function can only be called after the pipe is fully trained and enabled.
*/
void intel_psr_enable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (!HAS_PSR(dev)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
return;
}
if (!is_edp_psr(intel_dp)) {
DRM_DEBUG_KMS("PSR not supported by this panel\n");
return;
}
mutex_lock(&dev_priv->psr.lock);
if (dev_priv->psr.enabled) {
DRM_DEBUG_KMS("PSR already in use\n");
goto unlock;
}
if (!intel_psr_match_conditions(intel_dp))
goto unlock;
dev_priv->psr.busy_frontbuffer_bits = 0;
intel_psr_setup_vsc(intel_dp);
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
/* Enable PSR on the panel */
intel_psr_enable_sink(intel_dp);
dev_priv->psr.enabled = intel_dp;
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
/**
* intel_psr_disable - Disable PSR
* @intel_dp: Intel DP
*
* This function needs to be called before disabling pipe.
*/
void intel_psr_disable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL(dev),
I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
/* Wait till PSR is idle */
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
dev_priv->psr.active = false;
} else {
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
}
dev_priv->psr.enabled = NULL;
mutex_unlock(&dev_priv->psr.lock);
cancel_delayed_work_sync(&dev_priv->psr.work);
}
static void intel_psr_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), psr.work.work);
struct intel_dp *intel_dp = dev_priv->psr.enabled;
/* We have to make sure PSR is ready for re-enable,
* otherwise it stays disabled until the next full enable/disable cycle.
* PSR might take some time to get fully disabled
* and be ready for re-enable.
*/
if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
return;
}
mutex_lock(&dev_priv->psr.lock);
intel_dp = dev_priv->psr.enabled;
if (!intel_dp)
goto unlock;
/*
* The delayed work can race with an invalidate hence we need to
* recheck. Since psr_flush first clears this and then reschedules we
* won't ever miss a flush when bailing out here.
*/
if (dev_priv->psr.busy_frontbuffer_bits)
goto unlock;
intel_psr_do_enable(intel_dp);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
static void intel_psr_exit(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->psr.active) {
u32 val = I915_READ(EDP_PSR_CTL(dev));
WARN_ON(!(val & EDP_PSR_ENABLE));
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
dev_priv->psr.active = false;
}
}
/**
* intel_psr_invalidate - Invalidate PSR
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* Since the hardware frontbuffer tracking has gaps we need to integrate
* with the software frontbuffer tracking. This function gets called every
* time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
* disabled if the frontbuffer mask contains a buffer relevant to PSR.
*
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
*/
void intel_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
intel_psr_exit(dev);
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
mutex_unlock(&dev_priv->psr.lock);
}
/**
* intel_psr_flush - Flush PSR
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* Since the hardware frontbuffer tracking has gaps we need to integrate
* with the software frontbuffer tracking. This function gets called every
* time frontbuffer rendering has completed and flushed out to memory. PSR
* can be enabled again if no other frontbuffer relevant to PSR is dirty.
*
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
*/
void intel_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
/*
* On Haswell sprite plane updates don't result in a PSR invalidating
* signal in the hardware, which means we need to manually fake this in
* software for all flushes, not just when we've seen a preceding
* invalidation through frontbuffer rendering.
*/
if (IS_HASWELL(dev) &&
(frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
intel_psr_exit(dev);
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(100));
mutex_unlock(&dev_priv->psr.lock);
}
/**
* intel_psr_init - Init basic PSR work and mutex.
* @dev: DRM device
*
* This function is called only once at driver load to initialize basic
* PSR stuff.
*/
void intel_psr_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
mutex_init(&dev_priv->psr.lock);
}

View file

@ -589,14 +589,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
goto out;
}
if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
i915_kernel_lost_context(ring->dev);
else {
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ringbuf->space = intel_ring_space(ringbuf);
ringbuf->last_retired_head = -1;
}
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ringbuf->space = intel_ring_space(ringbuf);
ringbuf->last_retired_head = -1;
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@ -665,7 +661,8 @@ intel_init_pipe_control(struct intel_engine_cs *ring)
return ret;
}
static int intel_ring_workarounds_emit(struct intel_engine_cs *ring)
static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
int ret, i;
struct drm_device *dev = ring->dev;
@ -788,25 +785,25 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
/* WaDisablePartialInstShootdown:chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* WaDisableThreadStallDopClockGating:chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
STALL_DOP_GATING_DISABLE);
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
STALL_DOP_GATING_DISABLE);
/* WaDisableDopClockGating:chv (pre-production hw) */
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
DOP_CLOCK_GATING_DISABLE);
/* WaDisableSamplerPowerBypass:chv (pre-production hw) */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
/* WaForceEnableNonCoherent:chv */
/* WaHdcDisableFetchWhenMasked:chv */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT |
HDC_DONOT_FETCH_MEM_WHEN_MASKED);
return 0;
}
static int init_workarounds_ring(struct intel_engine_cs *ring)
int init_workarounds_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -1721,13 +1718,42 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
return 0;
}
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
iounmap(ringbuf->virtual_start);
ringbuf->virtual_start = NULL;
i915_gem_object_ggtt_unpin(ringbuf->obj);
}
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = ringbuf->obj;
int ret;
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
if (ret)
return ret;
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret) {
i915_gem_object_ggtt_unpin(obj);
return ret;
}
ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
i915_gem_obj_ggtt_offset(obj), ringbuf->size);
if (ringbuf->virtual_start == NULL) {
i915_gem_object_ggtt_unpin(obj);
return -EINVAL;
}
return 0;
}
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
if (!ringbuf->obj)
return;
iounmap(ringbuf->virtual_start);
i915_gem_object_ggtt_unpin(ringbuf->obj);
drm_gem_object_unreference(&ringbuf->obj->base);
ringbuf->obj = NULL;
}
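
Splitting allocation from pin/map lets a ringbuffer object stay allocated while unpinned. A hedged sketch of the resulting bring-up (error handling abbreviated; intel_init_ring_buffer below does the same with full checks, and teardown reverses it via intel_unpin_ringbuffer_obj() then intel_destroy_ringbuffer_obj()):

/* Sketch: bring up a ringbuffer with the split helpers. */
static int sketch_ringbuf_setup(struct drm_device *dev,
				struct intel_ringbuffer *ringbuf)
{
	int ret;

	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);	/* GEM object only */
	if (ret)
		return ret;

	/* GGTT pin + write-combined CPU mapping */
	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
	if (ret)
		intel_destroy_ringbuffer_obj(ringbuf);

	return ret;
}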
@ -1735,12 +1761,7 @@ void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
int ret;
if (ringbuf->obj)
return 0;
obj = NULL;
if (!HAS_LLC(dev))
@ -1753,30 +1774,9 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
/* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1;
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
if (ret)
goto err_unref;
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
goto err_unpin;
ringbuf->virtual_start =
ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
ringbuf->size);
if (ringbuf->virtual_start == NULL) {
ret = -EINVAL;
goto err_unpin;
}
ringbuf->obj = obj;
return 0;
err_unpin:
i915_gem_object_ggtt_unpin(obj);
err_unref:
drm_gem_object_unreference(&obj->base);
return ret;
return 0;
}
static int intel_init_ring_buffer(struct drm_device *dev,
@ -1813,10 +1813,21 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
goto error;
if (ringbuf->obj == NULL) {
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
ring->name, ret);
goto error;
}
ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
ring->name, ret);
intel_destroy_ringbuffer_obj(ringbuf);
goto error;
}
}
/* Workaround an erratum on the i830 which causes a hang if
@ -1857,6 +1868,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
intel_stop_ring_buffer(ring);
WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
intel_unpin_ringbuffer_obj(ringbuf);
intel_destroy_ringbuffer_obj(ringbuf);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
@ -1942,13 +1954,6 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
break;
}
if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
dev->primary->master) {
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
}
msleep(1);
if (dev_priv->mm.interruptible && signal_pending(current)) {
@ -2439,91 +2444,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return intel_init_ring_buffer(dev, ring);
}
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct intel_ringbuffer *ringbuf = ring->buffer;
int ret;
if (ringbuf == NULL) {
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
if (!ringbuf)
return -ENOMEM;
ring->buffer = ringbuf;
}
ring->name = "render ring";
ring->id = RCS;
ring->mmio_base = RENDER_RING_BASE;
if (INTEL_INFO(dev)->gen >= 6) {
/* non-kms not supported on gen6+ */
ret = -ENODEV;
goto err_ringbuf;
}
/* Note: gem is not supported on gen5/ilk without kms (the corresponding
* gem_init ioctl returns with -ENODEV). Hence we do not need to set up
* the special gen5 functions. */
ring->add_request = i9xx_add_request;
if (INTEL_INFO(dev)->gen < 4)
ring->flush = gen2_render_ring_flush;
else
ring->flush = gen4_render_ring_flush;
ring->get_seqno = ring_get_seqno;
ring->set_seqno = ring_set_seqno;
if (IS_GEN2(dev)) {
ring->irq_get = i8xx_ring_get_irq;
ring->irq_put = i8xx_ring_put_irq;
} else {
ring->irq_get = i9xx_ring_get_irq;
ring->irq_put = i9xx_ring_put_irq;
}
ring->irq_enable_mask = I915_USER_INTERRUPT;
ring->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 4)
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
else if (IS_I830(dev) || IS_845G(dev))
ring->dispatch_execbuffer = i830_dispatch_execbuffer;
else
ring->dispatch_execbuffer = i915_dispatch_execbuffer;
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
ringbuf->size = size;
ringbuf->effective_size = ringbuf->size;
if (IS_I830(ring->dev) || IS_845G(ring->dev))
ringbuf->effective_size -= 2 * CACHELINE_BYTES;
ringbuf->virtual_start = ioremap_wc(start, size);
if (ringbuf->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
ret = -ENOMEM;
goto err_ringbuf;
}
if (!I915_NEED_GFX_HWS(dev)) {
ret = init_phys_status_page(ring);
if (ret)
goto err_vstart;
}
return 0;
err_vstart:
iounmap(ringbuf->virtual_start);
err_ringbuf:
kfree(ringbuf);
ring->buffer = NULL;
return ret;
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

View file

@ -148,7 +148,8 @@ struct intel_engine_cs {
int (*init)(struct intel_engine_cs *ring);
int (*init_context)(struct intel_engine_cs *ring);
int (*init_context)(struct intel_engine_cs *ring,
struct intel_context *ctx);
void (*write_tail)(struct intel_engine_cs *ring,
u32 value);
@ -235,6 +236,7 @@ struct intel_engine_cs {
/* Execlists */
spinlock_t execlist_lock;
struct list_head execlist_queue;
struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct intel_ringbuffer *ringbuf);
@ -381,6 +383,9 @@ intel_write_status_page(struct intel_engine_cs *ring,
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf);
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf);
@ -424,6 +429,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);
int init_workarounds_ring(struct intel_engine_cs *ring);
static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
return ringbuf->tail;
@ -441,7 +448,4 @@ static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
ring->trace_irq_seqno = seqno;
}
/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
#endif /* _INTEL_RINGBUFFER_H_ */

View file

@ -577,6 +577,23 @@ static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
power_well->data != PIPE_C);
chv_set_pipe_power_well(dev_priv, power_well, true);
if (power_well->data == PIPE_A) {
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
/*
* During driver initialization/resume we can avoid restoring the
* part of the HW/SW state that will be initialized explicitly anyway.
*/
if (dev_priv->power_domains.initializing)
return;
intel_hpd_init(dev_priv);
i915_redisable_vga_power_on(dev_priv->dev);
}
}
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
@ -586,6 +603,12 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
power_well->data != PIPE_B &&
power_well->data != PIPE_C);
if (power_well->data == PIPE_A) {
spin_lock_irq(&dev_priv->irq_lock);
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
chv_set_pipe_power_well(dev_priv, power_well, false);
if (power_well->data == PIPE_A)

View file

@ -1264,10 +1264,11 @@ intel_prepare_sprite_plane(struct drm_plane *plane,
struct drm_device *dev = plane->dev;
struct drm_crtc *crtc = state->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *fb = state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
struct drm_i915_gem_object *old_obj = intel_plane->obj;
int ret;
if (old_obj != obj) {
@ -1302,7 +1303,7 @@ intel_commit_sprite_plane(struct drm_plane *plane,
enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *fb = state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
struct drm_i915_gem_object *old_obj = intel_plane->obj;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;

View file

@ -49,17 +49,11 @@ assert_device_not_suspended(struct drm_i915_private *dev_priv)
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
u32 gt_thread_status_mask;
if (IS_HASWELL(dev_priv->dev))
gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
else
gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
/* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
DRM_ERROR("GT thread status wait timed out\n");
}
@ -120,8 +114,7 @@ static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
/* WaRsForcewakeWaitTC0:ivb,hsw */
if (INTEL_INFO(dev_priv->dev)->gen < 8)
__gen6_gt_wait_for_thread_c0(dev_priv);
__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
@ -229,10 +222,6 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for media to ack.\n");
}
/* WaRsForcewakeWaitTC0:vlv */
if (!IS_CHERRYVIEW(dev_priv->dev))
__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
@ -681,6 +670,34 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
REG_RANGE((reg), 0x14000, 0x14400) || \
REG_RANGE((reg), 0x22000, 0x24000))
#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
REG_RANGE((reg), 0xC00, 0x2000)
#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x2000, 0x4000) || \
REG_RANGE((reg), 0x5200, 0x8000) || \
REG_RANGE((reg), 0x8300, 0x8500) || \
REG_RANGE((reg), 0x8C00, 0x8D00) || \
REG_RANGE((reg), 0xB000, 0xB480) || \
REG_RANGE((reg), 0xE000, 0xE800))
#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x8800, 0x8A00) || \
REG_RANGE((reg), 0xD000, 0xD800) || \
REG_RANGE((reg), 0x12000, 0x14000) || \
REG_RANGE((reg), 0x1A000, 0x1EA00) || \
REG_RANGE((reg), 0x30000, 0x40000))
#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
REG_RANGE((reg), 0x9400, 0x9800)
#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
((reg) < 0x40000 &&\
!FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
!FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
!FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
!FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
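
As an illustration of how these ranges are consumed, a standalone sketch mirroring the decision the gen9 MMIO accessors below make inline (REG_RANGE(reg, start, end) tests start <= reg < end; domain flags as in the driver):

/* Sketch: pick the forcewake well(s) for a gen9 register offset. */
static unsigned sketch_gen9_fw_domains(uint32_t reg)
{
	if (reg >= 0x40000 || FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
		return 0;				/* no well needed */
	if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg))
		return FORCEWAKE_RENDER;		/* e.g. 0x2030 */
	if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg))
		return FORCEWAKE_MEDIA;			/* e.g. 0xd000 */
	if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
		return FORCEWAKE_RENDER | FORCEWAKE_MEDIA; /* e.g. 0x9400 */
	return FORCEWAKE_BLITTER;	/* everything else below 0x40000 */
}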
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@ -811,6 +828,45 @@ chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_FOOTER; \
}
#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
val = __raw_i915_read##x(dev_priv, reg); \
} else { \
unsigned fwengine = 0; \
if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine = FORCEWAKE_RENDER; \
} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine = FORCEWAKE_MEDIA; \
} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine |= FORCEWAKE_RENDER; \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine |= FORCEWAKE_MEDIA; \
} else { \
if (dev_priv->uncore.fw_blittercount == 0) \
fwengine = FORCEWAKE_BLITTER; \
} \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
val = __raw_i915_read##x(dev_priv, reg); \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
} \
REG_READ_FOOTER; \
}
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
@ -832,6 +888,7 @@ __gen4_read(16)
__gen4_read(32)
__gen4_read(64)
#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
@ -969,6 +1026,69 @@ chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
REG_WRITE_FOOTER; \
}
static const u32 gen9_shadowed_regs[] = {
RING_TAIL(RENDER_RING_BASE),
RING_TAIL(GEN6_BSD_RING_BASE),
RING_TAIL(VEBOX_RING_BASE),
RING_TAIL(BLT_RING_BASE),
FORCEWAKE_BLITTER_GEN9,
FORCEWAKE_RENDER_GEN9,
FORCEWAKE_MEDIA_GEN9,
GEN6_RPNSWREQ,
GEN6_RC_VIDEO_FREQ,
/* TODO: Other registers are not yet used */
};
static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
int i;
for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
if (reg == gen9_shadowed_regs[i])
return true;
return false;
}
#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
bool trace) { \
REG_WRITE_HEADER; \
if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
is_gen9_shadowed(dev_priv, reg)) { \
__raw_i915_write##x(dev_priv, reg, val); \
} else { \
unsigned fwengine = 0; \
if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine = FORCEWAKE_RENDER; \
} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine = FORCEWAKE_MEDIA; \
} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine |= FORCEWAKE_RENDER; \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine |= FORCEWAKE_MEDIA; \
} else { \
if (dev_priv->uncore.fw_blittercount == 0) \
fwengine = FORCEWAKE_BLITTER; \
} \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
fwengine); \
__raw_i915_write##x(dev_priv, reg, val); \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_put(dev_priv, \
fwengine); \
} \
REG_WRITE_FOOTER; \
}
__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
@ -994,6 +1114,7 @@ __gen4_write(16)
__gen4_write(32)
__gen4_write(64)
#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
@ -1077,6 +1198,13 @@ void intel_uncore_init(struct drm_device *dev)
switch (INTEL_INFO(dev)->gen) {
default:
WARN_ON(1);
return;
case 9:
ASSIGN_WRITE_MMIO_VFUNCS(gen9);
ASSIGN_READ_MMIO_VFUNCS(gen9);
break;
case 8:
if (IS_CHERRYVIEW(dev)) {
ASSIGN_WRITE_MMIO_VFUNCS(chv);
ASSIGN_READ_MMIO_VFUNCS(chv);

View file

@ -1699,7 +1699,8 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
nvif_mthd(disp->disp, 0, &args, sizeof(args.base) + args.data[2] * 4);
nvif_mthd(disp->disp, 0, &args,
sizeof(args.base) + drm_eld_size(args.data));
}
static void

View file

@ -340,6 +340,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
#define I915_PARAM_HAS_WT 27
#define I915_PARAM_CMD_PARSER_VERSION 28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
typedef struct drm_i915_getparam {
int param;