Merge tag 'drm-intel-next-2014-05-23' of git://anongit.freedesktop.org/drm-intel into drm-next
- prep refactoring for execlists (Oscar Mateo)
- corner-case fixes for runtime pm (Imre)
- tons of vblank improvements from Ville
- prep work for atomic plane/sprite updates (Ville)
- more chv code, now almost complete (tons of different people)
- refactoring and improvements for drm_irq.c merged through drm-intel-next
- g4x/ilk reset improvements (Ville)
- removal of encoder->mode_set
- moved audio state tracking into pipe_config
- shuffled fb pinning out of the platform crtc modeset callbacks into core code
- userptr support (Chris)
- OOM handling improvements from Chris; we now have a neat OOM notifier which dumps additional debug information
- topdown allocation of ppgtt PDEs (Ben)
- fixes and small improvements all over

* tag 'drm-intel-next-2014-05-23' of git://anongit.freedesktop.org/drm-intel: (187 commits)
  drm/i915: Kill private_default_ctx off
  drm/i915: s/i915_hw_context/intel_context
  drm/i915: Split the ringbuffers from the rings (3/3)
  drm/i915: Split the ringbuffers from the rings (2/3)
  drm/i915: Split the ringbuffers from the rings (1/3)
  drm/i915: s/intel_ring_buffer/intel_engine_cs
  drm/i915: disable GT power saving early during system suspend
  drm/i915: fix possible RPM ref leaking during RPS disabling
  drm/i915: remove user GTT mappings early during runtime suspend
  drm/i915: Implement WaVcpClkGateDisableForMediaReset:ctg, elk
  drm/i915: Fix gen2 and hsw+ scanline counter
  drm/i915: Draw a picture about video timings
  drm/i915: Improve gen3/4 frame counter
  drm/i915: Add a small adjustment to the pixel counter on interlaced modes
  drm/i915: Hold CRTC lock whilst freezing the planes
  drm/i915: Only discard backing storage on releasing the last ref
  drm/i915: Wait for pending page flips before enabling/disabling the primary plane
  drm/i915: grab the audio power domain when enabling audio on HSW+
  drm/i915: don't read HSW_AUD_PIN_ELD_CP_VLD when the power well is off
  drm/i915: move bsd dispatch index somewhere better
  ...
This commit is contained in: commit c4e8541269
49 changed files with 6782 additions and 1775 deletions
Documentation/DocBook/drm.tmpl

@@ -3368,6 +3368,10 @@ void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis>
    with a call to <function>drm_vblank_cleanup</function> in the driver
    <methodname>unload</methodname> operation handler.
  </para>
  <sect2>
    <title>Vertical Blanking and Interrupt Handling Functions Reference</title>
!Edrivers/gpu/drm/drm_irq.c
  </sect2>
</sect1>

<!-- Internals: open/close, file operations and ioctls -->

@@ -3710,17 +3714,16 @@ int num_ioctls;</synopsis>
  <term>DRM_IOCTL_MODESET_CTL</term>
  <listitem>
    <para>
      This should be called by application level drivers before and
      after mode setting, since on many devices the vertical blank
      counter is reset at that time. Internally, the DRM snapshots
      the last vblank count when the ioctl is called with the
      _DRM_PRE_MODESET command, so that the counter won't go backwards
      (which is dealt with when _DRM_POST_MODESET is used).
      This was only used for user-mode-setting drivers around
      modesetting changes to allow the kernel to update the vblank
      interrupt after mode setting, since on many devices the vertical
      blank counter is reset to 0 at some point during modeset. Modern
      drivers should not call this any more since with kernel mode
      setting it is a no-op.
    </para>
  </listitem>
</varlistentry>
</variablelist>
<!--!Edrivers/char/drm/drm_irq.c-->
</para>
</sect1>
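For context on that last paragraph: a user-mode-setting client would bracket its direct modeset with this ioctl so the core could re-synchronize its vblank bookkeeping. Here is a minimal sketch of that legacy call sequence, assuming an already-open DRM fd; the struct and command values come from the drm uapi header, and the error handling is illustrative only:

    #include <sys/ioctl.h>
    #include <xf86drm.h>    /* drmIoctl() */
    #include <drm/drm.h>    /* struct drm_modeset_ctl, DRM_IOCTL_MODESET_CTL */

    /* Bracket a legacy (UMS) modeset so the DRM core can re-sync its
     * software vblank count across the hardware counter reset. */
    static int legacy_modeset(int fd, unsigned int crtc)
    {
        struct drm_modeset_ctl ctl = { .crtc = crtc, .cmd = _DRM_PRE_MODESET };

        if (drmIoctl(fd, DRM_IOCTL_MODESET_CTL, &ctl))
            return -1;

        /* ... program the new mode directly on the hardware ... */

        ctl.cmd = _DRM_POST_MODESET;
        return drmIoctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
    }

On a kernel-mode-setting driver the same calls are harmless no-ops, which is exactly what the updated paragraph above says.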
@@ -3783,6 +3786,96 @@ int num_ioctls;</synopsis>
    probing, so those sections fully apply.
  </para>
</sect2>
<sect2>
  <title>DPIO</title>
!Pdrivers/gpu/drm/i915/i915_reg.h DPIO
  <table id="dpiox2">
    <title>Dual channel PHY (VLV/CHV)</title>
    <tgroup cols="8">
      <colspec colname="c0" />
      <colspec colname="c1" />
      <colspec colname="c2" />
      <colspec colname="c3" />
      <colspec colname="c4" />
      <colspec colname="c5" />
      <colspec colname="c6" />
      <colspec colname="c7" />
      <spanspec spanname="ch0" namest="c0" nameend="c3" />
      <spanspec spanname="ch1" namest="c4" nameend="c7" />
      <spanspec spanname="ch0pcs01" namest="c0" nameend="c1" />
      <spanspec spanname="ch0pcs23" namest="c2" nameend="c3" />
      <spanspec spanname="ch1pcs01" namest="c4" nameend="c5" />
      <spanspec spanname="ch1pcs23" namest="c6" nameend="c7" />
      <thead>
        <row>
          <entry spanname="ch0">CH0</entry>
          <entry spanname="ch1">CH1</entry>
        </row>
      </thead>
      <tbody valign="top" align="center">
        <row>
          <entry spanname="ch0">CMN/PLL/REF</entry>
          <entry spanname="ch1">CMN/PLL/REF</entry>
        </row>
        <row>
          <entry spanname="ch0pcs01">PCS01</entry>
          <entry spanname="ch0pcs23">PCS23</entry>
          <entry spanname="ch1pcs01">PCS01</entry>
          <entry spanname="ch1pcs23">PCS23</entry>
        </row>
        <row>
          <entry>TX0</entry>
          <entry>TX1</entry>
          <entry>TX2</entry>
          <entry>TX3</entry>
          <entry>TX0</entry>
          <entry>TX1</entry>
          <entry>TX2</entry>
          <entry>TX3</entry>
        </row>
        <row>
          <entry spanname="ch0">DDI0</entry>
          <entry spanname="ch1">DDI1</entry>
        </row>
      </tbody>
    </tgroup>
  </table>
  <table id="dpiox1">
    <title>Single channel PHY (CHV)</title>
    <tgroup cols="4">
      <colspec colname="c0" />
      <colspec colname="c1" />
      <colspec colname="c2" />
      <colspec colname="c3" />
      <spanspec spanname="ch0" namest="c0" nameend="c3" />
      <spanspec spanname="ch0pcs01" namest="c0" nameend="c1" />
      <spanspec spanname="ch0pcs23" namest="c2" nameend="c3" />
      <thead>
        <row>
          <entry spanname="ch0">CH0</entry>
        </row>
      </thead>
      <tbody valign="top" align="center">
        <row>
          <entry spanname="ch0">CMN/PLL/REF</entry>
        </row>
        <row>
          <entry spanname="ch0pcs01">PCS01</entry>
          <entry spanname="ch0pcs23">PCS23</entry>
        </row>
        <row>
          <entry>TX0</entry>
          <entry>TX1</entry>
          <entry>TX2</entry>
          <entry>TX3</entry>
        </row>
        <row>
          <entry spanname="ch0">DDI2</entry>
        </row>
      </tbody>
    </tgroup>
  </table>
</sect2>
</sect1>

<sect1>
arch/x86/kernel/early-quirks.c

@@ -418,7 +418,7 @@ static size_t __init gen6_stolen_size(int num, int slot, int func)
    return gmch_ctrl << 25; /* 32 MB units */
}

static size_t gen8_stolen_size(int num, int slot, int func)
static size_t __init gen8_stolen_size(int num, int slot, int func)
{
    u16 gmch_ctrl;

@@ -428,48 +428,73 @@ static size_t gen8_stolen_size(int num, int slot, int func)
    return gmch_ctrl << 25; /* 32 MB units */
}

static size_t __init chv_stolen_size(int num, int slot, int func)
{
    u16 gmch_ctrl;

    gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
    gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
    gmch_ctrl &= SNB_GMCH_GMS_MASK;

    /*
     * 0x0  to 0x10: 32MB increments starting at 0MB
     * 0x11 to 0x16: 4MB increments starting at 8MB
     * 0x17 to 0x1d: 4MB increments starting at 36MB
     */
    if (gmch_ctrl < 0x11)
        return gmch_ctrl << 25;
    else if (gmch_ctrl < 0x17)
        return (gmch_ctrl - 0x11 + 2) << 22;
    else
        return (gmch_ctrl - 0x17 + 9) << 22;
}
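The three ranges in chv_stolen_size() are easy to get wrong, so here is a small self-contained model of the decode that can be compiled and run in userspace to sanity-check the boundary values. The table comes straight from the comment above; the program itself is illustrative and not part of the kernel:

    #include <stdio.h>
    #include <stddef.h>

    /* Userspace model of the CHV graphics-stolen-memory size decode. */
    static size_t chv_stolen_bytes(unsigned int gms)
    {
        if (gms < 0x11)                       /* 32MB units, starting at 0MB */
            return (size_t)gms << 25;
        else if (gms < 0x17)                  /* 4MB units, starting at 8MB */
            return (size_t)(gms - 0x11 + 2) << 22;
        else                                  /* 4MB units, starting at 36MB */
            return (size_t)(gms - 0x17 + 9) << 22;
    }

    int main(void)
    {
        unsigned int samples[] = { 0x00, 0x10, 0x11, 0x16, 0x17, 0x1d };
        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("gms=0x%02x -> %zu MB\n", samples[i],
                   chv_stolen_bytes(samples[i]) >> 20);
        return 0;
    }

Running it shows the expected seams: 0x10 gives 512 MB, 0x11 drops to 8 MB, 0x16 ends at 28 MB, and 0x17 resumes at 36 MB.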
struct intel_stolen_funcs {
    size_t (*size)(int num, int slot, int func);
    u32 (*base)(int num, int slot, int func, size_t size);
};

static const struct intel_stolen_funcs i830_stolen_funcs = {
static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
    .base = i830_stolen_base,
    .size = i830_stolen_size,
};

static const struct intel_stolen_funcs i845_stolen_funcs = {
static const struct intel_stolen_funcs i845_stolen_funcs __initconst = {
    .base = i845_stolen_base,
    .size = i830_stolen_size,
};

static const struct intel_stolen_funcs i85x_stolen_funcs = {
static const struct intel_stolen_funcs i85x_stolen_funcs __initconst = {
    .base = i85x_stolen_base,
    .size = gen3_stolen_size,
};

static const struct intel_stolen_funcs i865_stolen_funcs = {
static const struct intel_stolen_funcs i865_stolen_funcs __initconst = {
    .base = i865_stolen_base,
    .size = gen3_stolen_size,
};

static const struct intel_stolen_funcs gen3_stolen_funcs = {
static const struct intel_stolen_funcs gen3_stolen_funcs __initconst = {
    .base = intel_stolen_base,
    .size = gen3_stolen_size,
};

static const struct intel_stolen_funcs gen6_stolen_funcs = {
static const struct intel_stolen_funcs gen6_stolen_funcs __initconst = {
    .base = intel_stolen_base,
    .size = gen6_stolen_size,
};

static const struct intel_stolen_funcs gen8_stolen_funcs = {
static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
    .base = intel_stolen_base,
    .size = gen8_stolen_size,
};

static struct pci_device_id intel_stolen_ids[] __initdata = {
static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
    .base = intel_stolen_base,
    .size = chv_stolen_size,
};

static const struct pci_device_id intel_stolen_ids[] __initconst = {
    INTEL_I830_IDS(&i830_stolen_funcs),
    INTEL_I845G_IDS(&i845_stolen_funcs),
    INTEL_I85X_IDS(&i85x_stolen_funcs),

@@ -495,7 +520,8 @@ static struct pci_device_id intel_stolen_ids[] __initdata = {
    INTEL_HSW_D_IDS(&gen6_stolen_funcs),
    INTEL_HSW_M_IDS(&gen6_stolen_funcs),
    INTEL_BDW_M_IDS(&gen8_stolen_funcs),
    INTEL_BDW_D_IDS(&gen8_stolen_funcs)
    INTEL_BDW_D_IDS(&gen8_stolen_funcs),
    INTEL_CHV_IDS(&chv_stolen_funcs),
};

static void __init intel_graphics_stolen(int num, int slot, int func)
drivers/gpu/drm/drm_irq.c

@@ -1,6 +1,5 @@
/**
 * \file drm_irq.c
 * IRQ support
/*
 * drm_irq.c IRQ and vblank support
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>

@@ -140,33 +139,40 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)

static void vblank_disable_fn(unsigned long arg)
{
    struct drm_device *dev = (struct drm_device *)arg;
    struct drm_vblank_crtc *vblank = (void *)arg;
    struct drm_device *dev = vblank->dev;
    unsigned long irqflags;
    int i;
    int crtc = vblank->crtc;

    if (!dev->vblank_disable_allowed)
        return;

    for (i = 0; i < dev->num_crtcs; i++) {
        spin_lock_irqsave(&dev->vbl_lock, irqflags);
        if (atomic_read(&dev->vblank[i].refcount) == 0 &&
            dev->vblank[i].enabled) {
            DRM_DEBUG("disabling vblank on crtc %d\n", i);
            vblank_disable_and_save(dev, i);
        }
        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
    spin_lock_irqsave(&dev->vbl_lock, irqflags);
    if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
        DRM_DEBUG("disabling vblank on crtc %d\n", crtc);
        vblank_disable_and_save(dev, crtc);
    }
    spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
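The refactored vblank_disable_fn() takes the per-CRTC drm_vblank_crtc instead of the whole device, so each CRTC's disable timer can fire independently. The enabling trick is storing a back-pointer and index in the per-instance struct. A minimal userspace model of that pattern, assuming nothing beyond standard C:

    #include <stdio.h>

    struct device;

    /* Per-instance state carries a back-pointer and its own index, so a
     * callback handed only this struct can find everything it needs. */
    struct vblank_crtc {
        struct device *dev;
        int crtc;
        int refcount;
        int enabled;
    };

    struct device {
        struct vblank_crtc vblank[4];
    };

    /* Models vblank_disable_fn(): one argument, scoped to one CRTC. */
    static void disable_fn(void *arg)
    {
        struct vblank_crtc *vblank = arg;

        if (vblank->refcount == 0 && vblank->enabled) {
            printf("disabling vblank on crtc %d\n", vblank->crtc);
            vblank->enabled = 0;
        }
    }

    int main(void)
    {
        struct device dev;
        for (int i = 0; i < 4; i++)
            dev.vblank[i] = (struct vblank_crtc){ .dev = &dev, .crtc = i,
                                                  .refcount = 0, .enabled = 1 };
        disable_fn(&dev.vblank[2]);   /* only CRTC 2 is touched */
        return 0;
    }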
/**
 * drm_vblank_cleanup - cleanup vblank support
 * @dev: DRM device
 *
 * This function cleans up any resources allocated in drm_vblank_init.
 */
void drm_vblank_cleanup(struct drm_device *dev)
{
    int crtc;

    /* Bail if the driver didn't call drm_vblank_init() */
    if (dev->num_crtcs == 0)
        return;

    del_timer_sync(&dev->vblank_disable_timer);

    vblank_disable_fn((unsigned long)dev);
    for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
        del_timer_sync(&dev->vblank[crtc].disable_timer);
        vblank_disable_fn((unsigned long)&dev->vblank[crtc]);
    }

    kfree(dev->vblank);

@@ -174,12 +180,20 @@ void drm_vblank_cleanup(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_vblank_cleanup);

/**
 * drm_vblank_init - initialize vblank support
 * @dev: drm_device
 * @num_crtcs: number of crtcs supported by @dev
 *
 * This function initializes vblank support for @num_crtcs display pipelines.
 *
 * Returns:
 * Zero on success or a negative error code on failure.
 */
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
    int i, ret = -ENOMEM;

    setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
                (unsigned long)dev);
    spin_lock_init(&dev->vbl_lock);
    spin_lock_init(&dev->vblank_time_lock);

@@ -189,8 +203,13 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
    if (!dev->vblank)
        goto err;

    for (i = 0; i < num_crtcs; i++)
    for (i = 0; i < num_crtcs; i++) {
        dev->vblank[i].dev = dev;
        dev->vblank[i].crtc = i;
        init_waitqueue_head(&dev->vblank[i].queue);
        setup_timer(&dev->vblank[i].disable_timer, vblank_disable_fn,
                    (unsigned long)&dev->vblank[i]);
    }

    DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
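Tying the two entry points together: a KMS driver calls drm_vblank_init() once it knows how many CRTCs it exposes, and drm_vblank_cleanup() on its teardown path. A hedged sketch of that pairing; the foo_* names and the surrounding driver structure are hypothetical, not from this patch:

    /* Illustrative load/unload pairing for a hypothetical KMS driver. */
    static int foo_driver_load(struct drm_device *dev)
    {
        int ret;

        ret = drm_vblank_init(dev, 2);   /* this imaginary hw has 2 CRTCs */
        if (ret)
            return ret;

        /* ... set up modeset state, install the IRQ handler, ... */
        return 0;
    }

    static int foo_driver_unload(struct drm_device *dev)
    {
        /* ... tear down IRQs and modeset state first ... */
        drm_vblank_cleanup(dev);
        return 0;
    }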
@@ -234,13 +253,21 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
}

/**
 * Install IRQ handler.
 *
 * \param dev DRM device.
 * drm_irq_install - install IRQ handler
 * @dev: DRM device
 * @irq: IRQ number to install the handler for
 *
 * Initializes the IRQ related data. Installs the handler, calling the driver
 * \c irq_preinstall() and \c irq_postinstall() functions
 * before and after the installation.
 * irq_preinstall() and irq_postinstall() functions before and after the
 * installation.
 *
 * This is the simplified helper interface provided for drivers with no special
 * needs. Drivers which need to install interrupt handlers for multiple
 * interrupts must instead set drm_device->irq_enabled to signal the DRM core
 * that vblank interrupts are available.
 *
 * Returns:
 * Zero on success or a negative error code on failure.
 */
int drm_irq_install(struct drm_device *dev, int irq)
{

@@ -300,11 +327,20 @@ int drm_irq_install(struct drm_device *dev, int irq)
EXPORT_SYMBOL(drm_irq_install);

/**
 * Uninstall the IRQ handler.
 * drm_irq_uninstall - uninstall the IRQ handler
 * @dev: DRM device
 *
 * \param dev DRM device.
 * Calls the driver's irq_uninstall() function and unregisters the IRQ handler.
 * This should only be called by drivers which used drm_irq_install() to set up
 * their interrupt handler. Other drivers must only reset
 * drm_device->irq_enabled to false.
 *
 * Calls the driver's \c irq_uninstall() function, and stops the irq.
 * Note that for kernel modesetting drivers it is a bug if this function fails.
 * The sanity checks are only to catch buggy user modesetting drivers which call
 * the same function through an ioctl.
 *
 * Returns:
 * Zero on success or a negative error code on failure.
 */
int drm_irq_uninstall(struct drm_device *dev)
{
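As the new kernel-doc says, drm_irq_install() is the single-interrupt convenience path. A hedged sketch of how a driver wires it up; the hook names follow struct drm_driver, while the foo_* bodies are hypothetical:

    /* Hypothetical single-IRQ driver using the helper; the hooks are
     * invoked by drm_irq_install() around request_irq(). */
    static void foo_irq_preinstall(struct drm_device *dev)
    {
        /* mask and clear all interrupt sources */
    }

    static int foo_irq_postinstall(struct drm_device *dev)
    {
        /* unmask the vblank interrupt sources */
        return 0;
    }

    static int foo_load(struct drm_device *dev)
    {
        /* assuming a PCI device: hand the helper its one IRQ line */
        return drm_irq_install(dev, dev->pdev->irq);
    }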
@@ -349,7 +385,7 @@ int drm_irq_uninstall(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_irq_uninstall);

/**
/*
 * IRQ control ioctl.
 *
 * \param inode device inode.

@@ -402,15 +438,14 @@ int drm_control(struct drm_device *dev, void *data,
}

/**
 * drm_calc_timestamping_constants - Calculate vblank timestamp constants
 *
 * @crtc drm_crtc whose timestamp constants should be updated.
 * @mode display mode containing the scanout timings
 * drm_calc_timestamping_constants - calculate vblank timestamp constants
 * @crtc: drm_crtc whose timestamp constants should be updated.
 * @mode: display mode containing the scanout timings
 *
 * Calculate and store various constants which are later
 * needed by vblank and swap-completion timestamping, e.g.,
 * by drm_calc_vbltimestamp_from_scanoutpos(). They are
 * derived from crtc's true scanout timing, so they take
 * derived from CRTC's true scanout timing, so they take
 * things like panel scaling or other adjustments into account.
 */
void drm_calc_timestamping_constants(struct drm_crtc *crtc,

@@ -455,11 +490,22 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_calc_timestamping_constants);

/**
 * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
 * drivers. Implements calculation of exact vblank timestamps from
 * given drm_display_mode timings and current video scanout position
 * of a crtc. This can be called from within get_vblank_timestamp()
 * implementation of a kms driver to implement the actual timestamping.
 * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
 * @dev: DRM device
 * @crtc: Which CRTC's vblank timestamp to retrieve
 * @max_error: Desired maximum allowable error in timestamps (nanosecs)
 *             On return contains true maximum error of timestamp
 * @vblank_time: Pointer to struct timeval which should receive the timestamp
 * @flags: Flags to pass to driver:
 *         0 = Default,
 *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
 * @refcrtc: CRTC which defines scanout timing
 * @mode: mode which defines the scanout timings
 *
 * Implements calculation of exact vblank timestamps from given drm_display_mode
 * timings and current video scanout position of a CRTC. This can be called from
 * within get_vblank_timestamp() implementation of a kms driver to implement the
 * actual timestamping.
 *
 * Should return timestamps conforming to the OML_sync_control OpenML
 * extension specification. The timestamp corresponds to the end of

@@ -474,21 +520,11 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
 * returns as no operation if a doublescan or interlaced video mode is
 * active. Higher level code is expected to handle this.
 *
 * @dev: DRM device.
 * @crtc: Which crtc's vblank timestamp to retrieve.
 * @max_error: Desired maximum allowable error in timestamps (nanosecs).
 *             On return contains true maximum error of timestamp.
 * @vblank_time: Pointer to struct timeval which should receive the timestamp.
 * @flags: Flags to pass to driver:
 *         0 = Default.
 *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
 * @refcrtc: drm_crtc* of crtc which defines scanout timing.
 * @mode: mode which defines the scanout timings
 *
 * Returns negative value on error, failure or if not supported in current
 * Returns:
 * Negative value on error, failure or if not supported in current
 * video mode:
 *
 * -EINVAL - Invalid crtc.
 * -EINVAL - Invalid CRTC.
 * -EAGAIN - Temporary unavailable, e.g., called before initial modeset.
 * -ENOTSUPP - Function not supported in current display mode.
 * -EIO - Failed, e.g., due to failed scanout position query.
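The core idea behind the scanout-position method is simple arithmetic: given how many scanlines have elapsed since the vblank boundary, the helper converts that distance into time using the per-mode line duration that drm_calc_timestamping_constants() precomputed, then subtracts it from the current clock. A toy model of the calculation, with all numbers invented for illustration:

    #include <stdio.h>

    int main(void)
    {
        /* Invented mode: ~60 Hz with a 1125-line total frame. */
        long long linedur_ns = 1000000000LL / (60 * 1125);  /* ~14814 ns/line */
        int scanline = 1200;            /* current position, inside vblank */
        int vbl_start = 1120;           /* first vblank line */
        long long now_ns = 5000000000LL;

        /* Lines past the start of vblank, converted to time, give the
         * (earlier) moment this vblank period actually began. */
        long long delta_ns = (long long)(scanline - vbl_start) * linedur_ns;
        printf("vblank timestamp = %lld ns\n", now_ns - delta_ns);
        return 0;
    }

The real helper also folds in the measured query latency (hence @max_error) and the flags that tell it whether it runs inside the vblank IRQ itself.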
@@ -637,23 +673,23 @@ static struct timeval get_drm_timestamp(void)

/**
 * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
 * vblank interval.
 *
 * vblank interval
 * @dev: DRM device
 * @crtc: which crtc's vblank timestamp to retrieve
 * @crtc: which CRTC's vblank timestamp to retrieve
 * @tvblank: Pointer to target struct timeval which should receive the timestamp
 * @flags: Flags to pass to driver:
 *         0 = Default.
 *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
 *         0 = Default,
 *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
 *
 * Fetches the system timestamp corresponding to the time of the most recent
 * vblank interval on specified crtc. May call into kms-driver to
 * vblank interval on specified CRTC. May call into kms-driver to
 * compute the timestamp with a high-precision GPU specific method.
 *
 * Returns zero if timestamp originates from uncorrected do_gettimeofday()
 * call, i.e., it isn't very precisely locked to the true vblank.
 *
 * Returns non-zero if timestamp is considered to be very precise.
 * Returns:
 * Non-zero if timestamp is considered to be very precise, zero otherwise.
 */
u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
                              struct timeval *tvblank, unsigned flags)

@@ -688,6 +724,9 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp);
 * Fetches the "cooked" vblank count value that represents the number of
 * vblank events since the system was booted, including lost events due to
 * modesetting activity.
 *
 * Returns:
 * The software vblank counter.
 */
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{

@@ -706,8 +745,7 @@ EXPORT_SYMBOL(drm_vblank_count);
 * Fetches the "cooked" vblank count value that represents the number of
 * vblank events since the system was booted, including lost events due to
 * modesetting activity. Returns corresponding system timestamp of the time
 * of the vblank interval that corresponds to the current value vblank counter
 * value.
 * of the vblank interval that corresponds to the current vblank counter value.
 */
u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
                              struct timeval *vblanktime)

@@ -835,6 +873,42 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
    smp_mb__after_atomic_inc();
}

/**
 * drm_vblank_enable - enable the vblank interrupt on a CRTC
 * @dev: DRM device
 * @crtc: CRTC in question
 */
static int drm_vblank_enable(struct drm_device *dev, int crtc)
{
    int ret = 0;

    assert_spin_locked(&dev->vbl_lock);

    spin_lock(&dev->vblank_time_lock);

    if (!dev->vblank[crtc].enabled) {
        /*
         * Enable vblank irqs under vblank_time_lock protection.
         * All vblank count & timestamp updates are held off
         * until we are done reinitializing master counter and
         * timestamps. Filter code in drm_handle_vblank() will
         * prevent double-accounting of same vblank interval.
         */
        ret = dev->driver->enable_vblank(dev, crtc);
        DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
        if (ret)
            atomic_dec(&dev->vblank[crtc].refcount);
        else {
            dev->vblank[crtc].enabled = true;
            drm_update_vblank_count(dev, crtc);
        }
    }

    spin_unlock(&dev->vblank_time_lock);

    return ret;
}

/**
 * drm_vblank_get - get a reference count on vblank events
 * @dev: DRM device
@@ -843,36 +917,20 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 * Acquire a reference count on vblank events to avoid having them disabled
 * while in use.
 *
 * RETURNS
 * This is the legacy version of drm_crtc_vblank_get().
 *
 * Returns:
 * Zero on success, nonzero on failure.
 */
int drm_vblank_get(struct drm_device *dev, int crtc)
{
    unsigned long irqflags, irqflags2;
    unsigned long irqflags;
    int ret = 0;

    spin_lock_irqsave(&dev->vbl_lock, irqflags);
    /* Going from 0->1 means we have to enable interrupts again */
    if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
        spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
        if (!dev->vblank[crtc].enabled) {
            /* Enable vblank irqs under vblank_time_lock protection.
             * All vblank count & timestamp updates are held off
             * until we are done reinitializing master counter and
             * timestamps. Filter code in drm_handle_vblank() will
             * prevent double-accounting of same vblank interval.
             */
            ret = dev->driver->enable_vblank(dev, crtc);
            DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
                      crtc, ret);
            if (ret)
                atomic_dec(&dev->vblank[crtc].refcount);
            else {
                dev->vblank[crtc].enabled = true;
                drm_update_vblank_count(dev, crtc);
            }
        }
        spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
        ret = drm_vblank_enable(dev, crtc);
    } else {
        if (!dev->vblank[crtc].enabled) {
            atomic_dec(&dev->vblank[crtc].refcount);

@@ -885,6 +943,24 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_get);

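The 0->1 transition in drm_vblank_get() is the only point where the interrupt actually gets enabled; every later caller just bumps the counter, and the last drm_vblank_put() arms the deferred disable. A small standalone model of that contract, in plain C with no locking and purely for illustration:

    #include <stdio.h>

    static int refcount, enabled;

    /* Models drm_vblank_get(): enable hardware only on the 0->1 edge. */
    static int vblank_get(void)
    {
        if (++refcount == 1 && !enabled) {
            printf("enable_vblank()\n");   /* driver hook fires exactly once */
            enabled = 1;
        }
        return 0;
    }

    /* Models drm_vblank_put(): the last user arms a delayed disable. */
    static void vblank_put(void)
    {
        if (--refcount == 0)
            printf("arm disable timer\n");
    }

    int main(void)
    {
        vblank_get();   /* enables the interrupt */
        vblank_get();   /* refcount only */
        vblank_put();
        vblank_put();   /* schedules the deferred disable */
        return 0;
    }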
/**
 * drm_crtc_vblank_get - get a reference count on vblank events
 * @crtc: which CRTC to own
 *
 * Acquire a reference count on vblank events to avoid having them disabled
 * while in use.
 *
 * This is the native kms version of drm_vblank_get().
 *
 * Returns:
 * Zero on success, nonzero on failure.
 */
int drm_crtc_vblank_get(struct drm_crtc *crtc)
{
    return drm_vblank_get(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_get);

/**
 * drm_vblank_put - give up ownership of vblank events
 * @dev: DRM device

@@ -892,6 +968,8 @@ EXPORT_SYMBOL(drm_vblank_get);
 *
 * Release ownership of a given vblank counter, turning off interrupts
 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
 *
 * This is the legacy version of drm_crtc_vblank_put().
 */
void drm_vblank_put(struct drm_device *dev, int crtc)
{

@@ -900,17 +978,39 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
    /* Last user schedules interrupt disable */
    if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
        (drm_vblank_offdelay > 0))
        mod_timer(&dev->vblank_disable_timer,
        mod_timer(&dev->vblank[crtc].disable_timer,
                  jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
EXPORT_SYMBOL(drm_vblank_put);

/**
 * drm_crtc_vblank_put - give up ownership of vblank events
 * @crtc: which counter to give up
 *
 * Release ownership of a given vblank counter, turning off interrupts
 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
 *
 * This is the native kms version of drm_vblank_put().
 */
void drm_crtc_vblank_put(struct drm_crtc *crtc)
{
    drm_vblank_put(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_put);

/**
 * drm_vblank_off - disable vblank events on a CRTC
 * @dev: DRM device
 * @crtc: CRTC in question
 *
 * Caller must hold event lock.
 * Drivers can use this function to shut down the vblank interrupt handling when
 * disabling a crtc. This function ensures that the latest vblank frame count is
 * stored so that drm_vblank_on() can restore it again.
 *
 * Drivers must use this function when the hardware vblank counter can get
 * reset, e.g. when suspending.
 *
 * This is the legacy version of drm_crtc_vblank_off().
 */
void drm_vblank_off(struct drm_device *dev, int crtc)
{

@@ -943,6 +1043,66 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_off);

/**
 * drm_crtc_vblank_off - disable vblank events on a CRTC
 * @crtc: CRTC in question
 *
 * Drivers can use this function to shut down the vblank interrupt handling when
 * disabling a crtc. This function ensures that the latest vblank frame count is
 * stored so that drm_vblank_on() can restore it again.
 *
 * Drivers must use this function when the hardware vblank counter can get
 * reset, e.g. when suspending.
 *
 * This is the native kms version of drm_vblank_off().
 */
void drm_crtc_vblank_off(struct drm_crtc *crtc)
{
    drm_vblank_off(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_off);

/**
 * drm_vblank_on - enable vblank events on a CRTC
 * @dev: DRM device
 * @crtc: CRTC in question
 *
 * This function restores the vblank interrupt state captured with
 * drm_vblank_off() again. Note that calls to drm_vblank_on() and
 * drm_vblank_off() can be unbalanced and so can also be unconditionally called
 * in driver load code to reflect the current hardware state of the crtc.
 *
 * This is the legacy version of drm_crtc_vblank_on().
 */
void drm_vblank_on(struct drm_device *dev, int crtc)
{
    unsigned long irqflags;

    spin_lock_irqsave(&dev->vbl_lock, irqflags);
    /* re-enable interrupts if there are users left */
    if (atomic_read(&dev->vblank[crtc].refcount) != 0)
        WARN_ON(drm_vblank_enable(dev, crtc));
    spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_on);

/**
 * drm_crtc_vblank_on - enable vblank events on a CRTC
 * @crtc: CRTC in question
 *
 * This function restores the vblank interrupt state captured with
 * drm_vblank_off() again. Note that calls to drm_vblank_on() and
 * drm_vblank_off() can be unbalanced and so can also be unconditionally called
 * in driver load code to reflect the current hardware state of the crtc.
 *
 * This is the native kms version of drm_vblank_on().
 */
void drm_crtc_vblank_on(struct drm_crtc *crtc)
{
    drm_vblank_on(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_on);
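Put together, a modern KMS driver brackets its CRTC enable/disable paths with these helpers so the hardware counter reset around a full pipe power-down never leaks into the cooked count. A hedged sketch; the foo_* callbacks stand in for whatever crtc enable/disable hooks a driver actually uses:

    /* Hypothetical CRTC callbacks bracketing vblank state. */
    static void foo_crtc_enable(struct drm_crtc *crtc)
    {
        /* ... power up the pipe, start scanout ... */
        drm_crtc_vblank_on(crtc);    /* counter is meaningful again */
    }

    static void foo_crtc_disable(struct drm_crtc *crtc)
    {
        drm_crtc_vblank_off(crtc);   /* save the count before it resets */
        /* ... stop scanout, power down the pipe ... */
    }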
/**
 * drm_vblank_pre_modeset - account for vblanks across mode sets
 * @dev: DRM device

@@ -950,6 +1110,21 @@ EXPORT_SYMBOL(drm_vblank_off);
 *
 * Account for vblank events across mode setting events, which will likely
 * reset the hardware frame counter.
 *
 * This is done by grabbing a temporary vblank reference to ensure that the
 * vblank interrupt keeps running across the modeset sequence. With this the
 * software-side vblank frame counting will ensure that there are no jumps or
 * discontinuities.
 *
 * Unfortunately this approach is racy and also doesn't work when the vblank
 * interrupt stops running, e.g. across system suspend/resume. It is therefore
 * highly recommended that drivers use the newer drm_vblank_off() and
 * drm_vblank_on() instead. drm_vblank_pre_modeset() only works correctly when
 * using "cooked" software vblank frame counters and not relying on any hardware
 * counters.
 *
 * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
 * again.
 */
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{

@@ -971,6 +1146,14 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_pre_modeset);

/**
 * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
 * @dev: DRM device
 * @crtc: CRTC in question
 *
 * This function again drops the temporary vblank reference acquired in
 * drm_vblank_pre_modeset.
 */
void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{
    unsigned long irqflags;

@@ -992,7 +1175,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_post_modeset);

/**
/*
 * drm_modeset_ctl - handle vblank event counter changes across mode switch
 * @DRM_IOCTL_ARGS: standard ioctl arguments
 *

@@ -1105,7 +1288,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
    return ret;
}

/**
/*
 * Wait for VBLANK.
 *
 * \param inode device inode.

@@ -1116,7 +1299,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 *
 * This function enables the vblank interrupt on the pipe requested, then
 * sleeps waiting for the requested sequence number to occur, and drops
 * the vblank interrupt refcount afterwards. (vblank irq disable follows that
 * the vblank interrupt refcount afterwards. (vblank IRQ disable follows that
 * after a timeout with no further vblank waits scheduled).
 */
int drm_wait_vblank(struct drm_device *dev, void *data,

@@ -1187,6 +1370,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
    DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ,
                (((drm_vblank_count(dev, crtc) -
                   vblwait->request.sequence) <= (1 << 23)) ||
                 !dev->vblank[crtc].enabled ||
                 !dev->irq_enabled));

    if (ret != -EINTR) {
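The `(a - b) <= (1 << 23)` test in the wait condition is the standard trick for comparing sequence numbers on a counter that wraps: unsigned subtraction maps "b is in the past, or barely ahead" onto a small difference. A standalone demonstration in plain C, with all values invented:

    #include <stdio.h>

    /* True when the requested sequence has been reached, even if the
     * 32-bit counter wrapped between request and completion. */
    static int seq_reached(unsigned int current_seq, unsigned int requested)
    {
        return (current_seq - requested) <= (1u << 23);
    }

    int main(void)
    {
        printf("%d\n", seq_reached(100, 90));          /* 1: already passed */
        printf("%d\n", seq_reached(5, 0xfffffff0u));   /* 1: passed across wrap */
        printf("%d\n", seq_reached(90, 100));          /* 0: still in the future */
        return 0;
    }

Because the window is 2^23 counts wide, a request can be up to about eight million vblanks in the future before it is misread as already complete.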
drivers/gpu/drm/i915/Kconfig

@@ -5,6 +5,7 @@ config DRM_I915
    depends on (AGP || AGP=n)
    select INTEL_GTT
    select AGP_INTEL if AGP
    select INTERVAL_TREE
    # we need shmfs for the swappable backing store, and in particular
    # the shmem_readpage() which depends upon tmpfs
    select SHMEM

drivers/gpu/drm/i915/Makefile

@@ -18,6 +18,7 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
# GEM code
i915-y += i915_cmd_parser.o \
          i915_gem_context.o \
          i915_gem_render_state.o \
          i915_gem_debug.o \
          i915_gem_dmabuf.o \
          i915_gem_evict.o \

@@ -26,12 +27,18 @@ i915-y += i915_cmd_parser.o \
          i915_gem.o \
          i915_gem_stolen.o \
          i915_gem_tiling.o \
          i915_gem_userptr.o \
          i915_gpu_error.o \
          i915_irq.o \
          i915_trace_points.o \
          intel_ringbuffer.o \
          intel_uncore.o

# autogenerated null render state
i915-y += intel_renderstate_gen6.o \
          intel_renderstate_gen7.o \
          intel_renderstate_gen8.o

# modesetting core code
i915-y += intel_bios.o \
          intel_display.o \
drivers/gpu/drm/i915/i915_cmd_parser.c

@@ -498,16 +498,18 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
    return 0;
}

static bool validate_cmds_sorted(struct intel_ring_buffer *ring)
static bool validate_cmds_sorted(struct intel_engine_cs *ring,
                                 const struct drm_i915_cmd_table *cmd_tables,
                                 int cmd_table_count)
{
    int i;
    bool ret = true;

    if (!ring->cmd_tables || ring->cmd_table_count == 0)
    if (!cmd_tables || cmd_table_count == 0)
        return true;

    for (i = 0; i < ring->cmd_table_count; i++) {
        const struct drm_i915_cmd_table *table = &ring->cmd_tables[i];
    for (i = 0; i < cmd_table_count; i++) {
        const struct drm_i915_cmd_table *table = &cmd_tables[i];
        u32 previous = 0;
        int j;

@@ -550,35 +552,103 @@ static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count)
    return ret;
}

static bool validate_regs_sorted(struct intel_ring_buffer *ring)
static bool validate_regs_sorted(struct intel_engine_cs *ring)
{
    return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
           check_sorted(ring->id, ring->master_reg_table,
                        ring->master_reg_count);
}

struct cmd_node {
    const struct drm_i915_cmd_descriptor *desc;
    struct hlist_node node;
};

/*
 * Different command ranges have different numbers of bits for the opcode. For
 * example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
 * problem is that, for example, MI commands use bits 22:16 for other fields
 * such as GGTT vs PPGTT bits. If we include those bits in the mask then when
 * we mask a command from a batch it could hash to the wrong bucket due to
 * non-opcode bits being set. But if we don't include those bits, some 3D
 * commands may hash to the same bucket due to not including opcode bits that
 * make the command unique. For now, we will risk hashing to the same bucket.
 *
 * If we attempt to generate a perfect hash, we should be able to look at bits
 * 31:29 of a command from a batch buffer and use the full mask for that
 * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
 */
#define CMD_HASH_MASK STD_MI_OPCODE_MASK

static int init_hash_table(struct intel_engine_cs *ring,
                           const struct drm_i915_cmd_table *cmd_tables,
                           int cmd_table_count)
{
    int i, j;

    hash_init(ring->cmd_hash);

    for (i = 0; i < cmd_table_count; i++) {
        const struct drm_i915_cmd_table *table = &cmd_tables[i];

        for (j = 0; j < table->count; j++) {
            const struct drm_i915_cmd_descriptor *desc =
                &table->table[j];
            struct cmd_node *desc_node =
                kmalloc(sizeof(*desc_node), GFP_KERNEL);

            if (!desc_node)
                return -ENOMEM;

            desc_node->desc = desc;
            hash_add(ring->cmd_hash, &desc_node->node,
                     desc->cmd.value & CMD_HASH_MASK);
        }
    }

    return 0;
}
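The tradeoff described in the comment above, hashing on a mask narrower than some opcodes, means distinct commands can share a bucket, so every lookup must still compare each candidate against its own full mask. A compact userspace model of that bucket-then-verify pattern, with the mask and opcode values invented for illustration:

    #include <stdio.h>

    #define CMD_HASH_MASK 0xff800000u  /* invented stand-in for the MI opcode mask */
    #define NBUCKETS 16

    struct cmd_desc { unsigned int mask, value; };

    /* Bucket index from the masked command; collisions are expected. */
    static unsigned int bucket(unsigned int cmd)
    {
        return (cmd & CMD_HASH_MASK) % NBUCKETS;
    }

    /* After hashing, every candidate in the bucket is re-checked with its
     * own full mask -- mirroring the masked_cmd/masked_value compare in
     * find_cmd_in_table() further down. */
    static int matches(const struct cmd_desc *d, unsigned int cmd)
    {
        return (d->mask & cmd) == (d->value & d->mask);
    }

    int main(void)
    {
        struct cmd_desc mi_noop = { .mask = 0xff800000u, .value = 0x00000000u };
        unsigned int cmd = 0x00010000u;  /* opcode 0 with low bits set */

        printf("bucket=%u match=%d\n", bucket(cmd), matches(&mi_noop, cmd));
        return 0;
    }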
static void fini_hash_table(struct intel_engine_cs *ring)
{
    struct hlist_node *tmp;
    struct cmd_node *desc_node;
    int i;

    hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
        hash_del(&desc_node->node);
        kfree(desc_node);
    }
}

/**
 * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
 * @ring: the ringbuffer to initialize
 *
 * Optionally initializes fields related to batch buffer command parsing in the
 * struct intel_ring_buffer based on whether the platform requires software
 * struct intel_engine_cs based on whether the platform requires software
 * command parsing.
 *
 * Return: non-zero if initialization fails
 */
void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
{
    const struct drm_i915_cmd_table *cmd_tables;
    int cmd_table_count;
    int ret;

    if (!IS_GEN7(ring->dev))
        return;
        return 0;

    switch (ring->id) {
    case RCS:
        if (IS_HASWELL(ring->dev)) {
            ring->cmd_tables = hsw_render_ring_cmds;
            ring->cmd_table_count =
            cmd_tables = hsw_render_ring_cmds;
            cmd_table_count =
                ARRAY_SIZE(hsw_render_ring_cmds);
        } else {
            ring->cmd_tables = gen7_render_cmds;
            ring->cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
            cmd_tables = gen7_render_cmds;
            cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
        }

        ring->reg_table = gen7_render_regs;

@@ -595,17 +665,17 @@ void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
        ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
        break;
    case VCS:
        ring->cmd_tables = gen7_video_cmds;
        ring->cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
        cmd_tables = gen7_video_cmds;
        cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
        ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
        break;
    case BCS:
        if (IS_HASWELL(ring->dev)) {
            ring->cmd_tables = hsw_blt_ring_cmds;
            ring->cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
            cmd_tables = hsw_blt_ring_cmds;
            cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
        } else {
            ring->cmd_tables = gen7_blt_cmds;
            ring->cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
            cmd_tables = gen7_blt_cmds;
            cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
        }

        ring->reg_table = gen7_blt_regs;

@@ -622,8 +692,8 @@ void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
        ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
        break;
    case VECS:
        ring->cmd_tables = hsw_vebox_cmds;
        ring->cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
        cmd_tables = hsw_vebox_cmds;
        cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
        /* VECS can use the same length_mask function as VCS */
        ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
        break;

@@ -633,18 +703,45 @@ void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
        BUG();
    }

    BUG_ON(!validate_cmds_sorted(ring));
    BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
    BUG_ON(!validate_regs_sorted(ring));

    ret = init_hash_table(ring, cmd_tables, cmd_table_count);
    if (ret) {
        DRM_ERROR("CMD: cmd_parser_init failed!\n");
        fini_hash_table(ring);
        return ret;
    }

    ring->needs_cmd_parser = true;

    return 0;
}

/**
 * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
 * @ring: the ringbuffer to clean up
 *
 * Releases any resources related to command parsing that may have been
 * initialized for the specified ring.
 */
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
{
    if (!ring->needs_cmd_parser)
        return;

    fini_hash_table(ring);
}

static const struct drm_i915_cmd_descriptor*
find_cmd_in_table(const struct drm_i915_cmd_table *table,
find_cmd_in_table(struct intel_engine_cs *ring,
                  u32 cmd_header)
{
    int i;
    struct cmd_node *desc_node;

    for (i = 0; i < table->count; i++) {
        const struct drm_i915_cmd_descriptor *desc = &table->table[i];
    hash_for_each_possible(ring->cmd_hash, desc_node, node,
                           cmd_header & CMD_HASH_MASK) {
        const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
        u32 masked_cmd = desc->cmd.mask & cmd_header;
        u32 masked_value = desc->cmd.value & desc->cmd.mask;

@@ -664,20 +761,16 @@ find_cmd_in_table(const struct drm_i915_cmd_table *table,
 * ring's default length encoding and returns default_desc.
 */
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_ring_buffer *ring,
find_cmd(struct intel_engine_cs *ring,
         u32 cmd_header,
         struct drm_i915_cmd_descriptor *default_desc)
{
    const struct drm_i915_cmd_descriptor *desc;
    u32 mask;
    int i;

    for (i = 0; i < ring->cmd_table_count; i++) {
        const struct drm_i915_cmd_descriptor *desc;

        desc = find_cmd_in_table(&ring->cmd_tables[i], cmd_header);
        if (desc)
            return desc;
    }
    desc = find_cmd_in_table(ring, cmd_header);
    if (desc)
        return desc;

    mask = ring->get_cmd_length_mask(cmd_header);
    if (!mask)

@@ -744,12 +837,11 @@ static u32 *vmap_batch(struct drm_i915_gem_object *obj)
 *
 * Return: true if the ring requires software command parsing
 */
bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
    struct drm_i915_private *dev_priv = ring->dev->dev_private;

    /* No command tables indicates a platform without parsing */
    if (!ring->cmd_tables)
    if (!ring->needs_cmd_parser)
        return false;

    /*

@@ -763,7 +855,7 @@ bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
    return (i915.enable_cmd_parser == 1);
}

static bool check_cmd(const struct intel_ring_buffer *ring,
static bool check_cmd(const struct intel_engine_cs *ring,
                      const struct drm_i915_cmd_descriptor *desc,
                      const u32 *cmd,
                      const bool is_master,

@@ -865,7 +957,7 @@ static bool check_cmd(const struct intel_ring_buffer *ring,
 *
 * Return: non-zero if the parser finds violations or otherwise fails
 */
int i915_parse_cmds(struct intel_ring_buffer *ring,
int i915_parse_cmds(struct intel_engine_cs *ring,
                    struct drm_i915_gem_object *batch_obj,
                    u32 batch_start_offset,
                    bool is_master)
drivers/gpu/drm/i915/i915_debugfs.c

@@ -79,7 +79,7 @@ drm_add_fake_info_node(struct drm_minor *minor,

static int i915_capabilities(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    const struct intel_device_info *info = INTEL_INFO(dev);
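The recurring change through this file simply drops an explicit cast: in C, a void pointer (here the seq_file private field) converts implicitly to any object pointer type, so the cast was pure noise. A two-function illustration, standalone rather than kernel code:

    #include <stdio.h>

    struct node { int id; };

    static void show(void *private)   /* plays the role of seq_file::private */
    {
        struct node *n = private;     /* implicit void* conversion; no cast needed */
        printf("%d\n", n->id);
    }

    int main(void)
    {
        struct node n = { .id = 42 };
        show(&n);
        return 0;
    }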
@@ -172,7 +172,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
        seq_printf(m, " (%s)", obj->ring->name);
}

static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
    seq_putc(m, ctx->is_initialized ? 'I' : 'i');
    seq_putc(m, ctx->remap_slice ? 'R' : 'r');

@@ -181,7 +181,7 @@ static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    uintptr_t list = (uintptr_t) node->info_ent->data;
    struct list_head *head;
    struct drm_device *dev = node->minor->dev;

@@ -239,7 +239,7 @@ static int obj_rank_by_stolen(void *priv,

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_i915_gem_object *obj;

@@ -371,7 +371,7 @@ static int per_file_stats(int id, void *ptr, void *data)

static int i915_gem_object_info(struct seq_file *m, void* data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 count, mappable_count, purgeable_count;

@@ -474,7 +474,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    uintptr_t list = (uintptr_t) node->info_ent->data;
    struct drm_i915_private *dev_priv = dev->dev_private;

@@ -509,12 +509,12 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    unsigned long flags;
    struct intel_crtc *crtc;

    list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
    for_each_intel_crtc(dev, crtc) {
        const char pipe = pipe_name(crtc->pipe);
        const char plane = plane_name(crtc->plane);
        struct intel_unpin_work *work;

@@ -559,10 +559,10 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)

static int i915_gem_request_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_ring_buffer *ring;
    struct intel_engine_cs *ring;
    struct drm_i915_gem_request *gem_request;
    int ret, count, i;

@@ -594,7 +594,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
}

static void i915_ring_seqno_info(struct seq_file *m,
                                 struct intel_ring_buffer *ring)
                                 struct intel_engine_cs *ring)
{
    if (ring->get_seqno) {
        seq_printf(m, "Current sequence (%s): %u\n",

@@ -604,10 +604,10 @@ static void i915_ring_seqno_info(struct seq_file *m,

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_ring_buffer *ring;
    struct intel_engine_cs *ring;
    int ret, i;

    ret = mutex_lock_interruptible(&dev->struct_mutex);

@@ -627,10 +627,10 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)

static int i915_interrupt_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_ring_buffer *ring;
    struct intel_engine_cs *ring;
    int ret, i, pipe;

    ret = mutex_lock_interruptible(&dev->struct_mutex);

@@ -638,7 +638,47 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
        return ret;
    intel_runtime_pm_get(dev_priv);

    if (INTEL_INFO(dev)->gen >= 8) {
    if (IS_CHERRYVIEW(dev)) {
        int i;
        seq_printf(m, "Master Interrupt Control:\t%08x\n",
                   I915_READ(GEN8_MASTER_IRQ));

        seq_printf(m, "Display IER:\t%08x\n",
                   I915_READ(VLV_IER));
        seq_printf(m, "Display IIR:\t%08x\n",
                   I915_READ(VLV_IIR));
        seq_printf(m, "Display IIR_RW:\t%08x\n",
                   I915_READ(VLV_IIR_RW));
        seq_printf(m, "Display IMR:\t%08x\n",
                   I915_READ(VLV_IMR));
        for_each_pipe(pipe)
            seq_printf(m, "Pipe %c stat:\t%08x\n",
                       pipe_name(pipe),
                       I915_READ(PIPESTAT(pipe)));

        seq_printf(m, "Port hotplug:\t%08x\n",
                   I915_READ(PORT_HOTPLUG_EN));
        seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                   I915_READ(VLV_DPFLIPSTAT));
        seq_printf(m, "DPINVGTT:\t%08x\n",
                   I915_READ(DPINVGTT));

        for (i = 0; i < 4; i++) {
            seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
                       i, I915_READ(GEN8_GT_IMR(i)));
            seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
                       i, I915_READ(GEN8_GT_IIR(i)));
            seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
                       i, I915_READ(GEN8_GT_IER(i)));
        }

        seq_printf(m, "PCU interrupt mask:\t%08x\n",
                   I915_READ(GEN8_PCU_IMR));
        seq_printf(m, "PCU interrupt identity:\t%08x\n",
                   I915_READ(GEN8_PCU_IIR));
        seq_printf(m, "PCU interrupt enable:\t%08x\n",
                   I915_READ(GEN8_PCU_IER));
    } else if (INTEL_INFO(dev)->gen >= 8) {
        seq_printf(m, "Master Interrupt Control:\t%08x\n",
                   I915_READ(GEN8_MASTER_IRQ));

@@ -768,7 +808,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    int i, ret;

@@ -797,10 +837,10 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)

static int i915_hws_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_ring_buffer *ring;
    struct intel_engine_cs *ring;
    const u32 *hws;
    int i;

@@ -945,7 +985,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    u16 crstanddelay;

@@ -968,7 +1008,7 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)

static int i915_frequency_info(struct seq_file *m, void *unused)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret = 0;

@@ -1108,7 +1148,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 delayfreq;

@@ -1139,7 +1179,7 @@ static inline int MAP_TO_MV(int map)

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 inttoext;

@@ -1163,7 +1203,7 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)

static int ironlake_drpc_info(struct seq_file *m)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 rgvmodectl, rstdbyctl;

@@ -1233,7 +1273,7 @@ static int ironlake_drpc_info(struct seq_file *m)
static int vlv_drpc_info(struct seq_file *m)
{

    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 rpmodectl1, rcctl1;

@@ -1286,7 +1326,7 @@ static int vlv_drpc_info(struct seq_file *m)
static int gen6_drpc_info(struct seq_file *m)
{

    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;

@@ -1385,7 +1425,7 @@ static int gen6_drpc_info(struct seq_file *m)

static int i915_drpc_info(struct seq_file *m, void *unused)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;

    if (IS_VALLEYVIEW(dev))

@@ -1398,7 +1438,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)

static int i915_fbc_status(struct seq_file *m, void *unused)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;

@@ -1460,7 +1500,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)

static int i915_ips_status(struct seq_file *m, void *unused)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;

@@ -1483,7 +1523,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)

static int i915_sr_status(struct seq_file *m, void *unused)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    bool sr_enabled = false;

@@ -1509,7 +1549,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)

static int i915_emon_status(struct seq_file *m, void *unused)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    unsigned long temp, chipset, gfx;

@@ -1537,7 +1577,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret = 0;

@@ -1580,7 +1620,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)

static int i915_gfxec(struct seq_file *m, void *unused)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret;

@@ -1600,7 +1640,7 @@ static int i915_gfxec(struct seq_file *m, void *unused)

static int i915_opregion(struct seq_file *m, void *unused)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_opregion *opregion = &dev_priv->opregion;

@@ -1628,7 +1668,7 @@ static int i915_opregion(struct seq_file *m, void *unused)

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
    struct drm_info_node *node = (struct drm_info_node *) m->private;
    struct drm_info_node *node = m->private;
    struct drm_device *dev = node->minor->dev;
    struct intel_fbdev *ifbdev = NULL;
    struct intel_framebuffer *fb;

@@ -1674,11 +1714,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
|
|||
|
||||
static int i915_context_status(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_info_node *node = m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_ring_buffer *ring;
|
||||
struct i915_hw_context *ctx;
|
||||
struct intel_engine_cs *ring;
|
||||
struct intel_context *ctx;
|
||||
int ret, i;
|
||||
|
||||
ret = mutex_lock_interruptible(&dev->mode_config.mutex);
|
||||
|
@ -1718,7 +1758,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
|
|||
|
||||
static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_info_node *node = m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
|
||||
|
@ -1766,7 +1806,7 @@ static const char *swizzle_string(unsigned swizzle)
|
|||
|
||||
static int i915_swizzle_info(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_info_node *node = m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int ret;
|
||||
|
@ -1814,10 +1854,14 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
|
|||
|
||||
static int per_file_ctx(int id, void *ptr, void *data)
|
||||
{
|
||||
struct i915_hw_context *ctx = ptr;
|
||||
struct intel_context *ctx = ptr;
|
||||
struct seq_file *m = data;
|
||||
struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
|
||||
|
||||
if (i915_gem_context_is_default(ctx))
|
||||
seq_puts(m, " default context:\n");
|
||||
else
|
||||
seq_printf(m, " context %d:\n", ctx->id);
|
||||
ppgtt->debug_dump(ppgtt, m);
|
||||
|
||||
return 0;
|
||||
|
@ -1826,7 +1870,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
|
|||
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_ring_buffer *ring;
|
||||
struct intel_engine_cs *ring;
|
||||
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
|
||||
int unused, i;
|
||||
|
||||
|
@ -1850,7 +1894,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
|
|||
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_ring_buffer *ring;
|
||||
struct intel_engine_cs *ring;
|
||||
struct drm_file *file;
|
||||
int i;
|
||||
|
||||
|
@ -1877,12 +1921,9 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
|
|||
|
||||
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
|
||||
struct drm_i915_file_private *file_priv = file->driver_priv;
|
||||
struct i915_hw_ppgtt *pvt_ppgtt;
|
||||
|
||||
pvt_ppgtt = ctx_to_ppgtt(file_priv->private_default_ctx);
|
||||
seq_printf(m, "proc: %s\n",
|
||||
get_pid_task(file->pid, PIDTYPE_PID)->comm);
|
||||
seq_puts(m, " default context:\n");
|
||||
idr_for_each(&file_priv->context_idr, per_file_ctx, m);
|
||||
}
|
||||
seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
|
||||
|
@ -1890,7 +1931,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
|
|||
|
||||
static int i915_ppgtt_info(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_info_node *node = m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
|
@ -1912,7 +1953,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
|
|||
|
||||
static int i915_llc(struct seq_file *m, void *data)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_info_node *node = m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
|
@ -2018,7 +2059,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
|
|||
|
||||
static int i915_pc8_status(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_info_node *node = m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
|
@ -2093,7 +2134,7 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
|
|||
|
||||
static int i915_power_domain_info(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_info_node *node = m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct i915_power_domains *power_domains = &dev_priv->power_domains;
|
||||
|
@ -2148,7 +2189,7 @@ static void intel_encoder_info(struct seq_file *m,
|
|||
struct intel_crtc *intel_crtc,
|
||||
struct intel_encoder *intel_encoder)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_info_node *node = m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct drm_crtc *crtc = &intel_crtc->base;
|
||||
struct intel_connector *intel_connector;
|
||||
|
@ -2175,7 +2216,7 @@ static void intel_encoder_info(struct seq_file *m,
|
|||
|
||||
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_info_node *node = m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct drm_crtc *crtc = &intel_crtc->base;
|
||||
struct intel_encoder *intel_encoder;
|
||||
|
@ -2264,10 +2305,8 @@ static bool cursor_active(struct drm_device *dev, int pipe)
|
|||
|
||||
if (IS_845G(dev) || IS_I865G(dev))
|
||||
state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
|
||||
else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
|
||||
state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
|
||||
else
|
||||
state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
|
||||
state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
|
||||
|
||||
return state;
|
||||
}
|
||||
|
@ -2277,10 +2316,7 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
|
|||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 pos;
|
||||
|
||||
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
|
||||
pos = I915_READ(CURPOS_IVB(pipe));
|
||||
else
|
||||
pos = I915_READ(CURPOS(pipe));
|
||||
pos = I915_READ(CURPOS(pipe));
|
||||
|
||||
*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
|
||||
if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
|
||||
|
@ -2295,7 +2331,7 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
|
|||
|
||||
static int i915_display_info(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_info_node *node = m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *crtc;
|
||||
|
@ -2305,7 +2341,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
|
|||
drm_modeset_lock_all(dev);
|
||||
seq_printf(m, "CRTC info\n");
|
||||
seq_printf(m, "---------\n");
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
|
||||
for_each_intel_crtc(dev, crtc) {
|
||||
bool active;
|
||||
int x, y;
|
||||
|
||||
|
@ -3084,7 +3120,7 @@ static const struct file_operations i915_display_crc_ctl_fops = {
|
|||
static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
|
||||
{
|
||||
struct drm_device *dev = m->private;
|
||||
int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
|
||||
int num_levels = ilk_wm_max_level(dev) + 1;
|
||||
int level;
|
||||
|
||||
drm_modeset_lock_all(dev);
|
||||
|
@ -3167,7 +3203,7 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
|
|||
struct seq_file *m = file->private_data;
|
||||
struct drm_device *dev = m->private;
|
||||
uint16_t new[5] = { 0 };
|
||||
int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
|
||||
int num_levels = ilk_wm_max_level(dev) + 1;
|
||||
int level;
|
||||
int ret;
|
||||
char tmp[32];
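A note on the cast removal repeated through the debugfs hunks above: `seq_file::private` is a `void *`, and C converts `void *` to any object pointer type implicitly, so the explicit cast was always redundant. A minimal sketch of the pattern, assuming only the DRM headers of this era; the handler name is invented for illustration:

	#include <drm/drmP.h>
	#include <linux/seq_file.h>

	/* Hypothetical debugfs handler following the diff's style. */
	static int example_info(struct seq_file *m, void *unused)
	{
		struct drm_info_node *node = m->private; /* no cast needed */
		struct drm_device *dev = node->minor->dev;

		seq_printf(m, "driver: %s\n", dev->driver->name);
		return 0;
	}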
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c

@@ -44,6 +44,7 @@
 #include <acpi/video.h>
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
+#include <linux/oom.h>

 #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

@@ -63,7 +64,7 @@
  * has access to the ring.
  */
 #define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
-	if (LP_RING(dev->dev_private)->obj == NULL) \
+	if (LP_RING(dev->dev_private)->buffer->obj == NULL) \
		LOCK_TEST_WITH_RETURN(dev, file); \
 } while (0)

@@ -119,7 +120,7 @@ static void i915_write_hws_pga(struct drm_device *dev)
 static void i915_free_hws(struct drm_device *dev)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+	struct intel_engine_cs *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);

@@ -139,7 +140,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
-	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+	struct intel_engine_cs *ring = LP_RING(dev_priv);
+	struct intel_ringbuffer *ringbuf = ring->buffer;

	/*
	 * We should never lose context on the ring with modesetting

@@ -148,17 +150,17 @@ void i915_kernel_lost_context(struct drm_device * dev)
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

-	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
-	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
-	if (ring->space < 0)
-		ring->space += ring->size;
+	ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+	ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+	ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
+	if (ringbuf->space < 0)
+		ringbuf->space += ringbuf->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
-	if (ring->head == ring->tail && master_priv->sarea_priv)
+	if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }

@@ -201,7 +203,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
	}

	if (init->ring_size != 0) {
-		if (LP_RING(dev_priv)->obj != NULL) {
+		if (LP_RING(dev_priv)->buffer->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");

@@ -234,11 +236,11 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 static int i915_dma_resume(struct drm_device * dev)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+	struct intel_engine_cs *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

-	if (ring->virtual_start == NULL) {
+	if (ring->buffer->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;

@@ -360,7 +362,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

-	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
+	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {

@@ -782,7 +784,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
-	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+	struct intel_engine_cs *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

@@ -823,7 +825,7 @@ static int i915_irq_emit(struct drm_device *dev, void *data,
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

-	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+	if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

@@ -1073,7 +1075,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
-	struct intel_ring_buffer *ring;
+	struct intel_engine_cs *ring;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

@@ -1570,7 +1572,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	spin_lock_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
-	dev_priv->ring_index = 0;
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

@@ -1741,8 +1742,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	intel_power_domains_remove(dev_priv);
	drm_vblank_cleanup(dev);
 out_gem_unload:
-	if (dev_priv->mm.inactive_shrinker.scan_objects)
-		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
+	unregister_shrinker(&dev_priv->mm.shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

@@ -1793,8 +1794,8 @@ int i915_driver_unload(struct drm_device *dev)

	i915_teardown_sysfs(dev);

-	if (dev_priv->mm.inactive_shrinker.scan_objects)
-		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
+	unregister_shrinker(&dev_priv->mm.shrinker);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

@@ -1867,7 +1868,7 @@ int i915_driver_unload(struct drm_device *dev)
	kmem_cache_destroy(dev_priv->slab);

	pci_dev_put(dev_priv->bridge_dev);
-	kfree(dev->dev_private);
+	kfree(dev_priv);

	return 0;
 }

@@ -1983,6 +1984,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 };

 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
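The unload paths above swap the old `inactive_shrinker` for a shrinker plus an OOM notifier (the "neat oom notifier" from the merge summary). A rough sketch of that registration pattern, using only the kernel's real notifier API; the callback body and names are illustrative, not i915's actual implementation:

	#include <linux/oom.h>
	#include <linux/notifier.h>
	#include <linux/kernel.h>

	/*
	 * The OOM notifier chain passes a pointer to a "pages freed"
	 * counter as the void * argument; a driver can release memory
	 * and/or dump debug state here before the OOM killer runs.
	 */
	static int example_oom_notify(struct notifier_block *nb,
				      unsigned long event, void *freed_ptr)
	{
		unsigned long *freed = freed_ptr;

		*freed += 0;	/* account any pages released here */
		return NOTIFY_DONE;
	}

	static struct notifier_block example_oom_nb = {
		.notifier_call = example_oom_notify,
	};

	static int example_setup(void)
	{
		/* mirrored by WARN_ON(unregister_oom_notifier(...)) on unload */
		return register_oom_notifier(&example_oom_nb);
	}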
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c

@@ -36,6 +36,7 @@

 #include <linux/console.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>

 static struct drm_driver driver;

@@ -49,12 +50,30 @@ static struct drm_driver driver;
	.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

+#define GEN_CHV_PIPEOFFSETS \
+	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+			  CHV_PIPE_C_OFFSET }, \
+	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+			   CHV_TRANSCODER_C_OFFSET, }, \
+	.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \
+			  CHV_DPLL_C_OFFSET }, \
+	.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \
+			     CHV_DPLL_C_MD_OFFSET }, \
+	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
+			     CHV_PALETTE_C_OFFSET }
+
+#define CURSOR_OFFSETS \
+	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
+
+#define IVB_CURSOR_OFFSETS \
+	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
+
 static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_845g_info = {

@@ -62,6 +81,7 @@ static const struct intel_device_info intel_845g_info = {
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_i85x_info = {

@@ -71,6 +91,7 @@ static const struct intel_device_info intel_i85x_info = {
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_i865g_info = {

@@ -78,6 +99,7 @@ static const struct intel_device_info intel_i865g_info = {
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_i915g_info = {

@@ -85,6 +107,7 @@ static const struct intel_device_info intel_i915g_info = {
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };
 static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,

@@ -94,12 +117,14 @@ static const struct intel_device_info intel_i915gm_info = {
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };
 static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };
 static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,

@@ -109,6 +134,7 @@ static const struct intel_device_info intel_i945gm_info = {
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_i965g_info = {

@@ -117,6 +143,7 @@ static const struct intel_device_info intel_i965g_info = {
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_i965gm_info = {

@@ -126,6 +153,7 @@ static const struct intel_device_info intel_i965gm_info = {
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_g33_info = {

@@ -134,6 +162,7 @@ static const struct intel_device_info intel_g33_info = {
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_g45_info = {

@@ -141,6 +170,7 @@ static const struct intel_device_info intel_g45_info = {
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_gm45_info = {

@@ -150,6 +180,7 @@ static const struct intel_device_info intel_gm45_info = {
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_pineview_info = {

@@ -157,6 +188,7 @@ static const struct intel_device_info intel_pineview_info = {
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_ironlake_d_info = {

@@ -164,6 +196,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_ironlake_m_info = {

@@ -172,6 +205,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_sandybridge_d_info = {

@@ -181,6 +215,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_sandybridge_m_info = {

@@ -190,6 +225,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 #define GEN7_FEATURES \

@@ -203,6 +239,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_ivybridge_m_info = {

@@ -210,6 +247,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
	.is_ivybridge = 1,
	.is_mobile = 1,
	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_ivybridge_q_info = {

@@ -217,6 +255,7 @@ static const struct intel_device_info intel_ivybridge_q_info = {
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_valleyview_m_info = {

@@ -228,6 +267,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_valleyview_d_info = {

@@ -238,6 +278,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_haswell_d_info = {

@@ -247,6 +288,7 @@ static const struct intel_device_info intel_haswell_d_info = {
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_haswell_m_info = {

@@ -257,6 +299,7 @@ static const struct intel_device_info intel_haswell_m_info = {
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_broadwell_d_info = {

@@ -267,6 +310,7 @@ static const struct intel_device_info intel_broadwell_d_info = {
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
 };

 static const struct intel_device_info intel_broadwell_m_info = {

@@ -297,6 +341,18 @@ static const struct intel_device_info intel_broadwell_gt3m_info = {
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
 };

+static const struct intel_device_info intel_cherryview_info = {
+	.is_preliminary = 1,
+	.gen = 8, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.is_valleyview = 1,
+	.display_mmio_offset = VLV_DISPLAY_BASE,
+	GEN_CHV_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
 /*

@@ -334,7 +390,8 @@ static const struct intel_device_info intel_broadwell_gt3m_info = {
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
-	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info)
+	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
+	INTEL_CHV_IDS(&intel_cherryview_info)

 static const struct pci_device_id pciidlist[] = { /* aka */
	INTEL_PCI_IDS,
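The per-platform tables above compose shared field groups via macros that expand to named-field initializers inside each designated-initializer list. A compact sketch of that technique; the struct and macro names here are invented stand-ins, not the real `intel_device_info` definitions:

	/* Shared field groups expand in place inside each initializer. */
	#define EXAMPLE_DEFAULT_OFFSETS \
		.pipe_offsets = { 0x70000, 0x71000 }

	#define EXAMPLE_CURSOR_OFFSETS \
		.cursor_offsets = { 0x70080, 0x700c0 }

	struct example_device_info {
		int gen;
		int num_pipes;
		int pipe_offsets[2];
		int cursor_offsets[2];
	};

	/* Macro expansion interleaves cleanly with per-device fields,
	 * so every platform table stays a single initializer list. */
	static const struct example_device_info example_info = {
		.gen = 8, .num_pipes = 2,
		EXAMPLE_DEFAULT_OFFSETS,
		EXAMPLE_CURSOR_OFFSETS,
	};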
@@ -467,17 +524,21 @@ static int i915_drm_freeze(struct drm_device *dev)
			return error;
		}

-		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
-
		drm_irq_uninstall(dev);
		dev_priv->enable_hotplug_processing = false;
+
+		intel_disable_gt_powersave(dev);
+
		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw.
		 */
		mutex_lock(&dev->mode_config.mutex);
-		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		for_each_crtc(dev, crtc) {
+			mutex_lock(&crtc->mutex);
			dev_priv->display.crtc_disable(crtc);
+			mutex_unlock(&crtc->mutex);
+		}
		mutex_unlock(&dev->mode_config.mutex);

		intel_modeset_suspend_hw(dev);

@@ -541,24 +602,6 @@ void intel_console_resume(struct work_struct *work)
	console_unlock();
 }

-static void intel_resume_hotplug(struct drm_device *dev)
-{
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct intel_encoder *encoder;
-
-	mutex_lock(&mode_config->mutex);
-	DRM_DEBUG_KMS("running encoder hotplug functions\n");
-
-	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
-		if (encoder->hot_plug)
-			encoder->hot_plug(encoder);
-
-	mutex_unlock(&mode_config->mutex);
-
-	/* Just fire off a uevent and let userspace tell us what to do */
-	drm_helper_hpd_irq_event(dev);
-}
-
 static int i915_drm_thaw_early(struct drm_device *dev)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;

@@ -614,7 +657,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
		/* Config may have changed between suspend and resume */
-		intel_resume_hotplug(dev);
+		drm_helper_hpd_irq_event(dev);
	}

	intel_opregion_init(dev);
@@ -916,21 +959,219 @@ static int i915_pm_poweroff(struct device *dev)
	return i915_drm_freeze(drm_dev);
 }

-static void hsw_runtime_suspend(struct drm_i915_private *dev_priv)
+static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
 {
	hsw_enable_pc8(dev_priv);
+
+	return 0;
 }

-static void snb_runtime_resume(struct drm_i915_private *dev_priv)
+static int snb_runtime_resume(struct drm_i915_private *dev_priv)
 {
	struct drm_device *dev = dev_priv->dev;

	intel_init_pch_refclk(dev);
+
+	return 0;
 }

-static void hsw_runtime_resume(struct drm_i915_private *dev_priv)
+static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
 {
	hsw_disable_pc8(dev_priv);
+
+	return 0;
 }

+/*
+ * Save all Gunit registers that may be lost after a D3 and a subsequent
+ * S0i[R123] transition. The list of registers needing a save/restore is
+ * defined in the VLV2_S0IXRegs document. That document marks all Gunit
+ * registers in the following way:
+ * - Driver: saved/restored by the driver
+ * - Punit : saved/restored by the Punit firmware
+ * - No, w/o marking: no need to save/restore, since the register is R/O or
+ *                    used internally by the HW in a way that doesn't depend
+ *                    on keeping the content across a suspend/resume.
+ * - Debug : used for debugging
+ *
+ * We save/restore all registers marked with 'Driver', with the following
+ * exceptions:
+ * - Registers out of use, including also registers marked with 'Debug'.
+ *   These have no effect on the driver's operation, so we don't save/restore
+ *   them to reduce the overhead.
+ * - Registers that are fully setup by an initialization function called from
+ *   the resume path. For example many clock gating and RPS/RC6 registers.
+ * - Registers that provide the right functionality with their reset defaults.
+ *
+ * TODO: Except for registers that based on the above 3 criteria can be safely
+ * ignored, we save/restore all others, practically treating the HW context as
+ * a black-box for the driver. Further investigation is needed to reduce the
+ * saved/restored registers even further, by following the same 3 criteria.
+ */
+static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+{
+	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+	int i;
+
+	/* GAM 0x4000-0x4770 */
+	s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
+	s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
+	s->arb_mode = I915_READ(ARB_MODE);
+	s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
+	s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
+
+	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
+
+	s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
+	s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
+
+	s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
+	s->ecochk = I915_READ(GAM_ECOCHK);
+	s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
+	s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
+
+	s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
+
+	/* MBC 0x9024-0x91D0, 0x8500 */
+	s->g3dctl = I915_READ(VLV_G3DCTL);
+	s->gsckgctl = I915_READ(VLV_GSCKGCTL);
+	s->mbctl = I915_READ(GEN6_MBCTL);
+
+	/* GCP 0x9400-0x9424, 0x8100-0x810C */
+	s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
+	s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
+	s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
+	s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
+	s->rstctl = I915_READ(GEN6_RSTCTL);
+	s->misccpctl = I915_READ(GEN7_MISCCPCTL);
+
+	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+	s->gfxpause = I915_READ(GEN6_GFXPAUSE);
+	s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
+	s->rpdeuc = I915_READ(GEN6_RPDEUC);
+	s->ecobus = I915_READ(ECOBUS);
+	s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
+	s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
+	s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
+	s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
+	s->rcedata = I915_READ(VLV_RCEDATA);
+	s->spare2gh = I915_READ(VLV_SPAREG2H);
+
+	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+	s->gt_imr = I915_READ(GTIMR);
+	s->gt_ier = I915_READ(GTIER);
+	s->pm_imr = I915_READ(GEN6_PMIMR);
+	s->pm_ier = I915_READ(GEN6_PMIER);
+
+	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);
+
+	/* GT SA CZ domain, 0x100000-0x138124 */
+	s->tilectl = I915_READ(TILECTL);
+	s->gt_fifoctl = I915_READ(GTFIFOCTL);
+	s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
+	s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+	s->pmwgicz = I915_READ(VLV_PMWGICZ);
+
+	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
+	s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
+	s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
+	s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
+
+	/*
+	 * Not saving any of:
+	 * DFT, 0x9800-0x9EC0
+	 * SARB, 0xB000-0xB1FC
+	 * GAC, 0x5208-0x524C, 0x14000-0x14C000
+	 * PCI CFG
+	 */
+}
+
+static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+{
+	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+	u32 val;
+	int i;
+
+	/* GAM 0x4000-0x4770 */
+	I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
+	I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
+	I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
+	I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
+	I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
+
+	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+		I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
+
+	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
+	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
+
+	I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
+	I915_WRITE(GAM_ECOCHK, s->ecochk);
+	I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
+	I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
+
+	I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
+
+	/* MBC 0x9024-0x91D0, 0x8500 */
+	I915_WRITE(VLV_G3DCTL, s->g3dctl);
+	I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
+	I915_WRITE(GEN6_MBCTL, s->mbctl);
+
+	/* GCP 0x9400-0x9424, 0x8100-0x810C */
+	I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
+	I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
+	I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
+	I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
+	I915_WRITE(GEN6_RSTCTL, s->rstctl);
+	I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
+
+	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+	I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
+	I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
+	I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
+	I915_WRITE(ECOBUS, s->ecobus);
+	I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
+	I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
+	I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
+	I915_WRITE(VLV_RCEDATA, s->rcedata);
+	I915_WRITE(VLV_SPAREG2H, s->spare2gh);
+
+	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+	I915_WRITE(GTIMR, s->gt_imr);
+	I915_WRITE(GTIER, s->gt_ier);
+	I915_WRITE(GEN6_PMIMR, s->pm_imr);
+	I915_WRITE(GEN6_PMIER, s->pm_ier);
+
+	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+		I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);
+
+	/* GT SA CZ domain, 0x100000-0x138124 */
+	I915_WRITE(TILECTL, s->tilectl);
+	I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
+	/*
+	 * Preserve the GT allow wake and GFX force clock bit; they are not
+	 * to be restored, as they are used to control the s0ix suspend/resume
+	 * sequence by the caller.
+	 */
+	val = I915_READ(VLV_GTLC_WAKE_CTRL);
+	val &= VLV_GTLC_ALLOWWAKEREQ;
+	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
+	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
+
+	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+	val &= VLV_GFX_CLK_FORCE_ON_BIT;
+	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
+	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
+
+	I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
+
+	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
+	I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
+	I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
+	I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
+}
+
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)

@@ -970,11 +1211,143 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
 #undef COND
 }

+static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
+{
+	u32 val;
+	int err = 0;
+
+	val = I915_READ(VLV_GTLC_WAKE_CTRL);
+	val &= ~VLV_GTLC_ALLOWWAKEREQ;
+	if (allow)
+		val |= VLV_GTLC_ALLOWWAKEREQ;
+	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
+	POSTING_READ(VLV_GTLC_WAKE_CTRL);
+
+#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
+	      allow)
+	err = wait_for(COND, 1);
+	if (err)
+		DRM_ERROR("timeout disabling GT waking\n");
+	return err;
+#undef COND
+}
+
+static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
+				 bool wait_for_on)
+{
+	u32 mask;
+	u32 val;
+	int err;
+
+	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
+	val = wait_for_on ? mask : 0;
+#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
+	if (COND)
+		return 0;
+
+	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
+		      wait_for_on ? "on" : "off",
+		      I915_READ(VLV_GTLC_PW_STATUS));
+
+	/*
+	 * RC6 transitioning can be delayed up to 2 msec (see
+	 * valleyview_enable_rps), use 3 msec for safety.
+	 */
+	err = wait_for(COND, 3);
+	if (err)
+		DRM_ERROR("timeout waiting for GT wells to go %s\n",
+			  wait_for_on ? "on" : "off");
+
+	return err;
+#undef COND
+}
+
+static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
+{
+	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
+		return;
+
+	DRM_ERROR("GT register access while GT waking disabled\n");
+	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
+}
+
+static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
+{
+	u32 mask;
+	int err;
+
+	/*
+	 * Bspec defines the following GT well on flags as debug only, so
+	 * don't treat them as hard failures.
+	 */
+	(void)vlv_wait_for_gt_wells(dev_priv, false);
+
+	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
+	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
+
+	vlv_check_no_gt_access(dev_priv);
+
+	err = vlv_force_gfx_clock(dev_priv, true);
+	if (err)
+		goto err1;
+
+	err = vlv_allow_gt_wake(dev_priv, false);
+	if (err)
+		goto err2;
+	vlv_save_gunit_s0ix_state(dev_priv);
+
+	err = vlv_force_gfx_clock(dev_priv, false);
+	if (err)
+		goto err2;
+
+	return 0;
+
+err2:
+	/* For safety always re-enable waking and disable gfx clock forcing */
+	vlv_allow_gt_wake(dev_priv, true);
+err1:
+	vlv_force_gfx_clock(dev_priv, false);
+
+	return err;
+}
+
+static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int err;
+	int ret;
+
+	/*
+	 * If any of the steps fail just try to continue, that's the best we
+	 * can do at this point. Return the first error code (which will also
+	 * leave RPM permanently disabled).
+	 */
+	ret = vlv_force_gfx_clock(dev_priv, true);
+
+	vlv_restore_gunit_s0ix_state(dev_priv);
+
+	err = vlv_allow_gt_wake(dev_priv, true);
+	if (!ret)
+		ret = err;
+
+	err = vlv_force_gfx_clock(dev_priv, false);
+	if (!ret)
+		ret = err;
+
+	vlv_check_no_gt_access(dev_priv);
+
+	intel_init_clock_gating(dev);
+	i915_gem_restore_fences(dev);
+
+	return ret;
+}
+
 static int intel_runtime_suspend(struct device *device)
 {
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
+		return -ENODEV;
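`vlv_runtime_resume()` above deliberately runs every resume step even after a failure, keeping only the first error code. The idiom in isolation, with placeholder step functions standing in for `vlv_force_gfx_clock()` and `vlv_allow_gt_wake()` (this is a sketch of the shape, not the driver's code):

	#include <linux/errno.h>

	/* Hypothetical steps; only the error-aggregation shape matters. */
	static int step_a(void) { return 0; }
	static int step_b(void) { return -EIO; }
	static int step_c(void) { return 0; }

	static int example_resume(void)
	{
		int ret, err;

		ret = step_a();		/* runs unconditionally */

		err = step_b();		/* still runs even if step_a failed */
		if (!ret)
			ret = err;	/* keep the first error only */

		err = step_c();
		if (!ret)
			ret = err;

		return ret;		/* 0 only if every step succeeded */
	}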
@@ -984,6 +1357,30 @@ static int intel_runtime_suspend(struct device *device)

	DRM_DEBUG_KMS("Suspending device\n");

+	/*
+	 * We could deadlock here in case another thread holding struct_mutex
+	 * calls RPM suspend concurrently, since the RPM suspend will wait
+	 * first for this RPM suspend to finish. In this case the concurrent
+	 * RPM resume will be followed by its RPM suspend counterpart. Still
+	 * for consistency return -EAGAIN, which will reschedule this suspend.
+	 */
+	if (!mutex_trylock(&dev->struct_mutex)) {
+		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
+		/*
+		 * Bump the expiration timestamp, otherwise the suspend won't
+		 * be rescheduled.
+		 */
+		pm_runtime_mark_last_busy(device);
+
+		return -EAGAIN;
+	}
+	/*
+	 * We are safe here against re-faults, since the fault handler takes
+	 * an RPM reference.
+	 */
+	i915_gem_release_all_mmaps(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+
	/*
	 * rps.work can't be rearmed here, since we get here only after making
	 * sure the GPU is idle and the RPS freq is set to the minimum. See

@@ -992,14 +1389,23 @@ static int intel_runtime_suspend(struct device *device)
	cancel_work_sync(&dev_priv->rps.work);
	intel_runtime_pm_disable_interrupts(dev);

-	if (IS_GEN6(dev))
-		;
-	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-		hsw_runtime_suspend(dev_priv);
-	else
+	if (IS_GEN6(dev)) {
+		ret = 0;
+	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+		ret = hsw_runtime_suspend(dev_priv);
+	} else if (IS_VALLEYVIEW(dev)) {
+		ret = vlv_runtime_suspend(dev_priv);
+	} else {
+		ret = -ENODEV;
		WARN_ON(1);
+	}

-	i915_gem_release_all_mmaps(dev_priv);
+	if (ret) {
+		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
+		intel_runtime_pm_restore_interrupts(dev);
+
+		return ret;
+	}

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	dev_priv->pm.suspended = true;

@@ -1022,6 +1428,7 @@ static int intel_runtime_resume(struct device *device)
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;

	WARN_ON(!HAS_RUNTIME_PM(dev));

@@ -1030,21 +1437,33 @@ static int intel_runtime_resume(struct device *device)
	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

-	if (IS_GEN6(dev))
-		snb_runtime_resume(dev_priv);
-	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-		hsw_runtime_resume(dev_priv);
-	else
+	if (IS_GEN6(dev)) {
+		ret = snb_runtime_resume(dev_priv);
+	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+		ret = hsw_runtime_resume(dev_priv);
+	} else if (IS_VALLEYVIEW(dev)) {
+		ret = vlv_runtime_resume(dev_priv);
+	} else {
		WARN_ON(1);
+		ret = -ENODEV;
+	}

+	/*
+	 * No point in rolling back things in case of an error, as the best
+	 * we can do is to hope that things will still work (and disable RPM).
+	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_restore_interrupts(dev);
	intel_reset_gt_powersave(dev);

-	DRM_DEBUG_KMS("Device resumed\n");
-	return 0;
+	if (ret)
+		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
+	else
+		DRM_DEBUG_KMS("Device resumed\n");
+
+	return ret;
 }

 static const struct dev_pm_ops i915_pm_ops = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h

@@ -41,6 +41,7 @@
 #include <linux/i2c-algo-bit.h>
 #include <drm/intel-gtt.h>
 #include <linux/backlight.h>
+#include <linux/hashtable.h>
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
 #include <linux/pm_qos.h>

@@ -92,7 +93,7 @@ enum port {
 };
 #define port_name(p) ((p) + 'A')

-#define I915_NUM_PHYS_VLV 1
+#define I915_NUM_PHYS_VLV 2

 enum dpio_channel {
	DPIO_CH0,

@@ -163,6 +164,12 @@ enum hpd_pin {
 #define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
 #define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)

+#define for_each_crtc(dev, crtc) \
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+
+#define for_each_intel_crtc(dev, intel_crtc) \
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
+
 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

@@ -172,6 +179,7 @@ enum hpd_pin {
		if ((intel_connector)->base.encoder == (__encoder))

 struct drm_i915_private;
+struct i915_mmu_object;

 enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */

@@ -397,6 +405,7 @@ struct drm_i915_error_state {
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
+		u32 userptr:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;

@@ -461,10 +470,11 @@ struct drm_i915_display_funcs {
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
+			  struct intel_engine_cs *ring,
			  uint32_t flags);
-	int (*update_primary_plane)(struct drm_crtc *crtc,
-				    struct drm_framebuffer *fb,
-				    int x, int y);
+	void (*update_primary_plane)(struct drm_crtc *crtc,
+				     struct drm_framebuffer *fb,
+				     int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */

@@ -557,6 +567,7 @@ struct intel_device_info {
	int dpll_offsets[I915_MAX_PIPES];
	int dpll_md_offsets[I915_MAX_PIPES];
	int palette_offsets[I915_MAX_PIPES];
+	int cursor_offsets[I915_MAX_PIPES];
 };

 #undef DEFINE_FLAG

@@ -588,13 +599,13 @@ struct i915_ctx_hang_stats {

 /* This must match up with the value previously used for execbuf2.rsvd1. */
 #define DEFAULT_CONTEXT_ID 0
-struct i915_hw_context {
+struct intel_context {
	struct kref ref;
	int id;
	bool is_initialized;
	uint8_t remap_slice;
	struct drm_i915_file_private *file_priv;
-	struct intel_ring_buffer *last_ring;
+	struct intel_engine_cs *last_ring;
	struct drm_i915_gem_object *obj;
	struct i915_ctx_hang_stats hang_stats;
	struct i915_address_space *vm;

@@ -819,6 +830,67 @@ struct i915_suspend_saved_registers {
	u32 savePCH_PORT_HOTPLUG;
 };

+struct vlv_s0ix_state {
+	/* GAM */
+	u32 wr_watermark;
+	u32 gfx_prio_ctrl;
+	u32 arb_mode;
+	u32 gfx_pend_tlb0;
+	u32 gfx_pend_tlb1;
+	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
+	u32 media_max_req_count;
+	u32 gfx_max_req_count;
+	u32 render_hwsp;
+	u32 ecochk;
+	u32 bsd_hwsp;
+	u32 blt_hwsp;
+	u32 tlb_rd_addr;
+
+	/* MBC */
+	u32 g3dctl;
+	u32 gsckgctl;
+	u32 mbctl;
+
+	/* GCP */
+	u32 ucgctl1;
+	u32 ucgctl3;
+	u32 rcgctl1;
+	u32 rcgctl2;
+	u32 rstctl;
+	u32 misccpctl;
+
+	/* GPM */
+	u32 gfxpause;
+	u32 rpdeuhwtc;
+	u32 rpdeuc;
+	u32 ecobus;
+	u32 pwrdwnupctl;
+	u32 rp_down_timeout;
+	u32 rp_deucsw;
+	u32 rcubmabdtmr;
+	u32 rcedata;
+	u32 spare2gh;
+
+	/* Display 1 CZ domain */
+	u32 gt_imr;
+	u32 gt_ier;
+	u32 pm_imr;
+	u32 pm_ier;
+	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
+
+	/* GT SA CZ domain */
+	u32 tilectl;
+	u32 gt_fifoctl;
+	u32 gtlc_wake_ctrl;
+	u32 gtlc_survive;
+	u32 pmwgicz;
+
+	/* Display 2 CZ domain */
+	u32 gu_ctl0;
+	u32 gu_ctl1;
+	u32 clock_gate_dis2;
+};
+
 struct intel_gen6_power_mgmt {
	/* work and pm_iir are protected by dev_priv->irq_lock */
	struct work_struct work;

@@ -987,7 +1059,8 @@ struct i915_gem_mm {
	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

-	struct shrinker inactive_shrinker;
+	struct notifier_block oom_notifier;
+	struct shrinker shrinker;
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */

@@ -1025,6 +1098,9 @@ struct i915_gem_mm {
	 */
	bool busy;

+	/* the indicator for dispatch video commands on two BSD rings */
+	int bsd_ring_dispatch_index;
+
	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */

@@ -1290,10 +1366,13 @@ struct drm_i915_private {
	 */
	uint32_t gpio_mmio_base;

+	/* MMIO base address for MIPI regs */
+	uint32_t mipi_mmio_base;
+
	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
-	struct intel_ring_buffer ring[I915_NUM_RINGS];
+	struct intel_engine_cs ring[I915_NUM_RINGS];
	uint32_t last_seqno, next_seqno;

	drm_dma_handle_t *status_page_dmah;

@@ -1380,6 +1459,9 @@ struct drm_i915_private {
	struct i915_gtt gtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
+#if defined(CONFIG_MMU_NOTIFIER)
+	DECLARE_HASHTABLE(mmu_notifiers, 7);
+#endif

	/* Kernel Modesetting */

@@ -1448,6 +1530,7 @@ struct drm_i915_private {

	u32 suspend_count;
	struct i915_suspend_saved_registers regfile;
+	struct vlv_s0ix_state vlv_s0ix_state;

	struct {
		/*

@@ -1473,8 +1556,11 @@ struct drm_i915_private {
	struct i915_dri1_state dri1;
	/* Old ums support infrastructure, same warning applies. */
	struct i915_ums_state ums;

-	/* the indicator for dispatch video commands on two BSD rings */
-	int ring_index;
+	/*
+	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
+	 * will be rejected. Instead look for a better place.
+	 */
 };

 static inline struct drm_i915_private *to_i915(const struct drm_device *dev)

@@ -1512,6 +1598,8 @@ struct drm_i915_gem_object_ops {
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
+	int (*dmabuf_export)(struct drm_i915_gem_object *);
+	void (*release)(struct drm_i915_gem_object *);
 };

 struct drm_i915_gem_object {

@@ -1602,7 +1690,7 @@ struct drm_i915_gem_object {
	void *dma_buf_vmapping;
	int vmapping_count;

-	struct intel_ring_buffer *ring;
+	struct intel_engine_cs *ring;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_read_seqno;

@@ -1625,8 +1713,20 @@ struct drm_i915_gem_object {

	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;
-};

+	union {
+		struct i915_gem_userptr {
+			uintptr_t ptr;
+			unsigned read_only :1;
+			unsigned workers :4;
+#define I915_GEM_USERPTR_MAX_WORKERS 15
+
+			struct mm_struct *mm;
+			struct i915_mmu_object *mn;
+			struct work_struct *work;
+		} userptr;
+	};
+};
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

 /**

@@ -1641,7 +1741,7 @@ struct drm_i915_gem_object {
 */
 struct drm_i915_gem_request {
	/** On Which ring this request was generated */
-	struct intel_ring_buffer *ring;
+	struct intel_engine_cs *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

@@ -1653,7 +1753,7 @@ struct drm_i915_gem_request {
	u32 tail;

	/** Context related to this request */
-	struct i915_hw_context *ctx;
+	struct intel_context *ctx;

	/** Batch buffer related to this request if any */
	struct drm_i915_gem_object *batch_obj;

@@ -1680,9 +1780,8 @@ struct drm_i915_file_private {
	} mm;
	struct idr context_idr;

-	struct i915_hw_context *private_default_ctx;
	atomic_t rps_wait_boost;
-	struct intel_ring_buffer *bsd_ring;
+	struct intel_engine_cs *bsd_ring;
 };

 /*

@@ -1848,9 +1947,10 @@ struct drm_i915_cmd_table {
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

 #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
-#define HAS_PPGTT(dev)		(INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) \
-				 && !IS_BROADWELL(dev))
+#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && \
+				 (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
+#define HAS_PPGTT(dev)		(INTEL_INFO(dev)->gen >= 7 \
+				 && !IS_GEN8(dev))
 #define USES_PPGTT(dev)		intel_enable_ppgtt(dev, false)
 #define USES_FULL_PPGTT(dev)	intel_enable_ppgtt(dev, true)

@@ -1889,7 +1989,7 @@ struct drm_i915_cmd_table {
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))
 #define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
-				 IS_BROADWELL(dev))
+				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))

 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00

@@ -2050,6 +2150,9 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
 int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
+int i915_gem_init_userptr(struct drm_device *dev);
+int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file);
 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,

@@ -2105,9 +2208,9 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)

 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
-			 struct intel_ring_buffer *to);
+			 struct intel_engine_cs *to);
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct intel_ring_buffer *ring);
+			     struct intel_engine_cs *ring);
 int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
|
||||
struct drm_mode_create_dumb *args);
|
||||
|
@ -2127,31 +2230,14 @@ int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
|
|||
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
|
||||
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
|
||||
|
||||
static inline bool
|
||||
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
if (obj->fence_reg != I915_FENCE_REG_NONE) {
|
||||
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
|
||||
dev_priv->fence_regs[obj->fence_reg].pin_count++;
|
||||
return true;
|
||||
} else
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void
|
||||
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
if (obj->fence_reg != I915_FENCE_REG_NONE) {
|
||||
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
|
||||
WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
|
||||
dev_priv->fence_regs[obj->fence_reg].pin_count--;
|
||||
}
|
||||
}
|
||||
bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
|
||||
void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
|
||||
|
||||
struct drm_i915_gem_request *
|
||||
i915_gem_find_active_request(struct intel_ring_buffer *ring);
|
||||
i915_gem_find_active_request(struct intel_engine_cs *ring);
|
||||
|
||||
bool i915_gem_retire_requests(struct drm_device *dev);
|
||||
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
|
||||
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
|
||||
bool interruptible);
|
||||
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
|
||||
|
@ -2187,18 +2273,18 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
|
|||
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
|
||||
int __must_check i915_gem_init(struct drm_device *dev);
|
||||
int __must_check i915_gem_init_hw(struct drm_device *dev);
|
||||
int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
|
||||
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
|
||||
void i915_gem_init_swizzling(struct drm_device *dev);
|
||||
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
|
||||
int __must_check i915_gpu_idle(struct drm_device *dev);
|
||||
int __must_check i915_gem_suspend(struct drm_device *dev);
|
||||
int __i915_add_request(struct intel_ring_buffer *ring,
|
||||
int __i915_add_request(struct intel_engine_cs *ring,
|
||||
struct drm_file *file,
|
||||
struct drm_i915_gem_object *batch_obj,
|
||||
u32 *seqno);
|
||||
#define i915_add_request(ring, seqno) \
|
||||
__i915_add_request(ring, NULL, NULL, seqno)
|
||||
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
|
||||
int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
|
||||
uint32_t seqno);
|
||||
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
|
||||
int __must_check
|
||||
|
@ -2209,7 +2295,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
|
|||
int __must_check
|
||||
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
|
||||
u32 alignment,
|
||||
struct intel_ring_buffer *pipelined);
|
||||
struct intel_engine_cs *pipelined);
|
||||
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
|
||||
int i915_gem_attach_phys_object(struct drm_device *dev,
|
||||
struct drm_i915_gem_object *obj,
|
||||
|
@ -2311,22 +2397,22 @@ void i915_gem_context_reset(struct drm_device *dev);
|
|||
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
|
||||
int i915_gem_context_enable(struct drm_i915_private *dev_priv);
|
||||
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
|
||||
int i915_switch_context(struct intel_ring_buffer *ring,
|
||||
struct i915_hw_context *to);
|
||||
struct i915_hw_context *
|
||||
int i915_switch_context(struct intel_engine_cs *ring,
|
||||
struct intel_context *to);
|
||||
struct intel_context *
|
||||
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
|
||||
void i915_gem_context_free(struct kref *ctx_ref);
|
||||
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
|
||||
static inline void i915_gem_context_reference(struct intel_context *ctx)
|
||||
{
|
||||
kref_get(&ctx->ref);
|
||||
}
|
||||
|
||||
static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
|
||||
static inline void i915_gem_context_unreference(struct intel_context *ctx)
|
||||
{
|
||||
kref_put(&ctx->ref, i915_gem_context_free);
|
||||
}
|
||||
|
||||
static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
|
||||
static inline bool i915_gem_context_is_default(const struct intel_context *c)
|
||||
{
|
||||
return c->id == DEFAULT_CONTEXT_ID;
|
||||
}
|
||||
|
@ -2336,6 +2422,8 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
|
|||
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file);
|
||||
|
||||
/* i915_gem_render_state.c */
|
||||
int i915_gem_render_state_init(struct intel_engine_cs *ring);
|
||||
/* i915_gem_evict.c */
|
||||
int __must_check i915_gem_evict_something(struct drm_device *dev,
|
||||
struct i915_address_space *vm,
|
||||
|
@ -2420,9 +2508,10 @@ const char *i915_cache_level_str(int type);
|
|||
|
||||
/* i915_cmd_parser.c */
|
||||
int i915_cmd_parser_get_version(void);
|
||||
void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring);
|
||||
bool i915_needs_cmd_parser(struct intel_ring_buffer *ring);
|
||||
int i915_parse_cmds(struct intel_ring_buffer *ring,
|
||||
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
|
||||
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
|
||||
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
|
||||
int i915_parse_cmds(struct intel_engine_cs *ring,
|
||||
struct drm_i915_gem_object *batch_obj,
|
||||
u32 batch_start_offset,
|
||||
bool is_master);
|
||||
|
|
|
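The i915_gem_userptr fields folded into drm_i915_gem_object above back the new userptr ioctl added by this series. A rough userspace sketch of wrapping an existing allocation in a GEM handle; the uapi struct and flag names follow include/uapi/drm/i915_drm.h as merged, while wrap_userptr and the error handling are illustrative only:

#include <stdint.h>
#include <stddef.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Turn a page-aligned anonymous allocation into a GEM object. */
static uint32_t wrap_userptr(int fd, void *ptr, size_t size)
{
	struct drm_i915_gem_userptr arg = {
		.user_ptr = (uintptr_t)ptr,
		.user_size = size,
		.flags = 0,	/* or I915_USERPTR_READ_ONLY */
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
		return 0;	/* 0 is never a valid GEM handle */

	return arg.handle;
}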
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c

@@ -31,6 +31,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>

@@ -57,14 +58,15 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);

static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
struct shrink_control *sc);
static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
struct shrink_control *sc);
static int i915_gem_shrinker_oom(struct notifier_block *nb,
unsigned long event,
void *ptr);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);

static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)

@@ -977,7 +979,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
* equal.
*/
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
int ret;

@@ -996,7 +998,7 @@ static void fake_irq(unsigned long data)
}

static bool missed_irq(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

@@ -1027,7 +1029,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
* Returns 0 if the seqno was found within the alloted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
unsigned reset_counter,
bool interruptible,
struct timespec *timeout,

@@ -1134,7 +1136,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
* request and object lists appropriately for that event.
*/
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;

@@ -1159,7 +1161,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
if (!obj->active)
return 0;

@@ -1184,7 +1186,7 @@ static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
struct intel_ring_buffer *ring = obj->ring;
struct intel_engine_cs *ring = obj->ring;
u32 seqno;
int ret;

@@ -1209,7 +1211,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = obj->ring;
struct intel_engine_cs *ring = obj->ring;
unsigned reset_counter;
u32 seqno;
int ret;

@@ -1692,12 +1694,16 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
return obj->madv == I915_MADV_DONTNEED;
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
struct inode *inode;

i915_gem_object_free_mmap_offset(obj);

if (obj->base.filp == NULL)

@@ -1708,16 +1714,28 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
* To do this we must instruct the shmfs to drop all of its
* backing pages, *now*.
*/
inode = file_inode(obj->base.filp);
shmem_truncate_range(inode, 0, (loff_t)-1);

shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
obj->madv = __I915_MADV_PURGED;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
return obj->madv == I915_MADV_DONTNEED;
struct address_space *mapping;

switch (obj->madv) {
case I915_MADV_DONTNEED:
i915_gem_object_truncate(obj);
case __I915_MADV_PURGED:
return;
}

if (obj->base.filp == NULL)
return;

mapping = file_inode(obj->base.filp)->i_mapping,
invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}

static void

@@ -1782,8 +1800,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
ops->put_pages(obj);
obj->pages = NULL;

if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
i915_gem_object_invalidate(obj);

return 0;
}

@@ -1974,7 +1991,19 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
page_cache_release(sg_page_iter_page(&sg_iter));
sg_free_table(st);
kfree(st);
return PTR_ERR(page);

/* shmemfs first checks if there is enough memory to allocate the page
* and reports ENOSPC should there be insufficient, along with the usual
* ENOMEM for a genuine allocation failure.
*
* We use ENOSPC in our driver to mean that we have run out of aperture
* space and so want to translate the error from shmemfs back to our
* usual understanding of ENOMEM.
*/
if (PTR_ERR(page) == -ENOSPC)
return -ENOMEM;
else
return PTR_ERR(page);
}

/* Ensure that the associated pages are gathered from the backing storage
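The ENOSPC-to-ENOMEM translation above is the standard trick when a callee's errno collides with a meaning the caller's namespace has reserved. A minimal standalone sketch of the pattern, assuming shmem backing (fetch_backing_page is a made-up helper, not driver code):

#include <linux/err.h>
#include <linux/shmem_fs.h>

static int fetch_backing_page(struct address_space *mapping, pgoff_t index,
			      struct page **out)
{
	struct page *page = shmem_read_mapping_page(mapping, index);

	if (IS_ERR(page))
		/* shmemfs says ENOSPC for "no memory"; we reserve ENOSPC
		 * for "no aperture space", so remap before returning. */
		return PTR_ERR(page) == -ENOSPC ? -ENOMEM : PTR_ERR(page);

	*out = page;
	return 0;
}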
@@ -2011,7 +2040,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)

static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;

@@ -2049,7 +2078,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}

void i915_vma_move_to_active(struct i915_vma *vma,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
list_move_tail(&vma->mm_list, &vma->vm->active_list);
return i915_gem_object_move_to_active(vma->obj, ring);

@@ -2090,7 +2119,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj)
{
struct intel_ring_buffer *ring = obj->ring;
struct intel_engine_cs *ring = obj->ring;

if (ring == NULL)
return;

@@ -2104,7 +2133,7 @@ static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
int ret, i, j;

/* Carefully retire all requests without writing to the rings */

@@ -2170,7 +2199,7 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
return 0;
}

int __i915_add_request(struct intel_ring_buffer *ring,
int __i915_add_request(struct intel_engine_cs *ring,
struct drm_file *file,
struct drm_i915_gem_object *obj,
u32 *out_seqno)

@@ -2275,7 +2304,7 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
}

static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
const struct i915_hw_context *ctx)
const struct intel_context *ctx)
{
unsigned long elapsed;

@@ -2299,7 +2328,7 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
}

static void i915_set_reset_status(struct drm_i915_private *dev_priv,
struct i915_hw_context *ctx,
struct intel_context *ctx,
const bool guilty)
{
struct i915_ctx_hang_stats *hs;

@@ -2330,7 +2359,7 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
}

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_ring_buffer *ring)
i915_gem_find_active_request(struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *request;
u32 completed_seqno;

@@ -2348,7 +2377,7 @@ i915_gem_find_active_request(struct intel_ring_buffer *ring)
}

static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *request;
bool ring_hung;

@@ -2367,7 +2396,7 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
}

static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;

@@ -2426,7 +2455,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
int i;

/*

@@ -2448,8 +2477,8 @@ void i915_gem_reset(struct drm_device *dev)
/**
* This function clears the request list as sequence numbers are passed.
*/
static void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
void
i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
uint32_t seqno;

@@ -2494,7 +2523,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
* of tail of the request to update the last known position
* of the GPU head.
*/
ring->last_retired_head = request->tail;
ring->buffer->last_retired_head = request->tail;

i915_gem_free_request(request);
}

@@ -2512,7 +2541,7 @@ bool
i915_gem_retire_requests(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
bool idle = true;
int i;

@@ -2606,7 +2635,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring = NULL;
struct intel_engine_cs *ring = NULL;
struct timespec timeout_stack, *timeout = NULL;
unsigned reset_counter;
u32 seqno = 0;

@@ -2677,9 +2706,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
*/
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
struct intel_engine_cs *to)
{
struct intel_ring_buffer *from = obj->ring;
struct intel_engine_cs *from = obj->ring;
u32 seqno;
int ret, idx;

@@ -2762,12 +2791,14 @@ int i915_vma_unbind(struct i915_vma *vma)
* cause memory corruption through use-after-free.
*/

i915_gem_object_finish_gtt(obj);
if (i915_is_ggtt(vma->vm)) {
i915_gem_object_finish_gtt(obj);

/* release the fence reg _after_ flushing */
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
/* release the fence reg _after_ flushing */
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
}

trace_i915_vma_unbind(vma);

@@ -2800,7 +2831,7 @@ int i915_vma_unbind(struct i915_vma *vma)
int i915_gpu_idle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
int ret, i;

/* Flush everything onto the inactive list. */

@@ -3041,6 +3072,9 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)

fence = &dev_priv->fence_regs[obj->fence_reg];

if (WARN_ON(fence->pin_count))
return -EBUSY;

i915_gem_object_fence_lost(obj);
i915_gem_object_update_fence(obj, fence, false);

@@ -3637,6 +3671,15 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,

static bool is_pin_display(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;

if (list_empty(&obj->vma_list))
return false;

vma = i915_gem_obj_to_ggtt(obj);
if (!vma)
return false;

/* There are 3 sources that pin objects:
* 1. The display engine (scanouts, sprites, cursors);
* 2. Reservations for execbuffer;

@@ -3648,7 +3691,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
* subtracting the potential reference by the user, any pin_count
* remains, it must be due to another use by the display engine.
*/
return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
return vma->pin_count - !!obj->user_pin_count;
}

/*

@@ -3659,9 +3702,10 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_ring_buffer *pipelined)
struct intel_engine_cs *pipelined)
{
u32 old_read_domains, old_write_domain;
bool was_pin_display;
int ret;

if (pipelined != obj->ring) {

@@ -3673,6 +3717,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
/* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains.
*/
was_pin_display = obj->pin_display;
obj->pin_display = true;

/* The display engine is not coherent with the LLC cache on gen6. As

@@ -3715,7 +3760,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
return 0;

err_unpin_display:
obj->pin_display = is_pin_display(obj);
WARN_ON(was_pin_display != is_pin_display(obj));
obj->pin_display = was_pin_display;
return ret;
}

@@ -3812,7 +3858,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
struct drm_i915_gem_request *request;
struct intel_ring_buffer *ring = NULL;
struct intel_engine_cs *ring = NULL;
unsigned reset_counter;
u32 seqno = 0;
int ret;

@@ -3852,9 +3898,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
unsigned flags)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
int ret;

if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
return -ENODEV;

if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
return -EINVAL;

@@ -3910,6 +3960,32 @@ i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
obj->pin_mappable = false;
}

bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);

WARN_ON(!ggtt_vma ||
dev_priv->fence_regs[obj->fence_reg].pin_count >
ggtt_vma->pin_count);
dev_priv->fence_regs[obj->fence_reg].pin_count++;
return true;
} else
return false;
}

void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
dev_priv->fence_regs[obj->fence_reg].pin_count--;
}
}

int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)

@@ -4170,6 +4246,30 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
return obj;
}

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
/* If we are the last user of the backing storage (be it shmemfs
* pages or stolen etc), we know that the pages are going to be
* immediately released. In this case, we can then skip copying
* back the contents from the GPU.
*/

if (obj->madv != I915_MADV_WILLNEED)
return false;

if (obj->base.filp == NULL)
return true;

/* At first glance, this looks racy, but then again so would be
* userspace racing mmap against close. However, the first external
* reference to the filp can only be obtained through the
* i915_gem_mmap_ioctl() which safeguards us against the user
* acquiring such a reference whilst we are in the middle of
* freeing the object.
*/
return atomic_long_read(&obj->base.filp->f_count) == 1;
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

@@ -4208,6 +4308,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)

if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
if (discard_backing_storage(obj))
obj->madv = I915_MADV_DONTNEED;
i915_gem_object_put_pages(obj);
i915_gem_object_free_mmap_offset(obj);
i915_gem_object_release_stolen(obj);

@@ -4217,6 +4319,9 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);

if (obj->ops->release)
obj->ops->release(obj);

drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);

@@ -4254,7 +4359,7 @@ static void
i915_gem_stop_ringbuffers(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
int i;

for_each_ring(ring, dev_priv, i)

@@ -4303,7 +4408,7 @@ i915_gem_suspend(struct drm_device *dev)
return ret;
}

int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;

@@ -4496,6 +4601,7 @@ int i915_gem_init(struct drm_device *dev)
DRM_DEBUG_DRIVER("allow wake ack timed out\n");
}

i915_gem_init_userptr(dev);
i915_gem_init_global_gtt(dev);

ret = i915_gem_context_init(dev);

@@ -4526,7 +4632,7 @@ void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
int i;

for_each_ring(ring, dev_priv, i)

@@ -4602,7 +4708,7 @@ i915_gem_lastclose(struct drm_device *dev)
}

static void
init_ring_lists(struct intel_ring_buffer *ring)
init_ring_lists(struct intel_engine_cs *ring)
{
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);

@@ -4677,10 +4783,13 @@ i915_gem_load(struct drm_device *dev)

dev_priv->mm.interruptible = true;

dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&dev_priv->mm.inactive_shrinker);
dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&dev_priv->mm.shrinker);

dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
register_oom_notifier(&dev_priv->mm.oom_notifier);
}

/*

@@ -4939,27 +5048,46 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}

static unsigned long
i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
struct drm_i915_private *dev_priv =
container_of(shrinker,
struct drm_i915_private,
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
bool unlock = true;
unsigned long count;

if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
return 0;
return false;

if (dev_priv->mm.shrinker_no_lock_stealing)
return 0;
if (to_i915(dev)->mm.shrinker_no_lock_stealing)
return false;

unlock = false;
}
*unlock = false;
} else
*unlock = true;

return true;
}
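i915_gem_shrinker_lock() above factors out a trylock-or-recursion check shared by the shrinker and OOM paths: direct reclaim can re-enter the driver from an allocation made while this thread already holds struct_mutex. A generic sketch of the idea with explicit owner bookkeeping, since core mutexes do not portably expose their owner (all names hypothetical):

#include <linux/mutex.h>
#include <linux/sched.h>

struct reclaim_lock {
	struct mutex lock;
	struct task_struct *owner;	/* updated under lock */
};

static bool reclaim_trylock(struct reclaim_lock *rl, bool *unlock)
{
	if (mutex_trylock(&rl->lock)) {
		rl->owner = current;
		*unlock = true;		/* we took it, we release it */
		return true;
	}
	if (rl->owner == current) {
		*unlock = false;	/* already held further up our stack */
		return true;
	}
	return false;			/* contended by another task: back off */
}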
static int num_vma_bound(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
int count = 0;

list_for_each_entry(vma, &obj->vma_list, vma_link)
if (drm_mm_node_allocated(&vma->node))
count++;

return count;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
unsigned long count;
bool unlock;

if (!i915_gem_shrinker_lock(dev, &unlock))
return 0;

count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)

@@ -4967,10 +5095,8 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
count += obj->base.size >> PAGE_SHIFT;

list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->active)
continue;

if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
if (!i915_gem_obj_is_pinned(obj) &&
obj->pages_pin_count == num_vma_bound(obj))
count += obj->base.size >> PAGE_SHIFT;
}

@@ -5043,44 +5169,99 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
}

static unsigned long
i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker,
struct drm_i915_private,
mm.inactive_shrinker);
container_of(shrinker, struct drm_i915_private, mm.shrinker);
struct drm_device *dev = dev_priv->dev;
unsigned long freed;
bool unlock = true;
bool unlock;

if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
return SHRINK_STOP;

if (dev_priv->mm.shrinker_no_lock_stealing)
return SHRINK_STOP;

unlock = false;
}
if (!i915_gem_shrinker_lock(dev, &unlock))
return SHRINK_STOP;

freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
if (freed < sc->nr_to_scan)
freed += __i915_gem_shrink(dev_priv,
sc->nr_to_scan - freed,
false);
if (freed < sc->nr_to_scan)
freed += i915_gem_shrink_all(dev_priv);

if (unlock)
mutex_unlock(&dev->struct_mutex);

return freed;
}

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, mm.oom_notifier);
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
unsigned long timeout = msecs_to_jiffies(5000) + 1;
unsigned long pinned, bound, unbound, freed;
bool was_interruptible;
bool unlock;

while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout)
schedule_timeout_killable(1);
if (timeout == 0) {
pr_err("Unable to purge GPU memory due lock contention.\n");
return NOTIFY_DONE;
}

was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;

freed = i915_gem_shrink_all(dev_priv);

dev_priv->mm.interruptible = was_interruptible;

/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
* being pointed to by hardware.
*/
unbound = bound = pinned = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
if (!obj->base.filp) /* not backed by a freeable object */
continue;

if (obj->pages_pin_count)
pinned += obj->base.size;
else
unbound += obj->base.size;
}
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (!obj->base.filp)
continue;

if (obj->pages_pin_count)
pinned += obj->base.size;
else
bound += obj->base.size;
}

if (unlock)
mutex_unlock(&dev->struct_mutex);

pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
freed, pinned);
if (unbound || bound)
pr_err("%lu and %lu bytes still available in the "
"bound and unbound GPU page lists.\n",
bound, unbound);

*(unsigned long *)ptr += freed;
return NOTIFY_DONE;
}
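The OOM handler above runs off a notifier chain invoked just before the kernel's OOM killer picks a victim; the callback reports pages freed by adding to the unsigned long that ptr points at. Hooking the chain from any module looks roughly like this (my_oom_notify is a placeholder; register_oom_notifier() and NOTIFY_DONE are the real linux/oom.h interface):

#include <linux/notifier.h>
#include <linux/oom.h>

static int my_oom_notify(struct notifier_block *nb,
			 unsigned long event, void *ptr)
{
	unsigned long freed = 0;

	/* ... drop whatever caches we can, counting pages ... */

	*(unsigned long *)ptr += freed;
	return NOTIFY_DONE;
}

static struct notifier_block my_oom_nb = {
	.notifier_call = my_oom_notify,
};

/* at init:  register_oom_notifier(&my_oom_nb);
 * at exit:  unregister_oom_notifier(&my_oom_nb); */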
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;

/* This WARN has probably outlived its usefulness (callers already
* WARN if they don't find the GGTT vma they expect). When removing,
* remember to remove the pre-check in is_pin_display() as well */
if (WARN_ON(list_empty(&obj->vma_list)))
return NULL;

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c

@@ -178,7 +178,7 @@ static int get_context_size(struct drm_device *dev)

void i915_gem_context_free(struct kref *ctx_ref)
{
struct i915_hw_context *ctx = container_of(ctx_ref,
struct intel_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
struct i915_hw_ppgtt *ppgtt = NULL;

@@ -199,7 +199,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
}

static struct i915_hw_ppgtt *
create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
{
struct i915_hw_ppgtt *ppgtt;
int ret;

@@ -218,12 +218,12 @@ create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
return ppgtt;
}

static struct i915_hw_context *
static struct intel_context *
__create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *ctx;
struct intel_context *ctx;
int ret;

ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

@@ -285,14 +285,14 @@ __create_hw_context(struct drm_device *dev,
* context state of the GPU for applications that don't utilize HW contexts, as
* well as an idle case.
*/
static struct i915_hw_context *
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv,
bool create_vm)
{
const bool is_global_default_ctx = file_priv == NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *ctx;
struct intel_context *ctx;
int ret = 0;

BUG_ON(!mutex_is_locked(&dev->struct_mutex));

@@ -364,8 +364,8 @@ void i915_gem_context_reset(struct drm_device *dev)
/* Prevent the hardware from restoring the last context (which hung) on
* the next switch */
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_ring_buffer *ring = &dev_priv->ring[i];
struct i915_hw_context *dctx = ring->default_context;
struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_context *dctx = ring->default_context;

/* Do a fake switch to the default context */
if (ring->last_context == dctx)

@@ -391,7 +391,7 @@ void i915_gem_context_reset(struct drm_device *dev)
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *ctx;
struct intel_context *ctx;
int i;

/* Init should only be called once per module load. Eventually the

@@ -426,7 +426,7 @@ int i915_gem_context_init(struct drm_device *dev)
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
struct intel_context *dctx = dev_priv->ring[RCS].default_context;
int i;

if (dctx->obj) {

@@ -449,10 +449,12 @@ void i915_gem_context_fini(struct drm_device *dev)
i915_gem_context_unreference(dctx);
dev_priv->ring[RCS].last_context = NULL;
}

i915_gem_object_ggtt_unpin(dctx->obj);
}

for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_ring_buffer *ring = &dev_priv->ring[i];
struct intel_engine_cs *ring = &dev_priv->ring[i];

if (ring->last_context)
i915_gem_context_unreference(ring->last_context);

@@ -461,13 +463,12 @@ void i915_gem_context_fini(struct drm_device *dev)
ring->last_context = NULL;
}

i915_gem_object_ggtt_unpin(dctx->obj);
i915_gem_context_unreference(dctx);
}

int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
int ret, i;

/* This is the only place the aliasing PPGTT gets enabled, which means

@@ -494,11 +495,7 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)

static int context_idr_cleanup(int id, void *p, void *data)
{
struct i915_hw_context *ctx = p;

/* Ignore the default context because close will handle it */
if (i915_gem_context_is_default(ctx))
return 0;
struct intel_context *ctx = p;

i915_gem_context_unreference(ctx);
return 0;

@@ -507,17 +504,17 @@ static int context_idr_cleanup(int id, void *p, void *data)
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct intel_context *ctx;

idr_init(&file_priv->context_idr);

mutex_lock(&dev->struct_mutex);
file_priv->private_default_ctx =
i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
mutex_unlock(&dev->struct_mutex);

if (IS_ERR(file_priv->private_default_ctx)) {
if (IS_ERR(ctx)) {
idr_destroy(&file_priv->context_idr);
return PTR_ERR(file_priv->private_default_ctx);
return PTR_ERR(ctx);
}

return 0;

@@ -529,16 +526,14 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)

idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);

i915_gem_context_unreference(file_priv->private_default_ctx);
}

struct i915_hw_context *
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
struct i915_hw_context *ctx;
struct intel_context *ctx;

ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
if (!ctx)
return ERR_PTR(-ENOENT);

@@ -546,8 +541,8 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
}

static inline int
mi_set_context(struct intel_ring_buffer *ring,
struct i915_hw_context *new_context,
mi_set_context(struct intel_engine_cs *ring,
struct intel_context *new_context,
u32 hw_flags)
{
int ret;

@@ -567,7 +562,7 @@ mi_set_context(struct intel_ring_buffer *ring,
if (ret)
return ret;

/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw */
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (INTEL_INFO(ring->dev)->gen >= 7)
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
else

@@ -596,11 +591,11 @@ mi_set_context(struct intel_ring_buffer *ring,
return ret;
}

static int do_switch(struct intel_ring_buffer *ring,
struct i915_hw_context *to)
static int do_switch(struct intel_engine_cs *ring,
struct intel_context *to)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct i915_hw_context *from = ring->last_context;
struct intel_context *from = ring->last_context;
struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
u32 hw_flags = 0;
int ret, i;

@@ -701,13 +696,19 @@ static int do_switch(struct intel_ring_buffer *ring,
i915_gem_context_unreference(from);
}

to->is_initialized = true;

done:
i915_gem_context_reference(to);
ring->last_context = to;
to->last_ring = ring;

if (ring->id == RCS && !to->is_initialized && from == NULL) {
ret = i915_gem_render_state_init(ring);
if (ret)
DRM_ERROR("init render state: %d\n", ret);
}

to->is_initialized = true;

return 0;

unpin_out:

@@ -726,8 +727,8 @@ static int do_switch(struct intel_ring_buffer *ring,
* it will have a refoucnt > 1. This allows us to destroy the context abstract
* object while letting the normal object tracking destroy the backing BO.
*/
int i915_switch_context(struct intel_ring_buffer *ring,
struct i915_hw_context *to)
int i915_switch_context(struct intel_engine_cs *ring,
struct intel_context *to)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;

@@ -756,7 +757,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_context_create *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_hw_context *ctx;
struct intel_context *ctx;
int ret;

if (!hw_context_enabled(dev))

@@ -782,7 +783,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_context_destroy *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_hw_context *ctx;
struct intel_context *ctx;
int ret;

if (args->ctx_id == DEFAULT_CONTEXT_ID)

diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c

@@ -229,6 +229,14 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
if (ret)
return ERR_PTR(ret);
}

return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
}

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -541,7 +541,7 @@ need_reloc_mappable(struct i915_vma *vma)

static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct intel_ring_buffer *ring,
struct intel_engine_cs *ring,
bool *need_reloc)
{
struct drm_i915_gem_object *obj = vma->obj;

@@ -596,7 +596,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
struct list_head *vmas,
bool *need_relocs)
{

@@ -610,6 +610,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
if (list_empty(vmas))
return 0;

i915_gem_retire_requests_ring(ring);

vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

INIT_LIST_HEAD(&ordered_vmas);

@@ -711,7 +713,7 @@ static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct intel_engine_cs *ring,
struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec)
{

@@ -827,7 +829,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
struct list_head *vmas)
{
struct i915_vma *vma;

@@ -910,11 +912,11 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
return 0;
}

static struct i915_hw_context *
static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_ring_buffer *ring, const u32 ctx_id)
struct intel_engine_cs *ring, const u32 ctx_id)
{
struct i915_hw_context *ctx = NULL;
struct intel_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;

if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)

@@ -935,7 +937,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,

static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
struct i915_vma *vma;

@@ -970,7 +972,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct intel_engine_cs *ring,
struct drm_i915_gem_object *obj)
{
/* Unconditionally force add_request to emit a full flush. */

@@ -982,7 +984,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct intel_ring_buffer *ring)
struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;

@@ -1025,12 +1027,12 @@ static int gen8_dispatch_bsd_ring(struct drm_device *dev,
int ring_id;

mutex_lock(&dev->struct_mutex);
if (dev_priv->ring_index == 0) {
if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
ring_id = VCS;
dev_priv->ring_index = 1;
dev_priv->mm.bsd_ring_dispatch_index = 1;
} else {
ring_id = VCS2;
dev_priv->ring_index = 0;
dev_priv->mm.bsd_ring_dispatch_index = 0;
}
file_priv->bsd_ring = &dev_priv->ring[ring_id];
mutex_unlock(&dev->struct_mutex);
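gen8_dispatch_bsd_ring() above balances clients across the two BSD video rings: the first submission from a file picks a ring by ping-ponging a shared index, and the choice is cached in file_priv so that client's own commands stay ordered on one ring. The balancing idea in isolation (illustrative names, not driver code):

#include <linux/mutex.h>

struct balancer {
	struct mutex lock;
	int next;			/* 0 or 1 */
};

/* Called once per new client; the result is cached per client. */
static int assign_queue(struct balancer *b)
{
	int id;

	mutex_lock(&b->lock);
	id = b->next;
	b->next ^= 1;			/* flip for the next client */
	mutex_unlock(&b->lock);

	return id;
}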
@@ -1048,8 +1050,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
struct i915_hw_context *ctx;
struct intel_engine_cs *ring;
struct intel_context *ctx;
struct i915_address_space *vm;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u64 exec_start = args->batch_start_offset, exec_len;

@@ -1168,6 +1170,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
} else {
if (args->DR4 == 0xffffffff) {
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
args->DR4 = 0;
}

if (args->DR1 || args->DR4 || args->cliprects_ptr) {
DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
return -EINVAL;

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -30,7 +30,8 @@
#include "i915_trace.h"
#include "intel_drv.h"

static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv);
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);

bool intel_enable_ppgtt(struct drm_device *dev, bool full)
{

@@ -196,7 +197,7 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
}

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
uint64_t val, bool synchronous)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;

@@ -226,7 +227,7 @@ static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
}

static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_ring_buffer *ring,
struct intel_engine_cs *ring,
bool synchronous)
{
int i, ret;

@@ -275,6 +276,8 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
num_entries--;
}

if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);

pte = 0;

@@ -311,6 +314,8 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
if (++pte == GEN8_PTES_PER_PAGE) {
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
if (++pde == GEN8_PDES_PER_PAGE) {

@@ -320,8 +325,11 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
pte = 0;
}
}
if (pt_vaddr)
if (pt_vaddr) {
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
}
}
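The HAS_LLC() checks added above exist because Cherryview's GPU does not snoop the CPU's last-level cache, so page-table entries written through a kernel mapping must be clflushed before the GPU can be trusted to see them. The write-then-flush pattern in isolation (write_ptes is illustrative; drm_clflush_virt_range() is the real DRM helper):

#include <linux/highmem.h>
#include <linux/string.h>

static void write_ptes(struct page *pt_page, const u64 *ptes, int count,
		       bool has_llc)
{
	u64 *vaddr = kmap_atomic(pt_page);

	memcpy(vaddr, ptes, count * sizeof(*ptes));
	if (!has_llc)
		/* push the writes out of the CPU caches for the GPU */
		drm_clflush_virt_range(vaddr, count * sizeof(*ptes));
	kunmap_atomic(vaddr);
}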
static void gen8_free_page_tables(struct page **pt_pages)

@@ -584,6 +592,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
I915_CACHE_LLC);
}
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
kunmap_atomic(pd_vaddr);
}

@@ -696,7 +706,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
}

static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_ring_buffer *ring,
struct intel_engine_cs *ring,
bool synchronous)
{
struct drm_device *dev = ppgtt->base.dev;

@@ -740,7 +750,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
}

static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_ring_buffer *ring,
struct intel_engine_cs *ring,
bool synchronous)
{
struct drm_device *dev = ppgtt->base.dev;

@@ -791,7 +801,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
}

static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_ring_buffer *ring,
struct intel_engine_cs *ring,
bool synchronous)
{
struct drm_device *dev = ppgtt->base.dev;

@@ -812,7 +822,7 @@ static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
int j, ret;

for_each_ring(ring, dev_priv, j) {

@@ -842,7 +852,7 @@ static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
uint32_t ecochk, ecobits;
int i;

@@ -881,7 +891,7 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
uint32_t ecochk, gab_ctl, ecobits;
int i;

@@ -1025,8 +1035,7 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
&ppgtt->node, GEN6_PD_SIZE,
GEN6_PD_ALIGN, 0,
0, dev_priv->gtt.base.total,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
DRM_MM_TOPDOWN);
if (ret == -ENOSPC && !retried) {
ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
GEN6_PD_SIZE, GEN6_PD_ALIGN,

@@ -1250,7 +1259,7 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
void i915_check_and_clear_faults(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct intel_engine_cs *ring;
int i;

if (INTEL_INFO(dev)->gen < 6)

@@ -1325,7 +1334,11 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)


if (INTEL_INFO(dev)->gen >= 8) {
gen8_setup_private_ppat(dev_priv);
if (IS_CHERRYVIEW(dev))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);

return;
}

@@ -1753,6 +1766,17 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
return bdw_gmch_ctl << 20;
}

static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
gmch_ctrl &= SNB_GMCH_GGMS_MASK;

if (gmch_ctrl)
return 1 << (20 + gmch_ctrl);

return 0;
}

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;

@@ -1767,6 +1791,24 @@ static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
return bdw_gmch_ctl << 25; /* 32 MB units */
}

static size_t chv_get_stolen_size(u16 gmch_ctrl)
{
gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
gmch_ctrl &= SNB_GMCH_GMS_MASK;

/*
* 0x0 to 0x10: 32MB increments starting at 0MB
* 0x11 to 0x16: 4MB increments starting at 8MB
* 0x17 to 0x1d: 4MB increments start at 36MB
*/
if (gmch_ctrl < 0x11)
return gmch_ctrl << 25;
else if (gmch_ctrl < 0x17)
return (gmch_ctrl - 0x11 + 2) << 22;
else
return (gmch_ctrl - 0x17 + 9) << 22;
}
||||
static int ggtt_probe_common(struct drm_device *dev,
|
||||
size_t gtt_size)
|
||||
{
|
||||
|
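A quick standalone sanity check of the CHV stolen-size decode above (a minimal sketch, not part of the commit; it only re-states the three ranges from the comment in chv_get_stolen_size(), assuming the GMS field has already been shifted and masked):

#include <assert.h>
#include <stddef.h>

/* Re-statement of the decode in chv_get_stolen_size() for checking the
 * breakpoints named in its comment. */
static size_t chv_stolen_bytes(unsigned int gms)
{
    if (gms < 0x11)
        return (size_t)gms << 25;               /* 32MB steps from 0MB */
    else if (gms < 0x17)
        return (size_t)(gms - 0x11 + 2) << 22;  /* 4MB steps from 8MB */
    else
        return (size_t)(gms - 0x17 + 9) << 22;  /* 4MB steps from 36MB */
}

int main(void)
{
    assert(chv_stolen_bytes(0x00) == 0);
    assert(chv_stolen_bytes(0x01) == 32ull << 20);  /* 32MB */
    assert(chv_stolen_bytes(0x11) == 8ull << 20);   /* 8MB */
    assert(chv_stolen_bytes(0x17) == 36ull << 20);  /* 36MB */
    return 0;
}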
@@ -1797,7 +1839,7 @@ static int ggtt_probe_common(struct drm_device *dev,
 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
  * bits. When using advanced contexts each context stores its own PAT, but
  * writing this data shouldn't be harmful even in those cases. */
-static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
 {
     uint64_t pat;
 
@@ -1816,6 +1858,33 @@ static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv)
     I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
 }
 
+static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
+{
+    uint64_t pat;
+
+    /*
+     * Map WB on BDW to snooped on CHV.
+     *
+     * Only the snoop bit has meaning for CHV, the rest is
+     * ignored.
+     *
+     * Note that the hardware enforces snooping for all page
+     * table accesses. The snoop bit is actually ignored for
+     * PDEs.
+     */
+    pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+          GEN8_PPAT(1, 0) |
+          GEN8_PPAT(2, 0) |
+          GEN8_PPAT(3, 0) |
+          GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+          GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+          GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+          GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+    I915_WRITE(GEN8_PRIVATE_PAT, pat);
+    I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
+}
+
 static int gen8_gmch_probe(struct drm_device *dev,
                            size_t *gtt_total,
                            size_t *stolen,
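For reference, a minimal sketch of how the eight per-index attributes above pack into the two 32-bit GEN8_PRIVATE_PAT writes. This is an assumption-labelled illustration, not driver code: the GEN8_PPAT() definition below (attribute x placed in byte i) mirrors the one in i915_gem_gtt.h.

#include <stdint.h>
#include <stdio.h>

#define CHV_PPAT_SNOOP  (1 << 6)
#define GEN8_PPAT(i, x) ((uint64_t)(x) << ((i) * 8))  /* one byte per index */

int main(void)
{
    uint64_t pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
                   GEN8_PPAT(1, 0) |
                   GEN8_PPAT(2, 0) |
                   GEN8_PPAT(3, 0) |
                   GEN8_PPAT(4, CHV_PPAT_SNOOP) |
                   GEN8_PPAT(5, CHV_PPAT_SNOOP) |
                   GEN8_PPAT(6, CHV_PPAT_SNOOP) |
                   GEN8_PPAT(7, CHV_PPAT_SNOOP);

    /* Low dword goes to GEN8_PRIVATE_PAT, high dword to GEN8_PRIVATE_PAT + 4 */
    printf("lo=0x%08x hi=0x%08x\n",
           (unsigned)(uint32_t)pat, (unsigned)(uint32_t)(pat >> 32));
    return 0;
}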
@@ -1836,12 +1905,20 @@ static int gen8_gmch_probe(struct drm_device *dev,
 
     pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
-    *stolen = gen8_get_stolen_size(snb_gmch_ctl);
+    if (IS_CHERRYVIEW(dev)) {
+        *stolen = chv_get_stolen_size(snb_gmch_ctl);
+        gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
+    } else {
+        *stolen = gen8_get_stolen_size(snb_gmch_ctl);
+        gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+    }
 
-    gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
     *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
 
-    gen8_setup_private_ppat(dev_priv);
+    if (IS_CHERRYVIEW(dev))
+        chv_setup_private_ppat(dev_priv);
+    else
+        bdw_setup_private_ppat(dev_priv);
 
     ret = ggtt_probe_common(dev, gtt_size);
 
drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -95,6 +95,7 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 #define PPAT_CACHED_INDEX           _PAGE_PAT /* WB LLCeLLC */
 #define PPAT_DISPLAY_ELLC_INDEX     _PAGE_PCD /* WT eLLC */
 
+#define CHV_PPAT_SNOOP              (1<<6)
 #define GEN8_PPAT_AGE(x)            (x<<4)
 #define GEN8_PPAT_LLCeLLC           (3<<2)
 #define GEN8_PPAT_LLCELLC           (2<<2)
@@ -256,11 +257,11 @@ struct i915_hw_ppgtt {
         dma_addr_t *gen8_pt_dma_addr[4];
     };
 
-    struct i915_hw_context *ctx;
+    struct intel_context *ctx;
 
     int (*enable)(struct i915_hw_ppgtt *ppgtt);
     int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
-                     struct intel_ring_buffer *ring,
+                     struct intel_engine_cs *ring,
                      bool synchronous);
     void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
198	drivers/gpu/drm/i915/i915_gem_render_state.c	Normal file
@@ -0,0 +1,198 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Mika Kuoppala <mika.kuoppala@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_renderstate.h"
+
+struct i915_render_state {
+    struct drm_i915_gem_object *obj;
+    unsigned long ggtt_offset;
+    void *batch;
+    u32 size;
+    u32 len;
+};
+
+static struct i915_render_state *render_state_alloc(struct drm_device *dev)
+{
+    struct i915_render_state *so;
+    struct page *page;
+    int ret;
+
+    so = kzalloc(sizeof(*so), GFP_KERNEL);
+    if (!so)
+        return ERR_PTR(-ENOMEM);
+
+    so->obj = i915_gem_alloc_object(dev, 4096);
+    if (so->obj == NULL) {
+        ret = -ENOMEM;
+        goto free;
+    }
+    so->size = 4096;
+
+    ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
+    if (ret)
+        goto free_gem;
+
+    BUG_ON(so->obj->pages->nents != 1);
+    page = sg_page(so->obj->pages->sgl);
+
+    so->batch = kmap(page);
+    if (!so->batch) {
+        ret = -ENOMEM;
+        goto unpin;
+    }
+
+    so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
+
+    return so;
+unpin:
+    i915_gem_object_ggtt_unpin(so->obj);
+free_gem:
+    drm_gem_object_unreference(&so->obj->base);
+free:
+    kfree(so);
+    return ERR_PTR(ret);
+}
+
+static void render_state_free(struct i915_render_state *so)
+{
+    kunmap(so->batch);
+    i915_gem_object_ggtt_unpin(so->obj);
+    drm_gem_object_unreference(&so->obj->base);
+    kfree(so);
+}
+
+static const struct intel_renderstate_rodata *
+render_state_get_rodata(struct drm_device *dev, const int gen)
+{
+    switch (gen) {
+    case 6:
+        return &gen6_null_state;
+    case 7:
+        return &gen7_null_state;
+    case 8:
+        return &gen8_null_state;
+    }
+
+    return NULL;
+}
+
+static int render_state_setup(const int gen,
+                              const struct intel_renderstate_rodata *rodata,
+                              struct i915_render_state *so)
+{
+    const u64 goffset = i915_gem_obj_ggtt_offset(so->obj);
+    u32 reloc_index = 0;
+    u32 * const d = so->batch;
+    unsigned int i = 0;
+    int ret;
+
+    if (!rodata || rodata->batch_items * 4 > so->size)
+        return -EINVAL;
+
+    ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
+    if (ret)
+        return ret;
+
+    while (i < rodata->batch_items) {
+        u32 s = rodata->batch[i];
+
+        if (reloc_index < rodata->reloc_items &&
+            i * 4 == rodata->reloc[reloc_index]) {
+
+            s += goffset & 0xffffffff;
+
+            /* We keep batch offsets max 32bit */
+            if (gen >= 8) {
+                if (i + 1 >= rodata->batch_items ||
+                    rodata->batch[i + 1] != 0)
+                    return -EINVAL;
+
+                d[i] = s;
+                i++;
+                s = (goffset & 0xffffffff00000000ull) >> 32;
+            }
+
+            reloc_index++;
+        }
+
+        d[i] = s;
+        i++;
+    }
+
+    ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
+    if (ret)
+        return ret;
+
+    if (rodata->reloc_items != reloc_index) {
+        DRM_ERROR("not all relocs resolved, %d out of %d\n",
+                  reloc_index, rodata->reloc_items);
+        return -EINVAL;
+    }
+
+    so->len = rodata->batch_items * 4;
+
+    return 0;
+}
+
+int i915_gem_render_state_init(struct intel_engine_cs *ring)
+{
+    const int gen = INTEL_INFO(ring->dev)->gen;
+    struct i915_render_state *so;
+    const struct intel_renderstate_rodata *rodata;
+    int ret;
+
+    if (WARN_ON(ring->id != RCS))
+        return -ENOENT;
+
+    rodata = render_state_get_rodata(ring->dev, gen);
+    if (rodata == NULL)
+        return 0;
+
+    so = render_state_alloc(ring->dev);
+    if (IS_ERR(so))
+        return PTR_ERR(so);
+
+    ret = render_state_setup(gen, rodata, so);
+    if (ret)
+        goto out;
+
+    ret = ring->dispatch_execbuffer(ring,
+                                    i915_gem_obj_ggtt_offset(so->obj),
+                                    so->len,
+                                    I915_DISPATCH_SECURE);
+    if (ret)
+        goto out;
+
+    i915_vma_move_to_active(i915_gem_obj_to_ggtt(so->obj), ring);
+
+    ret = __i915_add_request(ring, NULL, so->obj, NULL);
+    /* __i915_add_request moves object to inactive if it fails */
+out:
+    render_state_free(so);
+    return ret;
+}
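The reloc loop in render_state_setup() above patches the golden-state batch in place; on gen8 each relocation occupies two consecutive dwords (low 32 bits of the GGTT offset, then the high 32 bits, which must have been zero in the rodata). A minimal sketch of that split outside the driver, with a made-up offset value:

#include <assert.h>
#include <stdint.h>

/* Split a GGTT offset across two reloc dwords the way render_state_setup()
 * does for gen >= 8; 'base' is the value pre-stored at the reloc site. */
static void write_gen8_reloc(uint32_t *d, unsigned int i,
                             uint32_t base, uint64_t ggtt_offset)
{
    d[i] = base + (uint32_t)(ggtt_offset & 0xffffffff);
    d[i + 1] = (uint32_t)(ggtt_offset >> 32);
}

int main(void)
{
    uint32_t batch[2] = { 0x40, 0 };

    write_gen8_reloc(batch, 0, batch[0], 0x100001000ull);
    assert(batch[0] == 0x40 + 0x1000);  /* low dword, biased by rodata value */
    assert(batch[1] == 0x1);            /* high dword */
    return 0;
}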
711	drivers/gpu/drm/i915/i915_gem_userptr.c	Normal file
@@ -0,0 +1,711 @@
+/*
+ * Copyright © 2012-2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+#include <linux/mmu_context.h>
+#include <linux/mmu_notifier.h>
+#include <linux/mempolicy.h>
+#include <linux/swap.h>
+
+#if defined(CONFIG_MMU_NOTIFIER)
+#include <linux/interval_tree.h>
+
+struct i915_mmu_notifier {
+    spinlock_t lock;
+    struct hlist_node node;
+    struct mmu_notifier mn;
+    struct rb_root objects;
+    struct drm_device *dev;
+    struct mm_struct *mm;
+    struct work_struct work;
+    unsigned long count;
+    unsigned long serial;
+};
+
+struct i915_mmu_object {
+    struct i915_mmu_notifier *mmu;
+    struct interval_tree_node it;
+    struct drm_i915_gem_object *obj;
+};
+
+static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
+                                                       struct mm_struct *mm,
+                                                       unsigned long start,
+                                                       unsigned long end)
+{
+    struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
+    struct interval_tree_node *it = NULL;
+    unsigned long serial = 0;
+
+    end--; /* interval ranges are inclusive, but invalidate range is exclusive */
+    while (start < end) {
+        struct drm_i915_gem_object *obj;
+
+        obj = NULL;
+        spin_lock(&mn->lock);
+        if (serial == mn->serial)
+            it = interval_tree_iter_next(it, start, end);
+        else
+            it = interval_tree_iter_first(&mn->objects, start, end);
+        if (it != NULL) {
+            obj = container_of(it, struct i915_mmu_object, it)->obj;
+            drm_gem_object_reference(&obj->base);
+            serial = mn->serial;
+        }
+        spin_unlock(&mn->lock);
+        if (obj == NULL)
+            return;
+
+        mutex_lock(&mn->dev->struct_mutex);
+        /* Cancel any active worker and force us to re-evaluate gup */
+        obj->userptr.work = NULL;
+
+        if (obj->pages != NULL) {
+            struct drm_i915_private *dev_priv = to_i915(mn->dev);
+            struct i915_vma *vma, *tmp;
+            bool was_interruptible;
+
+            was_interruptible = dev_priv->mm.interruptible;
+            dev_priv->mm.interruptible = false;
+
+            list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+                int ret = i915_vma_unbind(vma);
+                WARN_ON(ret && ret != -EIO);
+            }
+            WARN_ON(i915_gem_object_put_pages(obj));
+
+            dev_priv->mm.interruptible = was_interruptible;
+        }
+
+        start = obj->userptr.ptr + obj->base.size;
+
+        drm_gem_object_unreference(&obj->base);
+        mutex_unlock(&mn->dev->struct_mutex);
+    }
+}
+
+static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
+    .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
+};
+
+static struct i915_mmu_notifier *
+__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
+{
+    struct drm_i915_private *dev_priv = to_i915(dev);
+    struct i915_mmu_notifier *mmu;
+
+    /* Protected by dev->struct_mutex */
+    hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
+        if (mmu->mm == mm)
+            return mmu;
+
+    return NULL;
+}
+
+static struct i915_mmu_notifier *
+i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
+{
+    struct drm_i915_private *dev_priv = to_i915(dev);
+    struct i915_mmu_notifier *mmu;
+    int ret;
+
+    lockdep_assert_held(&dev->struct_mutex);
+
+    mmu = __i915_mmu_notifier_lookup(dev, mm);
+    if (mmu)
+        return mmu;
+
+    mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
+    if (mmu == NULL)
+        return ERR_PTR(-ENOMEM);
+
+    spin_lock_init(&mmu->lock);
+    mmu->dev = dev;
+    mmu->mn.ops = &i915_gem_userptr_notifier;
+    mmu->mm = mm;
+    mmu->objects = RB_ROOT;
+    mmu->count = 0;
+    mmu->serial = 0;
+
+    /* Protected by mmap_sem (write-lock) */
+    ret = __mmu_notifier_register(&mmu->mn, mm);
+    if (ret) {
+        kfree(mmu);
+        return ERR_PTR(ret);
+    }
+
+    /* Protected by dev->struct_mutex */
+    hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
+    return mmu;
+}
+
+static void
+__i915_mmu_notifier_destroy_worker(struct work_struct *work)
+{
+    struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
+    mmu_notifier_unregister(&mmu->mn, mmu->mm);
+    kfree(mmu);
+}
+
+static void
+__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
+{
+    lockdep_assert_held(&mmu->dev->struct_mutex);
+
+    /* Protected by dev->struct_mutex */
+    hash_del(&mmu->node);
+
+    /* Our lock ordering is: mmap_sem, mmu_notifier_srcu, struct_mutex.
+     * We enter the function holding struct_mutex, therefore we need
+     * to drop our mutex prior to calling mmu_notifier_unregister in
+     * order to prevent lock inversion (and system-wide deadlock)
+     * between the mmap_sem and struct-mutex. Hence we defer the
+     * unregistration to a workqueue where we hold no locks.
+     */
+    INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
+    schedule_work(&mmu->work);
+}
+
+static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
+{
+    if (++mmu->serial == 0)
+        mmu->serial = 1;
+}
+
+static void
+i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
+                      struct i915_mmu_object *mn)
+{
+    lockdep_assert_held(&mmu->dev->struct_mutex);
+
+    spin_lock(&mmu->lock);
+    interval_tree_remove(&mn->it, &mmu->objects);
+    __i915_mmu_notifier_update_serial(mmu);
+    spin_unlock(&mmu->lock);
+
+    /* Protected against _add() by dev->struct_mutex */
+    if (--mmu->count == 0)
+        __i915_mmu_notifier_destroy(mmu);
+}
+
+static int
+i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
+                      struct i915_mmu_object *mn)
+{
+    struct interval_tree_node *it;
+    int ret;
+
+    ret = i915_mutex_lock_interruptible(mmu->dev);
+    if (ret)
+        return ret;
+
+    /* Make sure we drop the final active reference (and thereby
+     * remove the objects from the interval tree) before we do
+     * the check for overlapping objects.
+     */
+    i915_gem_retire_requests(mmu->dev);
+
+    /* Disallow overlapping userptr objects */
+    spin_lock(&mmu->lock);
+    it = interval_tree_iter_first(&mmu->objects,
+                                  mn->it.start, mn->it.last);
+    if (it) {
+        struct drm_i915_gem_object *obj;
+
+        /* We only need to check the first object in the range as it
+         * either has cancelled gup work queued and we need to
+         * return back to the user to give time for the gup-workers
+         * to flush their object references upon which the object will
+         * be removed from the interval-tree, or the range is
+         * still in use by another client and the overlap is invalid.
+         */
+
+        obj = container_of(it, struct i915_mmu_object, it)->obj;
+        ret = obj->userptr.workers ? -EAGAIN : -EINVAL;
+    } else {
+        interval_tree_insert(&mn->it, &mmu->objects);
+        __i915_mmu_notifier_update_serial(mmu);
+        ret = 0;
+    }
+    spin_unlock(&mmu->lock);
+    mutex_unlock(&mmu->dev->struct_mutex);
+
+    return ret;
+}
+
+static void
+i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
+{
+    struct i915_mmu_object *mn;
+
+    mn = obj->userptr.mn;
+    if (mn == NULL)
+        return;
+
+    i915_mmu_notifier_del(mn->mmu, mn);
+    obj->userptr.mn = NULL;
+}
+
+static int
+i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
+                                    unsigned flags)
+{
+    struct i915_mmu_notifier *mmu;
+    struct i915_mmu_object *mn;
+    int ret;
+
+    if (flags & I915_USERPTR_UNSYNCHRONIZED)
+        return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
+
+    down_write(&obj->userptr.mm->mmap_sem);
+    ret = i915_mutex_lock_interruptible(obj->base.dev);
+    if (ret == 0) {
+        mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
+        if (!IS_ERR(mmu))
+            mmu->count++; /* preemptive add to act as a refcount */
+        else
+            ret = PTR_ERR(mmu);
+        mutex_unlock(&obj->base.dev->struct_mutex);
+    }
+    up_write(&obj->userptr.mm->mmap_sem);
+    if (ret)
+        return ret;
+
+    mn = kzalloc(sizeof(*mn), GFP_KERNEL);
+    if (mn == NULL) {
+        ret = -ENOMEM;
+        goto destroy_mmu;
+    }
+
+    mn->mmu = mmu;
+    mn->it.start = obj->userptr.ptr;
+    mn->it.last = mn->it.start + obj->base.size - 1;
+    mn->obj = obj;
+
+    ret = i915_mmu_notifier_add(mmu, mn);
+    if (ret)
+        goto free_mn;
+
+    obj->userptr.mn = mn;
+    return 0;
+
+free_mn:
+    kfree(mn);
+destroy_mmu:
+    mutex_lock(&obj->base.dev->struct_mutex);
+    if (--mmu->count == 0)
+        __i915_mmu_notifier_destroy(mmu);
+    mutex_unlock(&obj->base.dev->struct_mutex);
+    return ret;
+}
+
+#else
+
+static void
+i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
+{
+}
+
+static int
+i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
+                                    unsigned flags)
+{
+    if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
+        return -ENODEV;
+
+    if (!capable(CAP_SYS_ADMIN))
+        return -EPERM;
+
+    return 0;
+}
+#endif
+
+struct get_pages_work {
+    struct work_struct work;
+    struct drm_i915_gem_object *obj;
+    struct task_struct *task;
+};
+
+
+#if IS_ENABLED(CONFIG_SWIOTLB)
+#define swiotlb_active() swiotlb_nr_tbl()
+#else
+#define swiotlb_active() 0
+#endif
+
+static int
+st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
+{
+    struct scatterlist *sg;
+    int ret, n;
+
+    *st = kmalloc(sizeof(**st), GFP_KERNEL);
+    if (*st == NULL)
+        return -ENOMEM;
+
+    if (swiotlb_active()) {
+        ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
+        if (ret)
+            goto err;
+
+        for_each_sg((*st)->sgl, sg, num_pages, n)
+            sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
+    } else {
+        ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
+                                        0, num_pages << PAGE_SHIFT,
+                                        GFP_KERNEL);
+        if (ret)
+            goto err;
+    }
+
+    return 0;
+
+err:
+    kfree(*st);
+    *st = NULL;
+    return ret;
+}
+
+static void
+__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
+{
+    struct get_pages_work *work = container_of(_work, typeof(*work), work);
+    struct drm_i915_gem_object *obj = work->obj;
+    struct drm_device *dev = obj->base.dev;
+    const int num_pages = obj->base.size >> PAGE_SHIFT;
+    struct page **pvec;
+    int pinned, ret;
+
+    ret = -ENOMEM;
+    pinned = 0;
+
+    pvec = kmalloc(num_pages*sizeof(struct page *),
+                   GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+    if (pvec == NULL)
+        pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
+    if (pvec != NULL) {
+        struct mm_struct *mm = obj->userptr.mm;
+
+        down_read(&mm->mmap_sem);
+        while (pinned < num_pages) {
+            ret = get_user_pages(work->task, mm,
+                                 obj->userptr.ptr + pinned * PAGE_SIZE,
+                                 num_pages - pinned,
+                                 !obj->userptr.read_only, 0,
+                                 pvec + pinned, NULL);
+            if (ret < 0)
+                break;
+
+            pinned += ret;
+        }
+        up_read(&mm->mmap_sem);
+    }
+
+    mutex_lock(&dev->struct_mutex);
+    if (obj->userptr.work != &work->work) {
+        ret = 0;
+    } else if (pinned == num_pages) {
+        ret = st_set_pages(&obj->pages, pvec, num_pages);
+        if (ret == 0) {
+            list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
+            pinned = 0;
+        }
+    }
+
+    obj->userptr.work = ERR_PTR(ret);
+    obj->userptr.workers--;
+    drm_gem_object_unreference(&obj->base);
+    mutex_unlock(&dev->struct_mutex);
+
+    release_pages(pvec, pinned, 0);
+    drm_free_large(pvec);
+
+    put_task_struct(work->task);
+    kfree(work);
+}
+
+static int
+i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
+{
+    const int num_pages = obj->base.size >> PAGE_SHIFT;
+    struct page **pvec;
+    int pinned, ret;
+
+    /* If userspace should engineer that these pages are replaced in
+     * the vma between us binding this page into the GTT and completion
+     * of rendering... Their loss. If they change the mapping of their
+     * pages they need to create a new bo to point to the new vma.
+     *
+     * However, that still leaves open the possibility of the vma
+     * being copied upon fork. Which falls under the same userspace
+     * synchronisation issue as a regular bo, except that this time
+     * the process may not be expecting that a particular piece of
+     * memory is tied to the GPU.
+     *
+     * Fortunately, we can hook into the mmu_notifier in order to
+     * discard the page references prior to anything nasty happening
+     * to the vma (discard or cloning) which should prevent the more
+     * egregious cases from causing harm.
+     */
+
+    pvec = NULL;
+    pinned = 0;
+    if (obj->userptr.mm == current->mm) {
+        pvec = kmalloc(num_pages*sizeof(struct page *),
+                       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+        if (pvec == NULL) {
+            pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
+            if (pvec == NULL)
+                return -ENOMEM;
+        }
+
+        pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
+                                       !obj->userptr.read_only, pvec);
+    }
+    if (pinned < num_pages) {
+        if (pinned < 0) {
+            ret = pinned;
+            pinned = 0;
+        } else {
+            /* Spawn a worker so that we can acquire the
+             * user pages without holding our mutex. Access
+             * to the user pages requires mmap_sem, and we have
+             * a strict lock ordering of mmap_sem, struct_mutex -
+             * we already hold struct_mutex here and so cannot
+             * call gup without encountering a lock inversion.
+             *
+             * Userspace will keep on repeating the operation
+             * (thanks to EAGAIN) until either we hit the fast
+             * path or the worker completes. If the worker is
+             * cancelled or superseded, the task is still run
+             * but the results ignored. (This leads to
+             * complications that we may have a stray object
+             * refcount that we need to be wary of when
+             * checking for existing objects during creation.)
+             * If the worker encounters an error, it reports
+             * that error back to this function through
+             * obj->userptr.work = ERR_PTR.
+             */
+            ret = -EAGAIN;
+            if (obj->userptr.work == NULL &&
+                obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
+                struct get_pages_work *work;
+
+                work = kmalloc(sizeof(*work), GFP_KERNEL);
+                if (work != NULL) {
+                    obj->userptr.work = &work->work;
+                    obj->userptr.workers++;
+
+                    work->obj = obj;
+                    drm_gem_object_reference(&obj->base);
+
+                    work->task = current;
+                    get_task_struct(work->task);
+
+                    INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
+                    schedule_work(&work->work);
+                } else
+                    ret = -ENOMEM;
+            } else {
+                if (IS_ERR(obj->userptr.work)) {
+                    ret = PTR_ERR(obj->userptr.work);
+                    obj->userptr.work = NULL;
+                }
+            }
+        }
+    } else {
+        ret = st_set_pages(&obj->pages, pvec, num_pages);
+        if (ret == 0) {
+            obj->userptr.work = NULL;
+            pinned = 0;
+        }
+    }
+
+    release_pages(pvec, pinned, 0);
+    drm_free_large(pvec);
+    return ret;
+}
+
+static void
+i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
+{
+    struct scatterlist *sg;
+    int i;
+
+    BUG_ON(obj->userptr.work != NULL);
+
+    if (obj->madv != I915_MADV_WILLNEED)
+        obj->dirty = 0;
+
+    for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
+        struct page *page = sg_page(sg);
+
+        if (obj->dirty)
+            set_page_dirty(page);
+
+        mark_page_accessed(page);
+        page_cache_release(page);
+    }
+    obj->dirty = 0;
+
+    sg_free_table(obj->pages);
+    kfree(obj->pages);
+}
+
+static void
+i915_gem_userptr_release(struct drm_i915_gem_object *obj)
+{
+    i915_gem_userptr_release__mmu_notifier(obj);
+
+    if (obj->userptr.mm) {
+        mmput(obj->userptr.mm);
+        obj->userptr.mm = NULL;
+    }
+}
+
+static int
+i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
+{
+    if (obj->userptr.mn)
+        return 0;
+
+    return i915_gem_userptr_init__mmu_notifier(obj, 0);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
+    .dmabuf_export = i915_gem_userptr_dmabuf_export,
+    .get_pages = i915_gem_userptr_get_pages,
+    .put_pages = i915_gem_userptr_put_pages,
+    .release = i915_gem_userptr_release,
+};
+
+/**
+ * Creates a new mm object that wraps some normal memory from the process
+ * context - user memory.
+ *
+ * We impose several restrictions upon the memory being mapped
+ * into the GPU.
+ * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
+ * 2. It cannot overlap any other userptr object in the same address space.
+ * 3. It must be normal system memory, not a pointer into another map of IO
+ *    space (e.g. it must not be a GTT mmapping of another object).
+ * 4. We only allow a bo as large as we could in theory map into the GTT,
+ *    that is we limit the size to the total size of the GTT.
+ * 5. The bo is marked as being snoopable. The backing pages are left
+ *    accessible directly by the CPU, but reads and writes by the GPU may
+ *    incur the cost of a snoop (unless you have an LLC architecture).
+ *
+ * Synchronisation between multiple users and the GPU is left to userspace
+ * through the normal set-domain-ioctl. The kernel will enforce that the
+ * GPU relinquishes the VMA before it is returned back to the system
+ * i.e. upon free(), munmap() or process termination. However, the userspace
+ * malloc() library may not immediately relinquish the VMA after free() and
+ * instead reuse it whilst the GPU is still reading and writing to the VMA.
+ * Caveat emptor.
+ *
+ * Also note, that the object created here is not currently a "first class"
+ * object, in that several ioctls are banned. These are the CPU access
+ * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
+ * direct access via your pointer rather than use those ioctls.
+ *
+ * If you think this is a good interface to use to pass GPU memory between
+ * drivers, please use dma-buf instead. In fact, wherever possible use
+ * dma-buf instead.
+ */
+int
+i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_gem_userptr *args = data;
+    struct drm_i915_gem_object *obj;
+    int ret;
+    u32 handle;
+
+    if (args->flags & ~(I915_USERPTR_READ_ONLY |
+                        I915_USERPTR_UNSYNCHRONIZED))
+        return -EINVAL;
+
+    if (offset_in_page(args->user_ptr | args->user_size))
+        return -EINVAL;
+
+    if (args->user_size > dev_priv->gtt.base.total)
+        return -E2BIG;
+
+    if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
+                   (char __user *)(unsigned long)args->user_ptr, args->user_size))
+        return -EFAULT;
+
+    if (args->flags & I915_USERPTR_READ_ONLY) {
+        /* On almost all of the current hw, we cannot tell the GPU that a
+         * page is readonly, so this is just a placeholder in the uAPI.
+         */
+        return -ENODEV;
+    }
+
+    /* Allocate the new object */
+    obj = i915_gem_object_alloc(dev);
+    if (obj == NULL)
+        return -ENOMEM;
+
+    drm_gem_private_object_init(dev, &obj->base, args->user_size);
+    i915_gem_object_init(obj, &i915_gem_userptr_ops);
+    obj->cache_level = I915_CACHE_LLC;
+    obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+    obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+
+    obj->userptr.ptr = args->user_ptr;
+    obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
+
+    /* And keep a pointer to the current->mm for resolving the user pages
+     * at binding. This means that we need to hook into the mmu_notifier
+     * in order to detect if the mmu is destroyed.
+     */
+    ret = -ENOMEM;
+    if ((obj->userptr.mm = get_task_mm(current)))
+        ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
+    if (ret == 0)
+        ret = drm_gem_handle_create(file, &obj->base, &handle);
+
+    /* drop reference from allocate - handle holds it now */
+    drm_gem_object_unreference_unlocked(&obj->base);
+    if (ret)
+        return ret;
+
+    args->handle = handle;
+    return 0;
+}
+
+int
+i915_gem_init_userptr(struct drm_device *dev)
+{
+#if defined(CONFIG_MMU_NOTIFIER)
+    struct drm_i915_private *dev_priv = to_i915(dev);
+    hash_init(dev_priv->mmu_notifiers);
+#endif
+    return 0;
+}
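A hedged sketch of how userspace would drive the new ioctl, assuming the matching uapi additions from this series (struct drm_i915_gem_userptr and DRM_IOCTL_I915_GEM_USERPTR, here taken from libdrm's drm/i915_drm.h); 'fd' is an already-open DRM fd for an i915 device, and the buffer must satisfy the page-alignment restriction documented above:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Wrap an existing page-aligned, page-multiple-sized allocation as a
 * GEM object; returns the new handle, or 0 on error. */
static uint32_t wrap_userptr(int fd, void *ptr, uint64_t size)
{
    struct drm_i915_gem_userptr arg;

    memset(&arg, 0, sizeof(arg));
    arg.user_ptr = (uintptr_t)ptr;  /* page aligned */
    arg.user_size = size;           /* multiple of the page size */
    arg.flags = 0;                  /* synchronized via the mmu_notifier */

    if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
        return 0;

    return arg.handle;
}

The returned handle can then be used in execbuffer like any other GEM handle, with the caveats from the comment block above (no mmap/pwrite/pread ioctls on it).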
drivers/gpu/drm/i915/i915_gpu_error.c
@@ -205,6 +205,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
         err_puts(m, tiling_flag(err->tiling));
         err_puts(m, dirty_flag(err->dirty));
         err_puts(m, purgeable_flag(err->purgeable));
+        err_puts(m, err->userptr ? " userptr" : "");
         err_puts(m, err->ring != -1 ? " " : "");
         err_puts(m, ring_str(err->ring));
         err_puts(m, i915_cache_level_str(err->cache_level));
@@ -641,6 +642,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
     err->tiling = obj->tiling_mode;
     err->dirty = obj->dirty;
     err->purgeable = obj->madv != I915_MADV_WILLNEED;
+    err->userptr = obj->userptr.mm != NULL;
     err->ring = obj->ring ? obj->ring->id : -1;
     err->cache_level = obj->cache_level;
 }
@@ -745,7 +747,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
 }
 
 static void i915_record_ring_state(struct drm_device *dev,
-                                   struct intel_ring_buffer *ring,
+                                   struct intel_engine_cs *ring,
                                    struct drm_i915_error_ring *ering)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
@@ -823,8 +825,8 @@ static void i915_record_ring_state(struct drm_device *dev,
         ering->hws = I915_READ(mmio);
     }
 
-    ering->cpu_ring_head = ring->head;
-    ering->cpu_ring_tail = ring->tail;
+    ering->cpu_ring_head = ring->buffer->head;
+    ering->cpu_ring_tail = ring->buffer->tail;
 
     ering->hangcheck_score = ring->hangcheck.score;
     ering->hangcheck_action = ring->hangcheck.action;
@@ -857,7 +859,7 @@ static void i915_record_ring_state(struct drm_device *dev,
 }
 
 
-static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
+static void i915_gem_record_active_context(struct intel_engine_cs *ring,
                                            struct drm_i915_error_state *error,
                                            struct drm_i915_error_ring *ering)
 {
@@ -884,7 +886,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
     int i, count;
 
     for (i = 0; i < I915_NUM_RINGS; i++) {
-        struct intel_ring_buffer *ring = &dev_priv->ring[i];
+        struct intel_engine_cs *ring = &dev_priv->ring[i];
 
         if (ring->dev == NULL)
             continue;
@@ -928,7 +930,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
         }
 
         error->ring[i].ringbuffer =
-            i915_error_ggtt_object_create(dev_priv, ring->obj);
+            i915_error_ggtt_object_create(dev_priv, ring->buffer->obj);
 
         if (ring->status_page.obj)
             error->ring[i].hws_page =
drivers/gpu/drm/i915/i915_irq.c
@@ -248,6 +248,46 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
     return true;
 }
 
+/**
+ * bdw_update_pm_irq - update GT interrupt 2
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ *
+ * Copied from the snb function, updated with relevant register offsets
+ */
+static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
+                              uint32_t interrupt_mask,
+                              uint32_t enabled_irq_mask)
+{
+    uint32_t new_val;
+
+    assert_spin_locked(&dev_priv->irq_lock);
+
+    if (WARN_ON(dev_priv->pm.irqs_disabled))
+        return;
+
+    new_val = dev_priv->pm_irq_mask;
+    new_val &= ~interrupt_mask;
+    new_val |= (~enabled_irq_mask & interrupt_mask);
+
+    if (new_val != dev_priv->pm_irq_mask) {
+        dev_priv->pm_irq_mask = new_val;
+        I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
+        POSTING_READ(GEN8_GT_IMR(2));
+    }
+}
+
+void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+    bdw_update_pm_irq(dev_priv, mask, mask);
+}
+
+void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+    bdw_update_pm_irq(dev_priv, mask, 0);
+}
+
 static bool cpt_can_enable_serr_int(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
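The IMR arithmetic in bdw_update_pm_irq() above is easy to misread, since masked bits are 1 in IMR and enabling an interrupt therefore clears its bit. A standalone restatement of the new_val logic (a sketch, not driver code):

#include <assert.h>
#include <stdint.h>

/* Within 'update', enabled bits become 0 (unmasked) and disabled bits
 * become 1 (masked); bits outside 'update' keep their current value. */
static uint32_t update_pm_imr(uint32_t imr, uint32_t update, uint32_t enable)
{
    imr &= ~update;
    imr |= ~enable & update;
    return imr;
}

int main(void)
{
    /* enable bit 0, disable bit 1, leave bit 2 untouched */
    assert(update_pm_imr(0x7, 0x3, 0x1) == 0x6);
    return 0;
}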
@@ -266,16 +306,50 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
     return true;
 }
 
-static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe)
+void i9xx_check_fifo_underruns(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
+    struct intel_crtc *crtc;
+    unsigned long flags;
+
+    spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+    for_each_intel_crtc(dev, crtc) {
+        u32 reg = PIPESTAT(crtc->pipe);
+        u32 pipestat;
+
+        if (crtc->cpu_fifo_underrun_disabled)
+            continue;
+
+        pipestat = I915_READ(reg) & 0xffff0000;
+        if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+            continue;
+
+        I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+        POSTING_READ(reg);
+
+        DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
+    }
+
+    spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
+                                             enum pipe pipe, bool enable)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
     u32 reg = PIPESTAT(pipe);
-    u32 pipestat = I915_READ(reg) & 0x7fff0000;
+    u32 pipestat = I915_READ(reg) & 0xffff0000;
 
     assert_spin_locked(&dev_priv->irq_lock);
 
-    I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
-    POSTING_READ(reg);
+    if (enable) {
+        I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+        POSTING_READ(reg);
+    } else {
+        if (pipestat & PIPE_FIFO_UNDERRUN_STATUS)
+            DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+    }
 }
 
 static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -303,15 +377,11 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
 
         ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
     } else {
-        bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
-
-        /* Change the state _after_ we've read out the current one. */
         ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
 
-        if (!was_enabled &&
-            (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
-            DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
-                          pipe_name(pipe));
+        if (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+            DRM_ERROR("uncleared fifo underrun on pipe %c\n",
+                      pipe_name(pipe));
         }
     }
 }
@@ -387,16 +457,11 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
 
         ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
     } else {
-        uint32_t tmp = I915_READ(SERR_INT);
-        bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
-
-        /* Change the state _after_ we've read out the current one. */
         ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
 
-        if (!was_enabled &&
-            (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
-            DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
-                          transcoder_name(pch_transcoder));
+        if (I915_READ(SERR_INT) & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
+            DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
+                      transcoder_name(pch_transcoder));
         }
     }
 }
@@ -415,8 +480,8 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
  *
  * Returns the previous state of underrun reporting.
  */
-bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
-                                             enum pipe pipe, bool enable)
+static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+                                                    enum pipe pipe, bool enable)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
     struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
@@ -432,8 +497,8 @@ bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
 
     intel_crtc->cpu_fifo_underrun_disabled = !enable;
 
-    if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)))
-        i9xx_clear_fifo_underrun(dev, pipe);
+    if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
+        i9xx_set_fifo_underrun_reporting(dev, pipe, enable);
     else if (IS_GEN5(dev) || IS_GEN6(dev))
         ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
     else if (IS_GEN7(dev))
@@ -578,11 +643,17 @@ static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
     u32 enable_mask = status_mask << 16;
 
     /*
-     * On pipe A we don't support the PSR interrupt yet, on pipe B the
-     * same bit MBZ.
+     * On pipe A we don't support the PSR interrupt yet,
+     * on pipe B and C the same bit MBZ.
      */
     if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
         return 0;
+    /*
+     * On pipe B and C we don't support the PSR interrupt yet, on pipe
+     * A the same bit is for perf counters which we don't use either.
+     */
+    if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
+        return 0;
 
     enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
                      SPRITE0_FLIP_DONE_INT_EN_VLV |
@@ -669,6 +740,56 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
     }
 }
 
+/*
+ * This timing diagram depicts the video signal in and
+ * around the vertical blanking period.
+ *
+ * Assumptions about the fictitious mode used in this example:
+ *  vblank_start >= 3
+ *  vsync_start = vblank_start + 1
+ *  vsync_end = vblank_start + 2
+ *  vtotal = vblank_start + 3
+ *
+ *           start of vblank:
+ *           latch double buffered registers
+ *           increment frame counter (ctg+)
+ *           generate start of vblank interrupt (gen4+)
+ *           |
+ *           |          frame start:
+ *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
+ *           |          may be shifted forward 1-3 extra lines via PIPECONF
+ *           |          |
+ *           |          |  start of vsync:
+ *           |          |  generate vsync interrupt
+ *           |          |  |
+ * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
+ *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
+ * ----va---> <-----------------vb--------------------> <--------va-------------
+ *       |          |       <----vs----->                     |
+ * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
+ * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
+ * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
+ *       |          |                                         |
+ *       last visible pixel                                   first visible pixel
+ *                  |                                         increment frame counter (gen3/4)
+ *       pixel counter = vblank_start * htotal                pixel counter = 0 (gen3/4)
+ *
+ * x  = horizontal active
+ * _  = horizontal blanking
+ * hs = horizontal sync
+ * va = vertical active
+ * vb = vertical blanking
+ * vs = vertical sync
+ * vbs = vblank_start (number)
+ *
+ * Summary:
+ * - most events happen at the start of horizontal sync
+ * - frame start happens at the start of horizontal blank, 1-4 lines
+ *   (depending on PIPECONF settings) after the start of vblank
+ * - gen3/4 pixel and frame counter are synchronized with the start
+ *   of horizontal active on the first line of vertical active
+ */
+
 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
 {
     /* Gen2 doesn't have a hardware frame counter */
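A minimal restatement of the vbl_start arithmetic the following hunk introduces: the start-of-vblank event fires at the start of hsync on the preceding line, hence the htotal - hsync_start correction after converting lines to pixels. The mode timings below are made up for illustration:

#include <assert.h>

int main(void)
{
    /* made-up mode timings: pixels per line and line counts */
    int htotal = 800, hsync_start = 656, vblank_start = 480;

    /* convert line count to pixel count */
    int vbl_start = vblank_start * htotal;

    /* start of vblank event occurs at start of hsync */
    vbl_start -= htotal - hsync_start;

    assert(vbl_start == 480 * 800 - (800 - 656));
    return 0;
}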
@@ -683,7 +804,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
     struct drm_i915_private *dev_priv = dev->dev_private;
     unsigned long high_frame;
     unsigned long low_frame;
-    u32 high1, high2, low, pixel, vbl_start;
+    u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
 
     if (!i915_pipe_enabled(dev, pipe)) {
         DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -697,17 +818,28 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
         const struct drm_display_mode *mode =
             &intel_crtc->config.adjusted_mode;
 
-        vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
+        htotal = mode->crtc_htotal;
+        hsync_start = mode->crtc_hsync_start;
+        vbl_start = mode->crtc_vblank_start;
+        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+            vbl_start = DIV_ROUND_UP(vbl_start, 2);
     } else {
         enum transcoder cpu_transcoder = (enum transcoder) pipe;
-        u32 htotal;
 
         htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
+        hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
         vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
 
-        vbl_start *= htotal;
         if ((I915_READ(PIPECONF(cpu_transcoder)) &
              PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
             vbl_start = DIV_ROUND_UP(vbl_start, 2);
     }
 
+    /* Convert to pixel count */
+    vbl_start *= htotal;
+
+    /* Start of vblank event occurs at start of hsync */
+    vbl_start -= htotal - hsync_start;
+
     high_frame = PIPEFRAME(pipe);
     low_frame = PIPEFRAMEPIXEL(pipe);
 
@@ -757,9 +889,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
     struct drm_i915_private *dev_priv = dev->dev_private;
     const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
     enum pipe pipe = crtc->pipe;
-    int vtotal = mode->crtc_vtotal;
-    int position;
+    int position, vtotal;
 
+    vtotal = mode->crtc_vtotal;
     if (mode->flags & DRM_MODE_FLAG_INTERLACE)
         vtotal /= 2;
 
@@ -769,14 +901,10 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
     position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
     /*
-     * Scanline counter increments at leading edge of hsync, and
-     * it starts counting from vtotal-1 on the first active line.
-     * That means the scanline counter value is always one less
-     * than what we would expect. Ie. just after start of vblank,
-     * which also occurs at start of hsync (on the last active line),
-     * the scanline counter will read vblank_start-1.
+     * See update_scanline_offset() for the details on the
+     * scanline_offset adjustment.
      */
-    return (position + 1) % vtotal;
+    return (position + crtc->scanline_offset) % vtotal;
 }
 
 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
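The (position + crtc->scanline_offset) % vtotal replacement above generalizes the old hard-coded +1 fixup. A toy check of the wraparound behaviour, using an assumed mode with vtotal = 525; scanline_offset = 1 reproduces the previous behaviour, where the counter reads vtotal-1 on the first active line:

#include <assert.h>

static int corrected_scanline(int hw_position, int scanline_offset, int vtotal)
{
    return (hw_position + scanline_offset) % vtotal;
}

int main(void)
{
    int vtotal = 525;

    /* hardware reads vtotal-1 on the first active line -> report line 0 */
    assert(corrected_scanline(vtotal - 1, 1, vtotal) == 0);
    /* just after start of vblank the hw reads vblank_start-1 (here 479) */
    assert(corrected_scanline(479, 1, vtotal) == 480);
    return 0;
}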
@@ -843,6 +971,18 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
         vbl_end *= htotal;
         vtotal *= htotal;
 
+        /*
+         * In interlaced modes, the pixel counter counts all pixels,
+         * so one field will have htotal more pixels. In order to avoid
+         * the reported position from jumping backwards when the pixel
+         * counter is beyond the length of the shorter field, just
+         * clamp the position to the length of the shorter field. This
+         * matches how the scanline counter based position works since
+         * the scanline counter doesn't count the two half lines.
+         */
+        if (position >= vtotal)
+            position = vtotal - 1;
+
         /*
          * Start of vblank interrupt is triggered at start of hsync,
          * just prior to the first active line of vblank. However we
@@ -1077,9 +1217,9 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 }
 
 static void notify_ring(struct drm_device *dev,
-                        struct intel_ring_buffer *ring)
+                        struct intel_engine_cs *ring)
 {
-    if (ring->obj == NULL)
+    if (!intel_ring_initialized(ring))
         return;
 
     trace_i915_gem_request_complete(ring);
@@ -1098,8 +1238,12 @@ static void gen6_pm_rps_work(struct work_struct *work)
     spin_lock_irq(&dev_priv->irq_lock);
     pm_iir = dev_priv->rps.pm_iir;
     dev_priv->rps.pm_iir = 0;
-    /* Make sure not to corrupt PMIMR state used by ringbuffer code */
-    snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+    if (IS_BROADWELL(dev_priv->dev))
+        bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+    else {
+        /* Make sure not to corrupt PMIMR state used by ringbuffer */
+        snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+    }
     spin_unlock_irq(&dev_priv->irq_lock);
 
     /* Make sure we didn't queue anything we're not going to process. */
@@ -1296,6 +1440,19 @@ static void snb_gt_irq_handler(struct drm_device *dev,
         ivybridge_parity_error_irq_handler(dev, gt_iir);
 }
 
+static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
+{
+    if ((pm_iir & dev_priv->pm_rps_events) == 0)
+        return;
+
+    spin_lock(&dev_priv->irq_lock);
+    dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
+    bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+    spin_unlock(&dev_priv->irq_lock);
+
+    queue_work(dev_priv->wq, &dev_priv->rps.work);
+}
+
 static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
                                        struct drm_i915_private *dev_priv,
                                        u32 master_ctl)
@@ -1334,6 +1491,17 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
             DRM_ERROR("The master control interrupt lied (GT1)!\n");
     }
 
+    if (master_ctl & GEN8_GT_PM_IRQ) {
+        tmp = I915_READ(GEN8_GT_IIR(2));
+        if (tmp & dev_priv->pm_rps_events) {
+            ret = IRQ_HANDLED;
+            gen8_rps_irq_handler(dev_priv, tmp);
+            I915_WRITE(GEN8_GT_IIR(2),
+                       tmp & dev_priv->pm_rps_events);
+        } else
+            DRM_ERROR("The master control interrupt lied (PM)!\n");
+    }
+
     if (master_ctl & GEN8_GT_VECS_IRQ) {
         tmp = I915_READ(GEN8_GT_IIR(3));
         if (tmp) {
@@ -1598,6 +1766,9 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
         case PIPE_B:
             iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
             break;
+        case PIPE_C:
+            iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+            break;
         }
         if (iir & iir_bit)
             mask |= dev_priv->pipestat_irq_mask[pipe];
@@ -1668,7 +1839,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
 
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
-    struct drm_device *dev = (struct drm_device *) arg;
+    struct drm_device *dev = arg;
     struct drm_i915_private *dev_priv = dev->dev_private;
     u32 iir, gt_iir, pm_iir;
     irqreturn_t ret = IRQ_NONE;
@@ -1703,6 +1874,40 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
     return ret;
 }
 
+static irqreturn_t cherryview_irq_handler(int irq, void *arg)
+{
+    struct drm_device *dev = arg;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    u32 master_ctl, iir;
+    irqreturn_t ret = IRQ_NONE;
+
+    for (;;) {
+        master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
+        iir = I915_READ(VLV_IIR);
+
+        if (master_ctl == 0 && iir == 0)
+            break;
+
+        I915_WRITE(GEN8_MASTER_IRQ, 0);
+
+        gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+
+        valleyview_pipestat_irq_handler(dev, iir);
+
+        /* Consume port. Then clear IIR or we'll miss events */
+        i9xx_hpd_irq_handler(dev);
+
+        I915_WRITE(VLV_IIR, iir);
+
+        I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+        POSTING_READ(GEN8_MASTER_IRQ);
+
+        ret = IRQ_HANDLED;
+    }
+
+    return ret;
+}
+
 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
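The new CHV handler above follows the usual master-interrupt dance: disable the master control, handle and ack the pending sources, then re-enable and loop until nothing is pending. A toy, self-contained model of that shape (the register names and accessors below are stand-ins, not driver API):

#include <stdbool.h>
#include <stdint.h>

enum { MASTER_IRQ, IIR, NREGS };
enum { MASTER_IRQ_CONTROL = 1u << 31 };

static uint32_t regs[NREGS];    /* toy register file standing in for MMIO */

/* Same loop shape as cherryview_irq_handler(): iterate until no source
 * is pending, with the master control disabled across each pass. */
static bool toy_irq_handler(void)
{
    bool handled = false;

    for (;;) {
        uint32_t master = regs[MASTER_IRQ] & ~MASTER_IRQ_CONTROL;
        uint32_t iir = regs[IIR];

        if (master == 0 && iir == 0)
            break;

        regs[MASTER_IRQ] = 0;                   /* mask everything */
        regs[IIR] = 0;                          /* "handle" + ack events */
        regs[MASTER_IRQ] = MASTER_IRQ_CONTROL;  /* re-enable */

        handled = true;
    }

    return handled;
}

int main(void)
{
    regs[MASTER_IRQ] = MASTER_IRQ_CONTROL | 0x2;
    regs[IIR] = 0x10;
    return toy_irq_handler() ? 0 : 1;
}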
@@ -1935,7 +2140,7 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
 
 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 {
-    struct drm_device *dev = (struct drm_device *) arg;
+    struct drm_device *dev = arg;
     struct drm_i915_private *dev_priv = dev->dev_private;
     u32 de_iir, gt_iir, de_ier, sde_ier = 0;
     irqreturn_t ret = IRQ_NONE;
@@ -2111,7 +2316,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
                                bool reset_completed)
 {
-    struct intel_ring_buffer *ring;
+    struct intel_engine_cs *ring;
     int i;
 
     /*
@@ -2544,14 +2749,14 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
 }
 
 static u32
-ring_last_seqno(struct intel_ring_buffer *ring)
+ring_last_seqno(struct intel_engine_cs *ring)
 {
     return list_entry(ring->request_list.prev,
                       struct drm_i915_gem_request, list)->seqno;
 }
 
 static bool
-ring_idle(struct intel_ring_buffer *ring, u32 seqno)
+ring_idle(struct intel_engine_cs *ring, u32 seqno)
 {
     return (list_empty(&ring->request_list) ||
             i915_seqno_passed(seqno, ring_last_seqno(ring)));
@@ -2574,11 +2779,11 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
     }
 }
 
-static struct intel_ring_buffer *
-semaphore_wait_to_signaller_ring(struct intel_ring_buffer *ring, u32 ipehr)
+static struct intel_engine_cs *
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
 {
     struct drm_i915_private *dev_priv = ring->dev->dev_private;
-    struct intel_ring_buffer *signaller;
+    struct intel_engine_cs *signaller;
     int i;
 
     if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
@@ -2606,8 +2811,8 @@ semaphore_wait_to_signaller_ring(struct intel_ring_buffer *ring, u32 ipehr)
     return NULL;
 }
 
-static struct intel_ring_buffer *
-semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
+static struct intel_engine_cs *
+semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 {
     struct drm_i915_private *dev_priv = ring->dev->dev_private;
     u32 cmd, ipehr, head;
@@ -2632,10 +2837,10 @@ semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
      * our ring is smaller than what the hardware (and hence
      * HEAD_ADDR) allows. Also handles wrap-around.
      */
-    head &= ring->size - 1;
+    head &= ring->buffer->size - 1;
 
     /* This here seems to blow up */
-    cmd = ioread32(ring->virtual_start + head);
+    cmd = ioread32(ring->buffer->virtual_start + head);
     if (cmd == ipehr)
         break;
 
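Editor's note on the masking in the hunk above: head &= ring->buffer->size - 1 is the usual cheap modulo, and it is only valid because ring sizes are powers of two. A standalone sketch (the names here are illustrative, not from the patch):

    #include <assert.h>
    #include <stdint.h>

    /* Wrap an offset into a power-of-two sized ring buffer. */
    static uint32_t ring_wrap(uint32_t head, uint32_t ring_size)
    {
        assert(ring_size && (ring_size & (ring_size - 1)) == 0);
        return head & (ring_size - 1);   /* same as head % ring_size */
    }

For example, ring_wrap(0x1010, 0x1000) yields 0x010: a head pointer one step past a 4 KiB ring wraps back to the start.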
@@ -2645,14 +2850,14 @@ semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
     if (!i)
         return NULL;
 
-    *seqno = ioread32(ring->virtual_start + head + 4) + 1;
+    *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
     return semaphore_wait_to_signaller_ring(ring, ipehr);
 }
 
-static int semaphore_passed(struct intel_ring_buffer *ring)
+static int semaphore_passed(struct intel_engine_cs *ring)
 {
     struct drm_i915_private *dev_priv = ring->dev->dev_private;
-    struct intel_ring_buffer *signaller;
+    struct intel_engine_cs *signaller;
     u32 seqno, ctl;
 
     ring->hangcheck.deadlock = true;
 
@@ -2671,7 +2876,7 @@ static int semaphore_passed(struct intel_ring_buffer *ring)
 
 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
 {
-    struct intel_ring_buffer *ring;
+    struct intel_engine_cs *ring;
     int i;
 
     for_each_ring(ring, dev_priv, i)
@@ -2679,7 +2884,7 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
 }
 
 static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_ring_buffer *ring, u64 acthd)
+ring_stuck(struct intel_engine_cs *ring, u64 acthd)
 {
     struct drm_device *dev = ring->dev;
     struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2735,7 +2940,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
 {
     struct drm_device *dev = (struct drm_device *)data;
     struct drm_i915_private *dev_priv = dev->dev_private;
-    struct intel_ring_buffer *ring;
+    struct intel_engine_cs *ring;
     int i;
     int busy_count = 0, rings_hung = 0;
     bool stuck[I915_NUM_RINGS] = { 0 };
@@ -2974,6 +3179,37 @@ static void gen8_irq_preinstall(struct drm_device *dev)
     gen8_irq_reset(dev);
 }
 
+static void cherryview_irq_preinstall(struct drm_device *dev)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    int pipe;
+
+    I915_WRITE(GEN8_MASTER_IRQ, 0);
+    POSTING_READ(GEN8_MASTER_IRQ);
+
+    GEN8_IRQ_RESET_NDX(GT, 0);
+    GEN8_IRQ_RESET_NDX(GT, 1);
+    GEN8_IRQ_RESET_NDX(GT, 2);
+    GEN8_IRQ_RESET_NDX(GT, 3);
+
+    GEN5_IRQ_RESET(GEN8_PCU_);
+
+    POSTING_READ(GEN8_PCU_IIR);
+
+    I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
+
+    I915_WRITE(PORT_HOTPLUG_EN, 0);
+    I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+    for_each_pipe(pipe)
+        I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+    I915_WRITE(VLV_IMR, 0xffffffff);
+    I915_WRITE(VLV_IER, 0x0);
+    I915_WRITE(VLV_IIR, 0xffffffff);
+    POSTING_READ(VLV_IIR);
+}
+
 static void ibx_hpd_irq_setup(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3252,6 +3488,8 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
 
     for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
         GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
+
+    dev_priv->pm_irq_mask = 0xffffffff;
 }
 
 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -3291,6 +3529,45 @@ static int gen8_irq_postinstall(struct drm_device *dev)
     return 0;
 }
 
+static int cherryview_irq_postinstall(struct drm_device *dev)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
+        I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+        I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+        I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+    u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
+        PIPE_CRC_DONE_INTERRUPT_STATUS;
+    unsigned long irqflags;
+    int pipe;
+
+    /*
+     * Leave vblank interrupts masked initially. enable/disable will
+     * toggle them based on usage.
+     */
+    dev_priv->irq_mask = ~enable_mask;
+
+    for_each_pipe(pipe)
+        I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+    spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+    i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+    for_each_pipe(pipe)
+        i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
+    spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+    I915_WRITE(VLV_IIR, 0xffffffff);
+    I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+    I915_WRITE(VLV_IER, enable_mask);
+
+    gen8_gt_irq_postinstall(dev_priv);
+
+    I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
+    POSTING_READ(GEN8_MASTER_IRQ);
+
+    return 0;
+}
+
 static void gen8_irq_uninstall(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3336,6 +3613,57 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
     POSTING_READ(VLV_IER);
 }
 
+static void cherryview_irq_uninstall(struct drm_device *dev)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    int pipe;
+
+    if (!dev_priv)
+        return;
+
+    I915_WRITE(GEN8_MASTER_IRQ, 0);
+    POSTING_READ(GEN8_MASTER_IRQ);
+
+#define GEN8_IRQ_FINI_NDX(type, which) \
+do { \
+    I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
+    I915_WRITE(GEN8_##type##_IER(which), 0); \
+    I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+    POSTING_READ(GEN8_##type##_IIR(which)); \
+    I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
+} while (0)
+
+#define GEN8_IRQ_FINI(type) \
+do { \
+    I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
+    I915_WRITE(GEN8_##type##_IER, 0); \
+    I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+    POSTING_READ(GEN8_##type##_IIR); \
+    I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
+} while (0)
+
+    GEN8_IRQ_FINI_NDX(GT, 0);
+    GEN8_IRQ_FINI_NDX(GT, 1);
+    GEN8_IRQ_FINI_NDX(GT, 2);
+    GEN8_IRQ_FINI_NDX(GT, 3);
+
+    GEN8_IRQ_FINI(PCU);
+
+#undef GEN8_IRQ_FINI
+#undef GEN8_IRQ_FINI_NDX
+
+    I915_WRITE(PORT_HOTPLUG_EN, 0);
+    I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+    for_each_pipe(pipe)
+        I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+    I915_WRITE(VLV_IMR, 0xffffffff);
+    I915_WRITE(VLV_IER, 0x0);
+    I915_WRITE(VLV_IIR, 0xffffffff);
+    POSTING_READ(VLV_IIR);
+}
+
 static void ironlake_irq_uninstall(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
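Editor's aside, not from the patch: the GEN8_IRQ_FINI* macros above write IIR, do a posting read, and then write IIR again. The likely reason (an assumption on the editor's part) is that these status registers can latch a second event behind the one currently visible, so a single ack could leave a stale event pending. As a generic sketch, with hypothetical iir_read()/iir_ack() helpers:

    extern unsigned int iir_read(void);
    extern void iir_ack(unsigned int bits);

    static void iir_clear_all(void)
    {
        iir_ack(iir_read());   /* clears the visible event */
        iir_ack(iir_read());   /* clears one that may be queued behind it */
    }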
@@ -3427,7 +3755,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
 
 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 {
-    struct drm_device *dev = (struct drm_device *) arg;
+    struct drm_device *dev = arg;
     struct drm_i915_private *dev_priv = dev->dev_private;
     u16 iir, new_iir;
     u32 pipe_stats[2];
@@ -3612,7 +3940,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
 
 static irqreturn_t i915_irq_handler(int irq, void *arg)
 {
-    struct drm_device *dev = (struct drm_device *) arg;
+    struct drm_device *dev = arg;
     struct drm_i915_private *dev_priv = dev->dev_private;
     u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
     unsigned long irqflags;
@@ -3842,7 +4170,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
 
 static irqreturn_t i965_irq_handler(int irq, void *arg)
 {
-    struct drm_device *dev = (struct drm_device *) arg;
+    struct drm_device *dev = arg;
     struct drm_i915_private *dev_priv = dev->dev_private;
     u32 iir, new_iir;
     u32 pipe_stats[I915_MAX_PIPES];
@@ -4041,7 +4369,15 @@ void intel_irq_init(struct drm_device *dev)
         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
     }
 
-    if (IS_VALLEYVIEW(dev)) {
+    if (IS_CHERRYVIEW(dev)) {
+        dev->driver->irq_handler = cherryview_irq_handler;
+        dev->driver->irq_preinstall = cherryview_irq_preinstall;
+        dev->driver->irq_postinstall = cherryview_irq_postinstall;
+        dev->driver->irq_uninstall = cherryview_irq_uninstall;
+        dev->driver->enable_vblank = valleyview_enable_vblank;
+        dev->driver->disable_vblank = valleyview_disable_vblank;
+        dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+    } else if (IS_VALLEYVIEW(dev)) {
         dev->driver->irq_handler = valleyview_irq_handler;
         dev->driver->irq_preinstall = valleyview_irq_preinstall;
         dev->driver->irq_postinstall = valleyview_irq_postinstall;
File diff suppressed because it is too large
@@ -328,8 +328,6 @@ int i915_save_state(struct drm_device *dev)
         }
     }
 
-    intel_disable_gt_powersave(dev);
-
     /* Cache mode state */
     if (INTEL_INFO(dev)->gen < 7)
         dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);

@@ -186,7 +186,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
     struct drm_minor *dminor = dev_to_drm_minor(dev);
     struct drm_device *drm_dev = dminor->dev;
     struct drm_i915_private *dev_priv = drm_dev->dev_private;
-    struct i915_hw_context *ctx;
+    struct intel_context *ctx;
     u32 *temp = NULL; /* Just here to make handling failures easy */
     int slice = (int)(uintptr_t)attr->private;
     int ret;

@@ -326,8 +326,8 @@ TRACE_EVENT(i915_gem_evict_vm,
 );
 
 TRACE_EVENT(i915_gem_ring_sync_to,
-    TP_PROTO(struct intel_ring_buffer *from,
-             struct intel_ring_buffer *to,
+    TP_PROTO(struct intel_engine_cs *from,
+             struct intel_engine_cs *to,
              u32 seqno),
     TP_ARGS(from, to, seqno),
 
@@ -352,7 +352,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 );
 
 TRACE_EVENT(i915_gem_ring_dispatch,
-    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
+    TP_PROTO(struct intel_engine_cs *ring, u32 seqno, u32 flags),
     TP_ARGS(ring, seqno, flags),
 
     TP_STRUCT__entry(
@@ -375,7 +375,7 @@ TRACE_EVENT(i915_gem_ring_dispatch,
 );
 
 TRACE_EVENT(i915_gem_ring_flush,
-    TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
+    TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
     TP_ARGS(ring, invalidate, flush),
 
     TP_STRUCT__entry(
@@ -398,7 +398,7 @@ TRACE_EVENT(i915_gem_ring_flush,
 );
 
 DECLARE_EVENT_CLASS(i915_gem_request,
-    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+    TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
     TP_ARGS(ring, seqno),
 
     TP_STRUCT__entry(
@@ -418,12 +418,12 @@ DECLARE_EVENT_CLASS(i915_gem_request,
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
-    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+    TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
     TP_ARGS(ring, seqno)
 );
 
 TRACE_EVENT(i915_gem_request_complete,
-    TP_PROTO(struct intel_ring_buffer *ring),
+    TP_PROTO(struct intel_engine_cs *ring),
     TP_ARGS(ring),
 
     TP_STRUCT__entry(
@@ -443,12 +443,12 @@ TRACE_EVENT(i915_gem_request_complete,
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
-    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+    TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
     TP_ARGS(ring, seqno)
 );
 
 TRACE_EVENT(i915_gem_request_wait_begin,
-    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+    TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
     TP_ARGS(ring, seqno),
 
     TP_STRUCT__entry(
@@ -477,12 +477,12 @@ TRACE_EVENT(i915_gem_request_wait_begin,
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
-    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+    TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
     TP_ARGS(ring, seqno)
 );
 
 DECLARE_EVENT_CLASS(i915_ring,
-    TP_PROTO(struct intel_ring_buffer *ring),
+    TP_PROTO(struct intel_engine_cs *ring),
     TP_ARGS(ring),
 
     TP_STRUCT__entry(
@@ -499,12 +499,12 @@ DECLARE_EVENT_CLASS(i915_ring,
 );
 
 DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
-    TP_PROTO(struct intel_ring_buffer *ring),
+    TP_PROTO(struct intel_engine_cs *ring),
     TP_ARGS(ring)
 );
 
 DEFINE_EVENT(i915_ring, i915_ring_wait_end,
-    TP_PROTO(struct intel_ring_buffer *ring),
+    TP_PROTO(struct intel_engine_cs *ring),
     TP_ARGS(ring)
 );
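Editor's note: the TP_PROTO churn above only changes each tracepoint's C prototype; call sites are unaffected apart from the argument type, because DECLARE_EVENT_CLASS/DEFINE_EVENT generate a trace_<event>() inline per event. A sketch of what a caller looks like after the rename (the wrapper function is illustrative, not from the patch, and assumes the usual kernel tracepoint build context):

    #include "i915_trace.h"

    static void example_emit(struct intel_engine_cs *ring, u32 seqno)
    {
        /* generated by DEFINE_EVENT(i915_gem_request, i915_gem_request_add, ...) */
        trace_i915_gem_request_add(ring, seqno);
    }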
@@ -364,55 +364,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
         DRM_ERROR("FDI link training failed!\n");
 }
 
-static void intel_ddi_mode_set(struct intel_encoder *encoder)
-{
-    struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-    int port = intel_ddi_get_encoder_port(encoder);
-    int pipe = crtc->pipe;
-    int type = encoder->type;
-    struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
-
-    DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
-                  port_name(port), pipe_name(pipe));
-
-    crtc->eld_vld = false;
-    if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
-        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-        struct intel_digital_port *intel_dig_port =
-            enc_to_dig_port(&encoder->base);
-
-        intel_dp->DP = intel_dig_port->saved_port_bits |
-                       DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
-        intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
-
-        if (intel_dp->has_audio) {
-            DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
-                             pipe_name(crtc->pipe));
-
-            /* write eld */
-            DRM_DEBUG_DRIVER("DP audio: write eld information\n");
-            intel_write_eld(&encoder->base, adjusted_mode);
-        }
-    } else if (type == INTEL_OUTPUT_HDMI) {
-        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-
-        if (intel_hdmi->has_audio) {
-            /* Proper support for digital audio needs a new logic
-             * and a new set of registers, so we leave it for future
-             * patch bombing.
-             */
-            DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
-                             pipe_name(crtc->pipe));
-
-            /* write eld */
-            DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
-            intel_write_eld(&encoder->base, adjusted_mode);
-        }
-
-        intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
-    }
-}
-
 static struct intel_encoder *
 intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
 {
@@ -1062,9 +1013,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
     }
 
     if (type == INTEL_OUTPUT_HDMI) {
-        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-
-        if (intel_hdmi->has_hdmi_sink)
+        if (intel_crtc->config.has_hdmi_sink)
             temp |= TRANS_DDI_MODE_SELECT_HDMI;
         else
             temp |= TRANS_DDI_MODE_SELECT_DVI;
@@ -1293,28 +1242,48 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
 static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 {
     struct drm_encoder *encoder = &intel_encoder->base;
-    struct drm_crtc *crtc = encoder->crtc;
     struct drm_i915_private *dev_priv = encoder->dev->dev_private;
-    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+    struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
     enum port port = intel_ddi_get_encoder_port(intel_encoder);
     int type = intel_encoder->type;
 
+    if (crtc->config.has_audio) {
+        DRM_DEBUG_DRIVER("Audio on pipe %c on DDI\n",
+                         pipe_name(crtc->pipe));
+
+        /* write eld */
+        DRM_DEBUG_DRIVER("DDI audio: write eld information\n");
+        intel_write_eld(encoder, &crtc->config.adjusted_mode);
+    }
+
     if (type == INTEL_OUTPUT_EDP) {
         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
         intel_edp_panel_on(intel_dp);
     }
 
-    WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
-    I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
+    WARN_ON(crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
+    I915_WRITE(PORT_CLK_SEL(port), crtc->ddi_pll_sel);
 
     if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+        struct intel_digital_port *intel_dig_port =
+            enc_to_dig_port(encoder);
+
+        intel_dp->DP = intel_dig_port->saved_port_bits |
+                       DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+        intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
 
         intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
         intel_dp_start_link_train(intel_dp);
         intel_dp_complete_link_train(intel_dp);
         if (port != PORT_A)
             intel_dp_stop_link_train(intel_dp);
+    } else if (type == INTEL_OUTPUT_HDMI) {
+        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+        intel_hdmi->set_infoframes(encoder,
+                                   crtc->config.has_hdmi_sink,
+                                   &crtc->config.adjusted_mode);
     }
 }
 
@@ -1385,7 +1354,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
         intel_edp_psr_enable(intel_dp);
     }
 
-    if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
+    if (intel_crtc->config.has_audio) {
+        intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
         tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
         tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
         I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
@@ -1403,11 +1373,14 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
     struct drm_i915_private *dev_priv = dev->dev_private;
     uint32_t tmp;
 
-    if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
+    /* We can't touch HSW_AUD_PIN_ELD_CP_VLD uncionditionally because this
+     * register is part of the power well on Haswell. */
+    if (intel_crtc->config.has_audio) {
         tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
         tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
                  (pipe * 4));
         I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+        intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
     }
 
     if (type == INTEL_OUTPUT_EDP) {
@@ -1580,6 +1553,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 
     switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
     case TRANS_DDI_MODE_SELECT_HDMI:
+        pipe_config->has_hdmi_sink = true;
     case TRANS_DDI_MODE_SELECT_DVI:
     case TRANS_DDI_MODE_SELECT_FDI:
         break;
@@ -1592,6 +1566,12 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
         break;
     }
 
+    if (intel_display_power_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+        temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+        if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4)))
+            pipe_config->has_audio = true;
+    }
+
     if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
         pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
         /*
@@ -1708,7 +1688,6 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
                      DRM_MODE_ENCODER_TMDS);
 
     intel_encoder->compute_config = intel_ddi_compute_config;
-    intel_encoder->mode_set = intel_ddi_mode_set;
     intel_encoder->enable = intel_enable_ddi;
     intel_encoder->pre_enable = intel_ddi_pre_enable;
     intel_encoder->disable = intel_disable_ddi;
File diff suppressed because it is too large
@@ -64,6 +64,24 @@ static const struct dp_link_dpll vlv_dpll[] = {
     { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
 };
 
+/*
+ * CHV supports eDP 1.4 that have more link rates.
+ * Below only provides the fixed rate but exclude variable rate.
+ */
+static const struct dp_link_dpll chv_dpll[] = {
+    /*
+     * CHV requires to program fractional division for m2.
+     * m2 is stored in fixed point format using formula below
+     * (m2_int << 22) | m2_fraction
+     */
+    { DP_LINK_BW_1_62,  /* m2_int = 32, m2_fraction = 1677722 */
+        { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
+    { DP_LINK_BW_2_7,   /* m2_int = 27, m2_fraction = 0 */
+        { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
+    { DP_LINK_BW_5_4,   /* m2_int = 27, m2_fraction = 0 */
+        { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
+};
+
 /**
  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  * @intel_dp: DP struct
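Editor's check of the fixed-point layout described in the chv_dpll comment above; this standalone snippet is not part of the patch, it just confirms the table entries against the stated formula:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t chv_m2(uint32_t m2_int, uint32_t m2_fraction)
    {
        return (m2_int << 22) | m2_fraction;  /* formula from the comment */
    }

    int main(void)
    {
        assert(chv_m2(32, 1677722) == 0x819999a); /* DP_LINK_BW_1_62 entry */
        assert(chv_m2(27, 0) == 0x6c00000);       /* BW_2_7 and BW_5_4 entries */
        return 0;
    }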
@@ -726,6 +744,9 @@ intel_dp_set_clock(struct intel_encoder *encoder,
     } else if (HAS_PCH_SPLIT(dev)) {
         divisor = pch_dpll;
         count = ARRAY_SIZE(pch_dpll);
+    } else if (IS_CHERRYVIEW(dev)) {
+        divisor = chv_dpll;
+        count = ARRAY_SIZE(chv_dpll);
     } else if (IS_VALLEYVIEW(dev)) {
         divisor = vlv_dpll;
         count = ARRAY_SIZE(vlv_dpll);
@@ -779,6 +800,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
         pipe_config->has_pch_encoder = true;
 
     pipe_config->has_dp_encoder = true;
+    pipe_config->has_audio = intel_dp->has_audio;
 
     if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
         intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
@@ -903,7 +925,7 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
     udelay(500);
 }
 
-static void intel_dp_mode_set(struct intel_encoder *encoder)
+static void intel_dp_prepare(struct intel_encoder *encoder)
 {
     struct drm_device *dev = encoder->base.dev;
     struct drm_i915_private *dev_priv = dev->dev_private;
@@ -938,7 +960,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
     intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
     intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
 
-    if (intel_dp->has_audio) {
+    if (crtc->config.has_audio) {
         DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                          pipe_name(crtc->pipe));
         intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
@@ -971,14 +993,15 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
         if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
             intel_dp->DP |= DP_ENHANCED_FRAMING;
 
-        if (crtc->pipe == 1)
-            intel_dp->DP |= DP_PIPEB_SELECT;
+        if (!IS_CHERRYVIEW(dev)) {
+            if (crtc->pipe == 1)
+                intel_dp->DP |= DP_PIPEB_SELECT;
+        } else {
+            intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
+        }
     } else {
         intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
     }
-
-    if (port == PORT_A && !IS_VALLEYVIEW(dev))
-        ironlake_set_pll_cpu_edp(intel_dp);
 }
 
 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
@@ -1434,6 +1457,8 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
 
     if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
         *pipe = PORT_TO_PIPE_CPT(tmp);
+    } else if (IS_CHERRYVIEW(dev)) {
+        *pipe = DP_PORT_TO_PIPE_CHV(tmp);
     } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
         *pipe = PORT_TO_PIPE(tmp);
     } else {
@@ -1481,8 +1506,11 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
     struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
     int dotclock;
 
+    tmp = I915_READ(intel_dp->output_reg);
+    if (tmp & DP_AUDIO_OUTPUT_ENABLE)
+        pipe_config->has_audio = true;
+
     if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
-        tmp = I915_READ(intel_dp->output_reg);
         if (tmp & DP_SYNC_HS_HIGH)
             flags |= DRM_MODE_FLAG_PHSYNC;
         else
@@ -1837,6 +1865,42 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder)
     intel_dp_link_down(intel_dp);
 }
 
+static void chv_post_disable_dp(struct intel_encoder *encoder)
+{
+    struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+    struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+    struct drm_device *dev = encoder->base.dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct intel_crtc *intel_crtc =
+        to_intel_crtc(encoder->base.crtc);
+    enum dpio_channel ch = vlv_dport_to_channel(dport);
+    enum pipe pipe = intel_crtc->pipe;
+    u32 val;
+
+    intel_dp_link_down(intel_dp);
+
+    mutex_lock(&dev_priv->dpio_lock);
+
+    /* Propagate soft reset to data lane reset */
+    val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+    val |= CHV_PCS_REQ_SOFTRESET_EN;
+    vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+    val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+    val |= CHV_PCS_REQ_SOFTRESET_EN;
+    vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+
+    val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+    val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+    vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+    val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+    val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+    vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+
+    mutex_unlock(&dev_priv->dpio_lock);
+}
+
 static void intel_enable_dp(struct intel_encoder *encoder)
 {
     struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@@ -1876,8 +1940,13 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
     struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
     struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
 
+    if (dport->port == PORT_A)
+        intel_dp_prepare(encoder);
+
     /* Only ilk+ has port A */
-    if (dport->port == PORT_A)
+    if (dport->port == PORT_A) {
+        ironlake_set_pll_cpu_edp(intel_dp);
         ironlake_edp_pll_on(intel_dp);
+    }
 }
 
 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
@@ -1929,6 +1998,8 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
     enum dpio_channel port = vlv_dport_to_channel(dport);
     int pipe = intel_crtc->pipe;
 
+    intel_dp_prepare(encoder);
+
     /* Program Tx lane resets to default */
     mutex_lock(&dev_priv->dpio_lock);
     vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
@@ -1947,6 +2018,69 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
     mutex_unlock(&dev_priv->dpio_lock);
 }
 
+static void chv_pre_enable_dp(struct intel_encoder *encoder)
+{
+    struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+    struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+    struct drm_device *dev = encoder->base.dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct edp_power_seq power_seq;
+    struct intel_crtc *intel_crtc =
+        to_intel_crtc(encoder->base.crtc);
+    enum dpio_channel ch = vlv_dport_to_channel(dport);
+    int pipe = intel_crtc->pipe;
+    int data, i;
+    u32 val;
+
+    mutex_lock(&dev_priv->dpio_lock);
+
+    /* Deassert soft data lane reset*/
+    val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+    val |= CHV_PCS_REQ_SOFTRESET_EN;
+    vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+    val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+    val |= CHV_PCS_REQ_SOFTRESET_EN;
+    vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+
+    val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+    val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+    vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+    val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+    val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+    vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+
+    /* Program Tx lane latency optimal setting*/
+    for (i = 0; i < 4; i++) {
+        /* Set the latency optimal bit */
+        data = (i == 1) ? 0x0 : 0x6;
+        vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
+                       data << DPIO_FRC_LATENCY_SHFIT);
+
+        /* Set the upar bit */
+        data = (i == 1) ? 0x0 : 0x1;
+        vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+                       data << DPIO_UPAR_SHIFT);
+    }
+
+    /* Data lane stagger programming */
+    /* FIXME: Fix up value only after power analysis */
+
+    mutex_unlock(&dev_priv->dpio_lock);
+
+    if (is_edp(intel_dp)) {
+        /* init power sequencer on this pipe and port */
+        intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+                                                      &power_seq);
+    }
+
+    intel_enable_dp(encoder);
+
+    vlv_wait_port_ready(dev_priv, dport);
+}
+
 /*
  * Native read with retry for link status and receiver capability reads for
  * cases where the sink may still be asleep.
@@ -2171,6 +2305,166 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
     return 0;
 }
 
+static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
+{
+    struct drm_device *dev = intel_dp_to_dev(intel_dp);
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+    struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
+    u32 deemph_reg_value, margin_reg_value, val;
+    uint8_t train_set = intel_dp->train_set[0];
+    enum dpio_channel ch = vlv_dport_to_channel(dport);
+    enum pipe pipe = intel_crtc->pipe;
+    int i;
+
+    switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+    case DP_TRAIN_PRE_EMPHASIS_0:
+        switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+        case DP_TRAIN_VOLTAGE_SWING_400:
+            deemph_reg_value = 128;
+            margin_reg_value = 52;
+            break;
+        case DP_TRAIN_VOLTAGE_SWING_600:
+            deemph_reg_value = 128;
+            margin_reg_value = 77;
+            break;
+        case DP_TRAIN_VOLTAGE_SWING_800:
+            deemph_reg_value = 128;
+            margin_reg_value = 102;
+            break;
+        case DP_TRAIN_VOLTAGE_SWING_1200:
+            deemph_reg_value = 128;
+            margin_reg_value = 154;
+            /* FIXME extra to set for 1200 */
+            break;
+        default:
+            return 0;
+        }
+        break;
+    case DP_TRAIN_PRE_EMPHASIS_3_5:
+        switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+        case DP_TRAIN_VOLTAGE_SWING_400:
+            deemph_reg_value = 85;
+            margin_reg_value = 78;
+            break;
+        case DP_TRAIN_VOLTAGE_SWING_600:
+            deemph_reg_value = 85;
+            margin_reg_value = 116;
+            break;
+        case DP_TRAIN_VOLTAGE_SWING_800:
+            deemph_reg_value = 85;
+            margin_reg_value = 154;
+            break;
+        default:
+            return 0;
+        }
+        break;
+    case DP_TRAIN_PRE_EMPHASIS_6:
+        switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+        case DP_TRAIN_VOLTAGE_SWING_400:
+            deemph_reg_value = 64;
+            margin_reg_value = 104;
+            break;
+        case DP_TRAIN_VOLTAGE_SWING_600:
+            deemph_reg_value = 64;
+            margin_reg_value = 154;
+            break;
+        default:
+            return 0;
+        }
+        break;
+    case DP_TRAIN_PRE_EMPHASIS_9_5:
+        switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+        case DP_TRAIN_VOLTAGE_SWING_400:
+            deemph_reg_value = 43;
+            margin_reg_value = 154;
+            break;
+        default:
+            return 0;
+        }
+        break;
+    default:
+        return 0;
+    }
+
+    mutex_lock(&dev_priv->dpio_lock);
+
+    /* Clear calc init */
+    val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+    val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+    vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+    val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+    val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+    vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+    /* Program swing deemph */
+    for (i = 0; i < 4; i++) {
+        val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+        val &= ~DPIO_SWING_DEEMPH9P5_MASK;
+        val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
+        vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+    }
+
+    /* Program swing margin */
+    for (i = 0; i < 4; i++) {
+        val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+        val &= ~DPIO_SWING_MARGIN_MASK;
+        val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT;
+        vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+    }
+
+    /* Disable unique transition scale */
+    for (i = 0; i < 4; i++) {
+        val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+        val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+        vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+    }
+
+    if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
+            == DP_TRAIN_PRE_EMPHASIS_0) &&
+        ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
+            == DP_TRAIN_VOLTAGE_SWING_1200)) {
+
+        /*
+         * The document said it needs to set bit 27 for ch0 and bit 26
+         * for ch1. Might be a typo in the doc.
+         * For now, for this unique transition scale selection, set bit
+         * 27 for ch0 and ch1.
+         */
+        for (i = 0; i < 4; i++) {
+            val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+            val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
+            vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+        }
+
+        for (i = 0; i < 4; i++) {
+            val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+            val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+            val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+            vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+        }
+    }
+
+    /* Start swing calculation */
+    val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+    val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+    vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+    val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+    val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+    vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+    /* LRC Bypass */
+    val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+    val |= DPIO_LRC_BYPASS;
+    vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
+
+    mutex_unlock(&dev_priv->dpio_lock);
+
+    return 0;
+}
+
 static void
 intel_get_adjust_train(struct intel_dp *intel_dp,
                        const uint8_t link_status[DP_LINK_STATUS_SIZE])
@@ -2385,6 +2679,9 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
     } else if (IS_HASWELL(dev)) {
         signal_levels = intel_hsw_signal_levels(train_set);
         mask = DDI_BUF_EMP_MASK;
+    } else if (IS_CHERRYVIEW(dev)) {
+        signal_levels = intel_chv_signal_levels(intel_dp);
+        mask = 0;
     } else if (IS_VALLEYVIEW(dev)) {
         signal_levels = intel_vlv_signal_levels(intel_dp);
         mask = 0;
@@ -2751,22 +3048,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
         to_intel_crtc(intel_dig_port->base.base.crtc);
     uint32_t DP = intel_dp->DP;
 
-    /*
-     * DDI code has a strict mode set sequence and we should try to respect
-     * it, otherwise we might hang the machine in many different ways. So we
-     * really should be disabling the port only on a complete crtc_disable
-     * sequence. This function is just called under two conditions on DDI
-     * code:
-     * - Link train failed while doing crtc_enable, and on this case we
-     *   really should respect the mode set sequence and wait for a
-     *   crtc_disable.
-     * - Someone turned the monitor off and intel_dp_check_link_status
-     *   called us. We don't need to disable the whole port on this case, so
-     *   when someone turns the monitor on again,
-     *   intel_ddi_prepare_link_retrain will take care of redoing the link
-     *   train.
-     */
-    if (HAS_DDI(dev))
+    if (WARN_ON(HAS_DDI(dev)))
         return;
 
     if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@@ -4012,11 +4294,14 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
                      DRM_MODE_ENCODER_TMDS);
 
     intel_encoder->compute_config = intel_dp_compute_config;
-    intel_encoder->mode_set = intel_dp_mode_set;
     intel_encoder->disable = intel_disable_dp;
     intel_encoder->get_hw_state = intel_dp_get_hw_state;
     intel_encoder->get_config = intel_dp_get_config;
-    if (IS_VALLEYVIEW(dev)) {
+    if (IS_CHERRYVIEW(dev)) {
+        intel_encoder->pre_enable = chv_pre_enable_dp;
+        intel_encoder->enable = vlv_enable_dp;
+        intel_encoder->post_disable = chv_post_disable_dp;
+    } else if (IS_VALLEYVIEW(dev)) {
         intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
         intel_encoder->pre_enable = vlv_pre_enable_dp;
         intel_encoder->enable = vlv_enable_dp;
@@ -4031,7 +4316,14 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
     intel_dig_port->dp.output_reg = output_reg;
 
     intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
-    intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+    if (IS_CHERRYVIEW(dev)) {
+        if (port == PORT_D)
+            intel_encoder->crtc_mask = 1 << 2;
+        else
+            intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+    } else {
+        intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+    }
     intel_encoder->cloneable = 0;
     intel_encoder->hot_plug = intel_dp_hot_plug;
 
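Editor's note on intel_chv_signal_levels() earlier in this file: the nested switch is effectively a two-dimensional lookup over (pre-emphasis, voltage swing). A sketch of the same data as a table, using the deemph/margin pairs from the hunk; the 0..3 indices assume DisplayPort's four 2-bit levels, and {0, 0} marks the combinations the function rejects by returning 0 (this table is an illustration, not code from the patch):

    struct chv_swing { unsigned deemph, margin; };

    static const struct chv_swing chv_swing_table[4][4] = {
        /* pre-emphasis 0   */ { {128,  52}, {128,  77}, {128, 102}, {128, 154} },
        /* pre-emphasis 3.5 */ { { 85,  78}, { 85, 116}, { 85, 154}, {  0,   0} },
        /* pre-emphasis 6   */ { { 64, 104}, { 64, 154}, {  0,   0}, {  0,   0} },
        /* pre-emphasis 9.5 */ { { 43, 154}, {  0,   0}, {  0,   0}, {  0,   0} },
    };

The switch form in the patch keeps each entry next to its DP_TRAIN_* name; the table form trades that readability for a single bounds-checked lookup.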
@@ -106,8 +106,8 @@
 #define INTEL_DVO_CHIP_TMDS 2
 #define INTEL_DVO_CHIP_TVOUT 4
 
-#define INTEL_DSI_COMMAND_MODE 0
-#define INTEL_DSI_VIDEO_MODE 1
+#define INTEL_DSI_VIDEO_MODE 0
+#define INTEL_DSI_COMMAND_MODE 1
 
 struct intel_framebuffer {
     struct drm_framebuffer base;
@@ -273,6 +273,13 @@ struct intel_crtc_config {
      * accordingly. */
     bool has_dp_encoder;
 
+    /* Whether we should send NULL infoframes. Required for audio. */
+    bool has_hdmi_sink;
+
+    /* Audio enabled on this pipe. Only valid if either has_hdmi_sink or
+     * has_dp_encoder is set. */
+    bool has_audio;
+
     /*
      * Enable dithering, used when the selected pipe bpp doesn't match the
      * plane bpp.
@@ -363,7 +370,6 @@ struct intel_crtc {
      */
     bool active;
     unsigned long enabled_power_domains;
-    bool eld_vld;
     bool primary_enabled; /* is the primary plane (partially) visible? */
     bool lowfreq_avail;
     struct intel_overlay *overlay;
@@ -403,6 +409,8 @@ struct intel_crtc {
     } wm;
 
     wait_queue_head_t vbl_wait;
+
+    int scanline_offset;
 };
 
 struct intel_plane_wm_parameters {
@@ -486,6 +494,7 @@ struct intel_hdmi {
                            enum hdmi_infoframe_type type,
                            const void *frame, ssize_t len);
     void (*set_infoframes)(struct drm_encoder *encoder,
+                           bool enable,
                            struct drm_display_mode *adjusted_mode);
 };
 
@@ -561,6 +570,7 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
 {
     switch (dport->port) {
     case PORT_B:
+    case PORT_D:
         return DPIO_CH0;
     case PORT_C:
         return DPIO_CH1;
@@ -569,6 +579,20 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
     }
 }
 
+static inline int
+vlv_pipe_to_channel(enum pipe pipe)
+{
+    switch (pipe) {
+    case PIPE_A:
+    case PIPE_C:
+        return DPIO_CH0;
+    case PIPE_B:
+        return DPIO_CH1;
+    default:
+        BUG();
+    }
+}
+
 static inline struct drm_crtc *
 intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
 {
@@ -593,6 +617,8 @@ struct intel_unpin_work {
 #define INTEL_FLIP_INACTIVE 0
 #define INTEL_FLIP_PENDING  1
 #define INTEL_FLIP_COMPLETE 2
+    u32 flip_count;
+    u32 gtt_offset;
     bool enable_stall_check;
 };
 
@@ -644,8 +670,6 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
 /* i915_irq.c */
 bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
                                            enum pipe pipe, bool enable);
-bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
-                                             enum pipe pipe, bool enable);
 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable);
@@ -653,9 +677,12 @@ void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
 void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
 int intel_get_crtc_scanline(struct intel_crtc *crtc);
+void i9xx_check_fifo_underruns(struct drm_device *dev);
 
 
 /* intel_crt.c */
@@ -694,7 +721,7 @@ int intel_pch_rawclk(struct drm_device *dev);
 int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
 void intel_mark_busy(struct drm_device *dev);
 void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
-                        struct intel_ring_buffer *ring);
+                        struct intel_engine_cs *ring);
 void intel_mark_idle(struct drm_device *dev);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
 void intel_crtc_update_dpms(struct drm_crtc *crtc);
@@ -726,7 +753,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
                                     struct intel_load_detect_pipe *old);
 int intel_pin_and_fence_fb_obj(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
-                               struct intel_ring_buffer *pipelined);
+                               struct intel_engine_cs *pipelined);
 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
@@ -777,6 +804,8 @@ int valleyview_get_vco(struct drm_i915_private *dev_priv);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
                                  struct intel_crtc_config *pipe_config);
 int intel_format_to_fourcc(int format);
+void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
+
 
 /* intel_dp.c */
 void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -902,6 +931,7 @@ extern struct drm_display_mode *intel_find_panel_downclock(
 /* intel_pm.c */
 void intel_init_clock_gating(struct drm_device *dev);
 void intel_suspend_hw(struct drm_device *dev);
+int ilk_wm_max_level(const struct drm_device *dev);
 void intel_update_watermarks(struct drm_crtc *crtc);
 void intel_update_sprite_watermarks(struct drm_plane *plane,
                                     struct drm_crtc *crtc,
@@ -59,12 +59,12 @@ static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
 
 static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
 {
-    return intel_dsi->dev.type == INTEL_DSI_VIDEO_MODE;
+    return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
 }
 
 static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
 {
-    return intel_dsi->dev.type == INTEL_DSI_COMMAND_MODE;
+    return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
 }
 
 static void intel_dsi_hot_plug(struct intel_encoder *encoder)
@@ -94,13 +94,6 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
     return true;
 }
 
-static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
-{
-    DRM_DEBUG_KMS("\n");
-
-    vlv_enable_dsi_pll(encoder);
-}
-
 static void intel_dsi_device_ready(struct intel_encoder *encoder)
 {
     struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
@@ -185,6 +178,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
     /* put device in ready state */
     intel_dsi_device_ready(encoder);
 
+    msleep(intel_dsi->panel_on_delay);
+
     if (intel_dsi->dev.dev_ops->panel_reset)
         intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
 
@@ -301,6 +296,9 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
 
     if (intel_dsi->dev.dev_ops->disable_panel_power)
         intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
+
+    msleep(intel_dsi->panel_off_delay);
+    msleep(intel_dsi->panel_pwr_cycle_delay);
 }
 
 static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -428,7 +426,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
     I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
 }
 
-static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
 {
     struct drm_encoder *encoder = &intel_encoder->base;
     struct drm_device *dev = encoder->dev;
@@ -525,6 +523,9 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
     /* recovery disables */
     I915_WRITE(MIPI_EOT_DISABLE(pipe), val);
 
+    /* in terms of low power clock */
+    I915_WRITE(MIPI_INIT_COUNT(pipe), intel_dsi->init_count);
+
     /* in terms of txbyteclkhs. actual high to low switch +
      * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
      *
@@ -562,6 +563,15 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
                RANDOM_DPI_DISPLAY_RESOLUTION);
 }
 
+static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
+{
+    DRM_DEBUG_KMS("\n");
+
+    intel_dsi_prepare(encoder);
+
+    vlv_enable_dsi_pll(encoder);
+}
+
 static enum drm_connector_status
 intel_dsi_detect(struct drm_connector *connector, bool force)
 {
@@ -639,6 +649,7 @@ bool intel_dsi_init(struct drm_device *dev)
     struct intel_connector *intel_connector;
     struct drm_connector *connector;
     struct drm_display_mode *fixed_mode = NULL;
+    struct drm_i915_private *dev_priv = dev->dev_private;
     const struct intel_dsi_device *dsi;
     unsigned int i;
 
@@ -658,6 +669,13 @@ bool intel_dsi_init(struct drm_device *dev)
     encoder = &intel_encoder->base;
     intel_dsi->attached_connector = intel_connector;
 
+    if (IS_VALLEYVIEW(dev)) {
+        dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+    } else {
+        DRM_ERROR("Unsupported Mipi device to reg base");
+        return false;
+    }
+
     connector = &intel_connector->base;
 
     drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
@@ -668,7 +686,6 @@ bool intel_dsi_init(struct drm_device *dev)
     intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
     intel_encoder->pre_enable = intel_dsi_pre_enable;
     intel_encoder->enable = intel_dsi_enable_nop;
-    intel_encoder->mode_set = intel_dsi_mode_set;
     intel_encoder->disable = intel_dsi_disable;
     intel_encoder->post_disable = intel_dsi_post_disable;
     intel_encoder->get_hw_state = intel_dsi_get_hw_state;
@@ -31,7 +31,6 @@
 struct intel_dsi_device {
     unsigned int panel_id;
     const char *name;
-    int type;
     const struct intel_dsi_dev_ops *dev_ops;
     void *dev_priv;
 };
@@ -85,6 +84,9 @@ struct intel_dsi {
     /* virtual channel */
     int channel;
 
+    /* Video mode or command mode */
+    u16 operation_mode;
+
     /* number of DSI lanes */
     unsigned int lane_count;
 
@@ -112,6 +114,15 @@ struct intel_dsi {
     u16 hs_to_lp_count;
     u16 clk_lp_to_hs_count;
     u16 clk_hs_to_lp_count;
+
+    u16 init_count;
+
+    /* all delays in ms */
+    u16 backlight_off_delay;
+    u16 backlight_on_delay;
+    u16 panel_on_delay;
+    u16 panel_off_delay;
+    u16 panel_pwr_cycle_delay;
 };
 
 static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
@@ -343,15 +343,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
         num_connectors_detected++;
 
         if (!enabled[i]) {
-            DRM_DEBUG_KMS("connector %d not enabled, skipping\n",
-                          connector->base.id);
+            DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
+                          drm_get_connector_name(connector));
             continue;
         }
 
         encoder = connector->encoder;
         if (!encoder || WARN_ON(!encoder->crtc)) {
-            DRM_DEBUG_KMS("connector %d has no encoder or crtc, skipping\n",
-                          connector->base.id);
+            DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
+                          drm_get_connector_name(connector));
             enabled[i] = false;
             continue;
         }
@@ -373,16 +373,16 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
             }
         }
 
-        DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
-                      fb_conn->connector->base.id);
+        DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
+                      drm_get_connector_name(connector));
 
         /* go for command line mode first */
         modes[i] = drm_pick_cmdline_mode(fb_conn, width, height);
 
         /* try for preferred next */
         if (!modes[i]) {
-            DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
-                          fb_conn->connector->base.id);
+            DRM_DEBUG_KMS("looking for preferred mode on connector %s\n",
+                          drm_get_connector_name(connector));
             modes[i] = drm_has_preferred_mode(fb_conn, width,
                                               height);
         }
@@ -400,16 +400,20 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
              * since the fb helper layer wants a pointer to
              * something we own.
              */
+            DRM_DEBUG_KMS("looking for current mode on connector %s\n",
+                          drm_get_connector_name(connector));
             intel_mode_from_pipe_config(&encoder->crtc->hwmode,
                                         &to_intel_crtc(encoder->crtc)->config);
             modes[i] = &encoder->crtc->hwmode;
         }
         crtcs[i] = new_crtc;
 
-        DRM_DEBUG_KMS("connector %s on crtc %d: %s\n",
+        DRM_DEBUG_KMS("connector %s on pipe %d [CRTC:%d]: %dx%d%s\n",
                       drm_get_connector_name(connector),
+                      pipe_name(to_intel_crtc(encoder->crtc)->pipe),
                       encoder->crtc->base.id,
-                      modes[i]->name);
+                      modes[i]->hdisplay, modes[i]->vdisplay,
+                      modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
 
         fallback = false;
     }
@@ -488,7 +492,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
         return false;
 
     /* Find the largest fb */
-    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+    for_each_crtc(dev, crtc) {
         intel_crtc = to_intel_crtc(crtc);
 
         if (!intel_crtc->active || !crtc->primary->fb) {
@@ -512,7 +516,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
     }
 
     /* Now make sure all the pipes will fit into it */
-    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+    for_each_crtc(dev, crtc) {
         unsigned int cur_size;
 
         intel_crtc = to_intel_crtc(crtc);
@@ -577,7 +581,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
     drm_framebuffer_reference(&ifbdev->fb->base);
 
     /* Final pass to check if any active pipes don't have fbs */
-    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+    for_each_crtc(dev, crtc) {
         intel_crtc = to_intel_crtc(crtc);
 
         if (!intel_crtc->active)

@@ -418,6 +418,7 @@ intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
 }
 
static void g4x_set_infoframes(struct drm_encoder *encoder,
|
||||
bool enable,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
|
||||
|
@ -440,7 +441,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
|
|||
* either. */
|
||||
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
|
||||
|
||||
if (!intel_hdmi->has_hdmi_sink) {
|
||||
if (!enable) {
|
||||
if (!(val & VIDEO_DIP_ENABLE))
|
||||
return;
|
||||
val &= ~VIDEO_DIP_ENABLE;
|
||||
|
@ -471,6 +472,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
|
|||
}
|
||||
|
||||
static void ibx_set_infoframes(struct drm_encoder *encoder,
|
||||
bool enable,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
|
||||
|
@ -486,7 +488,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
|
|||
/* See the big comment in g4x_set_infoframes() */
|
||||
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
|
||||
|
||||
if (!intel_hdmi->has_hdmi_sink) {
|
||||
if (!enable) {
|
||||
if (!(val & VIDEO_DIP_ENABLE))
|
||||
return;
|
||||
val &= ~VIDEO_DIP_ENABLE;
|
||||
|
@@ -518,6 +520,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
 }

 static void cpt_set_infoframes(struct drm_encoder *encoder,
+			       bool enable,
 			       struct drm_display_mode *adjusted_mode)
 {
 	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
@@ -531,7 +534,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
 	/* See the big comment in g4x_set_infoframes() */
 	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;

-	if (!intel_hdmi->has_hdmi_sink) {
+	if (!enable) {
 		if (!(val & VIDEO_DIP_ENABLE))
 			return;
 		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
@@ -554,6 +557,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
 }

 static void vlv_set_infoframes(struct drm_encoder *encoder,
+			       bool enable,
 			       struct drm_display_mode *adjusted_mode)
 {
 	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
@@ -569,7 +573,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
 	/* See the big comment in g4x_set_infoframes() */
 	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;

-	if (!intel_hdmi->has_hdmi_sink) {
+	if (!enable) {
 		if (!(val & VIDEO_DIP_ENABLE))
 			return;
 		val &= ~VIDEO_DIP_ENABLE;
@@ -601,6 +605,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
 }

 static void hsw_set_infoframes(struct drm_encoder *encoder,
+			       bool enable,
 			       struct drm_display_mode *adjusted_mode)
 {
 	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
@@ -611,7 +616,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,

 	assert_hdmi_port_disabled(intel_hdmi);

-	if (!intel_hdmi->has_hdmi_sink) {
+	if (!enable) {
 		I915_WRITE(reg, 0);
 		POSTING_READ(reg);
 		return;
@@ -628,7 +633,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
 		intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
 }

-static void intel_hdmi_mode_set(struct intel_encoder *encoder)
+static void intel_hdmi_prepare(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -650,20 +655,21 @@ static void intel_hdmi_mode_set(struct intel_encoder *encoder)
 	else
 		hdmi_val |= SDVO_COLOR_FORMAT_8bpc;

-	if (intel_hdmi->has_hdmi_sink &&
-	    (HAS_PCH_CPT(dev) || IS_VALLEYVIEW(dev)))
+	if (crtc->config.has_hdmi_sink)
 		hdmi_val |= HDMI_MODE_SELECT_HDMI;

-	if (intel_hdmi->has_audio) {
+	if (crtc->config.has_audio) {
+		WARN_ON(!crtc->config.has_hdmi_sink);
 		DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
 				 pipe_name(crtc->pipe));
 		hdmi_val |= SDVO_AUDIO_ENABLE;
-		hdmi_val |= HDMI_MODE_SELECT_HDMI;
 		intel_write_eld(&encoder->base, adjusted_mode);
 	}

 	if (HAS_PCH_CPT(dev))
 		hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
+	else if (IS_CHERRYVIEW(dev))
+		hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
 	else
 		hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);

@@ -691,6 +697,8 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,

 	if (HAS_PCH_CPT(dev))
 		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else if (IS_CHERRYVIEW(dev))
+		*pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
 	else
 		*pipe = PORT_TO_PIPE(tmp);

@@ -717,6 +725,12 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 	else
 		flags |= DRM_MODE_FLAG_NVSYNC;

+	if (tmp & HDMI_MODE_SELECT_HDMI)
+		pipe_config->has_hdmi_sink = true;
+
+	if (tmp & HDMI_MODE_SELECT_HDMI)
+		pipe_config->has_audio = true;
+
 	pipe_config->adjusted_mode.flags |= flags;

 	if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
@@ -739,7 +753,7 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
 	u32 temp;
 	u32 enable_bits = SDVO_ENABLE;

-	if (intel_hdmi->has_audio)
+	if (intel_crtc->config.has_audio)
 		enable_bits |= SDVO_AUDIO_ENABLE;

 	temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -893,9 +907,11 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 	int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
 	int desired_bpp;

+	pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
+
 	if (intel_hdmi->color_range_auto) {
 		/* See CEA-861-E - 5.1 Default Encoding Parameters */
-		if (intel_hdmi->has_hdmi_sink &&
+		if (pipe_config->has_hdmi_sink &&
 		    drm_match_cea_mode(adjusted_mode) > 1)
 			intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
 		else
@@ -908,13 +924,16 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
 		pipe_config->has_pch_encoder = true;

+	if (pipe_config->has_hdmi_sink && intel_hdmi->has_audio)
+		pipe_config->has_audio = true;
+
 	/*
 	 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
 	 * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
 	 * outputs. We also need to check that the higher clock still fits
 	 * within limits.
 	 */
-	if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
+	if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
 	    clock_12bpc <= portclock_limit &&
 	    hdmi_12bpc_possible(encoder->new_crtc)) {
 		DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
@@ -1121,7 +1140,11 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
 	struct drm_display_mode *adjusted_mode =
 		&intel_crtc->config.adjusted_mode;

-	intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
+	intel_hdmi_prepare(encoder);
+
+	intel_hdmi->set_infoframes(&encoder->base,
+				   intel_crtc->config.has_hdmi_sink,
+				   adjusted_mode);
 }

 static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
@@ -1138,9 +1161,6 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
 	int pipe = intel_crtc->pipe;
 	u32 val;

-	if (!IS_VALLEYVIEW(dev))
-		return;
-
 	/* Enable clock channels for this port */
 	mutex_lock(&dev_priv->dpio_lock);
 	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
@@ -1167,7 +1187,9 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
 	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
 	mutex_unlock(&dev_priv->dpio_lock);

-	intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
+	intel_hdmi->set_infoframes(&encoder->base,
+				   intel_crtc->config.has_hdmi_sink,
+				   adjusted_mode);

 	intel_enable_hdmi(encoder);

@@ -1184,8 +1206,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
 	enum dpio_channel port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;

-	if (!IS_VALLEYVIEW(dev))
-		return;
+	intel_hdmi_prepare(encoder);

 	/* Program Tx lane resets to default */
 	mutex_lock(&dev_priv->dpio_lock);
@@ -1224,6 +1245,152 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
 	mutex_unlock(&dev_priv->dpio_lock);
 }

+static void chv_hdmi_post_disable(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
+	enum dpio_channel ch = vlv_dport_to_channel(dport);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 val;
+
+	mutex_lock(&dev_priv->dpio_lock);
+
+	/* Propagate soft reset to data lane reset */
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+	val |= CHV_PCS_REQ_SOFTRESET_EN;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+	val |= CHV_PCS_REQ_SOFTRESET_EN;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+
+	mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
+	enum dpio_channel ch = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
+	int data, i;
+	u32 val;
+
+	mutex_lock(&dev_priv->dpio_lock);
+
+	/* Deassert soft data lane reset*/
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+	val |= CHV_PCS_REQ_SOFTRESET_EN;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+	val |= CHV_PCS_REQ_SOFTRESET_EN;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+
+	/* Program Tx latency optimal setting */
+	for (i = 0; i < 4; i++) {
+		/* Set the latency optimal bit */
+		data = (i == 1) ? 0x0 : 0x6;
+		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
+			       data << DPIO_FRC_LATENCY_SHFIT);
+
+		/* Set the upar bit */
+		data = (i == 1) ? 0x0 : 0x1;
+		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+			       data << DPIO_UPAR_SHIFT);
+	}
+
+	/* Data lane stagger programming */
+	/* FIXME: Fix up value only after power analysis */
+
+	/* Clear calc init */
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+	/* FIXME: Program the support xxx V-dB */
+	/* Use 800mV-0dB */
+	for (i = 0; i < 4; i++) {
+		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
+		val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT;
+		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+	}
+
+	for (i = 0; i < 4; i++) {
+		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+		val &= ~DPIO_SWING_MARGIN_MASK;
+		val |= 102 << DPIO_SWING_MARGIN_SHIFT;
+		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+	}
+
+	/* Disable unique transition scale */
+	for (i = 0; i < 4; i++) {
+		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+	}
+
+	/* Additional steps for 1200mV-0dB */
+#if 0
+	val = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW3(ch));
+	if (ch)
+		val |= DPIO_TX_UNIQ_TRANS_SCALE_CH1;
+	else
+		val |= DPIO_TX_UNIQ_TRANS_SCALE_CH0;
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(ch), val);
+
+	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(ch),
+		       vlv_dpio_read(dev_priv, pipe, VLV_TX_DW2(ch)) |
+		       (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT));
+#endif
+	/* Start swing calculation */
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+
+	/* LRC Bypass */
+	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+	val |= DPIO_LRC_BYPASS;
+	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
+
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	intel_enable_hdmi(encoder);
+
+	vlv_wait_port_ready(dev_priv, dport);
+}
+
 static void intel_hdmi_destroy(struct drm_connector *connector)
 {
 	drm_connector_cleanup(connector);
@@ -1284,7 +1451,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 		intel_encoder->hpd_pin = HPD_PORT_C;
 		break;
 	case PORT_D:
-		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+		if (IS_CHERRYVIEW(dev))
+			intel_hdmi->ddc_bus = GMBUS_PORT_DPD_CHV;
+		else
+			intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
 		intel_encoder->hpd_pin = HPD_PORT_D;
 		break;
 	case PORT_A:
@@ -1354,11 +1524,14 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
 			 DRM_MODE_ENCODER_TMDS);

 	intel_encoder->compute_config = intel_hdmi_compute_config;
-	intel_encoder->mode_set = intel_hdmi_mode_set;
 	intel_encoder->disable = intel_disable_hdmi;
 	intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
 	intel_encoder->get_config = intel_hdmi_get_config;
-	if (IS_VALLEYVIEW(dev)) {
+	if (IS_CHERRYVIEW(dev)) {
+		intel_encoder->pre_enable = chv_hdmi_pre_enable;
+		intel_encoder->enable = vlv_enable_hdmi;
+		intel_encoder->post_disable = chv_hdmi_post_disable;
+	} else if (IS_VALLEYVIEW(dev)) {
 		intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
 		intel_encoder->pre_enable = vlv_hdmi_pre_enable;
 		intel_encoder->enable = vlv_enable_hdmi;
@@ -1369,7 +1542,14 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
 	}

 	intel_encoder->type = INTEL_OUTPUT_HDMI;
-	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+	if (IS_CHERRYVIEW(dev)) {
+		if (port == PORT_D)
+			intel_encoder->crtc_mask = 1 << 2;
+		else
+			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+	} else {
+		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+	}
 	intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
 	/*
 	 * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems

drivers/gpu/drm/i915/intel_lvds.c
@@ -119,10 +119,6 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 	pipe_config->adjusted_mode.crtc_clock = dotclock;
 }

-/* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
- */
 static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 {
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
@@ -324,15 +320,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 	return true;
 }

-static void intel_lvds_mode_set(struct intel_encoder *encoder)
-{
-	/*
-	 * We don't do anything here, the LVDS port is fully set up in the pre
-	 * enable hook - the ordering constraints for enabling the lvds port vs.
-	 * enabling the display pll are too strict.
-	 */
-}
-
 /**
  * Detect the LVDS connection.
  *
@@ -946,7 +933,6 @@ void intel_lvds_init(struct drm_device *dev)
 	intel_encoder->enable = intel_enable_lvds;
 	intel_encoder->pre_enable = intel_pre_enable_lvds;
 	intel_encoder->compute_config = intel_lvds_compute_config;
-	intel_encoder->mode_set = intel_lvds_mode_set;
 	intel_encoder->disable = intel_disable_lvds;
 	intel_encoder->get_hw_state = intel_lvds_get_hw_state;
 	intel_encoder->get_config = intel_lvds_get_config;

drivers/gpu/drm/i915/intel_overlay.c
@@ -213,7 +213,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
 	int ret;

 	BUG_ON(overlay->last_flip_req);
@@ -236,7 +236,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
 	int ret;

 	BUG_ON(overlay->active);
@@ -263,7 +263,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
 	int ret;
@@ -320,7 +320,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
 	u32 flip_addr = overlay->flip_addr;
 	int ret;

@@ -363,7 +363,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
 	int ret;

 	if (overlay->last_flip_req == 0)
@@ -389,7 +389,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
 	int ret;

 	/* Only wait if there is actually an old frame to release to

drivers/gpu/drm/i915/intel_panel.c
@@ -42,6 +42,59 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
 }

+/**
+ * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @fixed_mode : panel native mode
+ * @connector: LVDS/eDP connector
+ *
+ * Return downclock_avail
+ * Find the reduced downclock for LVDS/eDP in EDID.
+ */
+struct drm_display_mode *
+intel_find_panel_downclock(struct drm_device *dev,
+			struct drm_display_mode *fixed_mode,
+			struct drm_connector *connector)
+{
+	struct drm_display_mode *scan, *tmp_mode;
+	int temp_downclock;
+
+	temp_downclock = fixed_mode->clock;
+	tmp_mode = NULL;
+
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		/*
+		 * If one mode has the same resolution with the fixed_panel
+		 * mode while they have the different refresh rate, it means
+		 * that the reduced downclock is found. In such
+		 * case we can set the different FPx0/1 to dynamically select
+		 * between low and high frequency.
+		 */
+		if (scan->hdisplay == fixed_mode->hdisplay &&
+		    scan->hsync_start == fixed_mode->hsync_start &&
+		    scan->hsync_end == fixed_mode->hsync_end &&
+		    scan->htotal == fixed_mode->htotal &&
+		    scan->vdisplay == fixed_mode->vdisplay &&
+		    scan->vsync_start == fixed_mode->vsync_start &&
+		    scan->vsync_end == fixed_mode->vsync_end &&
+		    scan->vtotal == fixed_mode->vtotal) {
+			if (scan->clock < temp_downclock) {
+				/*
+				 * The downclock is already found. But we
+				 * expect to find the lower downclock.
+				 */
+				temp_downclock = scan->clock;
+				tmp_mode = scan;
+			}
+		}
+	}
+
+	if (temp_downclock < fixed_mode->clock)
+		return drm_mode_duplicate(dev, tmp_mode);
+	else
+		return NULL;
+}
+
 /* adjusted_mode has been preset to be the panel's fixed mode */
 void
 intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
@@ -323,6 +376,28 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
 	pipe_config->gmch_pfit.lvds_border_bits = border;
 }

+enum drm_connector_status
+intel_panel_detect(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Assume that the BIOS does not lie through the OpRegion... */
+	if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
+		return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
+			connector_status_connected :
+			connector_status_disconnected;
+	}
+
+	switch (i915.panel_ignore_lid) {
+	case -2:
+		return connector_status_connected;
+	case -1:
+		return connector_status_disconnected;
+	default:
+		return connector_status_unknown;
+	}
+}
+
 static u32 intel_panel_compute_brightness(struct intel_connector *connector,
 					  u32 val)
 {
@@ -795,28 +870,6 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
 	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 }

-enum drm_connector_status
-intel_panel_detect(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/* Assume that the BIOS does not lie through the OpRegion... */
-	if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
-		return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
-			connector_status_connected :
-			connector_status_disconnected;
-	}
-
-	switch (i915.panel_ignore_lid) {
-	case -2:
-		return connector_status_connected;
-	case -1:
-		return connector_status_disconnected;
-	default:
-		return connector_status_unknown;
-	}
-}
-
 #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
 static int intel_backlight_device_update_status(struct backlight_device *bd)
 {
@@ -1103,59 +1156,6 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
 	intel_backlight_device_unregister(intel_connector);
 }

-/**
- * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
- * @dev: drm device
- * @fixed_mode : panel native mode
- * @connector: LVDS/eDP connector
- *
- * Return downclock_avail
- * Find the reduced downclock for LVDS/eDP in EDID.
- */
-struct drm_display_mode *
-intel_find_panel_downclock(struct drm_device *dev,
-			struct drm_display_mode *fixed_mode,
-			struct drm_connector *connector)
-{
-	struct drm_display_mode *scan, *tmp_mode;
-	int temp_downclock;
-
-	temp_downclock = fixed_mode->clock;
-	tmp_mode = NULL;
-
-	list_for_each_entry(scan, &connector->probed_modes, head) {
-		/*
-		 * If one mode has the same resolution with the fixed_panel
-		 * mode while they have the different refresh rate, it means
-		 * that the reduced downclock is found. In such
-		 * case we can set the different FPx0/1 to dynamically select
-		 * between low and high frequency.
-		 */
-		if (scan->hdisplay == fixed_mode->hdisplay &&
-		    scan->hsync_start == fixed_mode->hsync_start &&
-		    scan->hsync_end == fixed_mode->hsync_end &&
-		    scan->htotal == fixed_mode->htotal &&
-		    scan->vdisplay == fixed_mode->vdisplay &&
-		    scan->vsync_start == fixed_mode->vsync_start &&
-		    scan->vsync_end == fixed_mode->vsync_end &&
-		    scan->vtotal == fixed_mode->vtotal) {
-			if (scan->clock < temp_downclock) {
-				/*
-				 * The downclock is already found. But we
-				 * expect to find the lower downclock.
-				 */
-				temp_downclock = scan->clock;
-				tmp_mode = scan;
-			}
-		}
-	}
-
-	if (temp_downclock < fixed_mode->clock)
-		return drm_mode_duplicate(dev, tmp_mode);
-	else
-		return NULL;
-}
-
 /* Set up chip specific backlight functions */
 void intel_panel_init_backlight_funcs(struct drm_device *dev)
 {

drivers/gpu/drm/i915/intel_pm.c
@@ -487,7 +487,7 @@ void intel_update_fbc(struct drm_device *dev)
 	 * - new fb is too large to fit in compressed buffer
 	 * - going to an unsupported config (interlace, pixel multiply, etc.)
 	 */
-	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+	for_each_crtc(dev, tmp_crtc) {
 		if (intel_crtc_active(tmp_crtc) &&
 		    to_intel_crtc(tmp_crtc)->primary_enabled) {
 			if (crtc) {
@@ -1010,7 +1010,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
 {
 	struct drm_crtc *crtc, *enabled = NULL;

-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+	for_each_crtc(dev, crtc) {
 		if (intel_crtc_active(crtc)) {
 			if (enabled)
 				return NULL;
@@ -2077,7 +2077,7 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
 		wm[3] *= 2;
 }

-static int ilk_wm_max_level(const struct drm_device *dev)
+int ilk_wm_max_level(const struct drm_device *dev)
 {
 	/* how many WM levels are we expecting */
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -2170,7 +2170,7 @@ static void ilk_compute_wm_config(struct drm_device *dev,
 	struct intel_crtc *intel_crtc;

 	/* Compute the currently _active_ config */
-	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+	for_each_intel_crtc(dev, intel_crtc) {
 		const struct intel_pipe_wm *wm = &intel_crtc->wm.active;

 		if (!wm->pipe_enabled)
@@ -2254,7 +2254,7 @@ static void ilk_merge_wm_level(struct drm_device *dev,

 	ret_wm->enable = true;

-	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+	for_each_intel_crtc(dev, intel_crtc) {
 		const struct intel_pipe_wm *active = &intel_crtc->wm.active;
 		const struct intel_wm_level *wm = &active->wm[level];

@@ -2400,7 +2400,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
 	}

 	/* LP0 register values */
-	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+	for_each_intel_crtc(dev, intel_crtc) {
 		enum pipe pipe = intel_crtc->pipe;
 		const struct intel_wm_level *r =
 			&intel_crtc->wm.active.wm[0];
@@ -2747,7 +2747,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
 	struct drm_crtc *crtc;

-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+	for_each_crtc(dev, crtc)
 		ilk_pipe_wm_get_hw_state(crtc);

 	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
@@ -3114,6 +3114,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 	if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
 		mask |= GEN6_PM_RP_UP_EI_EXPIRED;

+	if (IS_GEN8(dev_priv->dev))
+		mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
 	return ~mask;
 }

@@ -3246,6 +3249,26 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
 }

+static void gen8_disable_rps_interrupts(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
+	I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
+				   ~dev_priv->pm_rps_events);
+	/* Complete PM interrupt masking here doesn't race with the rps work
+	 * item again unmasking PM interrupts because that is using a different
+	 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
+	 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
+	 * gen8_enable_rps will clean up. */
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	dev_priv->rps.pm_iir = 0;
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
+}
+
 static void gen6_disable_rps_interrupts(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3272,7 +3295,10 @@ static void gen6_disable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);

-	gen6_disable_rps_interrupts(dev);
+	if (IS_BROADWELL(dev))
+		gen8_disable_rps_interrupts(dev);
+	else
+		gen6_disable_rps_interrupts(dev);
 }

 static void valleyview_disable_rps(struct drm_device *dev)
@@ -3308,10 +3334,6 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
 	if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
 		return 0;

-	/* Disable RC6 on Broadwell for now */
-	if (IS_BROADWELL(dev))
-		return 0;
-
 	/* Respect the kernel parameter if it is set */
 	if (enable_rc6 >= 0) {
 		int mask;
@@ -3324,7 +3346,7 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)

 		if ((enable_rc6 & mask) != enable_rc6)
 			DRM_INFO("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
-				 enable_rc6, enable_rc6 & mask, mask);
+				 enable_rc6 & mask, enable_rc6, mask);

 		return enable_rc6 & mask;
 	}
@@ -3344,6 +3366,17 @@ int intel_enable_rc6(const struct drm_device *dev)
 	return i915.enable_rc6;
 }

+static void gen8_enable_rps_interrupts(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	WARN_ON(dev_priv->rps.pm_iir);
+	bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
 static void gen6_enable_rps_interrupts(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3379,7 +3412,7 @@ static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_c
 static void gen8_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
+	struct intel_engine_cs *ring;
 	uint32_t rc6_mask = 0, rp_state_cap;
 	int unused;

@@ -3433,11 +3466,15 @@ static void gen8_enable_rps(struct drm_device *dev)

 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

+	/* WaDisablePwrmtrEvent:chv (pre-production hw) */
+	I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
+	I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
+
 	/* 5: Enable RPS */
 	I915_WRITE(GEN6_RP_CONTROL,
 		   GEN6_RP_MEDIA_TURBO |
 		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
-		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
 		   GEN6_RP_ENABLE |
 		   GEN6_RP_UP_BUSY_AVG |
 		   GEN6_RP_DOWN_IDLE_AVG);
@@ -3446,7 +3483,7 @@ static void gen8_enable_rps(struct drm_device *dev)

 	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);

-	gen6_enable_rps_interrupts(dev);
+	gen8_enable_rps_interrupts(dev);

 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -3454,7 +3491,7 @@ static void gen8_enable_rps(struct drm_device *dev)
 static void gen6_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
+	struct intel_engine_cs *ring;
 	u32 rp_state_cap;
 	u32 gt_perf_status;
 	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
@@ -3783,7 +3820,7 @@ static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
 static void valleyview_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring;
+	struct intel_engine_cs *ring;
 	u32 gtfifodbg, val, rc6_mode = 0;
 	int i;

@@ -3914,7 +3951,7 @@ static int ironlake_setup_rc6(struct drm_device *dev)
 static void ironlake_enable_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
 	bool was_interruptible;
 	int ret;

@@ -4426,7 +4463,7 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
 bool i915_gpu_busy(void)
 {
 	struct drm_i915_private *dev_priv;
-	struct intel_ring_buffer *ring;
+	struct intel_engine_cs *ring;
 	bool ret = false;
 	int i;

@@ -4608,8 +4645,10 @@ void intel_disable_gt_powersave(struct drm_device *dev)
 	if (IS_IRONLAKE_M(dev)) {
 		ironlake_disable_drps(dev);
 		ironlake_disable_rc6(dev);
-	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
-		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+	} else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
+		if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work))
+			intel_runtime_pm_put(dev_priv);
+
 		cancel_work_sync(&dev_priv->rps.work);
 		mutex_lock(&dev_priv->rps.hw_lock);
 		if (IS_VALLEYVIEW(dev))
@@ -4655,7 +4694,7 @@ void intel_enable_gt_powersave(struct drm_device *dev)
 		ironlake_enable_rc6(dev);
 		intel_init_emon(dev);
 		mutex_unlock(&dev->struct_mutex);
-	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+	} else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
 		/*
 		 * PCU communication is slow and this doesn't need to be
 		 * done at any specific time, so do this out of our fast path
@@ -5335,6 +5374,59 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
 }

+static void cherryview_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+
+	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+
+	/* WaDisablePartialInstShootdown:chv */
+	I915_WRITE(GEN8_ROW_CHICKEN,
+		   _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
+
+	/* WaDisableThreadStallDopClockGating:chv */
+	I915_WRITE(GEN8_ROW_CHICKEN,
+		   _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+
+	/* WaVSRefCountFullforceMissDisable:chv */
+	/* WaDSRefCountFullforceMissDisable:chv */
+	I915_WRITE(GEN7_FF_THREAD_MODE,
+		   I915_READ(GEN7_FF_THREAD_MODE) &
+		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
+
+	/* WaDisableSemaphoreAndSyncFlipWait:chv */
+	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+
+	/* WaDisableCSUnitClockGating:chv */
+	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaDisableSDEUnitClockGating:chv */
+	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaDisableSamplerPowerBypass:chv (pre-production hw) */
+	I915_WRITE(HALF_SLICE_CHICKEN3,
+		   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
+
+	/* WaDisableGunitClockGating:chv (pre-production hw) */
+	I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
+		   GINT_DIS);
+
+	/* WaDisableFfDopClockGating:chv (pre-production hw) */
+	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
+		   _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
+
+	/* WaDisableDopClockGating:chv (pre-production hw) */
+	I915_WRITE(GEN7_ROW_CHICKEN2,
+		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+		   GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
+}
+
 static void g4x_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5545,33 +5637,6 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 	}
 }

-static void reset_vblank_counter(struct drm_device *dev, enum pipe pipe)
-{
-	assert_spin_locked(&dev->vbl_lock);
-
-	dev->vblank[pipe].last = 0;
-}
-
-static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
-{
-	struct drm_device *dev = dev_priv->dev;
-	enum pipe pipe;
-	unsigned long irqflags;
-
-	/*
-	 * After this, the registers on the pipes that are part of the power
-	 * well will become zero, so we have to adjust our counters according to
-	 * that.
-	 *
-	 * FIXME: Should we do this in general in drm_vblank_post_modeset?
-	 */
-	spin_lock_irqsave(&dev->vbl_lock, irqflags);
-	for_each_pipe(pipe)
-		if (pipe != PIPE_A)
-			reset_vblank_counter(dev, pipe);
-	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-}
-
 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 			       struct i915_power_well *power_well, bool enable)
 {
@@ -5600,8 +5665,6 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
 			POSTING_READ(HSW_PWR_WELL_DRIVER);
 			DRM_DEBUG_KMS("Requesting to disable the power well\n");
-
-			hsw_power_well_post_disable(dev_priv);
 		}
 	}
 }
@@ -5758,23 +5821,12 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
 					   struct i915_power_well *power_well)
 {
-	struct drm_device *dev = dev_priv->dev;
-	enum pipe pipe;
-
 	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

 	spin_lock_irq(&dev_priv->irq_lock);
-	for_each_pipe(pipe)
-		__intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
-
 	valleyview_disable_display_irqs(dev_priv);
 	spin_unlock_irq(&dev_priv->irq_lock);

-	spin_lock_irq(&dev->vbl_lock);
-	for_each_pipe(pipe)
-		reset_vblank_counter(dev, pipe);
-	spin_unlock_irq(&dev->vbl_lock);
-
 	vlv_set_power_well(dev_priv, power_well, false);
 }

@@ -6270,6 +6322,10 @@ void intel_init_pm(struct drm_device *dev)
 			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
 		else if (INTEL_INFO(dev)->gen == 8)
 			dev_priv->display.init_clock_gating = gen8_init_clock_gating;
+	} else if (IS_CHERRYVIEW(dev)) {
+		dev_priv->display.update_wm = valleyview_update_wm;
+		dev_priv->display.init_clock_gating =
+			cherryview_init_clock_gating;
 	} else if (IS_VALLEYVIEW(dev)) {
 		dev_priv->display.update_wm = valleyview_update_wm;
 		dev_priv->display.init_clock_gating =
48	drivers/gpu/drm/i915/intel_renderstate.h	Normal file
@@ -0,0 +1,48 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_RENDERSTATE_H
+#define _INTEL_RENDERSTATE_H
+
+#include <linux/types.h>
+
+struct intel_renderstate_rodata {
+	const u32 *reloc;
+	const u32 reloc_items;
+	const u32 *batch;
+	const u32 batch_items;
+};
+
+extern const struct intel_renderstate_rodata gen6_null_state;
+extern const struct intel_renderstate_rodata gen7_null_state;
+extern const struct intel_renderstate_rodata gen8_null_state;
+
+#define RO_RENDERSTATE(_g) \
+	const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
+		.reloc = gen ## _g ## _null_state_relocs, \
+		.reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \
+		.batch = gen ## _g ## _null_state_batch, \
+		.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
+	}
+
+#endif /* INTEL_RENDERSTATE_H */
289	drivers/gpu/drm/i915/intel_renderstate_gen6.c	Normal file
@@ -0,0 +1,289 @@
+#include "intel_renderstate.h"
+
+static const u32 gen6_null_state_relocs[] = {
+	0x00000020,
+	0x00000024,
+	0x0000002c,
+	0x000001e0,
+	0x000001e4,
+};
+
+static const u32 gen6_null_state_batch[] = {
+	0x69040000,
+	0x790d0001,
+	0x00000000,
+	0x00000000,
+	0x78180000,
+	0x00000001,
+	0x61010008,
+	0x00000000,
+	0x00000001,	 /* reloc */
+	0x00000001,	 /* reloc */
+	0x00000000,
+	0x00000001,	 /* reloc */
+	0x00000000,
+	0x00000001,
+	0x00000000,
+	0x00000001,
+	0x61020000,
+	0x00000000,
+	0x78050001,
+	0x00000018,
+	0x00000000,
+	0x780d1002,
+	0x00000000,
+	0x00000000,
+	0x00000420,
+	0x78150003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78100004,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78160003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78110005,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78120002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78170003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79050005,
+	0xe0040000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x79100000,
+	0x00000000,
+	0x79000002,
+	0xffffffff,
+	0x00000000,
+	0x00000000,
+	0x780e0002,
+	0x00000441,
+	0x00000401,
+	0x00000401,
+	0x78021002,
+	0x00000000,
+	0x00000000,
+	0x00000400,
+	0x78130012,
+	0x00400810,
+	0x00000000,
+	0x20000000,
+	0x04000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78140007,
+	0x00000280,
+	0x08080000,
+	0x00000000,
+	0x00060000,
+	0x4e080002,
+	0x00100400,
+	0x00000000,
+	0x00000000,
+	0x78090005,
+	0x02000000,
+	0x22220000,
+	0x02f60000,
+	0x11330000,
+	0x02850004,
+	0x11220000,
+	0x78011002,
+	0x00000000,
+	0x00000000,
+	0x00000200,
+	0x78080003,
+	0x00002000,
+	0x00000448,	 /* reloc */
+	0x00000448,	 /* reloc */
+	0x00000000,
+	0x05000000,	 /* cmds end */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000220,	 /* state start */
+	0x00000240,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0060005a,
+	0x204077be,
+	0x000000c0,
+	0x008d0040,
+	0x0060005a,
+	0x206077be,
+	0x000000c0,
+	0x008d0080,
+	0x0060005a,
+	0x208077be,
+	0x000000d0,
+	0x008d0040,
+	0x0060005a,
+	0x20a077be,
+	0x000000d0,
+	0x008d0080,
+	0x00000201,
+	0x20080061,
+	0x00000000,
+	0x00000000,
+	0x00600001,
+	0x20200022,
+	0x008d0000,
+	0x00000000,
+	0x02800031,
+	0x21c01cc9,
+	0x00000020,
+	0x0a8a0001,
+	0x00600001,
+	0x204003be,
+	0x008d01c0,
+	0x00000000,
+	0x00600001,
+	0x206003be,
+	0x008d01e0,
+	0x00000000,
+	0x00600001,
+	0x208003be,
+	0x008d0200,
+	0x00000000,
+	0x00600001,
+	0x20a003be,
+	0x008d0220,
+	0x00000000,
+	0x00600001,
+	0x20c003be,
+	0x008d0240,
+	0x00000000,
+	0x00600001,
+	0x20e003be,
+	0x008d0260,
+	0x00000000,
+	0x00600001,
+	0x210003be,
+	0x008d0280,
+	0x00000000,
+	0x00600001,
+	0x212003be,
+	0x008d02a0,
+	0x00000000,
+	0x05800031,
+	0x24001cc8,
+	0x00000040,
+	0x90019000,
+	0x0000007e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0000007e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0000007e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0000007e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0000007e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0000007e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0000007e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0000007e,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x30000000,
+	0x00000124,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0xf99a130c,
+	0x799a130c,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x80000031,
+	0x00000003,
+	0x00000000,	 /* state end */
+};
+
+RO_RENDERSTATE(6);
253	drivers/gpu/drm/i915/intel_renderstate_gen7.c	Normal file
@@ -0,0 +1,253 @@
+#include "intel_renderstate.h"
+
+static const u32 gen7_null_state_relocs[] = {
+	0x0000000c,
+	0x00000010,
+	0x00000018,
+	0x000001ec,
+};
+
+static const u32 gen7_null_state_batch[] = {
+	0x69040000,
+	0x61010008,
+	0x00000000,
+	0x00000001,	 /* reloc */
+	0x00000001,	 /* reloc */
+	0x00000000,
+	0x00000001,	 /* reloc */
+	0x00000000,
+	0x00000001,
+	0x00000000,
+	0x00000001,
+	0x790d0002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78180000,
+	0x00000001,
+	0x79160000,
+	0x00000008,
+	0x78300000,
+	0x02010040,
+	0x78310000,
+	0x04000000,
+	0x78320000,
+	0x04000000,
+	0x78330000,
+	0x02000000,
+	0x78100004,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x781b0005,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x781c0002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x781d0004,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78110005,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78120002,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78210000,
+	0x00000000,
+	0x78130005,
+	0x00000000,
+	0x20000000,
+	0x04000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78140001,
+	0x20000800,
+	0x00000000,
+	0x781e0001,
+	0x00000000,
+	0x00000000,
+	0x78050005,
+	0xe0040000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78040001,
+	0x00000000,
+	0x00000000,
+	0x78240000,
+	0x00000240,
+	0x78230000,
+	0x00000260,
+	0x782f0000,
+	0x00000280,
+	0x781f000c,
+	0x00400810,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x78200006,
+	0x000002c0,
+	0x08080000,
+	0x00000000,
+	0x28000402,
+	0x00060000,
+	0x00000000,
+	0x00000000,
+	0x78090005,
+	0x02000000,
+	0x22220000,
+	0x02f60000,
+	0x11230000,
+	0x02f60004,
+	0x11230000,
+	0x78080003,
+	0x00006008,
+	0x00000340,	 /* reloc */
+	0xffffffff,
+	0x00000000,
+	0x782a0000,
+	0x00000360,
+	0x79000002,
+	0xffffffff,
+	0x00000000,
+	0x00000000,
+	0x7b000005,
+	0x0000000f,
+	0x00000003,
+	0x00000000,
+	0x00000001,
+	0x00000000,
+	0x00000000,
+	0x05000000,	 /* cmds end */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000031,	 /* state start */
+	0x00000003,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0xf99a130c,
+	0x799a130c,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000492,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x0080005a,
+	0x2e2077bd,
+	0x000000c0,
+	0x008d0040,
+	0x0080005a,
+	0x2e6077bd,
+	0x000000d0,
+	0x008d0040,
+	0x02800031,
+	0x21801fa9,
+	0x008d0e20,
+	0x08840001,
+	0x00800001,
+	0x2e2003bd,
+	0x008d0180,
+	0x00000000,
+	0x00800001,
+	0x2e6003bd,
+	0x008d01c0,
+	0x00000000,
+	0x00800001,
+	0x2ea003bd,
+	0x008d0200,
+	0x00000000,
+	0x00800001,
+	0x2ee003bd,
+	0x008d0240,
+	0x00000000,
+	0x05800031,
+	0x20001fa8,
+	0x008d0e20,
+	0x90031000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000380,
+	0x000003a0,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,	 /* state end */
+};
+
+RO_RENDERSTATE(7);
479	drivers/gpu/drm/i915/intel_renderstate_gen8.c	Normal file
#include "intel_renderstate.h"

static const u32 gen8_null_state_relocs[] = {
	0x00000048,
	0x00000050,
	0x00000060,
	0x000003ec,
};

static const u32 gen8_null_state_batch[] = {
	0x69040000,
	0x61020001, 0x00000000, 0x00000000,
	0x79120000, 0x00000000,
	0x79130000, 0x00000000,
	0x79140000, 0x00000000,
	0x79150000, 0x00000000,
	0x79160000, 0x00000000,
	0x6101000e, 0x00000001, 0x00000000, 0x00000001,
	0x00000001, /* reloc */
	0x00000000,
	0x00000001, /* reloc */
	0x00000000, 0x00000000, 0x00000000,
	0x00000001, /* reloc */
	0x00000000,
	0xfffff001, 0x00001001, 0xfffff001, 0x00001001,
	0x78230000, 0x000006e0,
	0x78210000, 0x00000700,
	0x78300000, 0x08010040,
	0x78330000, 0x08000000,
	0x78310000, 0x08000000,
	0x78320000, 0x08000000,
	0x78240000, 0x00000641,
	0x780e0000, 0x00000601,
	0x780d0000, 0x00000000,
	0x78180000, 0x00000001,
	0x78520003, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x78190009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x781b0007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
	0x78270000, 0x00000000,
	0x782c0000, 0x00000000,
	0x781c0002, 0x00000000, 0x00000000, 0x00000000,
	0x78160009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x78110008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x78290000, 0x00000000,
	0x782e0000, 0x00000000,
	0x781a0009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x781d0007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
	0x78280000, 0x00000000,
	0x782d0000, 0x00000000,
	0x78260000, 0x00000000,
	0x782b0000, 0x00000000,
	0x78150009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x78100007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
	0x781e0003, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x78120002, 0x00000000, 0x00000000, 0x00000000,
	0x781f0002, 0x30400820, 0x00000000, 0x00000000,
	0x78510009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x78500003, 0x00210000, 0x00000000, 0x00000000, 0x00000000,
	0x78130002, 0x00000000, 0x00000000, 0x00000000,
	0x782a0000, 0x00000480,
	0x782f0000, 0x00000540,
	0x78140000, 0x00000800,
	0x78170009, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x7820000a, 0x00000580, 0x00000000, 0x08080000, 0x00000000, 0x00000000,
	0x1f000002, 0x00060000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x784d0000, 0x40000000,
	0x784f0000, 0x80000100,
	0x780f0000, 0x00000740,
	0x78050006, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000,
	0x78070003, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x78060003, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x78040001, 0x00000000, 0x00000001,
	0x79000002, 0xffffffff, 0x00000000, 0x00000000,
	0x78080003, 0x00006000,
	0x000005e0, /* reloc */
	0x00000000, 0x00000000,
	0x78090005, 0x02000000, 0x22220000, 0x02f60000, 0x11230000, 0x02850004,
	0x11230000,
	0x784b0000, 0x0000000f,
	0x78490001, 0x00000000, 0x00000000,
	0x7b000005, 0x00000000, 0x00000003, 0x00000000, 0x00000001, 0x00000000,
	0x00000000,
	0x05000000, /* cmds end */
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x000004c0, /* state start */
	0x00000500,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000092,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0060005a, 0x21403ae8, 0x3a0000c0, 0x008d0040,
	0x0060005a, 0x21603ae8, 0x3a0000c0, 0x008d0080,
	0x0060005a, 0x21803ae8, 0x3a0000d0, 0x008d0040,
	0x0060005a, 0x21a03ae8, 0x3a0000d0, 0x008d0080,
	0x02800031, 0x2e0022e8, 0x0e000140, 0x08840001,
	0x05800031, 0x200022e0, 0x0e000e00, 0x90031000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x06200000, 0x00000002, 0x06200000, 0x00000002,
	0x06200000, 0x00000002, 0x06200000, 0x00000002,
	0x06200000, 0x00000002, 0x06200000, 0x00000002,
	0x06200000, 0x00000002, 0x06200000, 0x00000002,
	0x06200000, 0x00000002, 0x06200000, 0x00000002,
	0x06200000, 0x00000002, 0x06200000, 0x00000002,
	0x06200000, 0x00000002, 0x06200000, 0x00000002,
	0x06200000, 0x00000002, 0x06200000, 0x00000002,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0xf99a130c, 0x799a130c,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x3f800000, 0x00000000, 0x3f800000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, /* state end */
};

RO_RENDERSTATE(8);
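For context, RO_RENDERSTATE() is the glue between these generated tables and the golden render state setup code; in intel_renderstate.h it expands to roughly the following (a sketch; treat the exact field names as an approximation):

/* Sketch of the RO_RENDERSTATE() macro: it packs the per-gen reloc and
 * batch tables into a rodata descriptor consumed at render state init. */
#define RO_RENDERSTATE(_g)						\
	const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
		.relocs = gen ## _g ## _null_state_relocs,		\
		.reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \
		.batch = gen ## _g ## _null_state_batch,		\
		.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
	}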
(One file's diff was suppressed by the viewer because it is too large.)
drivers/gpu/drm/i915/intel_ringbuffer.h:

@@ -1,6 +1,10 @@
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_
 
+#include <linux/hashtable.h>
+
+#define I915_CMD_HASH_ORDER 9
+
 /*
  * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
  * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
@@ -54,7 +58,28 @@ struct intel_ring_hangcheck {
 	bool deadlock;
 };
 
-struct intel_ring_buffer {
+struct intel_ringbuffer {
+	struct drm_i915_gem_object *obj;
+	void __iomem *virtual_start;
+
+	u32 head;
+	u32 tail;
+	int space;
+	int size;
+	int effective_size;
+
+	/** We track the position of the requests in the ring buffer, and
+	 * when each is retired we increment last_retired_head as the GPU
+	 * must have finished processing the request and so we know we
+	 * can advance the ringbuffer up to that position.
+	 *
+	 * last_retired_head is set to -1 after the value is consumed so
+	 * we can detect new retirements.
+	 */
+	u32 last_retired_head;
+};
+
+struct intel_engine_cs {
 	const char *name;
 	enum intel_ring_id {
 		RCS = 0x0,
@@ -66,57 +91,41 @@ struct intel_ring_buffer {
 #define I915_NUM_RINGS 5
 #define LAST_USER_RING (VECS + 1)
 	u32 mmio_base;
-	void __iomem *virtual_start;
 	struct drm_device *dev;
-	struct drm_i915_gem_object *obj;
+	struct intel_ringbuffer *buffer;
 
-	u32 head;
-	u32 tail;
-	int space;
-	int size;
-	int effective_size;
 	struct intel_hw_status_page status_page;
 
-	/** We track the position of the requests in the ring buffer, and
-	 * when each is retired we increment last_retired_head as the GPU
-	 * must have finished processing the request and so we know we
-	 * can advance the ringbuffer up to that position.
-	 *
-	 * last_retired_head is set to -1 after the value is consumed so
-	 * we can detect new retirements.
-	 */
-	u32 last_retired_head;
-
 	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
 	u32 irq_enable_mask;	/* bitmask to enable ring interrupt */
 	u32 trace_irq_seqno;
-	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
-	void (*irq_put)(struct intel_ring_buffer *ring);
+	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
+	void (*irq_put)(struct intel_engine_cs *ring);
 
-	int (*init)(struct intel_ring_buffer *ring);
+	int (*init)(struct intel_engine_cs *ring);
 
-	void (*write_tail)(struct intel_ring_buffer *ring,
+	void (*write_tail)(struct intel_engine_cs *ring,
 			   u32 value);
-	int __must_check (*flush)(struct intel_ring_buffer *ring,
+	int __must_check (*flush)(struct intel_engine_cs *ring,
 				  u32 invalidate_domains,
 				  u32 flush_domains);
-	int (*add_request)(struct intel_ring_buffer *ring);
+	int (*add_request)(struct intel_engine_cs *ring);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
-	u32 (*get_seqno)(struct intel_ring_buffer *ring,
+	u32 (*get_seqno)(struct intel_engine_cs *ring,
			 bool lazy_coherency);
-	void (*set_seqno)(struct intel_ring_buffer *ring,
+	void (*set_seqno)(struct intel_engine_cs *ring,
			  u32 seqno);
-	int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
+	int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
				   u64 offset, u32 length,
				   unsigned flags);
 #define I915_DISPATCH_SECURE 0x1
 #define I915_DISPATCH_PINNED 0x2
-	void (*cleanup)(struct intel_ring_buffer *ring);
+	void (*cleanup)(struct intel_engine_cs *ring);
 
 	struct {
 		u32	sync_seqno[I915_NUM_RINGS-1];
@@ -129,10 +138,10 @@ struct intel_ring_buffer {
 	} mbox;
 
 	/* AKA wait() */
-	int	(*sync_to)(struct intel_ring_buffer *ring,
-			   struct intel_ring_buffer *to,
+	int	(*sync_to)(struct intel_engine_cs *ring,
+			   struct intel_engine_cs *to,
 			   u32 seqno);
-	int	(*signal)(struct intel_ring_buffer *signaller,
+	int	(*signal)(struct intel_engine_cs *signaller,
			  /* num_dwords needed by caller */
			  unsigned int num_dwords);
 	} semaphore;
@@ -165,8 +174,8 @@ struct intel_ring_buffer {
 
 	wait_queue_head_t irq_queue;
 
-	struct i915_hw_context *default_context;
-	struct i915_hw_context *last_context;
+	struct intel_context *default_context;
+	struct intel_context *last_context;
 
 	struct intel_ring_hangcheck hangcheck;
 
@@ -176,12 +185,13 @@ struct intel_ring_buffer {
 		volatile u32 *cpu_page;
 	} scratch;
 
 	bool needs_cmd_parser;
 
 	/*
-	 * Tables of commands the command parser needs to know about
+	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
-	const struct drm_i915_cmd_table *cmd_tables;
-	int cmd_table_count;
+	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
 
 	/*
	 * Table of registers allowed in commands that read/write registers.
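Replacing the cmd_tables/cmd_table_count pair with DECLARE_HASHTABLE() turns per-ring descriptor lookup from a linear table walk into a hash lookup keyed on the command header. A sketch of the lookup pattern this enables, with names that only approximate the real command parser's:

/* Illustrative only: the command parser's actual node type and helper
 * names differ in detail. */
struct cmd_node {
	const struct drm_i915_cmd_descriptor *desc;
	struct hlist_node node;
};

static const struct drm_i915_cmd_descriptor *
find_cmd_in_hash(struct intel_engine_cs *ring, u32 cmd_header)
{
	struct cmd_node *n;

	hash_for_each_possible(ring->cmd_hash, n, node, cmd_header) {
		u32 masked = cmd_header & n->desc->cmd.mask;

		if (n->desc->cmd.value == masked)
			return n->desc;
	}

	return NULL;
}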
@@ -210,20 +220,20 @@ struct intel_ring_buffer {
 };
 
 static inline bool
-intel_ring_initialized(struct intel_ring_buffer *ring)
+intel_ring_initialized(struct intel_engine_cs *ring)
 {
-	return ring->obj != NULL;
+	return ring->buffer && ring->buffer->obj;
 }
 
 static inline unsigned
-intel_ring_flag(struct intel_ring_buffer *ring)
+intel_ring_flag(struct intel_engine_cs *ring)
 {
 	return 1 << ring->id;
 }
 
 static inline u32
-intel_ring_sync_index(struct intel_ring_buffer *ring,
-		      struct intel_ring_buffer *other)
+intel_ring_sync_index(struct intel_engine_cs *ring,
+		      struct intel_engine_cs *other)
 {
 	int idx;
 
@@ -241,7 +251,7 @@ intel_ring_sync_index(struct intel_ring_buffer *ring,
 }
 
 static inline u32
-intel_read_status_page(struct intel_ring_buffer *ring,
+intel_read_status_page(struct intel_engine_cs *ring,
		       int reg)
 {
 	/* Ensure that the compiler doesn't optimize away the load. */
@@ -250,7 +260,7 @@ intel_read_status_page(struct intel_ring_buffer *ring,
 }
 
 static inline void
-intel_write_status_page(struct intel_ring_buffer *ring,
+intel_write_status_page(struct intel_engine_cs *ring,
			int reg, u32 value)
 {
 	ring->status_page.page_addr[reg] = value;
@@ -275,27 +285,29 @@ intel_write_status_page(struct intel_ring_buffer *ring,
 #define I915_GEM_HWS_SCRATCH_INDEX	0x30
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
-void intel_stop_ring_buffer(struct intel_ring_buffer *ring);
-void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+void intel_stop_ring_buffer(struct intel_engine_cs *ring);
+void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
 
-int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
-int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
-static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
+int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
+static inline void intel_ring_emit(struct intel_engine_cs *ring,
				   u32 data)
 {
-	iowrite32(data, ring->virtual_start + ring->tail);
-	ring->tail += 4;
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
+	ringbuf->tail += 4;
 }
-static inline void intel_ring_advance(struct intel_ring_buffer *ring)
+static inline void intel_ring_advance(struct intel_engine_cs *ring)
 {
-	ring->tail &= ring->size - 1;
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+	ringbuf->tail &= ringbuf->size - 1;
 }
-void __intel_ring_advance(struct intel_ring_buffer *ring);
+void __intel_ring_advance(struct intel_engine_cs *ring);
 
-int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
-void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
-int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
+int __must_check intel_ring_idle(struct intel_engine_cs *ring);
+void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
+int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
+int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -303,21 +315,21 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev);
 int intel_init_blt_ring_buffer(struct drm_device *dev);
 int intel_init_vebox_ring_buffer(struct drm_device *dev);
 
-u64 intel_ring_get_active_head(struct intel_ring_buffer *ring);
-void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
+void intel_ring_setup_status_page(struct intel_engine_cs *ring);
 
-static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
+static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
 {
-	return ring->tail;
+	return ring->buffer->tail;
 }
 
-static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
+static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
 {
 	BUG_ON(ring->outstanding_lazy_seqno == 0);
 	return ring->outstanding_lazy_seqno;
 }
 
-static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
+static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
 {
 	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
 		ring->trace_irq_seqno = seqno;
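To see what the split buys: engine state (interrupt hooks, seqno accessors, semaphores) stays in intel_engine_cs, while everything needed to write commands now lives behind ring->buffer, so a different ringbuffer (as execlists will need) can be swapped in without touching the engine. The usual emission sequence over this API looks like the sketch below; it restates the existing calling convention rather than new code from this series:

/* Emit a small command sequence; the tail pointer being advanced lives
 * in ring->buffer, not in the engine itself. */
static int example_emit_flush(struct intel_engine_cs *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 2);	/* reserve 2 dwords */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);	/* writes at buffer->tail */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);		/* masks tail to the ring size */

	return 0;
}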
drivers/gpu/drm/i915/intel_sdvo.c:

@@ -1153,20 +1153,21 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 	pipe_config->pixel_multiplier =
 		intel_sdvo_get_pixel_multiplier(adjusted_mode);
 
+	pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor;
+
 	if (intel_sdvo->color_range_auto) {
 		/* See CEA-861-E - 5.1 Default Encoding Parameters */
 		/* FIXME: This bit is only valid when using TMDS encoding and 8
 		 * bit per color mode. */
-		if (intel_sdvo->has_hdmi_monitor &&
+		if (pipe_config->has_hdmi_sink &&
 		    drm_match_cea_mode(adjusted_mode) > 1)
-			intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
-		else
-			intel_sdvo->color_range = 0;
+			pipe_config->limited_color_range = true;
+	} else {
+		if (pipe_config->has_hdmi_sink &&
+		    intel_sdvo->color_range == HDMI_COLOR_RANGE_16_235)
+			pipe_config->limited_color_range = true;
 	}
 
-	if (intel_sdvo->color_range)
-		pipe_config->limited_color_range = true;
-
 	/* Clock computation needs to happen after pixel multiplier. */
 	if (intel_sdvo->is_tv)
 		i9xx_adjust_sdvo_tv_clock(pipe_config);
@@ -1223,7 +1224,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
 	if (!intel_sdvo_set_target_input(intel_sdvo))
 		return;
 
-	if (intel_sdvo->has_hdmi_monitor) {
+	if (crtc->config.has_hdmi_sink) {
 		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
 		intel_sdvo_set_colorimetry(intel_sdvo,
 					   SDVO_COLORIMETRY_RGB256);
@@ -1258,8 +1259,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
 	/* The real mode polarity is set by the SDVO commands, using
 	 * struct intel_sdvo_dtd. */
 	sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
-	if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi)
-		sdvox |= intel_sdvo->color_range;
+	if (!HAS_PCH_SPLIT(dev) && crtc->config.limited_color_range)
+		sdvox |= HDMI_COLOR_RANGE_16_235;
 	if (INTEL_INFO(dev)->gen < 5)
 		sdvox |= SDVO_BORDER_ENABLE;
 	} else {
@@ -1349,6 +1350,8 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 	u8 val;
 	bool ret;
 
+	sdvox = I915_READ(intel_sdvo->sdvo_reg);
+
 	ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
 	if (!ret) {
 		/* Some sdvo encoders are not spec compliant and don't
@@ -1377,7 +1380,6 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 	 * other platforms.
 	 */
 	if (IS_I915G(dev) || IS_I915GM(dev)) {
-		sdvox = I915_READ(intel_sdvo->sdvo_reg);
 		pipe_config->pixel_multiplier =
 			((sdvox & SDVO_PORT_MULTIPLY_MASK)
 			 >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
@@ -1406,6 +1408,15 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 		}
 	}
 
+	if (sdvox & HDMI_COLOR_RANGE_16_235)
+		pipe_config->limited_color_range = true;
+
+	if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
+				 &val, 1)) {
+		if (val == SDVO_ENCODE_HDMI)
+			pipe_config->has_hdmi_sink = true;
+	}
+
 	WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
 	     "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
 	     pipe_config->pixel_multiplier, encoder_pixel_multiplier);
drivers/gpu/drm/i915/intel_sideband.c:

@@ -29,12 +29,21 @@
  * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
  * VLV_VLV2_PUNIT_HAS_0.8.docx
  */
+
+/* Standard MMIO read, non-posted */
+#define SB_MRD_NP	0x00
+/* Standard MMIO write, non-posted */
+#define SB_MWR_NP	0x01
+/* Private register read, double-word addressing, non-posted */
+#define SB_CRRDDA_NP	0x06
+/* Private register write, double-word addressing, non-posted */
+#define SB_CRWRDA_NP	0x07
+
 static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
 			   u32 port, u32 opcode, u32 addr, u32 *val)
 {
 	u32 cmd, be = 0xf, bar = 0;
-	bool is_read = (opcode == PUNIT_OPCODE_REG_READ ||
-			opcode == DPIO_OPCODE_REG_READ);
+	bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
 
 	cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
 		(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
@@ -74,7 +83,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
 
 	mutex_lock(&dev_priv->dpio_lock);
 	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
-			PUNIT_OPCODE_REG_READ, addr, &val);
+			SB_CRRDDA_NP, addr, &val);
 	mutex_unlock(&dev_priv->dpio_lock);
 
 	return val;
@@ -86,7 +95,7 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
 
 	mutex_lock(&dev_priv->dpio_lock);
 	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
-			PUNIT_OPCODE_REG_WRITE, addr, &val);
+			SB_CRWRDA_NP, addr, &val);
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
@@ -95,7 +104,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
 	u32 val = 0;
 
 	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
-			PUNIT_OPCODE_REG_READ, reg, &val);
+			SB_CRRDDA_NP, reg, &val);
 
 	return val;
 }
@@ -103,7 +112,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
 	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
-			PUNIT_OPCODE_REG_WRITE, reg, &val);
+			SB_CRWRDA_NP, reg, &val);
 }
 
 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
@@ -114,7 +123,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
 
 	mutex_lock(&dev_priv->dpio_lock);
 	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
-			PUNIT_OPCODE_REG_READ, addr, &val);
+			SB_CRRDDA_NP, addr, &val);
 	mutex_unlock(&dev_priv->dpio_lock);
 
 	return val;
@@ -124,56 +133,56 @@ u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 	u32 val = 0;
 	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
-			PUNIT_OPCODE_REG_READ, reg, &val);
+			SB_CRRDDA_NP, reg, &val);
 	return val;
 }
 
 void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
 	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
-			PUNIT_OPCODE_REG_WRITE, reg, &val);
+			SB_CRWRDA_NP, reg, &val);
 }
 
 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 	u32 val = 0;
 	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
-			PUNIT_OPCODE_REG_READ, reg, &val);
+			SB_CRRDDA_NP, reg, &val);
 	return val;
 }
 
 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
 	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
-			PUNIT_OPCODE_REG_WRITE, reg, &val);
+			SB_CRWRDA_NP, reg, &val);
 }
 
 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 	u32 val = 0;
 	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
-			PUNIT_OPCODE_REG_READ, reg, &val);
+			SB_CRRDDA_NP, reg, &val);
 	return val;
 }
 
 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
 	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
-			PUNIT_OPCODE_REG_WRITE, reg, &val);
+			SB_CRWRDA_NP, reg, &val);
 }
 
 u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 	u32 val = 0;
 	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
-			PUNIT_OPCODE_REG_READ, reg, &val);
+			SB_CRRDDA_NP, reg, &val);
 	return val;
 }
 
 void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
 	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
-			PUNIT_OPCODE_REG_WRITE, reg, &val);
+			SB_CRWRDA_NP, reg, &val);
 }
 
 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
@@ -181,7 +190,7 @@ u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
 	u32 val = 0;
 
 	vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
-			DPIO_OPCODE_REG_READ, reg, &val);
+			SB_MRD_NP, reg, &val);
 
 	/*
	 * FIXME: There might be some registers where all 1's is a valid value,
@@ -196,7 +205,7 @@ u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
 {
 	vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
-			DPIO_OPCODE_REG_WRITE, reg, &val);
+			SB_MWR_NP, reg, &val);
 }
 
 /* SBI access */
@@ -261,13 +270,13 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 	u32 val = 0;
-	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
-			DPIO_OPCODE_REG_READ, reg, &val);
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
+			reg, &val);
 	return val;
 }
 
 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
-	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI,
-			DPIO_OPCODE_REG_WRITE, reg, &val);
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
+			reg, &val);
 }
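The rename makes the opcode choice explicit at each call site: the CRRDDA/CRWRDA pair for private-register (config) space, MRD/MWR for MMIO-style DPIO access. A hypothetical accessor for some further IOSF unit would follow the same pattern (IOSF_PORT_FOO below is invented purely for illustration):

/* Hypothetical example only: IOSF_PORT_FOO is not a real port define. */
u32 vlv_foo_read(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;

	mutex_lock(&dev_priv->dpio_lock);
	/* Private register space -> double-word addressed CR opcodes. */
	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_FOO,
			SB_CRRDDA_NP, reg, &val);
	mutex_unlock(&dev_priv->dpio_lock);

	return val;
}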
drivers/gpu/drm/i915/intel_sprite.c:

@@ -696,10 +696,7 @@ intel_post_enable_primary(struct drm_crtc *crtc)
 	 * when going from primary only to sprite only and vice
 	 * versa.
 	 */
-	if (intel_crtc->config.ips_enabled) {
-		intel_wait_for_vblank(dev, intel_crtc->pipe);
-		hsw_enable_ips(intel_crtc);
-	}
+	hsw_enable_ips(intel_crtc);
 
 	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
@@ -1021,6 +1018,9 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 
 	intel_crtc->primary_enabled = primary_enabled;
 
+	if (primary_was_enabled != primary_enabled)
+		intel_crtc_wait_for_pending_flips(crtc);
+
 	if (primary_was_enabled && !primary_enabled)
 		intel_pre_disable_primary(crtc);
 
drivers/gpu/drm/i915/intel_uncore.c:

@@ -921,7 +921,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_reset_stats *args = data;
 	struct i915_ctx_hang_stats *hs;
-	struct i915_hw_context *ctx;
+	struct intel_context *ctx;
 	int ret;
 
 	if (args->flags || args->pad)
@@ -976,7 +976,6 @@ static int i965_do_reset(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	/* We can't reset render&media without also resetting display ... */
 	pci_write_config_byte(dev->pdev, I965_GDRST,
 			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
 
@@ -989,26 +988,58 @@ static int i965_do_reset(struct drm_device *dev)
 	return 0;
 }
 
-static int ironlake_do_reset(struct drm_device *dev)
+static int g4x_do_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 gdrst;
 	int ret;
 
-	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
-	gdrst &= ~GRDOM_MASK;
-	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
-		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
-	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+	pci_write_config_byte(dev->pdev, I965_GDRST,
+			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
+	ret = wait_for(i965_reset_complete(dev), 500);
 	if (ret)
 		return ret;
 
-	/* We can't reset render&media without also resetting display ... */
-	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
-	gdrst &= ~GRDOM_MASK;
+	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
+	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
+	POSTING_READ(VDECCLK_GATE_D);
+
+	pci_write_config_byte(dev->pdev, I965_GDRST,
+			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+	ret = wait_for(i965_reset_complete(dev), 500);
+	if (ret)
+		return ret;
+
+	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
+	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
+	POSTING_READ(VDECCLK_GATE_D);
+
+	pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+
+	return 0;
+}
+
+static int ironlake_do_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
 	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
-		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
-	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
+	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
+			ILK_GRDOM_RESET_ENABLE) == 0, 500);
+	if (ret)
+		return ret;
+
+	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
+	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
+			ILK_GRDOM_RESET_ENABLE) == 0, 500);
+	if (ret)
+		return ret;
+
+	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);
+
+	return 0;
 }
 
 static int gen6_do_reset(struct drm_device *dev)
@@ -1039,7 +1070,11 @@ int intel_gpu_reset(struct drm_device *dev)
 	case 7:
 	case 6: return gen6_do_reset(dev);
 	case 5: return ironlake_do_reset(dev);
-	case 4: return i965_do_reset(dev);
+	case 4:
+		if (IS_G4X(dev))
+			return g4x_do_reset(dev);
+		else
+			return i965_do_reset(dev);
 	default: return -ENODEV;
 	}
 }
include/drm/drmP.h:

@@ -1024,14 +1024,17 @@ struct drm_pending_vblank_event {
 };
 
 struct drm_vblank_crtc {
+	struct drm_device *dev;		/* pointer to the drm_device */
 	wait_queue_head_t queue;	/**< VBLANK wait queue */
 	struct timeval time[DRM_VBLANKTIME_RBSIZE];	/**< timestamp of current count */
+	struct timer_list disable_timer;	/* delayed disable timer */
 	atomic_t count;			/**< number of VBLANK interrupts */
 	atomic_t refcount;		/* number of users of vblank interrupts per crtc */
 	u32 last;			/* protected by dev->vbl_lock, used */
 					/* for wraparound handling */
+	u32 last_wait;			/* Last vblank seqno waited per CRTC */
 	unsigned int inmodeset;		/* Display driver is setting mode */
+	int crtc;			/* crtc index */
 	bool enabled;			/* so we don't call enable more than
 					   once per disable */
 };
@@ -1119,7 +1122,6 @@ struct drm_device {
 
 	spinlock_t vblank_time_lock;    /**< Protects vblank count and time updates during vblank enable/disable */
 	spinlock_t vbl_lock;
-	struct timer_list vblank_disable_timer;
 
 	u32 max_vblank_count;           /**< size of vblank counter register */
 
@@ -1357,8 +1359,14 @@ extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
 extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
+extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
 extern void drm_vblank_off(struct drm_device *dev, int crtc);
+extern void drm_vblank_on(struct drm_device *dev, int crtc);
 extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
 extern void drm_vblank_cleanup(struct drm_device *dev);
 
 extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
 				     struct timeval *tvblank, unsigned flags);
 extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
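The drm_crtc_* variants let drivers pass the CRTC they already hold instead of a (dev, index) pair. A minimal sketch of the intended call pattern around a modeset, assuming a driver's own enable/disable hooks (the example_* function names are illustrative):

/* Illustrative driver hooks, not code from this patch set. */
static void example_crtc_enable(struct drm_crtc *crtc)
{
	/* ... program the pipe and wait for it to come up ... */

	/* Re-arm vblank processing: pending waiters resume and the
	 * software counter is resynchronized with the hardware one. */
	drm_crtc_vblank_on(crtc);
}

static void example_crtc_disable(struct drm_crtc *crtc)
{
	/* Reject new vblank waits and flush out current waiters so
	 * nobody blocks on a pipe that is about to be shut down. */
	drm_crtc_vblank_off(crtc);

	/* ... disable the pipe ... */
}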
include/drm/i915_pciids.h:

@@ -245,4 +245,10 @@
 	INTEL_BDW_GT12D_IDS(info), \
 	INTEL_BDW_GT3D_IDS(info)
 
+#define INTEL_CHV_IDS(info) \
+	INTEL_VGA_DEVICE(0x22b0, info), \
+	INTEL_VGA_DEVICE(0x22b1, info), \
+	INTEL_VGA_DEVICE(0x22b2, info), \
+	INTEL_VGA_DEVICE(0x22b3, info)
+
 #endif /* _I915_PCIIDS_H */
include/uapi/drm/i915_drm.h:

@@ -223,6 +223,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GEM_GET_CACHING	0x30
 #define DRM_I915_REG_READ		0x31
 #define DRM_I915_GET_RESET_STATS	0x32
+#define DRM_I915_GEM_USERPTR		0x33
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -273,6 +274,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
 #define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
 #define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
+#define DRM_IOCTL_I915_GEM_USERPTR		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -1050,4 +1052,18 @@ struct drm_i915_reset_stats {
 	__u32 pad;
 };
 
+struct drm_i915_gem_userptr {
+	__u64 user_ptr;
+	__u64 user_size;
+	__u32 flags;
+#define I915_USERPTR_READ_ONLY 0x1
+#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
+	/**
+	 * Returned handle for the object.
+	 *
+	 * Object handles are nonzero.
+	 */
+	__u32 handle;
+};
+
 #endif /* _UAPI_I915_DRM_H_ */
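The new ioctl wraps an existing anonymous-memory range in a GEM handle. A minimal userspace sketch of the call, assuming a page-aligned buffer and an open i915 DRM fd (error handling trimmed to the essentials):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Wrap `len` bytes at page-aligned `ptr` in a GEM handle; returns 0 on
 * success with *handle set. Sketch only; real code must check errno. */
static int gem_userptr(int fd, void *ptr, uint64_t len, uint32_t *handle)
{
	struct drm_i915_gem_userptr arg;

	memset(&arg, 0, sizeof(arg));
	arg.user_ptr = (uintptr_t)ptr;
	arg.user_size = len;
	arg.flags = 0;	/* or I915_USERPTR_READ_ONLY */

	if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
		return -1;

	*handle = arg.handle;	/* nonzero on success */
	return 0;
}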