Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "Fixes all over the place.  The rockchip and imx fixes I missed while
  on holidays, so I've queued them now, which makes this a bit bigger.

  The rest is misc amdgpu, radeon, i915 and armada.  I think the most
  important thing is the ioctl fix: we dropped the avoid-compat ball,
  so we get to add a compat wrapper.  There is also an i915 revert to
  avoid a regression with existing userspace"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (43 commits)
  drm/ttm: improve uncached page deallocation.
  drm/ttm: fix uncached page deallocation to properly fill page pool v3.
  drm/amdgpu/dce8: Re-set VBLANK interrupt state when enabling a CRTC
  drm/radeon/ci: silence a harmless PCC warning
  drm/amdgpu/cz: silence some dpm debug output
  drm/amdgpu/cz: store the forced dpm level
  drm/amdgpu/cz: unforce dpm levels before forcing to low/high
  drm/amdgpu: remove bogus check in gfx8 rb setup
  drm/amdgpu: set proper index/data pair for smc regs on CZ (v2)
  drm/amdgpu: disable the IP module if early_init returns -ENOENT (v2)
  drm/amdgpu: stop context leak in the error path
  drm/amdgpu: validate the context id in the dependencies
  drm/radeon: fix user ptr race condition
  drm/radeon: Don't flush the GART TLB if rdev->gart.ptr == NULL
  drm/radeon: add a dpm quirk for Sapphire Radeon R9 270X 2GB GDDR5
  drm/armada: avoid saving the adjusted mode to crtc->mode
  drm/armada: fix overlay when partially off-screen
  drm/armada: convert overlay to use drm_plane_helper_check_update()
  drm/armada: fix gem object free after failed prime import
  drm/armada: fix incorrect overlay plane cleanup
  ...
commit e05bf4f366
39 changed files with 416 additions and 201 deletions

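The compat wrapper mentioned above is for DRM_IOCTL_MODE_ADDFB2 (see the
drm_ioc32.c hunks below): struct drm_mode_fb_cmd2 ends in a u64 modifier[4]
array, and on x86 a 64-bit kernel inserts four bytes of padding before that
array which 32-bit userspace does not, so the two ABIs disagree on the struct
size and field offsets. A minimal user-space sketch of the mismatch
(illustration only, not kernel code):

#include <stdint.h>
#include <stdio.h>

struct fb_cmd2_64 {		/* layout as a 64-bit kernel sees it */
	uint32_t fb_id, width, height, pixel_format, flags;
	uint32_t handles[4], pitches[4], offsets[4];
	uint64_t modifier[4];	/* 8-byte aligned: padding inserted above */
};

struct fb_cmd2_32 {		/* layout as packed 32-bit userspace sees it */
	uint32_t fb_id, width, height, pixel_format, flags;
	uint32_t handles[4], pitches[4], offsets[4];
	uint64_t modifier[4];
} __attribute__((packed));

int main(void)
{
	/* prints "104 vs 100" on x86-64: same fields, different ABI */
	printf("%zu vs %zu\n", sizeof(struct fb_cmd2_64),
	       sizeof(struct fb_cmd2_32));
	return 0;
}
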
@@ -3383,7 +3383,7 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td rowspan="2" valign="top" >omap</td>
+	<td valign="top" >omap</td>
 	<td valign="top" >Generic</td>
 	<td valign="top" >“zorder”</td>
 	<td valign="top" >RANGE</td>

@@ -65,8 +65,10 @@ Optional properties:
 - edid: verbatim EDID data block describing attached display.
 - ddc: phandle describing the i2c bus handling the display data
   channel
-- port: A port node with endpoint definitions as defined in
+- port@[0-1]: Port nodes with endpoint definitions as defined in
   Documentation/devicetree/bindings/media/video-interfaces.txt.
+  Port 0 is the input port connected to the IPU display interface,
+  port 1 is the output port connected to a panel.
 
 example:
 
@@ -75,9 +77,29 @@ display@di0 {
 	edid = [edid-data];
 	interface-pix-fmt = "rgb24";
 
-	port {
+	port@0 {
+		reg = <0>;
+
 		display_in: endpoint {
 			remote-endpoint = <&ipu_di0_disp0>;
 		};
 	};
+
+	port@1 {
+		reg = <1>;
+
+		display_out: endpoint {
+			remote-endpoint = <&panel_in>;
+		};
+	};
+};
+
+panel {
+	...
+
+	port {
+		panel_in: endpoint {
+			remote-endpoint = <&display_out>;
+		};
+	};
 };

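The two-port binding above follows the usual of_graph convention: port 0 faces
the IPU, port 1 faces the panel, and each endpoint names its remote end. On
the driver side the panel node is typically recovered by walking the graph; a
sketch using the standard of_graph helpers (the parallel-display hunks further
down do essentially this), with a hypothetical function name:

#include <linux/of.h>
#include <linux/of_graph.h>

/* Sketch: find the panel connected to output port 1 of 'np'. */
static struct device_node *find_panel_node(struct device_node *np)
{
	struct device_node *port, *endpoint, *remote;

	port = of_graph_get_port_by_id(np, 1);	/* port@1 = output */
	if (!port)
		return NULL;

	endpoint = of_get_child_by_name(port, "endpoint");
	of_node_put(port);
	if (!endpoint)
		return NULL;

	remote = of_graph_get_remote_port_parent(endpoint);
	of_node_put(endpoint);
	return remote;		/* caller must of_node_put() it */
}
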
@@ -669,6 +669,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 				  struct amdgpu_cs_parser *p)
 {
+	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_ib *ib;
 	int i, j, r;
 
@@ -694,6 +695,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 		for (j = 0; j < num_deps; ++j) {
 			struct amdgpu_fence *fence;
 			struct amdgpu_ring *ring;
+			struct amdgpu_ctx *ctx;
 
 			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
 					       deps[j].ip_instance,
@@ -701,14 +703,21 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 			if (r)
 				return r;
 
+			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
+			if (ctx == NULL)
+				return -EINVAL;
+
 			r = amdgpu_fence_recreate(ring, p->filp,
 						  deps[j].handle,
 						  &fence);
-			if (r)
+			if (r) {
+				amdgpu_ctx_put(ctx);
 				return r;
+			}
 
 			amdgpu_sync_fence(&ib->sync, fence);
 			amdgpu_fence_unref(&fence);
+			amdgpu_ctx_put(ctx);
 		}
 	}
 
@@ -808,12 +817,16 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 
 	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
 			       wait->in.ring, &ring);
-	if (r)
+	if (r) {
+		amdgpu_ctx_put(ctx);
 		return r;
+	}
 
 	r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
-	if (r)
+	if (r) {
+		amdgpu_ctx_put(ctx);
 		return r;
+	}
 
 	r = fence_wait_timeout(&fence->base, true, timeout);
 	amdgpu_fence_unref(&fence);

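Both amdgpu_cs hunks enforce the same rule: once amdgpu_ctx_get() has taken a
reference, every exit path, including early error returns, must balance it
with amdgpu_ctx_put(). A generic sketch of that discipline, with hypothetical
pool/object types and object_get()/object_put() helpers:

/* Sketch of the take/release discipline; amdgpu_ctx_get()/_put()
 * follow the same shape. All names here are hypothetical. */
int use_object(struct pool *p, unsigned id)
{
	struct object *obj = object_get(p, id);	/* +1 reference */
	int r;

	if (!obj)
		return -EINVAL;

	r = do_work(obj);
	if (r) {
		object_put(obj);	/* error path must drop it too */
		return r;
	}

	object_put(obj);		/* normal path */
	return 0;
}
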
@@ -1207,10 +1207,15 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 		} else {
 			if (adev->ip_blocks[i].funcs->early_init) {
 				r = adev->ip_blocks[i].funcs->early_init((void *)adev);
-				if (r)
+				if (r == -ENOENT)
+					adev->ip_block_enabled[i] = false;
+				else if (r)
 					return r;
+				else
+					adev->ip_block_enabled[i] = true;
+			} else {
+				adev->ip_block_enabled[i] = true;
 			}
-			adev->ip_block_enabled[i] = true;
 		}
 	}
 
@@ -1679,25 +1679,31 @@ static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
 	if (ret)
 		return ret;
 
-	DRM_INFO("DPM unforce state min=%d, max=%d.\n",
-		 pi->sclk_dpm.soft_min_clk,
-		 pi->sclk_dpm.soft_max_clk);
+	DRM_DEBUG("DPM unforce state min=%d, max=%d.\n",
+		  pi->sclk_dpm.soft_min_clk,
+		  pi->sclk_dpm.soft_max_clk);
 
 	return 0;
 }
 
 static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
-				  enum amdgpu_dpm_forced_level level)
+		enum amdgpu_dpm_forced_level level)
 {
 	int ret = 0;
 
 	switch (level) {
 	case AMDGPU_DPM_FORCED_LEVEL_HIGH:
+		ret = cz_dpm_unforce_dpm_levels(adev);
+		if (ret)
+			return ret;
 		ret = cz_dpm_force_highest(adev);
 		if (ret)
 			return ret;
 		break;
 	case AMDGPU_DPM_FORCED_LEVEL_LOW:
+		ret = cz_dpm_unforce_dpm_levels(adev);
+		if (ret)
+			return ret;
 		ret = cz_dpm_force_lowest(adev);
 		if (ret)
 			return ret;
@@ -1711,6 +1717,8 @@ static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
 		break;
 	}
 
+	adev->pm.dpm.forced_level = level;
+
 	return ret;
 }
 
@@ -2566,6 +2566,7 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	unsigned type;
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
@@ -2574,6 +2575,9 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 		dce_v8_0_vga_enable(crtc, true);
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		dce_v8_0_vga_enable(crtc, false);
+		/* Make sure VBLANK interrupt is still enabled */
+		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+		amdgpu_irq_update(adev, &adev->crtc_irq, type);
 		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
 		dce_v8_0_crtc_load_lut(crtc);
 		break;

@@ -1813,10 +1813,7 @@ static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev,
 	u32 data, mask;
 
 	data = RREG32(mmCC_RB_BACKEND_DISABLE);
-	if (data & 1)
-		data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
-	else
-		data = 0;
+	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
 
 	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
 
@@ -122,6 +122,32 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
 }
 
+/* smu_8_0_d.h */
+#define mmMP0PUB_IND_INDEX	0x180
+#define mmMP0PUB_IND_DATA	0x181
+
+static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	WREG32(mmMP0PUB_IND_INDEX, (reg));
+	r = RREG32(mmMP0PUB_IND_DATA);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+	return r;
+}
+
+static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	WREG32(mmMP0PUB_IND_INDEX, (reg));
+	WREG32(mmMP0PUB_IND_DATA, (v));
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+}
+
 static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
 {
 	unsigned long flags;
@@ -1222,8 +1248,13 @@ static int vi_common_early_init(void *handle)
 	bool smc_enabled = false;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	adev->smc_rreg = &vi_smc_rreg;
-	adev->smc_wreg = &vi_smc_wreg;
+	if (adev->flags & AMDGPU_IS_APU) {
+		adev->smc_rreg = &cz_smc_rreg;
+		adev->smc_wreg = &cz_smc_wreg;
+	} else {
+		adev->smc_rreg = &vi_smc_rreg;
+		adev->smc_wreg = &vi_smc_wreg;
+	}
 	adev->pcie_rreg = &vi_pcie_rreg;
 	adev->pcie_wreg = &vi_pcie_wreg;
 	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;

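The CZ-specific accessors above work through an index/data register pair: a
read is a write to mmMP0PUB_IND_INDEX followed by a read of mmMP0PUB_IND_DATA,
and those two accesses must not be interleaved with another user of the same
pair, which is why they sit under smc_idx_lock with interrupts disabled. A
generic sketch of the pattern (hypothetical helper, standard kernel
primitives):

#include <linux/io.h>
#include <linux/spinlock.h>

/* Generic indexed-register read: the index write and the data read
 * must appear atomic to every other user of the same pair. */
static u32 ind_rreg(void __iomem *base, u32 index_off, u32 data_off,
		    spinlock_t *lock, u32 reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lock, flags);
	writel(reg, base + index_off);	/* select the register */
	val = readl(base + data_off);	/* then read its value */
	spin_unlock_irqrestore(lock, flags);
	return val;
}
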
@@ -531,8 +531,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
 
 	drm_crtc_vblank_off(crtc);
 
-	crtc->mode = *adj;
-
 	val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
 	if (val != dcrtc->dumb_ctrl) {
 		dcrtc->dumb_ctrl = val;

@@ -69,8 +69,9 @@ void armada_gem_free_object(struct drm_gem_object *obj)
 
 	if (dobj->obj.import_attach) {
 		/* We only ever display imported data */
-		dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
-					 DMA_TO_DEVICE);
+		if (dobj->sgt)
+			dma_buf_unmap_attachment(dobj->obj.import_attach,
+						 dobj->sgt, DMA_TO_DEVICE);
 		drm_prime_gem_destroy(&dobj->obj, NULL);
 	}
 
@@ -7,6 +7,7 @@
  * published by the Free Software Foundation.
  */
 #include <drm/drmP.h>
+#include <drm/drm_plane_helper.h>
 #include "armada_crtc.h"
 #include "armada_drm.h"
 #include "armada_fb.h"
@@ -85,16 +86,8 @@ static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
 
 	if (fb)
 		armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
-}
 
-static unsigned armada_limit(int start, unsigned size, unsigned max)
-{
-	int end = start + size;
-	if (end < 0)
-		return 0;
-	if (start < 0)
-		start = 0;
-	return (unsigned)end > max ? max - start : end - start;
+	wake_up(&dplane->vbl.wait);
 }
 
 static int
@@ -105,26 +98,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 {
 	struct armada_plane *dplane = drm_to_armada_plane(plane);
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct drm_rect src = {
+		.x1 = src_x,
+		.y1 = src_y,
+		.x2 = src_x + src_w,
+		.y2 = src_y + src_h,
+	};
+	struct drm_rect dest = {
+		.x1 = crtc_x,
+		.y1 = crtc_y,
+		.x2 = crtc_x + crtc_w,
+		.y2 = crtc_y + crtc_h,
+	};
+	const struct drm_rect clip = {
+		.x2 = crtc->mode.hdisplay,
+		.y2 = crtc->mode.vdisplay,
+	};
 	uint32_t val, ctrl0;
 	unsigned idx = 0;
+	bool visible;
 	int ret;
 
-	crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
-	crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
+	ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
+					    0, INT_MAX, true, false, &visible);
+	if (ret)
+		return ret;
 
 	ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
 		CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
 		CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
 
 	/* Does the position/size result in nothing to display? */
-	if (crtc_w == 0 || crtc_h == 0) {
+	if (!visible)
 		ctrl0 &= ~CFG_DMA_ENA;
-	}
-
-	/*
-	 * FIXME: if the starting point is off screen, we need to
-	 * adjust src_x, src_y, src_w, src_h appropriately, and
-	 * according to the scale.
-	 */
 
 	if (!dcrtc->plane) {
 		dcrtc->plane = plane;
@@ -134,15 +140,19 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 	/* FIXME: overlay on an interlaced display */
 	/* Just updating the position/size? */
 	if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
-		val = (src_h & 0xffff0000) | src_w >> 16;
+		val = (drm_rect_height(&src) & 0xffff0000) |
+		      drm_rect_width(&src) >> 16;
 		dplane->src_hw = val;
 		writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
-		val = crtc_h << 16 | crtc_w;
+
+		val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
 		dplane->dst_hw = val;
 		writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
-		val = crtc_y << 16 | crtc_x;
+
+		val = dest.y1 << 16 | dest.x1;
 		dplane->dst_yx = val;
 		writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
+
 		return 0;
 	} else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
 		/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
@@ -150,15 +160,14 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 			       dcrtc->base + LCD_SPU_SRAM_PARA1);
 	}
 
-	ret = wait_event_timeout(dplane->vbl.wait,
-				 list_empty(&dplane->vbl.update.node),
-				 HZ/25);
-	if (ret < 0)
-		return ret;
+	wait_event_timeout(dplane->vbl.wait,
+			   list_empty(&dplane->vbl.update.node),
+			   HZ/25);
 
 	if (plane->fb != fb) {
 		struct armada_gem_object *obj = drm_fb_obj(fb);
-		uint32_t sy, su, sv;
+		uint32_t addr[3], pixel_format;
+		int i, num_planes, hsub;
 
 		/*
 		 * Take a reference on the new framebuffer - we want to
@@ -178,26 +187,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 					   older_fb);
 		}
 
-		src_y >>= 16;
-		src_x >>= 16;
-		sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
-			src_x * fb->bits_per_pixel / 8;
-		su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
-			src_x;
-		sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
-			src_x;
+		src_y = src.y1 >> 16;
+		src_x = src.x1 >> 16;
 
-		armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+		pixel_format = fb->pixel_format;
+		hsub = drm_format_horz_chroma_subsampling(pixel_format);
+		num_planes = drm_format_num_planes(pixel_format);
+
+		/*
+		 * Annoyingly, shifting a YUYV-format image by one pixel
+		 * causes the U/V planes to toggle.  Toggle the UV swap.
+		 * (Unfortunately, this causes momentary colour flickering.)
+		 */
+		if (src_x & (hsub - 1) && num_planes == 1)
+			ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+
+		for (i = 0; i < num_planes; i++)
+			addr[i] = obj->dev_addr + fb->offsets[i] +
+				     src_y * fb->pitches[i] +
+				     src_x * drm_format_plane_cpp(pixel_format, i);
+		for (; i < ARRAY_SIZE(addr); i++)
+			addr[i] = 0;
+
+		armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
 				     LCD_SPU_DMA_START_ADDR_Y0);
-		armada_reg_queue_set(dplane->vbl.regs, idx, su,
+		armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
 				     LCD_SPU_DMA_START_ADDR_U0);
-		armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+		armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
 				     LCD_SPU_DMA_START_ADDR_V0);
-		armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+		armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
 				     LCD_SPU_DMA_START_ADDR_Y1);
-		armada_reg_queue_set(dplane->vbl.regs, idx, su,
+		armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
 				     LCD_SPU_DMA_START_ADDR_U1);
-		armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+		armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
 				     LCD_SPU_DMA_START_ADDR_V1);
 
 		val = fb->pitches[0] << 16 | fb->pitches[0];
@@ -208,24 +230,27 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 				     LCD_SPU_DMA_PITCH_UV);
 	}
 
-	val = (src_h & 0xffff0000) | src_w >> 16;
+	val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16;
 	if (dplane->src_hw != val) {
 		dplane->src_hw = val;
 		armada_reg_queue_set(dplane->vbl.regs, idx, val,
 				     LCD_SPU_DMA_HPXL_VLN);
 	}
-	val = crtc_h << 16 | crtc_w;
+
+	val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
 	if (dplane->dst_hw != val) {
 		dplane->dst_hw = val;
 		armada_reg_queue_set(dplane->vbl.regs, idx, val,
 				     LCD_SPU_DZM_HPXL_VLN);
 	}
-	val = crtc_y << 16 | crtc_x;
+
+	val = dest.y1 << 16 | dest.x1;
 	if (dplane->dst_yx != val) {
 		dplane->dst_yx = val;
 		armada_reg_queue_set(dplane->vbl.regs, idx, val,
 				     LCD_SPU_DMA_OVSA_HPXL_VLN);
 	}
 
 	if (dplane->ctrl0 != ctrl0) {
 		dplane->ctrl0 = ctrl0;
 		armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
@@ -279,7 +304,11 @@ static int armada_plane_disable(struct drm_plane *plane)
 
 static void armada_plane_destroy(struct drm_plane *plane)
 {
-	kfree(plane);
+	struct armada_plane *dplane = drm_to_armada_plane(plane);
+
+	drm_plane_cleanup(plane);
+
+	kfree(dplane);
 }
 
 static int armada_plane_set_property(struct drm_plane *plane,

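The overlay conversion above relies on the DRM convention that plane source
coordinates are Q16.16 fixed point while CRTC coordinates are whole pixels;
drm_plane_helper_check_update() clips both rectangles against the CRTC mode
and reports through *visible whether anything is left to show, replacing the
hand-rolled armada_limit(). A small illustration of the 16.16 convention
(plain arithmetic, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t src_w = 640u << 16;		/* 640.0 source pixels */
	uint32_t src_x = (100u << 16) | 0x8000;	/* 100.5 pixels */

	printf("width=%u px, x starts at pixel %u (frac 0x%04x)\n",
	       src_w >> 16, src_x >> 16, src_x & 0xffff);
	return 0;
}
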
@@ -2706,8 +2706,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
-	/* For some reason crtc x/y offsets are signed internally. */
-	if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
+	/*
+	 * Universal plane src offsets are only 16.16, prevent havoc for
+	 * drivers using universal plane code internally.
+	 */
+	if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
 		return -ERANGE;
 
 	drm_modeset_lock_all(dev);

@@ -70,6 +70,8 @@
 
 #define DRM_IOCTL_WAIT_VBLANK32		DRM_IOWR(0x3a, drm_wait_vblank32_t)
 
+#define DRM_IOCTL_MODE_ADDFB232		DRM_IOWR(0xb8, drm_mode_fb_cmd232_t)
+
 typedef struct drm_version_32 {
 	int version_major;	/**< Major version */
 	int version_minor;	/**< Minor version */
@@ -1016,6 +1018,63 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
 	return 0;
 }
 
+typedef struct drm_mode_fb_cmd232 {
+	u32 fb_id;
+	u32 width;
+	u32 height;
+	u32 pixel_format;
+	u32 flags;
+	u32 handles[4];
+	u32 pitches[4];
+	u32 offsets[4];
+	u64 modifier[4];
+} __attribute__((packed)) drm_mode_fb_cmd232_t;
+
+static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
+				  unsigned long arg)
+{
+	struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg;
+	struct drm_mode_fb_cmd232 req32;
+	struct drm_mode_fb_cmd2 __user *req64;
+	int i;
+	int err;
+
+	if (copy_from_user(&req32, argp, sizeof(req32)))
+		return -EFAULT;
+
+	req64 = compat_alloc_user_space(sizeof(*req64));
+
+	if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64))
+	    || __put_user(req32.width, &req64->width)
+	    || __put_user(req32.height, &req64->height)
+	    || __put_user(req32.pixel_format, &req64->pixel_format)
+	    || __put_user(req32.flags, &req64->flags))
+		return -EFAULT;
+
+	for (i = 0; i < 4; i++) {
+		if (__put_user(req32.handles[i], &req64->handles[i]))
+			return -EFAULT;
+		if (__put_user(req32.pitches[i], &req64->pitches[i]))
+			return -EFAULT;
+		if (__put_user(req32.offsets[i], &req64->offsets[i]))
+			return -EFAULT;
+		if (__put_user(req32.modifier[i], &req64->modifier[i]))
+			return -EFAULT;
+	}
+
+	err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64);
+	if (err)
+		return err;
+
+	if (__get_user(req32.fb_id, &req64->fb_id))
+		return -EFAULT;
+
+	if (copy_to_user(argp, &req32, sizeof(req32)))
+		return -EFAULT;
+
+	return 0;
+}
+
 static drm_ioctl_compat_t *drm_compat_ioctls[] = {
 	[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
 	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
@@ -1048,6 +1107,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
 	[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
 #endif
 	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+	[DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
 };
 
 /**

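For the new wrapper to be reached, a driver's fops must route 32-bit ioctls
through drm_compat_ioctl(), which consults the drm_compat_ioctls[] table
extended above. A sketch of the usual wiring (foo_driver_fops is a
hypothetical name; the callbacks are the standard DRM ones):

static const struct file_operations foo_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= drm_compat_ioctl,	/* dispatches the table above */
#endif
	.mmap		= drm_gem_mmap,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
};
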
@@ -826,6 +826,7 @@ struct intel_context {
 	struct kref ref;
 	int user_handle;
 	uint8_t remap_slice;
+	struct drm_i915_private *i915;
 	struct drm_i915_file_private *file_priv;
 	struct i915_ctx_hang_stats hang_stats;
 	struct i915_hw_ppgtt *ppgtt;
@@ -2036,8 +2037,6 @@ struct drm_i915_gem_object {
 	unsigned int cache_level:3;
 	unsigned int cache_dirty:1;
 
-	unsigned int has_dma_mapping:1;
-
 	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
 
 	unsigned int pin_display;
@@ -3116,7 +3115,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
 int i915_debugfs_connector_add(struct drm_connector *connector);
 void intel_display_crc_init(struct drm_device *dev);
 #else
-static inline int i915_debugfs_connector_add(struct drm_connector *connector) {}
+static inline int i915_debugfs_connector_add(struct drm_connector *connector)
+{ return 0; }
 static inline void intel_display_crc_init(struct drm_device *dev) {}
 #endif
 
@@ -213,7 +213,6 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	sg_dma_len(sg) = obj->base.size;
 
 	obj->pages = st;
-	obj->has_dma_mapping = true;
 	return 0;
 }
 
@@ -265,8 +264,6 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
 
 	sg_free_table(obj->pages);
 	kfree(obj->pages);
-
-	obj->has_dma_mapping = false;
 }
 
 static void
@@ -2139,6 +2136,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
+	i915_gem_gtt_finish_object(obj);
+
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_save_bit_17_swizzle(obj);
 
@@ -2199,6 +2198,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct sg_page_iter sg_iter;
 	struct page *page;
 	unsigned long last_pfn = 0;	/* suppress gcc warning */
+	int ret;
 	gfp_t gfp;
 
 	/* Assert that the object is not currently in any GPU domain. As it
@@ -2246,8 +2246,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 			 */
 			i915_gem_shrink_all(dev_priv);
 			page = shmem_read_mapping_page(mapping, i);
-			if (IS_ERR(page))
+			if (IS_ERR(page)) {
+				ret = PTR_ERR(page);
 				goto err_pages;
+			}
 		}
 #ifdef CONFIG_SWIOTLB
 		if (swiotlb_nr_tbl()) {
@@ -2276,6 +2278,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	sg_mark_end(sg);
 	obj->pages = st;
 
+	ret = i915_gem_gtt_prepare_object(obj);
+	if (ret)
+		goto err_pages;
+
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj);
 
@@ -2300,10 +2306,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	 * space and so want to translate the error from shmemfs back to our
 	 * usual understanding of ENOMEM.
 	 */
-	if (PTR_ERR(page) == -ENOSPC)
-		return -ENOMEM;
-	else
-		return PTR_ERR(page);
+	if (ret == -ENOSPC)
+		ret = -ENOMEM;
+
+	return ret;
 }
 
 /* Ensure that the associated pages are gathered from the backing storage
@@ -2542,6 +2548,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	}
 
 	request->emitted_jiffies = jiffies;
+	ring->last_submitted_seqno = request->seqno;
 	list_add_tail(&request->list, &ring->request_list);
 	request->file_priv = NULL;
 
@@ -3247,10 +3254,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist. */
-	if (list_empty(&obj->vma_list)) {
-		i915_gem_gtt_finish_object(obj);
+	if (list_empty(&obj->vma_list))
 		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
-	}
 
 	/* And finally now the object is completely decoupled from this vma,
 	 * we can drop its hold on the backing storage and allow it to be
@@ -3768,22 +3773,16 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 		goto err_remove_node;
 	}
 
-	ret = i915_gem_gtt_prepare_object(obj);
-	if (ret)
-		goto err_remove_node;
-
 	trace_i915_vma_bind(vma, flags);
 	ret = i915_vma_bind(vma, obj->cache_level, flags);
 	if (ret)
-		goto err_finish_gtt;
+		goto err_remove_node;
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&vma->mm_list, &vm->inactive_list);
 
 	return vma;
 
-err_finish_gtt:
-	i915_gem_gtt_finish_object(obj);
 err_remove_node:
 	drm_mm_remove_node(&vma->node);
 err_free_vma:

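The i915_gem.c hunks replace the per-object has_dma_mapping flag with a strict
pairing: i915_gem_gtt_prepare_object() (dma_map_sg underneath) when backing
pages are gathered, i915_gem_gtt_finish_object() (dma_unmap_sg) when they are
released. A sketch of the shape of that pairing, with hypothetical
gather/release helpers standing in for the shmem/phys/userptr specifics:

/* Sketch only; gather_backing_pages()/release_backing_pages() are
 * hypothetical stand-ins for the backend-specific code. */
static int get_pages(struct drm_i915_gem_object *obj)
{
	int ret = gather_backing_pages(obj);
	if (ret)
		return ret;

	ret = i915_gem_gtt_prepare_object(obj);	/* dma_map_sg() inside */
	if (ret) {
		release_backing_pages(obj);	/* unwind on failure */
		return ret;
	}
	return 0;
}

static void put_pages(struct drm_i915_gem_object *obj)
{
	i915_gem_gtt_finish_object(obj);	/* dma_unmap_sg() inside */
	release_backing_pages(obj);
}
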
@@ -135,8 +135,7 @@ static int get_context_size(struct drm_device *dev)
 
 void i915_gem_context_free(struct kref *ctx_ref)
 {
-	struct intel_context *ctx = container_of(ctx_ref,
-						 typeof(*ctx), ref);
+	struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
 
 	trace_i915_context_free(ctx);
 
@@ -195,6 +194,7 @@ __create_hw_context(struct drm_device *dev,
 
 	kref_init(&ctx->ref);
 	list_add_tail(&ctx->link, &dev_priv->context_list);
+	ctx->i915 = dev_priv;
 
 	if (dev_priv->hw_context_size) {
 		struct drm_i915_gem_object *obj =

@@ -256,7 +256,6 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 		return PTR_ERR(sg);
 
 	obj->pages = sg;
-	obj->has_dma_mapping = true;
 	return 0;
 }
 
@@ -264,7 +263,6 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
 	dma_buf_unmap_attachment(obj->base.import_attach,
 				 obj->pages, DMA_BIDIRECTIONAL);
-	obj->has_dma_mapping = false;
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {

@@ -1723,9 +1723,6 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 
 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 {
-	if (obj->has_dma_mapping)
-		return 0;
-
 	if (!dma_map_sg(&obj->base.dev->pdev->dev,
 			obj->pages->sgl, obj->pages->nents,
 			PCI_DMA_BIDIRECTIONAL))
@@ -1972,10 +1969,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 
 	interruptible = do_idling(dev_priv);
 
-	if (!obj->has_dma_mapping)
-		dma_unmap_sg(&dev->pdev->dev,
-			     obj->pages->sgl, obj->pages->nents,
-			     PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
+		     PCI_DMA_BIDIRECTIONAL);
 
 	undo_idling(dev_priv, interruptible);
 }

@@ -416,7 +416,6 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
 	if (obj->pages == NULL)
 		goto cleanup;
 
-	obj->has_dma_mapping = true;
 	i915_gem_object_pin_pages(obj);
 	obj->stolen = stolen;
 
@@ -183,18 +183,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 		if (IS_GEN4(dev)) {
 			uint32_t ddc2 = I915_READ(DCC2);
 
-			if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) {
-				/* Since the swizzling may vary within an
-				 * object, we have no idea what the swizzling
-				 * is for any page in particular. Thus we
-				 * cannot migrate tiled pages using the GPU,
-				 * nor can we tell userspace what the exact
-				 * swizzling is for any object.
-				 */
+			if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
 				dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
-				swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-				swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-			}
 		}
 
 		if (dcc == 0xffffffff) {

@@ -545,6 +545,26 @@ st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
 	return ret;
 }
 
+static int
+__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
+			     struct page **pvec, int num_pages)
+{
+	int ret;
+
+	ret = st_set_pages(&obj->pages, pvec, num_pages);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_gtt_prepare_object(obj);
+	if (ret) {
+		sg_free_table(obj->pages);
+		kfree(obj->pages);
+		obj->pages = NULL;
+	}
+
+	return ret;
+}
+
 static void
 __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 {
@@ -584,9 +604,12 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 		if (obj->userptr.work != &work->work) {
 			ret = 0;
 		} else if (pinned == num_pages) {
-			ret = st_set_pages(&obj->pages, pvec, num_pages);
+			ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
 			if (ret == 0) {
 				list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
+				obj->get_page.sg = obj->pages->sgl;
+				obj->get_page.last = 0;
+
 				pinned = 0;
 			}
 		}
@@ -693,7 +716,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 			}
 		}
 	} else {
-		ret = st_set_pages(&obj->pages, pvec, num_pages);
+		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
 		if (ret == 0) {
 			obj->userptr.work = NULL;
 			pinned = 0;
@@ -715,6 +738,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 	if (obj->madv != I915_MADV_WILLNEED)
 		obj->dirty = 0;
 
+	i915_gem_gtt_finish_object(obj);
+
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
 		struct page *page = sg_page_iter_page(&sg_iter);
 
@@ -204,7 +204,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	drm_ioctl_compat_t *fn = NULL;
 	int ret;
 
-	if (nr < DRM_COMMAND_BASE)
+	if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
 		return drm_compat_ioctl(filp, cmd, arg);
 
 	if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))

@@ -2706,18 +2706,11 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-static struct drm_i915_gem_request *
-ring_last_request(struct intel_engine_cs *ring)
-{
-	return list_entry(ring->request_list.prev,
-			  struct drm_i915_gem_request, list);
-}
-
 static bool
-ring_idle(struct intel_engine_cs *ring)
+ring_idle(struct intel_engine_cs *ring, u32 seqno)
 {
 	return (list_empty(&ring->request_list) ||
-		i915_gem_request_completed(ring_last_request(ring), false));
+		i915_seqno_passed(seqno, ring->last_submitted_seqno));
 }
 
 static bool
@@ -2939,7 +2932,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 		acthd = intel_ring_get_active_head(ring);
 
 		if (ring->hangcheck.seqno == seqno) {
-			if (ring_idle(ring)) {
+			if (ring_idle(ring, seqno)) {
 				ring->hangcheck.action = HANGCHECK_IDLE;
 
 				if (waitqueue_active(&ring->irq_queue)) {

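last_submitted_seqno lets the hang checker compare plain 32-bit sequence
numbers instead of dereferencing request_list without holding the lock. The
comparison has to survive seqno wrap-around; i915_seqno_passed() does this
with a signed subtraction, which stays correct while the two values are within
2^31 of each other. A self-contained sketch of the same trick:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "seq1 is at or after seq2" for 32-bit sequence numbers. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	/* prints 1: still true across the 0xffffffff -> 0 wrap */
	printf("%d\n", seqno_passed(2, 0xfffffffe));
	return 0;
}
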
@@ -727,7 +727,7 @@ DECLARE_EVENT_CLASS(i915_context,
 	TP_fast_assign(
 			__entry->ctx = ctx;
 			__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
-			__entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
+			__entry->dev = ctx->i915->dev->primary->index;
 	),
 
 	TP_printk("dev=%u, ctx=%p, ctx_vm=%p",

@@ -6315,9 +6315,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
 	struct drm_connector *connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/* crtc should still be enabled when we disable it. */
-	WARN_ON(!crtc->state->enable);
-
 	intel_crtc_disable_planes(crtc);
 	dev_priv->display.crtc_disable(crtc);
 	dev_priv->display.off(crtc);
@@ -12591,7 +12588,8 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
 			continue;
 
 		if (!crtc_state->enable) {
-			intel_crtc_disable(crtc);
+			if (crtc->state->enable)
+				intel_crtc_disable(crtc);
 		} else if (crtc->state->enable) {
 			intel_crtc_disable_planes(crtc);
 			dev_priv->display.crtc_disable(crtc);
@@ -13276,7 +13274,7 @@ intel_check_primary_plane(struct drm_plane *plane,
 	if (ret)
 		return ret;
 
-	if (intel_crtc->active) {
+	if (crtc_state ? crtc_state->base.active : intel_crtc->active) {
 		struct intel_plane_state *old_state =
 			to_intel_plane_state(plane->state);
 
@@ -275,6 +275,13 @@ struct intel_engine_cs {
 	 * Do we have some not yet emitted requests outstanding?
 	 */
 	struct drm_i915_gem_request *outstanding_lazy_request;
+	/**
+	 * Seqno of request most recently submitted to request_list.
+	 * Used exclusively by hang checker to avoid grabbing lock while
+	 * inspecting request list.
+	 */
+	u32 last_submitted_seqno;
+
 	bool gpu_caches_dirty;
 
 	wait_queue_head_t irq_queue;

@@ -301,7 +301,7 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
 
 	switch (tve->mode) {
 	case TVE_MODE_VGA:
-		imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_YUV8_1X24,
+		imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24,
 					    tve->hsync_pin, tve->vsync_pin);
 		break;
 	case TVE_MODE_TVOUT:

@@ -21,6 +21,7 @@
 #include <drm/drm_panel.h>
 #include <linux/videodev2.h>
 #include <video/of_display_timing.h>
+#include <linux/of_graph.h>
 
 #include "imx-drm.h"
 
@@ -208,7 +209,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 {
 	struct drm_device *drm = data;
 	struct device_node *np = dev->of_node;
-	struct device_node *panel_node;
+	struct device_node *port;
 	const u8 *edidp;
 	struct imx_parallel_display *imxpd;
 	int ret;
@@ -234,11 +235,19 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 		imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
 	}
 
-	panel_node = of_parse_phandle(np, "fsl,panel", 0);
-	if (panel_node) {
-		imxpd->panel = of_drm_find_panel(panel_node);
-		if (!imxpd->panel)
-			return -EPROBE_DEFER;
+	/* port@1 is the output port */
+	port = of_graph_get_port_by_id(np, 1);
+	if (port) {
+		struct device_node *endpoint, *remote;
+
+		endpoint = of_get_child_by_name(port, "endpoint");
+		if (endpoint) {
+			remote = of_graph_get_remote_port_parent(endpoint);
+			if (remote)
+				imxpd->panel = of_drm_find_panel(remote);
+			if (!imxpd->panel)
+				return -EPROBE_DEFER;
+		}
 	}
 
 	imxpd->dev = dev;

@@ -5818,7 +5818,7 @@ int ci_dpm_init(struct radeon_device *rdev)
 			tmp |= DPM_ENABLED;
 			break;
 		default:
-			DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
+			DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
 			break;
 		}
 		WREG32_SMC(CNB_PWRMGT_CNTL, tmp);

@@ -260,8 +260,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 			}
 		}
 	}
-	mb();
-	radeon_gart_tlb_flush(rdev);
+	if (rdev->gart.ptr) {
+		mb();
+		radeon_gart_tlb_flush(rdev);
+	}
 }
 
 /**
@@ -306,8 +308,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 		page_base += RADEON_GPU_PAGE_SIZE;
 		}
 	}
-	mb();
-	radeon_gart_tlb_flush(rdev);
+	if (rdev->gart.ptr) {
+		mb();
+		radeon_gart_tlb_flush(rdev);
+	}
 	return 0;
 }
 
@@ -36,6 +36,7 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
 	if (robj) {
 		if (robj->gem_base.import_attach)
 			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
+		radeon_mn_unregister(robj);
 		radeon_bo_unref(&robj);
 	}
 }

@@ -75,7 +75,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	bo = container_of(tbo, struct radeon_bo, tbo);
 
 	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
-	radeon_mn_unregister(bo);
 
 	mutex_lock(&bo->rdev->gem.mutex);
 	list_del_init(&bo->list);

@@ -2926,6 +2926,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
 	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
 	{ 0, 0, 0, 0 },
 };
 
@@ -555,7 +555,6 @@ static struct platform_driver rockchip_drm_platform_driver = {
 	.probe = rockchip_drm_platform_probe,
 	.remove = rockchip_drm_platform_remove,
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "rockchip-drm",
 		.of_match_table = rockchip_drm_dt_ids,
 		.pm = &rockchip_drm_pm_ops,

@@ -162,7 +162,8 @@ static void rockchip_drm_output_poll_changed(struct drm_device *dev)
 	struct rockchip_drm_private *private = dev->dev_private;
 	struct drm_fb_helper *fb_helper = &private->fbdev_helper;
 
-	drm_fb_helper_hotplug_event(fb_helper);
+	if (fb_helper)
+		drm_fb_helper_hotplug_event(fb_helper);
 }
 
 static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {

@@ -54,55 +54,56 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
 		       &rk_obj->dma_attrs);
 }
 
+static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
+					struct vm_area_struct *vma)
+
+{
+	int ret;
+	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+	struct drm_device *drm = obj->dev;
+
+	/*
+	 * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear
+	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
+	 */
+	vma->vm_flags &= ~VM_PFNMAP;
+
+	ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
+			     obj->size, &rk_obj->dma_attrs);
+	if (ret)
+		drm_gem_vm_close(vma);
+
+	return ret;
+}
+
 int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
 			  struct vm_area_struct *vma)
 {
-	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
 	struct drm_device *drm = obj->dev;
-	unsigned long vm_size;
+	int ret;
 
-	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-	vm_size = vma->vm_end - vma->vm_start;
+	mutex_lock(&drm->struct_mutex);
+	ret = drm_gem_mmap_obj(obj, obj->size, vma);
+	mutex_unlock(&drm->struct_mutex);
+	if (ret)
+		return ret;
 
-	if (vm_size > obj->size)
-		return -EINVAL;
-
-	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
-			      obj->size, &rk_obj->dma_attrs);
+	return rockchip_drm_gem_object_mmap(obj, vma);
 }
 
 /* drm driver mmap file operations */
 int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->minor->dev;
 	struct drm_gem_object *obj;
-	struct drm_vma_offset_node *node;
 	int ret;
 
-	if (drm_device_is_unplugged(dev))
-		return -ENODEV;
-
-	mutex_lock(&dev->struct_mutex);
-
-	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
-					   vma->vm_pgoff,
-					   vma_pages(vma));
-	if (!node) {
-		mutex_unlock(&dev->struct_mutex);
-		DRM_ERROR("failed to find vma node.\n");
-		return -EINVAL;
-	} else if (!drm_vma_node_is_allowed(node, filp)) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EACCES;
-	}
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
 
-	obj = container_of(node, struct drm_gem_object, vma_node);
-	ret = rockchip_gem_mmap_buf(obj, vma);
+	obj = vma->vm_private_data;
 
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
+	return rockchip_drm_gem_object_mmap(obj, vma);
 }
 
 struct rockchip_gem_object *

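Both rockchip entry points now funnel into one helper, so the CMA-specific
fixups, clearing VM_PFNMAP and handing the vma to dma_mmap_attrs(), live in
exactly one place. The fact the mmap(2) path relies on is that drm_gem_mmap()
stores the looked-up GEM object in vma->vm_private_data. Reduced to its
skeleton (hypothetical foo_* names, same shape as the hunk above):

/* filp-based path: let DRM core do the offset lookup and access
 * checks, then apply the driver-specific mapping in the one helper. */
int foo_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	return foo_gem_object_mmap(vma->vm_private_data, vma);
}
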
@@ -170,6 +170,7 @@ struct vop_win_phy {
 
 	struct vop_reg enable;
 	struct vop_reg format;
+	struct vop_reg rb_swap;
 	struct vop_reg act_info;
 	struct vop_reg dsp_info;
 	struct vop_reg dsp_st;
@@ -199,8 +200,12 @@ struct vop_data {
 static const uint32_t formats_01[] = {
 	DRM_FORMAT_XRGB8888,
 	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ABGR8888,
 	DRM_FORMAT_RGB888,
+	DRM_FORMAT_BGR888,
 	DRM_FORMAT_RGB565,
+	DRM_FORMAT_BGR565,
 	DRM_FORMAT_NV12,
 	DRM_FORMAT_NV16,
 	DRM_FORMAT_NV24,
@@ -209,8 +214,12 @@ static const uint32_t formats_01[] = {
 static const uint32_t formats_234[] = {
 	DRM_FORMAT_XRGB8888,
 	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ABGR8888,
 	DRM_FORMAT_RGB888,
+	DRM_FORMAT_BGR888,
 	DRM_FORMAT_RGB565,
+	DRM_FORMAT_BGR565,
 };
 
 static const struct vop_win_phy win01_data = {
@@ -218,6 +227,7 @@ static const struct vop_win_phy win01_data = {
 	.nformats = ARRAY_SIZE(formats_01),
 	.enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
 	.format = VOP_REG(WIN0_CTRL0, 0x7, 1),
+	.rb_swap = VOP_REG(WIN0_CTRL0, 0x1, 12),
 	.act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0),
 	.dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0),
 	.dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0),
@@ -234,6 +244,7 @@ static const struct vop_win_phy win23_data = {
 	.nformats = ARRAY_SIZE(formats_234),
 	.enable = VOP_REG(WIN2_CTRL0, 0x1, 0),
 	.format = VOP_REG(WIN2_CTRL0, 0x7, 1),
+	.rb_swap = VOP_REG(WIN2_CTRL0, 0x1, 12),
 	.dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0),
 	.dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0),
 	.yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0),
@@ -242,15 +253,6 @@ static const struct vop_win_phy win23_data = {
 	.dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0),
 };
 
-static const struct vop_win_phy cursor_data = {
-	.data_formats = formats_234,
-	.nformats = ARRAY_SIZE(formats_234),
-	.enable = VOP_REG(HWC_CTRL0, 0x1, 0),
-	.format = VOP_REG(HWC_CTRL0, 0x7, 1),
-	.dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0),
-	.yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0),
-};
-
 static const struct vop_ctrl ctrl_data = {
 	.standby = VOP_REG(SYS_CTRL, 0x1, 22),
 	.gate_en = VOP_REG(SYS_CTRL, 0x1, 23),
@@ -282,14 +284,14 @@ static const struct vop_reg_data vop_init_reg_table[] = {
 /*
  * Note: rk3288 has a dedicated 'cursor' window, however, that window requires
  * special support to get alpha blending working.  For now, just use overlay
- * window 1 for the drm cursor.
+ * window 3 for the drm cursor.
  *
  */
 static const struct vop_win_data rk3288_vop_win_data[] = {
 	{ .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY },
-	{ .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR },
+	{ .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_OVERLAY },
 	{ .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
-	{ .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
-	{ .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY },
+	{ .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_CURSOR },
 };
 
 static const struct vop_data rk3288_vop = {
@@ -352,15 +354,32 @@ static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
 	}
 }
 
+static bool has_rb_swapped(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_BGR888:
+	case DRM_FORMAT_BGR565:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static enum vop_data_format vop_convert_format(uint32_t format)
 {
 	switch (format) {
 	case DRM_FORMAT_XRGB8888:
 	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
 		return VOP_FMT_ARGB8888;
 	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_BGR888:
 		return VOP_FMT_RGB888;
 	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_BGR565:
 		return VOP_FMT_RGB565;
 	case DRM_FORMAT_NV12:
 		return VOP_FMT_YUV420SP;
@@ -378,6 +397,7 @@ static bool is_alpha_support(uint32_t format)
 {
 	switch (format) {
 	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_ABGR8888:
 		return true;
 	default:
 		return false;
@@ -588,6 +608,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
 	enum vop_data_format format;
 	uint32_t val;
 	bool is_alpha;
+	bool rb_swap;
 	bool visible;
 	int ret;
 	struct drm_rect dest = {
@@ -621,6 +642,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
 		return 0;
 
 	is_alpha = is_alpha_support(fb->pixel_format);
+	rb_swap = has_rb_swapped(fb->pixel_format);
 	format = vop_convert_format(fb->pixel_format);
 	if (format < 0)
 		return format;
@@ -689,6 +711,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
 	val = (dsp_sty - 1) << 16;
 	val |= (dsp_stx - 1) & 0xffff;
 	VOP_WIN_SET(vop, win, dsp_st, val);
+	VOP_WIN_SET(vop, win, rb_swap, rb_swap);
 
 	if (is_alpha) {
 		VOP_WIN_SET(vop, win, dst_alpha_ctl,

@@ -963,14 +963,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	} else {
 		pool->npages_free += count;
 		list_splice(&ttm_dma->pages_list, &pool->free_list);
-		npages = count;
-		if (pool->npages_free > _manager->options.max_size) {
+		/*
+		 * Wait to have at at least NUM_PAGES_TO_ALLOC number of pages
+		 * to free in order to minimize calls to set_memory_wb().
+		 */
+		if (pool->npages_free >= (_manager->options.max_size +
+					  NUM_PAGES_TO_ALLOC))
 			npages = pool->npages_free - _manager->options.max_size;
-			/* free at least NUM_PAGES_TO_ALLOC number of pages
-			 * to reduce calls to set_memory_wb */
-			if (npages < NUM_PAGES_TO_ALLOC)
-				npages = NUM_PAGES_TO_ALLOC;
-		}
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
 
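The ttm hunk adds hysteresis to the pool trim: nothing is freed until the pool
sits a full allocation batch above its cap, and then it is trimmed all the way
back to the cap, so set_memory_wb() is invoked for NUM_PAGES_TO_ALLOC pages at
a time rather than one page per unpopulate. The arithmetic, with illustrative
constants:

#include <stdio.h>

#define MAX_SIZE           16384	/* pool cap (illustrative) */
#define NUM_PAGES_TO_ALLOC    64	/* batch size (illustrative) */

static unsigned pages_to_trim(unsigned npages_free)
{
	if (npages_free >= MAX_SIZE + NUM_PAGES_TO_ALLOC)
		return npages_free - MAX_SIZE;	/* trim back to the cap */
	return 0;				/* under threshold: keep all */
}

int main(void)
{
	/* prints "0 64": no trim until a full batch accumulates */
	printf("%u %u\n", pages_to_trim(16400), pages_to_trim(16448));
	return 0;
}
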
@@ -1107,6 +1107,9 @@ static int ipu_irq_init(struct ipu_soc *ipu)
 		return ret;
 	}
 
+	for (i = 0; i < IPU_NUM_IRQS; i += 32)
+		ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
+
 	for (i = 0; i < IPU_NUM_IRQS; i += 32) {
 		gc = irq_get_domain_generic_chip(ipu->domain, i);
 		gc->reg_base = ipu->cm_reg;