Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"I Was Almost Tempted To Capitalise Every Word, but then I decided I
couldn't read it myself!
I've also got one pull request for the sti driver outstanding. It
relied on a commit in Greg's tree and I didn't find out in time; that
commit is in your tree now, so I might send that along once this is
merged.
I also had the accidental misfortune to have access to a Skylake on my
desk for a few days, and I've had to encourage Intel to try harder,
which seems to be happening now.
Here is the main drm-next pull request for 4.4.
Highlights:
New driver:
vc4 driver for the Raspberry Pi VPU.
(From Eric Anholt at Broadcom.)
Core:
Atomic fbdev support
Atomic helpers for runtime pm
dp/aux i2c STATUS_UPDATE handling
struct_mutex usage cleanups.
Generic OF (device tree) probing support.
Documentation:
Kerneldoc for VGA switcheroo code.
Rename to gpu instead of drm to reflect scope.
i915:
Skylake GuC firmware fixes
HPD A support
VBT backlight fallbacks
Fastboot by default for some systems
FBC work
BXT/SKL workarounds
Skylake deeper sleep state fixes
amdgpu:
Enable GPU scheduler by default
New atombios opcodes
GPUVM debugging options
Stoney support.
Fencing cleanups.
radeon:
More efficient CS checking
nouveau:
gk20a instance memory handling improvements.
Improved PGOB detection and GK107 support
Kepler GDDR5 PLL stability improvement
G8x/GT2xx reclock improvements
new userspace API compatibility fixes.
virtio-gpu:
Add 3D support - QEMU 2.5 has it merged for its GTK backend.
msm:
Initial msm8996 (snapdragon 820) support
exynos:
HDMI cleanups
Enable mixer driver by default
Add DECON-TV support
vmwgfx:
Move to using memremap + fixes.
rcar-du:
Add support for R8A7793/4 DU
armada:
Remove support for non-component mode
Improved plane handling
Power savings while in DPMS off.
tda998x:
Remove unused slave encoder support
Use more HDMI helpers
Fix EDID read handling
dwhdmi:
Interlace video mode support for ipu-v3/dw_hdmi
Hotplug state fixes
Audio driver integration
imx:
More color formats support.
tegra:
Minor fixes/improvements"
[ Merge fixup: remove unused variable 'dev' that had all uses removed in
  commit 4e270f0880: "drm/gem: Drop struct_mutex requirement from
  drm_gem_mmap_obj" ]
* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (764 commits)
drm/vmwgfx: Relax irq locking somewhat
drm/vmwgfx: Properly flush cursor updates and page-flips
drm/i915/skl: disable display side power well support for now
drm/i915: Extend DSL readout fix to BDW and SKL.
drm/i915: Do graphics device reset under forcewake
drm/i915: Skip fence installation for objects with rotated views (v4)
vga_switcheroo: Drop client power state VGA_SWITCHEROO_INIT
drm/amdgpu: group together common fence implementation
drm/amdgpu: remove AMDGPU_FENCE_OWNER_MOVE
drm/amdgpu: remove now unused fence functions
drm/amdgpu: fix fence fallback check
drm/amdgpu: fix stoping the scheduler timeout
drm/amdgpu: cleanup on error in amdgpu_cs_ioctl()
drm/i915: Fix locking around GuC firmware load
drm/amdgpu: update Fiji's Golden setting
drm/amdgpu: update Fiji's rev id
drm/amdgpu: extract common code in vi_common_early_init
drm/amd/scheduler: don't oops on failure to load
drm/amdgpu: don't oops on failure to load (v2)
drm/amdgpu: don't VT switch on suspend
...
commit 3e82806b97
468 changed files with 53369 additions and 9828 deletions
@@ -14,7 +14,7 @@ DOCBOOKS := z8530book.xml device-drivers.xml \
	    genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
	    80211.xml debugobjects.xml sh.xml regulator.xml \
	    alsa-driver-api.xml writing-an-alsa-driver.xml \
-	    tracepoint.xml drm.xml media_api.xml w1.xml \
+	    tracepoint.xml gpu.xml media_api.xml w1.xml \
	    writing_musb_glue_layer.xml crypto-API.xml iio.xml

include Documentation/DocBook/media/Makefile
@@ -2,9 +2,9 @@
<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>

-<book id="drmDevelopersGuide">
+<book id="gpuDevelopersGuide">
  <bookinfo>
-    <title>Linux DRM Developer's Guide</title>
+    <title>Linux GPU Driver Developer's Guide</title>

    <authorgroup>
      <author>

@@ -40,6 +40,16 @@
        </address>
      </affiliation>
    </author>
+    <author>
+      <firstname>Lukas</firstname>
+      <surname>Wunner</surname>
+      <contrib>vga_switcheroo documentation</contrib>
+      <affiliation>
+        <address>
+          <email>lukas@wunner.de</email>
+        </address>
+      </affiliation>
+    </author>
  </authorgroup>

  <copyright>

@@ -51,6 +61,10 @@
    <year>2012</year>
    <holder>Laurent Pinchart</holder>
  </copyright>
+  <copyright>
+    <year>2015</year>
+    <holder>Lukas Wunner</holder>
+  </copyright>

  <legalnotice>
  <para>

@@ -69,6 +83,13 @@
      <revremark>Added extensive documentation about driver internals.
      </revremark>
    </revision>
+    <revision>
+      <revnumber>1.1</revnumber>
+      <date>2015-10-11</date>
+      <authorinitials>LW</authorinitials>
+      <revremark>Added vga_switcheroo documentation.
+      </revremark>
+    </revision>
  </revhistory>
</bookinfo>

@@ -78,9 +99,9 @@
  <title>DRM Core</title>
  <partintro>
    <para>
-      This first part of the DRM Developer's Guide documents core DRM code,
-      helper libraries for writing drivers and generic userspace interfaces
-      exposed by DRM drivers.
+      This first part of the GPU Driver Developer's Guide documents core DRM
+      code, helper libraries for writing drivers and generic userspace
+      interfaces exposed by DRM drivers.
    </para>
  </partintro>

@@ -138,14 +159,10 @@
    <para>
      At the core of every DRM driver is a <structname>drm_driver</structname>
      structure. Drivers typically statically initialize a drm_driver structure,
-      and then pass it to one of the <function>drm_*_init()</function> functions
-      to register it with the DRM subsystem.
-    </para>
-    <para>
-      Newer drivers that no longer require a <structname>drm_bus</structname>
-      structure can alternatively use the low-level device initialization and
-      registration functions such as <function>drm_dev_alloc()</function> and
-      <function>drm_dev_register()</function> directly.
+      and then pass it to <function>drm_dev_alloc()</function> to allocate a
+      device instance. After the device instance is fully initialized it can be
+      registered (which makes it accessible from userspace) using
+      <function>drm_dev_register()</function>.
    </para>
    <para>
      The <structname>drm_driver</structname> structure contains static
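[ Illustration, not from this merge: a minimal sketch of the
  allocation/registration flow the new paragraph describes. The foo_*
  names are hypothetical placeholders and error handling is abbreviated.

	static struct drm_driver foo_drm_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET,
		/* ... fops, GEM callbacks, name/desc/date ... */
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct drm_device *drm;
		int ret;

		/* allocate the device instance */
		drm = drm_dev_alloc(&foo_drm_driver, &pdev->dev);
		if (!drm)
			return -ENOMEM;

		/* driver-specific initialization, before registration */

		/* make the device visible to userspace */
		ret = drm_dev_register(drm, 0);
		if (ret)
			drm_dev_unref(drm);
		return ret;
	} ]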
@@ -296,83 +313,12 @@ char *date;</synopsis>
    </sect3>
  </sect2>
  <sect2>
-    <title>Device Registration</title>
-    <para>
-      A number of functions are provided to help with device registration.
-      The functions deal with PCI and platform devices, respectively.
-    </para>
-!Edrivers/gpu/drm/drm_pci.c
-!Edrivers/gpu/drm/drm_platform.c
-    <para>
-      New drivers that no longer rely on the services provided by the
-      <structname>drm_bus</structname> structure can call the low-level
-      device registration functions directly. The
-      <function>drm_dev_alloc()</function> function can be used to allocate
-      and initialize a new <structname>drm_device</structname> structure.
-      Drivers will typically want to perform some additional setup on this
-      structure, such as allocating driver-specific data and storing a
-      pointer to it in the DRM device's <structfield>dev_private</structfield>
-      field. Drivers should also set the device's unique name using the
-      <function>drm_dev_set_unique()</function> function. After it has been
-      set up a device can be registered with the DRM subsystem by calling
-      <function>drm_dev_register()</function>. This will cause the device to
-      be exposed to userspace and will call the driver's
-      <structfield>.load()</structfield> implementation. When a device is
-      removed, the DRM device can safely be unregistered and freed by calling
-      <function>drm_dev_unregister()</function> followed by a call to
-      <function>drm_dev_unref()</function>.
-    </para>
+    <title>Device Instance and Driver Handling</title>
+!Pdrivers/gpu/drm/drm_drv.c driver instance overview
+!Edrivers/gpu/drm/drm_drv.c
  </sect2>
  <sect2>
-    <title>Driver Load</title>
-    <para>
-      The <methodname>load</methodname> method is the driver and device
-      initialization entry point. The method is responsible for allocating and
-      initializing driver private data, performing resource allocation and
-      mapping (e.g. acquiring
-      clocks, mapping registers or allocating command buffers), initializing
-      the memory manager (<xref linkend="drm-memory-management"/>), installing
-      the IRQ handler (<xref linkend="drm-irq-registration"/>), setting up
-      vertical blanking handling (<xref linkend="drm-vertical-blank"/>), mode
-      setting (<xref linkend="drm-mode-setting"/>) and initial output
-      configuration (<xref linkend="drm-kms-init"/>).
-    </para>
-    <note><para>
-      If compatibility is a concern (e.g. with drivers converted over from
-      User Mode Setting to Kernel Mode Setting), care must be taken to prevent
-      device initialization and control that is incompatible with currently
-      active userspace drivers. For instance, if user level mode setting
-      drivers are in use, it would be problematic to perform output discovery
-      & configuration at load time. Likewise, if user-level drivers
-      unaware of memory management are in use, memory management and command
-      buffer setup may need to be omitted. These requirements are
-      driver-specific, and care needs to be taken to keep both old and new
-      applications and libraries working.
-    </para></note>
-    <synopsis>int (*load) (struct drm_device *, unsigned long flags);</synopsis>
-    <para>
-      The method takes two arguments, a pointer to the newly created
-      <structname>drm_device</structname> and flags. The flags are used to
-      pass the <structfield>driver_data</structfield> field of the device id
-      corresponding to the device passed to <function>drm_*_init()</function>.
-      Only PCI devices currently use this, USB and platform DRM drivers have
-      their <methodname>load</methodname> method called with flags to 0.
-    </para>
-    <sect3>
-      <title>Driver Private Data</title>
-      <para>
-        The driver private hangs off the main
-        <structname>drm_device</structname> structure and can be used for
-        tracking various device-specific bits of information, like register
-        offsets, command buffer status, register state for suspend/resume, etc.
-        At load time, a driver may simply allocate one and set
-        <structname>drm_device</structname>.<structfield>dev_priv</structfield>
-        appropriately; it should be freed and
-        <structname>drm_device</structname>.<structfield>dev_priv</structfield>
-        set to NULL when the driver is unloaded.
-      </para>
-    </sect3>
    <sect3 id="drm-irq-registration">
      <title>IRQ Registration</title>
      <para>

@@ -465,6 +411,18 @@ char *date;</synopsis>
      </para>
    </sect3>
  </sect2>
+  <sect2>
+    <title>Bus-specific Device Registration and PCI Support</title>
+    <para>
+      A number of functions are provided to help with device registration.
+      The functions deal with PCI and platform devices respectively and are
+      only provided for historical reasons. These are all deprecated and
+      shouldn't be used in new drivers. Besides that there's a few
+      helpers for pci drivers.
+    </para>
+!Edrivers/gpu/drm/drm_pci.c
+!Edrivers/gpu/drm/drm_platform.c
+  </sect2>
  </sect1>

  <!-- Internals: memory management -->

@@ -3646,10 +3604,11 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
      plane properties to default value, so that a subsequent open of the
      device will not inherit state from the previous user. It can also be
      used to execute delayed power switching state changes, e.g. in
-      conjunction with the vga-switcheroo infrastructure. Beyond that KMS
-      drivers should not do any further cleanup. Only legacy UMS drivers might
-      need to clean up device state so that the vga console or an independent
-      fbdev driver could take over.
+      conjunction with the vga_switcheroo infrastructure (see
+      <xref linkend="vga_switcheroo"/>). Beyond that KMS drivers should not
+      do any further cleanup. Only legacy UMS drivers might need to clean up
+      device state so that the vga console or an independent fbdev driver
+      could take over.
    </para>
  </sect2>
  <sect2>

@@ -3747,11 +3706,14 @@ int num_ioctls;</synopsis>
        </para></listitem>
        <listitem><para>
          DRM_UNLOCKED - The ioctl handler will be called without locking
-          the DRM global mutex
+          the DRM global mutex. This is the enforced default for kms drivers
+          (i.e. using the DRIVER_MODESET flag) and hence shouldn't be used
+          any more for new drivers.
        </para></listitem>
      </itemizedlist>
    </para>
    </para>
+!Edrivers/gpu/drm/drm_ioctl.c
  </sect2>
  </sect1>
  <sect1>
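[ Illustration, not from this merge: a hypothetical ioctl-table entry
  showing the flags discussed above. As the new text notes, KMS drivers
  (DRIVER_MODESET) get unlocked ioctls by default, so DRM_UNLOCKED is
  not needed in new code. The FOO_* and foo_* names are placeholders.

	static const struct drm_ioctl_desc foo_ioctls[] = {
		DRM_IOCTL_DEF_DRV(FOO_GET_PARAM, foo_get_param_ioctl,
				  DRM_AUTH | DRM_RENDER_ALLOW),
	}; ]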
@@ -3949,8 +3911,8 @@ int num_ioctls;</synopsis>

  <partintro>
    <para>
-      This second part of the DRM Developer's Guide documents driver code,
-      implementation details and also all the driver-specific userspace
+      This second part of the GPU Driver Developer's Guide documents driver
+      code, implementation details and also all the driver-specific userspace
      interfaces. Especially since all hardware-acceleration interfaces to
      userspace are driver specific for efficiency and other reasons these
      interfaces can be rather substantial. Hence every driver has its own

@@ -4051,6 +4013,7 @@ int num_ioctls;</synopsis>
      <title>High Definition Audio</title>
!Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
!Idrivers/gpu/drm/i915/intel_audio.c
+!Iinclude/drm/i915_component.h
    </sect2>
    <sect2>
      <title>Panel Self Refresh PSR (PSR/SRD)</title>

@@ -4237,6 +4200,20 @@ int num_ioctls;</synopsis>
!Idrivers/gpu/drm/i915/i915_gem_shrinker.c
    </sect2>
  </sect1>
+  <sect1>
+    <title>GuC-based Command Submission</title>
+    <sect2>
+      <title>GuC</title>
+!Pdrivers/gpu/drm/i915/intel_guc_loader.c GuC-specific firmware loader
+!Idrivers/gpu/drm/i915/intel_guc_loader.c
+    </sect2>
+    <sect2>
+      <title>GuC Client</title>
+!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submissison
+!Idrivers/gpu/drm/i915/i915_guc_submission.c
+    </sect2>
+  </sect1>

  <sect1>
    <title> Tracing </title>
    <para>

@@ -4260,4 +4237,50 @@ int num_ioctls;</synopsis>
  </chapter>
!Cdrivers/gpu/drm/i915/i915_irq.c
</part>

+<part id="vga_switcheroo">
+  <title>vga_switcheroo</title>
+  <partintro>
+!Pdrivers/gpu/vga/vga_switcheroo.c Overview
+  </partintro>
+
+  <chapter id="modes_of_use">
+    <title>Modes of Use</title>
+    <sect1>
+      <title>Manual switching and manual power control</title>
+!Pdrivers/gpu/vga/vga_switcheroo.c Manual switching and manual power control
+    </sect1>
+    <sect1>
+      <title>Driver power control</title>
+!Pdrivers/gpu/vga/vga_switcheroo.c Driver power control
+    </sect1>
+  </chapter>
+
+  <chapter id="pubfunctions">
+    <title>Public functions</title>
+!Edrivers/gpu/vga/vga_switcheroo.c
+  </chapter>
+
+  <chapter id="pubstructures">
+    <title>Public structures</title>
+!Finclude/linux/vga_switcheroo.h vga_switcheroo_handler
+!Finclude/linux/vga_switcheroo.h vga_switcheroo_client_ops
+  </chapter>
+
+  <chapter id="pubconstants">
+    <title>Public constants</title>
+!Finclude/linux/vga_switcheroo.h vga_switcheroo_client_id
+!Finclude/linux/vga_switcheroo.h vga_switcheroo_state
+  </chapter>
+
+  <chapter id="privstructures">
+    <title>Private structures</title>
+!Fdrivers/gpu/vga/vga_switcheroo.c vgasr_priv
+!Fdrivers/gpu/vga/vga_switcheroo.c vga_switcheroo_client
+  </chapter>
+
+!Cdrivers/gpu/vga/vga_switcheroo.c
+!Cinclude/linux/vga_switcheroo.h
+</part>

</book>
Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt (new file, 65 lines)
@@ -0,0 +1,65 @@
Broadcom VC4 (VideoCore4) GPU

The VC4 device present on the Raspberry Pi includes a display system
with HDMI output and the HVS (Hardware Video Scaler) for compositing
display planes.

Required properties for VC4:
- compatible:	Should be "brcm,bcm2835-vc4"

Required properties for Pixel Valve:
- compatible:	Should be one of "brcm,bcm2835-pixelvalve0",
		"brcm,bcm2835-pixelvalve1", or "brcm,bcm2835-pixelvalve2"
- reg:		Physical base address and length of the PV's registers
- interrupts:	The interrupt number
		See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt

Required properties for HVS:
- compatible:	Should be "brcm,bcm2835-hvs"
- reg:		Physical base address and length of the HVS's registers
- interrupts:	The interrupt number
		See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt

Required properties for HDMI
- compatible:	Should be "brcm,bcm2835-hdmi"
- reg:		Physical base address and length of the two register ranges
		("HDMI" and "HD", in that order)
- interrupts:	The interrupt numbers
		See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
- ddc:		phandle of the I2C controller used for DDC EDID probing
- clocks:	a) hdmi: The HDMI state machine clock
		b) pixel: The pixel clock.

Optional properties for HDMI:
- hpd-gpios:	The GPIO pin for HDMI hotplug detect (if it doesn't appear
		as an interrupt/status bit in the HDMI controller
		itself). See bindings/pinctrl/brcm,bcm2835-gpio.txt

Example:
pixelvalve@7e807000 {
	compatible = "brcm,bcm2835-pixelvalve2";
	reg = <0x7e807000 0x100>;
	interrupts = <2 10>; /* pixelvalve */
};

hvs@7e400000 {
	compatible = "brcm,bcm2835-hvs";
	reg = <0x7e400000 0x6000>;
	interrupts = <2 1>;
};

hdmi: hdmi@7e902000 {
	compatible = "brcm,bcm2835-hdmi";
	reg = <0x7e902000 0x600>,
	      <0x7e808000 0x100>;
	interrupts = <2 8>, <2 9>;
	ddc = <&i2c2>;
	hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
	clocks = <&clocks BCM2835_PLLH_PIX>,
		 <&clocks BCM2835_CLOCK_HSM>;
	clock-names = "pixel", "hdmi";
};

vc4: gpu {
	compatible = "brcm,bcm2835-vc4";
};
@@ -2,6 +2,7 @@ Qualcomm adreno/snapdragon hdmi output

Required properties:
- compatible: one of the following
+   * "qcom,hdmi-tx-8996"
   * "qcom,hdmi-tx-8994"
   * "qcom,hdmi-tx-8084"
   * "qcom,hdmi-tx-8974"

@@ -21,6 +22,7 @@ Required properties:
Optional properties:
- qcom,hdmi-tx-mux-en-gpio: hdmi mux enable pin
- qcom,hdmi-tx-mux-sel-gpio: hdmi mux select pin
+- power-domains: reference to the power domain(s), if available.
- pinctrl-names: the pin control state names; should contain "default"
- pinctrl-0: the default pinctrl state (active)
- pinctrl-1: the "sleep" pinctrl state

@@ -35,6 +37,7 @@ Example:
	reg-names = "core_physical";
	reg = <0x04a00000 0x1000>;
	interrupts = <GIC_SPI 79 0>;
+	power-domains = <&mmcc MDSS_GDSC>;
	clock-names =
	    "core_clk",
	    "master_iface_clk",
@@ -11,13 +11,14 @@ Required properties:
- clock-names: the following clocks are required:
  * "core_clk"
  * "iface_clk"
-  * "lut_clk"
  * "src_clk"
  * "hdmi_clk"
  * "mpd_clk"

Optional properties:
- gpus: phandle for gpu device
+- clock-names: the following clocks are optional:
+  * "lut_clk"

Example:
@@ -5,7 +5,9 @@ Required Properties:
  - compatible: must be one of the following.
    - "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
    - "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
-    - "renesas,du-r8a7791" for R8A7791 (R-Car M2) compatible DU
+    - "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
+    - "renesas,du-r8a7793" for R8A7793 (R-Car M2-N) compatible DU
+    - "renesas,du-r8a7794" for R8A7794 (R-Car E2) compatible DU

  - reg: A list of base address and length of each memory resource, one for
    each entry in the reg-names property.

@@ -22,9 +24,9 @@ Required Properties:
  - clock-names: Name of the clocks. This property is model-dependent.
    - R8A7779 uses a single functional clock. The clock doesn't need to be
      named.
-    - R8A7790 and R8A7791 use one functional clock per channel and one clock
-      per LVDS encoder. The functional clocks must be named "du.x" with "x"
-      being the channel numerical index. The LVDS clocks must be named
+    - R8A779[0134] use one functional clock per channel and one clock per LVDS
+      encoder (if available). The functional clocks must be named "du.x" with
+      "x" being the channel numerical index. The LVDS clocks must be named
      "lvds.x" with "x" being the LVDS encoder numerical index.
    - In addition to the functional and encoder clocks, all DU versions also
      support externally supplied pixel clocks. Those clocks are optional.
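[ Illustration, not from this merge: following the naming rule above, a
  clocks/clock-names fragment for an R8A7790 DU with two LVDS encoders
  could look like this (the clock phandles are hypothetical):

	clocks = <&mstp7_clks R8A7790_CLK_DU0>,
		 <&mstp7_clks R8A7790_CLK_DU1>,
		 <&mstp7_clks R8A7790_CLK_DU2>,
		 <&mstp7_clks R8A7790_CLK_LVDS0>,
		 <&mstp7_clks R8A7790_CLK_LVDS1>;
	clock-names = "du.0", "du.1", "du.2", "lvds.0", "lvds.1"; ]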
@@ -43,7 +45,9 @@ corresponding to each DU output.
-----------------------------------------------------------------------------
 R8A7779 (H1)		DPAD 0		DPAD 1		-
 R8A7790 (H2)		DPAD		LVDS 0		LVDS 1
-R8A7791 (M2)		DPAD		LVDS 0		-
+R8A7791 (M2-W)		DPAD		LVDS 0		-
+R8A7793 (M2-N)		DPAD		LVDS 0		-
+R8A7794 (E2)		DPAD 0		DPAD 1		-


Example: R8A7790 (R-Car H2) DU
@@ -932,11 +932,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			The filter can be disabled or changed to another
			driver later using sysfs.

-	drm_kms_helper.edid_firmware=[<connector>:]<file>
-			Broken monitors, graphic adapters and KVMs may
-			send no or incorrect EDID data sets. This parameter
-			allows to specify an EDID data set in the
-			/lib/firmware directory that is used instead.
+	drm_kms_helper.edid_firmware=[<connector>:]<file>[,[<connector>:]<file>]
+			Broken monitors, graphic adapters, KVMs and EDIDless
+			panels may send no or incorrect EDID data sets.
+			This parameter allows to specify an EDID data sets
+			in the /lib/firmware directory that are used instead.
			Generic built-in EDID data sets are used, if one of
			edid/1024x768.bin, edid/1280x1024.bin,
			edid/1680x1050.bin, or edid/1920x1080.bin is given

@@ -945,7 +945,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			available in Documentation/EDID/HOWTO.txt. An EDID
			data set will only be used for a particular connector,
			if its name and a colon are prepended to the EDID
-			name.
+			name. Each connector may use a unique EDID data
+			set by separating the files with a comma. An EDID
+			data set with no connector name will be used for
+			any connectors not explicitly specified.

	dscc4.setup=	[NET]
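[ Illustration, not from this merge: with the extended syntax above, a
  boot command line can now override the EDID per connector, e.g.
  (connector names vary by driver):

	drm_kms_helper.edid_firmware=DVI-I-1:edid/1920x1080.bin,VGA-1:edid/1280x1024.bin ]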
@@ -3624,6 +3624,7 @@ M: Daniel Vetter <daniel.vetter@intel.com>
M:	Jani Nikula <jani.nikula@linux.intel.com>
L:	intel-gfx@lists.freedesktop.org
+L:	dri-devel@lists.freedesktop.org
W:	https://01.org/linuxgraphics/
Q:	http://patchwork.freedesktop.org/project/intel-gfx/
T:	git git://anongit.freedesktop.org/drm-intel
S:	Supported

@@ -3664,6 +3665,7 @@ M: Philipp Zabel <p.zabel@pengutronix.de>
L:	dri-devel@lists.freedesktop.org
S:	Maintained
F:	drivers/gpu/drm/imx/
+F:	drivers/gpu/ipu-v3/
F:	Documentation/devicetree/bindings/display/imx/

DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets)
@@ -132,6 +132,7 @@ CONFIG_DRM_PARADE_PS8622=y
CONFIG_DRM_EXYNOS=y
CONFIG_DRM_EXYNOS_FIMD=y
CONFIG_DRM_EXYNOS_DSI=y
+CONFIG_DRM_EXYNOS_MIXER=y
CONFIG_DRM_EXYNOS_HDMI=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=y
@@ -397,6 +397,104 @@ fence_default_wait(struct fence *fence, bool intr, signed long timeout)
}
EXPORT_SYMBOL(fence_default_wait);

+static bool
+fence_test_signaled_any(struct fence **fences, uint32_t count)
+{
+	int i;
+
+	for (i = 0; i < count; ++i) {
+		struct fence *fence = fences[i];
+		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+			return true;
+	}
+	return false;
+}
+
+/**
+ * fence_wait_any_timeout - sleep until any fence gets signaled
+ * or until timeout elapses
+ * @fences:	[in]	array of fences to wait on
+ * @count:	[in]	number of fences to wait on
+ * @intr:	[in]	if true, do an interruptible wait
+ * @timeout:	[in]	timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ *
+ * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
+ * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
+ * on success.
+ *
+ * Synchronous waits for the first fence in the array to be signaled. The
+ * caller needs to hold a reference to all fences in the array, otherwise a
+ * fence might be freed before return, resulting in undefined behavior.
+ */
+signed long
+fence_wait_any_timeout(struct fence **fences, uint32_t count,
+		       bool intr, signed long timeout)
+{
+	struct default_wait_cb *cb;
+	signed long ret = timeout;
+	unsigned i;
+
+	if (WARN_ON(!fences || !count || timeout < 0))
+		return -EINVAL;
+
+	if (timeout == 0) {
+		for (i = 0; i < count; ++i)
+			if (fence_is_signaled(fences[i]))
+				return 1;
+
+		return 0;
+	}
+
+	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
+	if (cb == NULL) {
+		ret = -ENOMEM;
+		goto err_free_cb;
+	}
+
+	for (i = 0; i < count; ++i) {
+		struct fence *fence = fences[i];
+
+		if (fence->ops->wait != fence_default_wait) {
+			ret = -EINVAL;
+			goto fence_rm_cb;
+		}
+
+		cb[i].task = current;
+		if (fence_add_callback(fence, &cb[i].base,
+				       fence_default_wait_cb)) {
+			/* This fence is already signaled */
+			goto fence_rm_cb;
+		}
+	}
+
+	while (ret > 0) {
+		if (intr)
+			set_current_state(TASK_INTERRUPTIBLE);
+		else
+			set_current_state(TASK_UNINTERRUPTIBLE);
+
+		if (fence_test_signaled_any(fences, count))
+			break;
+
+		ret = schedule_timeout(ret);
+
+		if (ret > 0 && intr && signal_pending(current))
+			ret = -ERESTARTSYS;
+	}
+
+	__set_current_state(TASK_RUNNING);
+
+fence_rm_cb:
+	while (i-- > 0)
+		fence_remove_callback(fences[i], &cb[i].base);
+
+err_free_cb:
+	kfree(cb);
+
+	return ret;
+}
+EXPORT_SYMBOL(fence_wait_any_timeout);
+
/**
 * fence_init - Initialize a custom fence.
 * @fence:	[in]	the fence to initialize
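[ Illustration, not from this merge: how a caller might use the new
  fence_wait_any_timeout() export, per the kernel-doc above. The fences
  array and count are assumed to be set up, with references held by the
  caller.

	signed long r;

	/* wait up to one second for the first of the fences to signal */
	r = fence_wait_any_timeout(fences, count, true,
				   msecs_to_jiffies(1000));
	if (r > 0) {
		/* a fence signaled; r is the remaining timeout in jiffies */
	} else if (r == 0) {
		/* the wait timed out */
	} else {
		/* -ERESTARTSYS (interrupted) or -EINVAL (custom wait impl) */
	} ]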
@@ -264,3 +264,5 @@ source "drivers/gpu/drm/sti/Kconfig"
source "drivers/gpu/drm/amd/amdkfd/Kconfig"

source "drivers/gpu/drm/imx/Kconfig"
+
+source "drivers/gpu/drm/vc4/Kconfig"

@@ -6,7 +6,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
		drm_context.o drm_dma.o \
		drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
		drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
-		drm_agpsupport.o drm_scatter.o drm_pci.o \
+		drm_scatter.o drm_pci.o \
		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
		drm_crtc.o drm_modes.o drm_edid.o \
		drm_info.o drm_debugfs.o drm_encoder_slave.o \

@@ -19,6 +19,9 @@ drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-$(CONFIG_PCI) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
+drm-$(CONFIG_AGP) += drm_agpsupport.o
+
+drm-y += $(drm-m)

drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o

@@ -42,6 +45,7 @@ obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_MGAG200) += mgag200/
+obj-$(CONFIG_DRM_VC4) += vc4/
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
@@ -79,6 +79,8 @@ extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
+extern int amdgpu_vm_fault_stop;
+extern int amdgpu_vm_debug;
extern int amdgpu_enable_scheduler;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;

@@ -343,7 +345,6 @@ struct amdgpu_ring_funcs {
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring);
-	bool (*is_lockup)(struct amdgpu_ring *ring);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
};

@@ -404,7 +405,6 @@ struct amdgpu_fence_driver {
/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void*)1ul)
-#define AMDGPU_FENCE_OWNER_MOVE		((void*)2ul)

#define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
#define AMDGPU_FENCE_FLAG_INT		(1 << 1)

@@ -446,58 +446,11 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

-signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
-				  struct fence **array,
-				  uint32_t count,
-				  bool intr,
-				  signed long t);
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
void amdgpu_fence_unref(struct amdgpu_fence **fence);

-bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
-			    struct amdgpu_ring *ring);
-void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
-			    struct amdgpu_ring *ring);
-
-static inline struct amdgpu_fence *amdgpu_fence_later(struct amdgpu_fence *a,
-						      struct amdgpu_fence *b)
-{
-	if (!a) {
-		return b;
-	}
-
-	if (!b) {
-		return a;
-	}
-
-	BUG_ON(a->ring != b->ring);
-
-	if (a->seq > b->seq) {
-		return a;
-	} else {
-		return b;
-	}
-}
-
-static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
-					   struct amdgpu_fence *b)
-{
-	if (!a) {
-		return false;
-	}
-
-	if (!b) {
-		return true;
-	}
-
-	BUG_ON(a->ring != b->ring);
-
-	return a->seq < b->seq;
-}
-
int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
			   void *owner, struct amdgpu_fence **fence);

/*
 * TTM.
 */
@@ -708,7 +661,7 @@ void amdgpu_semaphore_free(struct amdgpu_device *adev,
 */
struct amdgpu_sync {
	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
-	struct amdgpu_fence	*sync_to[AMDGPU_MAX_RINGS];
+	struct fence		*sync_to[AMDGPU_MAX_RINGS];
	DECLARE_HASHTABLE(fences, 4);
	struct fence	        *last_vm_update;
};

@@ -905,8 +858,6 @@ struct amdgpu_ring {
	unsigned		ring_size;
	unsigned		ring_free_dw;
	int			count_dw;
-	atomic_t		last_rptr;
-	atomic64_t		last_activity;
	uint64_t		gpu_addr;
	uint32_t		align_mask;
	uint32_t		ptr_mask;

@@ -960,6 +911,11 @@ struct amdgpu_ring {
#define AMDGPU_PTE_FRAG_64KB	(4 << 7)
#define AMDGPU_LOG2_PAGES_PER_FRAG 4

+/* How to programm VM fault handling */
+#define AMDGPU_VM_FAULT_STOP_NEVER	0
+#define AMDGPU_VM_FAULT_STOP_FIRST	1
+#define AMDGPU_VM_FAULT_STOP_ALWAYS	2
+
struct amdgpu_vm_pt {
	struct amdgpu_bo	*bo;
	uint64_t		addr;

@@ -971,7 +927,7 @@ struct amdgpu_vm_id {
	/* last flushed PD/PT update */
	struct fence		*flushed_updates;
	/* last use of vmid */
-	struct amdgpu_fence	*last_id_use;
+	struct fence		*last_id_use;
};

struct amdgpu_vm {

@@ -1004,7 +960,7 @@ struct amdgpu_vm {
};

struct amdgpu_vm_manager {
-	struct amdgpu_fence		*active[AMDGPU_NUM_VM];
+	struct fence			*active[AMDGPU_NUM_VM];
	uint32_t			max_pfn;
	/* number of VMIDs */
	unsigned			nvm;

@@ -1223,8 +1179,6 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
-void amdgpu_ring_lockup_update(struct amdgpu_ring *ring);
-bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring);
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data);
int amdgpu_ring_restore(struct amdgpu_ring *ring,
@@ -1234,6 +1188,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
+struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f);

/*
 * CS.

@@ -1709,7 +1664,7 @@ struct amdgpu_vce {
/*
 * SDMA
 */
-struct amdgpu_sdma {
+struct amdgpu_sdma_instance {
	/* SDMA firmware */
	const struct firmware	*fw;
	uint32_t		fw_version;

@@ -1719,6 +1674,13 @@ struct amdgpu_sdma {
	bool			burst_nop;
};

+struct amdgpu_sdma {
+	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+	struct amdgpu_irq_src	trap_irq;
+	struct amdgpu_irq_src	illegal_inst_irq;
+	int			num_instances;
+};
+
/*
 * Firmware
 */

@@ -1751,11 +1713,11 @@ void amdgpu_test_syncing(struct amdgpu_device *adev);
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
#else
-static int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	return -ENODEV;
}
-static void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
+static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
#endif

/*

@@ -1947,7 +1909,6 @@ struct amdgpu_device {
	struct device			*dev;
	struct drm_device		*ddev;
	struct pci_dev			*pdev;
-	struct rw_semaphore		exclusive_lock;

	/* ASIC */
	enum amd_asic_type		asic_type;

@@ -1961,7 +1922,6 @@ struct amdgpu_device {
	bool				suspend;
	bool				need_dma32;
	bool				accel_working;
-	bool				needs_reset;
	struct work_struct		reset_work;
	struct notifier_block		acpi_nb;
	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];

@@ -2065,9 +2025,7 @@ struct amdgpu_device {
	struct amdgpu_gfx		gfx;

	/* sdma */
-	struct amdgpu_sdma		sdma[AMDGPU_MAX_SDMA_INSTANCES];
-	struct amdgpu_irq_src		sdma_trap_irq;
-	struct amdgpu_irq_src		sdma_illegal_inst_irq;
+	struct amdgpu_sdma		sdma;

	/* uvd */
	bool				has_uvd;
@@ -2204,17 +2162,18 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
	ring->ring_free_dw--;
}

-static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+static inline struct amdgpu_sdma_instance *
+amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

-	for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
-		if (&adev->sdma[i].ring == ring)
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		if (&adev->sdma.instance[i].ring == ring)
			break;

	if (i < AMDGPU_MAX_SDMA_INSTANCES)
-		return &adev->sdma[i];
+		return &adev->sdma.instance[i];
	else
		return NULL;
}

@@ -2241,7 +2200,6 @@ static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
-#define amdgpu_ring_is_lockup(r) (r)->funcs->is_lockup((r))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))

@@ -2350,10 +2308,10 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv);
int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc);
-int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc);
-void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc);
-int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
+int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags);
@@ -25,7 +25,6 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/power_supply.h>
-#include <linux/vga_switcheroo.h>
#include <acpi/video.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

@@ -649,12 +649,12 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)

	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
-			adev->sdma[0].fw->data;
+			adev->sdma.instance[0].fw->data;
		break;

	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
-			adev->sdma[1].fw->data;
+			adev->sdma.instance[1].fw->data;
		break;

	default:

@@ -523,12 +523,12 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)

	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
-			adev->sdma[0].fw->data;
+			adev->sdma.instance[0].fw->data;
		break;

	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
-			adev->sdma[1].fw->data;
+			adev->sdma.instance[1].fw->data;
		break;

	default:
@@ -501,7 +501,7 @@ static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
		return VGA_SWITCHEROO_DIS;
}

-static struct vga_switcheroo_handler amdgpu_atpx_handler = {
+static const struct vga_switcheroo_handler amdgpu_atpx_handler = {
	.switchto = amdgpu_atpx_switchto,
	.power_state = amdgpu_atpx_power_state,
	.init = amdgpu_atpx_init,

@@ -536,7 +536,7 @@ static bool amdgpu_atpx_detect(void)

	if (has_atpx && vga_count == 2) {
		acpi_get_name(amdgpu_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
-		printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
+		printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
		       acpi_method_name);
		amdgpu_atpx_priv.atpx_detected = true;
		return true;

@@ -29,7 +29,6 @@
#include "amdgpu.h"
#include "atom.h"

-#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/acpi.h>
/*
@@ -104,10 +104,11 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		}
		break;
	case AMDGPU_HW_IP_DMA:
-		if (ring < 2) {
-			*out_ring = &adev->sdma[ring].ring;
+		if (ring < adev->sdma.num_instances) {
+			*out_ring = &adev->sdma.instance[ring].ring;
		} else {
-			DRM_ERROR("only two SDMA rings are supported\n");
+			DRM_ERROR("only %d SDMA rings are supported\n",
+				  adev->sdma.num_instances);
			return -EINVAL;
		}
		break;

@@ -567,9 +568,24 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
			if (r)
				return r;
		}

	}

-	return amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);
+	r = amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);
+
+	if (amdgpu_vm_debug && p->bo_list) {
+		/* Invalidate all BOs to test for userspace bugs */
+		for (i = 0; i < p->bo_list->num_entries; i++) {
+			/* ignore duplicates */
+			bo = p->bo_list->array[i].robj;
+			if (!bo)
+				continue;
+
+			amdgpu_vm_bo_invalidate(adev, bo);
+		}
+	}
+
+	return r;
}

static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
@@ -593,7 +609,6 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
		}
	}

-	mutex_lock(&vm->mutex);
	r = amdgpu_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;

@@ -604,7 +619,6 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
				       parser->filp);

out:
-	mutex_unlock(&vm->mutex);
	return r;
}

@@ -812,15 +826,14 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_cs_parser *parser;
	bool reserved_buffers = false;
	int i, r;

-	down_read(&adev->exclusive_lock);
-	if (!adev->accel_working) {
-		up_read(&adev->exclusive_lock);
+	if (!adev->accel_working)
		return -EBUSY;
-	}

	parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
	if (!parser)

@@ -828,12 +841,11 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
	r = amdgpu_cs_parser_init(parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
-		kfree(parser);
-		up_read(&adev->exclusive_lock);
+		amdgpu_cs_parser_fini(parser, r, false);
		r = amdgpu_cs_handle_lockup(adev, r);
		return r;
	}

+	mutex_lock(&vm->mutex);
	r = amdgpu_cs_parser_relocs(parser);
	if (r == -ENOMEM)
		DRM_ERROR("Not enough memory for command submission!\n");

@@ -864,8 +876,10 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
		struct amdgpu_job *job;
		struct amdgpu_ring * ring = parser->ibs->ring;
		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
-		if (!job)
-			return -ENOMEM;
+		if (!job) {
+			r = -ENOMEM;
+			goto out;
+		}
		job->base.sched = &ring->sched;
		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
		job->adev = parser->adev;

@@ -900,14 +914,14 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)

		mutex_unlock(&job->job_lock);
		amdgpu_cs_parser_fini_late(parser);
-		up_read(&adev->exclusive_lock);
+		mutex_unlock(&vm->mutex);
		return 0;
	}

	cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
out:
	amdgpu_cs_parser_fini(parser, r, reserved_buffers);
-	up_read(&adev->exclusive_lock);
+	mutex_unlock(&vm->mutex);
	r = amdgpu_cs_handle_lockup(adev, r);
	return r;
}
@@ -69,6 +69,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

+	if (!adev)
+		return;
+
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
			fence_put(ctx->rings[i].fences[j]);

@@ -57,6 +57,7 @@ static const char *amdgpu_asic_name[] = {
	"TONGA",
	"FIJI",
	"CARRIZO",
+	"STONEY",
	"LAST",
};

@@ -1022,7 +1023,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
- * @state: vga switcheroo state
+ * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes the
 * the asics before or after it is powered up using ACPI methods.

@@ -1165,7 +1166,8 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
-		if (adev->asic_type == CHIP_CARRIZO)
+	case CHIP_STONEY:
+		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

@@ -1418,7 +1420,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->grbm_idx_mutex);
-	init_rwsem(&adev->exclusive_lock);
	mutex_init(&adev->mn_lock);
	hash_init(adev->mn_hash);

@@ -1657,11 +1658,21 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
	}
	drm_modeset_unlock_all(dev);

-	/* unpin the front buffers */
+	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
		struct amdgpu_bo *robj;

+		if (amdgpu_crtc->cursor_bo) {
+			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+			r = amdgpu_bo_reserve(aobj, false);
+			if (r == 0) {
+				amdgpu_bo_unpin(aobj);
+				amdgpu_bo_unreserve(aobj);
+			}
+		}
+
		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
@@ -1713,6 +1724,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct amdgpu_device *adev = dev->dev_private;
+	struct drm_crtc *crtc;
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)

@@ -1746,6 +1758,24 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
	if (r)
		return r;

+	/* pin cursors */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+		if (amdgpu_crtc->cursor_bo) {
+			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+			r = amdgpu_bo_reserve(aobj, false);
+			if (r == 0) {
+				r = amdgpu_bo_pin(aobj,
+						  AMDGPU_GEM_DOMAIN_VRAM,
+						  &amdgpu_crtc->cursor_addr);
+				if (r != 0)
+					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+				amdgpu_bo_unreserve(aobj);
+			}
+		}
+	}
+
	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
@@ -1785,14 +1815,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
	int i, r;
	int resched;

-	down_write(&adev->exclusive_lock);
-
-	if (!adev->needs_reset) {
-		up_write(&adev->exclusive_lock);
-		return 0;
-	}
-
-	adev->needs_reset = false;
	atomic_inc(&adev->gpu_reset_counter);

	/* block TTM */

@@ -1856,7 +1878,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
		dev_info(adev->dev, "GPU reset failed\n");
	}

-	up_write(&adev->exclusive_lock);
	return r;
}

@@ -47,11 +47,8 @@ static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
	fence = to_amdgpu_fence(*f);
	if (fence) {
		r = fence_wait(&fence->base, false);
-		if (r == -EDEADLK) {
-			up_read(&adev->exclusive_lock);
+		if (r == -EDEADLK)
			r = amdgpu_gpu_reset(adev);
-			down_read(&adev->exclusive_lock);
-		}
	} else
		r = fence_wait(*f, false);

@@ -77,7 +74,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
	unsigned long flags;
	unsigned i;

-	down_read(&adev->exclusive_lock);
	amdgpu_flip_wait_fence(adev, &work->excl);
	for (i = 0; i < work->shared_count; ++i)
		amdgpu_flip_wait_fence(adev, &work->shared[i]);

@@ -91,7 +87,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
	amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-	up_read(&adev->exclusive_lock);
}

/*
@@ -715,7 +710,7 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 * an optional accurate timestamp of when query happened.
 *
 * \param dev Device to query.
- * \param crtc Crtc to query.
+ * \param pipe Crtc to query.
 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.

@@ -738,8 +733,10 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 * unknown small number of scanlines wrt. real scanout position.
 *
 */
-int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags,
-			       int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
+int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+			       unsigned int flags, int *vpos, int *hpos,
+			       ktime_t *stime, ktime_t *etime,
+			       const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;

@@ -753,7 +750,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
	if (stime)
		*stime = ktime_get();

-	if (amdgpu_display_page_flip_get_scanoutpos(adev, crtc, &vbl, &position) == 0)
+	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* Get optional system timestamp after query. */

@@ -775,7 +772,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
	}
	else {
		/* No: Fake something reasonable which gives at least ok results. */
-		vbl_start = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
+		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

@@ -791,7 +788,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
-		vtotal = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
+		vtotal = mode->crtc_vtotal;
		*vpos = *vpos - vtotal;
	}

@@ -813,8 +810,8 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
	 */
	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
-		vbl_start = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
-		vtotal = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
+		vbl_start = mode->crtc_vdisplay;
+		vtotal = mode->crtc_vtotal;

		if (vbl_start - *vpos < vtotal / 100) {
			*vpos -= vtotal;
@@ -73,13 +73,15 @@ int amdgpu_hard_reset = 0;
unsigned amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
-int amdgpu_vm_size = 8;
+int amdgpu_vm_size = 64;
int amdgpu_vm_block_size = -1;
+int amdgpu_vm_fault_stop = 0;
+int amdgpu_vm_debug = 0;
int amdgpu_exp_hw_support = 0;
-int amdgpu_enable_scheduler = 0;
+int amdgpu_enable_scheduler = 1;
int amdgpu_sched_jobs = 16;
int amdgpu_sched_hw_submission = 2;
-int amdgpu_enable_semaphores = 1;
+int amdgpu_enable_semaphores = 0;

MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);

@@ -135,16 +137,22 @@ module_param_named(bapm, amdgpu_bapm, int, 0444);
MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
module_param_named(deep_color, amdgpu_deep_color, int, 0444);

-MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 8GB)");
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
module_param_named(vm_size, amdgpu_vm_size, int, 0444);

MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);

+MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
+module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
+
+MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
+module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
+
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);

-MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable, 0 = disable ((default))");
+MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)");
module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);

MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)");

@@ -153,7 +161,7 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);

-MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)");
+MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable, 0 = disable (default))");
module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);

static struct pci_device_id pciidlist[] = {
@@ -265,6 +273,8 @@ static struct pci_device_id pciidlist[] = {
	{0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
	{0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
	{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+	/* stoney */
+	{0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU},

	{0, 0, 0}
 };
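For context on the Stoney hunk above: a pci_device_id table is how the PCI core decides which devices a driver binds to, and the final column (driver_data) carries per-chip flags through to probe. A minimal, self-contained sketch of the same pattern — the toy_* names and flag values here are hypothetical, not amdgpu's:

	#include <linux/module.h>
	#include <linux/pci.h>

	#define TOY_CHIP_EXAMPLE	0x1	/* hypothetical chip enum */
	#define TOY_IS_APU		0x10000	/* hypothetical flag */

	static const struct pci_device_id toy_ids[] = {
		/* vendor, device, subvendor, subdevice, class, class_mask, driver_data */
		{ 0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, TOY_CHIP_EXAMPLE | TOY_IS_APU },
		{ 0, 0, 0 }	/* zero terminator, as in pciidlist above */
	};
	MODULE_DEVICE_TABLE(pci, toy_ids);

	static int toy_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		unsigned long flags = id->driver_data;	/* recovers the CHIP/APU flags */

		return flags ? 0 : -ENODEV;
	}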
@@ -207,6 +207,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
	}

	info->par = rfbdev;
+	info->skip_vt_switch = true;

	ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
	if (ret) {
@@ -136,42 +136,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
	return 0;
 }

-/**
- * amdgpu_fence_check_signaled - callback from fence_queue
- *
- * this function is called with fence_queue lock held, which is also used
- * for the fence locking itself, so unlocked variants are used for
- * fence_signal, and remove_wait_queue.
- */
-static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
-{
-	struct amdgpu_fence *fence;
-	struct amdgpu_device *adev;
-	u64 seq;
-	int ret;
-
-	fence = container_of(wait, struct amdgpu_fence, fence_wake);
-	adev = fence->ring->adev;
-
-	/*
-	 * We cannot use amdgpu_fence_process here because we're already
-	 * in the waitqueue, in a call from wake_up_all.
-	 */
-	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
-	if (seq >= fence->seq) {
-		ret = fence_signal_locked(&fence->base);
-		if (!ret)
-			FENCE_TRACE(&fence->base, "signaled from irq context\n");
-		else
-			FENCE_TRACE(&fence->base, "was already signaled\n");
-
-		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
-		fence_put(&fence->base);
-	} else
-		FENCE_TRACE(&fence->base, "pending\n");
-	return 0;
-}
-
 /**
  * amdgpu_fence_activity - check for fence activity
  *

@@ -260,27 +224,8 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
					  lockup_work.work);
	ring = fence_drv->ring;

-	if (!down_read_trylock(&ring->adev->exclusive_lock)) {
-		/* just reschedule the check if a reset is going on */
-		amdgpu_fence_schedule_check(ring);
-		return;
-	}
-
-	if (amdgpu_fence_activity(ring)) {
+	if (amdgpu_fence_activity(ring))
		wake_up_all(&ring->fence_drv.fence_queue);
-	}
-	else if (amdgpu_ring_is_lockup(ring)) {
-		/* good news we believe it's a lockup */
-		dev_warn(ring->adev->dev, "GPU lockup (current fence id "
-			"0x%016llx last fence id 0x%016llx on ring %d)\n",
-			(uint64_t)atomic64_read(&fence_drv->last_seq),
-			fence_drv->sync_seq[ring->idx], ring->idx);
-
-		/* remember that we need an reset */
-		ring->adev->needs_reset = true;
-		wake_up_all(&ring->fence_drv.fence_queue);
-	}
-	up_read(&ring->adev->exclusive_lock);
 }

 /**

@@ -324,50 +269,6 @@ static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
	return false;
 }

-static bool amdgpu_fence_is_signaled(struct fence *f)
-{
-	struct amdgpu_fence *fence = to_amdgpu_fence(f);
-	struct amdgpu_ring *ring = fence->ring;
-	struct amdgpu_device *adev = ring->adev;
-
-	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
-		return true;
-
-	if (down_read_trylock(&adev->exclusive_lock)) {
-		amdgpu_fence_process(ring);
-		up_read(&adev->exclusive_lock);
-
-		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
-			return true;
-	}
-	return false;
-}
-
-/**
- * amdgpu_fence_enable_signaling - enable signalling on fence
- * @fence: fence
- *
- * This function is called with fence_queue lock held, and adds a callback
- * to fence_queue that checks if this fence is signaled, and if so it
- * signals the fence and removes itself.
- */
-static bool amdgpu_fence_enable_signaling(struct fence *f)
-{
-	struct amdgpu_fence *fence = to_amdgpu_fence(f);
-	struct amdgpu_ring *ring = fence->ring;
-
-	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
-		return false;
-
-	fence->fence_wake.flags = 0;
-	fence->fence_wake.private = NULL;
-	fence->fence_wake.func = amdgpu_fence_check_signaled;
-	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
-	fence_get(f);
-	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
-	return true;
-}
-
 /*
  * amdgpu_ring_wait_seq_timeout - wait for seq of the specific ring to signal
  * @ring: ring to wait on for the seq number

@@ -380,7 +281,6 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
  */
 static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
 {
-	struct amdgpu_device *adev = ring->adev;
	bool signaled = false;

	BUG_ON(!ring);

@@ -390,9 +290,9 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return 0;

+	amdgpu_fence_schedule_check(ring);
	wait_event(ring->fence_drv.fence_queue, (
-		(signaled = amdgpu_fence_seq_signaled(ring, seq))
-		|| adev->needs_reset));
+		(signaled = amdgpu_fence_seq_signaled(ring, seq))));

	if (signaled)
		return 0;

@@ -440,36 +340,6 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
	return amdgpu_fence_ring_wait_seq(ring, seq);
 }

-/**
- * amdgpu_fence_ref - take a ref on a fence
- *
- * @fence: amdgpu fence object
- *
- * Take a reference on a fence (all asics).
- * Returns the fence.
- */
-struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence)
-{
-	fence_get(&fence->base);
-	return fence;
-}
-
-/**
- * amdgpu_fence_unref - remove a ref on a fence
- *
- * @fence: amdgpu fence object
- *
- * Remove a reference on a fence (all asics).
- */
-void amdgpu_fence_unref(struct amdgpu_fence **fence)
-{
-	struct amdgpu_fence *tmp = *fence;
-
-	*fence = NULL;
-	if (tmp)
-		fence_put(&tmp->base);
-}
-
 /**
  * amdgpu_fence_count_emitted - get the count of emitted fences
  *

@@ -628,8 +498,20 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
	init_waitqueue_head(&ring->fence_drv.fence_queue);

	if (amdgpu_enable_scheduler) {
+		long timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
+		if (timeout == 0) {
+			/*
+			 * FIXME:
+			 * Delayed workqueue cannot use it directly,
+			 * so the scheduler will not use delayed workqueue if
+			 * MAX_SCHEDULE_TIMEOUT is set.
+			 * Currently keep it simple and silly.
+			 */
+			timeout = MAX_SCHEDULE_TIMEOUT;
+		}
		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
-				   amdgpu_sched_hw_submission, ring->name);
+				   amdgpu_sched_hw_submission,
+				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);

@@ -773,6 +655,115 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
	}
 }

+/*
+ * Common fence implementation
+ */
+
+static const char *amdgpu_fence_get_driver_name(struct fence *fence)
+{
+	return "amdgpu";
+}
+
+static const char *amdgpu_fence_get_timeline_name(struct fence *f)
+{
+	struct amdgpu_fence *fence = to_amdgpu_fence(f);
+	return (const char *)fence->ring->name;
+}
+
+/**
+ * amdgpu_fence_is_signaled - test if fence is signaled
+ *
+ * @f: fence to test
+ *
+ * Test the fence sequence number if it is already signaled. If it isn't
+ * signaled start fence processing. Returns True if the fence is signaled.
+ */
+static bool amdgpu_fence_is_signaled(struct fence *f)
+{
+	struct amdgpu_fence *fence = to_amdgpu_fence(f);
+	struct amdgpu_ring *ring = fence->ring;
+
+	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
+		return true;
+
+	amdgpu_fence_process(ring);
+
+	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
+		return true;
+
+	return false;
+}
+
+/**
+ * amdgpu_fence_check_signaled - callback from fence_queue
+ *
+ * this function is called with fence_queue lock held, which is also used
+ * for the fence locking itself, so unlocked variants are used for
+ * fence_signal, and remove_wait_queue.
+ */
+static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
+{
+	struct amdgpu_fence *fence;
+	struct amdgpu_device *adev;
+	u64 seq;
+	int ret;
+
+	fence = container_of(wait, struct amdgpu_fence, fence_wake);
+	adev = fence->ring->adev;
+
+	/*
+	 * We cannot use amdgpu_fence_process here because we're already
+	 * in the waitqueue, in a call from wake_up_all.
+	 */
+	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
+	if (seq >= fence->seq) {
+		ret = fence_signal_locked(&fence->base);
+		if (!ret)
+			FENCE_TRACE(&fence->base, "signaled from irq context\n");
+		else
+			FENCE_TRACE(&fence->base, "was already signaled\n");
+
+		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
+		fence_put(&fence->base);
+	} else
+		FENCE_TRACE(&fence->base, "pending\n");
+	return 0;
+}
+
+/**
+ * amdgpu_fence_enable_signaling - enable signalling on fence
+ * @fence: fence
+ *
+ * This function is called with fence_queue lock held, and adds a callback
+ * to fence_queue that checks if this fence is signaled, and if so it
+ * signals the fence and removes itself.
+ */
+static bool amdgpu_fence_enable_signaling(struct fence *f)
+{
+	struct amdgpu_fence *fence = to_amdgpu_fence(f);
+	struct amdgpu_ring *ring = fence->ring;
+
+	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
+		return false;
+
+	fence->fence_wake.flags = 0;
+	fence->fence_wake.private = NULL;
+	fence->fence_wake.func = amdgpu_fence_check_signaled;
+	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
+	fence_get(f);
+	amdgpu_fence_schedule_check(ring);
+	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+	return true;
+}
+
+const struct fence_ops amdgpu_fence_ops = {
+	.get_driver_name = amdgpu_fence_get_driver_name,
+	.get_timeline_name = amdgpu_fence_get_timeline_name,
+	.enable_signaling = amdgpu_fence_enable_signaling,
+	.signaled = amdgpu_fence_is_signaled,
+	.wait = fence_default_wait,
+	.release = NULL,
+};
+
 /*
  * Fence debugfs

@@ -823,141 +814,3 @@ int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
 #endif
 }
-
-static const char *amdgpu_fence_get_driver_name(struct fence *fence)
-{
-	return "amdgpu";
-}
-
-static const char *amdgpu_fence_get_timeline_name(struct fence *f)
-{
-	struct amdgpu_fence *fence = to_amdgpu_fence(f);
-	return (const char *)fence->ring->name;
-}
-
-static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
-{
-	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
-}
-
-static bool amdgpu_test_signaled_any(struct fence **fences, uint32_t count)
-{
-	int idx;
-	struct fence *fence;
-
-	for (idx = 0; idx < count; ++idx) {
-		fence = fences[idx];
-		if (fence) {
-			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-				return true;
-		}
-	}
-	return false;
-}
-
-struct amdgpu_wait_cb {
-	struct fence_cb base;
-	struct task_struct *task;
-};
-
-static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
-{
-	struct amdgpu_wait_cb *wait =
-		container_of(cb, struct amdgpu_wait_cb, base);
-	wake_up_process(wait->task);
-}
-
-static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
-					     signed long t)
-{
-	struct amdgpu_fence *fence = to_amdgpu_fence(f);
-	struct amdgpu_device *adev = fence->ring->adev;
-
-	return amdgpu_fence_wait_any(adev, &f, 1, intr, t);
-}
-
-/**
- * Wait the fence array with timeout
- *
- * @adev:     amdgpu device
- * @array:    the fence array with amdgpu fence pointer
- * @count:    the number of the fence array
- * @intr:     when sleep, set the current task interruptable or not
- * @t:        timeout to wait
- *
- * It will return when any fence is signaled or timeout.
- */
-signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
-				  struct fence **array, uint32_t count,
-				  bool intr, signed long t)
-{
-	struct amdgpu_wait_cb *cb;
-	struct fence *fence;
-	unsigned idx;
-
-	BUG_ON(!array);
-
-	cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL);
-	if (cb == NULL) {
-		t = -ENOMEM;
-		goto err_free_cb;
-	}
-
-	for (idx = 0; idx < count; ++idx) {
-		fence = array[idx];
-		if (fence) {
-			cb[idx].task = current;
-			if (fence_add_callback(fence,
-					&cb[idx].base, amdgpu_fence_wait_cb)) {
-				/* The fence is already signaled */
-				goto fence_rm_cb;
-			}
-		}
-	}
-
-	while (t > 0) {
-		if (intr)
-			set_current_state(TASK_INTERRUPTIBLE);
-		else
-			set_current_state(TASK_UNINTERRUPTIBLE);
-
-		/*
-		 * amdgpu_test_signaled_any must be called after
-		 * set_current_state to prevent a race with wake_up_process
-		 */
-		if (amdgpu_test_signaled_any(array, count))
-			break;
-
-		if (adev->needs_reset) {
-			t = -EDEADLK;
-			break;
-		}
-
-		t = schedule_timeout(t);
-
-		if (t > 0 && intr && signal_pending(current))
-			t = -ERESTARTSYS;
-	}
-
-	__set_current_state(TASK_RUNNING);
-
-fence_rm_cb:
-	for (idx = 0; idx < count; ++idx) {
-		fence = array[idx];
-		if (fence && cb[idx].base.func)
-			fence_remove_callback(fence, &cb[idx].base);
-	}
-
-err_free_cb:
-	kfree(cb);
-
-	return t;
-}
-
-const struct fence_ops amdgpu_fence_ops = {
-	.get_driver_name = amdgpu_fence_get_driver_name,
-	.get_timeline_name = amdgpu_fence_get_timeline_name,
-	.enable_signaling = amdgpu_fence_enable_signaling,
-	.signaled = amdgpu_fence_is_signaled,
-	.wait = amdgpu_fence_default_wait,
-	.release = NULL,
-};
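The fence rework above comes down to one idea: drop the private amdgpu_fence_ref/unref helpers and the hand-rolled wait loop, and let the cross-driver struct fence core do refcounting, waiting and signaling through a fence_ops table. A minimal sketch of what a driver supplies under that contract — the toy_* names are hypothetical and the hardware query is stubbed out:

	#include <linux/fence.h>	/* struct fence / fence_ops in this kernel era */

	struct toy_fence {
		struct fence base;	/* refcounted by fence_get()/fence_put() */
		u64 seq;		/* driver-side sequence number */
	};

	static u64 toy_completed_seq;	/* stand-in for the hardware's last fence */

	static const char *toy_get_driver_name(struct fence *f)
	{
		return "toy";
	}

	static const char *toy_get_timeline_name(struct fence *f)
	{
		return "toy-ring0";
	}

	static bool toy_signaled(struct fence *f)
	{
		struct toy_fence *tf = container_of(f, struct toy_fence, base);

		return READ_ONCE(toy_completed_seq) >= tf->seq;
	}

	static bool toy_enable_signaling(struct fence *f)
	{
		/* arm an interrupt here; returning false means "already done" */
		return !toy_signaled(f);
	}

	static const struct fence_ops toy_fence_ops = {
		.get_driver_name   = toy_get_driver_name,
		.get_timeline_name = toy_get_timeline_name,
		.enable_signaling  = toy_enable_signaling,
		.signaled          = toy_signaled,
		.wait              = fence_default_wait,	/* the core wait amdgpu switched to */
		.release           = NULL,			/* default release of the fence */
	};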
@@ -115,9 +115,10 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;
-
+	mutex_lock(&vm->mutex);
	r = amdgpu_bo_reserve(rbo, false);
	if (r) {
+		mutex_unlock(&vm->mutex);
		return r;
	}

@@ -128,7 +129,7 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(rbo);
-
+	mutex_unlock(&vm->mutex);
	return 0;
 }

@@ -141,9 +142,10 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;
-
+	mutex_lock(&vm->mutex);
	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
+		mutex_unlock(&vm->mutex);
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;

@@ -155,6 +157,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
		}
	}
	amdgpu_bo_unreserve(rbo);
+	mutex_unlock(&vm->mutex);
 }

 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)

@@ -181,7 +184,6 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
	bool kernel = false;
	int r;

-	down_read(&adev->exclusive_lock);
	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {

@@ -214,11 +216,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
-	up_read(&adev->exclusive_lock);
	return 0;

 error_unlock:
-	up_read(&adev->exclusive_lock);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
 }

@@ -250,8 +250,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
		return -EACCES;
	}

-	down_read(&adev->exclusive_lock);
-
	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,

@@ -293,14 +291,12 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
		goto handle_lockup;

	args->handle = handle;
-	up_read(&adev->exclusive_lock);
	return 0;

 release_object:
	drm_gem_object_unreference_unlocked(gobj);

 handle_lockup:
-	up_read(&adev->exclusive_lock);
	r = amdgpu_gem_handle_lockup(adev, r);

	return r;

@@ -488,18 +484,13 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
		goto error_unreserve;
	}

-	mutex_lock(&bo_va->vm->mutex);
	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
	if (r)
-		goto error_unlock;
+		goto error_unreserve;

	if (operation == AMDGPU_VA_OP_MAP)
		r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

-error_unlock:
-	mutex_unlock(&bo_va->vm->mutex);
-
 error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

@@ -556,10 +547,11 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
-
+	mutex_lock(&fpriv->vm.mutex);
	rbo = gem_to_amdgpu_bo(gobj);
	r = amdgpu_bo_reserve(rbo, false);
	if (r) {
+		mutex_unlock(&fpriv->vm.mutex);
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}

@@ -567,6 +559,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		amdgpu_bo_unreserve(rbo);
+		mutex_unlock(&fpriv->vm.mutex);
		return -ENOENT;
	}

@@ -591,7 +584,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,

	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
-
+	mutex_unlock(&fpriv->vm.mutex);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
 }
@@ -95,7 +95,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
 {
	amdgpu_sync_free(adev, &ib->sync, &ib->fence->base);
	amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base);
-	amdgpu_fence_unref(&ib->fence);
+	if (ib->fence)
+		fence_put(&ib->fence->base);
 }

 /**

@@ -298,7 +299,6 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
		r = amdgpu_ring_test_ib(ring);
		if (r) {
			ring->ready = false;
-			adev->needs_reset = false;

			if (ring == &adev->gfx.gfx_ring[0]) {
				/* oh, oh, that's really bad */
@@ -218,8 +218,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
-		ring_mask = adev->sdma[0].ring.ready ? 1 : 0;
-		ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1);
+		for (i = 0; i < adev->sdma.num_instances; i++)
+			ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
		ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
		ib_size_alignment = 1;
		break;

@@ -341,10 +341,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
		fw_info.feature = 0;
		break;
	case AMDGPU_INFO_FW_SDMA:
-		if (info->query_fw.index >= 2)
+		if (info->query_fw.index >= adev->sdma.num_instances)
			return -EINVAL;
-		fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
-		fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
+		fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
+		fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
		break;
	default:
		return -EINVAL;

@@ -489,7 +489,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 *
 * @dev: drm dev pointer
 *
- * Switch vga switcheroo state after last close (all asics).
+ * Switch vga_switcheroo state after last close (all asics).
 */
 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
 {

@@ -603,36 +603,36 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev,
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
- * @crtc: crtc to get the frame count from
+ * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc)
+u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
 {
	struct amdgpu_device *adev = dev->dev_private;

-	if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
-		DRM_ERROR("Invalid crtc %d\n", crtc);
+	if (pipe >= adev->mode_info.num_crtc) {
+		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

-	return amdgpu_display_vblank_get_counter(adev, crtc);
+	return amdgpu_display_vblank_get_counter(adev, pipe);
 }

 /**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
- * @crtc: crtc to enable vblank interrupt for
+ * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
-int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc)
+int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
 {
	struct amdgpu_device *adev = dev->dev_private;
-	int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+	int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
 }

@@ -641,14 +641,14 @@ int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc)
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
- * @crtc: crtc to disable vblank interrupt for
+ * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
-void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc)
+void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
 {
	struct amdgpu_device *adev = dev->dev_private;
-	int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+	int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
 }

@@ -666,41 +666,41 @@ void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc)
 * scanout position. (all asics).
 * Returns postive status flags on success, negative error on failure.
 */
-int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags)
 {
-	struct drm_crtc *drmcrtc;
+	struct drm_crtc *crtc;
	struct amdgpu_device *adev = dev->dev_private;

-	if (crtc < 0 || crtc >= dev->num_crtcs) {
-		DRM_ERROR("Invalid crtc %d\n", crtc);
+	if (pipe >= dev->num_crtcs) {
+		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get associated drm_crtc: */
-	drmcrtc = &adev->mode_info.crtcs[crtc]->base;
+	crtc = &adev->mode_info.crtcs[pipe]->base;

	/* Helper routine in DRM core does all the work: */
-	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
-						     drmcrtc, &drmcrtc->hwmode);
+						     &crtc->hwmode);
 }

 const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
-	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
-	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 };
 int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
@@ -373,6 +373,10 @@ struct amdgpu_crtc {
	uint32_t crtc_offset;
	struct drm_gem_object *cursor_bo;
	uint64_t cursor_addr;
+	int cursor_x;
+	int cursor_y;
+	int cursor_hot_x;
+	int cursor_hot_y;
	int cursor_width;
	int cursor_height;
	int max_cursor_width;

@@ -540,10 +544,10 @@ bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector, bool use_aux);

 void amdgpu_encoder_set_active_device(struct drm_encoder *encoder);

-int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
-			       unsigned int flags,
-			       int *vpos, int *hpos, ktime_t *stime,
-			       ktime_t *etime);
+int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+			       unsigned int flags, int *vpos, int *hpos,
+			       ktime_t *stime, ktime_t *etime,
+			       const struct drm_display_mode *mode);

 int amdgpu_framebuffer_init(struct drm_device *dev,
			    struct amdgpu_framebuffer *rfb,
@@ -132,6 +132,8 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
+		if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
+			placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
@@ -67,8 +67,6 @@ void amdgpu_ring_free_size(struct amdgpu_ring *ring)
	if (!ring->ring_free_dw) {
		/* this is an empty ring */
		ring->ring_free_dw = ring->ring_size / 4;
-		/* update lockup info to avoid false positive */
-		amdgpu_ring_lockup_update(ring);
	}
 }

@@ -208,46 +206,6 @@ void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring)
	mutex_unlock(ring->ring_lock);
 }

-/**
- * amdgpu_ring_lockup_update - update lockup variables
- *
- * @ring: amdgpu_ring structure holding ring information
- *
- * Update the last rptr value and timestamp (all asics).
- */
-void amdgpu_ring_lockup_update(struct amdgpu_ring *ring)
-{
-	atomic_set(&ring->last_rptr, amdgpu_ring_get_rptr(ring));
-	atomic64_set(&ring->last_activity, jiffies_64);
-}
-
-/**
- * amdgpu_ring_test_lockup() - check if ring is lockedup by recording information
- * @ring: amdgpu_ring structure holding ring information
- *
- */
-bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring)
-{
-	uint32_t rptr = amdgpu_ring_get_rptr(ring);
-	uint64_t last = atomic64_read(&ring->last_activity);
-	uint64_t elapsed;
-
-	if (rptr != atomic_read(&ring->last_rptr)) {
-		/* ring is still working, no lockup */
-		amdgpu_ring_lockup_update(ring);
-		return false;
-	}
-
-	elapsed = jiffies_to_msecs(jiffies_64 - last);
-	if (amdgpu_lockup_timeout && elapsed >= amdgpu_lockup_timeout) {
-		dev_err(ring->adev->dev, "ring %d stalled for more than %llumsec\n",
-			ring->idx, elapsed);
-		return true;
-	}
-	/* give a chance to the GPU ... */
-	return false;
-}
-
 /**
  * amdgpu_ring_backup - Back up the content of a ring
  *

@@ -436,7 +394,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
	if (amdgpu_debugfs_ring_init(adev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings !\n");
	}
-	amdgpu_ring_lockup_update(ring);
	return 0;
 }

@@ -479,6 +436,30 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
	}
 }

+/**
+ * amdgpu_ring_from_fence - get ring from fence
+ *
+ * @f: fence structure
+ *
+ * Extract the ring a fence belongs to. Handles both scheduler as
+ * well as hardware fences.
+ */
+struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f)
+{
+	struct amdgpu_fence *a_fence;
+	struct amd_sched_fence *s_fence;
+
+	s_fence = to_amd_sched_fence(f);
+	if (s_fence)
+		return container_of(s_fence->sched, struct amdgpu_ring, sched);
+
+	a_fence = to_amdgpu_fence(f);
+	if (a_fence)
+		return a_fence->ring;
+
+	return NULL;
+}
+
 /*
  * Debugfs info
  */

@@ -540,8 +521,8 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
 static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
 static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
 static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
-static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma[0].ring);
-static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma[1].ring);
+static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring);
+static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring);
 static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
 static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
 static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);
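amdgpu_ring_from_fence above works because the scheduler instance is embedded inside struct amdgpu_ring, so container_of() can walk from the member back to its enclosing object. A generic sketch of that recovery, with hypothetical types:

	#include <linux/kernel.h>	/* container_of() */

	struct toy_sched {
		int dummy;
	};

	struct toy_ring {
		int idx;
		struct toy_sched sched;	/* embedded, like amdgpu_ring::sched */
	};

	static struct toy_ring *toy_ring_from_sched(struct toy_sched *s)
	{
		/* subtracts offsetof(struct toy_ring, sched) from the pointer */
		return container_of(s, struct toy_ring, sched);
	}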
@@ -139,25 +139,6 @@ int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
	return r;
 }

-static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
-{
-	struct amdgpu_fence *a_fence;
-	struct amd_sched_fence *s_fence;
-
-	s_fence = to_amd_sched_fence(f);
-	if (s_fence) {
-		struct amdgpu_ring *ring;
-
-		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
-		return ring->idx;
-	}
-
-	a_fence = to_amdgpu_fence(f);
-	if (a_fence)
-		return a_fence->ring->idx;
-	return 0;
-}
-
 static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
 {
	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;

@@ -318,7 +299,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
	}

	if (best_bo) {
-		uint32_t idx = amdgpu_sa_get_ring_from_fence(best_bo->fence);
+		uint32_t idx = amdgpu_ring_from_fence(best_bo->fence)->idx;
		++tries[idx];
		sa_manager->hole = best_bo->olist.prev;

@@ -337,6 +318,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 {
	struct fence *fences[AMDGPU_MAX_RINGS];
	unsigned tries[AMDGPU_MAX_RINGS];
+	unsigned count;
	int i, r;
	signed long t;

@@ -371,13 +353,18 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
			/* see if we can skip over some allocations */
		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

-		spin_unlock(&sa_manager->wq.lock);
-		t = amdgpu_fence_wait_any(adev, fences, AMDGPU_MAX_RINGS,
-					  false, MAX_SCHEDULE_TIMEOUT);
-		r = (t > 0) ? 0 : t;
-		spin_lock(&sa_manager->wq.lock);
-		/* if we have nothing to wait for block */
-		if (r == -ENOENT) {
+		for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
+			if (fences[i])
+				fences[count++] = fences[i];
+
+		if (count) {
+			spin_unlock(&sa_manager->wq.lock);
+			t = fence_wait_any_timeout(fences, count, false,
+						   MAX_SCHEDULE_TIMEOUT);
+			r = (t > 0) ? 0 : t;
+			spin_lock(&sa_manager->wq.lock);
+		} else {
+			/* if we have nothing to wait for block */
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				amdgpu_sa_event(sa_manager, size, align)

@@ -406,7 +393,7 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
	if (fence && !fence_is_signaled(fence)) {
		uint32_t idx;
		(*sa_bo)->fence = fence_get(fence);
-		idx = amdgpu_sa_get_ring_from_fence(fence);
+		idx = amdgpu_ring_from_fence(fence)->idx;
		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
	} else {
		amdgpu_sa_bo_remove_locked(*sa_bo);
@@ -54,7 +54,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
		goto err;
	}

-	fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence);
+	fence = job->ibs[job->num_ibs - 1].fence;
+	fence_get(&fence->base);

 err:
	if (job->free_job)
@@ -87,6 +87,15 @@ static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
	return false;
 }

+static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
+{
+	if (*keep && fence_is_later(*keep, fence))
+		return;
+
+	fence_put(*keep);
+	*keep = fence_get(fence);
+}
+
 /**
  * amdgpu_sync_fence - remember to sync to this fence
  *

@@ -99,35 +108,21 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 {
	struct amdgpu_sync_entry *e;
	struct amdgpu_fence *fence;
-	struct amdgpu_fence *other;
-	struct fence *tmp, *later;

	if (!f)
		return 0;

	if (amdgpu_sync_same_dev(adev, f) &&
-	    amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM)) {
-		if (sync->last_vm_update) {
-			tmp = sync->last_vm_update;
-			BUG_ON(f->context != tmp->context);
-			later = (f->seqno - tmp->seqno <= INT_MAX) ? f : tmp;
-			sync->last_vm_update = fence_get(later);
-			fence_put(tmp);
-		} else
-			sync->last_vm_update = fence_get(f);
-	}
+	    amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM))
+		amdgpu_sync_keep_later(&sync->last_vm_update, f);

	fence = to_amdgpu_fence(f);
	if (!fence || fence->ring->adev != adev) {
		hash_for_each_possible(sync->fences, e, node, f->context) {
-			struct fence *new;
			if (unlikely(e->fence->context != f->context))
				continue;
-			new = fence_get(fence_later(e->fence, f));
-			if (new) {
-				fence_put(e->fence);
-				e->fence = new;
-			}
+
+			amdgpu_sync_keep_later(&e->fence, f);
			return 0;
		}

@@ -140,10 +135,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		return 0;
	}

-	other = sync->sync_to[fence->ring->idx];
-	sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
-		amdgpu_fence_later(fence, other));
-	amdgpu_fence_unref(&other);
+	amdgpu_sync_keep_later(&sync->sync_to[fence->ring->idx], f);

	return 0;
 }

@@ -199,8 +191,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
	 * for other VM updates and moves.
	 */
	fence_owner = amdgpu_sync_get_owner(f);
-	if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
-	    (fence_owner != AMDGPU_FENCE_OWNER_MOVE) &&
+	if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
+	    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
	    ((owner == AMDGPU_FENCE_OWNER_VM) !=
	     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
		continue;

@@ -262,11 +254,11 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		struct amdgpu_fence *fence = sync->sync_to[i];
+		struct fence *fence = sync->sync_to[i];
		if (!fence)
			continue;

-		r = fence_wait(&fence->base, false);
+		r = fence_wait(fence, false);
		if (r)
			return r;
	}

@@ -291,9 +283,14 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		struct amdgpu_fence *fence = sync->sync_to[i];
-		struct amdgpu_semaphore *semaphore;
		struct amdgpu_ring *other = adev->rings[i];
+		struct amdgpu_semaphore *semaphore;
+		struct amdgpu_fence *fence;
+
+		if (!sync->sync_to[i])
+			continue;
+
+		fence = to_amdgpu_fence(sync->sync_to[i]);

		/* check if we really need to sync */
		if (!amdgpu_fence_need_sync(fence, ring))

@@ -378,7 +375,7 @@ void amdgpu_sync_free(struct amdgpu_device *adev,
		amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		amdgpu_fence_unref(&sync->sync_to[i]);
+		fence_put(sync->sync_to[i]);

	fence_put(sync->last_vm_update);
 }
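The recurring amdgpu_sync_keep_later() calls above all implement one policy: of two fences on the same timeline, keep a reference only to the one that signals later (fence_is_later() compares sequence numbers within a single fence context, which is why the hash bucket is keyed by f->context). A hedged usage sketch — toy_keep_later mirrors the helper, the accumulation loop is hypothetical:

	#include <linux/fence.h>

	static void toy_keep_later(struct fence **keep, struct fence *fence)
	{
		/* only meaningful when both fences share a fence context */
		if (*keep && fence_is_later(*keep, fence))
			return;

		fence_put(*keep);		/* drop the superseded fence */
		*keep = fence_get(fence);	/* hold exactly one reference */
	}

	static struct fence *toy_latest_of(struct fence **fences, unsigned n)
	{
		struct fence *latest = NULL;
		unsigned i;

		for (i = 0; i < n; ++i)
			if (fences[i])
				toy_keep_later(&latest, fences[i]);

		return latest;	/* caller owns one reference, or NULL */
	}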
@@ -111,7 +111,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
		      __entry->offset, __entry->flags)
 );

-TRACE_EVENT(amdgpu_vm_bo_update,
+DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping),
	    TP_STRUCT__entry(

@@ -129,6 +129,16 @@ TRACE_EVENT(amdgpu_vm_bo_update,
		      __entry->soffset, __entry->eoffset, __entry->flags)
 );

+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_update,
+	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+	    TP_ARGS(mapping)
+);
+
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
+	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+	    TP_ARGS(mapping)
+);
+
 TRACE_EVENT(amdgpu_vm_set_page,
	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
		     uint32_t incr, uint32_t flags),
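The trace.h change uses the standard tracepoint refactor: DECLARE_EVENT_CLASS() defines the record layout and print format once, and each DEFINE_EVENT() stamps out a named tracepoint that shares it, which is cheaper than two full TRACE_EVENT() expansions. Each defined event also generates a trace_<name>_enabled() static-key test, which is what the amdgpu_vm_bo_update hunk below guards its mapping walk with. A minimal sketch with hypothetical event names (the usual trace-header boilerplate around it is omitted):

	DECLARE_EVENT_CLASS(toy_mapping,
		TP_PROTO(u64 start, u64 last),
		TP_ARGS(start, last),
		TP_STRUCT__entry(
			__field(u64, start)
			__field(u64, last)
		),
		TP_fast_assign(
			__entry->start = start;
			__entry->last = last;
		),
		TP_printk("start=%llu last=%llu", __entry->start, __entry->last)
	);

	/* two named events, one shared layout */
	DEFINE_EVENT(toy_mapping, toy_mapping_map,
		TP_PROTO(u64 start, u64 last),
		TP_ARGS(start, last)
	);

	DEFINE_EVENT(toy_mapping, toy_mapping_unmap,
		TP_PROTO(u64 start, u64 last),
		TP_ARGS(start, last)
	);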
@@ -1041,7 +1041,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
	WARN_ON(ib->length_dw > num_dw);
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vm_free_job,
-						 AMDGPU_FENCE_OWNER_MOVE,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 fence);
	if (r)
		goto error_free;

@@ -1072,6 +1072,11 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
+	if (ttm_pl == TTM_PL_VRAM)
+		seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n",
+			   adev->mman.bdev.man[ttm_pl].size,
+			   atomic64_read(&adev->vram_usage) >> 20,
+			   atomic64_read(&adev->vram_vis_usage) >> 20);
	return ret;
 }
@@ -53,6 +53,7 @@
 #define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin"
 #define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin"
 #define FIRMWARE_FIJI "amdgpu/fiji_uvd.bin"
+#define FIRMWARE_STONEY "amdgpu/stoney_uvd.bin"

 /**
  * amdgpu_uvd_cs_ctx - Command submission parser context

@@ -83,6 +84,7 @@ MODULE_FIRMWARE(FIRMWARE_MULLINS);
 MODULE_FIRMWARE(FIRMWARE_TONGA);
 MODULE_FIRMWARE(FIRMWARE_CARRIZO);
 MODULE_FIRMWARE(FIRMWARE_FIJI);
+MODULE_FIRMWARE(FIRMWARE_STONEY);

 static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

@@ -124,6 +126,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
+	case CHIP_STONEY:
+		fw_name = FIRMWARE_STONEY;
+		break;
	default:
		return -EINVAL;
	}
@@ -49,6 +49,7 @@
 #define FIRMWARE_TONGA "amdgpu/tonga_vce.bin"
 #define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin"
 #define FIRMWARE_FIJI "amdgpu/fiji_vce.bin"
+#define FIRMWARE_STONEY "amdgpu/stoney_vce.bin"

 #ifdef CONFIG_DRM_AMDGPU_CIK
 MODULE_FIRMWARE(FIRMWARE_BONAIRE);

@@ -60,6 +61,7 @@ MODULE_FIRMWARE(FIRMWARE_MULLINS);
 MODULE_FIRMWARE(FIRMWARE_TONGA);
 MODULE_FIRMWARE(FIRMWARE_CARRIZO);
 MODULE_FIRMWARE(FIRMWARE_FIJI);
+MODULE_FIRMWARE(FIRMWARE_STONEY);

 static void amdgpu_vce_idle_work_handler(struct work_struct *work);

@@ -106,6 +108,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
+	case CHIP_STONEY:
+		fw_name = FIRMWARE_STONEY;
+		break;

	default:
		return -EINVAL;
@@ -90,11 +90,9 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
	struct amdgpu_bo_list_entry *list;
	unsigned i, idx;

-	mutex_lock(&vm->mutex);
	list = drm_malloc_ab(vm->max_pde_used + 2,
			     sizeof(struct amdgpu_bo_list_entry));
	if (!list) {
-		mutex_unlock(&vm->mutex);
		return NULL;
	}

@@ -119,7 +117,6 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
		list[idx].tv.shared = true;
		list_add(&list[idx++].tv.head, head);
	}
-	mutex_unlock(&vm->mutex);

	return list;
 }

@@ -138,7 +135,7 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync)
 {
-	struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
+	struct fence *best[AMDGPU_MAX_RINGS] = {};
	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
	struct amdgpu_device *adev = ring->adev;

@@ -147,15 +144,18 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,

	/* check if the id is still valid */
	if (vm_id->id && vm_id->last_id_use &&
-	    vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
+	    vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) {
+		trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
		return 0;
+	}

	/* we definately need to flush */
	vm_id->pd_gpu_addr = ~0ll;

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < adev->vm_manager.nvm; ++i) {
-		struct amdgpu_fence *fence = adev->vm_manager.active[i];
+		struct fence *fence = adev->vm_manager.active[i];
+		struct amdgpu_ring *fring;

		if (fence == NULL) {
			/* found a free one */

@@ -164,21 +164,23 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
			return 0;
		}

-		if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
-			best[fence->ring->idx] = fence;
-			choices[fence->ring == ring ? 0 : 1] = i;
+		fring = amdgpu_ring_from_fence(fence);
+		if (best[fring->idx] == NULL ||
+		    fence_is_later(best[fring->idx], fence)) {
+			best[fring->idx] = fence;
+			choices[fring == ring ? 0 : 1] = i;
		}
	}

	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
-			struct amdgpu_fence *fence;
+			struct fence *fence;

			fence = adev->vm_manager.active[choices[i]];
			vm_id->id = choices[i];

+			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
-			return amdgpu_sync_fence(ring->adev, sync, &fence->base);
+			return amdgpu_sync_fence(ring->adev, sync, fence);
		}
	}

@@ -247,11 +249,11 @@ void amdgpu_vm_fence(struct amdgpu_device *adev,
	unsigned ridx = fence->ring->idx;
	unsigned vm_id = vm->ids[ridx].id;

-	amdgpu_fence_unref(&adev->vm_manager.active[vm_id]);
-	adev->vm_manager.active[vm_id] = amdgpu_fence_ref(fence);
+	fence_put(adev->vm_manager.active[vm_id]);
+	adev->vm_manager.active[vm_id] = fence_get(&fence->base);

-	amdgpu_fence_unref(&vm->ids[ridx].last_id_use);
-	vm->ids[ridx].last_id_use = amdgpu_fence_ref(fence);
+	fence_put(vm->ids[ridx].last_id_use);
+	vm->ids[ridx].last_id_use = fence_get(&fence->base);
 }

 /**

@@ -852,6 +854,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
		return r;
	}

+	if (trace_amdgpu_vm_bo_mapping_enabled()) {
+		list_for_each_entry(mapping, &bo_va->valids, list)
+			trace_amdgpu_vm_bo_mapping(mapping);
+
+		list_for_each_entry(mapping, &bo_va->invalids, list)
+			trace_amdgpu_vm_bo_mapping(mapping);
+	}
+
	spin_lock(&vm->status_lock);
	list_splice_init(&bo_va->invalids, &bo_va->valids);
	list_del_init(&bo_va->vm_status);

@@ -962,9 +972,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
	INIT_LIST_HEAD(&bo_va->invalids);
	INIT_LIST_HEAD(&bo_va->vm_status);

-	mutex_lock(&vm->mutex);
	list_add_tail(&bo_va->bo_list, &bo->va);
-	mutex_unlock(&vm->mutex);

	return bo_va;
 }

@@ -1017,8 +1025,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		return -EINVAL;
	}

-	mutex_lock(&vm->mutex);
-
	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

@@ -1032,14 +1038,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
			tmp->it.start, tmp->it.last + 1);
		amdgpu_bo_unreserve(bo_va->bo);
		r = -EINVAL;
-		goto error_unlock;
+		goto error;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		amdgpu_bo_unreserve(bo_va->bo);
		r = -ENOMEM;
-		goto error_unlock;
+		goto error;
	}

	INIT_LIST_HEAD(&mapping->list);

@@ -1071,9 +1077,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		if (vm->page_tables[pt_idx].bo)
			continue;

-		/* drop mutex to allocate and clear page table */
-		mutex_unlock(&vm->mutex);
-
		ww_mutex_lock(&resv->lock, NULL);
		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
				     AMDGPU_GPU_PAGE_SIZE, true,

@@ -1090,32 +1093,19 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
			goto error_free;
		}

-		/* aquire mutex again */
-		mutex_lock(&vm->mutex);
		if (vm->page_tables[pt_idx].bo) {
			/* someone else allocated the pt in the meantime */
-			mutex_unlock(&vm->mutex);
			amdgpu_bo_unref(&pt);
-			mutex_lock(&vm->mutex);
			continue;
		}

		vm->page_tables[pt_idx].addr = 0;
		vm->page_tables[pt_idx].bo = pt;
	}

-	mutex_unlock(&vm->mutex);
	return 0;

 error_free:
-	mutex_lock(&vm->mutex);
	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
	kfree(mapping);

-error_unlock:
-	mutex_unlock(&vm->mutex);
+error:
	return r;
 }

@@ -1160,7 +1150,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		}
	}

-	mutex_lock(&vm->mutex);
	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

@@ -1169,7 +1158,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		list_add(&mapping->list, &vm->freed);
	else
		kfree(mapping);
-	mutex_unlock(&vm->mutex);
	amdgpu_bo_unreserve(bo_va->bo);

	return 0;

@@ -1193,8 +1181,6 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,

	list_del(&bo_va->bo_list);

-	mutex_lock(&vm->mutex);
-
	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

@@ -1213,8 +1199,6 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,

	fence_put(bo_va->last_pt_update);
	kfree(bo_va);
-
-	mutex_unlock(&vm->mutex);
 }

 /**

@@ -1332,7 +1316,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		fence_put(vm->ids[i].flushed_updates);
-		amdgpu_fence_unref(&vm->ids[i].last_id_use);
+		fence_put(vm->ids[i].last_id_use);
	}

	mutex_destroy(&vm->mutex);
@@ -685,6 +685,27 @@ static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
	}
 }

+static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint64_t val64;
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	SDEBUG("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	SDEBUG("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	if (src != 0) {
+		val64 = dst;
+		val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
+		do_div(val64, src);
+		ctx->ctx->divmul[0] = lower_32_bits(val64);
+		ctx->ctx->divmul[1] = upper_32_bits(val64);
+	} else {
+		ctx->ctx->divmul[0] = 0;
+		ctx->ctx->divmul[1] = 0;
+	}
+}
+
 static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
 {
	/* functionally, a nop */

@@ -788,6 +809,20 @@ static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
	ctx->ctx->divmul[0] = dst * src;
 }

+static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint64_t val64;
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	SDEBUG("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	SDEBUG("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	val64 = (uint64_t)dst * (uint64_t)src;
+	ctx->ctx->divmul[0] = lower_32_bits(val64);
+	ctx->ctx->divmul[1] = upper_32_bits(val64);
+}
+
 static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
 {
	/* nothing */

@@ -1022,7 +1057,15 @@ static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)

 static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
 {
-	printk(KERN_INFO "unimplemented!\n");
+	uint8_t val = U8((*ptr)++);
+	SDEBUG("DEBUG output: 0x%02X\n", val);
+}
+
+static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint16_t val = U16(*ptr);
+	(*ptr) += val + 2;
+	SDEBUG("PROCESSDS output: 0x%02X\n", val);
 }

 static struct {

@@ -1151,7 +1194,13 @@ static struct {
	atom_op_shr, ATOM_ARG_FB}, {
	atom_op_shr, ATOM_ARG_PLL}, {
	atom_op_shr, ATOM_ARG_MC}, {
-	atom_op_debug, 0},};
+	atom_op_debug, 0}, {
+	atom_op_processds, 0}, {
+	atom_op_mul32, ATOM_ARG_PS}, {
+	atom_op_mul32, ATOM_ARG_WS}, {
+	atom_op_div32, ATOM_ARG_PS}, {
+	atom_op_div32, ATOM_ARG_WS},
+};

 static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
 {
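The DIV32 opcode above treats ATOM's two 32-bit divmul registers as one 64-bit value: the dividend is (divmul[1] << 32) | dst, and do_div() divides a u64 in place, leaving the quotient in the variable and returning the remainder. A standalone sketch of the same arithmetic with a hypothetical helper name:

	#include <linux/kernel.h>	/* lower_32_bits()/upper_32_bits() */
	#include <asm/div64.h>		/* do_div() */

	static void toy_div32(u32 dst, u32 hi, u32 src, u32 out[2])
	{
		u64 val64 = ((u64)hi << 32) | dst;	/* 64-bit dividend */

		if (src != 0) {
			do_div(val64, src);		/* quotient stays in val64 */
			out[0] = lower_32_bits(val64);
			out[1] = upper_32_bits(val64);
		} else {
			out[0] = 0;			/* mirror the opcode's divide-by-zero result */
			out[1] = 0;
		}
	}

For instance, toy_div32(0x00000000, 0x1, 0x10, out) divides 0x100000000 by 16 and leaves 0x10000000 in out[0] and 0 in out[1].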
@@ -60,7 +60,7 @@
 #define ATOM_CT_PS_MASK 0x7F
 #define ATOM_CT_CODE_PTR 6

-#define ATOM_OP_CNT 123
+#define ATOM_OP_CNT 127
 #define ATOM_OP_EOT 91

 #define ATOM_CASE_MAGIC 0x63
@@ -96,7 +96,7 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
 {
	const char *chip_name;
	char fw_name[30];
-	int err, i;
+	int err = 0, i;

	DRM_DEBUG("\n");

@@ -119,24 +119,24 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
	default: BUG();
	}

-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
-		err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
-		err = amdgpu_ucode_validate(adev->sdma[i].fw);
+		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
	}
 out:
	if (err) {
		printk(KERN_ERR
		       "cik_sdma: Failed to load firmware \"%s\"\n",
		       fw_name);
-		for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-			release_firmware(adev->sdma[i].fw);
-			adev->sdma[i].fw = NULL;
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			release_firmware(adev->sdma.instance[i].fw);
+			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
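Everything in this cik_sdma series is the same mechanical conversion: a fixed sdma[2] array becomes sdma.instance[] plus a per-chip num_instances, so parts with a different SDMA engine count only change one number. A hedged sketch of the shape with hypothetical types:

	#include <linux/firmware.h>

	#define TOY_MAX_SDMA_INSTANCES	2

	struct toy_sdma_instance {
		const struct firmware *fw;	/* per-engine firmware */
		/* per-engine ring state would live here */
	};

	struct toy_sdma {
		struct toy_sdma_instance instance[TOY_MAX_SDMA_INSTANCES];
		int num_instances;		/* set once per chip at early init */
	};

	static void toy_sdma_free_firmware(struct toy_sdma *sdma)
	{
		int i;

		/* one loop bound replaces hard-coded [0]/[1] accesses */
		for (i = 0; i < sdma->num_instances; i++) {
			release_firmware(sdma->instance[i].fw);
			sdma->instance[i].fw = NULL;
		}
	}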
@@ -168,7 +168,7 @@ static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
 static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 {
	struct amdgpu_device *adev = ring->adev;
-	u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;
+	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
 }

@@ -183,14 +183,14 @@ static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
 {
	struct amdgpu_device *adev = ring->adev;
-	u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;
+	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
 }

 static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	int i;

	for (i = 0; i < count; i++)

@@ -248,7 +248,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
			    SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

-	if (ring == &ring->adev->sdma[0].ring)
+	if (ring == &ring->adev->sdma.instance[0].ring)
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
	else
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;

@@ -327,8 +327,8 @@ static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
 */
 static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
+	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl;
	int i;

@@ -336,7 +336,7 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

@@ -376,7 +376,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
		cik_sdma_rlc_stop(adev);
	}

-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
		me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;

@@ -402,8 +402,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
	u32 wb_offset;
	int i, j, r;

-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-		ring = &adev->sdma[i].ring;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);

@@ -502,26 +502,25 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
	u32 fw_size;
	int i, j;

-	if (!adev->sdma[0].fw || !adev->sdma[1].fw)
-		return -EINVAL;
-
	/* halt the MEs */
	cik_sdma_enable(adev, false);

-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
||||
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
if (!adev->sdma.instance[i].fw)
|
||||
return -EINVAL;
|
||||
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
|
||||
amdgpu_ucode_print_sdma_hdr(&hdr->header);
|
||||
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
|
||||
adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
|
||||
adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
|
||||
if (adev->sdma[i].feature_version >= 20)
|
||||
adev->sdma[i].burst_nop = true;
|
||||
adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
|
||||
adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
|
||||
if (adev->sdma.instance[i].feature_version >= 20)
|
||||
adev->sdma.instance[i].burst_nop = true;
|
||||
fw_data = (const __le32 *)
|
||||
(adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
|
||||
(adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
|
||||
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
|
||||
for (j = 0; j < fw_size; j++)
|
||||
WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
|
||||
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
|
||||
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -830,7 +829,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
|
|||
*/
|
||||
static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
|
||||
{
|
||||
struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
|
||||
struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
|
||||
u32 pad_count;
|
||||
int i;
|
||||
|
||||
|
@ -934,6 +933,8 @@ static int cik_sdma_early_init(void *handle)
|
|||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
adev->sdma.num_instances = SDMA_MAX_INSTANCE;
|
||||
|
||||
cik_sdma_set_ring_funcs(adev);
|
||||
cik_sdma_set_irq_funcs(adev);
|
||||
cik_sdma_set_buffer_funcs(adev);
|
||||
|
@ -946,7 +947,7 @@ static int cik_sdma_sw_init(void *handle)
|
|||
{
|
||||
struct amdgpu_ring *ring;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int r;
|
||||
int r, i;
|
||||
|
||||
r = cik_sdma_init_microcode(adev);
|
||||
if (r) {
|
||||
|
@ -955,43 +956,33 @@ static int cik_sdma_sw_init(void *handle)
|
|||
}
|
||||
|
||||
/* SDMA trap event */
|
||||
r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
|
||||
r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* SDMA Privileged inst */
|
||||
r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
|
||||
r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* SDMA Privileged inst */
|
||||
r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
|
||||
r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
ring = &adev->sdma[0].ring;
|
||||
ring->ring_obj = NULL;
|
||||
|
||||
ring = &adev->sdma[1].ring;
|
||||
ring->ring_obj = NULL;
|
||||
|
||||
ring = &adev->sdma[0].ring;
|
||||
sprintf(ring->name, "sdma0");
|
||||
r = amdgpu_ring_init(adev, ring, 256 * 1024,
|
||||
SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
|
||||
&adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
|
||||
AMDGPU_RING_TYPE_SDMA);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
ring = &adev->sdma[1].ring;
|
||||
sprintf(ring->name, "sdma1");
|
||||
r = amdgpu_ring_init(adev, ring, 256 * 1024,
|
||||
SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
|
||||
&adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
|
||||
AMDGPU_RING_TYPE_SDMA);
|
||||
if (r)
|
||||
return r;
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
ring = &adev->sdma.instance[i].ring;
|
||||
ring->ring_obj = NULL;
|
||||
sprintf(ring->name, "sdma%d", i);
|
||||
r = amdgpu_ring_init(adev, ring, 256 * 1024,
|
||||
SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
|
||||
&adev->sdma.trap_irq,
|
||||
(i == 0) ?
|
||||
AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
|
||||
AMDGPU_RING_TYPE_SDMA);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
|
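[Editor's note, not part of the diff: the cik_sdma hunks above replace the fixed adev->sdma[i] array with a single adev->sdma container indexed through instance[]. A rough sketch of the layout this implies; the field names (num_instances, instance, trap_irq, illegal_inst_irq, fw, fw_version, feature_version, burst_nop) come from the hunks themselves, while the amdgpu_ring/amdgpu_irq_src/firmware shapes are simplified stand-ins:]

    #include <stdbool.h>
    #include <stdint.h>

    struct amdgpu_ring { int dummy; };    /* stand-in */
    struct amdgpu_irq_src { int dummy; }; /* stand-in */
    struct firmware;                      /* opaque, as in linux/firmware.h */

    #define SDMA_MAX_INSTANCE 2

    /* Per-engine state that used to live in the flat adev->sdma[] array. */
    struct amdgpu_sdma_instance {
        const struct firmware *fw;
        uint32_t fw_version;
        uint32_t feature_version;
        bool burst_nop;
        struct amdgpu_ring ring;
    };

    /* Container: shared IRQ sources plus a per-ASIC instance count,
     * which is what lets Stoney run with a single SDMA engine. */
    struct amdgpu_sdma {
        struct amdgpu_sdma_instance instance[SDMA_MAX_INSTANCE];
        struct amdgpu_irq_src trap_irq;
        struct amdgpu_irq_src illegal_inst_irq;
        int num_instances; /* set per ASIC in *_early_init() */
    };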
@@ -999,9 +990,10 @@ static int cik_sdma_sw_init(void *handle)
 static int cik_sdma_sw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+    int i;
 
-    amdgpu_ring_fini(&adev->sdma[0].ring);
-    amdgpu_ring_fini(&adev->sdma[1].ring);
+    for (i = 0; i < adev->sdma.num_instances; i++)
+        amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
     return 0;
 }
@@ -1078,7 +1070,7 @@ static void cik_sdma_print_status(void *handle)
     dev_info(adev->dev, "CIK SDMA registers\n");
     dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
          RREG32(mmSRBM_STATUS2));
-    for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+    for (i = 0; i < adev->sdma.num_instances; i++) {
         dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
              i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
         dev_info(adev->dev, "  SDMA%d_ME_CNTL=0x%08X\n",
@@ -1223,7 +1215,7 @@ static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
     case 0:
         switch (queue_id) {
         case 0:
-            amdgpu_fence_process(&adev->sdma[0].ring);
+            amdgpu_fence_process(&adev->sdma.instance[0].ring);
             break;
         case 1:
             /* XXX compute */
@@ -1236,7 +1228,7 @@ static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
     case 1:
         switch (queue_id) {
         case 0:
-            amdgpu_fence_process(&adev->sdma[1].ring);
+            amdgpu_fence_process(&adev->sdma.instance[1].ring);
             break;
         case 1:
             /* XXX compute */
@@ -1298,24 +1290,6 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = {
     .set_powergating_state = cik_sdma_set_powergating_state,
 };
 
-/**
- * cik_sdma_ring_is_lockup - Check if the DMA engine is locked up
- *
- * @ring: amdgpu_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up (CIK).
- * Returns true if the engine appears to be locked up, false if not.
- */
-static bool cik_sdma_ring_is_lockup(struct amdgpu_ring *ring)
-{
-
-    if (cik_sdma_is_idle(ring->adev)) {
-        amdgpu_ring_lockup_update(ring);
-        return false;
-    }
-    return amdgpu_ring_test_lockup(ring);
-}
-
 static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
     .get_rptr = cik_sdma_ring_get_rptr,
     .get_wptr = cik_sdma_ring_get_wptr,
@@ -1328,14 +1302,15 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
     .emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
     .test_ring = cik_sdma_ring_test_ring,
     .test_ib = cik_sdma_ring_test_ib,
-    .is_lockup = cik_sdma_ring_is_lockup,
     .insert_nop = cik_sdma_ring_insert_nop,
 };
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
 {
-    adev->sdma[0].ring.funcs = &cik_sdma_ring_funcs;
-    adev->sdma[1].ring.funcs = &cik_sdma_ring_funcs;
+    int i;
+
+    for (i = 0; i < adev->sdma.num_instances; i++)
+        adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
@@ -1349,9 +1324,9 @@ static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
 
 static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
 {
-    adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
-    adev->sdma_trap_irq.funcs = &cik_sdma_trap_irq_funcs;
-    adev->sdma_illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
+    adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+    adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
+    adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
 }
 
 /**
@@ -1416,7 +1391,7 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
 {
     if (adev->mman.buffer_funcs == NULL) {
         adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
-        adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
+        adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
     }
 }
 
@@ -1431,7 +1406,7 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
     if (adev->vm_manager.vm_pte_funcs == NULL) {
         adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
-        adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+        adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
         adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
     }
 }
@@ -1264,6 +1264,7 @@ static void cz_apply_state_adjust_rules(struct amdgpu_device *adev,
 
 static int cz_dpm_enable(struct amdgpu_device *adev)
 {
+    const char *chip_name;
     int ret = 0;
 
     /* renable will hang up SMU, so check first */
@@ -1272,21 +1273,33 @@ static int cz_dpm_enable(struct amdgpu_device *adev)
 
     cz_program_voting_clients(adev);
 
+    switch (adev->asic_type) {
+    case CHIP_CARRIZO:
+        chip_name = "carrizo";
+        break;
+    case CHIP_STONEY:
+        chip_name = "stoney";
+        break;
+    default:
+        BUG();
+    }
+
 
     ret = cz_start_dpm(adev);
     if (ret) {
-        DRM_ERROR("Carrizo DPM enable failed\n");
+        DRM_ERROR("%s DPM enable failed\n", chip_name);
         return -EINVAL;
     }
 
     ret = cz_program_bootup_state(adev);
     if (ret) {
-        DRM_ERROR("Carrizo bootup state program failed\n");
+        DRM_ERROR("%s bootup state program failed\n", chip_name);
         return -EINVAL;
     }
 
     ret = cz_enable_didt(adev, true);
     if (ret) {
-        DRM_ERROR("Carrizo enable di/dt failed\n");
+        DRM_ERROR("%s enable di/dt failed\n", chip_name);
         return -EINVAL;
     }
 
@@ -1353,7 +1366,7 @@ static int cz_dpm_disable(struct amdgpu_device *adev)
 
     ret = cz_enable_didt(adev, false);
     if (ret) {
-        DRM_ERROR("Carrizo disable di/dt failed\n");
+        DRM_ERROR("disable di/dt failed\n");
         return -EINVAL;
     }
 
@@ -312,13 +312,16 @@ int cz_smu_start(struct amdgpu_device *adev)
                 UCODE_ID_CP_MEC_JT1_MASK |
                 UCODE_ID_CP_MEC_JT2_MASK;
 
+    if (adev->asic_type == CHIP_STONEY)
+        fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
+
     cz_smu_request_load_fw(adev);
     ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
     if (ret)
         return ret;
 
     /* manually load MEC firmware for CZ */
-    if (adev->asic_type == CHIP_CARRIZO) {
+    if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
         ret = cz_load_mec_firmware(adev);
         if (ret) {
             dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret);
@@ -336,6 +339,9 @@ int cz_smu_start(struct amdgpu_device *adev)
                 AMDGPU_CPMEC2_UCODE_LOADED |
                 AMDGPU_CPRLC_UCODE_LOADED;
 
+    if (adev->asic_type == CHIP_STONEY)
+        adev->smu.fw_flags &= ~(AMDGPU_SDMA1_UCODE_LOADED | AMDGPU_CPMEC2_UCODE_LOADED);
+
     return ret;
 }
 
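[Editor's note, not part of the diff: Stoney has a single SDMA engine and no second MEC jump table, so cz_smu_start() above simply clears those bits from the firmware check mask rather than branching everywhere. A tiny self-contained sketch of that mask-gating pattern; the FW_* bit values here are placeholders, not the real UCODE_ID_* masks:]

    /* Start from the full firmware set, then clear the engines a
     * given ASIC doesn't have (what the CHIP_STONEY branch does). */
    enum { FW_SDMA0 = 1 << 0, FW_SDMA1 = 1 << 1, FW_MEC_JT2 = 1 << 2 };

    static unsigned int fw_mask_for_asic(int is_stoney)
    {
        unsigned int fw_to_check = FW_SDMA0 | FW_SDMA1 | FW_MEC_JT2;

        if (is_stoney)
            fw_to_check &= ~(FW_SDMA1 | FW_MEC_JT2);
        return fw_to_check;
    }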
@@ -601,8 +607,13 @@ static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
                 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
         cz_smu_populate_single_ucode_load_task(adev,
                 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-        cz_smu_populate_single_ucode_load_task(adev,
+        if (adev->asic_type == CHIP_STONEY) {
+            cz_smu_populate_single_ucode_load_task(adev,
+                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+        } else {
+            cz_smu_populate_single_ucode_load_task(adev,
                 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+        }
         cz_smu_populate_single_ucode_load_task(adev,
                 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
     }
@@ -642,8 +653,13 @@ static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
     if (adev->firmware.smu_load) {
         cz_smu_populate_single_ucode_load_task(adev,
                 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
-        cz_smu_populate_single_ucode_load_task(adev,
+        if (adev->asic_type == CHIP_STONEY) {
+            cz_smu_populate_single_ucode_load_task(adev,
+                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
+        } else {
+            cz_smu_populate_single_ucode_load_task(adev,
                 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
+        }
         cz_smu_populate_single_ucode_load_task(adev,
                 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
         cz_smu_populate_single_ucode_load_task(adev,
@@ -652,8 +668,13 @@ static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
                 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
         cz_smu_populate_single_ucode_load_task(adev,
                 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-        cz_smu_populate_single_ucode_load_task(adev,
+        if (adev->asic_type == CHIP_STONEY) {
+            cz_smu_populate_single_ucode_load_task(adev,
+                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+        } else {
+            cz_smu_populate_single_ucode_load_task(adev,
                 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+        }
         cz_smu_populate_single_ucode_load_task(adev,
                 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
     }
@@ -888,10 +909,18 @@ int cz_smu_init(struct amdgpu_device *adev)
                 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
                 &priv->driver_buffer[priv->driver_buffer_length++]))
         goto smu_init_failed;
-    if (cz_smu_populate_single_firmware_entry(adev,
-                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
-                &priv->driver_buffer[priv->driver_buffer_length++]))
-        goto smu_init_failed;
+
+    if (adev->asic_type == CHIP_STONEY) {
+        if (cz_smu_populate_single_firmware_entry(adev,
+                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
+                &priv->driver_buffer[priv->driver_buffer_length++]))
+            goto smu_init_failed;
+    } else {
+        if (cz_smu_populate_single_firmware_entry(adev,
+                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
+                &priv->driver_buffer[priv->driver_buffer_length++]))
+            goto smu_init_failed;
+    }
     if (cz_smu_populate_single_firmware_entry(adev,
                 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
                 &priv->driver_buffer[priv->driver_buffer_length++]))
@@ -908,10 +937,17 @@ int cz_smu_init(struct amdgpu_device *adev)
                 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
                 &priv->driver_buffer[priv->driver_buffer_length++]))
         goto smu_init_failed;
-    if (cz_smu_populate_single_firmware_entry(adev,
-                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
-                &priv->driver_buffer[priv->driver_buffer_length++]))
-        goto smu_init_failed;
+    if (adev->asic_type == CHIP_STONEY) {
+        if (cz_smu_populate_single_firmware_entry(adev,
+                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
+                &priv->driver_buffer[priv->driver_buffer_length++]))
+            goto smu_init_failed;
+    } else {
+        if (cz_smu_populate_single_firmware_entry(adev,
+                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
+                &priv->driver_buffer[priv->driver_buffer_length++]))
+            goto smu_init_failed;
+    }
     if (cz_smu_populate_single_firmware_entry(adev,
                 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
                 &priv->driver_buffer[priv->driver_buffer_length++]))
@@ -280,46 +280,22 @@ static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
  * @crtc_id: crtc to cleanup pageflip on
  * @crtc_base: new address of the crtc (GPU MC address)
  *
- * Does the actual pageflip (evergreen+).
- * During vblank we take the crtc lock and wait for the update_pending
- * bit to go high, when it does, we release the lock, and allow the
- * double buffered update to take place.
- * Returns the current update pending status.
+ * Triggers the actual pageflip by updating the primary
+ * surface base address.
  */
 static void dce_v10_0_page_flip(struct amdgpu_device *adev,
                 int crtc_id, u64 crtc_base)
 {
     struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
-    u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset);
-    int i;
 
-    /* Lock the graphics update lock */
-    tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
-    WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
-
-    /* update the scanout addresses */
-    WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
-           upper_32_bits(crtc_base));
-    WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
-           lower_32_bits(crtc_base));
-
+    /* update the primary scanout address */
     WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
            upper_32_bits(crtc_base));
+    /* writing to the low address triggers the update */
     WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
            lower_32_bits(crtc_base));
 
-    /* Wait for update_pending to go high. */
-    for (i = 0; i < adev->usec_timeout; i++) {
-        if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) &
-            GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK)
-            break;
-        udelay(1);
-    }
-    DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
-
-    /* Unlock the lock, so double-buffering can take place inside vblank */
-    tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
-    WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
+    /* post the write */
+    RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
 }
 
 static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
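[Editor's note, not part of the diff: the rewritten page_flip above drops the busy-wait on GRPH_SURFACE_UPDATE_PENDING; writing the low dword of the primary surface address is what latches the flip, and a read-back posts the write. A generic sketch of that MMIO pattern under assumptions: writel/readl stand in for the driver's WREG32/RREG32 wrappers, upper_32_bits/lower_32_bits are the usual kernel helpers, and the register offsets are made up for illustration:]

    #include <linux/io.h>
    #include <linux/kernel.h>

    /* Illustrative byte offsets only; the real ones are ASIC-specific. */
    #define GRPH_PRI_SURF_ADDR_HIGH 0x681c
    #define GRPH_PRI_SURF_ADDR      0x6810

    static void flip_to(void __iomem *mmio, u64 base)
    {
        writel(upper_32_bits(base), mmio + GRPH_PRI_SURF_ADDR_HIGH);
        /* the low-dword write is what actually arms the flip */
        writel(lower_32_bits(base), mmio + GRPH_PRI_SURF_ADDR);
        /* read back so the write is posted before we return */
        (void)readl(mmio + GRPH_PRI_SURF_ADDR);
    }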
@@ -2517,26 +2493,19 @@ static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
     struct amdgpu_device *adev = crtc->dev->dev_private;
     u32 tmp;
 
+    WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+           upper_32_bits(amdgpu_crtc->cursor_addr));
+    WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+           lower_32_bits(amdgpu_crtc->cursor_addr));
+
     tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
     tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
     tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
     WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }
 
-static void dce_v10_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
-                 uint64_t gpu_addr)
-{
-    struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-    struct amdgpu_device *adev = crtc->dev->dev_private;
-
-    WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
-           upper_32_bits(gpu_addr));
-    WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
-           lower_32_bits(gpu_addr));
-}
-
-static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
-                      int x, int y)
+static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
+                    int x, int y)
 {
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
     struct amdgpu_device *adev = crtc->dev->dev_private;
@@ -2556,26 +2525,40 @@ static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
         y = 0;
     }
 
-    dce_v10_0_lock_cursor(crtc, true);
     WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
     WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
     WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
            ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-    dce_v10_0_lock_cursor(crtc, false);
+
+    amdgpu_crtc->cursor_x = x;
+    amdgpu_crtc->cursor_y = y;
 
     return 0;
 }
 
-static int dce_v10_0_crtc_cursor_set(struct drm_crtc *crtc,
-                     struct drm_file *file_priv,
-                     uint32_t handle,
-                     uint32_t width,
-                     uint32_t height)
+static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
+                      int x, int y)
+{
+    int ret;
+
+    dce_v10_0_lock_cursor(crtc, true);
+    ret = dce_v10_0_cursor_move_locked(crtc, x, y);
+    dce_v10_0_lock_cursor(crtc, false);
+
+    return ret;
+}
+
+static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
+                      struct drm_file *file_priv,
+                      uint32_t handle,
+                      uint32_t width,
+                      uint32_t height,
+                      int32_t hot_x,
+                      int32_t hot_y)
 {
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
     struct drm_gem_object *obj;
-    struct amdgpu_bo *robj;
-    uint64_t gpu_addr;
+    struct amdgpu_bo *aobj;
     int ret;
 
     if (!handle) {
@@ -2597,41 +2580,71 @@ static int dce_v10_0_crtc_cursor_set(struct drm_crtc *crtc,
         return -ENOENT;
     }
 
-    robj = gem_to_amdgpu_bo(obj);
-    ret = amdgpu_bo_reserve(robj, false);
-    if (unlikely(ret != 0))
-        goto fail;
-    ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM,
-                       0, 0, &gpu_addr);
-    amdgpu_bo_unreserve(robj);
-    if (ret)
-        goto fail;
+    aobj = gem_to_amdgpu_bo(obj);
+    ret = amdgpu_bo_reserve(aobj, false);
+    if (ret != 0) {
+        drm_gem_object_unreference_unlocked(obj);
+        return ret;
+    }
+
+    ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+    amdgpu_bo_unreserve(aobj);
+    if (ret) {
+        DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+        drm_gem_object_unreference_unlocked(obj);
+        return ret;
+    }
 
     amdgpu_crtc->cursor_width = width;
     amdgpu_crtc->cursor_height = height;
 
     dce_v10_0_lock_cursor(crtc, true);
-    dce_v10_0_set_cursor(crtc, obj, gpu_addr);
+
+    if (hot_x != amdgpu_crtc->cursor_hot_x ||
+        hot_y != amdgpu_crtc->cursor_hot_y) {
+        int x, y;
+
+        x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
+        y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
+
+        dce_v10_0_cursor_move_locked(crtc, x, y);
+
+        amdgpu_crtc->cursor_hot_x = hot_x;
+        amdgpu_crtc->cursor_hot_y = hot_y;
+    }
+
     dce_v10_0_show_cursor(crtc);
     dce_v10_0_lock_cursor(crtc, false);
 
 unpin:
     if (amdgpu_crtc->cursor_bo) {
-        robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-        ret = amdgpu_bo_reserve(robj, false);
+        struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+        ret = amdgpu_bo_reserve(aobj, false);
         if (likely(ret == 0)) {
-            amdgpu_bo_unpin(robj);
-            amdgpu_bo_unreserve(robj);
+            amdgpu_bo_unpin(aobj);
+            amdgpu_bo_unreserve(aobj);
         }
         drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
     }
 
     amdgpu_crtc->cursor_bo = obj;
     return 0;
-fail:
-    drm_gem_object_unreference_unlocked(obj);
+}
 
-    return ret;
+static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
+{
+    struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+    if (amdgpu_crtc->cursor_bo) {
+        dce_v10_0_lock_cursor(crtc, true);
+
+        dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
+                         amdgpu_crtc->cursor_y);
+
+        dce_v10_0_show_cursor(crtc);
+
+        dce_v10_0_lock_cursor(crtc, false);
+    }
 }
 
 static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
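[Editor's note, not part of the diff: the cursor_set2 path above compensates for a hotspot change so the visible cursor tip does not jump. The cursor surface is positioned at (screen - hotspot), so when userspace hands in a new hotspot the stored position must shift by the old-minus-new delta. The arithmetic in one self-contained sketch:]

    /* Keep the on-screen cursor tip anchored when the hotspot moves:
     * surface position = screen position - hotspot, so shifting the
     * stored position by (old - new) leaves the tip where it was. */
    static void adjust_for_hotspot(int *cursor_x, int *cursor_y,
                                   int old_hot_x, int old_hot_y,
                                   int new_hot_x, int new_hot_y)
    {
        *cursor_x += old_hot_x - new_hot_x;
        *cursor_y += old_hot_y - new_hot_y;
    }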
@@ -2659,7 +2672,7 @@ static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
 }
 
 static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
-    .cursor_set = dce_v10_0_crtc_cursor_set,
+    .cursor_set2 = dce_v10_0_crtc_cursor_set2,
     .cursor_move = dce_v10_0_crtc_cursor_move,
     .gamma_set = dce_v10_0_crtc_gamma_set,
     .set_config = amdgpu_crtc_set_config,
@@ -2793,6 +2806,7 @@ static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc,
     dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
     amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
     amdgpu_atombios_crtc_scaler_setup(crtc);
+    dce_v10_0_cursor_reset(crtc);
     /* update the hw version fpr dpm */
     amdgpu_crtc->hw_mode = *adjusted_mode;
 
@@ -3071,24 +3085,18 @@ static int dce_v10_0_suspend(void *handle)
 
     amdgpu_atombios_scratch_regs_save(adev);
 
-    dce_v10_0_hpd_fini(adev);
-
-    dce_v10_0_pageflip_interrupt_fini(adev);
-
-    return 0;
+    return dce_v10_0_hw_fini(handle);
 }
 
 static int dce_v10_0_resume(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+    int ret;
 
     dce_v10_0_init_golden_registers(adev);
+    ret = dce_v10_0_hw_init(handle);
 
     amdgpu_atombios_scratch_regs_restore(adev);
 
-    /* init dig PHYs, disp eng pll */
-    amdgpu_atombios_encoder_init_dig(adev);
-    amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
     /* turn on the BL */
     if (adev->mode_info.bl_encoder) {
         u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3097,12 +3105,7 @@ static int dce_v10_0_resume(void *handle)
                             bl_level);
     }
 
-    /* initialize hpd */
-    dce_v10_0_hpd_init(adev);
-
-    dce_v10_0_pageflip_interrupt_init(adev);
-
-    return 0;
+    return ret;
 }
 
 static bool dce_v10_0_is_idle(void *handle)
@@ -3294,37 +3297,20 @@ static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev,
                         unsigned type,
                         enum amdgpu_interrupt_state state)
 {
-    u32 reg, reg_block;
-    /* now deal with page flip IRQ */
-    switch (type) {
-    case AMDGPU_PAGEFLIP_IRQ_D1:
-        reg_block = CRTC0_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D2:
-        reg_block = CRTC1_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D3:
-        reg_block = CRTC2_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D4:
-        reg_block = CRTC3_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D5:
-        reg_block = CRTC4_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D6:
-        reg_block = CRTC5_REGISTER_OFFSET;
-        break;
-    default:
-        DRM_ERROR("invalid pageflip crtc %d\n", type);
-        return -EINVAL;
+    u32 reg;
+
+    if (type >= adev->mode_info.num_crtc) {
+        DRM_ERROR("invalid pageflip crtc %d\n", type);
+        return -EINVAL;
     }
 
-    reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block);
+    reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
     if (state == AMDGPU_IRQ_STATE_DISABLE)
-        WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+        WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
+               reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
     else
-        WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+        WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
+               reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
 
     return 0;
 }
@@ -3333,7 +3319,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
                   struct amdgpu_irq_src *source,
                   struct amdgpu_iv_entry *entry)
 {
-    int reg_block;
     unsigned long flags;
     unsigned crtc_id;
     struct amdgpu_crtc *amdgpu_crtc;
@@ -3342,33 +3327,15 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
     crtc_id = (entry->src_id - 8) >> 1;
     amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 
-    /* ack the interrupt */
-    switch(crtc_id){
-    case AMDGPU_PAGEFLIP_IRQ_D1:
-        reg_block = CRTC0_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D2:
-        reg_block = CRTC1_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D3:
-        reg_block = CRTC2_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D4:
-        reg_block = CRTC3_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D5:
-        reg_block = CRTC4_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D6:
-        reg_block = CRTC5_REGISTER_OFFSET;
-        break;
-    default:
-        DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
-        return -EINVAL;
+    if (crtc_id >= adev->mode_info.num_crtc) {
+        DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+        return -EINVAL;
     }
 
-    if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
-        WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+    if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
+        GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+        WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
+               GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
 
     /* IRQ could occur when in initial stage */
     if (amdgpu_crtc == NULL)
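[Editor's note, not part of the diff: all three DCE variants replace the six-way switch over CRTC IDs with a bounds check plus a crtc_offsets[] table lookup, which also makes the per-ASIC num_crtc (2 on Stoney) enforceable in one place. A kernel-style sketch of the pattern; the offset values and array name here are illustrative, only the shape matches the driver change:]

    #include <errno.h>

    /* Per-CRTC register block bases; values are illustrative only. */
    static const unsigned int crtc_offsets_example[6] = {
        0x0000, 0x0200, 0x0400, 0x0600, 0x0800, 0x0a00
    };

    static int crtc_reg_base(unsigned int crtc, unsigned int num_crtc,
                             unsigned int *base)
    {
        if (crtc >= num_crtc)
            return -EINVAL; /* reject instead of falling through a default: */
        *base = crtc_offsets_example[crtc];
        return 0;
    }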
@@ -126,6 +126,13 @@ static const u32 cz_mgcg_cgcg_init[] =
     mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
 };
 
+static const u32 stoney_golden_settings_a11[] =
+{
+    mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
+    mmFBC_MISC, 0x1f311fff, 0x14302000,
+};
+
+
 static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
 {
     switch (adev->asic_type) {
@@ -137,6 +144,11 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
                          cz_golden_settings_a11,
                          (const u32)ARRAY_SIZE(cz_golden_settings_a11));
         break;
+    case CHIP_STONEY:
+        amdgpu_program_register_sequence(adev,
+                         stoney_golden_settings_a11,
+                         (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
+        break;
     default:
         break;
     }
@@ -258,46 +270,22 @@ static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
  * @crtc_id: crtc to cleanup pageflip on
  * @crtc_base: new address of the crtc (GPU MC address)
  *
- * Does the actual pageflip (evergreen+).
- * During vblank we take the crtc lock and wait for the update_pending
- * bit to go high, when it does, we release the lock, and allow the
- * double buffered update to take place.
- * Returns the current update pending status.
+ * Triggers the actual pageflip by updating the primary
+ * surface base address.
  */
 static void dce_v11_0_page_flip(struct amdgpu_device *adev,
                 int crtc_id, u64 crtc_base)
 {
     struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
-    u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset);
-    int i;
 
-    /* Lock the graphics update lock */
-    tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
-    WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
-
     /* update the scanout addresses */
-    WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
-           upper_32_bits(crtc_base));
-    WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
-           lower_32_bits(crtc_base));
-
     WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
            upper_32_bits(crtc_base));
+    /* writing to the low address triggers the update */
     WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
            lower_32_bits(crtc_base));
 
-    /* Wait for update_pending to go high. */
-    for (i = 0; i < adev->usec_timeout; i++) {
-        if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) &
-            GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK)
-            break;
-        udelay(1);
-    }
-    DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
-
-    /* Unlock the lock, so double-buffering can take place inside vblank */
-    tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
-    WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
+    /* post the write */
+    RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
 }
 
 static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
@@ -2443,7 +2431,7 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
 
     /* XXX need to determine what plls are available on each DCE11 part */
     pll_in_use = amdgpu_pll_get_use_mask(crtc);
-    if (adev->asic_type == CHIP_CARRIZO) {
+    if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
         if (!(pll_in_use & (1 << ATOM_PPLL1)))
             return ATOM_PPLL1;
         if (!(pll_in_use & (1 << ATOM_PPLL0)))
@@ -2494,26 +2482,19 @@ static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
     struct amdgpu_device *adev = crtc->dev->dev_private;
     u32 tmp;
 
+    WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+           upper_32_bits(amdgpu_crtc->cursor_addr));
+    WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+           lower_32_bits(amdgpu_crtc->cursor_addr));
+
     tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
     tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
     tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
     WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }
 
-static void dce_v11_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
-                 uint64_t gpu_addr)
-{
-    struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-    struct amdgpu_device *adev = crtc->dev->dev_private;
-
-    WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
-           upper_32_bits(gpu_addr));
-    WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
-           lower_32_bits(gpu_addr));
-}
-
-static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
-                      int x, int y)
+static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
+                    int x, int y)
 {
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
     struct amdgpu_device *adev = crtc->dev->dev_private;
@@ -2533,26 +2514,40 @@ static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
         y = 0;
     }
 
-    dce_v11_0_lock_cursor(crtc, true);
     WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
     WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
     WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
           ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-    dce_v11_0_lock_cursor(crtc, false);
+
+    amdgpu_crtc->cursor_x = x;
+    amdgpu_crtc->cursor_y = y;
 
     return 0;
 }
 
-static int dce_v11_0_crtc_cursor_set(struct drm_crtc *crtc,
-                     struct drm_file *file_priv,
-                     uint32_t handle,
-                     uint32_t width,
-                     uint32_t height)
+static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
+                      int x, int y)
+{
+    int ret;
+
+    dce_v11_0_lock_cursor(crtc, true);
+    ret = dce_v11_0_cursor_move_locked(crtc, x, y);
+    dce_v11_0_lock_cursor(crtc, false);
+
+    return ret;
+}
+
+static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
+                      struct drm_file *file_priv,
+                      uint32_t handle,
+                      uint32_t width,
+                      uint32_t height,
+                      int32_t hot_x,
+                      int32_t hot_y)
 {
     struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
     struct drm_gem_object *obj;
-    struct amdgpu_bo *robj;
-    uint64_t gpu_addr;
+    struct amdgpu_bo *aobj;
    int ret;
 
     if (!handle) {
@@ -2574,41 +2569,71 @@ static int dce_v11_0_crtc_cursor_set(struct drm_crtc *crtc,
         return -ENOENT;
     }
 
-    robj = gem_to_amdgpu_bo(obj);
-    ret = amdgpu_bo_reserve(robj, false);
-    if (unlikely(ret != 0))
-        goto fail;
-    ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM,
-                       0, 0, &gpu_addr);
-    amdgpu_bo_unreserve(robj);
-    if (ret)
-        goto fail;
+    aobj = gem_to_amdgpu_bo(obj);
+    ret = amdgpu_bo_reserve(aobj, false);
+    if (ret != 0) {
+        drm_gem_object_unreference_unlocked(obj);
+        return ret;
+    }
+
+    ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+    amdgpu_bo_unreserve(aobj);
+    if (ret) {
+        DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+        drm_gem_object_unreference_unlocked(obj);
+        return ret;
+    }
 
     amdgpu_crtc->cursor_width = width;
     amdgpu_crtc->cursor_height = height;
 
     dce_v11_0_lock_cursor(crtc, true);
-    dce_v11_0_set_cursor(crtc, obj, gpu_addr);
+
+    if (hot_x != amdgpu_crtc->cursor_hot_x ||
+        hot_y != amdgpu_crtc->cursor_hot_y) {
+        int x, y;
+
+        x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
+        y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
+
+        dce_v11_0_cursor_move_locked(crtc, x, y);
+
+        amdgpu_crtc->cursor_hot_x = hot_x;
+        amdgpu_crtc->cursor_hot_y = hot_y;
+    }
+
     dce_v11_0_show_cursor(crtc);
     dce_v11_0_lock_cursor(crtc, false);
 
 unpin:
     if (amdgpu_crtc->cursor_bo) {
-        robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-        ret = amdgpu_bo_reserve(robj, false);
+        struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+        ret = amdgpu_bo_reserve(aobj, false);
         if (likely(ret == 0)) {
-            amdgpu_bo_unpin(robj);
-            amdgpu_bo_unreserve(robj);
+            amdgpu_bo_unpin(aobj);
+            amdgpu_bo_unreserve(aobj);
         }
         drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
     }
 
     amdgpu_crtc->cursor_bo = obj;
     return 0;
-fail:
-    drm_gem_object_unreference_unlocked(obj);
+}
 
-    return ret;
+static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
+{
+    struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+    if (amdgpu_crtc->cursor_bo) {
+        dce_v11_0_lock_cursor(crtc, true);
+
+        dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
+                         amdgpu_crtc->cursor_y);
+
+        dce_v11_0_show_cursor(crtc);
+
+        dce_v11_0_lock_cursor(crtc, false);
+    }
 }
 
 static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
@@ -2636,7 +2661,7 @@ static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
 }
 
 static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
-    .cursor_set = dce_v11_0_crtc_cursor_set,
+    .cursor_set2 = dce_v11_0_crtc_cursor_set2,
     .cursor_move = dce_v11_0_crtc_cursor_move,
     .gamma_set = dce_v11_0_crtc_gamma_set,
     .set_config = amdgpu_crtc_set_config,
@@ -2770,6 +2795,7 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
     dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
     amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
     amdgpu_atombios_crtc_scaler_setup(crtc);
+    dce_v11_0_cursor_reset(crtc);
     /* update the hw version fpr dpm */
     amdgpu_crtc->hw_mode = *adjusted_mode;
 
@@ -2911,6 +2937,11 @@ static int dce_v11_0_early_init(void *handle)
         adev->mode_info.num_hpd = 6;
         adev->mode_info.num_dig = 9;
         break;
+    case CHIP_STONEY:
+        adev->mode_info.num_crtc = 2;
+        adev->mode_info.num_hpd = 6;
+        adev->mode_info.num_dig = 9;
+        break;
     default:
         /* FIXME: not supported yet */
         return -EINVAL;
@@ -3009,6 +3040,7 @@ static int dce_v11_0_hw_init(void *handle)
     dce_v11_0_init_golden_registers(adev);
 
     /* init dig PHYs, disp eng pll */
+    amdgpu_atombios_crtc_powergate_init(adev);
     amdgpu_atombios_encoder_init_dig(adev);
     amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
 
@@ -3046,25 +3078,18 @@ static int dce_v11_0_suspend(void *handle)
 
     amdgpu_atombios_scratch_regs_save(adev);
 
-    dce_v11_0_hpd_fini(adev);
-
-    dce_v11_0_pageflip_interrupt_fini(adev);
-
-    return 0;
+    return dce_v11_0_hw_fini(handle);
 }
 
 static int dce_v11_0_resume(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+    int ret;
 
     dce_v11_0_init_golden_registers(adev);
+    ret = dce_v11_0_hw_init(handle);
 
     amdgpu_atombios_scratch_regs_restore(adev);
 
-    /* init dig PHYs, disp eng pll */
-    amdgpu_atombios_crtc_powergate_init(adev);
-    amdgpu_atombios_encoder_init_dig(adev);
-    amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
     /* turn on the BL */
     if (adev->mode_info.bl_encoder) {
         u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3073,12 +3098,7 @@ static int dce_v11_0_resume(void *handle)
                             bl_level);
     }
 
-    /* initialize hpd */
-    dce_v11_0_hpd_init(adev);
-
-    dce_v11_0_pageflip_interrupt_init(adev);
-
-    return 0;
+    return ret;
 }
 
 static bool dce_v11_0_is_idle(void *handle)
@@ -3270,37 +3290,20 @@ static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
                         unsigned type,
                         enum amdgpu_interrupt_state state)
 {
-    u32 reg, reg_block;
-    /* now deal with page flip IRQ */
-    switch (type) {
-    case AMDGPU_PAGEFLIP_IRQ_D1:
-        reg_block = CRTC0_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D2:
-        reg_block = CRTC1_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D3:
-        reg_block = CRTC2_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D4:
-        reg_block = CRTC3_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D5:
-        reg_block = CRTC4_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D6:
-        reg_block = CRTC5_REGISTER_OFFSET;
-        break;
-    default:
-        DRM_ERROR("invalid pageflip crtc %d\n", type);
-        return -EINVAL;
+    u32 reg;
+
+    if (type >= adev->mode_info.num_crtc) {
+        DRM_ERROR("invalid pageflip crtc %d\n", type);
+        return -EINVAL;
     }
 
-    reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block);
+    reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
     if (state == AMDGPU_IRQ_STATE_DISABLE)
-        WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+        WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
+               reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
     else
-        WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+        WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
+               reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
 
     return 0;
 }
@@ -3309,7 +3312,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
                   struct amdgpu_irq_src *source,
                   struct amdgpu_iv_entry *entry)
 {
-    int reg_block;
     unsigned long flags;
     unsigned crtc_id;
     struct amdgpu_crtc *amdgpu_crtc;
@@ -3318,33 +3320,15 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
     crtc_id = (entry->src_id - 8) >> 1;
     amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 
-    /* ack the interrupt */
-    switch(crtc_id){
-    case AMDGPU_PAGEFLIP_IRQ_D1:
-        reg_block = CRTC0_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D2:
-        reg_block = CRTC1_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D3:
-        reg_block = CRTC2_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D4:
-        reg_block = CRTC3_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D5:
-        reg_block = CRTC4_REGISTER_OFFSET;
-        break;
-    case AMDGPU_PAGEFLIP_IRQ_D6:
-        reg_block = CRTC5_REGISTER_OFFSET;
-        break;
-    default:
-        DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
-        return -EINVAL;
+    if (crtc_id >= adev->mode_info.num_crtc) {
+        DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+        return -EINVAL;
     }
 
-    if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
-        WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+    if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
+        GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+        WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
+               GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
 
     /* IRQ could occur when in initial stage */
     if(amdgpu_crtc == NULL)
@ -229,46 +229,22 @@ static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
|
|||
* @crtc_id: crtc to cleanup pageflip on
|
||||
* @crtc_base: new address of the crtc (GPU MC address)
|
||||
*
|
||||
* Does the actual pageflip (evergreen+).
|
||||
* During vblank we take the crtc lock and wait for the update_pending
|
||||
* bit to go high, when it does, we release the lock, and allow the
|
||||
* double buffered update to take place.
|
||||
* Returns the current update pending status.
|
||||
* Triggers the actual pageflip by updating the primary
|
||||
* surface base address.
|
||||
*/
|
||||
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
|
||||
int crtc_id, u64 crtc_base)
|
||||
{
|
||||
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
|
||||
u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset);
|
||||
int i;
|
||||
|
||||
/* Lock the graphics update lock */
|
||||
tmp |= GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
|
||||
WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
|
||||
|
||||
/* update the scanout addresses */
|
||||
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
|
||||
upper_32_bits(crtc_base));
|
||||
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
|
||||
(u32)crtc_base);
|
||||
|
||||
/* update the primary scanout addresses */
|
||||
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
|
||||
upper_32_bits(crtc_base));
|
||||
/* writing to the low address triggers the update */
|
||||
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
|
||||
(u32)crtc_base);
|
||||
|
||||
/* Wait for update_pending to go high. */
|
||||
for (i = 0; i < adev->usec_timeout; i++) {
|
||||
if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) &
|
||||
GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK)
|
||||
break;
|
||||
udelay(1);
|
||||
}
|
||||
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
|
||||
|
||||
/* Unlock the lock, so double-buffering can take place inside vblank */
|
||||
tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
|
||||
WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp);
|
||||
lower_32_bits(crtc_base));
|
||||
/* post the write */
|
||||
RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
|
||||
}
|
||||
|
||||
static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
|
||||
|
@ -2429,26 +2405,19 @@ static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
|
|||
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
|
||||
struct amdgpu_device *adev = crtc->dev->dev_private;
|
||||
|
||||
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
|
||||
upper_32_bits(amdgpu_crtc->cursor_addr));
|
||||
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
|
||||
lower_32_bits(amdgpu_crtc->cursor_addr));
|
||||
|
||||
WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
|
||||
CUR_CONTROL__CURSOR_EN_MASK |
|
||||
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
|
||||
(CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
|
||||
}
|
||||
|
||||
static void dce_v8_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
|
||||
uint64_t gpu_addr)
|
||||
{
|
||||
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
|
||||
struct amdgpu_device *adev = crtc->dev->dev_private;
|
||||
|
||||
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
|
||||
upper_32_bits(gpu_addr));
|
||||
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
|
||||
gpu_addr & 0xffffffff);
|
||||
}
|
||||
|
||||
static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
|
||||
int x, int y)
|
||||
static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
|
||||
int x, int y)
|
||||
{
|
||||
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
|
||||
struct amdgpu_device *adev = crtc->dev->dev_private;
|
||||
|
@ -2468,26 +2437,40 @@ static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
|
|||
y = 0;
|
||||
}
|
||||
|
||||
dce_v8_0_lock_cursor(crtc, true);
|
||||
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
|
||||
	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
	dce_v8_0_lock_cursor(crtc, false);

	amdgpu_crtc->cursor_x = x;
	amdgpu_crtc->cursor_y = y;

	return 0;
}

static int dce_v8_0_crtc_cursor_set(struct drm_crtc *crtc,
				    struct drm_file *file_priv,
				    uint32_t handle,
				    uint32_t width,
				    uint32_t height)
static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
				     int x, int y)
{
	int ret;

	dce_v8_0_lock_cursor(crtc, true);
	ret = dce_v8_0_cursor_move_locked(crtc, x, y);
	dce_v8_0_lock_cursor(crtc, false);

	return ret;
}

static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
				     struct drm_file *file_priv,
				     uint32_t handle,
				     uint32_t width,
				     uint32_t height,
				     int32_t hot_x,
				     int32_t hot_y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_bo *robj;
	uint64_t gpu_addr;
	struct amdgpu_bo *aobj;
	int ret;

	if (!handle) {

@@ -2509,41 +2492,71 @@ static int dce_v8_0_crtc_cursor_set(struct drm_crtc *crtc,
		return -ENOENT;
	}

	robj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(robj, false);
	if (unlikely(ret != 0))
		goto fail;
	ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM,
				       0, 0, &gpu_addr);
	amdgpu_bo_unreserve(robj);
	if (ret)
		goto fail;
	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	amdgpu_crtc->cursor_width = width;
	amdgpu_crtc->cursor_height = height;

	dce_v8_0_lock_cursor(crtc, true);
	dce_v8_0_set_cursor(crtc, obj, gpu_addr);

	if (hot_x != amdgpu_crtc->cursor_hot_x ||
	    hot_y != amdgpu_crtc->cursor_hot_y) {
		int x, y;

		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;

		dce_v8_0_cursor_move_locked(crtc, x, y);

		amdgpu_crtc->cursor_hot_x = hot_x;
		amdgpu_crtc->cursor_hot_y = hot_y;
	}

	dce_v8_0_show_cursor(crtc);
	dce_v8_0_lock_cursor(crtc, false);

unpin:
	if (amdgpu_crtc->cursor_bo) {
		robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
		ret = amdgpu_bo_reserve(robj, false);
		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
		ret = amdgpu_bo_reserve(aobj, false);
		if (likely(ret == 0)) {
			amdgpu_bo_unpin(robj);
			amdgpu_bo_unreserve(robj);
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;
	return 0;
fail:
	drm_gem_object_unreference_unlocked(obj);
}

	return ret;
static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (amdgpu_crtc->cursor_bo) {
		dce_v8_0_lock_cursor(crtc, true);

		dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
					    amdgpu_crtc->cursor_y);

		dce_v8_0_show_cursor(crtc);

		dce_v8_0_lock_cursor(crtc, false);
	}
}
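
The hotspot handling in dce_v8_0_crtc_cursor_set2() above shifts the programmed top-left position whenever userspace hands in a new hotspot, so the visible cursor pixel stays put. A minimal userspace sketch of that math (illustrative names, not kernel API):

/* hotspot compensation: keep the hotspot on the same screen pixel */
#include <stdio.h>

struct cursor_state {
	int x, y;          /* current top-left position */
	int hot_x, hot_y;  /* current hotspot inside the cursor image */
};

static void cursor_update_hotspot(struct cursor_state *c, int hot_x, int hot_y)
{
	if (hot_x != c->hot_x || hot_y != c->hot_y) {
		/* same adjustment as the kernel code: old hotspot minus new */
		c->x += c->hot_x - hot_x;
		c->y += c->hot_y - hot_y;
		c->hot_x = hot_x;
		c->hot_y = hot_y;
	}
}

int main(void)
{
	struct cursor_state c = { .x = 100, .y = 80, .hot_x = 0, .hot_y = 0 };

	cursor_update_hotspot(&c, 8, 8);
	printf("cursor now at (%d, %d)\n", c.x, c.y); /* prints (92, 72) */
	return 0;
}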

static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,

@@ -2571,7 +2584,7 @@ static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
}

static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
	.cursor_set = dce_v8_0_crtc_cursor_set,
	.cursor_set2 = dce_v8_0_crtc_cursor_set2,
	.cursor_move = dce_v8_0_crtc_cursor_move,
	.gamma_set = dce_v8_0_crtc_gamma_set,
	.set_config = amdgpu_crtc_set_config,

@@ -2712,6 +2725,7 @@ static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
	dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
	amdgpu_atombios_crtc_scaler_setup(crtc);
	dce_v8_0_cursor_reset(crtc);
	/* update the hw version for dpm */
	amdgpu_crtc->hw_mode = *adjusted_mode;

@@ -2979,22 +2993,18 @@ static int dce_v8_0_suspend(void *handle)

	amdgpu_atombios_scratch_regs_save(adev);

	dce_v8_0_hpd_fini(adev);

	dce_v8_0_pageflip_interrupt_fini(adev);

	return 0;
	return dce_v8_0_hw_fini(handle);
}

static int dce_v8_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = dce_v8_0_hw_init(handle);

	amdgpu_atombios_scratch_regs_restore(adev);

	/* init dig PHYs, disp eng pll */
	amdgpu_atombios_encoder_init_dig(adev);
	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
	/* turn on the BL */
	if (adev->mode_info.bl_encoder) {
		u8 bl_level = amdgpu_display_backlight_get_level(adev,

@@ -3003,12 +3013,7 @@ static int dce_v8_0_resume(void *handle)
							 bl_level);
	}

	/* initialize hpd */
	dce_v8_0_hpd_init(adev);

	dce_v8_0_pageflip_interrupt_init(adev);

	return 0;
	return ret;
}

static bool dce_v8_0_is_idle(void *handle)

@@ -3301,37 +3306,20 @@ static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
						 unsigned type,
						 enum amdgpu_interrupt_state state)
{
	u32 reg, reg_block;
	/* now deal with page flip IRQ */
	switch (type) {
	case AMDGPU_PAGEFLIP_IRQ_D1:
		reg_block = CRTC0_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D2:
		reg_block = CRTC1_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D3:
		reg_block = CRTC2_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D4:
		reg_block = CRTC3_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D5:
		reg_block = CRTC4_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D6:
		reg_block = CRTC5_REGISTER_OFFSET;
		break;
	default:
		DRM_ERROR("invalid pageflip crtc %d\n", type);
		return -EINVAL;
	u32 reg;

	if (type >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", type);
		return -EINVAL;
	}

	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block);
	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
	if (state == AMDGPU_IRQ_STATE_DISABLE)
		WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
	else
		WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);

	return 0;
}
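
The hunk above replaces a per-CRTC switch with an index into a crtc_offsets[] table plus a bound check against num_crtc. A standalone sketch of that table-driven addressing (the offset values below are illustrative, not the real CRTCn_REGISTER_OFFSET constants):

#include <stdio.h>

enum { NUM_CRTC = 6 };

static const unsigned crtc_offsets[NUM_CRTC] = {
	0x0000, 0x0200, 0x0400, 0x0600, 0x0800, 0x0a00,
};

/* compute base + per-CRTC offset, rejecting out-of-range indices */
static int pageflip_reg_addr(unsigned base, unsigned type, unsigned *addr)
{
	if (type >= NUM_CRTC)
		return -1;		/* invalid pageflip crtc */
	*addr = base + crtc_offsets[type];
	return 0;
}

int main(void)
{
	unsigned addr;

	if (!pageflip_reg_addr(0x1000 /* illustrative register base */, 3, &addr))
		printf("register at 0x%x\n", addr); /* 0x1600 */
	return 0;
}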

@@ -3340,7 +3328,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	int reg_block;
	unsigned long flags;
	unsigned crtc_id;
	struct amdgpu_crtc *amdgpu_crtc;

@@ -3349,33 +3336,15 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
	crtc_id = (entry->src_id - 8) >> 1;
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	/* ack the interrupt */
	switch(crtc_id){
	case AMDGPU_PAGEFLIP_IRQ_D1:
		reg_block = CRTC0_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D2:
		reg_block = CRTC1_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D3:
		reg_block = CRTC2_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D4:
		reg_block = CRTC3_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D5:
		reg_block = CRTC4_REGISTER_OFFSET;
		break;
	case AMDGPU_PAGEFLIP_IRQ_D6:
		reg_block = CRTC5_REGISTER_OFFSET;
		break;
	default:
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	if (crtc_id >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}

	if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
		WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);

	/* IRQ could occur when in initial stage */
	if (amdgpu_crtc == NULL)

@@ -5542,24 +5542,6 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
	.set_powergating_state = gfx_v7_0_set_powergating_state,
};

/**
 * gfx_v7_0_ring_is_lockup - check if the 3D engine is locked up
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 *
 * Check if the 3D engine is locked up (CIK).
 * Returns true if the engine is locked, false if not.
 */
static bool gfx_v7_0_ring_is_lockup(struct amdgpu_ring *ring)
{
	if (gfx_v7_0_is_idle(ring->adev)) {
		amdgpu_ring_lockup_update(ring);
		return false;
	}
	return amdgpu_ring_test_lockup(ring);
}

static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
	.get_rptr = gfx_v7_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,

@@ -5573,7 +5555,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
	.test_ring = gfx_v7_0_ring_test_ring,
	.test_ib = gfx_v7_0_ring_test_ib,
	.is_lockup = gfx_v7_0_ring_is_lockup,
	.insert_nop = amdgpu_ring_insert_nop,
};

@@ -5590,7 +5571,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
	.test_ring = gfx_v7_0_ring_test_ring,
	.test_ib = gfx_v7_0_ring_test_ib,
	.is_lockup = gfx_v7_0_ring_is_lockup,
	.insert_nop = amdgpu_ring_insert_nop,
};
@@ -73,6 +73,12 @@ MODULE_FIRMWARE("amdgpu/carrizo_mec.bin");
MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin");
MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin");

MODULE_FIRMWARE("amdgpu/stoney_ce.bin");
MODULE_FIRMWARE("amdgpu/stoney_pfp.bin");
MODULE_FIRMWARE("amdgpu/stoney_me.bin");
MODULE_FIRMWARE("amdgpu/stoney_mec.bin");
MODULE_FIRMWARE("amdgpu/stoney_rlc.bin");

MODULE_FIRMWARE("amdgpu/tonga_ce.bin");
MODULE_FIRMWARE("amdgpu/tonga_pfp.bin");
MODULE_FIRMWARE("amdgpu/tonga_me.bin");

@@ -229,11 +235,13 @@ static const u32 fiji_golden_common_all[] =
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};

static const u32 golden_settings_fiji_a10[] =

@@ -241,24 +249,26 @@ static const u32 golden_settings_fiji_a10[] =
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x00000100,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf30fff7f,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x7d6cf5e4,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x3928b1a0,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffc0,
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,

@@ -286,6 +296,10 @@ static const u32 fiji_mgcg_cgcg_init[] =
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

@@ -493,6 +507,42 @@ static const u32 cz_mgcg_cgcg_init[] =
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 stoney_golden_settings_a11[] =
{
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
};

static const u32 stoney_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000000,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
	mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
	mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
};
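
These "golden register" tables are flat triplets of { register, and-mask, or-value }. A sketch of how such a sequence is applied, modelled on amdgpu's register-sequence helper (the RREG32/WREG32 stand-ins and the sample offset below are illustrative, not the real register map):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[0x10000];	/* fake register file */

static uint32_t RREG32(uint32_t off) { return regs[off]; }
static void WREG32(uint32_t off, uint32_t v) { regs[off] = v; }

/* apply { reg, and_mask, or_val } triplets; a full mask means overwrite */
static void program_register_sequence(const uint32_t *seq, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i += 3) {
		uint32_t reg = seq[i], and_mask = seq[i + 1], or_val = seq[i + 2];
		uint32_t tmp;

		if (and_mask == 0xffffffff)
			tmp = or_val;
		else
			tmp = (RREG32(reg) & ~and_mask) | or_val;
		WREG32(reg, tmp);
	}
}

int main(void)
{
	static const uint32_t seq[] = { 0x3508, 0xffffffff, 0x0020003f };

	program_register_sequence(seq, sizeof(seq) / sizeof(seq[0]));
	printf("0x%08x\n", RREG32(0x3508)); /* 0x0020003f */
	return 0;
}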

static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);

@@ -545,6 +595,17 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
					   cz_golden_common_all,
					   (const u32)ARRAY_SIZE(cz_golden_common_all));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 stoney_golden_settings_a11,
						 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
		amdgpu_program_register_sequence(adev,
						 stoney_golden_common_all,
						 (const u32)ARRAY_SIZE(stoney_golden_common_all));
		break;
	default:
		break;
	}

@@ -691,6 +752,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_STONEY:
		chip_name = "stoney";
		break;
	default:
		BUG();
	}

@@ -748,21 +812,23 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version = le32_to_cpu(
			cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version = le32_to_cpu(
			cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	if (adev->asic_type != CHIP_STONEY) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
		err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
		if (!err) {
			err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
			if (err)
				goto out;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)
				adev->gfx.mec2_fw->data;
			adev->gfx.mec2_fw_version =
				le32_to_cpu(cp_hdr->header.ucode_version);
			adev->gfx.mec2_feature_version =
				le32_to_cpu(cp_hdr->ucode_feature_version);
		} else {
			err = 0;
			adev->gfx.mec2_fw = NULL;
		}
	}
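
The MEC2 branch above treats the firmware as optional: a failed request only clears the pointer and continues instead of failing device init. A hedged userspace model of that pattern (file name and helper names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct fw_blob {
	void *data;
};

/* stand-in for request_firmware(): 0 on success, nonzero if missing */
static int request_fw(struct fw_blob **fw, const char *name)
{
	FILE *f = fopen(name, "rb");

	if (!f)
		return -1;
	fclose(f);
	*fw = calloc(1, sizeof(**fw));
	return *fw ? 0 : -1;
}

int main(void)
{
	struct fw_blob *mec2_fw = NULL;
	int err = request_fw(&mec2_fw, "amdgpu/stoney_mec2.bin");

	if (err) {
		/* optional firmware: swallow the error, run without MEC2 */
		err = 0;
		mec2_fw = NULL;
	}
	printf("MEC2 %s\n", mec2_fw ? "loaded" : "not available");
	free(mec2_fw);
	return err;
}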

	if (adev->firmware.smu_load) {

@@ -903,6 +969,232 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
	return 0;
}

static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
	u32 tmp;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_cu_per_sh = 6;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_FIJI:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 16;
		adev->gfx.config.max_cu_per_sh = 16;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 4;
		adev->gfx.config.max_texture_channel_caches = 8;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_TONGA:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 8;
		adev->gfx.config.max_cu_per_sh = 8;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 8;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_CARRIZO:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;

		switch (adev->pdev->revision) {
		case 0xc4:
		case 0x84:
		case 0xc8:
		case 0xcc:
		case 0xe1:
		case 0xe3:
			/* B10 */
			adev->gfx.config.max_cu_per_sh = 8;
			break;
		case 0xc5:
		case 0x81:
		case 0x85:
		case 0xc9:
		case 0xcd:
		case 0xe2:
		case 0xe4:
			/* B8 */
			adev->gfx.config.max_cu_per_sh = 6;
			break;
		case 0xc6:
		case 0xca:
		case 0xce:
		case 0x88:
			/* B6 */
			adev->gfx.config.max_cu_per_sh = 6;
			break;
		case 0xc7:
		case 0x87:
		case 0xcb:
		case 0xe5:
		case 0x89:
		default:
			/* B4 */
			adev->gfx.config.max_cu_per_sh = 4;
			break;
		}

		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_STONEY:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 1;

		switch (adev->pdev->revision) {
		case 0xc0:
		case 0xc1:
		case 0xc2:
		case 0xc4:
		case 0xc8:
		case 0xc9:
			adev->gfx.config.max_cu_per_sh = 3;
			break;
		case 0xd0:
		case 0xd1:
		case 0xd2:
		default:
			adev->gfx.config.max_cu_per_sh = 2;
			break;
		}

		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		adev->gfx.config.max_shader_engines = 2;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;

	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
	adev->gfx.config.mem_max_burst_length_bytes = 256;
	if (adev->flags & AMD_IS_APU) {
		/* Get memory bank mapping mode. */
		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		/* Validate settings in case only one DIMM installed. */
		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
			dimm00_addr_map = 0;
		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
			dimm01_addr_map = 0;
		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
			dimm10_addr_map = 0;
		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
			dimm11_addr_map = 0;

		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
		/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be larger one. */
		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
			adev->gfx.config.mem_row_size_in_kb = 2;
		else
			adev->gfx.config.mem_row_size_in_kb = 1;
	} else {
		tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
		if (adev->gfx.config.mem_row_size_in_kb > 4)
			adev->gfx.config.mem_row_size_in_kb = 4;
	}
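
A standalone sketch of the APU row-size decision above: address-map encodings 0, 3, 4, and anything above 12 are treated as "no DIMM", and encoding 11 (the 8GB mapping) forces a 2KB row size, otherwise 1KB (helper names are illustrative):

#include <stdio.h>

static unsigned validate_map(unsigned m)
{
	if (m == 0 || m == 3 || m == 4 || m > 12)
		return 0;	/* invalid or absent DIMM */
	return m;
}

static unsigned mem_row_size_in_kb(unsigned d00, unsigned d01,
				   unsigned d10, unsigned d11)
{
	d00 = validate_map(d00);
	d01 = validate_map(d01);
	d10 = validate_map(d10);
	d11 = validate_map(d11);

	/* any 8GB mapping (encoding 11) forces the larger row size */
	if (d00 == 11 || d01 == 11 || d10 == 11 || d11 == 11)
		return 2;
	return 1;
}

int main(void)
{
	printf("%u KB\n", mem_row_size_in_kb(11, 0, 5, 0)); /* 2 KB */
	printf("%u KB\n", mem_row_size_in_kb(5, 5, 0, 0));  /* 1 KB */
	return 0;
}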

	adev->gfx.config.shader_engine_tile_size = 32;
	adev->gfx.config.num_gpus = 1;
	adev->gfx.config.multi_gpu_tile_size = 64;

	/* fix up row size */
	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
		break;
	case 2:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
		break;
	case 4:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
		break;
	}
	adev->gfx.config.gb_addr_config = gb_addr_config;
}

static int gfx_v8_0_sw_init(void *handle)
{
	int i, r;

@@ -1010,6 +1302,8 @@ static int gfx_v8_0_sw_init(void *handle)

	adev->gfx.ce_ram_size = 0x8000;

	gfx_v8_0_gpu_early_init(adev);

	return 0;
}

@@ -1610,6 +1904,273 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
		}
		break;
	case CHIP_STONEY:
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 1:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 2:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 3:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 4:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 5:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 6:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 8:
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 PIPE_CONFIG(ADDR_SURF_P2));
				break;
			case 9:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 10:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 11:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				break;
			case 13:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 14:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 15:
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 16:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				break;
			case 18:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 19:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 20:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 21:
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 22:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 24:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 25:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 26:
				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
				break;
			case 27:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 28:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 29:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 PIPE_CONFIG(ADDR_SURF_P2) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
				break;
			case 7:
			case 12:
			case 17:
			case 23:
				/* unused idx */
				continue;
			default:
				gb_tile_moden = 0;
				break;
			};
			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
		}
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 1:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 2:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 3:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 4:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 5:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 6:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 8:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 9:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 10:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 11:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 12:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 13:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 14:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 7:
				/* unused idx */
				continue;
			default:
				gb_tile_moden = 0;
				break;
			};
			adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
		}
		break;
	case CHIP_CARRIZO:
	default:
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {

@@ -2043,203 +2604,23 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)

static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
	u32 tmp;
	int i;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_cu_per_sh = 6;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_FIJI:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 16;
		adev->gfx.config.max_cu_per_sh = 16;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 4;
		adev->gfx.config.max_texture_channel_caches = 8;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_TONGA:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 8;
		adev->gfx.config.max_cu_per_sh = 8;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 8;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_CARRIZO:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;

		switch (adev->pdev->revision) {
		case 0xc4:
		case 0x84:
		case 0xc8:
		case 0xcc:
			/* B10 */
			adev->gfx.config.max_cu_per_sh = 8;
			break;
		case 0xc5:
		case 0x81:
		case 0x85:
		case 0xc9:
		case 0xcd:
			/* B8 */
			adev->gfx.config.max_cu_per_sh = 6;
			break;
		case 0xc6:
		case 0xca:
		case 0xce:
			/* B6 */
			adev->gfx.config.max_cu_per_sh = 6;
			break;
		case 0xc7:
		case 0x87:
		case 0xcb:
		default:
			/* B4 */
			adev->gfx.config.max_cu_per_sh = 4;
			break;
		}

		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		adev->gfx.config.max_shader_engines = 2;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	tmp = RREG32(mmGRBM_CNTL);
	tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
	WREG32(mmGRBM_CNTL, tmp);

	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;

	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
	adev->gfx.config.mem_max_burst_length_bytes = 256;
	if (adev->flags & AMD_IS_APU) {
		/* Get memory bank mapping mode. */
		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		/* Validate settings in case only one DIMM installed. */
		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
			dimm00_addr_map = 0;
		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
			dimm01_addr_map = 0;
		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
			dimm10_addr_map = 0;
		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
			dimm11_addr_map = 0;

		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
		/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be larger one. */
		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
			adev->gfx.config.mem_row_size_in_kb = 2;
		else
			adev->gfx.config.mem_row_size_in_kb = 1;
	} else {
		tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
		if (adev->gfx.config.mem_row_size_in_kb > 4)
			adev->gfx.config.mem_row_size_in_kb = 4;
	}

	adev->gfx.config.shader_engine_tile_size = 32;
	adev->gfx.config.num_gpus = 1;
	adev->gfx.config.multi_gpu_tile_size = 64;

	/* fix up row size */
	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
		break;
	case 2:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
		break;
	case 4:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
		break;
	}
	adev->gfx.config.gb_addr_config = gb_addr_config;

	WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
	WREG32(mmHDP_ADDR_CONFIG, gb_addr_config);
	WREG32(mmDMIF_ADDR_CALC, gb_addr_config);
	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
	WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET,
	       gb_addr_config & 0x70);
	       adev->gfx.config.gb_addr_config & 0x70);
	WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET,
	       gb_addr_config & 0x70);
	WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
	       adev->gfx.config.gb_addr_config & 0x70);
	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	gfx_v8_0_tiling_mode_table_init(adev);

@@ -2256,13 +2637,13 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
		if (i == 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			WREG32(mmSH_MEM_CONFIG, tmp);
		} else {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_NC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			WREG32(mmSH_MEM_CONFIG, tmp);
		}

@@ -2377,7 +2758,7 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
	WREG32(mmRLC_CNTL, tmp);

	/* carrizo do enable cp interrupt after cp inited */
	if (adev->asic_type != CHIP_CARRIZO)
	if (!(adev->flags & AMD_IS_APU))
		gfx_v8_0_enable_gui_idle_interrupt(adev, true);

	udelay(50);

@@ -2599,6 +2980,10 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
		amdgpu_ring_write(ring, 0x00000002);
		amdgpu_ring_write(ring, 0x00000000);
		break;
	case CHIP_STONEY:
		amdgpu_ring_write(ring, 0x00000000);
		amdgpu_ring_write(ring, 0x00000000);
		break;
	default:
		BUG();
	}

@@ -3233,7 +3618,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
		/* enable the doorbell if requested */
		if (use_doorbell) {
			if ((adev->asic_type == CHIP_CARRIZO) ||
			    (adev->asic_type == CHIP_FIJI)) {
			    (adev->asic_type == CHIP_FIJI) ||
			    (adev->asic_type == CHIP_STONEY)) {
				WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
				       AMDGPU_DOORBELL_KIQ << 2);
				WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,

@@ -3305,7 +3691,7 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
{
	int r;

	if (adev->asic_type != CHIP_CARRIZO)
	if (!(adev->flags & AMD_IS_APU))
		gfx_v8_0_enable_gui_idle_interrupt(adev, false);

	if (!adev->firmware.smu_load) {

@@ -4068,15 +4454,6 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
	}
}

static bool gfx_v8_0_ring_is_lockup(struct amdgpu_ring *ring)
{
	if (gfx_v8_0_is_idle(ring->adev)) {
		amdgpu_ring_lockup_update(ring);
		return false;
	}
	return amdgpu_ring_test_lockup(ring);
}

static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs];

@@ -4107,6 +4484,7 @@ static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EOP_TC_WB_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));

@@ -4357,7 +4735,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.is_lockup = gfx_v8_0_ring_is_lockup,
	.insert_nop = amdgpu_ring_insert_nop,
};

@@ -4374,7 +4751,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.is_lockup = gfx_v8_0_ring_is_lockup,
	.insert_nop = amdgpu_ring_insert_nop,
};
@@ -435,6 +435,33 @@ static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
	return 0;
}

/**
 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
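
The helper above flips several fault-control bits in one register with REG_SET_FIELD(). A minimal model of that mask/shift pattern, with a made-up field layout (the real masks and shifts come from generated register headers):

#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK  0x00000030u   /* illustrative field bits */
#define FIELD_SHIFT 4

/* clear the field, then or in the shifted value */
static uint32_t reg_set_field(uint32_t reg, uint32_t mask,
			      unsigned shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t tmp = 0xffffffffu;

	tmp = reg_set_field(tmp, FIELD_MASK, FIELD_SHIFT, 0); /* clear field */
	printf("0x%08x\n", tmp); /* 0xffffffcf */
	return 0;
}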

/**
 * gmc_v7_0_gart_enable - gart enable
 *

@@ -523,15 +550,13 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v7_0_set_fault_enable_default(adev, false);
	else
		gmc_v7_0_set_fault_enable_default(adev, true);

	if (adev->asic_type == CHIP_KAVERI) {
		tmp = RREG32(mmCHUB_CONTROL);

@@ -940,7 +965,7 @@ static int gmc_v7_0_sw_fini(void *handle)

	if (adev->vm_manager.enabled) {
		for (i = 0; i < AMDGPU_NUM_VM; ++i)
			amdgpu_fence_unref(&adev->vm_manager.active[i]);
			fence_put(adev->vm_manager.active[i]);
		gmc_v7_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}

@@ -990,7 +1015,7 @@ static int gmc_v7_0_suspend(void *handle)

	if (adev->vm_manager.enabled) {
		for (i = 0; i < AMDGPU_NUM_VM; ++i)
			amdgpu_fence_unref(&adev->vm_manager.active[i]);
			fence_put(adev->vm_manager.active[i]);
		gmc_v7_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}

@@ -1268,6 +1293,9 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v7_0_set_fault_enable_default(adev, false);

	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
@@ -93,6 +93,12 @@ static const u32 cz_mgcg_cgcg_init[] =
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};


static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {

@@ -125,6 +131,11 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	default:
		break;
	}

@@ -228,6 +239,7 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
		chip_name = "fiji";
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		return 0;
	default: BUG();
	}

@@ -549,6 +561,35 @@ static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
	return 0;
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *

@@ -663,6 +704,10 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",

@@ -939,7 +984,7 @@ static int gmc_v8_0_sw_fini(void *handle)

	if (adev->vm_manager.enabled) {
		for (i = 0; i < AMDGPU_NUM_VM; ++i)
			amdgpu_fence_unref(&adev->vm_manager.active[i]);
			fence_put(adev->vm_manager.active[i]);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}

@@ -991,7 +1036,7 @@ static int gmc_v8_0_suspend(void *handle)

	if (adev->vm_manager.enabled) {
		for (i = 0; i < AMDGPU_NUM_VM; ++i)
			amdgpu_fence_unref(&adev->vm_manager.active[i]);
			fence_put(adev->vm_manager.active[i]);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}

@@ -1268,6 +1313,9 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
@@ -118,7 +118,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err, i;
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

@@ -132,27 +132,27 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
	default: BUG();
	}

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma[i].fw);
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma[i].feature_version >= 20)
			adev->sdma[i].burst_nop = true;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;

		if (adev->firmware.smu_load) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma[i].fw;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

@@ -164,9 +164,9 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
		printk(KERN_ERR
		       "sdma_v2_4: Failed to load firmware \"%s\"\n",
		       fw_name);
		for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
			release_firmware(adev->sdma[i].fw);
			adev->sdma[i].fw = NULL;
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
|
||||
|
@ -199,7 +199,7 @@ static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
|
|||
static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
|
||||
int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
|
||||
u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
|
||||
|
||||
return wptr;
|
||||
|
@ -215,14 +215,14 @@ static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
|
|||
static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
|
||||
{
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
|
||||
int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
|
||||
|
||||
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
|
||||
}
|
||||
|
||||
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
|
||||
{
|
||||
struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
|
||||
struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < count; i++)
|
||||
|
@ -284,7 +284,7 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
|
|||
{
|
||||
u32 ref_and_mask = 0;
|
||||
|
||||
if (ring == &ring->adev->sdma[0].ring)
|
||||
if (ring == &ring->adev->sdma.instance[0].ring)
|
||||
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
|
||||
else
|
||||
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
|
||||
|
@ -368,8 +368,8 @@ static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring,
|
|||
*/
|
||||
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
|
||||
struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
|
||||
struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
|
||||
struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
|
||||
u32 rb_cntl, ib_cntl;
|
||||
int i;
|
||||
|
||||
|
@ -377,7 +377,7 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
|
|||
(adev->mman.buffer_funcs_ring == sdma1))
|
||||
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
|
||||
|
||||
for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
|
||||
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
|
||||
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
|
||||
|
@ -419,7 +419,7 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
|
|||
sdma_v2_4_rlc_stop(adev);
|
||||
}
|
||||
|
||||
for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
|
||||
if (enable)
|
||||
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
|
||||
|
@ -445,8 +445,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
|
|||
u32 wb_offset;
|
||||
int i, j, r;
|
||||
|
||||
for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
|
||||
ring = &adev->sdma[i].ring;
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
ring = &adev->sdma.instance[i].ring;
|
||||
wb_offset = (ring->rptr_offs * 4);
|
||||
|
||||
mutex_lock(&adev->srbm_mutex);
|
||||
|
@ -545,29 +545,23 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
|
|||
const __le32 *fw_data;
|
||||
u32 fw_size;
|
||||
int i, j;
|
||||
bool smc_loads_fw = false; /* XXX fix me */
|
||||
|
||||
if (!adev->sdma[0].fw || !adev->sdma[1].fw)
|
||||
return -EINVAL;
|
||||
|
||||
/* halt the MEs */
|
||||
sdma_v2_4_enable(adev, false);
|
||||
|
||||
if (smc_loads_fw) {
|
||||
/* XXX query SMC for fw load complete */
|
||||
} else {
|
||||
for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
|
||||
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
|
||||
amdgpu_ucode_print_sdma_hdr(&hdr->header);
|
||||
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
|
||||
fw_data = (const __le32 *)
|
||||
(adev->sdma[i].fw->data +
|
||||
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
|
||||
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
|
||||
for (j = 0; j < fw_size; j++)
|
||||
WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
|
||||
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
|
||||
}
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
if (!adev->sdma.instance[i].fw)
|
||||
return -EINVAL;
|
||||
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
|
||||
amdgpu_ucode_print_sdma_hdr(&hdr->header);
|
||||
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
|
||||
fw_data = (const __le32 *)
|
||||
(adev->sdma.instance[i].fw->data +
|
||||
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
|
||||
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
|
||||
for (j = 0; j < fw_size; j++)
|
||||
WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
|
||||
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -894,7 +888,7 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
|
|||
*/
|
||||
static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
|
||||
{
|
||||
struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
|
||||
struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
|
||||
u32 pad_count;
|
||||
int i;
|
||||
|
||||
|
@ -952,6 +946,8 @@ static int sdma_v2_4_early_init(void *handle)
|
|||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
adev->sdma.num_instances = SDMA_MAX_INSTANCE;
|
||||
|
||||
sdma_v2_4_set_ring_funcs(adev);
|
||||
sdma_v2_4_set_buffer_funcs(adev);
|
||||
sdma_v2_4_set_vm_pte_funcs(adev);
|
||||
|
@ -963,21 +959,21 @@ static int sdma_v2_4_early_init(void *handle)
|
|||
static int sdma_v2_4_sw_init(void *handle)
|
||||
{
|
||||
struct amdgpu_ring *ring;
|
||||
int r;
|
||||
int r, i;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
/* SDMA trap event */
|
||||
r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
|
||||
r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* SDMA Privileged inst */
|
||||
r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
|
||||
r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/* SDMA Privileged inst */
|
||||
r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
|
||||
r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
@ -987,31 +983,20 @@ static int sdma_v2_4_sw_init(void *handle)
|
|||
return r;
|
||||
}
|
||||
|
||||
ring = &adev->sdma[0].ring;
|
||||
ring->ring_obj = NULL;
|
||||
ring->use_doorbell = false;
|
||||
|
||||
ring = &adev->sdma[1].ring;
|
||||
ring->ring_obj = NULL;
|
||||
ring->use_doorbell = false;
|
||||
|
||||
ring = &adev->sdma[0].ring;
|
||||
sprintf(ring->name, "sdma0");
|
||||
r = amdgpu_ring_init(adev, ring, 256 * 1024,
|
||||
SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
|
||||
&adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
|
||||
AMDGPU_RING_TYPE_SDMA);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
ring = &adev->sdma[1].ring;
|
||||
sprintf(ring->name, "sdma1");
|
||||
r = amdgpu_ring_init(adev, ring, 256 * 1024,
|
||||
SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
|
||||
&adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
|
||||
AMDGPU_RING_TYPE_SDMA);
|
||||
if (r)
|
||||
return r;
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
ring = &adev->sdma.instance[i].ring;
|
||||
ring->ring_obj = NULL;
|
||||
ring->use_doorbell = false;
|
||||
sprintf(ring->name, "sdma%d", i);
|
||||
r = amdgpu_ring_init(adev, ring, 256 * 1024,
|
||||
SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
|
||||
&adev->sdma.trap_irq,
|
||||
(i == 0) ?
|
||||
AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
|
||||
AMDGPU_RING_TYPE_SDMA);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
|
@ -1019,9 +1004,10 @@ static int sdma_v2_4_sw_init(void *handle)
|
|||
static int sdma_v2_4_sw_fini(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
int i;
|
||||
|
||||
amdgpu_ring_fini(&adev->sdma[0].ring);
|
||||
amdgpu_ring_fini(&adev->sdma[1].ring);
|
||||
for (i = 0; i < adev->sdma.num_instances; i++)
|
||||
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1100,7 +1086,7 @@ static void sdma_v2_4_print_status(void *handle)
|
|||
dev_info(adev->dev, "VI SDMA registers\n");
|
||||
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
|
||||
RREG32(mmSRBM_STATUS2));
|
||||
for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n",
|
||||
i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
|
||||
dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n",
|
||||
|
@ -1243,7 +1229,7 @@ static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
|
|||
case 0:
|
||||
switch (queue_id) {
|
||||
case 0:
|
||||
amdgpu_fence_process(&adev->sdma[0].ring);
|
||||
amdgpu_fence_process(&adev->sdma.instance[0].ring);
|
||||
break;
|
||||
case 1:
|
||||
/* XXX compute */
|
||||
|
@ -1256,7 +1242,7 @@ static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
|
|||
case 1:
|
||||
switch (queue_id) {
|
||||
case 0:
|
||||
amdgpu_fence_process(&adev->sdma[1].ring);
|
||||
amdgpu_fence_process(&adev->sdma.instance[1].ring);
|
||||
break;
|
||||
case 1:
|
||||
/* XXX compute */
|
||||
|
@ -1309,24 +1295,6 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
|
|||
.set_powergating_state = sdma_v2_4_set_powergating_state,
|
||||
};
|
||||
|
||||
/**
|
||||
* sdma_v2_4_ring_is_lockup - Check if the DMA engine is locked up
|
||||
*
|
||||
* @ring: amdgpu_ring structure holding ring information
|
||||
*
|
||||
* Check if the async DMA engine is locked up (VI).
|
||||
* Returns true if the engine appears to be locked up, false if not.
|
||||
*/
|
||||
static bool sdma_v2_4_ring_is_lockup(struct amdgpu_ring *ring)
|
||||
{
|
||||
|
||||
if (sdma_v2_4_is_idle(ring->adev)) {
|
||||
amdgpu_ring_lockup_update(ring);
|
||||
return false;
|
||||
}
|
||||
return amdgpu_ring_test_lockup(ring);
|
||||
}
|
||||
|
||||
static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
|
||||
.get_rptr = sdma_v2_4_ring_get_rptr,
|
||||
.get_wptr = sdma_v2_4_ring_get_wptr,
|
||||
|
@ -1339,14 +1307,15 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
|
|||
.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
|
||||
.test_ring = sdma_v2_4_ring_test_ring,
|
||||
.test_ib = sdma_v2_4_ring_test_ib,
|
||||
.is_lockup = sdma_v2_4_ring_is_lockup,
|
||||
.insert_nop = sdma_v2_4_ring_insert_nop,
|
||||
};
|
||||
|
||||
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
adev->sdma[0].ring.funcs = &sdma_v2_4_ring_funcs;
|
||||
adev->sdma[1].ring.funcs = &sdma_v2_4_ring_funcs;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < adev->sdma.num_instances; i++)
|
||||
adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
|
||||
}
|
||||
|
||||
static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
|
||||
|
@ -1360,9 +1329,9 @@ static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
|
|||
|
||||
static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
|
||||
adev->sdma_trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
|
||||
adev->sdma_illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
|
||||
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
|
||||
adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
|
||||
adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1428,7 +1397,7 @@ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
|
|||
{
|
||||
if (adev->mman.buffer_funcs == NULL) {
|
||||
adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
|
||||
adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
|
||||
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1443,7 +1412,7 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
|
|||
{
|
||||
if (adev->vm_manager.vm_pte_funcs == NULL) {
|
||||
adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
|
||||
adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
|
||||
adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
|
||||
adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
|
||||
}
|
||||
}
|
||||
|
|
|
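The recurring rewrite in the file above (adev->sdma[i] becoming adev->sdma.instance[i], and adev->sdma_trap_irq becoming adev->sdma.trap_irq) implies the scattered per-engine fields were folded into one container on the device. A sketch of the assumed layout, with field names taken from the call sites in this diff; the authoritative definition lives in amdgpu.h and the macro name is an assumption:

/*
 * Sketch only: container implied by the usages above, not the
 * verbatim amdgpu.h definition.
 */
struct amdgpu_sdma_instance {
    /* per-engine firmware image and metadata */
    const struct firmware  *fw;
    uint32_t                fw_version;
    uint32_t                feature_version;
    bool                    burst_nop;
    struct amdgpu_ring      ring;
};

struct amdgpu_sdma {
    struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; /* assumed macro */
    struct amdgpu_irq_src       trap_irq;          /* shared by all engines */
    struct amdgpu_irq_src       illegal_inst_irq;  /* shared by all engines */
    int                         num_instances;     /* 1 on Stoney, else 2 */
};

Grouping the engines this way is what lets every loop below run to adev->sdma.num_instances instead of the compile-time SDMA_MAX_INSTANCE.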
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c

@ -55,6 +55,7 @@ MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
MODULE_FIRMWARE("amdgpu/stoney_sdma.bin");

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{

@ -122,6 +123,19 @@ static const u32 cz_mgcg_cgcg_init[] =
    mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 stoney_golden_settings_a11[] =
{
    mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
    mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
    mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
    mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
    mmSDMA0_CLK_CTRL, 0xffffffff, 0x00000100,
};

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
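The stoney tables just added are flat arrays of (register offset, mask, value) triples consumed by amdgpu_program_register_sequence() in the next hunk. A sketch of the assumed read-modify-write loop; the real helper lives in amdgpu_device.c, so treat this as an illustration of the table format rather than its verbatim body:

/*
 * Sketch: apply (offset, and_mask, or_value) triples with RMW writes,
 * mirroring how the golden-register tables above are laid out.
 */
static void program_register_sequence_sketch(struct amdgpu_device *adev,
                                             const u32 *regs, u32 count)
{
    u32 i, tmp;

    for (i = 0; i < count; i += 3) {
        u32 offset   = regs[i + 0];
        u32 and_mask = regs[i + 1];
        u32 or_value = regs[i + 2];

        if (and_mask == 0xffffffff) {
            tmp = or_value;          /* full overwrite */
        } else {
            tmp = RREG32(offset);
            tmp &= ~and_mask;        /* clear the masked bits */
            tmp |= or_value;         /* apply the golden value */
        }
        WREG32(offset, tmp);
    }
}

So, for example, the stoney_mgcg_cgcg_init entry writes 0x00000100 over the whole of mmSDMA0_CLK_CTRL, while the power-control entry only touches the bits in its 0x0003c800 mask.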
@ -166,6 +180,14 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
            cz_golden_settings_a11,
            (const u32)ARRAY_SIZE(cz_golden_settings_a11));
        break;
    case CHIP_STONEY:
        amdgpu_program_register_sequence(adev,
            stoney_mgcg_cgcg_init,
            (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
        amdgpu_program_register_sequence(adev,
            stoney_golden_settings_a11,
            (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
        break;
    default:
        break;
    }

@ -184,7 +206,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{
    const char *chip_name;
    char fw_name[30];
    int err, i;
    int err = 0, i;
    struct amdgpu_firmware_info *info = NULL;
    const struct common_firmware_header *header = NULL;
    const struct sdma_firmware_header_v1_0 *hdr;

@ -201,30 +223,33 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
    case CHIP_CARRIZO:
        chip_name = "carrizo";
        break;
    case CHIP_STONEY:
        chip_name = "stoney";
        break;
    default: BUG();
    }

    for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
    for (i = 0; i < adev->sdma.num_instances; i++) {
        if (i == 0)
            snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
        else
            snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
        err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
        err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
        if (err)
            goto out;
        err = amdgpu_ucode_validate(adev->sdma[i].fw);
        err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
        if (err)
            goto out;
        hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
        adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
        adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
        if (adev->sdma[i].feature_version >= 20)
            adev->sdma[i].burst_nop = true;
        hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
        adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
        adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
        if (adev->sdma.instance[i].feature_version >= 20)
            adev->sdma.instance[i].burst_nop = true;

        if (adev->firmware.smu_load) {
            info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
            info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
            info->fw = adev->sdma[i].fw;
            info->fw = adev->sdma.instance[i].fw;
            header = (const struct common_firmware_header *)info->fw->data;
            adev->firmware.fw_size +=
                ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

@ -235,9 +260,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
        printk(KERN_ERR
               "sdma_v3_0: Failed to load firmware \"%s\"\n",
               fw_name);
        for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
            release_firmware(adev->sdma[i].fw);
            adev->sdma[i].fw = NULL;
        for (i = 0; i < adev->sdma.num_instances; i++) {
            release_firmware(adev->sdma.instance[i].fw);
            adev->sdma.instance[i].fw = NULL;
        }
    }
    return err;

@ -276,7 +301,7 @@ static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
        /* XXX check if swapping is necessary on BE */
        wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
    } else {
        int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
        int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

        wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
    }

@ -300,7 +325,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
        adev->wb.wb[ring->wptr_offs] = ring->wptr << 2;
        WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
    } else {
        int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
        int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

        WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
    }

@ -308,7 +333,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)

static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
    struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
    struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
    int i;

    for (i = 0; i < count; i++)

@ -369,7 +394,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
    u32 ref_and_mask = 0;

    if (ring == &ring->adev->sdma[0].ring)
    if (ring == &ring->adev->sdma.instance[0].ring)
        ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
    else
        ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

@ -454,8 +479,8 @@ static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring,
 */
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
    struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
    struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
    struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
    struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
    u32 rb_cntl, ib_cntl;
    int i;

@ -463,7 +488,7 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
        (adev->mman.buffer_funcs_ring == sdma1))
        amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

    for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
    for (i = 0; i < adev->sdma.num_instances; i++) {
        rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
        WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

@ -500,7 +525,7 @@ static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
    u32 f32_cntl;
    int i;

    for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
    for (i = 0; i < adev->sdma.num_instances; i++) {
        f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
        if (enable)
            f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,

@ -530,7 +555,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
        sdma_v3_0_rlc_stop(adev);
    }

    for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
    for (i = 0; i < adev->sdma.num_instances; i++) {
        f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
        if (enable)
            f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);

@ -557,8 +582,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
    u32 doorbell;
    int i, j, r;

    for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
        ring = &adev->sdma[i].ring;
    for (i = 0; i < adev->sdma.num_instances; i++) {
        ring = &adev->sdma.instance[i].ring;
        wb_offset = (ring->rptr_offs * 4);

        mutex_lock(&adev->srbm_mutex);

@ -669,23 +694,22 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
    u32 fw_size;
    int i, j;

    if (!adev->sdma[0].fw || !adev->sdma[1].fw)
        return -EINVAL;

    /* halt the MEs */
    sdma_v3_0_enable(adev, false);

    for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
        hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
    for (i = 0; i < adev->sdma.num_instances; i++) {
        if (!adev->sdma.instance[i].fw)
            return -EINVAL;
        hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
        amdgpu_ucode_print_sdma_hdr(&hdr->header);
        fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
        fw_data = (const __le32 *)
            (adev->sdma[i].fw->data +
            (adev->sdma.instance[i].fw->data +
             le32_to_cpu(hdr->header.ucode_array_offset_bytes));
        WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
        for (j = 0; j < fw_size; j++)
            WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
        WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
        WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
    }

    return 0;

@ -701,21 +725,21 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
 */
static int sdma_v3_0_start(struct amdgpu_device *adev)
{
    int r;
    int r, i;

    if (!adev->firmware.smu_load) {
        r = sdma_v3_0_load_microcode(adev);
        if (r)
            return r;
    } else {
        r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
                        AMDGPU_UCODE_ID_SDMA0);
        if (r)
            return -EINVAL;
        r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
                        AMDGPU_UCODE_ID_SDMA1);
        if (r)
            return -EINVAL;
        for (i = 0; i < adev->sdma.num_instances; i++) {
            r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
                            (i == 0) ?
                            AMDGPU_UCODE_ID_SDMA0 :
                            AMDGPU_UCODE_ID_SDMA1);
            if (r)
                return -EINVAL;
        }
    }

    /* unhalt the MEs */

@ -1013,7 +1037,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
 */
static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
{
    struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
    struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
    u32 pad_count;
    int i;

@ -1071,6 +1095,15 @@ static int sdma_v3_0_early_init(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    switch (adev->asic_type) {
    case CHIP_STONEY:
        adev->sdma.num_instances = 1;
        break;
    default:
        adev->sdma.num_instances = SDMA_MAX_INSTANCE;
        break;
    }

    sdma_v3_0_set_ring_funcs(adev);
    sdma_v3_0_set_buffer_funcs(adev);
    sdma_v3_0_set_vm_pte_funcs(adev);
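With sdma_v3_0_early_init() now picking the engine count per ASIC (one engine on Stoney, SDMA_MAX_INSTANCE otherwise), everything that used to touch SDMA1 unconditionally has to be bounded by adev->sdma.num_instances; the vi.c hunks later in this pull guard their SDMA1 register reads the same way. A minimal sketch of the pattern, reusing names from this file:

/* Sketch: engine loops and SDMA1-only register accesses are bounded by
 * the runtime instance count instead of the compile-time maximum. */
static void dump_sdma_status_sketch(struct amdgpu_device *adev)
{
    int i;

    for (i = 0; i < adev->sdma.num_instances; i++)
        dev_info(adev->dev, "SDMA%d_STATUS_REG=0x%08X\n",
                 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
}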
@ -1082,21 +1115,21 @@ static int sdma_v3_0_early_init(void *handle)
static int sdma_v3_0_sw_init(void *handle)
{
    struct amdgpu_ring *ring;
    int r;
    int r, i;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    /* SDMA trap event */
    r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
    r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
    if (r)
        return r;

    /* SDMA Privileged inst */
    r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
    r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
    if (r)
        return r;

    /* SDMA Privileged inst */
    r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
    r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
    if (r)
        return r;

@ -1106,33 +1139,23 @@ static int sdma_v3_0_sw_init(void *handle)
        return r;
    }

    ring = &adev->sdma[0].ring;
    ring->ring_obj = NULL;
    ring->use_doorbell = true;
    ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE0;
    for (i = 0; i < adev->sdma.num_instances; i++) {
        ring = &adev->sdma.instance[i].ring;
        ring->ring_obj = NULL;
        ring->use_doorbell = true;
        ring->doorbell_index = (i == 0) ?
            AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;

    ring = &adev->sdma[1].ring;
    ring->ring_obj = NULL;
    ring->use_doorbell = true;
    ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE1;

    ring = &adev->sdma[0].ring;
    sprintf(ring->name, "sdma0");
    r = amdgpu_ring_init(adev, ring, 256 * 1024,
                         SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
                         &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
                         AMDGPU_RING_TYPE_SDMA);
    if (r)
        return r;

    ring = &adev->sdma[1].ring;
    sprintf(ring->name, "sdma1");
    r = amdgpu_ring_init(adev, ring, 256 * 1024,
                         SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
                         &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
                         AMDGPU_RING_TYPE_SDMA);
    if (r)
        return r;
        sprintf(ring->name, "sdma%d", i);
        r = amdgpu_ring_init(adev, ring, 256 * 1024,
                             SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
                             &adev->sdma.trap_irq,
                             (i == 0) ?
                             AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
                             AMDGPU_RING_TYPE_SDMA);
        if (r)
            return r;
    }

    return r;
}

@ -1140,9 +1163,10 @@ static int sdma_v3_0_sw_init(void *handle)
static int sdma_v3_0_sw_fini(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    int i;

    amdgpu_ring_fini(&adev->sdma[0].ring);
    amdgpu_ring_fini(&adev->sdma[1].ring);
    for (i = 0; i < adev->sdma.num_instances; i++)
        amdgpu_ring_fini(&adev->sdma.instance[i].ring);

    return 0;
}

@ -1222,7 +1246,7 @@ static void sdma_v3_0_print_status(void *handle)
    dev_info(adev->dev, "VI SDMA registers\n");
    dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
        RREG32(mmSRBM_STATUS2));
    for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
    for (i = 0; i < adev->sdma.num_instances; i++) {
        dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n",
            i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
        dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n",

@ -1367,7 +1391,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
    case 0:
        switch (queue_id) {
        case 0:
            amdgpu_fence_process(&adev->sdma[0].ring);
            amdgpu_fence_process(&adev->sdma.instance[0].ring);
            break;
        case 1:
            /* XXX compute */

@ -1380,7 +1404,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
    case 1:
        switch (queue_id) {
        case 0:
            amdgpu_fence_process(&adev->sdma[1].ring);
            amdgpu_fence_process(&adev->sdma.instance[1].ring);
            break;
        case 1:
            /* XXX compute */

@ -1432,24 +1456,6 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
    .set_powergating_state = sdma_v3_0_set_powergating_state,
};

/**
 * sdma_v3_0_ring_is_lockup - Check if the DMA engine is locked up
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (VI).
 * Returns true if the engine appears to be locked up, false if not.
 */
static bool sdma_v3_0_ring_is_lockup(struct amdgpu_ring *ring)
{
    if (sdma_v3_0_is_idle(ring->adev)) {
        amdgpu_ring_lockup_update(ring);
        return false;
    }
    return amdgpu_ring_test_lockup(ring);
}

static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
    .get_rptr = sdma_v3_0_ring_get_rptr,
    .get_wptr = sdma_v3_0_ring_get_wptr,

@ -1462,14 +1468,15 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
    .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
    .test_ring = sdma_v3_0_ring_test_ring,
    .test_ib = sdma_v3_0_ring_test_ib,
    .is_lockup = sdma_v3_0_ring_is_lockup,
    .insert_nop = sdma_v3_0_ring_insert_nop,
};

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
    adev->sdma[0].ring.funcs = &sdma_v3_0_ring_funcs;
    adev->sdma[1].ring.funcs = &sdma_v3_0_ring_funcs;
    int i;

    for (i = 0; i < adev->sdma.num_instances; i++)
        adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {

@ -1483,9 +1490,9 @@ static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {

static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
    adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
    adev->sdma_trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
    adev->sdma_illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
    adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
    adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
    adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
}

/**

@ -1551,7 +1558,7 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
{
    if (adev->mman.buffer_funcs == NULL) {
        adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
        adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
        adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
    }
}

@ -1566,7 +1573,7 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
    if (adev->vm_manager.vm_pte_funcs == NULL) {
        adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
        adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
        adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
        adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
    }
}
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c

@ -885,7 +885,6 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
    .emit_semaphore = uvd_v4_2_ring_emit_semaphore,
    .test_ring = uvd_v4_2_ring_test_ring,
    .test_ib = uvd_v4_2_ring_test_ib,
    .is_lockup = amdgpu_ring_test_lockup,
    .insert_nop = amdgpu_ring_insert_nop,
};

drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c

@ -824,7 +824,6 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
    .emit_semaphore = uvd_v5_0_ring_emit_semaphore,
    .test_ring = uvd_v5_0_ring_test_ring,
    .test_ib = uvd_v5_0_ring_test_ib,
    .is_lockup = amdgpu_ring_test_lockup,
    .insert_nop = amdgpu_ring_insert_nop,
};

drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c

@ -808,7 +808,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
    .emit_semaphore = uvd_v6_0_ring_emit_semaphore,
    .test_ring = uvd_v6_0_ring_test_ring,
    .test_ib = uvd_v6_0_ring_test_ib,
    .is_lockup = amdgpu_ring_test_lockup,
    .insert_nop = amdgpu_ring_insert_nop,
};

drivers/gpu/drm/amd/amdgpu/vce_v2_0.c

@ -642,7 +642,6 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
    .emit_semaphore = amdgpu_vce_ring_emit_semaphore,
    .test_ring = amdgpu_vce_ring_test_ring,
    .test_ib = amdgpu_vce_ring_test_ib,
    .is_lockup = amdgpu_ring_test_lockup,
    .insert_nop = amdgpu_ring_insert_nop,
};

drivers/gpu/drm/amd/amdgpu/vce_v3_0.c

@ -205,8 +205,9 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
    u32 tmp;
    unsigned ret;

    /* Fiji is single pipe */
    if (adev->asic_type == CHIP_FIJI) {
    /* Fiji, Stoney are single pipe */
    if ((adev->asic_type == CHIP_FIJI) ||
        (adev->asic_type == CHIP_STONEY)){
        ret = AMDGPU_VCE_HARVEST_VCE1;
        return ret;
    }

@ -643,7 +644,6 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
    .emit_semaphore = amdgpu_vce_ring_emit_semaphore,
    .test_ring = amdgpu_vce_ring_test_ring,
    .test_ib = amdgpu_vce_ring_test_ib,
    .is_lockup = amdgpu_ring_test_lockup,
    .insert_nop = amdgpu_ring_insert_nop,
};
drivers/gpu/drm/amd/amdgpu/vi.c

@ -232,6 +232,13 @@ static const u32 cz_mgcg_cgcg_init[] =
    mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
    mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
    mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
    mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
    /* Some of the registers might be dependent on GRBM_GFX_INDEX */

@ -258,6 +265,11 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
            cz_mgcg_cgcg_init,
            (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
        break;
    case CHIP_STONEY:
        amdgpu_program_register_sequence(adev,
            stoney_mgcg_cgcg_init,
            (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
        break;
    default:
        break;
    }

@ -488,6 +500,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
    case CHIP_FIJI:
    case CHIP_TONGA:
    case CHIP_CARRIZO:
    case CHIP_STONEY:
        asic_register_table = cz_allowed_read_registers;
        size = ARRAY_SIZE(cz_allowed_read_registers);
        break;

@ -543,8 +556,10 @@ static void vi_print_gpu_status_regs(struct amdgpu_device *adev)
        RREG32(mmSRBM_STATUS2));
    dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
        RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
    dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
        RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
    if (adev->sdma.num_instances > 1) {
        dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
            RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
    }
    dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
    dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
        RREG32(mmCP_STALLED_STAT1));

@ -639,9 +654,11 @@ u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev)
        reset_mask |= AMDGPU_RESET_DMA;

    /* SDMA1_STATUS_REG */
    tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
    if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
        reset_mask |= AMDGPU_RESET_DMA1;
    if (adev->sdma.num_instances > 1) {
        tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
        if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
            reset_mask |= AMDGPU_RESET_DMA1;
    }
#if 0
    /* VCE_STATUS */
    if (adev->asic_type != CHIP_TOPAZ) {

@ -1319,6 +1336,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
        adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
        break;
    case CHIP_CARRIZO:
    case CHIP_STONEY:
        adev->ip_blocks = cz_ip_blocks;
        adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
        break;

@ -1330,11 +1348,18 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
    return 0;
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT 9
#define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
    if (adev->asic_type == CHIP_TOPAZ)
        return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
            >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
    else if (adev->flags & AMD_IS_APU)
        return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
            >> ATI_REV_ID_FUSE_MACRO__SHIFT;
    else
        return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
            >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;

@ -1388,32 +1413,35 @@ static int vi_common_early_init(void *handle)
        adev->cg_flags = 0;
        adev->pg_flags = 0;
        adev->external_rev_id = 0x1;
        if (amdgpu_smc_load_fw && smc_enabled)
            adev->firmware.smu_load = true;
        break;
    case CHIP_FIJI:
        adev->has_uvd = true;
        adev->cg_flags = 0;
        adev->pg_flags = 0;
        adev->external_rev_id = adev->rev_id + 0x3c;
        break;
    case CHIP_TONGA:
        adev->has_uvd = true;
        adev->cg_flags = 0;
        adev->pg_flags = 0;
        adev->external_rev_id = adev->rev_id + 0x14;
        if (amdgpu_smc_load_fw && smc_enabled)
            adev->firmware.smu_load = true;
        break;
    case CHIP_CARRIZO:
    case CHIP_STONEY:
        adev->has_uvd = true;
        adev->cg_flags = 0;
        /* Disable UVD pg */
        adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
        adev->external_rev_id = adev->rev_id + 0x1;
        if (amdgpu_smc_load_fw && smc_enabled)
            adev->firmware.smu_load = true;
        break;
    default:
        /* FIXME: not supported yet */
        return -EINVAL;
    }

    if (amdgpu_smc_load_fw && smc_enabled)
        adev->firmware.smu_load = true;

    return 0;
}
drivers/gpu/drm/amd/include/amd_shared.h

@ -47,6 +47,7 @@ enum amd_asic_type {
    CHIP_TONGA,
    CHIP_FIJI,
    CHIP_CARRIZO,
    CHIP_STONEY,
    CHIP_LAST,
};
drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_1_d.h (new file, 2791 lines; diff suppressed because it is too large)
drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_1_enum.h (new file, 6808 lines; diff suppressed because it is too large)
drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_1_sh_mask.h (new file, 21368 lines; diff suppressed because it is too large)
drivers/gpu/drm/amd/include/atombios.h

@ -6784,7 +6784,7 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE_V2_1
    ULONG ulMCUcodeRomStartAddr;
    ULONG ulMCUcodeLength;
    USHORT usMcRegInitTableOffset; // offset of ATOM_REG_INIT_SETTING array for MC core register settings.
    USHORT usReserved; // offset of ATOM_INIT_REG_BLOCK for MC SEQ/PHY regsiter setting
    USHORT usReserved; // offset of ATOM_INIT_REG_BLOCK for MC SEQ/PHY register setting
}ATOM_MC_INIT_PARAM_TABLE_V2_1;
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c

@ -222,6 +232,12 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)

    while ((entity->dependency = sched->ops->dependency(sched_job))) {

        if (entity->dependency->context == entity->fence_context) {
            /* We can ignore fences from ourself */
            fence_put(entity->dependency);
            continue;
        }

        if (fence_add_callback(entity->dependency, &entity->cb,
                               amd_sched_entity_wakeup))
            fence_put(entity->dependency);

@ -327,19 +333,49 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
    struct amd_sched_fence *s_fence =
        container_of(cb, struct amd_sched_fence, cb);
    struct amd_gpu_scheduler *sched = s_fence->sched;
    unsigned long flags;

    atomic_dec(&sched->hw_rq_count);
    amd_sched_fence_signal(s_fence);
    if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
        cancel_delayed_work(&s_fence->dwork);
        spin_lock_irqsave(&sched->fence_list_lock, flags);
        list_del_init(&s_fence->list);
        spin_unlock_irqrestore(&sched->fence_list_lock, flags);
    }
    fence_put(&s_fence->base);
    wake_up_interruptible(&sched->wake_up_worker);
}

static void amd_sched_fence_work_func(struct work_struct *work)
{
    struct amd_sched_fence *s_fence =
        container_of(work, struct amd_sched_fence, dwork.work);
    struct amd_gpu_scheduler *sched = s_fence->sched;
    struct amd_sched_fence *entity, *tmp;
    unsigned long flags;

    DRM_ERROR("[%s] scheduler is timeout!\n", sched->name);

    /* Clean all pending fences */
    spin_lock_irqsave(&sched->fence_list_lock, flags);
    list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
        DRM_ERROR(" fence no %d\n", entity->base.seqno);
        cancel_delayed_work(&entity->dwork);
        list_del_init(&entity->list);
        fence_put(&entity->base);
    }
    spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}

static int amd_sched_main(void *param)
{
    struct sched_param sparam = {.sched_priority = 1};
    struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
    int r, count;

    spin_lock_init(&sched->fence_list_lock);
    INIT_LIST_HEAD(&sched->fence_list);
    sched_setscheduler(current, SCHED_FIFO, &sparam);

    while (!kthread_should_stop()) {

@ -347,6 +383,7 @@ static int amd_sched_main(void *param)
        struct amd_sched_fence *s_fence;
        struct amd_sched_job *sched_job;
        struct fence *fence;
        unsigned long flags;

        wait_event_interruptible(sched->wake_up_worker,
                                 kthread_should_stop() ||

@ -357,6 +394,15 @@ static int amd_sched_main(void *param)

        entity = sched_job->s_entity;
        s_fence = sched_job->s_fence;

        if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
            INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
            schedule_delayed_work(&s_fence->dwork, sched->timeout);
            spin_lock_irqsave(&sched->fence_list_lock, flags);
            list_add_tail(&s_fence->list, &sched->fence_list);
            spin_unlock_irqrestore(&sched->fence_list_lock, flags);
        }

        atomic_inc(&sched->hw_rq_count);
        fence = sched->ops->run_job(sched_job);
        if (fence) {

@ -392,11 +438,12 @@ static int amd_sched_main(void *param)
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
                   struct amd_sched_backend_ops *ops,
                   unsigned hw_submission, const char *name)
                   unsigned hw_submission, long timeout, const char *name)
{
    sched->ops = ops;
    sched->hw_submission_limit = hw_submission;
    sched->name = name;
    sched->timeout = timeout;
    amd_sched_rq_init(&sched->sched_rq);
    amd_sched_rq_init(&sched->kernel_rq);

@ -421,5 +468,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
    kthread_stop(sched->thread);
    if (sched->thread)
        kthread_stop(sched->thread);
}
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h

@ -68,6 +68,8 @@ struct amd_sched_fence {
    struct amd_gpu_scheduler *sched;
    spinlock_t lock;
    void *owner;
    struct delayed_work dwork;
    struct list_head list;
};

struct amd_sched_job {

@ -103,18 +105,21 @@ struct amd_sched_backend_ops {
struct amd_gpu_scheduler {
    struct amd_sched_backend_ops *ops;
    uint32_t hw_submission_limit;
    long timeout;
    const char *name;
    struct amd_sched_rq sched_rq;
    struct amd_sched_rq kernel_rq;
    wait_queue_head_t wake_up_worker;
    wait_queue_head_t job_scheduled;
    atomic_t hw_rq_count;
    struct list_head fence_list;
    spinlock_t fence_list_lock;
    struct task_struct *thread;
};

int amd_sched_init(struct amd_gpu_scheduler *sched,
                   struct amd_sched_backend_ops *ops,
                   uint32_t hw_submission, const char *name);
                   uint32_t hw_submission, long timeout, const char *name);
void amd_sched_fini(struct amd_gpu_scheduler *sched);

int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
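amd_sched_init() now takes a timeout argument, which arms the per-fence delayed-work watchdog shown in amd_sched_main() and cancelled in amd_sched_process_job(). A hedged sketch of the presumed caller shape; deriving the value from the existing amdgpu_lockup_timeout module parameter is an assumption based on the surrounding series, not something shown in these hunks:

/*
 * Sketch: a finite jiffies value arms the watchdog; MAX_SCHEDULE_TIMEOUT
 * keeps the old "never time out" behaviour.
 */
long timeout = msecs_to_jiffies(amdgpu_lockup_timeout);  /* assumed knob */

if (timeout == 0)   /* 0 is taken to mean "no watchdog" */
    timeout = MAX_SCHEDULE_TIMEOUT;

r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
                   amdgpu_sched_hw_submission, timeout, ring->name);
if (r)
    return r;

On expiry the work function only logs and drops the pending fences; it does not attempt a reset, so it is a diagnostic rather than a recovery path.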
drivers/gpu/drm/armada/Kconfig

@ -14,12 +14,3 @@ config DRM_ARMADA
      This driver provides no built-in acceleration; acceleration is
      performed by other IP found on the SoC. This driver provides
      kernel mode setting and buffer management to userspace.

config DRM_ARMADA_TDA1998X
    bool "Support TDA1998X HDMI output"
    depends on DRM_ARMADA != n
    depends on I2C && DRM_I2C_NXP_TDA998X = y
    default y
    help
      Support the TDA1998x HDMI output device found on the Solid-Run
      CuBox.

drivers/gpu/drm/armada/Makefile

@ -1,6 +1,5 @@
armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
            armada_gem.o armada_output.o armada_overlay.o \
            armada_slave.o
            armada_gem.o armada_overlay.o
armada-y += armada_510.o
armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
drivers/gpu/drm/armada/armada_crtc.c

@ -20,6 +20,7 @@
#include "armada_hw.h"

struct armada_frame_work {
    struct armada_plane_work work;
    struct drm_pending_vblank_event *event;
    struct armada_regs regs[4];
    struct drm_framebuffer *old_fb;

@ -33,6 +34,23 @@ enum csc_mode {
    CSC_RGB_STUDIO = 2,
};

static const uint32_t armada_primary_formats[] = {
    DRM_FORMAT_UYVY,
    DRM_FORMAT_YUYV,
    DRM_FORMAT_VYUY,
    DRM_FORMAT_YVYU,
    DRM_FORMAT_ARGB8888,
    DRM_FORMAT_ABGR8888,
    DRM_FORMAT_XRGB8888,
    DRM_FORMAT_XBGR8888,
    DRM_FORMAT_RGB888,
    DRM_FORMAT_BGR888,
    DRM_FORMAT_ARGB1555,
    DRM_FORMAT_ABGR1555,
    DRM_FORMAT_RGB565,
    DRM_FORMAT_BGR565,
};

/*
 * A note about interlacing. Let's consider HDMI 1920x1080i.
 * The timing parameters we have from X are:

@ -173,49 +191,82 @@ static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
    return i;
}

static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc,
    struct armada_frame_work *work)
static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
    struct armada_plane *plane)
{
    struct armada_plane_work *work = xchg(&plane->work, NULL);

    /* Handle any pending frame work. */
    if (work) {
        work->fn(dcrtc, plane, work);
        drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
    }

    wake_up(&plane->frame_wait);
}

int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
    struct armada_plane *plane, struct armada_plane_work *work)
{
    struct drm_device *dev = dcrtc->crtc.dev;
    unsigned long flags;
    int ret;

    ret = drm_vblank_get(dev, dcrtc->num);
    ret = drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
    if (ret) {
        DRM_ERROR("failed to acquire vblank counter\n");
        return ret;
    }

    spin_lock_irqsave(&dev->event_lock, flags);
    if (!dcrtc->frame_work)
        dcrtc->frame_work = work;
    else
        ret = -EBUSY;
    spin_unlock_irqrestore(&dev->event_lock, flags);

    ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
    if (ret)
        drm_vblank_put(dev, dcrtc->num);
        drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);

    return ret;
}

static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc)
int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout)
{
    return wait_event_timeout(plane->frame_wait, !plane->work, timeout);
}

struct armada_plane_work *armada_drm_plane_work_cancel(
    struct armada_crtc *dcrtc, struct armada_plane *plane)
{
    struct armada_plane_work *work = xchg(&plane->work, NULL);

    if (work)
        drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);

    return work;
}

static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc,
    struct armada_frame_work *work)
{
    struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);

    return armada_drm_plane_work_queue(dcrtc, plane, &work->work);
}

static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
    struct armada_plane *plane, struct armada_plane_work *work)
{
    struct armada_frame_work *fwork = container_of(work, struct armada_frame_work, work);
    struct drm_device *dev = dcrtc->crtc.dev;
    struct armada_frame_work *work = dcrtc->frame_work;
    unsigned long flags;

    dcrtc->frame_work = NULL;
    spin_lock_irqsave(&dcrtc->irq_lock, flags);
    armada_drm_crtc_update_regs(dcrtc, fwork->regs);
    spin_unlock_irqrestore(&dcrtc->irq_lock, flags);

    armada_drm_crtc_update_regs(dcrtc, work->regs);

    if (work->event)
        drm_send_vblank_event(dev, dcrtc->num, work->event);

    drm_vblank_put(dev, dcrtc->num);
    if (fwork->event) {
        spin_lock_irqsave(&dev->event_lock, flags);
        drm_send_vblank_event(dev, dcrtc->num, fwork->event);
        spin_unlock_irqrestore(&dev->event_lock, flags);
    }

    /* Finally, queue the process-half of the cleanup. */
    __armada_drm_queue_unref_work(dcrtc->crtc.dev, work->old_fb);
    kfree(work);
    __armada_drm_queue_unref_work(dcrtc->crtc.dev, fwork->old_fb);
    kfree(fwork);
}

static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
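The queue/run/cancel trio above replaces the old event_lock-protected frame_work pointer with a lock-free single-slot handoff: cmpxchg() claims the slot only if it is empty, and xchg() atomically drains it from both the IRQ path and the cancel path. A reduced sketch of the idiom, independent of the DRM specifics (names here are illustrative, and the kernel's atomic xchg/cmpxchg helpers are assumed):

/* Sketch: one pending work item per plane, handed off without a lock. */
struct work_item;

struct slot {
    struct work_item *pending;
};

static int slot_queue(struct slot *s, struct work_item *w)
{
    /* Claim the slot only if empty; -EBUSY mirrors the code above. */
    return cmpxchg(&s->pending, NULL, w) ? -EBUSY : 0;
}

static struct work_item *slot_take(struct slot *s)
{
    /* Atomically drain the slot; safe against a concurrent queue. */
    return xchg(&s->pending, NULL);
}

Because there is at most one item in flight per plane, the single-word exchange is sufficient and the vblank event_lock no longer serialises the hot path.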
@ -235,6 +286,7 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
    work = kmalloc(sizeof(*work), GFP_KERNEL);
    if (work) {
        int i = 0;
        work->work.fn = armada_drm_crtc_complete_frame_work;
        work->event = NULL;
        work->old_fb = fb;
        armada_reg_queue_end(work->regs, i);

@ -255,19 +307,14 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,

static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
{
    struct drm_device *dev = dcrtc->crtc.dev;
    struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);

    /*
     * Tell the DRM core that vblank IRQs aren't going to happen for
     * a while. This cleans up any pending vblank events for us.
     */
    drm_crtc_vblank_off(&dcrtc->crtc);

    /* Handle any pending flip event. */
    spin_lock_irq(&dev->event_lock);
    if (dcrtc->frame_work)
        armada_drm_crtc_complete_frame_work(dcrtc);
    spin_unlock_irq(&dev->event_lock);
    armada_drm_plane_work_run(dcrtc, plane);
}

void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,

@ -287,7 +334,11 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)

    if (dcrtc->dpms != dpms) {
        dcrtc->dpms = dpms;
        if (!IS_ERR(dcrtc->clk) && !dpms_blanked(dpms))
            WARN_ON(clk_prepare_enable(dcrtc->clk));
        armada_drm_crtc_update(dcrtc);
        if (!IS_ERR(dcrtc->clk) && dpms_blanked(dpms))
            clk_disable_unprepare(dcrtc->clk);
        if (dpms_blanked(dpms))
            armada_drm_vblank_off(dcrtc);
        else

@ -310,17 +361,11 @@ static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
    /*
     * If we have an overlay plane associated with this CRTC, disable
     * it before the modeset to avoid its coordinates being outside
     * the new mode parameters. DRM doesn't provide help with this.
     * the new mode parameters.
     */
    plane = dcrtc->plane;
    if (plane) {
        struct drm_framebuffer *fb = plane->fb;

        plane->funcs->disable_plane(plane);
        plane->fb = NULL;
        plane->crtc = NULL;
        drm_framebuffer_unreference(fb);
    }
    if (plane)
        drm_plane_force_disable(plane);
}

/* The mode_config.mutex will be held for this call */

@ -356,8 +401,8 @@ static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,

static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
{
    struct armada_vbl_event *e, *n;
    void __iomem *base = dcrtc->base;
    struct drm_plane *ovl_plane;

    if (stat & DMA_FF_UNDERFLOW)
        DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);

@ -368,11 +413,10 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
        drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);

    spin_lock(&dcrtc->irq_lock);

    list_for_each_entry_safe(e, n, &dcrtc->vbl_list, node) {
        list_del_init(&e->node);
        drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
        e->fn(dcrtc, e->data);
    ovl_plane = dcrtc->plane;
    if (ovl_plane) {
        struct armada_plane *plane = drm_to_armada_plane(ovl_plane);
        armada_drm_plane_work_run(dcrtc, plane);
    }

    if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {

@ -404,14 +448,8 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
    spin_unlock(&dcrtc->irq_lock);

    if (stat & GRA_FRAME_IRQ) {
        struct drm_device *dev = dcrtc->crtc.dev;

        spin_lock(&dev->event_lock);
        if (dcrtc->frame_work)
            armada_drm_crtc_complete_frame_work(dcrtc);
        spin_unlock(&dev->event_lock);

        wake_up(&dcrtc->frame_wait);
        struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
        armada_drm_plane_work_run(dcrtc, plane);
    }
}

@ -527,7 +565,8 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
        adj->crtc_vtotal, tm, bm);

    /* Wait for pending flips to complete */
    wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
    armada_drm_plane_work_wait(drm_to_armada_plane(dcrtc->crtc.primary),
                               MAX_SCHEDULE_TIMEOUT);

    drm_crtc_vblank_off(crtc);

@ -537,6 +576,13 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
        writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
    }

    /*
     * If we are blanked, we would have disabled the clock. Re-enable
     * it so that compute_clock() does the right thing.
     */
    if (!IS_ERR(dcrtc->clk) && dpms_blanked(dcrtc->dpms))
        WARN_ON(clk_prepare_enable(dcrtc->clk));

    /* Now compute the divider for real */
    dcrtc->variant->compute_clock(dcrtc, adj, &sclk);

@ -637,7 +683,8 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
    armada_reg_queue_end(regs, i);

    /* Wait for pending flips to complete */
    wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
    armada_drm_plane_work_wait(drm_to_armada_plane(dcrtc->crtc.primary),
                               MAX_SCHEDULE_TIMEOUT);

    /* Take a reference to the new fb as we're using it */
    drm_framebuffer_reference(crtc->primary->fb);

@ -651,18 +698,47 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
    return 0;
}

void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
    struct drm_plane *plane)
{
    u32 sram_para1, dma_ctrl0_mask;

    /*
     * Drop our reference on any framebuffer attached to this plane.
     * We don't need to NULL this out as drm_plane_force_disable(),
     * and __setplane_internal() will do so for an overlay plane, and
     * __drm_helper_disable_unused_functions() will do so for the
     * primary plane.
     */
    if (plane->fb)
        drm_framebuffer_unreference(plane->fb);

    /* Power down the Y/U/V FIFOs */
    sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;

    /* Power down most RAMs and FIFOs if this is the primary plane */
    if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
        sram_para1 |= CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
                      CFG_PDWN32x32 | CFG_PDWN64x66;
        dma_ctrl0_mask = CFG_GRA_ENA;
    } else {
        dma_ctrl0_mask = CFG_DMA_ENA;
    }

    spin_lock_irq(&dcrtc->irq_lock);
    armada_updatel(0, dma_ctrl0_mask, dcrtc->base + LCD_SPU_DMA_CTRL0);
    spin_unlock_irq(&dcrtc->irq_lock);

    armada_updatel(sram_para1, 0, dcrtc->base + LCD_SPU_SRAM_PARA1);
}

/* The mode_config.mutex will be held for this call */
static void armada_drm_crtc_disable(struct drm_crtc *crtc)
{
    struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);

    armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
    armada_drm_crtc_finish_fb(dcrtc, crtc->primary->fb, true);

    /* Power down most RAMs and FIFOs */
    writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
                   CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
                   CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
    armada_drm_crtc_plane_disable(dcrtc, crtc->primary);
}

static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {

@ -920,8 +996,6 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
{
    struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
    struct armada_frame_work *work;
    struct drm_device *dev = crtc->dev;
    unsigned long flags;
    unsigned i;
    int ret;

@ -933,6 +1007,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
    if (!work)
        return -ENOMEM;

    work->work.fn = armada_drm_crtc_complete_frame_work;
    work->event = event;
    work->old_fb = dcrtc->crtc.primary->fb;

@ -966,12 +1041,8 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
     * Finally, if the display is blanked, we won't receive an
     * interrupt, so complete it now.
     */
    if (dpms_blanked(dcrtc->dpms)) {
        spin_lock_irqsave(&dev->event_lock, flags);
        if (dcrtc->frame_work)
            armada_drm_crtc_complete_frame_work(dcrtc);
        spin_unlock_irqrestore(&dev->event_lock, flags);
    }
    if (dpms_blanked(dcrtc->dpms))
        armada_drm_plane_work_run(dcrtc, drm_to_armada_plane(dcrtc->crtc.primary));

    return 0;
}

@ -1012,6 +1083,19 @@ static struct drm_crtc_funcs armada_crtc_funcs = {
    .set_property = armada_drm_crtc_set_property,
};

static const struct drm_plane_funcs armada_primary_plane_funcs = {
    .update_plane = drm_primary_helper_update,
    .disable_plane = drm_primary_helper_disable,
    .destroy = drm_primary_helper_destroy,
};
|
||||
|
||||
int armada_drm_plane_init(struct armada_plane *plane)
|
||||
{
|
||||
init_waitqueue_head(&plane->frame_wait);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
|
||||
{ CSC_AUTO, "Auto" },
|
||||
{ CSC_YUV_CCIR601, "CCIR601" },
|
||||
|
@ -1044,12 +1128,13 @@ static int armada_drm_crtc_create_properties(struct drm_device *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
|
||||
static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
|
||||
struct resource *res, int irq, const struct armada_variant *variant,
|
||||
struct device_node *port)
|
||||
{
|
||||
struct armada_private *priv = drm->dev_private;
|
||||
struct armada_crtc *dcrtc;
|
||||
struct armada_plane *primary;
|
||||
void __iomem *base;
|
||||
int ret;
|
||||
|
||||
|
@ -1080,8 +1165,6 @@ int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
|
|||
dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
|
||||
spin_lock_init(&dcrtc->irq_lock);
|
||||
dcrtc->irq_ena = CLEAN_SPU_IRQ_ISR;
|
||||
INIT_LIST_HEAD(&dcrtc->vbl_list);
|
||||
init_waitqueue_head(&dcrtc->frame_wait);
|
||||
|
||||
/* Initialize some registers which we don't otherwise set */
|
||||
writel_relaxed(0x00000001, dcrtc->base + LCD_CFG_SCLK_DIV);
|
||||
|
@ -1118,7 +1201,32 @@ int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
|
|||
priv->dcrtc[dcrtc->num] = dcrtc;
|
||||
|
||||
dcrtc->crtc.port = port;
|
||||
drm_crtc_init(drm, &dcrtc->crtc, &armada_crtc_funcs);
|
||||
|
||||
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
|
||||
if (!primary)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = armada_drm_plane_init(primary);
|
||||
if (ret) {
|
||||
kfree(primary);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = drm_universal_plane_init(drm, &primary->base, 0,
|
||||
&armada_primary_plane_funcs,
|
||||
armada_primary_formats,
|
||||
ARRAY_SIZE(armada_primary_formats),
|
||||
DRM_PLANE_TYPE_PRIMARY);
|
||||
if (ret) {
|
||||
kfree(primary);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
|
||||
&armada_crtc_funcs);
|
||||
if (ret)
|
||||
goto err_crtc_init;
|
||||
|
||||
drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
|
||||
|
||||
drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
|
||||
|
@ -1127,6 +1235,10 @@ int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
|
|||
dcrtc->csc_rgb_mode);
|
||||
|
||||
return armada_overlay_plane_create(drm, 1 << dcrtc->num);
|
||||
|
||||
err_crtc_init:
|
||||
primary->base.funcs->destroy(&primary->base);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
|
|
|
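Note: armada_updatel(), used throughout the hunks above to flip bits in the LCD_SPU_* registers, is not itself shown in this diff. A minimal sketch of its assumed read-modify-write semantics (illustrative only, not part of this patch):

    /*
     * Assumed helper semantics (sketch): clear the bits in 'mask', set
     * the bits in 'val', write the result back.  This matches the calls
     * above: armada_updatel(0, dma_ctrl0_mask, ...) clears the enable
     * bits, armada_updatel(sram_para1, 0, ...) sets the power-down bits.
     * Kernel context: readl_relaxed()/writel_relaxed() from <linux/io.h>.
     */
    static inline void armada_updatel(u32 val, u32 mask, void __iomem *ptr)
    {
        u32 ctrl = readl_relaxed(ptr);

        ctrl &= ~mask;
        ctrl |= val;
        writel_relaxed(ctrl, ptr);
    }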
drivers/gpu/drm/armada/armada_crtc.h
@@ -31,9 +31,30 @@ struct armada_regs {
#define armada_reg_queue_end(_r, _i)        \
    armada_reg_queue_mod(_r, _i, 0, 0, ~0)

struct armada_frame_work;
struct armada_crtc;
struct armada_plane;
struct armada_variant;

struct armada_plane_work {
    void (*fn)(struct armada_crtc *,
           struct armada_plane *,
           struct armada_plane_work *);
};

struct armada_plane {
    struct drm_plane base;
    wait_queue_head_t frame_wait;
    struct armada_plane_work *work;
};
#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)

int armada_drm_plane_init(struct armada_plane *plane);
int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
    struct armada_plane *plane, struct armada_plane_work *work);
int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout);
struct armada_plane_work *armada_drm_plane_work_cancel(
    struct armada_crtc *dcrtc, struct armada_plane *plane);

struct armada_crtc {
    struct drm_crtc     crtc;
    const struct armada_variant *variant;

@@ -66,25 +87,20 @@ struct armada_crtc {
    uint32_t        dumb_ctrl;
    uint32_t        spu_iopad_ctrl;

    wait_queue_head_t   frame_wait;
    struct armada_frame_work *frame_work;

    spinlock_t      irq_lock;
    uint32_t        irq_ena;
    struct list_head    vbl_list;
};
#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)

struct device_node;
int armada_drm_crtc_create(struct drm_device *, struct device *,
    struct resource *, int, const struct armada_variant *,
    struct device_node *);
void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);

void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
    struct drm_plane *plane);

extern struct platform_driver armada_lcd_platform_driver;

#endif

drivers/gpu/drm/armada/armada_drm.h
@@ -37,22 +37,6 @@ static inline uint32_t armada_pitch(uint32_t width, uint32_t bpp)
    return ALIGN(pitch, 128);
}

struct armada_vbl_event {
    struct list_head    node;
    void            *data;
    void            (*fn)(struct armada_crtc *, void *);
};
void armada_drm_vbl_event_add(struct armada_crtc *,
    struct armada_vbl_event *);
void armada_drm_vbl_event_remove(struct armada_crtc *,
    struct armada_vbl_event *);
#define armada_drm_vbl_event_init(_e, _f, _d) do {  \
    struct armada_vbl_event *__e = _e;      \
    INIT_LIST_HEAD(&__e->node);         \
    __e->data = _d;                 \
    __e->fn = _f;                   \
} while (0)


struct armada_private;

drivers/gpu/drm/armada/armada_drv.c
@@ -11,6 +11,7 @@
#include <linux/of_graph.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_of.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_gem.h"

@@ -18,47 +19,6 @@
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

#ifdef CONFIG_DRM_ARMADA_TDA1998X
#include <drm/i2c/tda998x.h>
#include "armada_slave.h"

static struct tda998x_encoder_params params = {
    /* With 0x24, there is no translation between vp_out and int_vp
       FB      LCD out     Pins        VIP     Int Vp
       R:23:16 R:7:0       VPC7:0      7:0     7:0[R]
       G:15:8  G:15:8      VPB7:0      23:16   23:16[G]
       B:7:0   B:23:16     VPA7:0      15:8    15:8[B]
     */
    .swap_a = 2,
    .swap_b = 3,
    .swap_c = 4,
    .swap_d = 5,
    .swap_e = 0,
    .swap_f = 1,
    .audio_cfg = BIT(2),
    .audio_frame[1] = 1,
    .audio_format = AFMT_SPDIF,
    .audio_sample_rate = 44100,
};

static const struct armada_drm_slave_config tda19988_config = {
    .i2c_adapter_id = 0,
    .crtcs = 1 << 0, /* Only LCD0 at the moment */
    .polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT,
    .interlace_allowed = true,
    .info = {
        .type = "tda998x",
        .addr = 0x70,
        .platform_data = &params,
    },
};
#endif

static bool is_componentized(struct device *dev)
{
    return dev->of_node || dev->platform_data;
}

static void armada_drm_unref_work(struct work_struct *work)
{
    struct armada_private *priv =

@@ -91,16 +51,11 @@ void armada_drm_queue_unref_work(struct drm_device *dev,

static int armada_drm_load(struct drm_device *dev, unsigned long flags)
{
    const struct platform_device_id *id;
    const struct armada_variant *variant;
    struct armada_private *priv;
    struct resource *res[ARRAY_SIZE(priv->dcrtc)];
    struct resource *mem = NULL;
    int ret, n, i;
    int ret, n;

    memset(res, 0, sizeof(res));

    for (n = i = 0; ; n++) {
    for (n = 0; ; n++) {
        struct resource *r = platform_get_resource(dev->platformdev,
                               IORESOURCE_MEM, n);
        if (!r)

@@ -109,8 +64,6 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
        /* Resources above 64K are graphics memory */
        if (resource_size(r) > SZ_64K)
            mem = r;
        else if (i < ARRAY_SIZE(priv->dcrtc))
            res[i++] = r;
        else
            return -EINVAL;
    }

@@ -131,13 +84,6 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
    platform_set_drvdata(dev->platformdev, dev);
    dev->dev_private = priv;

    /* Get the implementation specific driver data. */
    id = platform_get_device_id(dev->platformdev);
    if (!id)
        return -ENXIO;

    variant = (const struct armada_variant *)id->driver_data;

    INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
    INIT_KFIFO(priv->fb_unref);

@@ -157,34 +103,9 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
    dev->mode_config.funcs = &armada_drm_mode_config_funcs;
    drm_mm_init(&priv->linear, mem->start, resource_size(mem));

    /* Create all LCD controllers */
    for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
        int irq;

        if (!res[n])
            break;

        irq = platform_get_irq(dev->platformdev, n);
        if (irq < 0)
            goto err_kms;

        ret = armada_drm_crtc_create(dev, dev->dev, res[n], irq,
                         variant, NULL);
        if (ret)
            goto err_kms;
    }

    if (is_componentized(dev->dev)) {
        ret = component_bind_all(dev->dev, dev);
        if (ret)
            goto err_kms;
    } else {
#ifdef CONFIG_DRM_ARMADA_TDA1998X
        ret = armada_drm_connector_slave_create(dev, &tda19988_config);
        if (ret)
            goto err_kms;
#endif
    }
    ret = component_bind_all(dev->dev, dev);
    if (ret)
        goto err_kms;

    ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
    if (ret)

@@ -202,8 +123,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
    return 0;

 err_comp:
    if (is_componentized(dev->dev))
        component_unbind_all(dev->dev, dev);
    component_unbind_all(dev->dev, dev);
 err_kms:
    drm_mode_config_cleanup(dev);
    drm_mm_takedown(&priv->linear);

@@ -219,8 +139,7 @@ static int armada_drm_unload(struct drm_device *dev)
    drm_kms_helper_poll_fini(dev);
    armada_fbdev_fini(dev);

    if (is_componentized(dev->dev))
        component_unbind_all(dev->dev, dev);
    component_unbind_all(dev->dev, dev);

    drm_mode_config_cleanup(dev);
    drm_mm_takedown(&priv->linear);

@@ -230,50 +149,24 @@ static int armada_drm_unload(struct drm_device *dev)
    return 0;
}

void armada_drm_vbl_event_add(struct armada_crtc *dcrtc,
    struct armada_vbl_event *evt)
{
    unsigned long flags;

    spin_lock_irqsave(&dcrtc->irq_lock, flags);
    if (list_empty(&evt->node)) {
        list_add_tail(&evt->node, &dcrtc->vbl_list);

        drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
    }
    spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}

void armada_drm_vbl_event_remove(struct armada_crtc *dcrtc,
    struct armada_vbl_event *evt)
{
    if (!list_empty(&evt->node)) {
        list_del_init(&evt->node);
        drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
    }
}

/* These are called under the vbl_lock. */
static int armada_drm_enable_vblank(struct drm_device *dev, int crtc)
static int armada_drm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
    struct armada_private *priv = dev->dev_private;
    armada_drm_crtc_enable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
    armada_drm_crtc_enable_irq(priv->dcrtc[pipe], VSYNC_IRQ_ENA);
    return 0;
}

static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
static void armada_drm_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
    struct armada_private *priv = dev->dev_private;
    armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
    armada_drm_crtc_disable_irq(priv->dcrtc[pipe], VSYNC_IRQ_ENA);
}

static struct drm_ioctl_desc armada_ioctls[] = {
    DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
        DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl,
        DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl,
        DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl, 0),
    DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, 0),
    DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl, 0),
};

static void armada_drm_lastclose(struct drm_device *dev)

@@ -300,7 +193,7 @@ static struct drm_driver armada_drm_driver = {
    .lastclose      = armada_drm_lastclose,
    .unload         = armada_drm_unload,
    .set_busid      = drm_platform_set_busid,
    .get_vblank_counter = drm_vblank_count,
    .get_vblank_counter = drm_vblank_no_hw_counter,
    .enable_vblank      = armada_drm_enable_vblank,
    .disable_vblank     = armada_drm_disable_vblank,
#ifdef CONFIG_DEBUG_FS

@@ -370,43 +263,29 @@ static void armada_add_endpoints(struct device *dev,
    }
}

static int armada_drm_find_components(struct device *dev,
    struct component_match **match)
static const struct component_master_ops armada_master_ops = {
    .bind = armada_drm_bind,
    .unbind = armada_drm_unbind,
};

static int armada_drm_probe(struct platform_device *pdev)
{
    struct device_node *port;
    int i;
    struct component_match *match = NULL;
    struct device *dev = &pdev->dev;
    int ret;

    if (dev->of_node) {
        struct device_node *np = dev->of_node;
    ret = drm_of_component_probe(dev, compare_dev_name, &armada_master_ops);
    if (ret != -EINVAL)
        return ret;

        for (i = 0; ; i++) {
            port = of_parse_phandle(np, "ports", i);
            if (!port)
                break;

            component_match_add(dev, match, compare_of, port);
            of_node_put(port);
        }

        if (i == 0) {
            dev_err(dev, "missing 'ports' property\n");
            return -ENODEV;
        }

        for (i = 0; ; i++) {
            port = of_parse_phandle(np, "ports", i);
            if (!port)
                break;

            armada_add_endpoints(dev, match, port);
            of_node_put(port);
        }
    } else if (dev->platform_data) {
    if (dev->platform_data) {
        char **devices = dev->platform_data;
        struct device_node *port;
        struct device *d;
        int i;

        for (i = 0; devices[i]; i++)
            component_match_add(dev, match, compare_dev_name,
            component_match_add(dev, &match, compare_dev_name,
                        devices[i]);

        if (i == 0) {

@@ -416,56 +295,30 @@ static int armada_drm_find_components(struct device *dev,

        for (i = 0; devices[i]; i++) {
            d = bus_find_device_by_name(&platform_bus_type, NULL,
                    devices[i]);
                            devices[i]);
            if (d && d->of_node) {
                for_each_child_of_node(d->of_node, port)
                    armada_add_endpoints(dev, match, port);
                    armada_add_endpoints(dev, &match, port);
            }
            put_device(d);
        }
    }

    return 0;
}

static const struct component_master_ops armada_master_ops = {
    .bind = armada_drm_bind,
    .unbind = armada_drm_unbind,
};

static int armada_drm_probe(struct platform_device *pdev)
{
    if (is_componentized(&pdev->dev)) {
        struct component_match *match = NULL;
        int ret;

        ret = armada_drm_find_components(&pdev->dev, &match);
        if (ret < 0)
            return ret;

        return component_master_add_with_match(&pdev->dev,
                &armada_master_ops, match);
    } else {
        return drm_platform_init(&armada_drm_driver, pdev);
    }
    return component_master_add_with_match(&pdev->dev, &armada_master_ops,
                           match);
}

static int armada_drm_remove(struct platform_device *pdev)
{
    if (is_componentized(&pdev->dev))
        component_master_del(&pdev->dev, &armada_master_ops);
    else
        drm_put_dev(platform_get_drvdata(pdev));
    component_master_del(&pdev->dev, &armada_master_ops);
    return 0;
}

static const struct platform_device_id armada_drm_platform_ids[] = {
    {
        .name       = "armada-drm",
        .driver_data    = (unsigned long)&armada510_ops,
    }, {
        .name       = "armada-510-drm",
        .driver_data    = (unsigned long)&armada510_ops,
    },
    { },
};

drivers/gpu/drm/armada/armada_output.c (deleted)
@@ -1,142 +0,0 @@
/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder_slave.h>
#include "armada_output.h"
#include "armada_drm.h"

struct armada_connector {
    struct drm_connector conn;
    const struct armada_output_type *type;
};

#define drm_to_armada_conn(c) container_of(c, struct armada_connector, conn)

struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn)
{
    struct drm_encoder *enc = conn->encoder;

    return enc ? enc : drm_encoder_find(conn->dev, conn->encoder_ids[0]);
}

static enum drm_connector_status armada_drm_connector_detect(
    struct drm_connector *conn, bool force)
{
    struct armada_connector *dconn = drm_to_armada_conn(conn);
    enum drm_connector_status status = connector_status_disconnected;

    if (dconn->type->detect) {
        status = dconn->type->detect(conn, force);
    } else {
        struct drm_encoder *enc = armada_drm_connector_encoder(conn);

        if (enc)
            status = encoder_helper_funcs(enc)->detect(enc, conn);
    }

    return status;
}

static void armada_drm_connector_destroy(struct drm_connector *conn)
{
    struct armada_connector *dconn = drm_to_armada_conn(conn);

    drm_connector_unregister(conn);
    drm_connector_cleanup(conn);
    kfree(dconn);
}

static int armada_drm_connector_set_property(struct drm_connector *conn,
    struct drm_property *property, uint64_t value)
{
    struct armada_connector *dconn = drm_to_armada_conn(conn);

    if (!dconn->type->set_property)
        return -EINVAL;

    return dconn->type->set_property(conn, property, value);
}

static const struct drm_connector_funcs armada_drm_conn_funcs = {
    .dpms       = drm_helper_connector_dpms,
    .fill_modes = drm_helper_probe_single_connector_modes,
    .detect     = armada_drm_connector_detect,
    .destroy    = armada_drm_connector_destroy,
    .set_property   = armada_drm_connector_set_property,
};

/* Shouldn't this be a generic helper function? */
int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
    struct drm_display_mode *mode)
{
    struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
    int valid = MODE_BAD;

    if (encoder) {
        struct drm_encoder_slave *slave = to_encoder_slave(encoder);

        valid = slave->slave_funcs->mode_valid(encoder, mode);
    }
    return valid;
}

int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
    struct drm_property *property, uint64_t value)
{
    struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
    int rc = -EINVAL;

    if (encoder) {
        struct drm_encoder_slave *slave = to_encoder_slave(encoder);

        rc = slave->slave_funcs->set_property(encoder, conn, property,
                              value);
    }
    return rc;
}

int armada_output_create(struct drm_device *dev,
    const struct armada_output_type *type, const void *data)
{
    struct armada_connector *dconn;
    int ret;

    dconn = kzalloc(sizeof(*dconn), GFP_KERNEL);
    if (!dconn)
        return -ENOMEM;

    dconn->type = type;

    ret = drm_connector_init(dev, &dconn->conn, &armada_drm_conn_funcs,
                 type->connector_type);
    if (ret) {
        DRM_ERROR("unable to init connector\n");
        goto err_destroy_dconn;
    }

    ret = type->create(&dconn->conn, data);
    if (ret)
        goto err_conn;

    ret = drm_connector_register(&dconn->conn);
    if (ret)
        goto err_sysfs;

    return 0;

 err_sysfs:
    if (dconn->conn.encoder)
        dconn->conn.encoder->funcs->destroy(dconn->conn.encoder);
 err_conn:
    drm_connector_cleanup(&dconn->conn);
 err_destroy_dconn:
    kfree(dconn);
    return ret;
}

drivers/gpu/drm/armada/armada_output.h (deleted)
@@ -1,33 +0,0 @@
/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef ARMADA_CONNETOR_H
#define ARMADA_CONNETOR_H

#define encoder_helper_funcs(encoder) \
    ((const struct drm_encoder_helper_funcs *)encoder->helper_private)

struct armada_output_type {
    int connector_type;
    enum drm_connector_status (*detect)(struct drm_connector *, bool);
    int (*create)(struct drm_connector *, const void *);
    int (*set_property)(struct drm_connector *, struct drm_property *,
                uint64_t);
};

struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn);

int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
    struct drm_display_mode *mode);

int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
    struct drm_property *property, uint64_t value);

int armada_output_create(struct drm_device *dev,
    const struct armada_output_type *type, const void *data);

#endif

drivers/gpu/drm/armada/armada_overlay.c
@@ -16,7 +16,7 @@
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

struct armada_plane_properties {
struct armada_ovl_plane_properties {
    uint32_t colorkey_yr;
    uint32_t colorkey_ug;
    uint32_t colorkey_vb;

@@ -29,26 +29,25 @@ struct armada_plane_properties {
    uint32_t colorkey_mode;
};

struct armada_plane {
    struct drm_plane base;
    spinlock_t lock;
struct armada_ovl_plane {
    struct armada_plane base;
    struct drm_framebuffer *old_fb;
    uint32_t src_hw;
    uint32_t dst_hw;
    uint32_t dst_yx;
    uint32_t ctrl0;
    struct {
        struct armada_vbl_event update;
        struct armada_plane_work work;
        struct armada_regs regs[13];
        wait_queue_head_t wait;
    } vbl;
    struct armada_plane_properties prop;
    struct armada_ovl_plane_properties prop;
};
#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
#define drm_to_armada_ovl_plane(p) \
    container_of(p, struct armada_ovl_plane, base.base)


static void
armada_ovl_update_attr(struct armada_plane_properties *prop,
armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
    struct armada_crtc *dcrtc)
{
    writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);

@@ -71,32 +70,34 @@ armada_ovl_update_attr(struct armada_plane_properties *prop,
    spin_unlock_irq(&dcrtc->irq_lock);
}

/* === Plane support === */
static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
static void armada_ovl_retire_fb(struct armada_ovl_plane *dplane,
    struct drm_framebuffer *fb)
{
    struct armada_plane *dplane = data;
    struct drm_framebuffer *fb;
    struct drm_framebuffer *old_fb;

    old_fb = xchg(&dplane->old_fb, fb);

    if (old_fb)
        armada_drm_queue_unref_work(dplane->base.base.dev, old_fb);
}
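The new armada_ovl_retire_fb() above replaces a spinlock-protected old_fb handoff with a single atomic exchange. A minimal sketch of the pattern (illustrative only, with a hypothetical queue_unref() standing in for armada_drm_queue_unref_work()):

    /*
     * xchg() swaps the pending pointer and returns the previous value
     * in one atomic step, so the plane-update path and the vblank work
     * can never both own the same framebuffer reference — no lock
     * needed around the two-step "read old, store new" sequence.
     */
    static struct drm_framebuffer *pending_fb;

    static void retire_fb(struct drm_framebuffer *new_fb)
    {
        struct drm_framebuffer *old = xchg(&pending_fb, new_fb);

        if (old)
            queue_unref(old);   /* hypothetical deferred-unref helper */
    }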
/* === Plane support === */
static void armada_ovl_plane_work(struct armada_crtc *dcrtc,
    struct armada_plane *plane, struct armada_plane_work *work)
{
    struct armada_ovl_plane *dplane = container_of(plane, struct armada_ovl_plane, base);

    armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);

    spin_lock(&dplane->lock);
    fb = dplane->old_fb;
    dplane->old_fb = NULL;
    spin_unlock(&dplane->lock);

    if (fb)
        armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);

    wake_up(&dplane->vbl.wait);
    armada_ovl_retire_fb(dplane, NULL);
}

static int
armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
    struct drm_framebuffer *fb,
    int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
    uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h)
{
    struct armada_plane *dplane = drm_to_armada_plane(plane);
    struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
    struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
    struct drm_rect src = {
        .x1 = src_x,

@@ -160,9 +161,8 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
                   dcrtc->base + LCD_SPU_SRAM_PARA1);
    }

    wait_event_timeout(dplane->vbl.wait,
               list_empty(&dplane->vbl.update.node),
               HZ/25);
    if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
        armada_drm_plane_work_cancel(dcrtc, &dplane->base);

    if (plane->fb != fb) {
        struct armada_gem_object *obj = drm_fb_obj(fb);

@@ -175,17 +175,8 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
         */
        drm_framebuffer_reference(fb);

        if (plane->fb) {
            struct drm_framebuffer *older_fb;

            spin_lock_irq(&dplane->lock);
            older_fb = dplane->old_fb;
            dplane->old_fb = plane->fb;
            spin_unlock_irq(&dplane->lock);
            if (older_fb)
                armada_drm_queue_unref_work(dcrtc->crtc.dev,
                                older_fb);
        }
        if (plane->fb)
            armada_ovl_retire_fb(dplane, plane->fb);

        src_y = src.y1 >> 16;
        src_x = src.x1 >> 16;

@@ -262,60 +253,50 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
    }
    if (idx) {
        armada_reg_queue_end(dplane->vbl.regs, idx);
        armada_drm_vbl_event_add(dcrtc, &dplane->vbl.update);
        armada_drm_plane_work_queue(dcrtc, &dplane->base,
                        &dplane->vbl.work);
    }
    return 0;
}

static int armada_plane_disable(struct drm_plane *plane)
static int armada_ovl_plane_disable(struct drm_plane *plane)
{
    struct armada_plane *dplane = drm_to_armada_plane(plane);
    struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
    struct drm_framebuffer *fb;
    struct armada_crtc *dcrtc;

    if (!dplane->base.crtc)
    if (!dplane->base.base.crtc)
        return 0;

    dcrtc = drm_to_armada_crtc(dplane->base.crtc);
    dcrtc = drm_to_armada_crtc(dplane->base.base.crtc);

    armada_drm_plane_work_cancel(dcrtc, &dplane->base);
    armada_drm_crtc_plane_disable(dcrtc, plane);

    dcrtc->plane = NULL;

    spin_lock_irq(&dcrtc->irq_lock);
    armada_drm_vbl_event_remove(dcrtc, &dplane->vbl.update);
    armada_updatel(0, CFG_DMA_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
    dplane->ctrl0 = 0;
    spin_unlock_irq(&dcrtc->irq_lock);

    /* Power down the Y/U/V FIFOs */
    armada_updatel(CFG_PDWN16x66 | CFG_PDWN32x66, 0,
               dcrtc->base + LCD_SPU_SRAM_PARA1);

    if (plane->fb)
        drm_framebuffer_unreference(plane->fb);

    spin_lock_irq(&dplane->lock);
    fb = dplane->old_fb;
    dplane->old_fb = NULL;
    spin_unlock_irq(&dplane->lock);
    fb = xchg(&dplane->old_fb, NULL);
    if (fb)
        drm_framebuffer_unreference(fb);

    return 0;
}

static void armada_plane_destroy(struct drm_plane *plane)
static void armada_ovl_plane_destroy(struct drm_plane *plane)
{
    struct armada_plane *dplane = drm_to_armada_plane(plane);
    struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);

    drm_plane_cleanup(plane);

    kfree(dplane);
}

static int armada_plane_set_property(struct drm_plane *plane,
static int armada_ovl_plane_set_property(struct drm_plane *plane,
    struct drm_property *property, uint64_t val)
{
    struct armada_private *priv = plane->dev->dev_private;
    struct armada_plane *dplane = drm_to_armada_plane(plane);
    struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
    bool update_attr = false;

    if (property == priv->colorkey_prop) {

@@ -372,21 +353,21 @@ static int armada_plane_set_property(struct drm_plane *plane,
        update_attr = true;
    }

    if (update_attr && dplane->base.crtc)
    if (update_attr && dplane->base.base.crtc)
        armada_ovl_update_attr(&dplane->prop,
                       drm_to_armada_crtc(dplane->base.crtc));
                       drm_to_armada_crtc(dplane->base.base.crtc));

    return 0;
}

static const struct drm_plane_funcs armada_plane_funcs = {
    .update_plane   = armada_plane_update,
    .disable_plane  = armada_plane_disable,
    .destroy    = armada_plane_destroy,
    .set_property   = armada_plane_set_property,
static const struct drm_plane_funcs armada_ovl_plane_funcs = {
    .update_plane   = armada_ovl_plane_update,
    .disable_plane  = armada_ovl_plane_disable,
    .destroy    = armada_ovl_plane_destroy,
    .set_property   = armada_ovl_plane_set_property,
};

static const uint32_t armada_formats[] = {
static const uint32_t armada_ovl_formats[] = {
    DRM_FORMAT_UYVY,
    DRM_FORMAT_YUYV,
    DRM_FORMAT_YUV420,

@@ -456,7 +437,7 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
{
    struct armada_private *priv = dev->dev_private;
    struct drm_mode_object *mobj;
    struct armada_plane *dplane;
    struct armada_ovl_plane *dplane;
    int ret;

    ret = armada_overlay_create_properties(dev);

@@ -467,13 +448,23 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
    if (!dplane)
        return -ENOMEM;

    spin_lock_init(&dplane->lock);
    init_waitqueue_head(&dplane->vbl.wait);
    armada_drm_vbl_event_init(&dplane->vbl.update, armada_plane_vbl,
                  dplane);
    ret = armada_drm_plane_init(&dplane->base);
    if (ret) {
        kfree(dplane);
        return ret;
    }

    drm_plane_init(dev, &dplane->base, crtcs, &armada_plane_funcs,
               armada_formats, ARRAY_SIZE(armada_formats), false);
    dplane->vbl.work.fn = armada_ovl_plane_work;

    ret = drm_universal_plane_init(dev, &dplane->base.base, crtcs,
                       &armada_ovl_plane_funcs,
                       armada_ovl_formats,
                       ARRAY_SIZE(armada_ovl_formats),
                       DRM_PLANE_TYPE_OVERLAY);
    if (ret) {
        kfree(dplane);
        return ret;
    }

    dplane->prop.colorkey_yr = 0xfefefe00;
    dplane->prop.colorkey_ug = 0x01010100;

@@ -483,7 +474,7 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
    dplane->prop.contrast = 0x4000;
    dplane->prop.saturation = 0x4000;

    mobj = &dplane->base.base;
    mobj = &dplane->base.base.base;
    drm_object_attach_property(mobj, priv->colorkey_prop,
                   0x0101fe);
    drm_object_attach_property(mobj, priv->colorkey_min_prop,

drivers/gpu/drm/armada/armada_slave.c (deleted)
@@ -1,139 +0,0 @@
/*
 * Copyright (C) 2012 Russell King
 *  Rewritten from the dovefb driver, and Armada510 manuals.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder_slave.h>
#include "armada_drm.h"
#include "armada_output.h"
#include "armada_slave.h"

static int armada_drm_slave_get_modes(struct drm_connector *conn)
{
    struct drm_encoder *enc = armada_drm_connector_encoder(conn);
    int count = 0;

    if (enc) {
        struct drm_encoder_slave *slave = to_encoder_slave(enc);

        count = slave->slave_funcs->get_modes(enc, conn);
    }

    return count;
}

static void armada_drm_slave_destroy(struct drm_encoder *enc)
{
    struct drm_encoder_slave *slave = to_encoder_slave(enc);
    struct i2c_client *client = drm_i2c_encoder_get_client(enc);

    if (slave->slave_funcs)
        slave->slave_funcs->destroy(enc);
    if (client)
        i2c_put_adapter(client->adapter);

    drm_encoder_cleanup(&slave->base);
    kfree(slave);
}

static const struct drm_encoder_funcs armada_drm_slave_encoder_funcs = {
    .destroy = armada_drm_slave_destroy,
};

static const struct drm_connector_helper_funcs armada_drm_slave_helper_funcs = {
    .get_modes = armada_drm_slave_get_modes,
    .mode_valid = armada_drm_slave_encoder_mode_valid,
    .best_encoder = armada_drm_connector_encoder,
};

static const struct drm_encoder_helper_funcs drm_slave_encoder_helpers = {
    .dpms = drm_i2c_encoder_dpms,
    .save = drm_i2c_encoder_save,
    .restore = drm_i2c_encoder_restore,
    .mode_fixup = drm_i2c_encoder_mode_fixup,
    .prepare = drm_i2c_encoder_prepare,
    .commit = drm_i2c_encoder_commit,
    .mode_set = drm_i2c_encoder_mode_set,
    .detect = drm_i2c_encoder_detect,
};

static int
armada_drm_conn_slave_create(struct drm_connector *conn, const void *data)
{
    const struct armada_drm_slave_config *config = data;
    struct drm_encoder_slave *slave;
    struct i2c_adapter *adap;
    int ret;

    conn->interlace_allowed = config->interlace_allowed;
    conn->doublescan_allowed = config->doublescan_allowed;
    conn->polled = config->polled;

    drm_connector_helper_add(conn, &armada_drm_slave_helper_funcs);

    slave = kzalloc(sizeof(*slave), GFP_KERNEL);
    if (!slave)
        return -ENOMEM;

    slave->base.possible_crtcs = config->crtcs;

    adap = i2c_get_adapter(config->i2c_adapter_id);
    if (!adap) {
        kfree(slave);
        return -EPROBE_DEFER;
    }

    ret = drm_encoder_init(conn->dev, &slave->base,
                   &armada_drm_slave_encoder_funcs,
                   DRM_MODE_ENCODER_TMDS);
    if (ret) {
        DRM_ERROR("unable to init encoder\n");
        i2c_put_adapter(adap);
        kfree(slave);
        return ret;
    }

    ret = drm_i2c_encoder_init(conn->dev, slave, adap, &config->info);
    i2c_put_adapter(adap);
    if (ret) {
        DRM_ERROR("unable to init encoder slave\n");
        armada_drm_slave_destroy(&slave->base);
        return ret;
    }

    drm_encoder_helper_add(&slave->base, &drm_slave_encoder_helpers);

    ret = slave->slave_funcs->create_resources(&slave->base, conn);
    if (ret) {
        armada_drm_slave_destroy(&slave->base);
        return ret;
    }

    ret = drm_mode_connector_attach_encoder(conn, &slave->base);
    if (ret) {
        armada_drm_slave_destroy(&slave->base);
        return ret;
    }

    conn->encoder = &slave->base;

    return ret;
}

static const struct armada_output_type armada_drm_conn_slave = {
    .connector_type = DRM_MODE_CONNECTOR_HDMIA,
    .create     = armada_drm_conn_slave_create,
    .set_property   = armada_drm_slave_encoder_set_property,
};

int armada_drm_connector_slave_create(struct drm_device *dev,
    const struct armada_drm_slave_config *config)
{
    return armada_output_create(dev, &armada_drm_conn_slave, config);
}

drivers/gpu/drm/armada/armada_slave.h (deleted)
@@ -1,26 +0,0 @@
/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef ARMADA_SLAVE_H
#define ARMADA_SLAVE_H

#include <linux/i2c.h>
#include <drm/drmP.h>

struct armada_drm_slave_config {
    int i2c_adapter_id;
    uint32_t crtcs;
    uint8_t polled;
    bool interlace_allowed;
    bool doublescan_allowed;
    struct i2c_board_info info;
};

int armada_drm_connector_slave_create(struct drm_device *dev,
    const struct armada_drm_slave_config *);

#endif

drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -656,7 +656,8 @@ static void atmel_hlcdc_dc_irq_uninstall(struct drm_device *dev)
        regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_ISR, &isr);
}

static int atmel_hlcdc_dc_enable_vblank(struct drm_device *dev, int crtc)
static int atmel_hlcdc_dc_enable_vblank(struct drm_device *dev,
                    unsigned int pipe)
{
    struct atmel_hlcdc_dc *dc = dev->dev_private;


@@ -666,7 +667,8 @@ static int atmel_hlcdc_dc_enable_vblank(struct drm_device *dev, int crtc)
    return 0;
}

static void atmel_hlcdc_dc_disable_vblank(struct drm_device *dev, int crtc)
static void atmel_hlcdc_dc_disable_vblank(struct drm_device *dev,
                      unsigned int pipe)
{
    struct atmel_hlcdc_dc *dc = dev->dev_private;


@@ -697,7 +699,7 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
    .irq_preinstall = atmel_hlcdc_dc_irq_uninstall,
    .irq_postinstall = atmel_hlcdc_dc_irq_postinstall,
    .irq_uninstall = atmel_hlcdc_dc_irq_uninstall,
    .get_vblank_counter = drm_vblank_count,
    .get_vblank_counter = drm_vblank_no_hw_counter,
    .enable_vblank = atmel_hlcdc_dc_enable_vblank,
    .disable_vblank = atmel_hlcdc_dc_disable_vblank,
    .gem_free_object = drm_gem_cma_free_object,

drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -633,7 +633,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
        if (!state->bpp[i])
            return -EINVAL;

        switch (state->base.rotation & 0xf) {
        switch (state->base.rotation & DRM_ROTATE_MASK) {
        case BIT(DRM_ROTATE_90):
            offset = ((y_offset + state->src_y + patched_src_w - 1) /
                  ydiv) * fb->pitches[i];

@@ -712,11 +712,13 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
}

static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p,
                    struct drm_framebuffer *fb,
                    const struct drm_plane_state *new_state)
{
    struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);

    if (!new_state->fb)
        return 0;

    return atmel_hlcdc_layer_update_start(&plane->layer);
}

drivers/gpu/drm/bridge/Kconfig
@@ -11,6 +11,18 @@ config DRM_DW_HDMI
    tristate
    select DRM_KMS_HELPER

config DRM_DW_HDMI_AHB_AUDIO
    tristate "Synopsis Designware AHB Audio interface"
    depends on DRM_DW_HDMI && SND
    select SND_PCM
    select SND_PCM_ELD
    select SND_PCM_IEC958
    help
      Support the AHB Audio interface which is part of the Synopsis
      Designware HDMI block. This is used in conjunction with
      the i.MX6 HDMI driver.


config DRM_NXP_PTN3460
    tristate "NXP PTN3460 DP/LVDS bridge"
    depends on OF

drivers/gpu/drm/bridge/Makefile
@@ -1,5 +1,6 @@
ccflags-y := -Iinclude/drm

obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw_hdmi-ahb-audio.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o

drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c (new file, 653 lines)
@@ -0,0 +1,653 @@
/*
 * DesignWare HDMI audio driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written and tested against the Designware HDMI Tx found in iMX6.
 */
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_edid.h>

#include <sound/asoundef.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_drm_eld.h>
#include <sound/pcm_iec958.h>

#include "dw_hdmi-audio.h"

#define DRIVER_NAME "dw-hdmi-ahb-audio"

/* Provide some bits rather than bit offsets */
enum {
    HDMI_AHB_DMA_CONF0_SW_FIFO_RST = BIT(7),
    HDMI_AHB_DMA_CONF0_EN_HLOCK = BIT(3),
    HDMI_AHB_DMA_START_START = BIT(0),
    HDMI_AHB_DMA_STOP_STOP = BIT(0),
    HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR = BIT(5),
    HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST = BIT(4),
    HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY = BIT(3),
    HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE = BIT(2),
    HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL = BIT(1),
    HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY = BIT(0),
    HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL =
        HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR |
        HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST |
        HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY |
        HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE |
        HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL |
        HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY,
    HDMI_IH_AHBDMAAUD_STAT0_ERROR = BIT(5),
    HDMI_IH_AHBDMAAUD_STAT0_LOST = BIT(4),
    HDMI_IH_AHBDMAAUD_STAT0_RETRY = BIT(3),
    HDMI_IH_AHBDMAAUD_STAT0_DONE = BIT(2),
    HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL = BIT(1),
    HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY = BIT(0),
    HDMI_IH_AHBDMAAUD_STAT0_ALL =
        HDMI_IH_AHBDMAAUD_STAT0_ERROR |
        HDMI_IH_AHBDMAAUD_STAT0_LOST |
        HDMI_IH_AHBDMAAUD_STAT0_RETRY |
        HDMI_IH_AHBDMAAUD_STAT0_DONE |
        HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL |
        HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY,
    HDMI_AHB_DMA_CONF0_INCR16 = 2 << 1,
    HDMI_AHB_DMA_CONF0_INCR8 = 1 << 1,
    HDMI_AHB_DMA_CONF0_INCR4 = 0,
    HDMI_AHB_DMA_CONF0_BURST_MODE = BIT(0),
    HDMI_AHB_DMA_MASK_DONE = BIT(7),

    HDMI_REVISION_ID = 0x0001,
    HDMI_IH_AHBDMAAUD_STAT0 = 0x0109,
    HDMI_IH_MUTE_AHBDMAAUD_STAT0 = 0x0189,
    HDMI_FC_AUDICONF2 = 0x1027,
    HDMI_FC_AUDSCONF = 0x1063,
    HDMI_FC_AUDSCONF_LAYOUT1 = 1 << 0,
    HDMI_FC_AUDSCONF_LAYOUT0 = 0 << 0,
    HDMI_AHB_DMA_CONF0 = 0x3600,
    HDMI_AHB_DMA_START = 0x3601,
    HDMI_AHB_DMA_STOP = 0x3602,
    HDMI_AHB_DMA_THRSLD = 0x3603,
    HDMI_AHB_DMA_STRADDR0 = 0x3604,
    HDMI_AHB_DMA_STPADDR0 = 0x3608,
    HDMI_AHB_DMA_MASK = 0x3614,
    HDMI_AHB_DMA_POL = 0x3615,
    HDMI_AHB_DMA_CONF1 = 0x3616,
    HDMI_AHB_DMA_BUFFPOL = 0x361a,
};

struct dw_hdmi_channel_conf {
    u8 conf1;
    u8 ca;
};

/*
 * The default mapping of ALSA channels to HDMI channels and speaker
 * allocation bits.  Note that we can't do channel remapping here -
 * channels must be in the same order.
 *
 * Mappings for alsa-lib pcm/surround*.conf files:
 *
 *      Front   Sur4.0  Sur4.1  Sur5.0  Sur5.1  Sur7.1
 * Channels 2   4   6   6   6   8
 *
 * Our mapping from ALSA channel to CEA686D speaker name and HDMI channel:
 *
 *              Number of ALSA channels
 * ALSA Channel 2   3   4   5   6   7   8
 *  0       FL:0    =   =   =   =   =   =
 *  1       FR:1    =   =   =   =   =   =
 *  2           FC:3    RL:4    LFE:2   =   =   =
 *  3               RR:5    RL:4    FC:3    =   =
 *  4                   RR:5    RL:4    =   =
 *  5                       RR:5    =   =
 *  6                           RC:6    =
 *  7                           RLC/FRC RLC/FRC
 */
static struct dw_hdmi_channel_conf default_hdmi_channel_config[7] = {
    { 0x03, 0x00 }, /* FL,FR */
    { 0x0b, 0x02 }, /* FL,FR,FC */
    { 0x33, 0x08 }, /* FL,FR,RL,RR */
    { 0x37, 0x09 }, /* FL,FR,LFE,RL,RR */
    { 0x3f, 0x0b }, /* FL,FR,LFE,FC,RL,RR */
    { 0x7f, 0x0f }, /* FL,FR,LFE,FC,RL,RR,RC */
    { 0xff, 0x13 }, /* FL,FR,LFE,FC,RL,RR,[FR]RC,[FR]LC */
};

struct snd_dw_hdmi {
    struct snd_card *card;
    struct snd_pcm *pcm;
    spinlock_t lock;
    struct dw_hdmi_audio_data data;
    struct snd_pcm_substream *substream;
    void (*reformat)(struct snd_dw_hdmi *, size_t, size_t);
    void *buf_src;
    void *buf_dst;
    dma_addr_t buf_addr;
    unsigned buf_offset;
    unsigned buf_period;
    unsigned buf_size;
    unsigned channels;
    u8 revision;
    u8 iec_offset;
    u8 cs[192][8];
};

static void dw_hdmi_writel(u32 val, void __iomem *ptr)
{
    writeb_relaxed(val, ptr);
    writeb_relaxed(val >> 8, ptr + 1);
    writeb_relaxed(val >> 16, ptr + 2);
    writeb_relaxed(val >> 24, ptr + 3);
}

/*
 * Convert to hardware format: The userspace buffer contains IEC958 samples,
 * with the PCUV bits in bits 31..28 and audio samples in bits 27..4.  We
 * need these to be in bits 27..24, with the IEC B bit in bit 28, and audio
 * samples in 23..0.
 *
 * Default preamble in bits 3..0: 8 = block start, 4 = even 2 = odd
 *
 * Ideally, we could do with having the data properly formatted in userspace.
 */
static void dw_hdmi_reformat_iec958(struct snd_dw_hdmi *dw,
    size_t offset, size_t bytes)
{
    u32 *src = dw->buf_src + offset;
    u32 *dst = dw->buf_dst + offset;
    u32 *end = dw->buf_src + offset + bytes;

    do {
        u32 b, sample = *src++;

        b = (sample & 8) << (28 - 3);

        sample >>= 4;

        *dst++ = sample | b;
    } while (src < end);
}

static u32 parity(u32 sample)
{
    sample ^= sample >> 16;
    sample ^= sample >> 8;
    sample ^= sample >> 4;
    sample ^= sample >> 2;
    sample ^= sample >> 1;
    return (sample & 1) << 27;
}
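parity() above computes the IEC958 P (parity) bit by XOR-folding: each shift-XOR halves the width under consideration, so after five folds bit 0 holds the XOR of all 32 input bits, which is then positioned at bit 27. An equivalent one-liner using the GCC/Clang builtin, as an illustrative cross-check only:

    /* Same result as the fold above for any 32-bit input. */
    static u32 parity_builtin(u32 sample)
    {
        return (u32)__builtin_parity(sample) << 27;
    }

The caller masks with ~0xf8000000 first, so only bits 26..0 of the assembled subframe contribute, which makes the overall subframe parity even.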
static void dw_hdmi_reformat_s24(struct snd_dw_hdmi *dw,
    size_t offset, size_t bytes)
{
    u32 *src = dw->buf_src + offset;
    u32 *dst = dw->buf_dst + offset;
    u32 *end = dw->buf_src + offset + bytes;

    do {
        unsigned i;
        u8 *cs;

        cs = dw->cs[dw->iec_offset++];
        if (dw->iec_offset >= 192)
            dw->iec_offset = 0;

        i = dw->channels;
        do {
            u32 sample = *src++;

            sample &= ~0xff000000;
            sample |= *cs++ << 24;
            sample |= parity(sample & ~0xf8000000);

            *dst++ = sample;
        } while (--i);
    } while (src < end);
}

static void dw_hdmi_create_cs(struct snd_dw_hdmi *dw,
    struct snd_pcm_runtime *runtime)
{
    u8 cs[4];
    unsigned ch, i, j;

    snd_pcm_create_iec958_consumer(runtime, cs, sizeof(cs));

    memset(dw->cs, 0, sizeof(dw->cs));

    for (ch = 0; ch < 8; ch++) {
        cs[2] &= ~IEC958_AES2_CON_CHANNEL;
        cs[2] |= (ch + 1) << 4;

        for (i = 0; i < ARRAY_SIZE(cs); i++) {
            unsigned c = cs[i];

            for (j = 0; j < 8; j++, c >>= 1)
                dw->cs[i * 8 + j][ch] = (c & 1) << 2;
        }
    }
    dw->cs[0][0] |= BIT(4);
}
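dw_hdmi_create_cs() above pre-expands the 4 bytes of IEC958 channel status into a 192-frame by 8-channel table, with each status bit already shifted to bit 2, so that the `*cs++ << 24` in dw_hdmi_reformat_s24() lands it on the C bit (bit 26) of the finished subframe; frame 0 additionally carries BIT(4), which becomes bit 28 after the shift, the B (block start) flag. A worked check of the indexing (illustrative only, not part of this patch):

    /*
     * Status bit for frame f (0..191): byte f/8, bit f%8 of the 4-byte
     * consumer channel status.  Frames 32..191 read as zero here, since
     * only sizeof(cs) == 4 bytes are expanded by the loop above.
     */
    static u8 cs_bit(const u8 cs[4], unsigned int frame)
    {
        return frame < 32 ? (cs[frame / 8] >> (frame % 8)) & 1 : 0;
    }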
static void dw_hdmi_start_dma(struct snd_dw_hdmi *dw)
|
||||
{
|
||||
void __iomem *base = dw->data.base;
|
||||
unsigned offset = dw->buf_offset;
|
||||
unsigned period = dw->buf_period;
|
||||
u32 start, stop;
|
||||
|
||||
dw->reformat(dw, offset, period);
|
||||
|
||||
/* Clear all irqs before enabling irqs and starting DMA */
|
||||
writeb_relaxed(HDMI_IH_AHBDMAAUD_STAT0_ALL,
|
||||
base + HDMI_IH_AHBDMAAUD_STAT0);
|
||||
|
||||
start = dw->buf_addr + offset;
|
||||
stop = start + period - 1;
|
||||
|
||||
/* Setup the hardware start/stop addresses */
|
||||
dw_hdmi_writel(start, base + HDMI_AHB_DMA_STRADDR0);
|
||||
dw_hdmi_writel(stop, base + HDMI_AHB_DMA_STPADDR0);
|
||||
|
||||
writeb_relaxed((u8)~HDMI_AHB_DMA_MASK_DONE, base + HDMI_AHB_DMA_MASK);
|
||||
writeb(HDMI_AHB_DMA_START_START, base + HDMI_AHB_DMA_START);
|
||||
|
||||
offset += period;
|
||||
if (offset >= dw->buf_size)
|
||||
offset = 0;
|
||||
dw->buf_offset = offset;
|
||||
}
|
||||
|
||||
static void dw_hdmi_stop_dma(struct snd_dw_hdmi *dw)
|
||||
{
|
||||
/* Disable interrupts before disabling DMA */
|
||||
writeb_relaxed(~0, dw->data.base + HDMI_AHB_DMA_MASK);
|
||||
writeb_relaxed(HDMI_AHB_DMA_STOP_STOP, dw->data.base + HDMI_AHB_DMA_STOP);
|
||||
}
|
||||
|
||||
static irqreturn_t snd_dw_hdmi_irq(int irq, void *data)
|
||||
{
|
||||
struct snd_dw_hdmi *dw = data;
|
||||
struct snd_pcm_substream *substream;
|
||||
unsigned stat;
|
||||
|
||||
stat = readb_relaxed(dw->data.base + HDMI_IH_AHBDMAAUD_STAT0);
|
||||
if (!stat)
|
||||
return IRQ_NONE;
|
||||
|
||||
writeb_relaxed(stat, dw->data.base + HDMI_IH_AHBDMAAUD_STAT0);
|
||||
|
||||
substream = dw->substream;
|
||||
if (stat & HDMI_IH_AHBDMAAUD_STAT0_DONE && substream) {
|
||||
snd_pcm_period_elapsed(substream);
|
||||
|
||||
spin_lock(&dw->lock);
|
||||
if (dw->substream)
|
||||
dw_hdmi_start_dma(dw);
|
||||
spin_unlock(&dw->lock);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static struct snd_pcm_hardware dw_hdmi_hw = {
|
||||
.info = SNDRV_PCM_INFO_INTERLEAVED |
|
||||
SNDRV_PCM_INFO_BLOCK_TRANSFER |
|
||||
SNDRV_PCM_INFO_MMAP |
|
||||
SNDRV_PCM_INFO_MMAP_VALID,
|
||||
.formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE |
|
||||
SNDRV_PCM_FMTBIT_S24_LE,
|
||||
.rates = SNDRV_PCM_RATE_32000 |
|
||||
SNDRV_PCM_RATE_44100 |
|
||||
SNDRV_PCM_RATE_48000 |
|
||||
SNDRV_PCM_RATE_88200 |
|
||||
SNDRV_PCM_RATE_96000 |
|
||||
SNDRV_PCM_RATE_176400 |
|
||||
SNDRV_PCM_RATE_192000,
|
||||
.channels_min = 2,
|
||||
.channels_max = 8,
|
||||
.buffer_bytes_max = 1024 * 1024,
|
||||
.period_bytes_min = 256,
|
||||
.period_bytes_max = 8192, /* ERR004323: must limit to 8k */
|
||||
.periods_min = 2,
|
||||
.periods_max = 16,
|
||||
.fifo_size = 0,
|
||||
};
|
||||
|
||||
static int dw_hdmi_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_dw_hdmi *dw = substream->private_data;
	void __iomem *base = dw->data.base;
	int ret;

	runtime->hw = dw_hdmi_hw;

	ret = snd_pcm_hw_constraint_eld(runtime, dw->data.eld);
	if (ret < 0)
		return ret;

	ret = snd_pcm_limit_hw_rates(runtime);
	if (ret < 0)
		return ret;

	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		return ret;

	/* Limit the buffer size to the size of the preallocated buffer */
	ret = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
					   0, substream->dma_buffer.bytes);
	if (ret < 0)
		return ret;

	/* Clear FIFO */
	writeb_relaxed(HDMI_AHB_DMA_CONF0_SW_FIFO_RST,
		       base + HDMI_AHB_DMA_CONF0);

	/* Configure interrupt polarities */
	writeb_relaxed(~0, base + HDMI_AHB_DMA_POL);
	writeb_relaxed(~0, base + HDMI_AHB_DMA_BUFFPOL);

	/* Keep interrupts masked, and clear any pending */
	writeb_relaxed(~0, base + HDMI_AHB_DMA_MASK);
	writeb_relaxed(~0, base + HDMI_IH_AHBDMAAUD_STAT0);

	ret = request_irq(dw->data.irq, snd_dw_hdmi_irq, IRQF_SHARED,
			  "dw-hdmi-audio", dw);
	if (ret)
		return ret;

	/* Un-mute done interrupt */
	writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL &
		       ~HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE,
		       base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);

	return 0;
}

static int dw_hdmi_close(struct snd_pcm_substream *substream)
{
	struct snd_dw_hdmi *dw = substream->private_data;

	/* Mute all interrupts */
	writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL,
		       dw->data.base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);

	free_irq(dw->data.irq, dw);

	return 0;
}

static int dw_hdmi_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_vmalloc_buffer(substream);
}

static int dw_hdmi_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	/* Allocate the PCM runtime buffer, which is exposed to userspace. */
	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
						params_buffer_bytes(params));
}

static int dw_hdmi_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_dw_hdmi *dw = substream->private_data;
	u8 threshold, conf0, conf1, layout, ca;

	/* Setup as per 3.0.5 FSL 4.1.0 BSP */
	switch (dw->revision) {
	case 0x0a:
		conf0 = HDMI_AHB_DMA_CONF0_BURST_MODE |
			HDMI_AHB_DMA_CONF0_INCR4;
		if (runtime->channels == 2)
			threshold = 126;
		else
			threshold = 124;
		break;
	case 0x1a:
		conf0 = HDMI_AHB_DMA_CONF0_BURST_MODE |
			HDMI_AHB_DMA_CONF0_INCR8;
		threshold = 128;
		break;
	default:
		/* NOTREACHED */
		return -EINVAL;
	}

	dw_hdmi_set_sample_rate(dw->data.hdmi, runtime->rate);

	/* Minimum number of bytes in the fifo. */
	runtime->hw.fifo_size = threshold * 32;

	conf0 |= HDMI_AHB_DMA_CONF0_EN_HLOCK;
	conf1 = default_hdmi_channel_config[runtime->channels - 2].conf1;
	ca = default_hdmi_channel_config[runtime->channels - 2].ca;

	/*
	 * For >2 channel PCM audio, we need to select layout 1
	 * and set an appropriate channel map.
	 */
	if (runtime->channels > 2)
		layout = HDMI_FC_AUDSCONF_LAYOUT1;
	else
		layout = HDMI_FC_AUDSCONF_LAYOUT0;

	writeb_relaxed(threshold, dw->data.base + HDMI_AHB_DMA_THRSLD);
	writeb_relaxed(conf0, dw->data.base + HDMI_AHB_DMA_CONF0);
	writeb_relaxed(conf1, dw->data.base + HDMI_AHB_DMA_CONF1);
	writeb_relaxed(layout, dw->data.base + HDMI_FC_AUDSCONF);
	writeb_relaxed(ca, dw->data.base + HDMI_FC_AUDICONF2);

	switch (runtime->format) {
	case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
		dw->reformat = dw_hdmi_reformat_iec958;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		dw_hdmi_create_cs(dw, runtime);
		dw->reformat = dw_hdmi_reformat_s24;
		break;
	}
	dw->iec_offset = 0;
	dw->channels = runtime->channels;
	dw->buf_src = runtime->dma_area;
	dw->buf_dst = substream->dma_buffer.area;
	dw->buf_addr = substream->dma_buffer.addr;
	dw->buf_period = snd_pcm_lib_period_bytes(substream);
	dw->buf_size = snd_pcm_lib_buffer_bytes(substream);

	return 0;
}

static int dw_hdmi_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_dw_hdmi *dw = substream->private_data;
	unsigned long flags;
	int ret = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		spin_lock_irqsave(&dw->lock, flags);
		dw->buf_offset = 0;
		dw->substream = substream;
		dw_hdmi_start_dma(dw);
		dw_hdmi_audio_enable(dw->data.hdmi);
		spin_unlock_irqrestore(&dw->lock, flags);
		substream->runtime->delay = substream->runtime->period_size;
		break;

	case SNDRV_PCM_TRIGGER_STOP:
		spin_lock_irqsave(&dw->lock, flags);
		dw->substream = NULL;
		dw_hdmi_stop_dma(dw);
		dw_hdmi_audio_disable(dw->data.hdmi);
		spin_unlock_irqrestore(&dw->lock, flags);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
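The runtime->delay assignment in the TRIGGER_START case reflects that dw_hdmi_start_dma() queues a full period ahead of what snd_pcm_period_elapsed() reports; a sketch of the resulting extra latency (hypothetical figures, not from the patch):

	/* e.g. a 1024-frame period at 48 kHz adds about 21 ms: */
	unsigned int period_size = 1024;			/* frames */
	unsigned int delay_ms = period_size * 1000 / 48000;	/* = 21 */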
static snd_pcm_uframes_t dw_hdmi_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_dw_hdmi *dw = substream->private_data;

	/*
	 * We are unable to report the exact hardware position as
	 * reading the 32-bit DMA position using 8-bit reads is racy.
	 */
	return bytes_to_frames(runtime, dw->buf_offset);
}

static struct snd_pcm_ops snd_dw_hdmi_ops = {
	.open = dw_hdmi_open,
	.close = dw_hdmi_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = dw_hdmi_hw_params,
	.hw_free = dw_hdmi_hw_free,
	.prepare = dw_hdmi_prepare,
	.trigger = dw_hdmi_trigger,
	.pointer = dw_hdmi_pointer,
	.page = snd_pcm_lib_get_vmalloc_page,
};

static int snd_dw_hdmi_probe(struct platform_device *pdev)
{
	const struct dw_hdmi_audio_data *data = pdev->dev.platform_data;
	struct device *dev = pdev->dev.parent;
	struct snd_dw_hdmi *dw;
	struct snd_card *card;
	struct snd_pcm *pcm;
	unsigned revision;
	int ret;

	writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL,
		       data->base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);
	revision = readb_relaxed(data->base + HDMI_REVISION_ID);
	if (revision != 0x0a && revision != 0x1a) {
		dev_err(dev, "dw-hdmi-audio: unknown revision 0x%02x\n",
			revision);
		return -ENXIO;
	}

	ret = snd_card_new(dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
			   THIS_MODULE, sizeof(struct snd_dw_hdmi), &card);
	if (ret < 0)
		return ret;

	strlcpy(card->driver, DRIVER_NAME, sizeof(card->driver));
	strlcpy(card->shortname, "DW-HDMI", sizeof(card->shortname));
	snprintf(card->longname, sizeof(card->longname),
		 "%s rev 0x%02x, irq %d", card->shortname, revision,
		 data->irq);

	dw = card->private_data;
	dw->card = card;
	dw->data = *data;
	dw->revision = revision;

	spin_lock_init(&dw->lock);

	ret = snd_pcm_new(card, "DW HDMI", 0, 1, 0, &pcm);
	if (ret < 0)
		goto err;

	dw->pcm = pcm;
	pcm->private_data = dw;
	strlcpy(pcm->name, DRIVER_NAME, sizeof(pcm->name));
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_dw_hdmi_ops);

	/*
	 * To support 8-channel 96kHz audio reliably, we need 512k
	 * to satisfy alsa with our restricted period (ERR004323).
	 */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
			dev, 128 * 1024, 1024 * 1024);

	ret = snd_card_register(card);
	if (ret < 0)
		goto err;

	platform_set_drvdata(pdev, dw);

	return 0;

err:
	snd_card_free(card);
	return ret;
}
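Checking the 512k figure in the comment above (plain arithmetic, not from the patch): 8 channels at 96 kHz with 4-byte samples stream 96000 * 8 * 4 = 3072000 bytes/s, so a 512 KiB buffer holds about 170 ms of audio and still divides into 64 of the 8 KiB periods that ERR004323 allows.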
static int snd_dw_hdmi_remove(struct platform_device *pdev)
{
	struct snd_dw_hdmi *dw = platform_get_drvdata(pdev);

	snd_card_free(dw->card);

	return 0;
}

#if defined(CONFIG_PM_SLEEP) && defined(IS_NOT_BROKEN)
/*
 * This code is fine, but requires implementation in the dw_hdmi_trigger()
 * method which is currently missing as I have no way to test this.
 */
static int snd_dw_hdmi_suspend(struct device *dev)
{
	struct snd_dw_hdmi *dw = dev_get_drvdata(dev);

	snd_power_change_state(dw->card, SNDRV_CTL_POWER_D3cold);
	snd_pcm_suspend_all(dw->pcm);

	return 0;
}

static int snd_dw_hdmi_resume(struct device *dev)
{
	struct snd_dw_hdmi *dw = dev_get_drvdata(dev);

	snd_power_change_state(dw->card, SNDRV_CTL_POWER_D0);

	return 0;
}

static SIMPLE_DEV_PM_OPS(snd_dw_hdmi_pm, snd_dw_hdmi_suspend,
			 snd_dw_hdmi_resume);
#define PM_OPS	&snd_dw_hdmi_pm
#else
#define PM_OPS	NULL
#endif

static struct platform_driver snd_dw_hdmi_driver = {
	.probe	= snd_dw_hdmi_probe,
	.remove	= snd_dw_hdmi_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.pm = PM_OPS,
	},
};

module_platform_driver(snd_dw_hdmi_driver);

MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
MODULE_DESCRIPTION("Synopsis Designware HDMI AHB ALSA interface");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
 drivers/gpu/drm/bridge/dw_hdmi-audio.h | 14 (new file)

@@ -0,0 +1,14 @@
+#ifndef DW_HDMI_AUDIO_H
+#define DW_HDMI_AUDIO_H
+
+struct dw_hdmi;
+
+struct dw_hdmi_audio_data {
+	phys_addr_t phys;
+	void __iomem *base;
+	int irq;
+	struct dw_hdmi *hdmi;
+	u8 *eld;
+};
+
+#endif
@@ -28,6 +28,7 @@
 #include <drm/bridge/dw_hdmi.h>
 
 #include "dw_hdmi.h"
+#include "dw_hdmi-audio.h"
 
 #define HDMI_EDID_LEN		512
 
@@ -104,6 +105,7 @@ struct dw_hdmi {
 	struct drm_encoder *encoder;
 	struct drm_bridge *bridge;
 
+	struct platform_device *audio;
 	enum dw_hdmi_devtype dev_type;
 	struct device *dev;
 	struct clk *isfr_clk;
@@ -126,7 +128,11 @@ struct dw_hdmi {
 	bool sink_has_audio;
 
 	struct mutex mutex;		/* for state below and previous_mode */
+	enum drm_connector_force force;	/* mutex-protected force state */
 	bool disabled;			/* DRM has disabled our bridge */
+	bool bridge_is_on;		/* indicates the bridge is on */
+	bool rxsense;			/* rxsense state */
+	u8 phy_mask;			/* desired phy int mask settings */
 
 	spinlock_t audio_lock;
 	struct mutex audio_mutex;
@@ -134,12 +140,19 @@ struct dw_hdmi {
 	unsigned int audio_cts;
 	unsigned int audio_n;
 	bool audio_enable;
-	int ratio;
 
 	void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
 	u8 (*read)(struct dw_hdmi *hdmi, int offset);
 };
 
+#define HDMI_IH_PHY_STAT0_RX_SENSE \
+	(HDMI_IH_PHY_STAT0_RX_SENSE0 | HDMI_IH_PHY_STAT0_RX_SENSE1 | \
+	 HDMI_IH_PHY_STAT0_RX_SENSE2 | HDMI_IH_PHY_STAT0_RX_SENSE3)
+
+#define HDMI_PHY_RX_SENSE \
+	(HDMI_PHY_RX_SENSE0 | HDMI_PHY_RX_SENSE1 | \
+	 HDMI_PHY_RX_SENSE2 | HDMI_PHY_RX_SENSE3)
+
 static void dw_hdmi_writel(struct dw_hdmi *hdmi, u8 val, int offset)
 {
 	writel(val, hdmi->regs + (offset << 2));
@@ -203,61 +216,53 @@ static void hdmi_set_cts_n(struct dw_hdmi *hdmi, unsigned int cts,
 	hdmi_writeb(hdmi, n & 0xff, HDMI_AUD_N1);
 }
 
-static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk,
-				   unsigned int ratio)
+static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk)
 {
 	unsigned int n = (128 * freq) / 1000;
+	unsigned int mult = 1;
+
+	while (freq > 48000) {
+		mult *= 2;
+		freq /= 2;
+	}
 
 	switch (freq) {
 	case 32000:
-		if (pixel_clk == 25170000)
-			n = (ratio == 150) ? 9152 : 4576;
-		else if (pixel_clk == 27020000)
-			n = (ratio == 150) ? 8192 : 4096;
-		else if (pixel_clk == 74170000 || pixel_clk == 148350000)
+		if (pixel_clk == 25175000)
+			n = 4576;
+		else if (pixel_clk == 27027000)
+			n = 4096;
+		else if (pixel_clk == 74176000 || pixel_clk == 148352000)
 			n = 11648;
 		else
 			n = 4096;
+		n *= mult;
 		break;
 
 	case 44100:
-		if (pixel_clk == 25170000)
+		if (pixel_clk == 25175000)
 			n = 7007;
-		else if (pixel_clk == 74170000)
+		else if (pixel_clk == 74176000)
 			n = 17836;
-		else if (pixel_clk == 148350000)
-			n = (ratio == 150) ? 17836 : 8918;
+		else if (pixel_clk == 148352000)
+			n = 8918;
 		else
 			n = 6272;
+		n *= mult;
 		break;
 
 	case 48000:
-		if (pixel_clk == 25170000)
-			n = (ratio == 150) ? 9152 : 6864;
-		else if (pixel_clk == 27020000)
-			n = (ratio == 150) ? 8192 : 6144;
-		else if (pixel_clk == 74170000)
+		if (pixel_clk == 25175000)
+			n = 6864;
+		else if (pixel_clk == 27027000)
+			n = 6144;
+		else if (pixel_clk == 74176000)
 			n = 11648;
-		else if (pixel_clk == 148350000)
-			n = (ratio == 150) ? 11648 : 5824;
+		else if (pixel_clk == 148352000)
+			n = 5824;
 		else
 			n = 6144;
-		break;
-
-	case 88200:
-		n = hdmi_compute_n(44100, pixel_clk, ratio) * 2;
-		break;
-
-	case 96000:
-		n = hdmi_compute_n(48000, pixel_clk, ratio) * 2;
-		break;
-
-	case 176400:
-		n = hdmi_compute_n(44100, pixel_clk, ratio) * 4;
-		break;
-
-	case 192000:
-		n = hdmi_compute_n(48000, pixel_clk, ratio) * 4;
+		n *= mult;
 		break;
 
 	default:
@@ -267,93 +272,29 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk,
 	return n;
 }
 
-static unsigned int hdmi_compute_cts(unsigned int freq, unsigned long pixel_clk,
-				     unsigned int ratio)
-{
-	unsigned int cts = 0;
-
-	pr_debug("%s: freq: %d pixel_clk: %ld ratio: %d\n", __func__, freq,
-		 pixel_clk, ratio);
-
-	switch (freq) {
-	case 32000:
-		if (pixel_clk == 297000000) {
-			cts = 222750;
-			break;
-		}
-	case 48000:
-	case 96000:
-	case 192000:
-		switch (pixel_clk) {
-		case 25200000:
-		case 27000000:
-		case 54000000:
-		case 74250000:
-		case 148500000:
-			cts = pixel_clk / 1000;
-			break;
-		case 297000000:
-			cts = 247500;
-			break;
-		/*
-		 * All other TMDS clocks are not supported by
-		 * DWC_hdmi_tx. The TMDS clocks divided or
-		 * multiplied by 1,001 coefficients are not
-		 * supported.
-		 */
-		default:
-			break;
-		}
-		break;
-	case 44100:
-	case 88200:
-	case 176400:
-		switch (pixel_clk) {
-		case 25200000:
-			cts = 28000;
-			break;
-		case 27000000:
-			cts = 30000;
-			break;
-		case 54000000:
-			cts = 60000;
-			break;
-		case 74250000:
-			cts = 82500;
-			break;
-		case 148500000:
-			cts = 165000;
-			break;
-		case 297000000:
-			cts = 247500;
-			break;
-		default:
-			break;
-		}
-		break;
-	default:
-		break;
-	}
-	if (ratio == 100)
-		return cts;
-	return (cts * ratio) / 100;
-}
-
 static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
-	unsigned long pixel_clk, unsigned int sample_rate, unsigned int ratio)
+	unsigned long pixel_clk, unsigned int sample_rate)
 {
+	unsigned long ftdms = pixel_clk;
 	unsigned int n, cts;
+	u64 tmp;
 
-	n = hdmi_compute_n(sample_rate, pixel_clk, ratio);
-	cts = hdmi_compute_cts(sample_rate, pixel_clk, ratio);
-	if (!cts) {
-		dev_err(hdmi->dev,
-			"%s: pixel clock/sample rate not supported: %luMHz / %ukHz\n",
-			__func__, pixel_clk, sample_rate);
-	}
+	n = hdmi_compute_n(sample_rate, pixel_clk);
 
-	dev_dbg(hdmi->dev, "%s: samplerate=%ukHz ratio=%d pixelclk=%luMHz N=%d cts=%d\n",
-		__func__, sample_rate, ratio, pixel_clk, n, cts);
+	/*
+	 * Compute the CTS value from the N value.  Note that CTS and N
+	 * can be up to 20 bits in total, so we need 64-bit math.  Also
+	 * note that our TDMS clock is not fully accurate; it is accurate
+	 * to kHz.  This can introduce an unnecessary remainder in the
+	 * calculation below, so we don't try to warn about that.
+	 */
+	tmp = (u64)ftdms * n;
+	do_div(tmp, 128 * sample_rate);
+	cts = tmp;
+
+	dev_dbg(hdmi->dev, "%s: fs=%uHz ftdms=%lu.%03luMHz N=%d cts=%d\n",
+		__func__, sample_rate, ftdms / 1000000, (ftdms / 1000) % 1000,
+		n, cts);
 
 	spin_lock_irq(&hdmi->audio_lock);
 	hdmi->audio_n = n;
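A worked check of the new CTS computation (a standalone sketch, not part of the patch), using one point from the removed lookup table: 44.1 kHz audio on a 148.5 MHz TMDS clock, where hdmi_compute_n() returns 6272:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t tmp = (uint64_t)148500000 * 6272;	/* ftdms * n */
		unsigned int cts = tmp / (128 * 44100);		/* do_div() equivalent */

		printf("cts = %u\n", cts);	/* prints 165000, matching the old table */
		return 0;
	}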
@@ -365,8 +306,7 @@ static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
 static void hdmi_init_clk_regenerator(struct dw_hdmi *hdmi)
 {
 	mutex_lock(&hdmi->audio_mutex);
-	hdmi_set_clk_regenerator(hdmi, 74250000, hdmi->sample_rate,
-				 hdmi->ratio);
+	hdmi_set_clk_regenerator(hdmi, 74250000, hdmi->sample_rate);
 	mutex_unlock(&hdmi->audio_mutex);
 }
 
@@ -374,7 +314,7 @@ static void hdmi_clk_regenerator_update_pixel_clock(struct dw_hdmi *hdmi)
 {
 	mutex_lock(&hdmi->audio_mutex);
 	hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock,
-				 hdmi->sample_rate, hdmi->ratio);
+				 hdmi->sample_rate);
 	mutex_unlock(&hdmi->audio_mutex);
 }
 
@@ -383,7 +323,7 @@ void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
 	mutex_lock(&hdmi->audio_mutex);
 	hdmi->sample_rate = rate;
 	hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock,
-				 hdmi->sample_rate, hdmi->ratio);
+				 hdmi->sample_rate);
 	mutex_unlock(&hdmi->audio_mutex);
 }
 EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate);
@@ -1063,6 +1003,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 	u8 inv_val;
 	struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
 	int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
+	unsigned int vdisplay;
 
 	vmode->mpixelclock = mode->clock * 1000;
 
@@ -1102,13 +1043,29 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 
 	hdmi_writeb(hdmi, inv_val, HDMI_FC_INVIDCONF);
 
+	vdisplay = mode->vdisplay;
+	vblank = mode->vtotal - mode->vdisplay;
+	v_de_vs = mode->vsync_start - mode->vdisplay;
+	vsync_len = mode->vsync_end - mode->vsync_start;
+
+	/*
+	 * When we're setting an interlaced mode, we need
+	 * to adjust the vertical timing to suit.
+	 */
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		vdisplay /= 2;
+		vblank /= 2;
+		v_de_vs /= 2;
+		vsync_len /= 2;
+	}
+
 	/* Set up horizontal active pixel width */
 	hdmi_writeb(hdmi, mode->hdisplay >> 8, HDMI_FC_INHACTV1);
 	hdmi_writeb(hdmi, mode->hdisplay, HDMI_FC_INHACTV0);
 
 	/* Set up vertical active lines */
-	hdmi_writeb(hdmi, mode->vdisplay >> 8, HDMI_FC_INVACTV1);
-	hdmi_writeb(hdmi, mode->vdisplay, HDMI_FC_INVACTV0);
+	hdmi_writeb(hdmi, vdisplay >> 8, HDMI_FC_INVACTV1);
+	hdmi_writeb(hdmi, vdisplay, HDMI_FC_INVACTV0);
 
 	/* Set up horizontal blanking pixel region width */
 	hblank = mode->htotal - mode->hdisplay;
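A concrete example of the halving above (standard CEA 1920x1080i timings, not from the patch): vdisplay 1080, vsync_start 1084, vsync_end 1094 and vtotal 1125 give vblank 45, v_de_vs 4 and vsync_len 10; after the division by two the frame composer is programmed per field with 540 active lines, a 22-line blanking region, a 2-line sync delay and a 5-line sync width.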
@@ -1116,7 +1073,6 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 	hdmi_writeb(hdmi, hblank, HDMI_FC_INHBLANK0);
 
 	/* Set up vertical blanking pixel region width */
-	vblank = mode->vtotal - mode->vdisplay;
 	hdmi_writeb(hdmi, vblank, HDMI_FC_INVBLANK);
 
 	/* Set up HSYNC active edge delay width (in pixel clks) */
@@ -1125,7 +1081,6 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 	hdmi_writeb(hdmi, h_de_hs, HDMI_FC_HSYNCINDELAY0);
 
 	/* Set up VSYNC active edge delay (in lines) */
-	v_de_vs = mode->vsync_start - mode->vdisplay;
 	hdmi_writeb(hdmi, v_de_vs, HDMI_FC_VSYNCINDELAY);
 
 	/* Set up HSYNC active pulse width (in pixel clks) */
@@ -1134,7 +1089,6 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 	hdmi_writeb(hdmi, hsync_len, HDMI_FC_HSYNCINWIDTH0);
 
 	/* Set up VSYNC active edge delay (in lines) */
-	vsync_len = mode->vsync_end - mode->vsync_start;
 	hdmi_writeb(hdmi, vsync_len, HDMI_FC_VSYNCINWIDTH);
 }
 
@@ -1302,10 +1256,11 @@ static int dw_hdmi_fb_registered(struct dw_hdmi *hdmi)
 		    HDMI_PHY_I2CM_CTLINT_ADDR);
 
 	/* enable cable hot plug irq */
-	hdmi_writeb(hdmi, (u8)~HDMI_PHY_HPD, HDMI_PHY_MASK0);
+	hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
 
 	/* Clear Hotplug interrupts */
-	hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0);
+	hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
+		    HDMI_IH_PHY_STAT0);
 
 	return 0;
 }
@@ -1364,12 +1319,61 @@ static void initialize_hdmi_ih_mutes(struct dw_hdmi *hdmi)
 
 static void dw_hdmi_poweron(struct dw_hdmi *hdmi)
 {
+	hdmi->bridge_is_on = true;
 	dw_hdmi_setup(hdmi, &hdmi->previous_mode);
 }
 
 static void dw_hdmi_poweroff(struct dw_hdmi *hdmi)
 {
 	dw_hdmi_phy_disable(hdmi);
+	hdmi->bridge_is_on = false;
+}
+
+static void dw_hdmi_update_power(struct dw_hdmi *hdmi)
+{
+	int force = hdmi->force;
+
+	if (hdmi->disabled) {
+		force = DRM_FORCE_OFF;
+	} else if (force == DRM_FORCE_UNSPECIFIED) {
+		if (hdmi->rxsense)
+			force = DRM_FORCE_ON;
+		else
+			force = DRM_FORCE_OFF;
+	}
+
+	if (force == DRM_FORCE_OFF) {
+		if (hdmi->bridge_is_on)
+			dw_hdmi_poweroff(hdmi);
+	} else {
+		if (!hdmi->bridge_is_on)
+			dw_hdmi_poweron(hdmi);
+	}
+}
+
+/*
+ * Adjust the detection of RXSENSE according to whether we have a forced
+ * connection mode enabled, or whether we have been disabled.  There is
+ * no point processing RXSENSE interrupts if we have a forced connection
+ * state, or DRM has us disabled.
+ *
+ * We also disable rxsense interrupts when we think we're disconnected
+ * to avoid floating TDMS signals giving false rxsense interrupts.
+ *
+ * Note: we still need to listen for HPD interrupts even when DRM has us
+ * disabled so that we can detect a connect event.
+ */
+static void dw_hdmi_update_phy_mask(struct dw_hdmi *hdmi)
+{
+	u8 old_mask = hdmi->phy_mask;
+
+	if (hdmi->force || hdmi->disabled || !hdmi->rxsense)
+		hdmi->phy_mask |= HDMI_PHY_RX_SENSE;
+	else
+		hdmi->phy_mask &= ~HDMI_PHY_RX_SENSE;
+
+	if (old_mask != hdmi->phy_mask)
+		hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
 }
 
 static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
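Flattening dw_hdmi_update_power() above into a table (derived from the code, not part of the patch; any force value other than OFF or UNSPECIFIED behaves like FORCE_ON):

	disabled  force                  rxsense  bridge power
	true      (any)                  (any)    off
	false     DRM_FORCE_OFF          (any)    off
	false     DRM_FORCE_ON           (any)    on
	false     DRM_FORCE_UNSPECIFIED  true     on
	false     DRM_FORCE_UNSPECIFIED  false    off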
@@ -1399,7 +1403,8 @@ static void dw_hdmi_bridge_disable(struct drm_bridge *bridge)
 
 	mutex_lock(&hdmi->mutex);
 	hdmi->disabled = true;
-	dw_hdmi_poweroff(hdmi);
+	dw_hdmi_update_power(hdmi);
+	dw_hdmi_update_phy_mask(hdmi);
 	mutex_unlock(&hdmi->mutex);
 }
 
@@ -1408,8 +1413,9 @@ static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
 	struct dw_hdmi *hdmi = bridge->driver_private;
 
 	mutex_lock(&hdmi->mutex);
-	dw_hdmi_poweron(hdmi);
 	hdmi->disabled = false;
+	dw_hdmi_update_power(hdmi);
+	dw_hdmi_update_phy_mask(hdmi);
 	mutex_unlock(&hdmi->mutex);
 }
 
@@ -1424,6 +1430,12 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
 	struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
 					     connector);
 
+	mutex_lock(&hdmi->mutex);
+	hdmi->force = DRM_FORCE_UNSPECIFIED;
+	dw_hdmi_update_power(hdmi);
+	dw_hdmi_update_phy_mask(hdmi);
+	mutex_unlock(&hdmi->mutex);
+
 	return hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_HPD ?
 		connector_status_connected : connector_status_disconnected;
 }
@@ -1447,6 +1459,8 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
 		hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
 		drm_mode_connector_update_edid_property(connector, edid);
 		ret = drm_add_edid_modes(connector, edid);
+		/* Store the ELD */
+		drm_edid_to_eld(connector, edid);
 		kfree(edid);
 	} else {
 		dev_dbg(hdmi->dev, "failed to get edid\n");
@@ -1488,11 +1502,24 @@ static void dw_hdmi_connector_destroy(struct drm_connector *connector)
 	drm_connector_cleanup(connector);
 }
 
+static void dw_hdmi_connector_force(struct drm_connector *connector)
+{
+	struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
+					     connector);
+
+	mutex_lock(&hdmi->mutex);
+	hdmi->force = connector->force;
+	dw_hdmi_update_power(hdmi);
+	dw_hdmi_update_phy_mask(hdmi);
+	mutex_unlock(&hdmi->mutex);
+}
+
 static struct drm_connector_funcs dw_hdmi_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = dw_hdmi_connector_detect,
 	.destroy = dw_hdmi_connector_destroy,
+	.force = dw_hdmi_connector_force,
 };
 
 static struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
@@ -1525,33 +1552,69 @@ static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
 static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
 {
 	struct dw_hdmi *hdmi = dev_id;
-	u8 intr_stat;
-	u8 phy_int_pol;
+	u8 intr_stat, phy_int_pol, phy_pol_mask, phy_stat;
 
 	intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
-
 	phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0);
+	phy_stat = hdmi_readb(hdmi, HDMI_PHY_STAT0);
 
-	if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
-		hdmi_modb(hdmi, ~phy_int_pol, HDMI_PHY_HPD, HDMI_PHY_POL0);
+	phy_pol_mask = 0;
+	if (intr_stat & HDMI_IH_PHY_STAT0_HPD)
+		phy_pol_mask |= HDMI_PHY_HPD;
+	if (intr_stat & HDMI_IH_PHY_STAT0_RX_SENSE0)
+		phy_pol_mask |= HDMI_PHY_RX_SENSE0;
+	if (intr_stat & HDMI_IH_PHY_STAT0_RX_SENSE1)
+		phy_pol_mask |= HDMI_PHY_RX_SENSE1;
+	if (intr_stat & HDMI_IH_PHY_STAT0_RX_SENSE2)
+		phy_pol_mask |= HDMI_PHY_RX_SENSE2;
+	if (intr_stat & HDMI_IH_PHY_STAT0_RX_SENSE3)
+		phy_pol_mask |= HDMI_PHY_RX_SENSE3;
+
+	if (phy_pol_mask)
+		hdmi_modb(hdmi, ~phy_int_pol, phy_pol_mask, HDMI_PHY_POL0);
+
+	/*
+	 * RX sense tells us whether the TDMS transmitters are detecting
+	 * load - in other words, there's something listening on the
+	 * other end of the link.  Use this to decide whether we should
+	 * power on the phy as HPD may be toggled by the sink to merely
+	 * ask the source to re-read the EDID.
+	 */
+	if (intr_stat &
+	    (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) {
 		mutex_lock(&hdmi->mutex);
-		if (phy_int_pol & HDMI_PHY_HPD) {
-			dev_dbg(hdmi->dev, "EVENT=plugin\n");
+		if (!hdmi->disabled && !hdmi->force) {
+			/*
+			 * If the RX sense status indicates we're disconnected,
+			 * clear the software rxsense status.
+			 */
+			if (!(phy_stat & HDMI_PHY_RX_SENSE))
+				hdmi->rxsense = false;
 
-			if (!hdmi->disabled)
-				dw_hdmi_poweron(hdmi);
-		} else {
-			dev_dbg(hdmi->dev, "EVENT=plugout\n");
-
-			if (!hdmi->disabled)
-				dw_hdmi_poweroff(hdmi);
+			/*
+			 * Only set the software rxsense status when both
+			 * rxsense and hpd indicates we're connected.
+			 * This avoids what seems to be bad behaviour in
+			 * at least iMX6S versions of the phy.
+			 */
+			if (phy_stat & HDMI_PHY_HPD)
+				hdmi->rxsense = true;
+
+			dw_hdmi_update_power(hdmi);
+			dw_hdmi_update_phy_mask(hdmi);
 		}
 		mutex_unlock(&hdmi->mutex);
 	}
 
 	if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
+		dev_dbg(hdmi->dev, "EVENT=%s\n",
+			phy_int_pol & HDMI_PHY_HPD ? "plugin" : "plugout");
 		drm_helper_hpd_irq_event(hdmi->bridge->dev);
 	}
 
 	hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0);
-	hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);
+	hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE),
+		    HDMI_IH_MUTE_PHY_STAT0);
 
 	return IRQ_HANDLED;
 }
@@ -1599,7 +1662,9 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
 {
 	struct drm_device *drm = data;
 	struct device_node *np = dev->of_node;
+	struct platform_device_info pdevinfo;
 	struct device_node *ddc_node;
+	struct dw_hdmi_audio_data audio;
 	struct dw_hdmi *hdmi;
 	int ret;
 	u32 val = 1;
@@ -1608,13 +1673,16 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
 	if (!hdmi)
 		return -ENOMEM;
 
+	hdmi->connector.interlace_allowed = 1;
+
 	hdmi->plat_data = plat_data;
 	hdmi->dev = dev;
 	hdmi->dev_type = plat_data->dev_type;
 	hdmi->sample_rate = 48000;
-	hdmi->ratio = 100;
 	hdmi->encoder = encoder;
 	hdmi->disabled = true;
+	hdmi->rxsense = true;
+	hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE);
 
 	mutex_init(&hdmi->mutex);
 	mutex_init(&hdmi->audio_mutex);
@@ -1705,10 +1773,11 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
 	 * Configure registers related to HDMI interrupt
 	 * generation before registering IRQ.
 	 */
-	hdmi_writeb(hdmi, HDMI_PHY_HPD, HDMI_PHY_POL0);
+	hdmi_writeb(hdmi, HDMI_PHY_HPD | HDMI_PHY_RX_SENSE, HDMI_PHY_POL0);
 
 	/* Clear Hotplug interrupts */
-	hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0);
+	hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
+		    HDMI_IH_PHY_STAT0);
 
 	ret = dw_hdmi_fb_registered(hdmi);
 	if (ret)
@@ -1719,7 +1788,26 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
 		goto err_iahb;
 
 	/* Unmute interrupts */
-	hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);
+	hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE),
+		    HDMI_IH_MUTE_PHY_STAT0);
+
+	memset(&pdevinfo, 0, sizeof(pdevinfo));
+	pdevinfo.parent = dev;
+	pdevinfo.id = PLATFORM_DEVID_AUTO;
+
+	if (hdmi_readb(hdmi, HDMI_CONFIG1_ID) & HDMI_CONFIG1_AHB) {
+		audio.phys = iores->start;
+		audio.base = hdmi->regs;
+		audio.irq = irq;
+		audio.hdmi = hdmi;
+		audio.eld = hdmi->connector.eld;
+
+		pdevinfo.name = "dw-hdmi-ahb-audio";
+		pdevinfo.data = &audio;
+		pdevinfo.size_data = sizeof(audio);
+		pdevinfo.dma_mask = DMA_BIT_MASK(32);
+		hdmi->audio = platform_device_register_full(&pdevinfo);
+	}
 
 	dev_set_drvdata(dev, hdmi);
 
@@ -1738,6 +1826,9 @@ void dw_hdmi_unbind(struct device *dev, struct device *master, void *data)
 {
 	struct dw_hdmi *hdmi = dev_get_drvdata(dev);
 
+	if (hdmi->audio && !IS_ERR(hdmi->audio))
+		platform_device_unregister(hdmi->audio);
+
 	/* Disable all interrupts */
 	hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
 
@@ -545,6 +545,9 @@
 #define HDMI_I2CM_FS_SCL_LCNT_0_ADDR            0x7E12
 
 enum {
+/* CONFIG1_ID field values */
+	HDMI_CONFIG1_AHB = 0x01,
+
 /* IH_FC_INT2 field values */
 	HDMI_IH_FC_INT2_OVERFLOW_MASK = 0x03,
 	HDMI_IH_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02,
@@ -400,7 +400,6 @@ static struct i2c_driver ptn3460_driver = {
 	.remove = ptn3460_remove,
 	.driver = {
 		.name = "nxp,ptn3460",
-		.owner = THIS_MODULE,
 		.of_match_table = ptn3460_match,
 	},
 };
@@ -668,7 +668,6 @@ static struct i2c_driver ps8622_driver = {
 	.remove = ps8622_remove,
 	.driver = {
 		.name = "ps8622",
-		.owner = THIS_MODULE,
 		.of_match_table = ps8622_devices,
 	},
 };
@@ -36,8 +36,6 @@
 #include <linux/slab.h>
 #include "drm_legacy.h"
 
-#if __OS_HAS_AGP
-
 #include <asm/agp.h>
 
 /**

@@ -502,5 +500,3 @@ drm_agp_bind_pages(struct drm_device *dev,
 	return mem;
 }
 EXPORT_SYMBOL(drm_agp_bind_pages);
-
-#endif /* __OS_HAS_AGP */
@@ -438,7 +438,8 @@ EXPORT_SYMBOL(drm_atomic_crtc_set_property);
  * consistent behavior you must call this function rather than the
  * driver hook directly.
  */
-int drm_atomic_crtc_get_property(struct drm_crtc *crtc,
+static int
+drm_atomic_crtc_get_property(struct drm_crtc *crtc,
 		const struct drm_crtc_state *state,
 		struct drm_property *property, uint64_t *val)
 {
@@ -663,6 +664,25 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
 	return 0;
 }
 
+static bool
+plane_switching_crtc(struct drm_atomic_state *state,
+		     struct drm_plane *plane,
+		     struct drm_plane_state *plane_state)
+{
+	if (!plane->state->crtc || !plane_state->crtc)
+		return false;
+
+	if (plane->state->crtc == plane_state->crtc)
+		return false;
+
+	/* This could be refined, but currently there's no helper or driver code
+	 * to implement direct switching of active planes nor userspace to take
+	 * advantage of more direct plane switching without the intermediate
+	 * full OFF state.
+	 */
+	return true;
+}
+
 /**
  * drm_atomic_plane_check - check plane state
  * @plane: plane to check
@@ -734,6 +754,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
 		return -ENOSPC;
 	}
 
+	if (plane_switching_crtc(state->state, plane, state)) {
+		DRM_DEBUG_ATOMIC("[PLANE:%d] switching CRTC directly\n",
+				 plane->base.id);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -42,14 +42,14 @@
  * add their own additional internal state.
  *
  * This library also provides default implementations for the check callback in
- * drm_atomic_helper_check and for the commit callback with
- * drm_atomic_helper_commit. But the individual stages and callbacks are expose
- * to allow drivers to mix and match and e.g. use the plane helpers only
+ * drm_atomic_helper_check() and for the commit callback with
+ * drm_atomic_helper_commit(). But the individual stages and callbacks are
+ * exposed to allow drivers to mix and match and e.g. use the plane helpers only
  * together with a driver private modeset implementation.
  *
  * This library also provides implementations for all the legacy driver
- * interfaces on top of the atomic interface. See drm_atomic_helper_set_config,
- * drm_atomic_helper_disable_plane, drm_atomic_helper_disable_plane and the
+ * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
+ * drm_atomic_helper_disable_plane(), drm_atomic_helper_disable_plane() and the
  * various functions to implement set_property callbacks. New drivers must not
 * implement these functions themselves but must use the provided helpers.
 */
@@ -993,6 +993,22 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
 * object. This can still fail when e.g. the framebuffer reservation fails. For
 * now this doesn't implement asynchronous commits.
 *
+ * Note that right now this function does not support async commits, and hence
+ * driver writers must implement their own version for now. Also note that the
+ * default ordering of how the various stages are called is to match the legacy
+ * modeset helper library closest. One peculiarity of that is that it doesn't
+ * mesh well with runtime PM at all.
+ *
+ * For drivers supporting runtime PM the recommended sequence is
+ *
+ *     drm_atomic_helper_commit_modeset_disables(dev, state);
+ *
+ *     drm_atomic_helper_commit_modeset_enables(dev, state);
+ *
+ *     drm_atomic_helper_commit_planes(dev, state, true);
+ *
+ * See the kerneldoc entries for these three functions for more details.
+ *
 * RETURNS
 * Zero for success or -errno.
 */
@@ -1037,7 +1053,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
-	drm_atomic_helper_commit_planes(dev, state);
+	drm_atomic_helper_commit_planes(dev, state, false);
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
@@ -1077,7 +1093,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
 * work item, which allows nice concurrent updates on disjoint sets of crtcs.
 *
 * 3. The software state is updated synchronously with
- * drm_atomic_helper_swap_state. Doing this under the protection of all modeset
+ * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
 * locks means concurrent callers never see inconsistent state. And doing this
 * while it's guaranteed that no relevant async worker runs means that async
 * workers do not need grab any locks. Actually they must not grab locks, for
@@ -1111,17 +1127,14 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 		const struct drm_plane_helper_funcs *funcs;
 		struct drm_plane *plane = state->planes[i];
 		struct drm_plane_state *plane_state = state->plane_states[i];
-		struct drm_framebuffer *fb;
 
 		if (!plane)
 			continue;
 
 		funcs = plane->helper_private;
 
-		fb = plane_state->fb;
-
-		if (fb && funcs->prepare_fb) {
-			ret = funcs->prepare_fb(plane, fb, plane_state);
+		if (funcs->prepare_fb) {
+			ret = funcs->prepare_fb(plane, plane_state);
 			if (ret)
 				goto fail;
 		}
@@ -1134,17 +1147,14 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 		const struct drm_plane_helper_funcs *funcs;
 		struct drm_plane *plane = state->planes[i];
 		struct drm_plane_state *plane_state = state->plane_states[i];
-		struct drm_framebuffer *fb;
 
 		if (!plane)
 			continue;
 
 		funcs = plane->helper_private;
 
-		fb = state->plane_states[i]->fb;
-
-		if (fb && funcs->cleanup_fb)
-			funcs->cleanup_fb(plane, fb, plane_state);
+		if (funcs->cleanup_fb)
+			funcs->cleanup_fb(plane, plane_state);
 
 	}
 
@@ -1152,10 +1162,16 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
 
+bool plane_crtc_active(struct drm_plane_state *state)
+{
+	return state->crtc && state->crtc->state->active;
+}
+
 /**
  * drm_atomic_helper_commit_planes - commit plane state
  * @dev: DRM device
  * @old_state: atomic state object with old state structures
+ * @active_only: Only commit on active CRTC if set
  *
 * This function commits the new plane state using the plane and atomic helper
 * functions for planes and crtcs. It assumes that the atomic state has already
@@ -1168,9 +1184,26 @@ EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
 * Note that this function does all plane updates across all CRTCs in one step.
 * If the hardware can't support this approach look at
 * drm_atomic_helper_commit_planes_on_crtc() instead.
+ *
+ * Plane parameters can be updated by applications while the associated CRTC is
+ * disabled. The DRM/KMS core will store the parameters in the plane state,
+ * which will be available to the driver when the CRTC is turned on. As a result
+ * most drivers don't need to be immediately notified of plane updates for a
+ * disabled CRTC.
+ *
+ * Unless otherwise needed, drivers are advised to set the @active_only
+ * parameters to true in order not to receive plane update notifications related
+ * to a disabled CRTC. This avoids the need to manually ignore plane updates in
+ * driver code when the driver and/or hardware can't or just don't need to deal
+ * with updates on disabled CRTCs, for example when supporting runtime PM.
+ *
+ * The drm_atomic_helper_commit() default implementation only sets @active_only
+ * to false to most closely match the behaviour of the legacy helpers. This should
+ * not be copied blindly by drivers.
 */
 void drm_atomic_helper_commit_planes(struct drm_device *dev,
-				     struct drm_atomic_state *old_state)
+				     struct drm_atomic_state *old_state,
+				     bool active_only)
 {
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state;
@@ -1186,25 +1219,43 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
 		if (!funcs || !funcs->atomic_begin)
 			continue;
 
+		if (active_only && !crtc->state->active)
+			continue;
+
 		funcs->atomic_begin(crtc, old_crtc_state);
 	}
 
 	for_each_plane_in_state(old_state, plane, old_plane_state, i) {
 		const struct drm_plane_helper_funcs *funcs;
+		bool disabling;
 
 		funcs = plane->helper_private;
 
 		if (!funcs)
 			continue;
 
+		disabling = drm_atomic_plane_disabling(plane, old_plane_state);
+
+		if (active_only) {
+			/*
+			 * Skip planes related to inactive CRTCs. If the plane
+			 * is enabled use the state of the current CRTC. If the
+			 * plane is being disabled use the state of the old
+			 * CRTC to avoid skipping planes being disabled on an
+			 * active CRTC.
+			 */
+			if (!disabling && !plane_crtc_active(plane->state))
+				continue;
+			if (disabling && !plane_crtc_active(old_plane_state))
+				continue;
+		}
+
 		/*
 		 * Special-case disabling the plane if drivers support it.
 		 */
-		if (drm_atomic_plane_disabling(plane, old_plane_state) &&
-		    funcs->atomic_disable)
+		if (disabling && funcs->atomic_disable)
 			funcs->atomic_disable(plane, old_plane_state);
-		else if (plane->state->crtc ||
-			 drm_atomic_plane_disabling(plane, old_plane_state))
+		else if (plane->state->crtc || disabling)
 			funcs->atomic_update(plane, old_plane_state);
 	}
 
@@ -1216,6 +1267,9 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
 		if (!funcs || !funcs->atomic_flush)
 			continue;
 
+		if (active_only && !crtc->state->active)
+			continue;
+
 		funcs->atomic_flush(crtc, old_crtc_state);
 	}
 }
@@ -1300,14 +1354,11 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
 
 	for_each_plane_in_state(old_state, plane, plane_state, i) {
 		const struct drm_plane_helper_funcs *funcs;
-		struct drm_framebuffer *old_fb;
 
 		funcs = plane->helper_private;
 
-		old_fb = plane_state->fb;
-
-		if (old_fb && funcs->cleanup_fb)
-			funcs->cleanup_fb(plane, old_fb, plane_state);
+		if (funcs->cleanup_fb)
+			funcs->cleanup_fb(plane, plane_state);
 	}
 }
 EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
@@ -1334,7 +1385,7 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
 *
 * 4. Actually commit the hardware state.
 *
- * 5. Call drm_atomic_helper_cleanup_planes with @state, which since step 3
+ * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
 * contains the old state. Also do any other cleanup required with that state.
 */
 void drm_atomic_helper_swap_state(struct drm_device *dev,
@@ -1502,21 +1553,9 @@ int drm_atomic_helper_disable_plane(struct drm_plane *plane)
 		goto fail;
 	}
 
-	ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
 	if (ret != 0)
 		goto fail;
-	drm_atomic_set_fb_for_plane(plane_state, NULL);
-	plane_state->crtc_x = 0;
-	plane_state->crtc_y = 0;
-	plane_state->crtc_h = 0;
-	plane_state->crtc_w = 0;
-	plane_state->src_x = 0;
-	plane_state->src_y = 0;
-	plane_state->src_h = 0;
-	plane_state->src_w = 0;
-
-	if (plane == plane->crtc->cursor)
-		state->legacy_cursor_update = true;
 
 	ret = drm_atomic_commit(state);
 	if (ret != 0)
@@ -1546,6 +1585,32 @@ int drm_atomic_helper_disable_plane(struct drm_plane *plane)
 }
 EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
 
+/* just used from fb-helper and atomic-helper: */
+int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
+		struct drm_plane_state *plane_state)
+{
+	int ret;
+
+	ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+	if (ret != 0)
+		return ret;
+
+	drm_atomic_set_fb_for_plane(plane_state, NULL);
+	plane_state->crtc_x = 0;
+	plane_state->crtc_y = 0;
+	plane_state->crtc_h = 0;
+	plane_state->crtc_w = 0;
+	plane_state->src_x = 0;
+	plane_state->src_y = 0;
+	plane_state->src_h = 0;
+	plane_state->src_w = 0;
+
+	if (plane->crtc && (plane == plane->crtc->cursor))
+		plane_state->state->legacy_cursor_update = true;
+
+	return 0;
+}
+
 static int update_output_state(struct drm_atomic_state *state,
 			       struct drm_mode_set *set)
 {
@@ -1629,8 +1694,6 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set)
 {
 	struct drm_atomic_state *state;
 	struct drm_crtc *crtc = set->crtc;
-	struct drm_crtc_state *crtc_state;
-	struct drm_plane_state *primary_state;
 	int ret = 0;
 
 	state = drm_atomic_state_alloc(crtc->dev);
@@ -1639,64 +1702,10 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set)
 
 	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
 retry:
-	crtc_state = drm_atomic_get_crtc_state(state, crtc);
-	if (IS_ERR(crtc_state)) {
-		ret = PTR_ERR(crtc_state);
-		goto fail;
-	}
-
-	primary_state = drm_atomic_get_plane_state(state, crtc->primary);
-	if (IS_ERR(primary_state)) {
-		ret = PTR_ERR(primary_state);
-		goto fail;
-	}
-
-	if (!set->mode) {
-		WARN_ON(set->fb);
-		WARN_ON(set->num_connectors);
-
-		ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
-		if (ret != 0)
-			goto fail;
-
-		crtc_state->active = false;
-
-		ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
-		if (ret != 0)
-			goto fail;
-
-		drm_atomic_set_fb_for_plane(primary_state, NULL);
-
-		goto commit;
-	}
-
-	WARN_ON(!set->fb);
-	WARN_ON(!set->num_connectors);
-
-	ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
+	ret = __drm_atomic_helper_set_config(set, state);
 	if (ret != 0)
 		goto fail;
 
-	crtc_state->active = true;
-
-	ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
-	if (ret != 0)
-		goto fail;
-	drm_atomic_set_fb_for_plane(primary_state, set->fb);
-	primary_state->crtc_x = 0;
-	primary_state->crtc_y = 0;
-	primary_state->crtc_h = set->mode->vdisplay;
-	primary_state->crtc_w = set->mode->hdisplay;
-	primary_state->src_x = set->x << 16;
-	primary_state->src_y = set->y << 16;
-	primary_state->src_h = set->mode->vdisplay << 16;
-	primary_state->src_w = set->mode->hdisplay << 16;
-
-commit:
-	ret = update_output_state(state, set);
-	if (ret)
-		goto fail;
-
 	ret = drm_atomic_commit(state);
 	if (ret != 0)
 		goto fail;
@@ -1725,6 +1734,78 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set)
 }
 EXPORT_SYMBOL(drm_atomic_helper_set_config);
 
+/* just used from fb-helper and atomic-helper: */
+int __drm_atomic_helper_set_config(struct drm_mode_set *set,
+		struct drm_atomic_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+	struct drm_plane_state *primary_state;
+	struct drm_crtc *crtc = set->crtc;
+	int ret;
+
+	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
+	primary_state = drm_atomic_get_plane_state(state, crtc->primary);
+	if (IS_ERR(primary_state))
+		return PTR_ERR(primary_state);
+
+	if (!set->mode) {
+		WARN_ON(set->fb);
+		WARN_ON(set->num_connectors);
+
+		ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
+		if (ret != 0)
+			return ret;
+
+		crtc_state->active = false;
+
+		ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
+		if (ret != 0)
+			return ret;
+
+		drm_atomic_set_fb_for_plane(primary_state, NULL);
+
+		goto commit;
+	}
+
+	WARN_ON(!set->fb);
+	WARN_ON(!set->num_connectors);
+
+	ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
+	if (ret != 0)
+		return ret;
+
+	crtc_state->active = true;
+
+	ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
+	if (ret != 0)
+		return ret;
+
+	drm_atomic_set_fb_for_plane(primary_state, set->fb);
+	primary_state->crtc_x = 0;
+	primary_state->crtc_y = 0;
+	primary_state->crtc_h = set->mode->vdisplay;
+	primary_state->crtc_w = set->mode->hdisplay;
+	primary_state->src_x = set->x << 16;
+	primary_state->src_y = set->y << 16;
+	if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
+		primary_state->src_h = set->mode->hdisplay << 16;
+		primary_state->src_w = set->mode->vdisplay << 16;
+	} else {
+		primary_state->src_h = set->mode->vdisplay << 16;
+		primary_state->src_w = set->mode->hdisplay << 16;
+	}
+
+commit:
+	ret = update_output_state(state, set);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 /**
 * drm_atomic_helper_crtc_set_property - helper for crtc properties
 * @crtc: DRM crtc
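The rotation branch added above keeps the source rectangle in the framebuffer's own coordinate space; for example (hypothetical numbers, not from the patch), a 1920x1080 mode on a 90-degree-rotated primary plane now requests a 1080x1920 source:

	/* primary_state->rotation = BIT(DRM_ROTATE_90), mode 1920x1080 */
	src_w = 1080 << 16;	/* swapped in from mode->vdisplay */
	src_h = 1920 << 16;	/* swapped in from mode->hdisplay */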
@@ -2332,6 +2413,84 @@ drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
 
+/**
+ * drm_atomic_helper_duplicate_state - duplicate an atomic state object
+ * @dev: DRM device
+ * @ctx: lock acquisition context
+ *
+ * Makes a copy of the current atomic state by looping over all objects and
+ * duplicating their respective states.
+ *
+ * Note that this treats atomic state as persistent between save and restore.
+ * Drivers must make sure that this is possible and won't result in confusion
+ * or erroneous behaviour.
+ *
+ * Note that if callers haven't already acquired all modeset locks this might
+ * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
+ *
+ * Returns:
+ * A pointer to the copy of the atomic state object on success or an
+ * ERR_PTR()-encoded error code on failure.
+ */
+struct drm_atomic_state *
+drm_atomic_helper_duplicate_state(struct drm_device *dev,
+				  struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_atomic_state *state;
+	struct drm_connector *conn;
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	int err = 0;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return ERR_PTR(-ENOMEM);
+
+	state->acquire_ctx = ctx;
+
+	drm_for_each_crtc(crtc, dev) {
+		struct drm_crtc_state *crtc_state;
+
+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+		if (IS_ERR(crtc_state)) {
+			err = PTR_ERR(crtc_state);
+			goto free;
+		}
+	}
+
+	drm_for_each_plane(plane, dev) {
+		struct drm_plane_state *plane_state;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state)) {
+			err = PTR_ERR(plane_state);
+			goto free;
+		}
+	}
+
+	drm_for_each_connector(conn, dev) {
+		struct drm_connector_state *conn_state;
+
+		conn_state = drm_atomic_get_connector_state(state, conn);
+		if (IS_ERR(conn_state)) {
+			err = PTR_ERR(conn_state);
+			goto free;
+		}
+	}
+
+	/* clear the acquire context so that it isn't accidentally reused */
+	state->acquire_ctx = NULL;
+
+free:
+	if (err < 0) {
+		drm_atomic_state_free(state);
+		state = ERR_PTR(err);
+	}
+
+	return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
+
 /**
  * __drm_atomic_helper_connector_destroy_state - release connector state
  * @connector: connector object
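A minimal sketch of the intended save/restore use of drm_atomic_helper_duplicate_state() (a hypothetical caller, not from the patch):

	struct drm_atomic_state *state;
	int ret;

	/* suspend: snapshot the current state under a locking context */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state))
		return PTR_ERR(state);

	/* ... power the hardware down ... */

	/* resume: replay the saved state through a normal commit */
	state->acquire_ctx = ctx;
	ret = drm_atomic_commit(state);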
@@ -582,7 +582,7 @@ static void drm_cleanup_buf_error(struct drm_device * dev,
 	}
 }
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 /**
  * Add AGP buffers for DMA transfers.
  *

@@ -756,7 +756,7 @@ int drm_legacy_addbufs_agp(struct drm_device *dev,
 	return 0;
 }
 EXPORT_SYMBOL(drm_legacy_addbufs_agp);
-#endif /* __OS_HAS_AGP */
+#endif /* CONFIG_AGP */
 
 int drm_legacy_addbufs_pci(struct drm_device *dev,
 			   struct drm_buf_desc *request)

@@ -1145,7 +1145,7 @@ int drm_legacy_addbufs(struct drm_device *dev, void *data,
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 		return -EINVAL;
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 	if (request->flags & _DRM_AGP_BUFFER)
 		ret = drm_legacy_addbufs_agp(dev, request);
 	else
@@ -306,8 +306,7 @@ static int drm_mode_object_get_reg(struct drm_device *dev,
  * reference counted modeset objects like framebuffers.
  *
  * Returns:
- * New unique (relative to other objects in @dev) integer identifier for the
- * object.
+ * Zero on success, error code on failure.
  */
 int drm_mode_object_get(struct drm_device *dev,
 			struct drm_mode_object *obj, uint32_t obj_type)
@@ -423,7 +422,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
 out:
 	mutex_unlock(&dev->mode_config.fb_lock);

-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(drm_framebuffer_init);

@@ -538,7 +537,12 @@ EXPORT_SYMBOL(drm_framebuffer_reference);
  */
 void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
 {
-	struct drm_device *dev = fb->dev;
+	struct drm_device *dev;
+
+	if (!fb)
+		return;
+
+	dev = fb->dev;

 	mutex_lock(&dev->mode_config.fb_lock);
 	/* Mark fb as reaped and drop idr ref. */
@@ -589,12 +593,17 @@ EXPORT_SYMBOL(drm_framebuffer_cleanup);
  */
 void drm_framebuffer_remove(struct drm_framebuffer *fb)
 {
-	struct drm_device *dev = fb->dev;
+	struct drm_device *dev;
 	struct drm_crtc *crtc;
 	struct drm_plane *plane;
 	struct drm_mode_set set;
 	int ret;

+	if (!fb)
+		return;
+
+	dev = fb->dev;
+
 	WARN_ON(!list_empty(&fb->filp_head));

 	/*
@@ -667,7 +676,6 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,

 	crtc->dev = dev;
 	crtc->funcs = funcs;
-	crtc->invert_dimensions = false;

 	drm_modeset_lock_init(&crtc->mutex);
 	ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
@@ -1509,7 +1517,7 @@ EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
  */
 int drm_mode_create_tv_properties(struct drm_device *dev,
 				  unsigned int num_modes,
-				  char *modes[])
+				  const char * const modes[])
 {
 	struct drm_property *tv_selector;
 	struct drm_property *tv_subconnector;
@@ -1525,6 +1533,9 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
 				  "select subconnector",
 				  drm_tv_select_enum_list,
 				  ARRAY_SIZE(drm_tv_select_enum_list));
+	if (!tv_selector)
+		goto nomem;
+
 	dev->mode_config.tv_select_subconnector_property = tv_selector;

 	tv_subconnector =
@@ -1532,6 +1543,8 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
 			    "subconnector",
 			    drm_tv_subconnector_enum_list,
 			    ARRAY_SIZE(drm_tv_subconnector_enum_list));
+	if (!tv_subconnector)
+		goto nomem;
 	dev->mode_config.tv_subconnector_property = tv_subconnector;

 	/*
@@ -1539,42 +1552,67 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
 	 */
 	dev->mode_config.tv_left_margin_property =
 		drm_property_create_range(dev, 0, "left margin", 0, 100);
+	if (!dev->mode_config.tv_left_margin_property)
+		goto nomem;

 	dev->mode_config.tv_right_margin_property =
 		drm_property_create_range(dev, 0, "right margin", 0, 100);
+	if (!dev->mode_config.tv_right_margin_property)
+		goto nomem;

 	dev->mode_config.tv_top_margin_property =
 		drm_property_create_range(dev, 0, "top margin", 0, 100);
+	if (!dev->mode_config.tv_top_margin_property)
+		goto nomem;

 	dev->mode_config.tv_bottom_margin_property =
 		drm_property_create_range(dev, 0, "bottom margin", 0, 100);
+	if (!dev->mode_config.tv_bottom_margin_property)
+		goto nomem;

 	dev->mode_config.tv_mode_property =
 		drm_property_create(dev, DRM_MODE_PROP_ENUM,
 				    "mode", num_modes);
+	if (!dev->mode_config.tv_mode_property)
+		goto nomem;
+
 	for (i = 0; i < num_modes; i++)
 		drm_property_add_enum(dev->mode_config.tv_mode_property, i,
 				      i, modes[i]);

 	dev->mode_config.tv_brightness_property =
 		drm_property_create_range(dev, 0, "brightness", 0, 100);
+	if (!dev->mode_config.tv_brightness_property)
+		goto nomem;

 	dev->mode_config.tv_contrast_property =
 		drm_property_create_range(dev, 0, "contrast", 0, 100);
+	if (!dev->mode_config.tv_contrast_property)
+		goto nomem;

 	dev->mode_config.tv_flicker_reduction_property =
 		drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
+	if (!dev->mode_config.tv_flicker_reduction_property)
+		goto nomem;

 	dev->mode_config.tv_overscan_property =
 		drm_property_create_range(dev, 0, "overscan", 0, 100);
+	if (!dev->mode_config.tv_overscan_property)
+		goto nomem;

 	dev->mode_config.tv_saturation_property =
 		drm_property_create_range(dev, 0, "saturation", 0, 100);
+	if (!dev->mode_config.tv_saturation_property)
+		goto nomem;

 	dev->mode_config.tv_hue_property =
 		drm_property_create_range(dev, 0, "hue", 0, 100);
+	if (!dev->mode_config.tv_hue_property)
+		goto nomem;

 	return 0;
+nomem:
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(drm_mode_create_tv_properties);
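With the constified signature and the new error handling, a TV encoder driver can pass a static table and must now check the return value. A hedged sketch of a caller; the "foo" names are illustrative, not from this diff:

	static const char * const foo_tv_modes[] = {
		"NTSC", "PAL",
	};

	static int foo_modeset_init(struct drm_device *dev)
	{
		int ret;

		ret = drm_mode_create_tv_properties(dev, ARRAY_SIZE(foo_tv_modes),
						    foo_tv_modes);
		if (ret)
			return ret;	/* -ENOMEM if any property allocation failed */

		/* ... create connectors and attach the tv properties ... */
		return 0;
	}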
@@ -2276,6 +2314,32 @@ int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
 	return -EINVAL;
 }

+static int check_src_coords(uint32_t src_x, uint32_t src_y,
+			    uint32_t src_w, uint32_t src_h,
+			    const struct drm_framebuffer *fb)
+{
+	unsigned int fb_width, fb_height;
+
+	fb_width = fb->width << 16;
+	fb_height = fb->height << 16;
+
+	/* Make sure source coordinates are inside the fb. */
+	if (src_w > fb_width ||
+	    src_x > fb_width - src_w ||
+	    src_h > fb_height ||
+	    src_y > fb_height - src_h) {
+		DRM_DEBUG_KMS("Invalid source coordinates "
+			      "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+			      src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
+			      src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
+			      src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
+			      src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
+		return -ENOSPC;
+	}
+
+	return 0;
+}
+
 /*
  * setplane_internal - setplane handler for internal callers
  *
@@ -2295,7 +2359,6 @@ static int __setplane_internal(struct drm_plane *plane,
 			       uint32_t src_w, uint32_t src_h)
 {
 	int ret = 0;
-	unsigned int fb_width, fb_height;

 	/* No fb means shut it down */
 	if (!fb) {
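The source coordinates that check_src_coords() validates are unsigned 16.16 fixed point: the integer part lives in the high 16 bits, the fraction in the low 16. The debug format converts the fraction to micro-units because frac/65536 * 10^6 == (frac * 15625) >> 10 (since 65536 * 15625 = 1024000000 and 1024000000 >> 10 = 1000000). An illustrative, standalone demonstration (userspace C, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* a 640.5-wide source rect starting at x = 2.25, in 16.16 */
		uint32_t src_x = (2 << 16) + (1 << 14);		/* 2.25  */
		uint32_t src_w = (640 << 16) + (1 << 15);	/* 640.5 */

		/* same arithmetic as the DRM_DEBUG_KMS format above */
		printf("%u.%06u\n", src_w >> 16, ((src_w & 0xffff) * 15625) >> 10); /* 640.500000 */
		printf("%u.%06u\n", src_x >> 16, ((src_x & 0xffff) * 15625) >> 10); /* 2.250000   */
		return 0;
	}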
@@ -2332,28 +2395,14 @@ static int __setplane_internal(struct drm_plane *plane,
 	    crtc_y > INT_MAX - (int32_t) crtc_h) {
 		DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
 			      crtc_w, crtc_h, crtc_x, crtc_y);
-		return -ERANGE;
-	}
-
-	fb_width = fb->width << 16;
-	fb_height = fb->height << 16;
-
-	/* Make sure source coordinates are inside the fb. */
-	if (src_w > fb_width ||
-	    src_x > fb_width - src_w ||
-	    src_h > fb_height ||
-	    src_y > fb_height - src_h) {
-		DRM_DEBUG_KMS("Invalid source coordinates "
-			      "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
-			      src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
-			      src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
-			      src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
-			      src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
-		ret = -ENOSPC;
+		ret = -ERANGE;
 		goto out;
 	}

+	ret = check_src_coords(src_x, src_y, src_w, src_h, fb);
+	if (ret)
+		goto out;
+
 	plane->old_fb = plane->fb;
 	ret = plane->funcs->update_plane(plane, crtc, fb,
 					 crtc_x, crtc_y, crtc_w, crtc_h,
@@ -2543,20 +2592,13 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,

 	drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);

-	if (crtc->invert_dimensions)
+	if (crtc->state &&
+	    crtc->primary->state->rotation & (BIT(DRM_ROTATE_90) |
+					      BIT(DRM_ROTATE_270)))
 		swap(hdisplay, vdisplay);

-	if (hdisplay > fb->width ||
-	    vdisplay > fb->height ||
-	    x > fb->width - hdisplay ||
-	    y > fb->height - vdisplay) {
-		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
-			      fb->width, fb->height, hdisplay, vdisplay, x, y,
-			      crtc->invert_dimensions ? " (inverted)" : "");
-		return -ENOSPC;
-	}
-
-	return 0;
+	return check_src_coords(x << 16, y << 16,
+				hdisplay << 16, vdisplay << 16, fb);
 }
 EXPORT_SYMBOL(drm_crtc_check_viewport);
@@ -3310,14 +3352,11 @@ int drm_mode_rmfb(struct drm_device *dev,
 	if (!found)
 		goto fail_lookup;

-	/* Mark fb as reaped, we still have a ref from fpriv->fbs. */
-	__drm_framebuffer_unregister(dev, fb);
-
 	list_del_init(&fb->filp_head);
 	mutex_unlock(&dev->mode_config.fb_lock);
 	mutex_unlock(&file_priv->fbs_lock);

-	drm_framebuffer_remove(fb);
+	drm_framebuffer_unreference(fb);

 	return 0;

@@ -3484,7 +3523,6 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
  */
 void drm_fb_release(struct drm_file *priv)
 {
-	struct drm_device *dev = priv->minor->dev;
 	struct drm_framebuffer *fb, *tfb;

 	/*
@@ -3498,16 +3536,10 @@ void drm_fb_release(struct drm_file *priv)
 	 * at it any more.
 	 */
 	list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
-		mutex_lock(&dev->mode_config.fb_lock);
-		/* Mark fb as reaped, we still have a ref from fpriv->fbs. */
-		__drm_framebuffer_unregister(dev, fb);
-		mutex_unlock(&dev->mode_config.fb_lock);
-
 		list_del_init(&fb->filp_head);

-		/* This will also drop the fpriv->fbs reference. */
-		drm_framebuffer_remove(fb);
+		/* This drops the fpriv->fbs reference. */
+		drm_framebuffer_unreference(fb);
 	}
 }

@@ -5181,7 +5213,14 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 		goto out;
 	}

-	ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+	if (crtc->state) {
+		const struct drm_plane_state *state = crtc->primary->state;
+
+		ret = check_src_coords(state->src_x, state->src_y,
+				       state->src_w, state->src_h, fb);
+	} else {
+		ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+	}
 	if (ret)
 		goto out;

@@ -5629,7 +5668,8 @@ unsigned int drm_rotation_simplify(unsigned int rotation,
 {
 	if (rotation & ~supported_rotations) {
 		rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
-		rotation = (rotation & ~0xf) | BIT((ffs(rotation & 0xf) + 1) % 4);
+		rotation = (rotation & DRM_REFLECT_MASK) |
+			   BIT((ffs(rotation & DRM_ROTATE_MASK) + 1) % 4);
 	}

 	return rotation;

@@ -5732,7 +5772,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 	 */
 	WARN_ON(!list_empty(&dev->mode_config.fb_list));
 	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
-		drm_framebuffer_remove(fb);
+		drm_framebuffer_free(&fb->refcount);
 	}

 	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
@@ -424,6 +424,19 @@ static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
 	       I2C_FUNC_10BIT_ADDR;
 }

+static void drm_dp_i2c_msg_write_status_update(struct drm_dp_aux_msg *msg)
+{
+	/*
+	 * In case of i2c defer or short i2c ack reply to a write,
+	 * we need to switch to WRITE_STATUS_UPDATE to drain the
+	 * rest of the message
+	 */
+	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_WRITE) {
+		msg->request &= DP_AUX_I2C_MOT;
+		msg->request |= DP_AUX_I2C_WRITE_STATUS_UPDATE;
+	}
+}
+
 #define AUX_PRECHARGE_LEN 10 /* 10 to 16 */
 #define AUX_SYNC_LEN (16 + 4) /* preamble + AUX_SYNC_END */
 #define AUX_STOP_LEN 4

@@ -579,6 +592,8 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 		 * Both native ACK and I2C ACK replies received. We
 		 * can assume the transfer was successful.
 		 */
+		if (ret != msg->size)
+			drm_dp_i2c_msg_write_status_update(msg);
 		return ret;

 	case DP_AUX_I2C_REPLY_NACK:

@@ -596,6 +611,8 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 			if (defer_i2c < 7)
 				defer_i2c++;
 			usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
+			drm_dp_i2c_msg_write_status_update(msg);
+
 			continue;

 		default:

@@ -608,6 +625,14 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 	return -EREMOTEIO;
 }

+static void drm_dp_i2c_msg_set_request(struct drm_dp_aux_msg *msg,
+				       const struct i2c_msg *i2c_msg)
+{
+	msg->request = (i2c_msg->flags & I2C_M_RD) ?
+		DP_AUX_I2C_READ : DP_AUX_I2C_WRITE;
+	msg->request |= DP_AUX_I2C_MOT;
+}
+
 /*
  * Keep retrying drm_dp_i2c_do_msg until all data has been transferred.
  *

@@ -661,10 +686,7 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,

 	for (i = 0; i < num; i++) {
 		msg.address = msgs[i].addr;
-		msg.request = (msgs[i].flags & I2C_M_RD) ?
-			DP_AUX_I2C_READ :
-			DP_AUX_I2C_WRITE;
-		msg.request |= DP_AUX_I2C_MOT;
+		drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
 		/* Send a bare address packet to start the transaction.
 		 * Zero sized messages specify an address only (bare
 		 * address) transaction.

@@ -672,6 +694,13 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
 		msg.buffer = NULL;
 		msg.size = 0;
 		err = drm_dp_i2c_do_msg(aux, &msg);
+
+		/*
+		 * Reset msg.request in case it got
+		 * changed into a WRITE_STATUS_UPDATE.
+		 */
+		drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
+
 		if (err < 0)
 			break;
 		/* We want each transaction to be as large as possible, but

@@ -684,6 +713,13 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
 			msg.size = min(transfer_size, msgs[i].len - j);

 			err = drm_dp_i2c_drain_msg(aux, &msg);
+
+			/*
+			 * Reset msg.request in case it got
+			 * changed into a WRITE_STATUS_UPDATE.
+			 */
+			drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
+
 			if (err < 0)
 				break;
 			transfer_size = err;
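Condensed from the transfer loop above, the shape of the STATUS_UPDATE handling is: every attempt may morph msg.request into WRITE_STATUS_UPDATE (after a short write ack or an i2c defer), so the request byte has to be rebuilt from the original i2c_msg before the next chunk. A hedged sketch using the helper names from this diff, with the surrounding loop simplified:

	for (j = 0; j < msgs[i].len; j += msg.size) {
		msg.buffer = msgs[i].buf + j;
		msg.size = min(transfer_size, msgs[i].len - j);

		err = drm_dp_i2c_drain_msg(aux, &msg);
		drm_dp_i2c_msg_set_request(&msg, &msgs[i]);	/* undo any morph */
		if (err < 0)
			break;
		transfer_size = err;	/* sink may negotiate a smaller chunk */
	}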
@@ -37,11 +37,9 @@
 #include "drm_legacy.h"
 #include "drm_internal.h"

-unsigned int drm_debug = 0;	/* 1 to enable debug output */
+unsigned int drm_debug = 0;	/* bitmask of DRM_UT_x */
 EXPORT_SYMBOL(drm_debug);

-bool drm_atomic = 0;
-
 MODULE_AUTHOR(CORE_AUTHOR);
 MODULE_DESCRIPTION(CORE_DESC);
 MODULE_LICENSE("GPL and additional rights");

@@ -55,7 +53,6 @@ module_param_named(debug, drm_debug, int, 0600);
 static DEFINE_SPINLOCK(drm_minor_lock);
 static struct idr drm_minors_idr;

-struct class *drm_class;
 static struct dentry *drm_debugfs_root;

 void drm_err(const char *format, ...)

@@ -397,16 +394,52 @@ void drm_minor_release(struct drm_minor *minor)
 	drm_dev_unref(minor->dev);
 }

+/**
+ * DOC: driver instance overview
+ *
+ * A device instance for a drm driver is represented by struct &drm_device. This
+ * is allocated with drm_dev_alloc(), usually from bus-specific ->probe()
+ * callbacks implemented by the driver. The driver then needs to initialize all
+ * the various subsystems for the drm device like memory management, vblank
+ * handling, modesetting support and initial output configuration plus obviously
+ * initialize all the corresponding hardware bits. An important part of this is
+ * also calling drm_dev_set_unique() to set the userspace-visible unique name of
+ * this device instance. Finally when everything is up and running and ready for
+ * userspace the device instance can be published using drm_dev_register().
+ *
+ * There is also deprecated support for initializing device instances using
+ * bus-specific helpers and the ->load() callback. But due to
+ * backwards-compatibility needs the device instance has to be published too
+ * early, which requires unpretty global locking to make safe and is therefore
+ * only supported for existing drivers not yet converted to the new scheme.
+ *
+ * When cleaning up a device instance everything needs to be done in reverse:
+ * First unpublish the device instance with drm_dev_unregister(). Then clean up
+ * any other resources allocated at device initialization and drop the driver's
+ * reference to &drm_device using drm_dev_unref().
+ *
+ * Note that the lifetime rules for a &drm_device instance still carry a lot of
+ * historical baggage. Hence use the reference counting provided by
+ * drm_dev_ref() and drm_dev_unref() only carefully.
+ *
+ * Also note that embedding of &drm_device is currently not (yet) supported (but
+ * it would be easy to add). Drivers can store driver-private data in the
+ * dev_priv field of &drm_device.
+ */
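A hedged sketch of the documented alloc/register/unregister/unref sequence, with hypothetical "foo" names (in this kernel version drm_dev_alloc() returns NULL on allocation failure; drm_dev_set_unique() would also be called during the init phase where required):

	static int foo_platform_probe(struct platform_device *pdev)
	{
		struct drm_device *ddev;
		int ret;

		ddev = drm_dev_alloc(&foo_drm_driver, &pdev->dev);
		if (!ddev)
			return -ENOMEM;
		platform_set_drvdata(pdev, ddev);

		/* ... set up memory management, vblank, modeset, outputs ... */

		ret = drm_dev_register(ddev, 0);	/* publish last */
		if (ret)
			drm_dev_unref(ddev);
		return ret;
	}

	static int foo_platform_remove(struct platform_device *pdev)
	{
		struct drm_device *ddev = platform_get_drvdata(pdev);

		drm_dev_unregister(ddev);	/* unpublish first */
		/* ... tear down driver resources in reverse order ... */
		drm_dev_unref(ddev);		/* drop the final reference */
		return 0;
	}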
 /**
  * drm_put_dev - Unregister and release a DRM device
  * @dev: DRM device
  *
  * Called at module unload time or when a PCI device is unplugged.
  *
- * Use of this function is discouraged. It will eventually go away completely.
- * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
- *
  * Cleans up the DRM device, calling drm_lastclose().
+ *
+ * Note: Use of this function is deprecated. It will eventually go away
+ * completely. Please use drm_dev_unregister() and drm_dev_unref() explicitly
+ * instead to make sure that the device isn't userspace accessible any more
+ * while teardown is in progress, ensuring that userspace can't access an
+ * inconsistent state.
  */
 void drm_put_dev(struct drm_device *dev)
 {

@@ -519,7 +552,9 @@ static void drm_fs_inode_free(struct inode *inode)
  *
  * Allocate and initialize a new DRM device. No device registration is done.
  * Call drm_dev_register() to advertise the device to user space and register it
- * with other core subsystems.
+ * with other core subsystems. This should be done last in the device
+ * initialization sequence to make sure userspace can't access an inconsistent
+ * state.
  *
  * The initial ref-count of the object is 1. Use drm_dev_ref() and
  * drm_dev_unref() to take and drop further ref-counts.

@@ -566,6 +601,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
 		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
 		if (ret)
 			goto err_minors;
+
+		WARN_ON(driver->suspend || driver->resume);
 	}

 	if (drm_core_check_feature(dev, DRIVER_RENDER)) {

@@ -672,6 +709,12 @@ EXPORT_SYMBOL(drm_dev_unref);
  *
  * Never call this twice on any device!
  *
+ * NOTE: To ensure backward compatibility with existing drivers method this
+ * function calls the ->load() method after registering the device nodes,
+ * creating race conditions. Usage of the ->load() methods is therefore
+ * deprecated, drivers must perform all initialization before calling
+ * drm_dev_register().
+ *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */

@@ -719,6 +762,9 @@ EXPORT_SYMBOL(drm_dev_register);
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_unref() to drop their final reference.
+ *
+ * This should be called first in the device teardown code to make sure
+ * userspace can't access the device instance any more.
 */
 void drm_dev_unregister(struct drm_device *dev)
 {

@@ -839,10 +885,9 @@ static int __init drm_core_init(void)
 	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
 		goto err_p1;

-	drm_class = drm_sysfs_create(THIS_MODULE, "drm");
-	if (IS_ERR(drm_class)) {
+	ret = drm_sysfs_init();
+	if (ret < 0) {
 		printk(KERN_ERR "DRM: Error creating drm class.\n");
-		ret = PTR_ERR(drm_class);
 		goto err_p2;
 	}
@@ -2044,7 +2044,7 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
 static bool valid_inferred_mode(const struct drm_connector *connector,
 				const struct drm_display_mode *mode)
 {
-	struct drm_display_mode *m;
+	const struct drm_display_mode *m;
 	bool ok = false;

 	list_for_each_entry(m, &connector->probed_modes, head) {

@@ -2418,6 +2418,8 @@ add_cvt_modes(struct drm_connector *connector, struct edid *edid)
 	return closure.modes;
 }

+static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode);
+
 static void
 do_detailed_mode(struct detailed_timing *timing, void *c)
 {

@@ -2434,6 +2436,13 @@ do_detailed_mode(struct detailed_timing *timing, void *c)
 		if (closure->preferred)
 			newmode->type |= DRM_MODE_TYPE_PREFERRED;

+		/*
+		 * Detailed modes are limited to 10kHz pixel clock resolution,
+		 * so fix up anything that looks like CEA/HDMI mode, but the clock
+		 * is just slightly off.
+		 */
+		fixup_detailed_cea_mode_clock(newmode);
+
 		drm_mode_probed_add(closure->connector, newmode);
 		closure->modes++;
 		closure->preferred = 0;

@@ -2529,9 +2538,9 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
 	 * and the 60Hz variant otherwise.
 	 */
 	if (cea_mode->vdisplay == 240 || cea_mode->vdisplay == 480)
-		clock = clock * 1001 / 1000;
+		clock = DIV_ROUND_CLOSEST(clock * 1001, 1000);
 	else
-		clock = DIV_ROUND_UP(clock * 1000, 1001);
+		clock = DIV_ROUND_CLOSEST(clock * 1000, 1001);

 	return clock;
 }

@@ -3103,6 +3112,45 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
 	return modes;
 }

+static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
+{
+	const struct drm_display_mode *cea_mode;
+	int clock1, clock2, clock;
+	u8 mode_idx;
+	const char *type;
+
+	mode_idx = drm_match_cea_mode(mode) - 1;
+	if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
+		type = "CEA";
+		cea_mode = &edid_cea_modes[mode_idx];
+		clock1 = cea_mode->clock;
+		clock2 = cea_mode_alternate_clock(cea_mode);
+	} else {
+		mode_idx = drm_match_hdmi_mode(mode) - 1;
+		if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
+			type = "HDMI";
+			cea_mode = &edid_4k_modes[mode_idx];
+			clock1 = cea_mode->clock;
+			clock2 = hdmi_mode_alternate_clock(cea_mode);
+		} else {
+			return;
+		}
+	}
+
+	/* pick whichever is closest */
+	if (abs(mode->clock - clock1) < abs(mode->clock - clock2))
+		clock = clock1;
+	else
+		clock = clock2;
+
+	if (mode->clock == clock)
+		return;
+
+	DRM_DEBUG("detailed mode matches %s VIC %d, adjusting clock %d -> %d\n",
+		  type, mode_idx + 1, mode->clock, clock);
+	mode->clock = clock;
+}
+
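Worked example of the two clock variants this helper chooses between (VIC 4 is picked here purely for illustration): for 1280x720 @ 60 Hz, clock1 = 74250 kHz, and clock2 = DIV_ROUND_CLOSEST(74250 * 1000, 1001) = 74176 kHz, the 59.94 Hz variant. An EDID detailed timing stores its pixel clock in 10 kHz steps, so a sink targeting 74.176 MHz can only encode 74170 or 74180 kHz; either value is much closer to 74176 than to 74250 (a difference of at most 6 versus at least 70), so fixup_detailed_cea_mode_clock() snaps the mode to the exact 59.94 Hz clock.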
 static void
 parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
 {

@@ -3361,7 +3409,7 @@ EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
  * the sink doesn't support audio or video.
  */
 int drm_av_sync_delay(struct drm_connector *connector,
-		      struct drm_display_mode *mode)
+		      const struct drm_display_mode *mode)
 {
 	int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
 	int a, v;

@@ -3396,7 +3444,6 @@ EXPORT_SYMBOL(drm_av_sync_delay);
 /**
  * drm_select_eld - select one ELD from multiple HDMI/DP sinks
  * @encoder: the encoder just changed display mode
- * @mode: the adjusted display mode
  *
  * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
  * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.

@@ -3404,8 +3451,7 @@ EXPORT_SYMBOL(drm_av_sync_delay);
  * Return: The connector associated with the first HDMI/DP sink that has ELD
  * attached to it.
  */
-struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
-				     struct drm_display_mode *mode)
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder)
 {
 	struct drm_connector *connector;
 	struct drm_device *dev = encoder->dev;
@@ -32,7 +32,7 @@ MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
 	"from built-in data or /lib/firmware instead. ");

 #define GENERIC_EDIDS 6
-static const char *generic_edid_name[GENERIC_EDIDS] = {
+static const char * const generic_edid_name[GENERIC_EDIDS] = {
 	"edid/800x600.bin",
 	"edid/1024x768.bin",
 	"edid/1280x1024.bin",

@@ -264,20 +264,43 @@ static void *edid_load(struct drm_connector *connector, const char *name,
 int drm_load_edid_firmware(struct drm_connector *connector)
 {
 	const char *connector_name = connector->name;
-	char *edidname = edid_firmware, *last, *colon;
+	char *edidname, *last, *colon, *fwstr, *edidstr, *fallback = NULL;
 	int ret;
 	struct edid *edid;

-	if (*edidname == '\0')
+	if (edid_firmware[0] == '\0')
 		return 0;

-	colon = strchr(edidname, ':');
-	if (colon != NULL) {
-		if (strncmp(connector_name, edidname, colon - edidname))
-			return 0;
-		edidname = colon + 1;
-		if (*edidname == '\0')
+	/*
+	 * If there are multiple edid files specified and separated
+	 * by commas, search through the list looking for one that
+	 * matches the connector.
+	 *
+	 * If there's one or more that don't specify a connector, keep
+	 * the last one found as a fallback.
+	 */
+	fwstr = kstrdup(edid_firmware, GFP_KERNEL);
+	edidstr = fwstr;
+
+	while ((edidname = strsep(&edidstr, ","))) {
+		colon = strchr(edidname, ':');
+		if (colon != NULL) {
+			if (strncmp(connector_name, edidname, colon - edidname))
+				continue;
+			edidname = colon + 1;
+			break;
+		}
+
+		if (*edidname != '\0') /* corner case: multiple ',' */
+			fallback = edidname;
+	}
+
+	if (!edidname) {
+		if (!fallback) {
+			kfree(fwstr);
 			return 0;
+		}
+		edidname = fallback;
+	}

 	last = edidname + strlen(edidname) - 1;

@@ -285,6 +308,8 @@ int drm_load_edid_firmware(struct drm_connector *connector)
 		*last = '\0';

 	edid = edid_load(connector, edidname, connector_name);
+	kfree(fwstr);
+
 	if (IS_ERR_OR_NULL(edid))
 		return 0;
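For illustration, a hypothetical setting that exercises the new comma-separated syntax (the parameter lives in whichever module carries drm_edid_load in a given build, typically drm_kms_helper):

	drm_kms_helper.edid_firmware=DP-1:edid/1920x1080.bin,edid/1280x1024.bin

The first entry applies only to the connector named DP-1; the second entry has no connector prefix, so it becomes the fallback used for every other connector.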
@@ -38,6 +38,13 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+
+static bool drm_fbdev_emulation = true;
+module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600);
+MODULE_PARM_DESC(fbdev_emulation,
+		 "Enable legacy fbdev emulation [default=true]");
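The new knob makes the whole fbdev emulation layer optional. A hedged usage sketch, assuming the helper is built as drm_kms_helper (where drm_fb_helper.c normally lives):

	# on the kernel command line:
	drm_kms_helper.fbdev_emulation=0
	# or at runtime (the 0600 permission above makes it root-writable):
	echo 0 > /sys/module/drm_kms_helper/parameters/fbdev_emulation

With emulation disabled, the helper entry points below return early, so drivers using the helpers need no changes of their own.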
 static LIST_HEAD(kernel_fb_helper_list);

@@ -99,6 +106,9 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
 	struct drm_connector *connector;
 	int i;

+	if (!drm_fbdev_emulation)
+		return 0;
+
 	mutex_lock(&dev->mode_config.mutex);
 	drm_for_each_connector(connector, dev) {
 		struct drm_fb_helper_connector *fb_helper_connector;

@@ -129,6 +139,9 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
 	struct drm_fb_helper_connector **temp;
 	struct drm_fb_helper_connector *fb_helper_connector;

+	if (!drm_fbdev_emulation)
+		return 0;
+
 	WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
 	if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
 		temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector *) * (fb_helper->connector_count + 1), GFP_KERNEL);

@@ -184,6 +197,9 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 	struct drm_fb_helper_connector *fb_helper_connector;
 	int i, j;

+	if (!drm_fbdev_emulation)
+		return 0;
+
 	WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));

 	for (i = 0; i < fb_helper->connector_count; i++) {

@@ -320,15 +336,92 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
 }
 EXPORT_SYMBOL(drm_fb_helper_debug_leave);

-static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
 {
 	struct drm_device *dev = fb_helper->dev;
+	struct drm_plane *plane;
+	struct drm_atomic_state *state;
+	int i, ret;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = dev->mode_config.acquire_ctx;
+retry:
+	drm_for_each_plane(plane, dev) {
+		struct drm_plane_state *plane_state;
+
+		plane->old_fb = plane->fb;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state)) {
+			ret = PTR_ERR(plane_state);
+			goto fail;
+		}
+
+		plane_state->rotation = BIT(DRM_ROTATE_0);
+
+		/* disable non-primary: */
+		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+			continue;
+
+		ret = __drm_atomic_helper_disable_plane(plane, plane_state);
+		if (ret != 0)
+			goto fail;
+	}
+
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+
+		ret = __drm_atomic_helper_set_config(mode_set, state);
+		if (ret != 0)
+			goto fail;
+	}
+
+	ret = drm_atomic_commit(state);
+
+fail:
+	drm_for_each_plane(plane, dev) {
+		if (ret == 0) {
+			struct drm_framebuffer *new_fb = plane->state->fb;
+
+			if (new_fb)
+				drm_framebuffer_reference(new_fb);
+			plane->fb = new_fb;
+			plane->crtc = plane->state->crtc;
+
+			if (plane->old_fb)
+				drm_framebuffer_unreference(plane->old_fb);
+		}
+		plane->old_fb = NULL;
+	}
+
+	if (ret == -EDEADLK)
+		goto backoff;
+
+	if (ret != 0)
+		drm_atomic_state_free(state);
+
+	return ret;
+
+backoff:
+	drm_atomic_state_clear(state);
+	drm_atomic_legacy_backoff(state);
+
+	goto retry;
+}

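restore_fbdev_mode_atomic() above (and pan_display_atomic() further down) follow the standard atomic retry dance for drivers still under the legacy locking scheme. Isolated as a hedged sketch, with the commit step reduced to a hypothetical build_and_commit():

	retry:
		ret = build_and_commit(state);		/* hypothetical: build states, drm_atomic_commit() */
		if (ret == -EDEADLK) {
			drm_atomic_state_clear(state);		/* forget the half-built state */
			drm_atomic_legacy_backoff(state);	/* drop and reacquire the locks */
			goto retry;
		}
		if (ret)
			drm_atomic_state_free(state);	/* on success the commit consumed it */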
+static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+{
+	struct drm_device *dev = fb_helper->dev;
 	struct drm_plane *plane;
-	bool error = false;
 	int i;

 	drm_warn_on_modeset_not_all_locked(dev);

+	if (fb_helper->atomic)
+		return restore_fbdev_mode_atomic(fb_helper);
+
 	drm_for_each_plane(plane, dev) {
 		if (plane->type != DRM_PLANE_TYPE_PRIMARY)
 			drm_plane_force_disable(plane);

@@ -348,18 +441,19 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
 		if (crtc->funcs->cursor_set2) {
 			ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
 			if (ret)
-				error = true;
+				return ret;
 		} else if (crtc->funcs->cursor_set) {
 			ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
 			if (ret)
-				error = true;
+				return ret;
 		}

 		ret = drm_mode_set_config_internal(mode_set);
 		if (ret)
-			error = true;
+			return ret;
 	}
-	return error;
+
+	return 0;
 }

 /**

@@ -369,12 +463,18 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
  * This should be called from driver's drm ->lastclose callback
  * when implementing an fbcon on top of kms using this helper. This ensures that
  * the user isn't greeted with a black screen when e.g. X dies.
+ *
+ * RETURNS:
+ * Zero if everything went ok, negative error code otherwise.
  */
-bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
 {
 	struct drm_device *dev = fb_helper->dev;
-	bool ret;
-	bool do_delayed = false;
+	bool do_delayed;
+	int ret;
+
+	if (!drm_fbdev_emulation)
+		return -ENODEV;

 	drm_modeset_lock_all(dev);
 	ret = restore_fbdev_mode(fb_helper);

@@ -592,6 +692,9 @@ int drm_fb_helper_init(struct drm_device *dev,
 	struct drm_crtc *crtc;
 	int i;

+	if (!drm_fbdev_emulation)
+		return 0;
+
 	if (!max_conn_count)
 		return -EINVAL;

@@ -625,6 +728,8 @@ int drm_fb_helper_init(struct drm_device *dev,
 		i++;
 	}

+	fb_helper->atomic = !!drm_core_check_feature(dev, DRIVER_ATOMIC);
+
 	return 0;
 out_free:
 	drm_fb_helper_crtc_free(fb_helper);

@@ -714,6 +819,9 @@ EXPORT_SYMBOL(drm_fb_helper_release_fbi);

 void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
 {
+	if (!drm_fbdev_emulation)
+		return;
+
 	if (!list_empty(&fb_helper->kernel_fb_list)) {
 		list_del(&fb_helper->kernel_fb_list);
 		if (list_empty(&kernel_fb_helper_list)) {

@@ -1122,6 +1230,80 @@ int drm_fb_helper_set_par(struct fb_info *info)
 }
 EXPORT_SYMBOL(drm_fb_helper_set_par);

+static int pan_display_atomic(struct fb_var_screeninfo *var,
+			      struct fb_info *info)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_atomic_state *state;
+	int i, ret;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = dev->mode_config.acquire_ctx;
+retry:
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		struct drm_mode_set *mode_set;
+
+		mode_set = &fb_helper->crtc_info[i].mode_set;
+
+		mode_set->crtc->primary->old_fb = mode_set->crtc->primary->fb;
+
+		mode_set->x = var->xoffset;
+		mode_set->y = var->yoffset;
+
+		ret = __drm_atomic_helper_set_config(mode_set, state);
+		if (ret != 0)
+			goto fail;
+	}
+
+	ret = drm_atomic_commit(state);
+	if (ret != 0)
+		goto fail;
+
+	info->var.xoffset = var->xoffset;
+	info->var.yoffset = var->yoffset;
+
+fail:
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		struct drm_mode_set *mode_set;
+		struct drm_plane *plane;
+
+		mode_set = &fb_helper->crtc_info[i].mode_set;
+		plane = mode_set->crtc->primary;
+
+		if (ret == 0) {
+			struct drm_framebuffer *new_fb = plane->state->fb;
+
+			if (new_fb)
+				drm_framebuffer_reference(new_fb);
+			plane->fb = new_fb;
+			plane->crtc = plane->state->crtc;
+
+			if (plane->old_fb)
+				drm_framebuffer_unreference(plane->old_fb);
+		}
+		plane->old_fb = NULL;
+	}
+
+	if (ret == -EDEADLK)
+		goto backoff;
+
+	if (ret != 0)
+		drm_atomic_state_free(state);
+
+	return ret;
+
+backoff:
+	drm_atomic_state_clear(state);
+	drm_atomic_legacy_backoff(state);
+
+	goto retry;
+}
+
 /**
  * drm_fb_helper_pan_display - implementation for ->fb_pan_display
  * @var: updated screen information

@@ -1145,6 +1327,11 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
 		return -EBUSY;
 	}

+	if (fb_helper->atomic) {
+		ret = pan_display_atomic(var, info);
+		goto unlock;
+	}
+
 	for (i = 0; i < fb_helper->crtc_count; i++) {
 		modeset = &fb_helper->crtc_info[i].mode_set;

@@ -1159,6 +1346,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
 			}
 		}
 	}
+unlock:
 	drm_modeset_unlock_all(dev);
 	return ret;
 }

@@ -1934,6 +2122,9 @@ int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
 	struct drm_device *dev = fb_helper->dev;
 	int count = 0;

+	if (!drm_fbdev_emulation)
+		return 0;
+
 	mutex_lock(&dev->mode_config.mutex);
 	count = drm_fb_helper_probe_connector_modes(fb_helper,
 						    dev->mode_config.max_width,

@@ -1977,6 +2168,9 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 	struct drm_device *dev = fb_helper->dev;
 	u32 max_width, max_height;

+	if (!drm_fbdev_emulation)
+		return 0;
+
 	mutex_lock(&fb_helper->dev->mode_config.mutex);
 	if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) {
 		fb_helper->delayed_hotplug = true;
@@ -763,7 +763,8 @@ EXPORT_SYMBOL(drm_gem_object_release);
 void
 drm_gem_object_free(struct kref *kref)
 {
-	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+	struct drm_gem_object *obj =
+		container_of(kref, struct drm_gem_object, refcount);
 	struct drm_device *dev = obj->dev;

 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
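The cast only worked because refcount happened to be the first member of struct drm_gem_object; container_of() removes that hidden layout assumption. A minimal illustration with a hypothetical struct where the kref is deliberately not first:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct foo {
		int id;			/* kref is NOT the first member */
		struct kref refcount;
	};

	static void foo_release(struct kref *kref)
	{
		/* correct regardless of where 'refcount' sits in the struct */
		struct foo *f = container_of(kref, struct foo, refcount);

		kfree(f);
		/* a plain (struct foo *)kref cast would point sizeof(int)
		 * bytes (plus padding) too early */
	}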
@@ -810,8 +811,6 @@ EXPORT_SYMBOL(drm_gem_vm_close);
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
- * NOTE: This function has to be protected with dev->struct_mutex
- *
 * Return 0 or success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */

@@ -820,8 +819,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 {
 	struct drm_device *dev = obj->dev;

-	lockdep_assert_held(&dev->struct_mutex);
-
 	/* Check for valid size. */
 	if (obj_size < vma->vm_end - vma->vm_start)
 		return -EINVAL;

@@ -865,30 +862,46 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;
-	struct drm_gem_object *obj;
+	struct drm_gem_object *obj = NULL;
 	struct drm_vma_offset_node *node;
 	int ret;

 	if (drm_device_is_unplugged(dev))
 		return -ENODEV;

-	mutex_lock(&dev->struct_mutex);
+	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+						  vma->vm_pgoff,
+						  vma_pages(vma));
+	if (likely(node)) {
+		obj = container_of(node, struct drm_gem_object, vma_node);
+		/*
+		 * When the object is being freed, after it hits 0-refcnt it
+		 * proceeds to tear down the object. In the process it will
+		 * attempt to remove the VMA offset and so acquire this
+		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
+		 * that matches our range, we know it is in the process of being
+		 * destroyed and will be freed as soon as we release the lock -
+		 * so we have to check for the 0-refcnted object and treat it as
+		 * invalid.
+		 */
+		if (!kref_get_unless_zero(&obj->refcount))
+			obj = NULL;
+	}
+	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

-	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
-					   vma->vm_pgoff,
-					   vma_pages(vma));
-	if (!node) {
-		mutex_unlock(&dev->struct_mutex);
+	if (!obj)
 		return -EINVAL;
-	} else if (!drm_vma_node_is_allowed(node, filp)) {
-		mutex_unlock(&dev->struct_mutex);
+
+	if (!drm_vma_node_is_allowed(node, filp)) {
+		drm_gem_object_unreference_unlocked(obj);
 		return -EACCES;
 	}

-	obj = container_of(node, struct drm_gem_object, vma_node);
-	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
+	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
+			       vma);

-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);

 	return ret;
 }
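The lookup above is an instance of the general "find under the table's lock, then kref_get_unless_zero()" idiom that lets drm_gem_mmap() drop struct_mutex entirely. A generic, hedged sketch with hypothetical types and a hypothetical table_find() helper:

	#include <linux/kref.h>
	#include <linux/spinlock.h>

	struct foo_table {
		spinlock_t lock;
		/* ... some index structure ... */
	};

	struct foo_obj {
		struct kref refcount;
		/* ... */
	};

	static struct foo_obj *foo_lookup(struct foo_table *table, u64 key)
	{
		struct foo_obj *obj;

		spin_lock(&table->lock);
		obj = table_find(table, key);	/* hypothetical lookup */
		if (obj && !kref_get_unless_zero(&obj->refcount))
			obj = NULL;	/* refcount hit zero: object is mid-teardown */
		spin_unlock(&table->lock);

		return obj;	/* caller owns a reference, or got NULL */
	}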
@@ -481,12 +481,9 @@ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
 			   struct vm_area_struct *vma)
 {
 	struct drm_gem_cma_object *cma_obj;
-	struct drm_device *dev = obj->dev;
 	int ret;

-	mutex_lock(&dev->struct_mutex);
 	ret = drm_gem_mmap_obj(obj, obj->size, vma);
-	mutex_unlock(&dev->struct_mutex);
 	if (ret < 0)
 		return ret;
@@ -73,7 +73,7 @@ int drm_authmagic(struct drm_device *dev, void *data,
 /* drm_sysfs.c */
 extern struct class *drm_class;

-struct class *drm_sysfs_create(struct module *owner, char *name);
+int drm_sysfs_init(void);
 void drm_sysfs_destroy(void);
 struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
 int drm_sysfs_connector_add(struct drm_connector *connector);