Merge "Merge android-4.19-stable.113 (2b82910d
) into msm-4.19"
This commit is contained in:
commit
eb7bfc5b72
227 changed files with 77978 additions and 75614 deletions
@@ -136,6 +136,10 @@
 			dynamic table installation which will install SSDT
 			tables to /sys/firmware/acpi/tables/dynamic.
 
+	acpi_no_watchdog	[HW,ACPI,WDT]
+			Ignore the ACPI-based watchdog interface (WDAT) and let
+			a native driver control the watchdog device instead.
+
 	acpi_rsdp=	[ACPI,EFI,KEXEC]
 			Pass the RSDP address to the kernel, mostly used
 			on machines running EFI runtime service to boot the
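A quick usage sketch (not part of the commit; everything other than the new
flag below is a placeholder): booting with

	console=ttyS0 root=/dev/sda1 acpi_no_watchdog

makes the WDAT lookup return NULL (see the drivers/acpi/acpi_watchdog.c hunk
later in this diff), so a native watchdog driver can bind instead.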
@@ -25,8 +25,8 @@ suspend/resume and shutdown ordering.
 
 Device links allow representation of such dependencies in the driver core.
 
-In its standard form, a device link combines *both* dependency types:
-It guarantees correct suspend/resume and shutdown ordering between a
+In its standard or *managed* form, a device link combines *both* dependency
+types: It guarantees correct suspend/resume and shutdown ordering between a
 "supplier" device and its "consumer" devices, and it guarantees driver
 presence on the supplier. The consumer devices are not probed before the
 supplier is bound to a driver, and they're unbound before the supplier
@@ -59,18 +59,24 @@ device ``->probe`` callback or a boot-time PCI quirk.
 
 Another example for an inconsistent state would be a device link that
 represents a driver presence dependency, yet is added from the consumer's
-``->probe`` callback while the supplier hasn't probed yet: Had the driver
-core known about the device link earlier, it wouldn't have probed the
+``->probe`` callback while the supplier hasn't started to probe yet: Had the
+driver core known about the device link earlier, it wouldn't have probed the
 consumer in the first place. The onus is thus on the consumer to check
 presence of the supplier after adding the link, and defer probing on
-non-presence.
+non-presence. [Note that it is valid to create a link from the consumer's
+``->probe`` callback while the supplier is still probing, but the consumer must
+know that the supplier is functional already at the link creation time (that is
+the case, for instance, if the consumer has just acquired some resources that
+would not have been available had the supplier not been functional then).]
 
-If a device link is added in the ``->probe`` callback of the supplier or
-consumer driver, it is typically deleted in its ``->remove`` callback for
-symmetry. That way, if the driver is compiled as a module, the device
-link is added on module load and orderly deleted on unload. The same
-restrictions that apply to device link addition (e.g. exclusion of a
-parallel suspend/resume transition) apply equally to deletion.
+If a device link with ``DL_FLAG_STATELESS`` set (i.e. a stateless device link)
+is added in the ``->probe`` callback of the supplier or consumer driver, it is
+typically deleted in its ``->remove`` callback for symmetry. That way, if the
+driver is compiled as a module, the device link is added on module load and
+orderly deleted on unload. The same restrictions that apply to device link
+addition (e.g. exclusion of a parallel suspend/resume transition) apply equally
+to deletion. Device links managed by the driver core are deleted automatically
+by it.
 
 Several flags may be specified on device link addition, two of which
 have already been mentioned above: ``DL_FLAG_STATELESS`` to express that no
@@ -83,22 +89,37 @@ link is added from the consumer's ``->probe`` callback: ``DL_FLAG_RPM_ACTIVE``
 can be specified to runtime resume the supplier upon addition of the
 device link. ``DL_FLAG_AUTOREMOVE_CONSUMER`` causes the device link to be
 automatically purged when the consumer fails to probe or later unbinds.
-This obviates the need to explicitly delete the link in the ``->remove``
-callback or in the error path of the ``->probe`` callback.
 
 Similarly, when the device link is added from supplier's ``->probe`` callback,
 ``DL_FLAG_AUTOREMOVE_SUPPLIER`` causes the device link to be automatically
 purged when the supplier fails to probe or later unbinds.
 
+If neither ``DL_FLAG_AUTOREMOVE_CONSUMER`` nor ``DL_FLAG_AUTOREMOVE_SUPPLIER``
+is set, ``DL_FLAG_AUTOPROBE_CONSUMER`` can be used to request the driver core
+to probe for a driver for the consumer driver on the link automatically after
+a driver has been bound to the supplier device.
+
+Note, however, that any combinations of ``DL_FLAG_AUTOREMOVE_CONSUMER``,
+``DL_FLAG_AUTOREMOVE_SUPPLIER`` or ``DL_FLAG_AUTOPROBE_CONSUMER`` with
+``DL_FLAG_STATELESS`` are invalid and cannot be used.
+
 Limitations
 ===========
 
-Driver authors should be aware that a driver presence dependency (i.e. when
-``DL_FLAG_STATELESS`` is not specified on link addition) may cause probing of
-the consumer to be deferred indefinitely. This can become a problem if the
-consumer is required to probe before a certain initcall level is reached.
-Worse, if the supplier driver is blacklisted or missing, the consumer will
-never be probed.
+Driver authors should be aware that a driver presence dependency for managed
+device links (i.e. when ``DL_FLAG_STATELESS`` is not specified on link addition)
+may cause probing of the consumer to be deferred indefinitely. This can become
+a problem if the consumer is required to probe before a certain initcall level
+is reached. Worse, if the supplier driver is blacklisted or missing, the
+consumer will never be probed.
+
+Moreover, managed device links cannot be deleted directly. They are deleted
+by the driver core when they are not necessary any more in accordance with the
+``DL_FLAG_AUTOREMOVE_CONSUMER`` and ``DL_FLAG_AUTOREMOVE_SUPPLIER`` flags.
+However, stateless device links (i.e. device links with ``DL_FLAG_STATELESS``
+set) are expected to be removed by whoever called :c:func:`device_link_add()`
+to add them with the help of either :c:func:`device_link_del()` or
+:c:func:`device_link_remove()`.
 
 Sometimes drivers depend on optional resources. They are able to operate
 in a degraded mode (reduced feature set or performance) when those resources
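To make the managed-link rules above concrete, here is a minimal consumer-side
sketch under stated assumptions: supplier_dev is a hypothetical pointer
obtained elsewhere (e.g. via a phandle lookup), and the presence check is the
simple unlocked form.

	#include <linux/device.h>
	#include <linux/platform_device.h>

	static struct device *supplier_dev;	/* hypothetical, found elsewhere */

	static int consumer_probe(struct platform_device *pdev)
	{
		struct device_link *link;

		/*
		 * Managed link (no DL_FLAG_STATELESS): the driver core owns it
		 * and drops it when this consumer unbinds or fails to probe.
		 */
		link = device_link_add(&pdev->dev, supplier_dev,
				       DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_PM_RUNTIME);
		if (!link)
			return -EINVAL;

		/* The onus is on the consumer: defer if the supplier isn't bound. */
		if (!supplier_dev->driver)
			return -EPROBE_DEFER;

		return 0;
	}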
@@ -283,4 +304,4 @@ API
 ===
 
 .. kernel-doc:: drivers/base/core.c
-   :functions: device_link_add device_link_del
+   :functions: device_link_add device_link_del device_link_remove
@@ -627,3 +627,10 @@ in your dentry operations instead.
 DCACHE_RCUACCESS is gone; having an RCU delay on dentry freeing is the
 default.  DCACHE_NORCU opts out, and only d_alloc_pseudo() has any
 business doing so.
+--
+[mandatory]
+
+	[should've been added in 2016] stale comment in finish_open()
+	nonwithstanding, failure exits in ->atomic_open() instances should
+	*NOT* fput() the file, no matter what.  Everything is handled by the
+	caller.
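A sketch of the rule being documented, under stated assumptions:
foo_prepare_file/foo_open are hypothetical helpers, and the three-argument
finish_open() form is assumed; only the "don't fput() on failure" shape is the
point.

	static int foo_prepare_file(struct file *file);
	static int foo_open(struct inode *inode, struct file *file);

	static int foo_atomic_open(struct inode *dir, struct dentry *dentry,
				   struct file *file, unsigned int open_flag,
				   umode_t mode)
	{
		int err = foo_prepare_file(file);	/* hypothetical setup step */

		if (err)
			return err;	/* failure exit: no fput(file), caller cleans up */
		return finish_open(file, dentry, foo_open);
	}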
61	Documentation/usb/raw-gadget.rst	Normal file
@@ -0,0 +1,61 @@
+==============
+USB Raw Gadget
+==============
+
+USB Raw Gadget is a kernel module that provides a userspace interface for
+the USB Gadget subsystem. Essentially it allows to emulate USB devices
+from userspace. Enabled with CONFIG_USB_RAW_GADGET. Raw Gadget is
+currently a strictly debugging feature and shouldn't be used in
+production, use GadgetFS instead.
+
+Comparison to GadgetFS
+~~~~~~~~~~~~~~~~~~~~~~
+
+Raw Gadget is similar to GadgetFS, but provides a more low-level and
+direct access to the USB Gadget layer for the userspace. The key
+differences are:
+
+1. Every USB request is passed to the userspace to get a response, while
+   GadgetFS responds to some USB requests internally based on the provided
+   descriptors. However note, that the UDC driver might respond to some
+   requests on its own and never forward them to the Gadget layer.
+
+2. GadgetFS performs some sanity checks on the provided USB descriptors,
+   while Raw Gadget allows you to provide arbitrary data as responses to
+   USB requests.
+
+3. Raw Gadget provides a way to select a UDC device/driver to bind to,
+   while GadgetFS currently binds to the first available UDC.
+
+4. Raw Gadget uses predictable endpoint names (handles) across different
+   UDCs (as long as UDCs have enough endpoints of each required transfer
+   type).
+
+5. Raw Gadget has ioctl-based interface instead of a filesystem-based one.
+
+Userspace interface
+~~~~~~~~~~~~~~~~~~~
+
+To create a Raw Gadget instance open /dev/raw-gadget. Multiple raw-gadget
+instances (bound to different UDCs) can be used at the same time. The
+interaction with the opened file happens through the ioctl() calls, see
+comments in include/uapi/linux/usb/raw_gadget.h for details.
+
+The typical usage of Raw Gadget looks like:
+
+1. Open Raw Gadget instance via /dev/raw-gadget.
+2. Initialize the instance via USB_RAW_IOCTL_INIT.
+3. Launch the instance with USB_RAW_IOCTL_RUN.
+4. In a loop issue USB_RAW_IOCTL_EVENT_FETCH calls to receive events from
+   Raw Gadget and react to those depending on what kind of USB device
+   needs to be emulated.
+
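The four steps above map directly onto a tiny userspace program. A hedged
sketch follows: the event-buffer sizing and the UDC names
("dummy_udc"/"dummy_udc.0") are illustrative assumptions, and the
authoritative structure layouts live in include/uapi/linux/usb/raw_gadget.h.

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/usb/raw_gadget.h>

	int main(void)
	{
		struct usb_raw_init init;
		int fd = open("/dev/raw-gadget", O_RDWR);	/* step 1 */

		if (fd < 0)
			return 1;

		memset(&init, 0, sizeof(init));
		strcpy((char *)init.driver_name, "dummy_udc");	/* assumed UDC driver */
		strcpy((char *)init.device_name, "dummy_udc.0");/* assumed UDC device */
		init.speed = USB_SPEED_HIGH;

		if (ioctl(fd, USB_RAW_IOCTL_INIT, &init) ||	/* step 2 */
		    ioctl(fd, USB_RAW_IOCTL_RUN, 0))		/* step 3 */
			return 1;

		for (;;) {					/* step 4 */
			char buf[sizeof(struct usb_raw_event) + 256];
			struct usb_raw_event *ev = (struct usb_raw_event *)buf;

			ev->length = 256;	/* capacity of ev->data[] */
			if (ioctl(fd, USB_RAW_IOCTL_EVENT_FETCH, ev))
				break;
			/* dispatch on ev->type: connect, control request, ... */
		}
		close(fd);
		return 0;
	}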
+Potential future improvements
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Implement ioctl's for setting/clearing halt status on endpoints.
+
+- Reporting more events (suspend, resume, etc.) through
+  USB_RAW_IOCTL_EVENT_FETCH.
+
+- Support O_NONBLOCK I/O.
4	Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 110
+SUBLEVEL = 113
 EXTRAVERSION =
 NAME = "People's Front"
 
@@ -853,7 +853,7 @@ LD_FLAGS_LTO_CLANG := -mllvm -import-instr-limit=5
 KBUILD_LDFLAGS += $(LD_FLAGS_LTO_CLANG)
 KBUILD_LDFLAGS_MODULE += $(LD_FLAGS_LTO_CLANG)
 
-KBUILD_LDS_MODULE += $(srctree)/scripts/module-lto.lds
+KBUILD_LDFLAGS_MODULE += -T $(srctree)/scripts/module-lto.lds
 
 # allow disabling only clang LTO where needed
 DISABLE_LTO_CLANG := -fno-lto
149687	abi_gki_aarch64.xml
(file diff suppressed because it is too large)
@@ -14,6 +14,8 @@
 #ifdef __ASSEMBLY__
 
 #define ASM_NL		 `	/* use '`' to mark new line in macro */
+#define __ALIGN		.align 4
+#define __ALIGN_STR	__stringify(__ALIGN)
 
 /* annotation for data we want in DCCM - if enabled in .config */
 .macro ARCFP_DATA nm
@@ -324,6 +324,7 @@
 			device_type = "pci";
 			ranges = <0x81000000 0 0 0x03000 0 0x00010000
 				0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+			dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
 			bus-range = <0x00 0xff>;
 			#interrupt-cells = <1>;
 			num-lanes = <1>;
@@ -376,6 +377,7 @@
 			device_type = "pci";
 			ranges = <0x81000000 0 0 0x03000 0 0x00010000
 				0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+			dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
 			bus-range = <0x00 0xff>;
 			#interrupt-cells = <1>;
 			num-lanes = <1>;
@@ -103,6 +103,8 @@ static bool __init cntvct_functional(void)
 	 * this.
 	 */
 	np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
+	if (!np)
+		np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer");
 	if (!np)
 		goto out_put;
 
@@ -77,7 +77,9 @@ CONFIG_ARM_SCPI_PROTOCOL=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
 CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_JUMP_LABEL=y
 CONFIG_LTO_CLANG=y
+CONFIG_CFI_CLANG=y
 CONFIG_SHADOW_CALL_STACK=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -311,6 +313,10 @@ CONFIG_MEDIA_CONTROLLER=y
 # CONFIG_VGA_ARB is not set
 CONFIG_DRM=y
 # CONFIG_DRM_FBDEV_EMULATION is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_HRTIMER=y
@@ -368,7 +374,6 @@ CONFIG_DEVFREQ_GOV_PERFORMANCE=y
 CONFIG_DEVFREQ_GOV_POWERSAVE=y
 CONFIG_DEVFREQ_GOV_USERSPACE=y
 CONFIG_DEVFREQ_GOV_PASSIVE=y
-CONFIG_EXTCON=y
 CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_QCOM_PDC=y
@@ -32,6 +32,10 @@ extern void __cpu_copy_user_page(void *to, const void *from,
 extern void copy_page(void *to, const void *from);
 extern void clear_page(void *to);
 
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
 #define clear_user_page(addr,vaddr,pg)	__cpu_clear_user_page(addr, vaddr)
 #define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)
 
@@ -970,11 +970,29 @@ void tick_broadcast(const struct cpumask *mask)
 }
 #endif
 
+/*
+ * The number of CPUs online, not counting this CPU (which may not be
+ * fully online and so not counted in num_online_cpus()).
+ */
+static inline unsigned int num_other_online_cpus(void)
+{
+	unsigned int this_cpu_online = cpu_online(smp_processor_id());
+
+	return num_online_cpus() - this_cpu_online;
+}
+
+static inline unsigned int num_other_active_cpus(void)
+{
+	unsigned int this_cpu_active = cpu_active(smp_processor_id());
+
+	return num_active_cpus() - this_cpu_active;
+}
+
 void smp_send_stop(void)
 {
 	unsigned long timeout;
 
-	if (num_online_cpus() > 1) {
+	if (num_other_online_cpus()) {
 		cpumask_t mask;
 
 		cpumask_copy(&mask, cpu_online_mask);
@@ -987,10 +1005,10 @@ void smp_send_stop(void)
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
-	while (num_active_cpus() > 1 && timeout--)
+	while (num_other_active_cpus() && timeout--)
 		udelay(1);
 
-	if (num_active_cpus() > 1)
+	if (num_other_active_cpus())
 		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
 			   cpumask_pr_args(cpu_online_mask));
 
@@ -1013,7 +1031,11 @@ void crash_smp_send_stop(void)
 
 	cpus_stopped = 1;
 
-	if (num_online_cpus() == 1) {
+	/*
+	 * If this cpu is the only one alive at this point in time, online or
+	 * not, there are no stop messages to be sent around, so just back out.
+	 */
+	if (num_other_online_cpus() == 0) {
 		sdei_mask_local_cpu();
 		return;
 	}
@@ -1021,7 +1043,7 @@ void crash_smp_send_stop(void)
 	cpumask_copy(&mask, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), &mask);
 
-	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+	atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());
 
 	pr_crit("SMP: stopping secondary CPUs\n");
 	smp_cross_call(&mask, IPI_CPU_CRASH_STOP);
@@ -399,13 +399,26 @@ static phys_addr_t pgd_pgtable_alloc(void)
 	return __pa(ptr);
 }
 
+/**
+ * create_pgtable_mapping - create a pagetable mapping for given
+ * physical start and end addresses.
+ * @start: physical start address.
+ * @end: physical end address.
+ */
 void create_pgtable_mapping(phys_addr_t start, phys_addr_t end)
 {
 	unsigned long virt = (unsigned long)phys_to_virt(start);
 
+	if (virt < VMALLOC_START) {
+		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
+			&start, virt);
+		return;
+	}
+
 	__create_pgd_mapping(init_mm.pgd, start, virt, end - start,
 				PAGE_KERNEL, NULL, 0);
 }
+EXPORT_SYMBOL_GPL(create_pgtable_mapping);
 
 /*
  * This function can only be used to modify existing table entries,
@@ -322,6 +322,12 @@ SECTIONS
 		*(.branch_lt)
 	}
 
+#ifdef CONFIG_DEBUG_INFO_BTF
+	.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {
+		*(.BTF)
+	}
+#endif
+
 	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
 		__start_opd = .;
 		KEEP(*(.opd))
@@ -16,6 +16,10 @@
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/moduleloader.h>
+#include <linux/vmalloc.h>
+#include <linux/sizes.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
 
 static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
 {
@@ -394,3 +398,15 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
 
 	return 0;
 }
+
+#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
+#define VMALLOC_MODULE_START \
+	 max(PFN_ALIGN((unsigned long)&_end - SZ_2G), VMALLOC_START)
+void *module_alloc(unsigned long size)
+{
+	return __vmalloc_node_range(size, 1, VMALLOC_MODULE_START,
+				    VMALLOC_END, GFP_KERNEL,
+				    PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+				    __builtin_return_address(0));
+}
+#endif
@@ -49,6 +49,7 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_PCI_MSI=y
 CONFIG_IA32_EMULATION=y
+CONFIG_JUMP_LABEL=y
 CONFIG_LTO_CLANG=y
 CONFIG_CFI_CLANG=y
 CONFIG_MODULES=y
@@ -193,20 +193,18 @@ static int amd_uncore_event_init(struct perf_event *event)
 
 	/*
 	 * NB and Last level cache counters (MSRs) are shared across all cores
-	 * that share the same NB / Last level cache. Interrupts can be directed
-	 * to a single target core, however, event counts generated by processes
-	 * running on other cores cannot be masked out. So we do not support
-	 * sampling and per-thread events.
+	 * that share the same NB / Last level cache. On family 16h and below,
+	 * Interrupts can be directed to a single target core, however, event
+	 * counts generated by processes running on other cores cannot be masked
+	 * out. So we do not support sampling and per-thread events via
+	 * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
 	 */
-	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
-		return -EINVAL;
 
 	/* NB and Last level cache counters do not have usr/os/guest/host bits */
 	if (event->attr.exclude_user || event->attr.exclude_kernel ||
 	    event->attr.exclude_host || event->attr.exclude_guest)
 		return -EINVAL;
 
-	/* and we do not enable counter overflow interrupts */
 	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
 	hwc->idx = -1;
 
@@ -314,6 +312,7 @@ static struct pmu amd_nb_pmu = {
 	.start		= amd_uncore_start,
 	.stop		= amd_uncore_stop,
 	.read		= amd_uncore_read,
+	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
 };
 
 static struct pmu amd_llc_pmu = {
@@ -324,6 +323,7 @@ static struct pmu amd_llc_pmu = {
 	.start		= amd_uncore_start,
 	.stop		= amd_uncore_stop,
 	.read		= amd_uncore_read,
+	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
 };
 
 static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
@@ -489,17 +489,18 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
 			return;
 
 		if ((val & 3UL) == 1UL) {
-			/* PPIN available but disabled: */
+			/* PPIN locked in disabled mode */
 			return;
 		}
 
-		/* If PPIN is disabled, but not locked, try to enable: */
-		if (!(val & 3UL)) {
+		/* If PPIN is disabled, try to enable */
+		if (!(val & 2UL)) {
 			wrmsrl_safe(MSR_PPIN_CTL, val | 2UL);
 			rdmsrl_safe(MSR_PPIN_CTL, &val);
 		}
 
-		if ((val & 3UL) == 2UL)
+		/* Is the enable bit set? */
+		if (val & 2UL)
 			set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
 	}
 }
@@ -5112,6 +5112,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 	ctxt->fetch.ptr = ctxt->fetch.data;
 	ctxt->fetch.end = ctxt->fetch.data + insn_len;
 	ctxt->opcode_len = 1;
+	ctxt->intercept = x86_intercept_none;
 	if (insn_len > 0)
 		memcpy(ctxt->fetch.data, insn, insn_len);
 	else {
@@ -273,7 +273,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 	return pmd_k;
 }
 
-void vmalloc_sync_all(void)
+static void vmalloc_sync(void)
 {
 	unsigned long address;
 
@@ -300,6 +300,16 @@ void vmalloc_sync_all(void)
 	}
 }
 
+void vmalloc_sync_mappings(void)
+{
+	vmalloc_sync();
+}
+
+void vmalloc_sync_unmappings(void)
+{
+	vmalloc_sync();
+}
+
 /*
  * 32-bit:
  *
@@ -402,11 +412,23 @@ static void dump_pagetable(unsigned long address)
 
 #else /* CONFIG_X86_64: */
 
-void vmalloc_sync_all(void)
+void vmalloc_sync_mappings(void)
 {
+	/*
+	 * 64-bit mappings might allocate new p4d/pud pages
+	 * that need to be propagated to all tasks' PGDs.
+	 */
 	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
 }
 
+void vmalloc_sync_unmappings(void)
+{
+	/*
+	 * Unmappings never allocate or free p4d/pud pages.
+	 * No work is required here.
+	 */
+}
+
 /*
  * 64-bit:
  *
@@ -525,12 +525,13 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
 	 */
 	entity = &bfqg->entity;
 	for_each_entity(entity) {
-		bfqg = container_of(entity, struct bfq_group, entity);
-		if (bfqg != bfqd->root_group) {
-			parent = bfqg_parent(bfqg);
+		struct bfq_group *curr_bfqg = container_of(entity,
+						struct bfq_group, entity);
+		if (curr_bfqg != bfqd->root_group) {
+			parent = bfqg_parent(curr_bfqg);
 			if (!parent)
 				parent = bfqd->root_group;
-			bfq_group_set_parent(bfqg, parent);
+			bfq_group_set_parent(curr_bfqg, parent);
 		}
 	}
 
4	build.config.allmodconfig.arm	Normal file
@@ -0,0 +1,4 @@
+. ${ROOT_DIR}/common/build.config.common
+. ${ROOT_DIR}/common/build.config.arm
+. ${ROOT_DIR}/common/build.config.allmodconfig
12	build.config.arm	Normal file
@@ -0,0 +1,12 @@
+ARCH=arm
+
+CLANG_TRIPLE=arm-linux-gnueabi-
+CROSS_COMPILE=arm-linux-androidkernel-
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9/bin
+
+FILES="
+arch/arm/boot/Image.gz
+arch/arm/boot/Image
+vmlinux
+System.map
+"
@@ -5,7 +5,7 @@ CC=clang
 LD=ld.lld
 NM=llvm-nm
 OBJCOPY=llvm-objcopy
-CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r377782b/bin
+CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r377782c/bin
 
 EXTRA_CMDS=''
 STOP_SHIP_TRACEPRINTK=1
@@ -1,11 +0,0 @@
-. ${ROOT_DIR}/common/build.config.common
-. ${ROOT_DIR}/common/build.config.aarch64
-. ${ROOT_DIR}/common/build.config.gki
-
-BUILD_INITRAMFS=1
-DEFCONFIG=cf_aarch_64_gki_defconfig
-PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/common/arch/arm64/configs/${DEFCONFIG} ${ROOT_DIR}/common/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/common/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/common/cuttlefish.fragment"
-POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/common/arch/arm64/configs/${DEFCONFIG}"
-
-# Not saving any kernel images. This build step is meant purely to generate the .kos.
-FILES=""
@@ -1,11 +0,0 @@
-. ${ROOT_DIR}/common/build.config.common
-. ${ROOT_DIR}/common/build.config.x86_64
-. ${ROOT_DIR}/common/build.config.gki
-
-BUILD_INITRAMFS=1
-DEFCONFIG=cf_x86_64_gki_defconfig
-PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/common/arch/x86/configs/${DEFCONFIG} ${ROOT_DIR}/common/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/common/arch/x86/configs/gki_defconfig ${ROOT_DIR}/common/cuttlefish.fragment"
-POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/common/arch/x86/configs/${DEFCONFIG}"
-
-# Not saving any kernel images. This build step is meant purely to generate the .kos.
-FILES=""
2	build.config.gki-debug.aarch64	Normal file
@@ -0,0 +1,2 @@
+. ${ROOT_DIR}/common/build.config.gki.aarch64
+TRIM_NONLISTED_KMI=""
2	build.config.gki-debug.x86_64	Normal file
@@ -0,0 +1,2 @@
+. ${ROOT_DIR}/common/build.config.gki.x86_64
+TRIM_NONLISTED_KMI=""
@@ -1,24 +0,0 @@
-CONFIG_CPUFREQ_DUMMY=m
-CONFIG_VSOCKETS=m
-CONFIG_VIRTIO_VSOCKETS=m
-CONFIG_GNSS_CMDLINE_SERIAL=m
-CONFIG_VIRTIO_BLK=m
-CONFIG_VIRTIO_NET=m
-CONFIG_VIRT_WIFI=m
-CONFIG_HW_RANDOM_VIRTIO=m
-CONFIG_DRM_VIRTIO_GPU=m
-CONFIG_SND_INTEL8X0=m
-CONFIG_USB_DUMMY_HCD=m
-CONFIG_RTC_DRV_TEST=m
-CONFIG_VIRTIO_PCI=m
-CONFIG_VIRTIO_PMEM=m
-CONFIG_VIRTIO_INPUT=m
-CONFIG_VIRTIO_MMIO=m
-CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
-CONFIG_ION=m
-CONFIG_ION_SYSTEM_HEAP=y
-CONFIG_TEST_STACKINIT=m
-CONFIG_TEST_MEMINIT=m
-CONFIG_SDCARD_FS=m
-CONFIG_TCG_TPM=m
-CONFIG_TCG_VTPM_PROXY=m
@@ -58,12 +58,14 @@ static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat)
 }
 #endif
 
+static bool acpi_no_watchdog;
+
 static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
 {
 	const struct acpi_table_wdat *wdat = NULL;
 	acpi_status status;
 
-	if (acpi_disabled)
+	if (acpi_disabled || acpi_no_watchdog)
 		return NULL;
 
 	status = acpi_get_table(ACPI_SIG_WDAT, 0,
@@ -91,6 +93,14 @@ bool acpi_has_watchdog(void)
 }
 EXPORT_SYMBOL_GPL(acpi_has_watchdog);
 
+/* ACPI watchdog can be disabled on boot command line */
+static int __init disable_acpi_watchdog(char *str)
+{
+	acpi_no_watchdog = true;
+	return 1;
+}
+__setup("acpi_no_watchdog", disable_acpi_watchdog);
+
 void __init acpi_watchdog_init(void)
 {
 	const struct acpi_wdat_entry *entries;
@@ -201,7 +201,7 @@ static int ghes_estatus_pool_expand(unsigned long len)
 	 * New allocation must be visible in all pgd before it can be found by
 	 * an NMI allocating from the pool.
 	 */
-	vmalloc_sync_all();
+	vmalloc_sync_mappings();
 
 	return gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
 }
@@ -118,7 +118,7 @@ static int device_is_dependent(struct device *dev, void *target)
 		return ret;
 
 	list_for_each_entry(link, &dev->links.consumers, s_node) {
-		if (link->flags == DL_FLAG_SYNC_STATE_ONLY)
+		if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
 			continue;
 
 		if (link->consumer == target)
@@ -131,6 +131,50 @@ static int device_is_dependent(struct device *dev, void *target)
 	return ret;
 }
 
+static void device_link_init_status(struct device_link *link,
+				    struct device *consumer,
+				    struct device *supplier)
+{
+	switch (supplier->links.status) {
+	case DL_DEV_PROBING:
+		switch (consumer->links.status) {
+		case DL_DEV_PROBING:
+			/*
+			 * A consumer driver can create a link to a supplier
+			 * that has not completed its probing yet as long as it
+			 * knows that the supplier is already functional (for
+			 * example, it has just acquired some resources from the
+			 * supplier).
+			 */
+			link->status = DL_STATE_CONSUMER_PROBE;
+			break;
+		default:
+			link->status = DL_STATE_DORMANT;
+			break;
+		}
+		break;
+	case DL_DEV_DRIVER_BOUND:
+		switch (consumer->links.status) {
+		case DL_DEV_PROBING:
+			link->status = DL_STATE_CONSUMER_PROBE;
+			break;
+		case DL_DEV_DRIVER_BOUND:
+			link->status = DL_STATE_ACTIVE;
+			break;
+		default:
+			link->status = DL_STATE_AVAILABLE;
+			break;
+		}
+		break;
+	case DL_DEV_UNBINDING:
+		link->status = DL_STATE_SUPPLIER_UNBIND;
+		break;
+	default:
+		link->status = DL_STATE_DORMANT;
+		break;
+	}
+}
+
 static int device_reorder_to_tail(struct device *dev, void *not_used)
 {
 	struct device_link *link;
@@ -147,7 +191,7 @@ static int device_reorder_to_tail(struct device *dev, void *not_used)
 
 	device_for_each_child(dev, NULL, device_reorder_to_tail);
 	list_for_each_entry(link, &dev->links.consumers, s_node) {
-		if (link->flags == DL_FLAG_SYNC_STATE_ONLY)
+		if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
 			continue;
 		device_reorder_to_tail(link->consumer, NULL);
 	}
@@ -175,6 +219,14 @@ void device_pm_move_to_tail(struct device *dev)
 	device_links_read_unlock(idx);
 }
 
+#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
+			       DL_FLAG_AUTOREMOVE_SUPPLIER | \
+			       DL_FLAG_AUTOPROBE_CONSUMER | \
+			       DL_FLAG_SYNC_STATE_ONLY)
+
+#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
+			    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
+
 /**
  * device_link_add - Create a link between two devices.
  * @consumer: Consumer end of the link.
@@ -189,14 +241,38 @@ void device_pm_move_to_tail(struct device *dev)
  * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
  * ignored.
  *
- * If the DL_FLAG_AUTOREMOVE_CONSUMER flag is set, the link will be removed
- * automatically when the consumer device driver unbinds from it. Analogously,
- * if DL_FLAG_AUTOREMOVE_SUPPLIER is set in @flags, the link will be removed
- * automatically when the supplier device driver unbinds from it.
+ * If DL_FLAG_STATELESS is set in @flags, the caller of this function is
+ * expected to release the link returned by it directly with the help of either
+ * device_link_del() or device_link_remove().
 *
- * The combination of DL_FLAG_STATELESS and either DL_FLAG_AUTOREMOVE_CONSUMER
- * or DL_FLAG_AUTOREMOVE_SUPPLIER set in @flags at the same time is invalid and
- * will cause NULL to be returned upfront.
+ * If that flag is not set, however, the caller of this function is handing the
+ * management of the link over to the driver core entirely and its return value
+ * can only be used to check whether or not the link is present. In that case,
+ * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
+ * flags can be used to indicate to the driver core when the link can be safely
+ * deleted. Namely, setting one of them in @flags indicates to the driver core
+ * that the link is not going to be used (by the given caller of this function)
+ * after unbinding the consumer or supplier driver, respectively, from its
+ * device, so the link can be deleted at that point. If none of them is set,
+ * the link will be maintained until one of the devices pointed to by it (either
+ * the consumer or the supplier) is unregistered.
+ *
+ * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
+ * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
+ * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
+ * be used to request the driver core to automatically probe for a consumer
+ * driver after successfully binding a driver to the supplier device.
+ *
+ * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
+ * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
+ * the same time is invalid and will cause NULL to be returned upfront.
+ * However, if a device link between the given @consumer and @supplier pair
+ * exists already when this function is called for them, the existing link will
+ * be returned regardless of its current type and status (the link's flags may
+ * be modified then). The caller of this function is then expected to treat
+ * the link as though it has just been created, so (in particular) if
+ * DL_FLAG_STATELESS was passed in @flags, the link needs to be released
+ * explicitly when not needed any more (as stated above).
 *
 * A side effect of the link creation is re-ordering of dpm_list and the
 * devices_kset list by moving the consumer device and all devices depending
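Reading the rewritten comment as code — a hedged sketch of the stateless
contract it describes (consumer and supplier are placeholder struct device
pointers):

	struct device_link *link;

	link = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
	if (!link)
		return -EINVAL;

	/* ... rely on the supplier ... */

	/* The caller owns a stateless link and must drop it explicitly, */
	device_link_del(link);				/* by link pointer, or */
	/* device_link_remove(consumer, supplier); */	/* by endpoint pair   */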
@@ -212,11 +288,13 @@ struct device_link *device_link_add(struct device *consumer,
 {
 	struct device_link *link;
 
-	if (!consumer || !supplier ||
+	if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
+	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
 	    (flags & DL_FLAG_SYNC_STATE_ONLY &&
 	     flags != DL_FLAG_SYNC_STATE_ONLY) ||
-	    (flags & DL_FLAG_STATELESS &&
-	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_AUTOREMOVE_SUPPLIER)))
+	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
+	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
+		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
 		return NULL;
 
 	if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
@@ -226,6 +304,9 @@ struct device_link *device_link_add(struct device *consumer,
 		}
 	}
 
+	if (!(flags & DL_FLAG_STATELESS))
+		flags |= DL_FLAG_MANAGED;
+
 	device_links_write_lock();
 	device_pm_lock();
 
@@ -243,25 +324,18 @@ struct device_link *device_link_add(struct device *consumer,
 		goto out;
 	}
 
+	/*
+	 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
+	 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
+	 * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
+	 */
+	if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+		flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
+
 	list_for_each_entry(link, &supplier->links.consumers, s_node) {
 		if (link->consumer != consumer)
 			continue;
 
-		/*
-		 * Don't return a stateless link if the caller wants a stateful
-		 * one and vice versa.
-		 */
-		if (WARN_ON((flags & DL_FLAG_STATELESS) != (link->flags & DL_FLAG_STATELESS))) {
-			link = NULL;
-			goto out;
-		}
-
-		if (flags & DL_FLAG_AUTOREMOVE_CONSUMER)
-			link->flags |= DL_FLAG_AUTOREMOVE_CONSUMER;
-
-		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
-			link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
-
 		if (flags & DL_FLAG_PM_RUNTIME) {
 			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
 				pm_runtime_new_link(consumer);
@@ -271,13 +345,42 @@ struct device_link *device_link_add(struct device *consumer,
 			refcount_inc(&link->rpm_active);
 		}
 
-		kref_get(&link->kref);
+		if (flags & DL_FLAG_STATELESS) {
+			kref_get(&link->kref);
+			if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
+			    !(link->flags & DL_FLAG_STATELESS)) {
+				link->flags |= DL_FLAG_STATELESS;
+				goto reorder;
+			} else {
+				goto out;
+			}
+		}
+
+		/*
+		 * If the life time of the link following from the new flags is
+		 * longer than indicated by the flags of the existing link,
+		 * update the existing link to stay around longer.
+		 */
+		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
+			if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
+				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
+			}
+		} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
+			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
+					 DL_FLAG_AUTOREMOVE_SUPPLIER);
+		}
+		if (!(link->flags & DL_FLAG_MANAGED)) {
+			kref_get(&link->kref);
+			link->flags |= DL_FLAG_MANAGED;
+			device_link_init_status(link, consumer, supplier);
+		}
 		if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
 		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
 			link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
 			goto reorder;
 		}
 
 		goto out;
 	}
 
@@ -304,42 +407,25 @@ struct device_link *device_link_add(struct device *consumer,
 	kref_init(&link->kref);
 
 	/* Determine the initial link state. */
-	if (flags & DL_FLAG_STATELESS) {
+	if (flags & DL_FLAG_STATELESS)
 		link->status = DL_STATE_NONE;
-	} else {
-		switch (supplier->links.status) {
-		case DL_DEV_DRIVER_BOUND:
-			switch (consumer->links.status) {
-			case DL_DEV_PROBING:
-				/*
-				 * Some callers expect the link creation during
-				 * consumer driver probe to resume the supplier
-				 * even without DL_FLAG_RPM_ACTIVE.
-				 */
-				if (flags & DL_FLAG_PM_RUNTIME)
-					pm_runtime_resume(supplier);
-
-				link->status = DL_STATE_CONSUMER_PROBE;
-				break;
-			case DL_DEV_DRIVER_BOUND:
-				link->status = DL_STATE_ACTIVE;
-				break;
-			default:
-				link->status = DL_STATE_AVAILABLE;
-				break;
-			}
-			break;
-		case DL_DEV_UNBINDING:
-			link->status = DL_STATE_SUPPLIER_UNBIND;
-			break;
-		default:
-			link->status = DL_STATE_DORMANT;
-			break;
-		}
-	}
+	else
+		device_link_init_status(link, consumer, supplier);
 
-	if (flags & DL_FLAG_SYNC_STATE_ONLY)
+	/*
+	 * Some callers expect the link creation during consumer driver probe to
+	 * resume the supplier even without DL_FLAG_RPM_ACTIVE.
+	 */
+	if (link->status == DL_STATE_CONSUMER_PROBE &&
+	    flags & DL_FLAG_PM_RUNTIME)
+		pm_runtime_resume(supplier);
+
+	if (flags & DL_FLAG_SYNC_STATE_ONLY) {
+		dev_dbg(consumer,
+			"Linked as a sync state only consumer to %s\n",
+			dev_name(supplier));
 		goto out;
+	}
 reorder:
 	/*
 	 * Move the consumer and all of the devices depending on it to the end
@@ -481,8 +567,16 @@ static void __device_link_del(struct kref *kref)
 }
 #endif /* !CONFIG_SRCU */
 
+static void device_link_put_kref(struct device_link *link)
+{
+	if (link->flags & DL_FLAG_STATELESS)
+		kref_put(&link->kref, __device_link_del);
+	else
+		WARN(1, "Unable to drop a managed device link reference\n");
+}
+
 /**
- * device_link_del - Delete a link between two devices.
+ * device_link_del - Delete a stateless link between two devices.
 * @link: Device link to delete.
 *
 * The caller must ensure proper synchronization of this function with runtime
@@ -494,14 +588,14 @@ void device_link_del(struct device_link *link)
 {
 	device_links_write_lock();
 	device_pm_lock();
-	kref_put(&link->kref, __device_link_del);
+	device_link_put_kref(link);
 	device_pm_unlock();
 	device_links_write_unlock();
 }
 EXPORT_SYMBOL_GPL(device_link_del);
 
 /**
- * device_link_remove - remove a link between two devices.
+ * device_link_remove - Delete a stateless link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 *
@@ -520,7 +614,7 @@ void device_link_remove(void *consumer, struct device *supplier)
 
 	list_for_each_entry(link, &supplier->links.consumers, s_node) {
 		if (link->consumer == consumer) {
-			kref_put(&link->kref, __device_link_del);
+			device_link_put_kref(link);
 			break;
 		}
 	}
@@ -553,7 +647,7 @@ static void device_links_missing_supplier(struct device *dev)
 * mark the link as "consumer probe in progress" to make the supplier removal
 * wait for us to complete (or bad things may happen).
 *
- * Links with the DL_FLAG_STATELESS flag set are ignored.
+ * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
 int device_links_check_suppliers(struct device *dev)
 {
|
||||||
device_links_write_lock();
|
device_links_write_lock();
|
||||||
|
|
||||||
list_for_each_entry(link, &dev->links.suppliers, c_node) {
|
list_for_each_entry(link, &dev->links.suppliers, c_node) {
|
||||||
if (link->flags & DL_FLAG_STATELESS ||
|
if (!(link->flags & DL_FLAG_MANAGED) ||
|
||||||
link->flags & DL_FLAG_SYNC_STATE_ONLY)
|
link->flags & DL_FLAG_SYNC_STATE_ONLY)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
@ -619,7 +713,7 @@ static void __device_links_queue_sync_state(struct device *dev,
|
||||||
return;
|
return;
|
||||||
|
|
||||||
list_for_each_entry(link, &dev->links.consumers, s_node) {
|
list_for_each_entry(link, &dev->links.consumers, s_node) {
|
||||||
if (link->flags & DL_FLAG_STATELESS)
|
if (!(link->flags & DL_FLAG_MANAGED))
|
||||||
continue;
|
continue;
|
||||||
if (link->status != DL_STATE_ACTIVE)
|
if (link->status != DL_STATE_ACTIVE)
|
||||||
return;
|
return;
|
||||||
|
@ -729,7 +823,7 @@ static void __device_links_supplier_defer_sync(struct device *sup)
|
||||||
*
|
*
|
||||||
* Also change the status of @dev's links to suppliers to "active".
|
* Also change the status of @dev's links to suppliers to "active".
|
||||||
*
|
*
|
||||||
* Links with the DL_FLAG_STATELESS flag set are ignored.
|
* Links without the DL_FLAG_MANAGED flag set are ignored.
|
||||||
*/
|
*/
|
||||||
void device_links_driver_bound(struct device *dev)
|
void device_links_driver_bound(struct device *dev)
|
||||||
{
|
{
|
||||||
|
@ -748,11 +842,24 @@ void device_links_driver_bound(struct device *dev)
|
||||||
device_links_write_lock();
|
device_links_write_lock();
|
||||||
|
|
||||||
list_for_each_entry(link, &dev->links.consumers, s_node) {
|
list_for_each_entry(link, &dev->links.consumers, s_node) {
|
||||||
if (link->flags & DL_FLAG_STATELESS)
|
if (!(link->flags & DL_FLAG_MANAGED))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Links created during consumer probe may be in the "consumer
|
||||||
|
* probe" state to start with if the supplier is still probing
|
||||||
|
* when they are created and they may become "active" if the
|
||||||
|
* consumer probe returns first. Skip them here.
|
||||||
|
*/
|
||||||
|
if (link->status == DL_STATE_CONSUMER_PROBE ||
|
||||||
|
link->status == DL_STATE_ACTIVE)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
WARN_ON(link->status != DL_STATE_DORMANT);
|
WARN_ON(link->status != DL_STATE_DORMANT);
|
||||||
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
|
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
|
||||||
|
|
||||||
|
if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
|
||||||
|
driver_deferred_probe_add(link->consumer);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (defer_sync_state_count)
|
if (defer_sync_state_count)
|
||||||
|
@@ -761,7 +868,7 @@ void device_links_driver_bound(struct device *dev)
 		__device_links_queue_sync_state(dev, &sync_list);
 
 	list_for_each_entry(link, &dev->links.suppliers, c_node) {
-		if (link->flags & DL_FLAG_STATELESS)
+		if (!(link->flags & DL_FLAG_MANAGED))
 			continue;
 
 		WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
@@ -781,6 +888,13 @@ void device_links_driver_bound(struct device *dev)
 	device_links_flush_sync_list(&sync_list, dev);
 }
 
+static void device_link_drop_managed(struct device_link *link)
+{
+	link->flags &= ~DL_FLAG_MANAGED;
+	WRITE_ONCE(link->status, DL_STATE_NONE);
+	kref_put(&link->kref, __device_link_del);
+}
+
 /**
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
@ -791,29 +905,60 @@ void device_links_driver_bound(struct device *dev)
|
||||||
* unless they already are in the "supplier unbind in progress" state in which
|
* unless they already are in the "supplier unbind in progress" state in which
|
||||||
* case they need not be updated.
|
* case they need not be updated.
|
||||||
*
|
*
|
||||||
* Links with the DL_FLAG_STATELESS flag set are ignored.
|
* Links without the DL_FLAG_MANAGED flag set are ignored.
|
||||||
*/
|
*/
|
||||||
static void __device_links_no_driver(struct device *dev)
|
static void __device_links_no_driver(struct device *dev)
|
||||||
{
|
{
|
||||||
struct device_link *link, *ln;
|
struct device_link *link, *ln;
|
||||||
|
|
||||||
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
|
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
|
||||||
if (link->flags & DL_FLAG_STATELESS)
|
if (!(link->flags & DL_FLAG_MANAGED))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
|
if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
|
||||||
kref_put(&link->kref, __device_link_del);
|
device_link_drop_managed(link);
|
||||||
else if (link->status != DL_STATE_SUPPLIER_UNBIND)
|
else if (link->status == DL_STATE_CONSUMER_PROBE ||
|
||||||
|
link->status == DL_STATE_ACTIVE)
|
||||||
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
|
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
|
||||||
}
|
}
|
||||||
|
|
||||||
dev->links.status = DL_DEV_NO_DRIVER;
|
dev->links.status = DL_DEV_NO_DRIVER;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* device_links_no_driver - Update links after failing driver probe.
|
||||||
|
* @dev: Device whose driver has just failed to probe.
|
||||||
|
*
|
||||||
|
* Clean up leftover links to consumers for @dev and invoke
|
||||||
|
* %__device_links_no_driver() to update links to suppliers for it as
|
||||||
|
* appropriate.
|
||||||
|
*
|
||||||
|
* Links without the DL_FLAG_MANAGED flag set are ignored.
|
||||||
|
*/
|
||||||
void device_links_no_driver(struct device *dev)
|
void device_links_no_driver(struct device *dev)
|
||||||
{
|
{
|
||||||
|
struct device_link *link;
|
||||||
|
|
||||||
device_links_write_lock();
|
device_links_write_lock();
|
||||||
|
|
||||||
|
list_for_each_entry(link, &dev->links.consumers, s_node) {
|
||||||
|
if (!(link->flags & DL_FLAG_MANAGED))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The probe has failed, so if the status of the link is
|
||||||
|
* "consumer probe" or "active", it must have been added by
|
||||||
|
* a probing consumer while this device was still probing.
|
||||||
|
* Change its state to "dormant", as it represents a valid
|
||||||
|
* relationship, but it is not functionally meaningful.
|
||||||
|
*/
|
||||||
|
if (link->status == DL_STATE_CONSUMER_PROBE ||
|
||||||
|
link->status == DL_STATE_ACTIVE)
|
||||||
|
WRITE_ONCE(link->status, DL_STATE_DORMANT);
|
||||||
|
}
|
||||||
|
|
||||||
__device_links_no_driver(dev);
|
__device_links_no_driver(dev);
|
||||||
|
|
||||||
device_links_write_unlock();
|
device_links_write_unlock();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -825,7 +970,7 @@ void device_links_no_driver(struct device *dev)
|
||||||
* invoke %__device_links_no_driver() to update links to suppliers for it as
|
* invoke %__device_links_no_driver() to update links to suppliers for it as
|
||||||
* appropriate.
|
* appropriate.
|
||||||
*
|
*
|
||||||
* Links with the DL_FLAG_STATELESS flag set are ignored.
|
* Links without the DL_FLAG_MANAGED flag set are ignored.
|
||||||
*/
|
*/
|
||||||
void device_links_driver_cleanup(struct device *dev)
|
void device_links_driver_cleanup(struct device *dev)
|
||||||
{
|
{
|
||||||
|
@ -834,7 +979,7 @@ void device_links_driver_cleanup(struct device *dev)
|
||||||
device_links_write_lock();
|
device_links_write_lock();
|
||||||
|
|
||||||
list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
|
list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
|
||||||
if (link->flags & DL_FLAG_STATELESS)
|
if (!(link->flags & DL_FLAG_MANAGED))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
|
WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
|
||||||
|
@ -847,7 +992,7 @@ void device_links_driver_cleanup(struct device *dev)
|
||||||
*/
|
*/
|
||||||
if (link->status == DL_STATE_SUPPLIER_UNBIND &&
|
if (link->status == DL_STATE_SUPPLIER_UNBIND &&
|
||||||
link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
|
link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
|
||||||
kref_put(&link->kref, __device_link_del);
|
device_link_drop_managed(link);
|
||||||
|
|
||||||
WRITE_ONCE(link->status, DL_STATE_DORMANT);
|
WRITE_ONCE(link->status, DL_STATE_DORMANT);
|
||||||
}
|
}
|
||||||
|
@ -870,7 +1015,7 @@ void device_links_driver_cleanup(struct device *dev)
|
||||||
*
|
*
|
||||||
* Return 'false' if there are no probing or active consumers.
|
* Return 'false' if there are no probing or active consumers.
|
||||||
*
|
*
|
||||||
* Links with the DL_FLAG_STATELESS flag set are ignored.
|
* Links without the DL_FLAG_MANAGED flag set are ignored.
|
||||||
*/
|
*/
|
||||||
bool device_links_busy(struct device *dev)
|
bool device_links_busy(struct device *dev)
|
||||||
{
|
{
|
||||||
|
@ -880,7 +1025,7 @@ bool device_links_busy(struct device *dev)
|
||||||
device_links_write_lock();
|
device_links_write_lock();
|
||||||
|
|
||||||
list_for_each_entry(link, &dev->links.consumers, s_node) {
|
list_for_each_entry(link, &dev->links.consumers, s_node) {
|
||||||
if (link->flags & DL_FLAG_STATELESS)
|
if (!(link->flags & DL_FLAG_MANAGED))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (link->status == DL_STATE_CONSUMER_PROBE
|
if (link->status == DL_STATE_CONSUMER_PROBE
|
||||||
|
@ -910,7 +1055,7 @@ bool device_links_busy(struct device *dev)
|
||||||
* driver to unbind and start over (the consumer will not re-probe as we have
|
* driver to unbind and start over (the consumer will not re-probe as we have
|
||||||
* changed the state of the link already).
|
* changed the state of the link already).
|
||||||
*
|
*
|
||||||
* Links with the DL_FLAG_STATELESS flag set are ignored.
|
* Links without the DL_FLAG_MANAGED flag set are ignored.
|
||||||
*/
|
*/
|
||||||
void device_links_unbind_consumers(struct device *dev)
|
void device_links_unbind_consumers(struct device *dev)
|
||||||
{
|
{
|
||||||
|
@ -922,7 +1067,7 @@ void device_links_unbind_consumers(struct device *dev)
|
||||||
list_for_each_entry(link, &dev->links.consumers, s_node) {
|
list_for_each_entry(link, &dev->links.consumers, s_node) {
|
||||||
enum device_link_state status;
|
enum device_link_state status;
|
||||||
|
|
||||||
if (link->flags & DL_FLAG_STATELESS ||
|
if (!(link->flags & DL_FLAG_MANAGED) ||
|
||||||
link->flags & DL_FLAG_SYNC_STATE_ONLY)
|
link->flags & DL_FLAG_SYNC_STATE_ONLY)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
|
|
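Taken together, the device-links hunks above stop keying the state machine on DL_FLAG_STATELESS and instead skip any link whose DL_FLAG_MANAGED bit is clear; device_link_drop_managed() only clears that bit, so a dropped link is ignored from then on. A minimal compilable sketch of the filter idiom follows; the flag values here are illustrative only, not the kernel's actual bit assignments.

    #include <stdio.h>

    #define DL_FLAG_STATELESS       (1u << 0)   /* illustrative values only */
    #define DL_FLAG_MANAGED         (1u << 1)
    #define DL_FLAG_SYNC_STATE_ONLY (1u << 2)

    /* A link is processed by the state machine only while it is managed. */
    static int link_is_ignored(unsigned int flags)
    {
            return !(flags & DL_FLAG_MANAGED) ||
                   (flags & DL_FLAG_SYNC_STATE_ONLY);
    }

    int main(void)
    {
            /* A stateless link never has DL_FLAG_MANAGED set, so the old and
             * new tests agree on it; the new test additionally ignores a link
             * that was managed once but has since been dropped. */
            printf("%d\n", link_is_ignored(DL_FLAG_STATELESS)); /* 1 */
            printf("%d\n", link_is_ignored(DL_FLAG_MANAGED));   /* 0 */
            return 0;
    }
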
@@ -116,7 +116,7 @@ static void deferred_probe_work_func(struct work_struct *work)
 }
 static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
 
-static void driver_deferred_probe_add(struct device *dev)
+void driver_deferred_probe_add(struct device *dev)
 {
 	mutex_lock(&deferred_probe_mutex);
 	if (list_empty(&dev->p->deferred_probe)) {
@ -1531,7 +1531,7 @@ void pm_runtime_remove(struct device *dev)
|
||||||
* runtime PM references to the device, drop the usage counter of the device
|
* runtime PM references to the device, drop the usage counter of the device
|
||||||
* (as many times as needed).
|
* (as many times as needed).
|
||||||
*
|
*
|
||||||
* Links with the DL_FLAG_STATELESS flag set are ignored.
|
* Links with the DL_FLAG_MANAGED flag unset are ignored.
|
||||||
*
|
*
|
||||||
* Since the device is guaranteed to be runtime-active at the point this is
|
* Since the device is guaranteed to be runtime-active at the point this is
|
||||||
* called, nothing else needs to be done here.
|
* called, nothing else needs to be done here.
|
||||||
|
@ -1548,7 +1548,7 @@ void pm_runtime_clean_up_links(struct device *dev)
|
||||||
idx = device_links_read_lock();
|
idx = device_links_read_lock();
|
||||||
|
|
||||||
list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
|
list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
|
||||||
if (link->flags & DL_FLAG_STATELESS)
|
if (!(link->flags & DL_FLAG_MANAGED))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
while (refcount_dec_not_one(&link->rpm_active))
|
while (refcount_dec_not_one(&link->rpm_active))
|
||||||
|
|
|
@ -271,10 +271,12 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
|
||||||
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
|
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
|
||||||
if (err) {
|
if (err) {
|
||||||
virtqueue_kick(vblk->vqs[qid].vq);
|
virtqueue_kick(vblk->vqs[qid].vq);
|
||||||
blk_mq_stop_hw_queue(hctx);
|
/* Don't stop the queue if -ENOMEM: we may have failed to
|
||||||
|
* bounce the buffer due to global resource outage.
|
||||||
|
*/
|
||||||
|
if (err == -ENOSPC)
|
||||||
|
blk_mq_stop_hw_queue(hctx);
|
||||||
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
|
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
|
||||||
/* Out of mem doesn't actually happen, since we fall back
|
|
||||||
* to direct descriptors */
|
|
||||||
if (err == -ENOMEM || err == -ENOSPC)
|
if (err == -ENOMEM || err == -ENOSPC)
|
||||||
return BLK_STS_DEV_RESOURCE;
|
return BLK_STS_DEV_RESOURCE;
|
||||||
return BLK_STS_IOERR;
|
return BLK_STS_IOERR;
|
||||||
|
|
|
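The virtio-blk hunk narrows when the hardware queue is parked: only -ENOSPC (the virtqueue is genuinely full) should stop it, while -ENOMEM is reported back as BLK_STS_DEV_RESOURCE without stopping the queue, since nothing would ever restart it. A compilable sketch of the resulting branch structure; the enum and the global flag stand in for the real block-layer types.

    #include <errno.h>
    #include <stdio.h>

    enum blk_status { BLK_STS_OK, BLK_STS_DEV_RESOURCE, BLK_STS_IOERR };

    static int queue_stopped;

    static enum blk_status handle_add_req_error(int err)
    {
            /* Stopping the queue is only correct when the ring itself is
             * full (-ENOSPC); a transient allocation failure (-ENOMEM)
             * must not park the queue. */
            if (err == -ENOSPC)
                    queue_stopped = 1;
            if (err == -ENOMEM || err == -ENOSPC)
                    return BLK_STS_DEV_RESOURCE;
            return BLK_STS_IOERR;
    }

    int main(void)
    {
            printf("%d stopped=%d\n", handle_add_req_error(-ENOMEM), queue_stopped);
            printf("%d stopped=%d\n", handle_add_req_error(-ENOSPC), queue_stopped);
            return 0;
    }
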
@@ -396,21 +396,28 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
 		return PTR_ERR(name);
 
 	mutex_lock(&dmabuf->lock);
+	spin_lock(&dmabuf->name_lock);
 	if (!list_empty(&dmabuf->attachments)) {
 		ret = -EBUSY;
 		kfree(name);
 		goto out_unlock;
 	}
-	spin_lock(&dmabuf->name_lock);
 	kfree(dmabuf->name);
 	dmabuf->name = name;
-	spin_unlock(&dmabuf->name_lock);
 
 out_unlock:
+	spin_unlock(&dmabuf->name_lock);
 	mutex_unlock(&dmabuf->lock);
 	return ret;
 }
 
+static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf,
+					    enum dma_data_direction direction);
+
+static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
+					  enum dma_data_direction direction);
+
 static long dma_buf_ioctl(struct file *file,
 			  unsigned int cmd, unsigned long arg)
 {
@@ -621,7 +628,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	dmabuf->size = exp_info->size;
 	dmabuf->exp_name = exp_info->exp_name;
 	dmabuf->owner = exp_info->owner;
-	spin_lock_init(&dmabuf->name_lock);
 	init_waitqueue_head(&dmabuf->poll);
 	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
 	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
@@ -646,6 +652,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	dmabuf->file = file;
 
 	mutex_init(&dmabuf->lock);
+	spin_lock_init(&dmabuf->name_lock);
 	INIT_LIST_HEAD(&dmabuf->attachments);
 
 	dma_buf_ref_init(dmabuf);
@@ -1049,6 +1056,7 @@ static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf,
 
 	return ret;
 }
 
 int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
 				     enum dma_data_direction direction,
 				     unsigned int offset, unsigned int len)
@@ -1071,7 +1079,7 @@ int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
 
 	return ret;
 }
-EXPORT_SYMBOL(dma_buf_begin_cpu_access_partial);
+EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access_partial);
 
 /**
  * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
@@ -1099,7 +1107,7 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
 
-int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
+static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
 				   enum dma_data_direction direction)
 {
 	int ret = 0;
@@ -1126,7 +1134,7 @@ int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
 
 	return ret;
 }
-EXPORT_SYMBOL(dma_buf_end_cpu_access_partial);
+EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial);
 
 /**
  * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
@@ -1293,7 +1301,7 @@ int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
 {
 	int ret = 0;
 
-	if (WARN_ON(!dmabuf))
+	if (WARN_ON(!dmabuf) || !flags)
 		return -EINVAL;
 
 	if (dmabuf->ops->get_flags)
@@ -1301,7 +1309,7 @@ int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
 
 	return ret;
 }
-EXPORT_SYMBOL(dma_buf_get_flags);
+EXPORT_SYMBOL_GPL(dma_buf_get_flags);
 
 #ifdef CONFIG_DEBUG_FS
 static int dma_buf_debug_show(struct seq_file *s, void *unused)
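The dma_buf_set_name() hunk widens the name_lock critical section so the attachment check and the name swap happen under one lock acquisition, released on the common exit path; the lock's initialization also moves next to mutex_init(). A reduced userspace model of the reordered locking, with pthread mutexes standing in for the kernel's mutex-plus-spinlock pair (an assumption for the sake of a runnable sketch):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    /* Reduced model of dma_buf_set_name(): name_lock now covers both the
     * attachment check and the name swap, and is dropped at out_unlock. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;      /* dmabuf->lock */
    static pthread_mutex_t name_lock = PTHREAD_MUTEX_INITIALIZER; /* dmabuf->name_lock */
    static char *name;
    static int attachments;

    static int set_name(char *new_name)
    {
            int ret = 0;

            pthread_mutex_lock(&lock);
            pthread_mutex_lock(&name_lock);
            if (attachments) {
                    ret = -1;
                    free(new_name);
                    goto out_unlock;
            }
            free(name);
            name = new_name;
    out_unlock:
            pthread_mutex_unlock(&name_lock);
            pthread_mutex_unlock(&lock);
            return ret;
    }

    int main(void)
    {
            return set_name(strdup("buf0"));
    }
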
@@ -139,13 +139,16 @@ static ssize_t
 efivar_attr_read(struct efivar_entry *entry, char *buf)
 {
 	struct efi_variable *var = &entry->var;
+	unsigned long size = sizeof(var->Data);
 	char *str = buf;
+	int ret;
 
 	if (!entry || !buf)
 		return -EINVAL;
 
-	var->DataSize = 1024;
-	if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+	ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+	var->DataSize = size;
+	if (ret)
 		return -EIO;
 
 	if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
@@ -172,13 +175,16 @@ static ssize_t
 efivar_size_read(struct efivar_entry *entry, char *buf)
 {
 	struct efi_variable *var = &entry->var;
+	unsigned long size = sizeof(var->Data);
 	char *str = buf;
+	int ret;
 
 	if (!entry || !buf)
 		return -EINVAL;
 
-	var->DataSize = 1024;
-	if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+	ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+	var->DataSize = size;
+	if (ret)
 		return -EIO;
 
 	str += sprintf(str, "0x%lx\n", var->DataSize);
@@ -189,12 +195,15 @@ static ssize_t
 efivar_data_read(struct efivar_entry *entry, char *buf)
 {
 	struct efi_variable *var = &entry->var;
+	unsigned long size = sizeof(var->Data);
+	int ret;
 
 	if (!entry || !buf)
 		return -EINVAL;
 
-	var->DataSize = 1024;
-	if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+	ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+	var->DataSize = size;
+	if (ret)
 		return -EIO;
 
 	memcpy(buf, var->Data, var->DataSize);
@@ -263,6 +272,9 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
 	u8 *data;
 	int err;
 
+	if (!entry || !buf)
+		return -EINVAL;
+
 	if (is_compat()) {
 		struct compat_efi_variable *compat;
 
@@ -314,14 +326,16 @@ efivar_show_raw(struct efivar_entry *entry, char *buf)
 {
 	struct efi_variable *var = &entry->var;
 	struct compat_efi_variable *compat;
+	unsigned long datasize = sizeof(var->Data);
 	size_t size;
+	int ret;
 
 	if (!entry || !buf)
 		return 0;
 
-	var->DataSize = 1024;
-	if (efivar_entry_get(entry, &entry->var.Attributes,
-			     &entry->var.DataSize, entry->var.Data))
+	ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data);
+	var->DataSize = datasize;
+	if (ret)
 		return -EIO;
 
 	if (is_compat()) {
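All four efivars read paths previously seeded var->DataSize with a hard-coded 1024 before calling efivar_entry_get(); the fix passes a local bounded by sizeof(var->Data) (the real capacity) and copies the returned length back only afterwards. A reduced, compilable model of the capacity-in/length-out convention; the payload and struct layout are illustrative.

    #include <stddef.h>
    #include <string.h>

    struct efi_var { unsigned long DataSize; unsigned char Data[1024]; };

    /* Stand-in for efivar_entry_get(): *size holds the buffer capacity on
     * entry and the number of bytes actually stored on success. */
    static int entry_get(unsigned long *size, unsigned char *data)
    {
            const unsigned char payload[] = "value";

            if (*size < sizeof(payload))
                    return -1;              /* would overflow the buffer */
            memcpy(data, payload, sizeof(payload));
            *size = sizeof(payload);
            return 0;
    }

    static int var_read(struct efi_var *var)
    {
            unsigned long size = sizeof(var->Data);  /* capacity in */
            int ret = entry_get(&size, var->Data);

            var->DataSize = size;                    /* actual length out */
            return ret ? -1 : 0;
    }

    int main(void)
    {
            struct efi_var v;
            return var_read(&v);
    }
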
@@ -45,39 +45,7 @@
 #define __efi_call_virt(f, args...) \
 	__efi_call_virt_pointer(efi.systab->runtime, f, args)
 
-/* efi_runtime_service() function identifiers */
-enum efi_rts_ids {
-	GET_TIME,
-	SET_TIME,
-	GET_WAKEUP_TIME,
-	SET_WAKEUP_TIME,
-	GET_VARIABLE,
-	GET_NEXT_VARIABLE,
-	SET_VARIABLE,
-	QUERY_VARIABLE_INFO,
-	GET_NEXT_HIGH_MONO_COUNT,
-	UPDATE_CAPSULE,
-	QUERY_CAPSULE_CAPS,
-};
-
-/*
- * efi_runtime_work: Details of EFI Runtime Service work
- * @arg<1-5>: EFI Runtime Service function arguments
- * @status: Status of executing EFI Runtime Service
- * @efi_rts_id: EFI Runtime Service function identifier
- * @efi_rts_comp: Struct used for handling completions
- */
-struct efi_runtime_work {
-	void *arg1;
-	void *arg2;
-	void *arg3;
-	void *arg4;
-	void *arg5;
-	efi_status_t status;
-	struct work_struct work;
-	enum efi_rts_ids efi_rts_id;
-	struct completion efi_rts_comp;
-};
+struct efi_runtime_work efi_rts_work;
 
 /*
  * efi_queue_work: Queue efi_runtime_service() and wait until it's done
@@ -91,11 +59,10 @@ struct efi_runtime_work {
  */
 #define efi_queue_work(_rts, _arg1, _arg2, _arg3, _arg4, _arg5)	\
 ({									\
-	struct efi_runtime_work efi_rts_work;				\
 	efi_rts_work.status = EFI_ABORTED;				\
 									\
 	init_completion(&efi_rts_work.efi_rts_comp);			\
-	INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts);		\
+	INIT_WORK(&efi_rts_work.work, efi_call_rts);			\
 	efi_rts_work.arg1 = _arg1;					\
 	efi_rts_work.arg2 = _arg2;					\
 	efi_rts_work.arg3 = _arg3;					\
@@ -191,18 +158,16 @@ extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
 */
 static void efi_call_rts(struct work_struct *work)
 {
-	struct efi_runtime_work *efi_rts_work;
 	void *arg1, *arg2, *arg3, *arg4, *arg5;
 	efi_status_t status = EFI_NOT_FOUND;
 
-	efi_rts_work = container_of(work, struct efi_runtime_work, work);
-	arg1 = efi_rts_work->arg1;
-	arg2 = efi_rts_work->arg2;
-	arg3 = efi_rts_work->arg3;
-	arg4 = efi_rts_work->arg4;
-	arg5 = efi_rts_work->arg5;
+	arg1 = efi_rts_work.arg1;
+	arg2 = efi_rts_work.arg2;
+	arg3 = efi_rts_work.arg3;
+	arg4 = efi_rts_work.arg4;
+	arg5 = efi_rts_work.arg5;
 
-	switch (efi_rts_work->efi_rts_id) {
+	switch (efi_rts_work.efi_rts_id) {
 	case GET_TIME:
 		status = efi_call_virt(get_time, (efi_time_t *)arg1,
 				       (efi_time_cap_t *)arg2);
@@ -260,8 +225,8 @@ static void efi_call_rts(struct work_struct *work)
 	 */
 		pr_err("Requested executing invalid EFI Runtime Service.\n");
 	}
-	efi_rts_work->status = status;
-	complete(&efi_rts_work->efi_rts_comp);
+	efi_rts_work.status = status;
+	complete(&efi_rts_work.efi_rts_comp);
 }
 
 static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
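Moving efi_rts_work from the caller's stack to a single global object means the queued work item no longer points into a stack frame that may have unwound by the time the worker runs; the worker then reads its arguments directly instead of recovering them with container_of(). A reduced, compilable model of that shape (the struct fields and the synchronous call stand in for the kernel's workqueue and completion, which serialize real callers):

    #include <stdio.h>

    /* One global work item, like efi_rts_work; the worker used to derive
     * its context from the work pointer, now it references the object. */
    struct runtime_work {
            void *arg1, *arg2;
            int id;
            long status;
    } rts_work;

    static void call_rts(void)      /* stands in for efi_call_rts() */
    {
            switch (rts_work.id) {
            case 0:
                    rts_work.status = (long)(rts_work.arg1 != NULL);
                    break;
            default:
                    rts_work.status = -1;
            }
    }

    int main(void)
    {
            int t = 42;

            rts_work.id = 0;
            rts_work.arg1 = &t;
            call_rts();             /* in the kernel this runs from a workqueue */
            printf("%ld\n", rts_work.status);
            return 0;
    }
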
@@ -364,8 +364,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
 			router.ddc_valid = false;
 			router.cd_valid = false;
 			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
-				uint8_t grph_obj_type=
-				grph_obj_type =
+				uint8_t grph_obj_type =
 				    (le16_to_cpu(path->usGraphicObjIds[j]) &
 				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
@@ -694,11 +694,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	ssize_t result = 0;
 	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
 
-	if (size & 3 || *pos & 3)
+	if (size > 4096 || size & 3 || *pos & 3)
 		return -EINVAL;
 
 	/* decode offset */
-	offset = *pos & GENMASK_ULL(11, 0);
+	offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
 	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
 	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
 	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
@@ -729,7 +729,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	while (size) {
 		uint32_t value;
 
-		value = data[offset++];
+		value = data[result >> 2];
 		r = put_user(value, (uint32_t *)buf);
 		if (r) {
 			result = r;
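The GPR debugfs fix does two things: it clamps the request to the 4 KiB scratch buffer, and it converts the byte offset packed in the low bits of *pos into a dword index (>> 2) while indexing the copy loop by bytes already produced. A compilable sketch of the offset decode; the field layout matches the hunk, the sample values are arbitrary.

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK_ULL(h, l) \
            (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    /* *pos packs a byte offset in bits 0-11 and the SE index in bits
     * 12-19; data[] is a uint32_t array, hence the >> 2. */
    int main(void)
    {
            uint64_t pos = (3ULL << 12) | 0x10;  /* se=3, byte offset 0x10 */
            uint32_t offset = (uint32_t)((pos & GENMASK_ULL(11, 0)) >> 2);
            uint32_t se = (uint32_t)((pos & GENMASK_ULL(19, 12)) >> 12);

            printf("dword offset=%u se=%u\n", offset, se);  /* 4 and 3 */
            return 0;
    }
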
@@ -419,6 +419,7 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 		dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
 		dc_sink_release(aconnector->dc_sink);
 		aconnector->dc_sink = NULL;
+		aconnector->dc_link->cur_link_settings.lane_count = 0;
 	}
 
 	drm_connector_unregister(connector);
@@ -684,8 +684,8 @@ static void hubbub1_det_request_size(
 
 	hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
 
-	swath_bytes_horz_wc = height * blk256_height * bpe;
-	swath_bytes_vert_wc = width * blk256_width * bpe;
+	swath_bytes_horz_wc = width * blk256_height * bpe;
+	swath_bytes_vert_wc = height * blk256_width * bpe;
 
 	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
 			false : /* full 256B request */
@@ -1364,28 +1364,34 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
 	frame.colorspace = HDMI_COLORSPACE_RGB;
 
 	/* Set up colorimetry */
-	switch (hdmi->hdmi_data.enc_out_encoding) {
-	case V4L2_YCBCR_ENC_601:
-		if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
-			frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
-		else
+	if (!hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
+		switch (hdmi->hdmi_data.enc_out_encoding) {
+		case V4L2_YCBCR_ENC_601:
+			if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
+				frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+			else
+				frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+			frame.extended_colorimetry =
+				HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+			break;
+		case V4L2_YCBCR_ENC_709:
+			if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
+				frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+			else
+				frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
+			frame.extended_colorimetry =
+				HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
+			break;
+		default: /* Carries no data */
 			frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+			frame.extended_colorimetry =
+				HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+			break;
+		}
+	} else {
+		frame.colorimetry = HDMI_COLORIMETRY_NONE;
 		frame.extended_colorimetry =
 			HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
-		break;
-	case V4L2_YCBCR_ENC_709:
-		if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
-			frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
-		else
-			frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
-		frame.extended_colorimetry =
-			HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
-		break;
-	default: /* Carries no data */
-		frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
-		frame.extended_colorimetry =
-			HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
-		break;
 	}
 
 	frame.scan_mode = HDMI_SCAN_MODE_NONE;
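The dw-hdmi hunk makes the AVI infoframe advertise ITU/xvYCC colorimetry only when the output bus format is actually YCbCr; RGB output now reports HDMI_COLORIMETRY_NONE instead of a bogus 601/709 value. A minimal sketch of the corrected selection logic, with plain ints standing in for the driver's bus-format and encoding types:

    #include <stdio.h>

    enum colorimetry { COLORIMETRY_NONE, COLORIMETRY_ITU_601, COLORIMETRY_ITU_709 };

    /* Colorimetry is only meaningful for YCbCr output; RGB frames must
     * carry "none", which is what the fix enforces. */
    static enum colorimetry pick_colorimetry(int out_is_rgb, int enc_is_709)
    {
            if (out_is_rgb)
                    return COLORIMETRY_NONE;
            return enc_is_709 ? COLORIMETRY_ITU_709 : COLORIMETRY_ITU_601;
    }

    int main(void)
    {
            /* RGB output: 0 (none); previously this reported 709. */
            printf("%d\n", pick_colorimetry(1, 1));
            return 0;
    }
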
@@ -545,10 +545,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 	}
 
 	DRM_DEBUG_LEASE("Creating lease\n");
+	/* lessee will take the ownership of leases */
 	lessee = drm_lease_create(lessor, &leases);
 
 	if (IS_ERR(lessee)) {
 		ret = PTR_ERR(lessee);
+		idr_destroy(&leases);
 		goto out_leases;
 	}
 
@@ -583,7 +585,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 
 out_leases:
 	put_unused_fd(fd);
-	idr_destroy(&leases);
 
 	DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
 	return ret;
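The drm_lease change is purely about ownership: once drm_lease_create() succeeds, the lessee owns the leases idr, so the unconditional idr_destroy() on the shared error path would have released it a second time; it moves into the only branch where the caller still owns it. A reduced, compilable ownership model using malloc/free in place of the idr:

    #include <stdlib.h>

    /* "leases" must be released exactly once, by whoever owns it when an
     * error unwinds; on success ownership transfers to the lessee. */
    struct lessee { void *leases; };

    static struct lessee *lease_create(void *leases, int fail)
    {
            struct lessee *l;

            if (fail)
                    return NULL;            /* caller still owns leases */
            l = malloc(sizeof(*l));
            if (l)
                    l->leases = leases;     /* ownership transferred */
            return l;
    }

    static int create_lease_ioctl(int fail)
    {
            void *leases = malloc(16);
            struct lessee *l = lease_create(leases, fail);

            if (!l) {
                    free(leases);           /* freed only on this path now */
                    return -1;
            }
            free(l->leases);
            free(l);
            return 0;
    }

    int main(void)
    {
            create_lease_ioctl(0);
            create_lease_ioctl(1);
            return 0;
    }
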
@@ -1722,8 +1722,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
 				      dsi->supplies);
 	if (ret) {
-		dev_info(dev, "failed to get regulators: %d\n", ret);
-		return -EPROBE_DEFER;
+		if (ret != -EPROBE_DEFER)
+			dev_info(dev, "failed to get regulators: %d\n", ret);
+		return ret;
 	}
 
 	dsi->clks = devm_kcalloc(dev,
@@ -1736,9 +1737,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 		dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
 		if (IS_ERR(dsi->clks[i])) {
 			if (strcmp(clk_names[i], "sclk_mipi") == 0) {
-				strcpy(clk_names[i], OLD_SCLK_MIPI_CLK_NAME);
-				i--;
-				continue;
+				dsi->clks[i] = devm_clk_get(dev,
+							OLD_SCLK_MIPI_CLK_NAME);
+				if (!IS_ERR(dsi->clks[i]))
+					continue;
 			}
 
 			dev_info(dev, "failed to get the clock: %s\n",
@@ -272,10 +272,17 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 
-	mutex_lock(&vgpu->vgpu_lock);
-
 	WARN(vgpu->active, "vGPU is still active!\n");
 
+	/*
+	 * remove idr first so later clean can judge if need to stop
+	 * service if no active vgpu.
+	 */
+	mutex_lock(&gvt->lock);
+	idr_remove(&gvt->vgpu_idr, vgpu->id);
+	mutex_unlock(&gvt->lock);
+
+	mutex_lock(&vgpu->vgpu_lock);
 	intel_gvt_debugfs_remove_vgpu(vgpu);
 	intel_vgpu_clean_sched_policy(vgpu);
 	intel_vgpu_clean_submission(vgpu);
@@ -290,7 +297,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 	mutex_unlock(&vgpu->vgpu_lock);
 
 	mutex_lock(&gvt->lock);
-	idr_remove(&gvt->vgpu_idr, vgpu->id);
 	if (idr_is_empty(&gvt->vgpu_idr))
 		intel_gvt_clean_irq(gvt);
 	intel_gvt_update_vgpu_types(gvt);
@@ -506,10 +506,18 @@ static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
 
 static int mtk_drm_crtc_init(struct drm_device *drm,
 			     struct mtk_drm_crtc *mtk_crtc,
-			     struct drm_plane *primary,
-			     struct drm_plane *cursor, unsigned int pipe)
+			     unsigned int pipe)
 {
-	int ret;
+	struct drm_plane *primary = NULL;
+	struct drm_plane *cursor = NULL;
+	int i, ret;
+
+	for (i = 0; i < mtk_crtc->layer_nr; i++) {
+		if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
+			primary = &mtk_crtc->planes[i];
+		else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
+			cursor = &mtk_crtc->planes[i];
+	}
 
 	ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
 					&mtk_crtc_funcs, NULL);
@@ -622,9 +630,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 		goto unprepare;
 	}
 
-	ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
-				mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
-				NULL, pipe);
+	ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe);
 	if (ret < 0)
 		goto unprepare;
 	drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
@@ -343,7 +343,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 		unsigned long **bit, int *max)
 {
 	if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
-	    usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
+	    usage->hid == (HID_UP_MSVENDOR | 0x0003) ||
+	    usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) {
 		/* The fn key on Apple USB keyboards */
 		set_bit(EV_REP, hi->input->evbit);
 		hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
@@ -124,6 +124,8 @@ static const struct hid_device_id hammer_devices[] = {
 		     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
 	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
 		     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) },
+	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+		     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MOONBALL) },
 	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
 		     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
 	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
@@ -468,6 +468,7 @@
 #define USB_DEVICE_ID_GOOGLE_WHISKERS	0x5030
 #define USB_DEVICE_ID_GOOGLE_MASTERBALL	0x503c
 #define USB_DEVICE_ID_GOOGLE_MAGNEMITE	0x503d
+#define USB_DEVICE_ID_GOOGLE_MOONBALL	0x5044
 
 #define USB_VENDOR_ID_GOTOP		0x08f2
 #define USB_DEVICE_ID_SUPER_Q2		0x007f
@@ -341,6 +341,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
 		},
 		.driver_data = (void *)&sipodev_desc
 	},
+	{
+		.ident = "Trekstor SURFBOOK E11B",
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SURFBOOK E11B"),
+		},
+		.driver_data = (void *)&sipodev_desc
+	},
 	{
 		.ident = "Direkt-Tek DTLAPY116-2",
 		.matches = {
@@ -303,6 +303,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
 	}
 
 	pm_runtime_put(dev);
+	ret = 0;
 
 out_disable_clk:
 	if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
@@ -491,7 +491,7 @@ static int msc_configure(struct msc *msc)
 	lockdep_assert_held(&msc->buf_mutex);
 
 	if (msc->mode > MSC_MODE_MULTI)
-		return -ENOTSUPP;
+		return -EINVAL;
 
 	if (msc->mode == MSC_MODE_MULTI)
 		msc_buffer_clear_hw_header(msc);
@@ -942,7 +942,7 @@ static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
 	} else if (msc->mode == MSC_MODE_MULTI) {
 		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
 	} else {
-		ret = -ENOTSUPP;
+		ret = -EINVAL;
 	}
 
 	if (!ret) {
@@ -1165,7 +1165,7 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
 		if (ret >= 0)
 			*ppos = iter->offset;
 	} else {
-		ret = -ENOTSUPP;
+		ret = -EINVAL;
 	}
 
 put_count:
@@ -210,6 +210,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
 		.driver_data = (kernel_ulong_t)&intel_th_2x,
 	},
+	{
+		/* Elkhart Lake CPU */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529),
+		.driver_data = (kernel_ulong_t)&intel_th_2x,
+	},
 	{
 		/* Elkhart Lake */
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),
@@ -248,7 +248,7 @@ static struct gpio_desc *i2c_gpio_get_desc(struct device *dev,
 	if (ret == -ENOENT)
 		retdesc = ERR_PTR(-EPROBE_DEFER);
 
-	if (ret != -EPROBE_DEFER)
+	if (PTR_ERR(retdesc) != -EPROBE_DEFER)
 		dev_err(dev, "error trying to get descriptor: %d\n", ret);
 
 	return retdesc;
@@ -352,10 +352,18 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
 static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
 {
 	struct device *dev;
+	struct i2c_client *client;
 
 	dev = bus_find_device(&i2c_bus_type, NULL, adev,
 			      i2c_acpi_find_match_device);
-	return dev ? i2c_verify_client(dev) : NULL;
+	if (!dev)
+		return NULL;
+
+	client = i2c_verify_client(dev);
+	if (!client)
+		put_device(dev);
+
+	return client;
 }
 
 static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
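The i2c-core-acpi hunk is a reference-count fix: bus_find_device() returns its match with an elevated refcount, and when i2c_verify_client() rejects the device that reference must be dropped or it leaks. A reduced, compilable model of the get/verify/put pattern with a counter standing in for the kobject refcount:

    #include <stdio.h>

    /* find() takes a reference that must be dropped on every path that
     * does not hand the device to the caller. */
    static int refcount;

    static int *find_device(void)
    {
            static int dev;

            refcount++;
            return &dev;
    }

    static void put_device(int *d) { (void)d; refcount--; }

    static int *verify_client(int *d, int ok) { return ok ? d : NULL; }

    static int *find_client(int is_client)
    {
            int *dev = find_device();
            int *client;

            if (!dev)
                    return NULL;
            client = verify_client(dev, is_client);
            if (!client)
                    put_device(dev);    /* the fix: drop the ref on mismatch */
            return client;
    }

    int main(void)
    {
            find_client(0);
            printf("leaked refs: %d\n", refcount);  /* 0 with the fix */
            return 0;
    }
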
@@ -107,7 +107,7 @@ MODULE_DEVICE_TABLE(of, st_accel_of_match);
 
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id st_accel_acpi_match[] = {
-	{"SMO8840", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
+	{"SMO8840", (kernel_ulong_t)LIS2DH12_ACCEL_DEV_NAME},
 	{"SMO8A90", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
 	{ },
 };
@@ -731,6 +731,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
 
 	for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
 		struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit);
+		u32 cor;
 
 		if (!chan)
 			continue;
@@ -739,6 +740,20 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
 		    chan->type == IIO_PRESSURE)
 			continue;
 
+		if (state) {
+			cor = at91_adc_readl(st, AT91_SAMA5D2_COR);
+
+			if (chan->differential)
+				cor |= (BIT(chan->channel) |
+					BIT(chan->channel2)) <<
+					AT91_SAMA5D2_COR_DIFF_OFFSET;
+			else
+				cor &= ~(BIT(chan->channel) <<
+					 AT91_SAMA5D2_COR_DIFF_OFFSET);
+
+			at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
+		}
+
 		if (state) {
 			at91_adc_writel(st, AT91_SAMA5D2_CHER,
 					BIT(chan->channel));
@@ -150,9 +150,10 @@ static int vcnl4200_init(struct vcnl4000_data *data)
 	data->al_scale = 24000;
 	data->vcnl4200_al.reg = VCNL4200_AL_DATA;
 	data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
-	/* Integration time is 50ms, but the experiments show 54ms in total. */
-	data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
-	data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
+	/* Default wait time is 50ms, add 20% tolerance. */
+	data->vcnl4200_al.sampling_rate = ktime_set(0, 60000 * 1000);
+	/* Default wait time is 4.8ms, add 20% tolerance. */
+	data->vcnl4200_ps.sampling_rate = ktime_set(0, 5760 * 1000);
 	data->vcnl4200_al.last_measurement = ktime_set(0, 0);
 	data->vcnl4200_ps.last_measurement = ktime_set(0, 0);
 	mutex_init(&data->vcnl4200_al.lock);
@@ -563,7 +563,7 @@ static int ak8974_read_raw(struct iio_dev *indio_dev,
 		 * We read all axes and discard all but one, for optimized
 		 * reading, use the triggered buffer.
 		 */
-		*val = le16_to_cpu(hw_values[chan->address]);
+		*val = (s16)le16_to_cpu(hw_values[chan->address]);
 
 		ret = IIO_VAL_INT;
 	}
@@ -161,7 +161,8 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
 	return 0;
 }
 
-static void stm32_timer_stop(struct stm32_timer_trigger *priv)
+static void stm32_timer_stop(struct stm32_timer_trigger *priv,
			     struct iio_trigger *trig)
 {
 	u32 ccer, cr1;
 
@@ -179,6 +180,12 @@ static void stm32_timer_stop(struct stm32_timer_trigger *priv)
 	regmap_write(priv->regmap, TIM_PSC, 0);
 	regmap_write(priv->regmap, TIM_ARR, 0);
 
+	/* Force disable master mode */
+	if (stm32_timer_is_trgo2_name(trig->name))
+		regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2, 0);
+	else
+		regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS, 0);
+
 	/* Make sure that registers are updated */
 	regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
 }
@@ -197,7 +204,7 @@ static ssize_t stm32_tt_store_frequency(struct device *dev,
 		return ret;
 
 	if (freq == 0) {
-		stm32_timer_stop(priv);
+		stm32_timer_stop(priv, trig);
 	} else {
 		ret = stm32_timer_start(priv, trig, freq);
 		if (ret)
@@ -190,15 +190,15 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
 	start -= iova_offset(iovad, start);
 	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
 
-	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
-	if (!msi_page)
-		return -ENOMEM;
-
 	for (i = 0; i < num_pages; i++) {
-		msi_page[i].phys = start;
-		msi_page[i].iova = start;
-		INIT_LIST_HEAD(&msi_page[i].list);
-		list_add(&msi_page[i].list, &cookie->msi_page_list);
+		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
+		if (!msi_page)
+			return -ENOMEM;
+
+		msi_page->phys = start;
+		msi_page->iova = start;
+		INIT_LIST_HEAD(&msi_page->list);
+		list_add(&msi_page->list, &cookie->msi_page_list);
 		start += iovad->granule;
 	}
 
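The dma-iommu hunk switches from one kcalloc'd array of msi_page entries to one kmalloc per entry: the cookie teardown frees list nodes individually, which is only valid when each node is its own allocation. A reduced, compilable model of the per-node allocation and node-wise free:

    #include <stdlib.h>

    struct msi_page { struct msi_page *next; unsigned long phys; };

    static struct msi_page *head;

    /* Per-element allocations so that a list walker can free() each node;
     * slicing one array allocation into nodes cannot be freed this way. */
    static int add_pages(unsigned long start, int num_pages, unsigned long granule)
    {
            for (int i = 0; i < num_pages; i++) {
                    struct msi_page *p = malloc(sizeof(*p));

                    if (!p)
                            return -1;
                    p->phys = start;
                    p->next = head;
                    head = p;
                    start += granule;
            }
            return 0;
    }

    int main(void)
    {
            if (add_pages(0x1000, 4, 0x1000))
                    return 1;
            while (head) {
                    struct msi_page *p = head;

                    head = p->next;
                    free(p);        /* valid because each node was malloc'd */
            }
            return 0;
    }
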
@@ -39,6 +39,7 @@
 #include <linux/dmi.h>
 #include <linux/slab.h>
 #include <linux/iommu.h>
+#include <linux/limits.h>
 #include <asm/irq_remapping.h>
 #include <asm/iommu_table.h>
 
@@ -139,6 +140,13 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
 
 	BUG_ON(dev->is_virtfn);
 
+	/*
+	 * Ignore devices that have a domain number higher than what can
+	 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
+	 */
+	if (pci_domain_nr(dev->bus) > U16_MAX)
+		return NULL;
+
 	/* Only generate path[] for device addition event */
 	if (event == BUS_NOTIFY_ADD_DEVICE)
 		for (tmp = dev; tmp; tmp = tmp->bus->self)
@@ -451,12 +459,13 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
 
 	/* Check for NUL termination within the designated length */
 	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
-		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+		pr_warn(FW_BUG
			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			   dmi_get_system_info(DMI_BIOS_VENDOR),
			   dmi_get_system_info(DMI_BIOS_VERSION),
			   dmi_get_system_info(DMI_PRODUCT_VERSION));
+		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 		return -EINVAL;
 	}
 	pr_info("ANDD device: %x name: %s\n", andd->device_number,
@@ -482,14 +491,14 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
 			return 0;
 		}
 	}
-	WARN_TAINT(
-		1, TAINT_FIRMWARE_WORKAROUND,
+	pr_warn(FW_BUG
 		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
 		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
-		drhd->reg_base_addr,
+		rhsa->base_address,
 		dmi_get_system_info(DMI_BIOS_VENDOR),
 		dmi_get_system_info(DMI_BIOS_VERSION),
 		dmi_get_system_info(DMI_PRODUCT_VERSION));
+	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 
 	return 0;
 }
@@ -838,14 +847,14 @@ int __init dmar_table_init(void)
 
 static void warn_invalid_dmar(u64 addr, const char *message)
 {
-	WARN_TAINT_ONCE(
-		1, TAINT_FIRMWARE_WORKAROUND,
+	pr_warn_once(FW_BUG
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
+	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 }
 
 static int __ref
@@ -3998,10 +3998,11 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
 
 	/* we know that the this iommu should be at offset 0xa000 from vtbar */
 	drhd = dmar_find_matched_drhd_unit(pdev);
-	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
-			    TAINT_FIRMWARE_WORKAROUND,
-			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
+	if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
+		pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
+		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+	}
 }
 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
 
@@ -5143,8 +5144,10 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
 	u64 phys = 0;
 
 	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
-	if (pte)
-		phys = dma_pte_addr(pte);
+	if (pte && dma_pte_present(pte))
+		phys = dma_pte_addr(pte) +
+			(iova & (BIT_MASK(level_to_offset_bits(level) +
+						VTD_PAGE_SHIFT) - 1));
 
 	return phys;
 }
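The iova_to_phys fix has two parts: only a present leaf PTE may be translated, and the returned address must include the offset bits the walk never translated, whose width depends on the level at which the walk stopped (a superpage leaf covers more offset bits than a 4K page). A compilable sketch of the offset arithmetic; the 9-bits-per-level geometry is the usual x86 layout and is assumed here for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define BIT_MASK(n) ((1ULL << (n)) - 1)

    /* level 1 = 4K leaf, level 2 = 2M superpage leaf: the physical
     * address is the PTE's frame plus the untranslated offset bits. */
    static uint64_t iova_to_phys(uint64_t pte_addr, uint64_t iova, int level)
    {
            int offset_bits = PAGE_SHIFT + (level - 1) * 9;

            return pte_addr + (iova & BIT_MASK(offset_bits));
    }

    int main(void)
    {
            /* 2M superpage: keep 21 offset bits, not just the low 12. */
            printf("%llx\n", (unsigned long long)
                   iova_to_phys(0x40000000ULL, 0x123456ULL, 2));
            return 0;
    }
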
@@ -313,9 +313,16 @@ static const struct i2c_device_id wf_ad7417_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_ad7417_id);
 
+static const struct of_device_id wf_ad7417_of_id[] = {
+	{ .compatible = "ad7417", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, wf_ad7417_of_id);
+
 static struct i2c_driver wf_ad7417_driver = {
 	.driver = {
 		.name = "wf_ad7417",
+		.of_match_table = wf_ad7417_of_id,
 	},
 	.probe = wf_ad7417_probe,
 	.remove = wf_ad7417_remove,
@@ -583,9 +583,16 @@ static const struct i2c_device_id wf_fcu_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_fcu_id);
 
+static const struct of_device_id wf_fcu_of_id[] = {
+	{ .compatible = "fcu", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, wf_fcu_of_id);
+
 static struct i2c_driver wf_fcu_driver = {
 	.driver = {
 		.name = "wf_fcu",
+		.of_match_table = wf_fcu_of_id,
 	},
 	.probe = wf_fcu_probe,
 	.remove = wf_fcu_remove,
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
+#include <linux/of_device.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/io.h>

@@ -92,9 +93,14 @@ static int wf_lm75_probe(struct i2c_client *client,
 			 const struct i2c_device_id *id)
 {
 	struct wf_lm75_sensor *lm;
-	int rc, ds1775 = id->driver_data;
+	int rc, ds1775;
 	const char *name, *loc;
 
+	if (id)
+		ds1775 = id->driver_data;
+	else
+		ds1775 = !!of_device_get_match_data(&client->dev);
+
 	DBG("wf_lm75: creating %s device at address 0x%02x\n",
 	    ds1775 ? "ds1775" : "lm75", client->addr);
 

@@ -165,9 +171,17 @@ static const struct i2c_device_id wf_lm75_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_lm75_id);
 
+static const struct of_device_id wf_lm75_of_id[] = {
+	{ .compatible = "lm75", .data = (void *)0},
+	{ .compatible = "ds1775", .data = (void *)1 },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, wf_lm75_of_id);
+
 static struct i2c_driver wf_lm75_driver = {
 	.driver = {
 		.name = "wf_lm75",
+		.of_match_table = wf_lm75_of_id,
 	},
 	.probe = wf_lm75_probe,
 	.remove = wf_lm75_remove,
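These windfarm hunks all follow one pattern: register an OF match table so the driver binds against device-tree nodes, and make probe tolerate a NULL i2c_device_id, which is what it receives when the match came from the compatible string. A minimal sketch of that pattern, with hypothetical foo_* names that are not part of this series:

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of_device.h>

static const struct of_device_id foo_of_id[] = {
	{ .compatible = "foo,variant-a", .data = (void *)0 },
	{ .compatible = "foo,variant-b", .data = (void *)1 },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_of_id);

static int foo_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	int variant;

	if (id)				/* matched by the i2c id table */
		variant = id->driver_data;
	else				/* matched by the compatible string */
		variant = !!of_device_get_match_data(&client->dev);

	dev_info(&client->dev, "probing variant %d\n", variant);
	return 0;
}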
@@ -168,9 +168,16 @@ static const struct i2c_device_id wf_lm87_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_lm87_id);
 
+static const struct of_device_id wf_lm87_of_id[] = {
+	{ .compatible = "lm87cimt", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, wf_lm87_of_id);
+
 static struct i2c_driver wf_lm87_driver = {
 	.driver = {
 		.name = "wf_lm87",
+		.of_match_table = wf_lm87_of_id,
 	},
 	.probe = wf_lm87_probe,
 	.remove = wf_lm87_remove,
@@ -121,9 +121,16 @@ static const struct i2c_device_id wf_max6690_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_max6690_id);
 
+static const struct of_device_id wf_max6690_of_id[] = {
+	{ .compatible = "max6690", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, wf_max6690_of_id);
+
 static struct i2c_driver wf_max6690_driver = {
 	.driver = {
 		.name = "wf_max6690",
+		.of_match_table = wf_max6690_of_id,
 	},
 	.probe = wf_max6690_probe,
 	.remove = wf_max6690_remove,
@@ -343,9 +343,16 @@ static const struct i2c_device_id wf_sat_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_sat_id);
 
+static const struct of_device_id wf_sat_of_id[] = {
+	{ .compatible = "smu-sat", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, wf_sat_of_id);
+
 static struct i2c_driver wf_sat_driver = {
 	.driver = {
 		.name = "wf_smu_sat",
+		.of_match_table = wf_sat_of_id,
 	},
 	.probe = wf_sat_probe,
 	.remove = wf_sat_remove,
@@ -20,8 +20,13 @@
 struct dm_bio_details {
 	struct gendisk *bi_disk;
 	u8 bi_partno;
+	int __bi_remaining;
 	unsigned long bi_flags;
 	struct bvec_iter bi_iter;
+	bio_end_io_t *bi_end_io;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+	struct bio_integrity_payload *bi_integrity;
+#endif
 };
 
 static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)

@@ -30,6 +35,11 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
 	bd->bi_partno = bio->bi_partno;
 	bd->bi_flags = bio->bi_flags;
 	bd->bi_iter = bio->bi_iter;
+	bd->__bi_remaining = atomic_read(&bio->__bi_remaining);
+	bd->bi_end_io = bio->bi_end_io;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+	bd->bi_integrity = bio_integrity(bio);
+#endif
 }
 
 static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)

@@ -38,6 +48,11 @@ static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
 	bio->bi_partno = bd->bi_partno;
 	bio->bi_flags = bd->bi_flags;
 	bio->bi_iter = bd->bi_iter;
+	atomic_set(&bio->__bi_remaining, bd->__bi_remaining);
+	bio->bi_end_io = bd->bi_end_io;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+	bio->bi_integrity = bd->bi_integrity;
+#endif
 }
 
 #endif
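With these additions, dm_bio_record()/dm_bio_restore() capture everything a target clobbers when it redirects a bio, including the end_io callback and remaining count. A kernel-style sketch of the intended save/mangle/resubmit/restore usage, with hypothetical my_* names and simplified bookkeeping (not compilable standalone); the real dm-integrity wiring follows in the hunks below:

#include <linux/bio.h>
#include "dm-bio-record.h"	/* assumed in-tree path */

struct my_io {
	struct dm_bio_details bio_details;	/* hypothetical per-bio state */
};

static void my_end_io(struct bio *bio)
{
	struct my_io *io = bio->bi_private;	/* simplified lookup */

	dm_bio_restore(&io->bio_details, bio);	/* undo every change below */
	bio_endio(bio);				/* now runs the original end_io */
}

static void my_map(struct my_io *io, struct bio *bio,
		   struct block_device *lower)
{
	dm_bio_record(&io->bio_details, bio);	/* snapshot before changing */
	bio_set_dev(bio, lower);
	bio->bi_end_io = my_end_io;
	generic_make_request(bio);
}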
@@ -792,6 +792,7 @@ static int prepare_unchanged_range(struct bow_context *bc, struct bow_range *br,
 	 */
 	original_type = br->type;
 	sector0 = backup_br->sector;
+	bc->trims_total -= range_size(backup_br);
 	if (backup_br->type == TRIMMED)
 		list_del(&backup_br->trimmed_list);
 	backup_br->type = br->type == SECTOR0_CURRENT ? SECTOR0_CURRENT
@@ -6,6 +6,8 @@
  * This file is released under the GPL.
  */
 
+#include "dm-bio-record.h"
+
 #include <linux/compiler.h>
 #include <linux/module.h>
 #include <linux/device-mapper.h>

@@ -276,11 +278,7 @@ struct dm_integrity_io {
 
 	struct completion *completion;
 
-	struct gendisk *orig_bi_disk;
-	u8 orig_bi_partno;
-	bio_end_io_t *orig_bi_end_io;
-	struct bio_integrity_payload *orig_bi_integrity;
-	struct bvec_iter orig_bi_iter;
+	struct dm_bio_details bio_details;
 };
 
 struct journal_completion {

@@ -1254,14 +1252,9 @@ static void integrity_end_io(struct bio *bio)
 {
 	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
 
-	bio->bi_iter = dio->orig_bi_iter;
-	bio->bi_disk = dio->orig_bi_disk;
-	bio->bi_partno = dio->orig_bi_partno;
-	if (dio->orig_bi_integrity) {
-		bio->bi_integrity = dio->orig_bi_integrity;
+	dm_bio_restore(&dio->bio_details, bio);
+	if (bio->bi_integrity)
 		bio->bi_opf |= REQ_INTEGRITY;
-	}
-	bio->bi_end_io = dio->orig_bi_end_io;
 
 	if (dio->completion)
 		complete(dio->completion);

@@ -1347,7 +1340,7 @@ static void integrity_metadata(struct work_struct *w)
 		}
 	}
 
-	__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
+	__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
 		unsigned pos;
 		char *mem, *checksums_ptr;
 

@@ -1391,7 +1384,7 @@ static void integrity_metadata(struct work_struct *w)
 		if (likely(checksums != checksums_onstack))
 			kfree(checksums);
 	} else {
-		struct bio_integrity_payload *bip = dio->orig_bi_integrity;
+		struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
 
 		if (bip) {
 			struct bio_vec biv;

@@ -1795,20 +1788,13 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
 	} else
 		dio->completion = NULL;
 
-	dio->orig_bi_iter = bio->bi_iter;
-
-	dio->orig_bi_disk = bio->bi_disk;
-	dio->orig_bi_partno = bio->bi_partno;
+	dm_bio_record(&dio->bio_details, bio);
 	bio_set_dev(bio, ic->dev->bdev);
-
-	dio->orig_bi_integrity = bio_integrity(bio);
 	bio->bi_integrity = NULL;
 	bio->bi_opf &= ~REQ_INTEGRITY;
-
-	dio->orig_bi_end_io = bio->bi_end_io;
 	bio->bi_end_io = integrity_end_io;
 	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
 
 	generic_make_request(bio);
 
 	if (need_sync_io) {
@@ -2126,8 +2126,8 @@ static int altera_execute(struct altera_state *astate,
 	return status;
 }
 
-static int altera_get_note(u8 *p, s32 program_size,
-			s32 *offset, char *key, char *value, int length)
+static int altera_get_note(u8 *p, s32 program_size, s32 *offset,
+			   char *key, char *value, int keylen, int vallen)
 /*
  * Gets key and value of NOTE fields in the JBC file.
  * Can be called in two modes: if offset pointer is NULL,

@@ -2184,7 +2184,7 @@ static int altera_get_note(u8 *p, s32 program_size,
 					&p[note_table + (8 * i) + 4])];
 
 				if (value != NULL)
-					strlcpy(value, value_ptr, length);
+					strlcpy(value, value_ptr, vallen);
 
 			}
 		}

@@ -2203,13 +2203,13 @@ static int altera_get_note(u8 *p, s32 program_size,
 			strlcpy(key, &p[note_strings +
 					get_unaligned_be32(
 					&p[note_table + (8 * i)])],
-					length);
+					keylen);
 
 			if (value != NULL)
 				strlcpy(value, &p[note_strings +
 					get_unaligned_be32(
 					&p[note_table + (8 * i) + 4])],
-					length);
+					vallen);
 
 			*offset = i + 1;
 		}

@@ -2463,7 +2463,7 @@ int altera_init(struct altera_config *config, const struct firmware *fw)
 		       __func__, (format_version == 2) ? "Jam STAPL" :
 					"pre-standardized Jam 1.1");
 		while (altera_get_note((u8 *)fw->data, fw->size,
 					&offset, key, value, 32, 256) == 0)
 			printk(KERN_INFO "%s: NOTE \"%s\" = \"%s\"\n",
 			       __func__, key, value);
 	}
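The fix here is a classic bounded-copy bug: one shared length (256) was used for both destination buffers even though the caller's key buffer is much smaller (it now passes 32 for the key), so a long NOTE key could overflow the key buffer. A standalone demonstration of per-destination bounds; copy_bounded is a local stand-in for strlcpy, since glibc lacks it:

#include <stdio.h>
#include <string.h>

/* strlcpy-style helper: copies at most size-1 bytes, always terminates. */
static size_t copy_bounded(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len >= size ? size - 1 : len;
		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char key[33], value[256];		/* buffers of different sizes */
	const char *note_key = "CREATOR";	/* would be parsed from the file */
	const char *note_val = "example composer string";

	/* Each destination gets its own bound; one shared "length" of 256
	 * would let a long key overrun key[33]. */
	copy_bounded(key, note_key, sizeof(key));
	copy_bounded(value, note_val, sizeof(value));
	printf("NOTE \"%s\" = \"%s\"\n", key, value);
	return 0;
}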
@@ -369,6 +369,7 @@ static const struct pcr_ops rts522a_pcr_ops = {
 void rts522a_init_params(struct rtsx_pcr *pcr)
 {
 	rts5227_init_params(pcr);
+	pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
 	pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
 }
 

@@ -623,6 +623,7 @@ static const struct pcr_ops rts524a_pcr_ops = {
 void rts524a_init_params(struct rtsx_pcr *pcr)
 {
 	rts5249_init_params(pcr);
+	pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
 	pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
 	pcr->option.ltr_l1off_snooze_sspwrgate =
 		LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;

@@ -731,6 +732,7 @@ static const struct pcr_ops rts525a_pcr_ops = {
 void rts525a_init_params(struct rtsx_pcr *pcr)
 {
 	rts5249_init_params(pcr);
+	pcr->tx_initial_phase = SET_CLOCK_PHASE(25, 29, 11);
 	pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
 	pcr->option.ltr_l1off_snooze_sspwrgate =
 		LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
@@ -712,7 +712,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
 	pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
 	pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
 	pcr->aspm_en = ASPM_L1_EN;
-	pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
+	pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
 	pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
 	pcr->ic_version = rts5260_get_ic_version(pcr);
@@ -938,6 +938,8 @@ config MMC_SDHCI_XENON
 config MMC_SDHCI_OMAP
 	tristate "TI SDHCI Controller Support"
 	depends on MMC_SDHCI_PLTFM && OF
+	select THERMAL
+	imply TI_SOC_THERMAL
 	help
 	  This selects the Secure Digital Host Controller Interface (SDHCI)
 	  support present in TI's DRA7 SOCs. The controller supports
@@ -618,19 +618,22 @@ static int sd_change_phase(struct realtek_pci_sdmmc *host,
 			   u8 sample_point, bool rx)
 {
 	struct rtsx_pcr *pcr = host->pcr;
-
+	u16 SD_VP_CTL = 0;
 	dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n",
 		__func__, rx ? "RX" : "TX", sample_point);
 
 	rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
-	if (rx)
+	if (rx) {
+		SD_VP_CTL = SD_VPRX_CTL;
 		rtsx_pci_write_register(pcr, SD_VPRX_CTL,
 			PHASE_SELECT_MASK, sample_point);
-	else
+	} else {
+		SD_VP_CTL = SD_VPTX_CTL;
 		rtsx_pci_write_register(pcr, SD_VPTX_CTL,
 			PHASE_SELECT_MASK, sample_point);
-	rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
-	rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET,
+	}
+	rtsx_pci_write_register(pcr, SD_VP_CTL, PHASE_NOT_RESET, 0);
+	rtsx_pci_write_register(pcr, SD_VP_CTL, PHASE_NOT_RESET,
 			PHASE_NOT_RESET);
 	rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0);
 	rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
@@ -126,7 +126,8 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
 {
 	sdhci_reset(host, mask);
 
-	if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+	if ((host->mmc->caps & MMC_CAP_NONREMOVABLE)
+	    || mmc_gpio_get_cd(host->mmc) >= 0)
 		sdhci_at91_set_force_card_detect(host);
 }
 

@@ -405,8 +406,11 @@ static int sdhci_at91_probe(struct platform_device *pdev)
 	 * detection procedure using the SDMCC_CD signal is bypassed.
 	 * This bit is reset when a software reset for all command is performed
 	 * so we need to implement our own reset function to set back this bit.
+	 *
+	 * WA: SAMA5D2 doesn't drive CMD if using CD GPIO line.
 	 */
-	if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+	if ((host->mmc->caps & MMC_CAP_NONREMOVABLE)
+	    || mmc_gpio_get_cd(host->mmc) >= 0)
 		sdhci_at91_set_force_card_detect(host);
 
 	pm_runtime_put_autosuspend(&pdev->dev);
@@ -27,6 +27,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/sys_soc.h>
+#include <linux/thermal.h>
 
 #include "sdhci-pltfm.h"
 

@@ -115,6 +116,7 @@ struct sdhci_omap_host {
 
 	struct pinctrl		*pinctrl;
 	struct pinctrl_state	**pinctrl_state;
+	bool			is_tuning;
 };
 
 static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host);
@@ -289,15 +291,19 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
 	struct sdhci_host *host = mmc_priv(mmc);
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+	struct thermal_zone_device *thermal_dev;
 	struct device *dev = omap_host->dev;
 	struct mmc_ios *ios = &mmc->ios;
 	u32 start_window = 0, max_window = 0;
+	bool single_point_failure = false;
 	bool dcrc_was_enabled = false;
 	u8 cur_match, prev_match = 0;
 	u32 length = 0, max_len = 0;
 	u32 phase_delay = 0;
+	int temperature;
 	int ret = 0;
 	u32 reg;
+	int i;
 
 	pltfm_host = sdhci_priv(host);
 	omap_host = sdhci_pltfm_priv(pltfm_host);
@@ -311,6 +317,16 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
 	if (ios->timing == MMC_TIMING_UHS_SDR50 && !(reg & CAPA2_TSDR50))
 		return 0;
 
+	thermal_dev = thermal_zone_get_zone_by_name("cpu_thermal");
+	if (IS_ERR(thermal_dev)) {
+		dev_err(dev, "Unable to get thermal zone for tuning\n");
+		return PTR_ERR(thermal_dev);
+	}
+
+	ret = thermal_zone_get_temp(thermal_dev, &temperature);
+	if (ret)
+		return ret;
+
 	reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_DLL);
 	reg |= DLL_SWT;
 	sdhci_omap_writel(omap_host, SDHCI_OMAP_DLL, reg);
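The thermal lookup above is the reason for the new Kconfig dependencies: tuning now consults the SoC temperature before choosing a DLL phase. A minimal sketch of the query in isolation; "cpu_thermal" is the zone name this driver expects, and temperatures come back in millidegrees Celsius (35000 == 35 C):

#include <linux/err.h>
#include <linux/thermal.h>

/* Hypothetical helper: fetch the SoC temperature in millidegrees, or a
 * negative errno (e.g. if no such zone is registered), mirroring the
 * error handling in the hunk above. */
static int read_soc_temp_mc(int *temp_mc)
{
	struct thermal_zone_device *tz;

	tz = thermal_zone_get_zone_by_name("cpu_thermal");
	if (IS_ERR(tz))
		return PTR_ERR(tz);

	return thermal_zone_get_temp(tz, temp_mc);
}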
@@ -326,6 +342,13 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
 		dcrc_was_enabled = true;
 	}
 
+	omap_host->is_tuning = true;
+
+	/*
+	 * Stage 1: Search for a maximum pass window ignoring any
+	 * any single point failures. If the tuning value ends up
+	 * near it, move away from it in stage 2 below
+	 */
 	while (phase_delay <= MAX_PHASE_DELAY) {
 		sdhci_omap_set_dll(omap_host, phase_delay);
 

@@ -333,10 +356,15 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
 		if (cur_match) {
 			if (prev_match) {
 				length++;
+			} else if (single_point_failure) {
+				/* ignore single point failure */
+				length++;
 			} else {
 				start_window = phase_delay;
 				length = 1;
 			}
+		} else {
+			single_point_failure = prev_match;
 		}
 
 		if (length > max_len) {
@@ -354,18 +382,84 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
 		goto tuning_error;
 	}
 
+	/*
+	 * Assign tuning value as a ratio of maximum pass window based
+	 * on temperature
+	 */
+	if (temperature < -20000)
+		phase_delay = min(max_window + 4 * (max_len - 1) - 24,
+				  max_window +
+				  DIV_ROUND_UP(13 * max_len, 16) * 4);
+	else if (temperature < 20000)
+		phase_delay = max_window + DIV_ROUND_UP(9 * max_len, 16) * 4;
+	else if (temperature < 40000)
+		phase_delay = max_window + DIV_ROUND_UP(8 * max_len, 16) * 4;
+	else if (temperature < 70000)
+		phase_delay = max_window + DIV_ROUND_UP(7 * max_len, 16) * 4;
+	else if (temperature < 90000)
+		phase_delay = max_window + DIV_ROUND_UP(5 * max_len, 16) * 4;
+	else if (temperature < 120000)
+		phase_delay = max_window + DIV_ROUND_UP(4 * max_len, 16) * 4;
+	else
+		phase_delay = max_window + DIV_ROUND_UP(3 * max_len, 16) * 4;
+
+	/*
+	 * Stage 2: Search for a single point failure near the chosen tuning
+	 * value in two steps. First in the +3 to +10 range and then in the
+	 * +2 to -10 range. If found, move away from it in the appropriate
+	 * direction by the appropriate amount depending on the temperature.
+	 */
+	for (i = 3; i <= 10; i++) {
+		sdhci_omap_set_dll(omap_host, phase_delay + i);
+
+		if (mmc_send_tuning(mmc, opcode, NULL)) {
+			if (temperature < 10000)
+				phase_delay += i + 6;
+			else if (temperature < 20000)
+				phase_delay += i - 12;
+			else if (temperature < 70000)
+				phase_delay += i - 8;
+			else
+				phase_delay += i - 6;
+
+			goto single_failure_found;
+		}
+	}
+
+	for (i = 2; i >= -10; i--) {
+		sdhci_omap_set_dll(omap_host, phase_delay + i);
+
+		if (mmc_send_tuning(mmc, opcode, NULL)) {
+			if (temperature < 10000)
+				phase_delay += i + 12;
+			else if (temperature < 20000)
+				phase_delay += i + 8;
+			else if (temperature < 70000)
+				phase_delay += i + 8;
+			else if (temperature < 90000)
+				phase_delay += i + 10;
+			else
+				phase_delay += i + 12;
+
+			goto single_failure_found;
+		}
+	}
+
+single_failure_found:
 	reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
 	if (!(reg & AC12_SCLK_SEL)) {
 		ret = -EIO;
 		goto tuning_error;
 	}
 
-	phase_delay = max_window + 4 * (max_len >> 1);
 	sdhci_omap_set_dll(omap_host, phase_delay);
 
+	omap_host->is_tuning = false;
+
 	goto ret;
 
 tuning_error:
+	omap_host->is_tuning = false;
 	dev_err(dev, "Tuning failed\n");
 	sdhci_omap_disable_tuning(omap_host);
 
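Instead of always parking at the middle of the pass window, the tuning value is now a temperature-dependent fraction of the window (DLL steps come in units of 4). A standalone sketch of that table, with a hypothetical pick_phase helper; it skips the sub--20 C clamp for brevity:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Pick a tuning point as a temperature-dependent fraction (num/16) of the
 * measured pass window; max_window is the window start, max_len its length,
 * both in DLL steps of 4. Thresholds are millidegrees C. */
static unsigned int pick_phase(unsigned int max_window, unsigned int max_len,
			       int temp_mc)
{
	unsigned int num;

	if (temp_mc < 20000)
		num = 9;	/* colder parts tune later in the window */
	else if (temp_mc < 40000)
		num = 8;
	else if (temp_mc < 70000)
		num = 7;
	else if (temp_mc < 90000)
		num = 5;
	else if (temp_mc < 120000)
		num = 4;
	else
		num = 3;

	return max_window + DIV_ROUND_UP(num * max_len, 16) * 4;
}

int main(void)
{
	/* A 20-step window starting at step 40, die at 35 C -> step 80. */
	printf("%u\n", pick_phase(40, 20, 35000));
	return 0;
}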
@@ -695,6 +789,55 @@ static void sdhci_omap_set_uhs_signaling(struct sdhci_host *host,
 	sdhci_omap_start_clock(omap_host);
 }
 
+void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+	/* Don't reset data lines during tuning operation */
+	if (omap_host->is_tuning)
+		mask &= ~SDHCI_RESET_DATA;
+
+	sdhci_reset(host, mask);
+}
+
+#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\
+		      SDHCI_INT_TIMEOUT)
+#define CMD_MASK (CMD_ERR_MASK | SDHCI_INT_RESPONSE)
+
+static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+	if (omap_host->is_tuning && host->cmd && !host->data_early &&
+	    (intmask & CMD_ERR_MASK)) {
+
+		/*
+		 * Since we are not resetting data lines during tuning
+		 * operation, data error or data complete interrupts
+		 * might still arrive. Mark this request as a failure
+		 * but still wait for the data interrupt
+		 */
+		if (intmask & SDHCI_INT_TIMEOUT)
+			host->cmd->error = -ETIMEDOUT;
+		else
+			host->cmd->error = -EILSEQ;
+
+		host->cmd = NULL;
+
+		/*
+		 * Sometimes command error interrupts and command complete
+		 * interrupt will arrive together. Clear all command related
+		 * interrupts here.
+		 */
+		sdhci_writel(host, intmask & CMD_MASK, SDHCI_INT_STATUS);
+		intmask &= ~CMD_MASK;
+	}
+
+	return intmask;
+}
+
 static struct sdhci_ops sdhci_omap_ops = {
 	.set_clock = sdhci_omap_set_clock,
 	.set_power = sdhci_omap_set_power,

@@ -703,8 +846,9 @@ static struct sdhci_ops sdhci_omap_ops = {
 	.get_min_clock = sdhci_omap_get_min_clock,
 	.set_bus_width = sdhci_omap_set_bus_width,
 	.platform_send_init_74_clocks = sdhci_omap_init_74_clocks,
-	.reset = sdhci_reset,
+	.reset = sdhci_omap_reset,
 	.set_uhs_signaling = sdhci_omap_set_uhs_signaling,
+	.irq = sdhci_omap_irq,
 };
 
 static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
@@ -66,11 +66,6 @@ struct arp_pkt {
 };
 #pragma pack()
 
-static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
-{
-	return (struct arp_pkt *)skb_network_header(skb);
-}
-
 /* Forward declaration */
 static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
 				      bool strict_match);
|
||||||
spin_unlock(&bond->mode_lock);
|
spin_unlock(&bond->mode_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
|
static struct slave *rlb_choose_channel(struct sk_buff *skb,
|
||||||
|
struct bonding *bond,
|
||||||
|
const struct arp_pkt *arp)
|
||||||
{
|
{
|
||||||
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
|
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
|
||||||
struct arp_pkt *arp = arp_pkt(skb);
|
|
||||||
struct slave *assigned_slave, *curr_active_slave;
|
struct slave *assigned_slave, *curr_active_slave;
|
||||||
struct rlb_client_info *client_info;
|
struct rlb_client_info *client_info;
|
||||||
u32 hash_index = 0;
|
u32 hash_index = 0;
|
||||||
|
@ -668,8 +664,12 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
|
||||||
*/
|
*/
|
||||||
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
|
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
|
||||||
{
|
{
|
||||||
struct arp_pkt *arp = arp_pkt(skb);
|
|
||||||
struct slave *tx_slave = NULL;
|
struct slave *tx_slave = NULL;
|
||||||
|
struct arp_pkt *arp;
|
||||||
|
|
||||||
|
if (!pskb_network_may_pull(skb, sizeof(*arp)))
|
||||||
|
return NULL;
|
||||||
|
arp = (struct arp_pkt *)skb_network_header(skb);
|
||||||
|
|
||||||
/* Don't modify or load balance ARPs that do not originate locally
|
/* Don't modify or load balance ARPs that do not originate locally
|
||||||
* (e.g.,arrive via a bridge).
|
* (e.g.,arrive via a bridge).
|
||||||
|
@@ -679,7 +679,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
 
 	if (arp->op_code == htons(ARPOP_REPLY)) {
 		/* the arp must be sent on the selected rx channel */
-		tx_slave = rlb_choose_channel(skb, bond);
+		tx_slave = rlb_choose_channel(skb, bond, arp);
 		if (tx_slave)
 			bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr,
 					  tx_slave->dev->addr_len);

@@ -690,7 +690,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
 		 * When the arp reply is received the entry will be updated
 		 * with the correct unicast address of the client.
 		 */
-		rlb_choose_channel(skb, bond);
+		rlb_choose_channel(skb, bond, arp);
 
 		/* The ARP reply packets must be delayed so that
 		 * they can cancel out the influence of the ARP request.
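The bonding hunks enforce one rule: never dereference a header through skb_network_header() until pskb_network_may_pull() has confirmed that many bytes are in the linear area (the pull may also reallocate skb->data, which is why the old arp_pkt() helper taken before the check was unsafe). A kernel-style sketch of the pattern, with a hypothetical parse_arp caller and a stand-in header layout:

#include <linux/if_arp.h>
#include <linux/skbuff.h>

struct my_arp_hdr {		/* stand-in for the driver's arp_pkt layout */
	__be16 hw_addr_space;
	__be16 prot_addr_space;
	u8 hw_addr_len;
	u8 prot_addr_len;
	__be16 op_code;
};

static int parse_arp(struct sk_buff *skb)
{
	struct my_arp_hdr *arp;

	/* May reallocate skb->data, so take the pointer only afterwards. */
	if (!pskb_network_may_pull(skb, sizeof(*arp)))
		return -EINVAL;

	arp = (struct my_arp_hdr *)skb_network_header(skb);
	return arp->op_code == htons(ARPOP_REPLY);
}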
@@ -892,6 +892,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
 				= { .len = sizeof(struct can_bittiming) },
 	[IFLA_CAN_DATA_BITTIMING_CONST]
 				= { .len = sizeof(struct can_bittiming_const) },
+	[IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
 };
 
 static int can_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -2168,7 +2168,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
 		return -ENOSPC;
 
 	index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
-	if (index > RXCHK_BRCM_TAG_MAX)
+	if (index >= RXCHK_BRCM_TAG_MAX)
 		return -ENOSPC;
 
 	/* Location is the classification ID, and index is the position
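The off-by-one matters because find_first_zero_bit() returns the bitmap size itself, not size+1, when every bit is set. A userspace model of the helper that shows why the check must be >= rather than >:

#include <stdio.h>

/* Simplified model of the kernel helper: index of the first zero bit,
 * or `size` when every bit is set - which is why a full table must be
 * rejected with index >= size. */
static unsigned int find_first_zero_bit(const unsigned long *map,
					unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		if (!(map[i / (8 * sizeof(long))] &
		      (1UL << (i % (8 * sizeof(long))))))
			return i;
	return size;
}

int main(void)
{
	unsigned long full = ~0UL;

	/* With all 8 filter slots used, the old `> 8` check let index == 8
	 * through and indexed one past the end of the table. */
	printf("%u\n", find_first_zero_bit(&full, 8));	/* prints 8 */
	return 0;
}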
@@ -8097,13 +8097,13 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
 	struct bnxt *bp = netdev_priv(dev);
 
 	if (netif_running(dev))
-		bnxt_close_nic(bp, false, false);
+		bnxt_close_nic(bp, true, false);
 
 	dev->mtu = new_mtu;
 	bnxt_set_ring_params(bp);
 
 	if (netif_running(dev))
-		return bnxt_open_nic(bp, false, false);
+		return bnxt_open_nic(bp, true, false);
 
 	return 0;
 }
@@ -2476,15 +2476,15 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
 		return -EINVAL;
 	}
 
-	cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
+	cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
 	if (cycle > 0xFFFF) {
 		pr_err("Rx coalesced usec exceed hardware limitation\n");
 		return -EINVAL;
 	}
 
-	cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
+	cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
 	if (cycle > 0xFFFF) {
-		pr_err("Rx coalesced usec exceed hardware limitation\n");
+		pr_err("Tx coalesced usec exceed hardware limitation\n");
 		return -EINVAL;
 	}
 
@@ -309,6 +309,7 @@ static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,
 	}
 
 	hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+	hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
 
 	hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT;
 	hw_ioctxt.cmdq_depth = 0;

@@ -104,8 +104,8 @@ struct hinic_cmd_hw_ioctxt {
 
 	u8	rsvd2;
 	u8	rsvd3;
+	u8	ppf_idx;
 	u8	rsvd4;
-	u8	rsvd5;
 
 	u16	rq_depth;
 	u16	rx_buf_sz_idx;

@@ -146,6 +146,7 @@
 #define HINIC_HWIF_FUNC_IDX(hwif)	((hwif)->attr.func_idx)
 #define HINIC_HWIF_PCI_INTF(hwif)	((hwif)->attr.pci_intf_idx)
 #define HINIC_HWIF_PF_IDX(hwif)		((hwif)->attr.pf_idx)
+#define HINIC_HWIF_PPF_IDX(hwif)	((hwif)->attr.ppf_idx)
 
 #define HINIC_FUNC_TYPE(hwif)		((hwif)->attr.func_type)
 #define HINIC_IS_PF(hwif)		(HINIC_FUNC_TYPE(hwif) == HINIC_PF)

@@ -103,6 +103,7 @@ struct hinic_rq {
 
 	struct hinic_wq		*wq;
 
+	struct cpumask		affinity_mask;
 	u32			irq;
 	u16			msix_entry;
 
@@ -414,7 +414,6 @@ static int rx_request_irq(struct hinic_rxq *rxq)
 	struct hinic_hwdev *hwdev = nic_dev->hwdev;
 	struct hinic_rq *rq = rxq->rq;
 	struct hinic_qp *qp;
-	struct cpumask mask;
 	int err;
 
 	rx_add_napi(rxq);

@@ -431,8 +430,8 @@ static int rx_request_irq(struct hinic_rxq *rxq)
 	}
 
 	qp = container_of(rq, struct hinic_qp, rq);
-	cpumask_set_cpu(qp->q_id % num_online_cpus(), &mask);
-	return irq_set_affinity_hint(rq->irq, &mask);
+	cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
+	return irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
 }
 
 static void rx_free_irq(struct hinic_rxq *rxq)
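The mask moved off the stack because irq_set_affinity_hint() stores the pointer it is given rather than copying the mask, so the storage must live as long as the hint is installed. A kernel-style sketch of the pattern, with a hypothetical per-queue struct:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

struct my_queue {
	u32 irq;
	struct cpumask affinity_mask;	/* lives as long as the queue */
};

static int my_queue_set_affinity(struct my_queue *q, unsigned int qid)
{
	/* BAD: "struct cpumask mask;" here would leave the stored hint
	 * pointing at a dead stack frame after this function returns. */
	cpumask_set_cpu(qid % num_online_cpus(), &q->affinity_mask);
	return irq_set_affinity_hint(q->irq, &q->affinity_mask);
}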
@@ -832,14 +832,17 @@ static irqreturn_t ks_irq(int irq, void *pw)
 {
 	struct net_device *netdev = pw;
 	struct ks_net *ks = netdev_priv(netdev);
+	unsigned long flags;
 	u16 status;
 
+	spin_lock_irqsave(&ks->statelock, flags);
 	/*this should be the first in IRQ handler */
 	ks_save_cmd_reg(ks);
 
 	status = ks_rdreg16(ks, KS_ISR);
 	if (unlikely(!status)) {
 		ks_restore_cmd_reg(ks);
+		spin_unlock_irqrestore(&ks->statelock, flags);
 		return IRQ_NONE;
 	}
 

@@ -865,6 +868,7 @@ static irqreturn_t ks_irq(int irq, void *pw)
 		ks->netdev->stats.rx_over_errors++;
 	/* this should be the last in IRQ handler*/
 	ks_restore_cmd_reg(ks);
+	spin_unlock_irqrestore(&ks->statelock, flags);
 	return IRQ_HANDLED;
 }
 

@@ -934,6 +938,7 @@ static int ks_net_stop(struct net_device *netdev)
 
 	/* shutdown RX/TX QMU */
 	ks_disable_qmu(ks);
+	ks_disable_int(ks);
 
 	/* set powermode to soft power down to save power */
 	ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
@@ -990,10 +995,9 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	netdev_tx_t retv = NETDEV_TX_OK;
 	struct ks_net *ks = netdev_priv(netdev);
+	unsigned long flags;
 
-	disable_irq(netdev->irq);
-	ks_disable_int(ks);
-	spin_lock(&ks->statelock);
+	spin_lock_irqsave(&ks->statelock, flags);
 
 	/* Extra space are required:
 	 *  4 byte for alignment, 4 for status/length, 4 for CRC
 

@@ -1007,9 +1011,7 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		dev_kfree_skb(skb);
 	} else
 		retv = NETDEV_TX_BUSY;
-	spin_unlock(&ks->statelock);
-	ks_enable_int(ks);
-	enable_irq(netdev->irq);
+	spin_unlock_irqrestore(&ks->statelock, flags);
 	return retv;
 }
 
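The shape of this fix: replace "disable the IRQ line around the critical section" with a single state lock taken with interrupts disabled, so the IRQ handler and the xmit path serialize on the same lock. disable_irq() only fences the one interrupt line (and may wait for a running handler), while the spinlock also excludes other CPUs touching the same state. A kernel-style sketch with a hypothetical driver struct:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct my_dev {
	spinlock_t statelock;
};

static irqreturn_t my_irq(int irq, void *priv)
{
	struct my_dev *d = priv;
	unsigned long flags;

	spin_lock_irqsave(&d->statelock, flags);
	/* ... touch shared registers/state ... */
	spin_unlock_irqrestore(&d->statelock, flags);
	return IRQ_HANDLED;
}

static void my_xmit_path(struct my_dev *d)
{
	unsigned long flags;

	/* One lock protects against both the local IRQ handler and
	 * concurrent callers on other CPUs. */
	spin_lock_irqsave(&d->statelock, flags);
	/* ... queue the packet ... */
	spin_unlock_irqrestore(&d->statelock, flags);
}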
@@ -522,6 +522,7 @@ efx_copy_channel(const struct efx_channel *old_channel)
 		if (tx_queue->channel)
 			tx_queue->channel = channel;
 		tx_queue->buffer = NULL;
+		tx_queue->cb_page = NULL;
 		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
 	}
 
@@ -563,13 +563,45 @@ efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx,
 				    u32 nic_major, u32 nic_minor,
 				    s32 correction)
 {
+	u32 sync_timestamp;
 	ktime_t kt = { 0 };
+	s16 delta;
 
 	if (!(nic_major & 0x80000000)) {
 		WARN_ON_ONCE(nic_major >> 16);
-		/* Use the top bits from the latest sync event. */
-		nic_major &= 0xffff;
-		nic_major |= (last_sync_timestamp_major(efx) & 0xffff0000);
+
+		/* Medford provides 48 bits of timestamp, so we must get the top
+		 * 16 bits from the timesync event state.
+		 *
+		 * We only have the lower 16 bits of the time now, but we do
+		 * have a full resolution timestamp at some point in past. As
+		 * long as the difference between the (real) now and the sync
+		 * is less than 2^15, then we can reconstruct the difference
+		 * between those two numbers using only the lower 16 bits of
+		 * each.
+		 *
+		 * Put another way
+		 *
+		 * a - b = ((a mod k) - b) mod k
+		 *
+		 * when -k/2 < (a-b) < k/2. In our case k is 2^16. We know
+		 * (a mod k) and b, so can calculate the delta, a - b.
+		 *
+		 */
+		sync_timestamp = last_sync_timestamp_major(efx);
+
+		/* Because delta is s16 this does an implicit mask down to
+		 * 16 bits which is what we need, assuming
+		 * MEDFORD_TX_SECS_EVENT_BITS is 16. delta is signed so that
+		 * we can deal with the (unlikely) case of sync timestamps
+		 * arriving from the future.
+		 */
+		delta = nic_major - sync_timestamp;
+
+		/* Recover the fully specified time now, by applying the offset
+		 * to the (fully specified) sync time.
+		 */
+		nic_major = sync_timestamp + delta;
 
 		kt = ptp->nic_to_kernel_time(nic_major, nic_minor,
 					     correction);
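The modular arithmetic in that comment is easy to verify standalone: recover a full counter from its low 16 bits plus a recent fully-known sample, valid while the two are within 2^15 of each other. The signed 16-bit subtraction performs the "mod 2^16" wrap for free:

#include <stdint.h>
#include <stdio.h>

/* Userspace model of the reconstruction: now_low16 is the truncated
 * "now", sync_full a recent full-width sample of the same counter. */
static uint32_t reconstruct(uint16_t now_low16, uint32_t sync_full)
{
	int16_t delta = (int16_t)(now_low16 - (uint16_t)sync_full);

	return sync_full + delta;
}

int main(void)
{
	/* Sync happened at 0x0001fffe; "now" is 3 ticks later, so its low
	 * 16 bits wrapped to 0x0001. The signed delta still comes out +3,
	 * where the old "splice in the sync's top bits" approach would
	 * have produced 0x00010001, 65536 ticks in the past. */
	printf("0x%08x\n", reconstruct(0x0001, 0x0001fffe)); /* 0x00020001 */
	return 0;
}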
@@ -34,6 +34,7 @@
 static void dwmac1000_core_init(struct mac_device_info *hw,
 				struct net_device *dev)
 {
+	struct stmmac_priv *priv = netdev_priv(dev);
 	void __iomem *ioaddr = hw->pcsr;
 	u32 value = readl(ioaddr + GMAC_CONTROL);
 	int mtu = dev->mtu;

@@ -45,7 +46,7 @@ static void dwmac1000_core_init(struct mac_device_info *hw,
 	 * Broadcom tags can look like invalid LLC/SNAP packets and cause the
 	 * hardware to truncate packets on reception.
 	 */
-	if (netdev_uses_dsa(dev))
+	if (netdev_uses_dsa(dev) || !priv->plat->enh_desc)
 		value &= ~GMAC_CONTROL_ACS;
 
 	if (mtu > 1500)
@@ -298,6 +298,7 @@ void ipvlan_process_multicast(struct work_struct *work)
 		}
 		if (dev)
 			dev_put(dev);
+		cond_resched();
 	}
 }
 
@@ -504,19 +505,21 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
 	struct ethhdr *ethh = eth_hdr(skb);
 	int ret = NET_XMIT_DROP;
 
-	/* In this mode we dont care about multicast and broadcast traffic */
-	if (is_multicast_ether_addr(ethh->h_dest)) {
-		pr_debug_ratelimited("Dropped {multi|broad}cast of type=[%x]\n",
-				     ntohs(skb->protocol));
-		kfree_skb(skb);
-		goto out;
-	}
-
 	/* The ipvlan is a pseudo-L2 device, so the packets that we receive
 	 * will have L2; which need to discarded and processed further
 	 * in the net-ns of the main-device.
 	 */
 	if (skb_mac_header_was_set(skb)) {
+		/* In this mode we dont care about
+		 * multicast and broadcast traffic */
+		if (is_multicast_ether_addr(ethh->h_dest)) {
+			pr_debug_ratelimited(
+				"Dropped {multi|broad}cast of type=[%x]\n",
+				ntohs(skb->protocol));
+			kfree_skb(skb);
+			goto out;
+		}
+
 		skb_pull(skb, sizeof(*ethh));
 		skb->mac_header = (typeof(skb->mac_header))~0U;
 		skb_reset_network_header(skb);
@@ -236,7 +236,6 @@ static void ipvlan_uninit(struct net_device *dev)
 static int ipvlan_open(struct net_device *dev)
 {
 	struct ipvl_dev *ipvlan = netdev_priv(dev);
-	struct net_device *phy_dev = ipvlan->phy_dev;
 	struct ipvl_addr *addr;
 
 	if (ipvlan->port->mode == IPVLAN_MODE_L3 ||

@@ -250,7 +249,7 @@ static int ipvlan_open(struct net_device *dev)
 		ipvlan_ht_addr_add(ipvlan, addr);
 	rcu_read_unlock();
 
-	return dev_uc_add(phy_dev, phy_dev->dev_addr);
+	return 0;
 }
 
 static int ipvlan_stop(struct net_device *dev)

@@ -262,8 +261,6 @@ static int ipvlan_stop(struct net_device *dev)
 	dev_uc_unsync(phy_dev, dev);
 	dev_mc_unsync(phy_dev, dev);
 
-	dev_uc_del(phy_dev, phy_dev->dev_addr);
-
 	rcu_read_lock();
 	list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
 		ipvlan_ht_addr_del(addr);
@@ -2886,6 +2886,11 @@ static void macsec_dev_set_rx_mode(struct net_device *dev)
 	dev_uc_sync(real_dev, dev);
 }
 
+static sci_t dev_to_sci(struct net_device *dev, __be16 port)
+{
+	return make_sci(dev->dev_addr, port);
+}
+
 static int macsec_set_mac_address(struct net_device *dev, void *p)
 {
 	struct macsec_dev *macsec = macsec_priv(dev);

@@ -2907,6 +2912,7 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
 
 out:
 	ether_addr_copy(dev->dev_addr, addr->sa_data);
+	macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
 	return 0;
 }
 

@@ -2989,6 +2995,7 @@ static const struct device_type macsec_type = {
 
 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
 	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
+	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
 	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
 	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
 	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },

@@ -3188,11 +3195,6 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
 	return false;
 }
 
-static sci_t dev_to_sci(struct net_device *dev, __be16 port)
-{
-	return make_sci(dev->dev_addr, port);
-}
-
 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
 {
 	struct macsec_dev *macsec = macsec_priv(dev);
@@ -338,6 +338,8 @@ static void macvlan_process_broadcast(struct work_struct *w)
 		if (src)
 			dev_put(src->dev);
 		kfree_skb(skb);
+
+		cond_resched();
 	}
 }
 
@@ -76,7 +76,7 @@ static LIST_HEAD(phy_fixup_list);
 static DEFINE_MUTEX(phy_fixup_lock);
 
 #ifdef CONFIG_PM
-static bool mdio_bus_phy_may_suspend(struct phy_device *phydev, bool suspend)
+static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
 {
 	struct device_driver *drv = phydev->mdio.dev.driver;
 	struct phy_driver *phydrv = to_phy_driver(drv);

@@ -88,11 +88,10 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev, bool suspend)
 	/* PHY not attached? May suspend if the PHY has not already been
 	 * suspended as part of a prior call to phy_disconnect() ->
 	 * phy_detach() -> phy_suspend() because the parent netdev might be the
-	 * MDIO bus driver and clock gated at this point. Also may resume if
-	 * PHY is not attached.
+	 * MDIO bus driver and clock gated at this point.
 	 */
 	if (!netdev)
-		return suspend ? !phydev->suspended : phydev->suspended;
+		goto out;
 
 	if (netdev->wol_enabled)
 		return false;

@@ -112,7 +111,8 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
 	if (device_may_wakeup(&netdev->dev))
 		return false;
 
-	return true;
+out:
+	return !phydev->suspended;
 }
 
 static int mdio_bus_phy_suspend(struct device *dev)

@@ -127,9 +127,11 @@ static int mdio_bus_phy_suspend(struct device *dev)
 	if (phydev->attached_dev && phydev->adjust_link)
 		phy_stop_machine(phydev);
 
-	if (!mdio_bus_phy_may_suspend(phydev, true))
+	if (!mdio_bus_phy_may_suspend(phydev))
 		return 0;
 
+	phydev->suspended_by_mdio_bus = 1;
+
 	return phy_suspend(phydev);
 }
 

@@ -138,9 +140,11 @@ static int mdio_bus_phy_resume(struct device *dev)
 	struct phy_device *phydev = to_phy_device(dev);
 	int ret;
 
-	if (!mdio_bus_phy_may_suspend(phydev, false))
+	if (!phydev->suspended_by_mdio_bus)
 		goto no_resume;
 
+	phydev->suspended_by_mdio_bus = 0;
+
 	ret = phy_resume(phydev);
 	if (ret < 0)
 		return ret;
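The new suspended_by_mdio_bus flag captures a general pattern: remember at suspend time whether this code path did the suspend, and resume only then, instead of re-deriving the decision at resume time (the inputs to that decision can change in between). A standalone sketch of the pattern with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool suspended;
	bool suspended_by_bus;	/* set only when the bus did the suspend */
};

static void bus_suspend(struct dev_state *d, bool may_suspend)
{
	if (!may_suspend)
		return;
	d->suspended_by_bus = true;
	d->suspended = true;
}

static void bus_resume(struct dev_state *d)
{
	/* Don't wake a device someone else (e.g. a disconnect path) put
	 * to sleep; only undo our own suspend. */
	if (!d->suspended_by_bus)
		return;
	d->suspended_by_bus = false;
	d->suspended = false;
}

int main(void)
{
	struct dev_state d = { 0 };

	bus_suspend(&d, true);
	bus_resume(&d);
	printf("%d %d\n", d.suspended, d.suspended_by_bus);	/* 0 0 */
	return 0;
}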
Some files were not shown because too many files have changed in this diff.