This is the 4.19.34 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlynu40ACgkQONu9yGCS
aT5X6g//Wkfm/+qSZ0GhLDQkPniiH1QkvzhOmVrrxu+KB0qsiwsEl8Srw33ZVkJK
LT8+IPGiG9jEGu9dj+BYXTIfy9ZvfSsEL2N6GhYwDSXP0fok2rUaHbZvv1IB2g4W
afhGdNwNAUCJ/j1UrUsi+SAFJ+xWbVxFpGstd0cqM9IbKdEV7RIukvuKckHiKOKR
qI8FxC+G2PAr+BtnETfk5/suPDJ7B3ZicDoMhiWJGxJ6dfFTVmkSmasSoPDaMiHm
4S3hN2lu+WTeRpRPPB17Dlk4MmIp0k+bGYBKAlaxAMCc/RZxvbT2pRYaMQbId2/L
mNUfSnOQFGEAhlAPfb7wdbObphnyT34GhlkWfZBTrnhPO0/FomLOvU6xVdcNuakX
Tv2JKfDzb+2ttcMZ+0T84Ru9RztoswFATSw8uFMVxW8oTS6MVWnHu96Kxfl7QO3J
PdlIGcyqxSuWNE8OX1QVtdSruGZfwUDNs94S4nQJtkB8BViRwhGJlqaXuy4d9Wp6
fGlI2W6qhjyosi2wBSMTjh/ytk/jq0vfs+z2XjR2gAYssvB/SOLR/AlSVguWsDnf
WaoFBkXvCbuPvPlo0TrLpl5RW5WlOtLXHE3Vr3dKp458wLwpf/OZBGoZiknp7DrF
PzBZs2ie5tmyqTxbAygl7WkbQPJ682pd5R4nf5CY+zvUaOMZv1g=
=Iuup
-----END PGP SIGNATURE-----

Merge 4.19.34 into android-4.19

Changes in 4.19.34
arm64: debug: Don't propagate UNKNOWN FAR into si_code for debug signals
ext4: cleanup bh release code in ext4_ind_remove_space()
tty/serial: atmel: Add is_half_duplex helper
tty/serial: atmel: RS485 HD w/DMA: enable RX after TX is stopped
CIFS: fix POSIX lock leak and invalid ptr deref
h8300: use cc-cross-prefix instead of hardcoding h8300-unknown-linux-
f2fs: fix to adapt small inline xattr space in __find_inline_xattr()
f2fs: fix to avoid deadlock in f2fs_read_inline_dir()
tracing: kdb: Fix ftdump to not sleep
net/mlx5: Avoid panic when setting vport rate
net/mlx5: Avoid panic when setting vport mac, getting vport config
gpio: gpio-omap: fix level interrupt idling
include/linux/relay.h: fix percpu annotation in struct rchan
sysctl: handle overflow for file-max
net: stmmac: Avoid sometimes uninitialized Clang warnings
enic: fix build warning without CONFIG_CPUMASK_OFFSTACK
libbpf: force fixdep compilation at the start of the build
scsi: hisi_sas: Set PHY linkrate when disconnected
scsi: hisi_sas: Fix a timeout race of driver internal and SMP IO
iio: adc: fix warning in Qualcomm PM8xxx HK/XOADC driver
x86/hyperv: Fix kernel panic when kexec on HyperV
perf c2c: Fix c2c report for empty numa node
mm/sparse: fix a bad comparison
mm/cma.c: cma_declare_contiguous: correct err handling
mm/page_ext.c: fix an imbalance with kmemleak
mm, swap: bounds check swap_info array accesses to avoid NULL derefs
mm,oom: don't kill global init via memory.oom.group
memcg: killed threads should not invoke memcg OOM killer
mm, mempolicy: fix uninit memory access
mm/vmalloc.c: fix kernel BUG at mm/vmalloc.c:512!
mm/slab.c: kmemleak no scan alien caches
ocfs2: fix a panic problem caused by o2cb_ctl
f2fs: do not use mutex lock in atomic context
fs/file.c: initialize init_files.resize_wait
page_poison: play nicely with KASAN
cifs: use correct format characters
dm thin: add sanity checks to thin-pool and external snapshot creation
f2fs: fix to check inline_xattr_size boundary correctly
cifs: Accept validate negotiate if server return NT_STATUS_NOT_SUPPORTED
cifs: Fix NULL pointer dereference of devname
netfilter: nf_tables: check the result of dereferencing base_chain->stats
netfilter: conntrack: tcp: only close if RST matches exact sequence
jbd2: fix invalid descriptor block checksum
fs: fix guard_bio_eod to check for real EOD errors
tools lib traceevent: Fix buffer overflow in arg_eval
PCI/PME: Fix hotplug/sysfs remove deadlock in pcie_pme_remove()
wil6210: check null pointer in _wil_cfg80211_merge_extra_ies
mt76: fix a leaked reference by adding a missing of_node_put
crypto: crypto4xx - add missing of_node_put after of_device_is_available
crypto: cavium/zip - fix collision with generic cra_driver_name
usb: chipidea: Grab the (legacy) USB PHY by phandle first
powerpc/powernv/ioda: Fix locked_vm counting for memory used by IOMMU tables
scsi: core: replace GFP_ATOMIC with GFP_KERNEL in scsi_scan.c
kbuild: invoke syncconfig if include/config/auto.conf.cmd is missing
powerpc/xmon: Fix opcode being uninitialized in print_insn_powerpc
coresight: etm4x: Add support to enable ETMv4.2
serial: 8250_pxa: honor the port number from devicetree
ARM: 8840/1: use a raw_spinlock_t in unwind
iommu/io-pgtable-arm-v7s: Only kmemleak_ignore L2 tables
powerpc/hugetlb: Handle mmap_min_addr correctly in get_unmapped_area callback
btrfs: qgroup: Make qgroup async transaction commit more aggressive
mmc: omap: fix the maximum timeout setting
net: dsa: mv88e6xxx: Add lockdep classes to fix false positive splat
e1000e: Fix -Wformat-truncation warnings
mlxsw: spectrum: Avoid -Wformat-truncation warnings
platform/x86: ideapad-laptop: Fix no_hw_rfkill_list for Lenovo RESCUER R720-15IKBN
platform/mellanox: mlxreg-hotplug: Fix KASAN warning
loop: set GENHD_FL_NO_PART_SCAN after blkdev_reread_part()
IB/mlx4: Increase the timeout for CM cache
clk: fractional-divider: check parent rate only if flag is set
perf annotate: Fix getting source line failure
ASoC: qcom: Fix of-node refcount unbalance in qcom_snd_parse_of()
cpufreq: acpi-cpufreq: Report if CPU doesn't support boost technologies
efi: cper: Fix possible out-of-bounds access
s390/ism: ignore some errors during deregistration
scsi: megaraid_sas: return error when create DMA pool failed
scsi: fcoe: make use of fip_mode enum complete
drm/amd/display: Clear stream->mode_changed after commit
perf test: Fix failure of 'evsel-tp-sched' test on s390
mwifiex: don't advertise IBSS features without FW support
perf report: Don't shadow inlined symbol with different addr range
SoC: imx-sgtl5000: add missing put_device()
media: ov7740: fix runtime pm initialization
media: sh_veu: Correct return type for mem2mem buffer helpers
media: s5p-jpeg: Correct return type for mem2mem buffer helpers
media: rockchip/rga: Correct return type for mem2mem buffer helpers
media: s5p-g2d: Correct return type for mem2mem buffer helpers
media: mx2_emmaprp: Correct return type for mem2mem buffer helpers
media: mtk-jpeg: Correct return type for mem2mem buffer helpers
mt76: usb: do not run mt76u_queues_deinit twice
xen/gntdev: Do not destroy context while dma-bufs are in use
vfs: fix preadv64v2 and pwritev64v2 compat syscalls with offset == -1
HID: intel-ish-hid: avoid binding wrong ishtp_cl_device
cgroup, rstat: Don't flush subtree root unless necessary
jbd2: fix race when writing superblock
leds: lp55xx: fix null deref on firmware load failure
perf report: Add s390 diagnosic sampling descriptor size
iwlwifi: pcie: fix emergency path
ACPI / video: Refactor and fix dmi_is_desktop()
selftests: skip seccomp get_metadata test if not real root
kprobes: Prohibit probing on bsearch()
kprobes: Prohibit probing on RCU debug routine
netfilter: conntrack: fix cloned unconfirmed skb->_nfct race in __nf_conntrack_confirm
ARM: 8833/1: Ensure that NEON code always compiles with Clang
ARM: dts: meson8b: fix the Ethernet data line signals in eth_rgmii_pins
ALSA: PCM: check if ops are defined before suspending PCM
ath10k: fix shadow register implementation for WCN3990
usb: f_fs: Avoid crash due to out-of-scope stack ptr access
sched/topology: Fix percpu data types in struct sd_data & struct s_data
bcache: fix input overflow to cache set sysfs file io_error_halflife
bcache: fix input overflow to sequential_cutoff
bcache: fix potential div-zero error of writeback_rate_i_term_inverse
bcache: improve sysfs_strtoul_clamp()
genirq: Avoid summation loops for /proc/stat
net: marvell: mvpp2: fix stuck in-band SGMII negotiation
iw_cxgb4: fix srqidx leak during connection abort
net: phy: consider latched link-down status in polling mode
fbdev: fbmem: fix memory access if logo is bigger than the screen
cdrom: Fix race condition in cdrom_sysctl_register
drm: rcar-du: add missing of_node_put
drm/amd/display: Don't re-program planes for DPMS changes
drm/amd/display: Disconnect mpcc when changing tg
perf/aux: Make perf_event accessible to setup_aux()
e1000e: fix cyclic resets at link up with active tx
e1000e: Exclude device from suspend direct complete optimization
platform/x86: intel_pmc_core: Fix PCH IP sts reading
i2c: of: Try to find an I2C adapter matching the parent
staging: spi: mt7621: Add return code check on device_reset()
iwlwifi: mvm: fix RFH config command with >=10 CPUs
ASoC: fsl-asoc-card: fix object reference leaks in fsl_asoc_card_probe
sched/debug: Initialize sd_sysctl_cpus if !CONFIG_CPUMASK_OFFSTACK
efi/memattr: Don't bail on zero VA if it equals the region's PA
sched/core: Use READ_ONCE()/WRITE_ONCE() in move_queued_task()/task_rq_lock()
drm/vkms: Bugfix extra vblank frame
ARM: dts: lpc32xx: Remove leading 0x and 0s from bindings notation
efi/arm/arm64: Allow SetVirtualAddressMap() to be omitted
soc: qcom: gsbi: Fix error handling in gsbi_probe()
mt7601u: bump supported EEPROM version
ARM: 8830/1: NOMMU: Toggle only bits in EXC_RETURN we are really care of
ARM: avoid Cortex-A9 livelock on tight dmb loops
block, bfq: fix in-service-queue check for queue merging
bpf: fix missing prototype warnings
selftests/bpf: skip verifier tests for unsupported program types
powerpc/64s: Clear on-stack exception marker upon exception return
cgroup/pids: turn cgroup_subsys->free() into cgroup_subsys->release() to fix the accounting
backlight: pwm_bl: Use gpiod_get_value_cansleep() to get initial state
tty: increase the default flip buffer limit to 2*640K
powerpc/pseries: Perform full re-add of CPU for topology update post-migration
drm/amd/display: Enable vblank interrupt during CRC capture
ALSA: dice: add support for Solid State Logic Duende Classic/Mini
usb: dwc3: gadget: Fix OTG events when gadget driver isn't loaded
platform/x86: intel-hid: Missing power button release on some Dell models
perf script python: Use PyBytes for attr in trace-event-python
perf script python: Add trace_context extension module to sys.modules
media: mt9m111: set initial frame size other than 0x0
hwrng: virtio - Avoid repeated init of completion
soc/tegra: fuse: Fix illegal free of IO base address
HID: intel-ish: ipc: handle PIMR before ish_wakeup also clear PISR busy_clear bit
f2fs: UBSAN: set boolean value iostat_enable correctly
hpet: Fix missing '=' character in the __setup() code of hpet_mmap_enable
cpu/hotplug: Mute hotplug lockdep during init
dmaengine: imx-dma: fix warning comparison of distinct pointer types
dmaengine: qcom_hidma: assign channel cookie correctly
dmaengine: qcom_hidma: initialize tx flags in hidma_prep_dma_*
netfilter: physdev: relax br_netfilter dependency
media: rcar-vin: Allow independent VIN link enablement
media: s5p-jpeg: Check for fmt_ver_flag when doing fmt enumeration
regulator: act8865: Fix act8600_sudcdc_voltage_ranges setting
pinctrl: meson: meson8b: add the eth_rxd2 and eth_rxd3 pins
drm: Auto-set allow_fb_modifiers when given modifiers at plane init
drm/nouveau: Stop using drm_crtc_force_disable
x86/build: Specify elf_i386 linker emulation explicitly for i386 objects
selinux: do not override context on context mounts
brcmfmac: Use firmware_request_nowarn for the clm_blob
wlcore: Fix memory leak in case wl12xx_fetch_firmware failure
x86/build: Mark per-CPU symbols as absolute explicitly for LLD
drm/fb-helper: fix leaks in error path of drm_fb_helper_fbdev_setup
clk: meson: clean-up clock registration
clk: rockchip: fix frac settings of GPLL clock for rk3328
dmaengine: tegra: avoid overflow of byte tracking
Input: soc_button_array - fix mapping of the 5th GPIO in a PNP0C40 device
drm/dp/mst: Configure no_stop_bit correctly for remote i2c xfers
net: stmmac: Avoid one more sometimes uninitialized Clang warning
ACPI / video: Extend chassis-type detection with a "Lunch Box" check
bcache: fix potential div-zero error of writeback_rate_p_term_inverse
kprobes/x86: Blacklist non-attachable interrupt functions
Linux 4.19.34

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit d885da678e
222 changed files with 1210 additions and 593 deletions
Documentation/arm (kernel mode NEON documentation):

@@ -6,7 +6,7 @@ TL;DR summary
 * Use only NEON instructions, or VFP instructions that don't rely on support
   code
 * Isolate your NEON code in a separate compilation unit, and compile it with
-  '-mfpu=neon -mfloat-abi=softfp'
+  '-march=armv7-a -mfpu=neon -mfloat-abi=softfp'
 * Put kernel_neon_begin() and kernel_neon_end() calls around the calls into your
   NEON code
 * Don't sleep in your NEON code, and be aware that it will be executed with
@@ -87,7 +87,7 @@ instructions appearing in unexpected places if no special care is taken.
 Therefore, the recommended and only supported way of using NEON/VFP in the
 kernel is by adhering to the following rules:
 * isolate the NEON code in a separate compilation unit and compile it with
-  '-mfpu=neon -mfloat-abi=softfp';
+  '-march=armv7-a -mfpu=neon -mfloat-abi=softfp';
 * issue the calls to kernel_neon_begin(), kernel_neon_end() as well as the calls
   into the unit containing the NEON code from a compilation unit which is *not*
   built with the GCC flag '-mfpu=neon' set.
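The rules quoted above can be illustrated with a minimal sketch (hypothetical file and function names, not part of this patch): the NEON routine lives in its own compilation unit built with the flags shown, while the caller, built without '-mfpu=neon', brackets the call with kernel_neon_begin()/kernel_neon_end() and does not sleep in between.

/*
 * caller.c -- sketch only. my_xor_blocks_neon() is a hypothetical routine
 * assumed to live in a separate compilation unit compiled with
 * '-march=armv7-a -mfpu=neon -mfloat-abi=softfp'; this file must be built
 * without '-mfpu=neon'.
 */
#include <linux/types.h>
#include <asm/neon.h>

void my_xor_blocks_neon(void *dst, const void *src, size_t len);

void xor_blocks(void *dst, const void *src, size_t len)
{
	/*
	 * Only call this from a context where kernel-mode NEON use is
	 * permitted (e.g. process context, NEON not already claimed).
	 */
	kernel_neon_begin();			/* claim the NEON/VFP unit */
	my_xor_blocks_neon(dst, src, len);	/* must not sleep here */
	kernel_neon_end();			/* release it again */
}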
Makefile (9 changed lines):

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 33
+SUBLEVEL = 34
 EXTRAVERSION =
 NAME = "People's Front"

@@ -623,12 +623,15 @@ ifeq ($(may-sync-config),1)
 -include include/config/auto.conf.cmd

 # To avoid any implicit rule to kick in, define an empty command
-$(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
+$(KCONFIG_CONFIG): ;

 # The actual configuration files used during the build are stored in
 # include/generated/ and include/config/. Update them if .config is newer than
 # include/config/auto.conf (which mirrors .config).
-include/config/%.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
+#
+# This exploits the 'multi-target pattern rule' trick.
+# The syncconfig should be executed only once to make all the targets.
+%/auto.conf %/auto.conf.cmd %/tristate.conf: $(KCONFIG_CONFIG)
 	$(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
 else
 # External modules and some install targets need include/generated/autoconf.h

@ -230,7 +230,7 @@
|
|||
status = "disabled";
|
||||
};
|
||||
|
||||
i2s1: i2s@2009C000 {
|
||||
i2s1: i2s@2009c000 {
|
||||
compatible = "nxp,lpc3220-i2s";
|
||||
reg = <0x2009C000 0x1000>;
|
||||
};
|
||||
|
@ -273,7 +273,7 @@
|
|||
status = "disabled";
|
||||
};
|
||||
|
||||
i2c1: i2c@400A0000 {
|
||||
i2c1: i2c@400a0000 {
|
||||
compatible = "nxp,pnx-i2c";
|
||||
reg = <0x400A0000 0x100>;
|
||||
interrupt-parent = <&sic1>;
|
||||
|
@ -284,7 +284,7 @@
|
|||
clocks = <&clk LPC32XX_CLK_I2C1>;
|
||||
};
|
||||
|
||||
i2c2: i2c@400A8000 {
|
||||
i2c2: i2c@400a8000 {
|
||||
compatible = "nxp,pnx-i2c";
|
||||
reg = <0x400A8000 0x100>;
|
||||
interrupt-parent = <&sic1>;
|
||||
|
@ -295,7 +295,7 @@
|
|||
clocks = <&clk LPC32XX_CLK_I2C2>;
|
||||
};
|
||||
|
||||
mpwm: mpwm@400E8000 {
|
||||
mpwm: mpwm@400e8000 {
|
||||
compatible = "nxp,lpc3220-motor-pwm";
|
||||
reg = <0x400E8000 0x78>;
|
||||
status = "disabled";
|
||||
|
@ -394,7 +394,7 @@
|
|||
#gpio-cells = <3>; /* bank, pin, flags */
|
||||
};
|
||||
|
||||
timer4: timer@4002C000 {
|
||||
timer4: timer@4002c000 {
|
||||
compatible = "nxp,lpc3220-timer";
|
||||
reg = <0x4002C000 0x1000>;
|
||||
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
|
||||
|
@ -412,7 +412,7 @@
|
|||
status = "disabled";
|
||||
};
|
||||
|
||||
watchdog: watchdog@4003C000 {
|
||||
watchdog: watchdog@4003c000 {
|
||||
compatible = "nxp,pnx4008-wdt";
|
||||
reg = <0x4003C000 0x1000>;
|
||||
clocks = <&clk LPC32XX_CLK_WDOG>;
|
||||
|
@ -451,7 +451,7 @@
|
|||
status = "disabled";
|
||||
};
|
||||
|
||||
timer1: timer@4004C000 {
|
||||
timer1: timer@4004c000 {
|
||||
compatible = "nxp,lpc3220-timer";
|
||||
reg = <0x4004C000 0x1000>;
|
||||
interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
|
||||
|
@ -475,7 +475,7 @@
|
|||
status = "disabled";
|
||||
};
|
||||
|
||||
pwm1: pwm@4005C000 {
|
||||
pwm1: pwm@4005c000 {
|
||||
compatible = "nxp,lpc3220-pwm";
|
||||
reg = <0x4005C000 0x4>;
|
||||
clocks = <&clk LPC32XX_CLK_PWM1>;
|
||||
|
@ -484,7 +484,7 @@
|
|||
status = "disabled";
|
||||
};
|
||||
|
||||
pwm2: pwm@4005C004 {
|
||||
pwm2: pwm@4005c004 {
|
||||
compatible = "nxp,lpc3220-pwm";
|
||||
reg = <0x4005C004 0x4>;
|
||||
clocks = <&clk LPC32XX_CLK_PWM2>;
|
||||
|
|
|
@ -207,9 +207,7 @@
|
|||
groups = "eth_tx_clk",
|
||||
"eth_tx_en",
|
||||
"eth_txd1_0",
|
||||
"eth_txd1_1",
|
||||
"eth_txd0_0",
|
||||
"eth_txd0_1",
|
||||
"eth_rx_clk",
|
||||
"eth_rx_dv",
|
||||
"eth_rxd1",
|
||||
|
@ -218,7 +216,9 @@
|
|||
"eth_mdc",
|
||||
"eth_ref_clk",
|
||||
"eth_txd2",
|
||||
"eth_txd3";
|
||||
"eth_txd3",
|
||||
"eth_rxd3",
|
||||
"eth_rxd2";
|
||||
function = "ethernet";
|
||||
};
|
||||
};
|
||||
|
|
|
@ -11,6 +11,8 @@
|
|||
#define sev() __asm__ __volatile__ ("sev" : : : "memory")
|
||||
#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
|
||||
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
|
||||
#else
|
||||
#define wfe() do { } while (0)
|
||||
#endif
|
||||
|
||||
#if __LINUX_ARM_ARCH__ >= 7
|
||||
|
|
|
@ -95,7 +95,11 @@ extern void release_thread(struct task_struct *);
|
|||
unsigned long get_wchan(struct task_struct *p);
|
||||
|
||||
#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
|
||||
#define cpu_relax() smp_mb()
|
||||
#define cpu_relax() \
|
||||
do { \
|
||||
smp_mb(); \
|
||||
__asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
|
||||
} while (0)
|
||||
#else
|
||||
#define cpu_relax() barrier()
|
||||
#endif
|
||||
|
|
|
@ -49,7 +49,7 @@
|
|||
* (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01.
|
||||
*/
|
||||
#define EXC_RET_STACK_MASK 0x00000004
|
||||
#define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd
|
||||
#define EXC_RET_THREADMODE_PROCESSSTACK (3 << 2)
|
||||
|
||||
/* Cache related definitions */
|
||||
|
||||
|
|
|
@ -127,7 +127,8 @@
|
|||
*/
|
||||
.macro v7m_exception_slow_exit ret_r0
|
||||
cpsid i
|
||||
ldr lr, =EXC_RET_THREADMODE_PROCESSSTACK
|
||||
ldr lr, =exc_ret
|
||||
ldr lr, [lr]
|
||||
|
||||
@ read original r12, sp, lr, pc and xPSR
|
||||
add r12, sp, #S_IP
|
||||
|
|
|
@ -146,3 +146,7 @@ ENTRY(vector_table)
|
|||
.rept CONFIG_CPU_V7M_NUM_IRQ
|
||||
.long __irq_entry @ External Interrupts
|
||||
.endr
|
||||
.align 2
|
||||
.globl exc_ret
|
||||
exc_ret:
|
||||
.space 4
|
||||
|
|
|
@ -91,8 +91,11 @@ void machine_crash_nonpanic_core(void *unused)
|
|||
|
||||
set_cpu_online(smp_processor_id(), false);
|
||||
atomic_dec(&waiting_for_crash_ipi);
|
||||
while (1)
|
||||
|
||||
while (1) {
|
||||
cpu_relax();
|
||||
wfe();
|
||||
}
|
||||
}
|
||||
|
||||
void crash_smp_send_stop(void)
|
||||
|
|
|
@ -604,8 +604,10 @@ static void ipi_cpu_stop(unsigned int cpu)
|
|||
local_fiq_disable();
|
||||
local_irq_disable();
|
||||
|
||||
while (1)
|
||||
while (1) {
|
||||
cpu_relax();
|
||||
wfe();
|
||||
}
|
||||
}
|
||||
|
||||
static DEFINE_PER_CPU(struct completion *, cpu_completion);
|
||||
|
|
|
@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
|
|||
static const struct unwind_idx *__origin_unwind_idx;
|
||||
extern const struct unwind_idx __stop_unwind_idx[];
|
||||
|
||||
static DEFINE_SPINLOCK(unwind_lock);
|
||||
static DEFINE_RAW_SPINLOCK(unwind_lock);
|
||||
static LIST_HEAD(unwind_tables);
|
||||
|
||||
/* Convert a prel31 symbol to an absolute address */
|
||||
|
@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
|
|||
/* module unwind tables */
|
||||
struct unwind_table *table;
|
||||
|
||||
spin_lock_irqsave(&unwind_lock, flags);
|
||||
raw_spin_lock_irqsave(&unwind_lock, flags);
|
||||
list_for_each_entry(table, &unwind_tables, list) {
|
||||
if (addr >= table->begin_addr &&
|
||||
addr < table->end_addr) {
|
||||
|
@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
|
|||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&unwind_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&unwind_lock, flags);
|
||||
}
|
||||
|
||||
pr_debug("%s: idx = %p\n", __func__, idx);
|
||||
|
@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
|
|||
tab->begin_addr = text_addr;
|
||||
tab->end_addr = text_addr + text_size;
|
||||
|
||||
spin_lock_irqsave(&unwind_lock, flags);
|
||||
raw_spin_lock_irqsave(&unwind_lock, flags);
|
||||
list_add_tail(&tab->list, &unwind_tables);
|
||||
spin_unlock_irqrestore(&unwind_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&unwind_lock, flags);
|
||||
|
||||
return tab;
|
||||
}
|
||||
|
@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
|
|||
if (!tab)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&unwind_lock, flags);
|
||||
raw_spin_lock_irqsave(&unwind_lock, flags);
|
||||
list_del(&tab->list);
|
||||
spin_unlock_irqrestore(&unwind_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&unwind_lock, flags);
|
||||
|
||||
kfree(tab);
|
||||
}
|
||||
|
|
|
@ -39,7 +39,7 @@ $(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S
|
|||
$(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S
|
||||
|
||||
ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
|
||||
NEON_FLAGS := -mfloat-abi=softfp -mfpu=neon
|
||||
NEON_FLAGS := -march=armv7-a -mfloat-abi=softfp -mfpu=neon
|
||||
CFLAGS_xor-neon.o += $(NEON_FLAGS)
|
||||
obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o
|
||||
endif
|
||||
|
|
|
@ -14,7 +14,7 @@
|
|||
MODULE_LICENSE("GPL");
|
||||
|
||||
#ifndef __ARM_NEON__
|
||||
#error You should compile this file with '-mfloat-abi=softfp -mfpu=neon'
|
||||
#error You should compile this file with '-march=armv7-a -mfloat-abi=softfp -mfpu=neon'
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
|
|
@ -523,8 +523,10 @@ void omap_prm_reset_system(void)
|
|||
|
||||
prm_ll_data->reset_system();
|
||||
|
||||
while (1)
|
||||
while (1) {
|
||||
cpu_relax();
|
||||
wfe();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -139,6 +139,9 @@ __v7m_setup_cont:
|
|||
cpsie i
|
||||
svc #0
|
||||
1: cpsid i
|
||||
ldr r0, =exc_ret
|
||||
orr lr, lr, #EXC_RET_THREADMODE_PROCESSSTACK
|
||||
str lr, [r0]
|
||||
ldmia sp, {r0-r3, r12}
|
||||
str r5, [r12, #11 * 4] @ restore the original SVC vector entry
|
||||
mov lr, r6 @ restore LR
|
||||
|
|
|
@ -827,11 +827,12 @@ void __init hook_debug_fault_code(int nr,
|
|||
debug_fault_info[nr].name = name;
|
||||
}
|
||||
|
||||
asmlinkage int __exception do_debug_exception(unsigned long addr,
|
||||
asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
|
||||
unsigned int esr,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
|
||||
unsigned long pc = instruction_pointer(regs);
|
||||
int rv;
|
||||
|
||||
/*
|
||||
|
@ -841,10 +842,10 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
|
|||
if (interrupts_enabled(regs))
|
||||
trace_hardirqs_off();
|
||||
|
||||
if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
|
||||
if (user_mode(regs) && pc > TASK_SIZE)
|
||||
arm64_apply_bp_hardening();
|
||||
|
||||
if (!inf->fn(addr, esr, regs)) {
|
||||
if (!inf->fn(addr_if_watchpoint, esr, regs)) {
|
||||
rv = 1;
|
||||
} else {
|
||||
struct siginfo info;
|
||||
|
@ -853,7 +854,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
|
|||
info.si_signo = inf->sig;
|
||||
info.si_errno = 0;
|
||||
info.si_code = inf->code;
|
||||
info.si_addr = (void __user *)addr;
|
||||
info.si_addr = (void __user *)pc;
|
||||
arm64_notify_die(inf->name, regs, &info, esr);
|
||||
rv = 0;
|
||||
}
|
||||
|
|
|
@ -27,7 +27,7 @@ KBUILD_LDFLAGS += $(ldflags-y)
|
|||
CHECKFLAGS += -msize-long
|
||||
|
||||
ifeq ($(CROSS_COMPILE),)
|
||||
CROSS_COMPILE := h8300-unknown-linux-
|
||||
CROSS_COMPILE := $(call cc-cross-prefix, h8300-unknown-linux- h8300-linux-)
|
||||
endif
|
||||
|
||||
core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/
|
||||
|
|
|
@ -132,6 +132,8 @@ static inline void shared_proc_topology_init(void) {}
|
|||
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
|
||||
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
|
||||
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
|
||||
|
||||
int dlpar_cpu_readd(int cpu);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
|
|
@ -994,6 +994,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
|
|||
ld r2,_NIP(r1)
|
||||
mtspr SPRN_SRR0,r2
|
||||
|
||||
/*
|
||||
* Leaving a stale exception_marker on the stack can confuse
|
||||
* the reliable stack unwinder later on. Clear it.
|
||||
*/
|
||||
li r2,0
|
||||
std r2,STACK_FRAME_OVERHEAD-16(r1)
|
||||
|
||||
ld r0,GPR0(r1)
|
||||
ld r2,GPR2(r1)
|
||||
ld r3,GPR3(r1)
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/mm.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/security.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
@ -73,7 +74,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
|
|||
if (addr) {
|
||||
addr = ALIGN(addr, huge_page_size(h));
|
||||
vma = find_vma(mm, addr);
|
||||
if (high_limit - len >= addr &&
|
||||
if (high_limit - len >= addr && addr >= mmap_min_addr &&
|
||||
(!vma || addr + len <= vm_start_gap(vma)))
|
||||
return addr;
|
||||
}
|
||||
|
@ -83,7 +84,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
|
|||
*/
|
||||
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
|
||||
info.length = len;
|
||||
info.low_limit = PAGE_SIZE;
|
||||
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
|
||||
info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
|
||||
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
|
||||
info.align_offset = 0;
|
||||
|
|
|
@ -1461,13 +1461,6 @@ static void reset_topology_timer(void)
|
|||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
static void stage_topology_update(int core_id)
|
||||
{
|
||||
cpumask_or(&cpu_associativity_changes_mask,
|
||||
&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
|
||||
reset_topology_timer();
|
||||
}
|
||||
|
||||
static int dt_update_callback(struct notifier_block *nb,
|
||||
unsigned long action, void *data)
|
||||
{
|
||||
|
@ -1480,7 +1473,7 @@ static int dt_update_callback(struct notifier_block *nb,
|
|||
!of_prop_cmp(update->prop->name, "ibm,associativity")) {
|
||||
u32 core_id;
|
||||
of_property_read_u32(update->dn, "reg", &core_id);
|
||||
stage_topology_update(core_id);
|
||||
rc = dlpar_cpu_readd(core_id);
|
||||
rc = NOTIFY_OK;
|
||||
}
|
||||
break;
|
||||
|
|
|
@ -313,7 +313,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
|
|||
page_shift);
|
||||
tbl->it_level_size = 1ULL << (level_shift - 3);
|
||||
tbl->it_indirect_levels = levels - 1;
|
||||
tbl->it_allocated_size = total_allocated;
|
||||
tbl->it_userspace = uas;
|
||||
tbl->it_nid = nid;
|
||||
|
||||
|
|
|
@ -2603,8 +2603,13 @@ static long pnv_pci_ioda2_create_table_userspace(
|
|||
int num, __u32 page_shift, __u64 window_size, __u32 levels,
|
||||
struct iommu_table **ptbl)
|
||||
{
|
||||
return pnv_pci_ioda2_create_table(table_group,
|
||||
long ret = pnv_pci_ioda2_create_table(table_group,
|
||||
num, page_shift, window_size, levels, true, ptbl);
|
||||
|
||||
if (!ret)
|
||||
(*ptbl)->it_allocated_size = pnv_pci_ioda2_get_table_size(
|
||||
page_shift, window_size, levels);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
|
||||
|
|
|
@ -802,6 +802,25 @@ static int dlpar_cpu_add_by_count(u32 cpus_to_add)
|
|||
return rc;
|
||||
}
|
||||
|
||||
int dlpar_cpu_readd(int cpu)
|
||||
{
|
||||
struct device_node *dn;
|
||||
struct device *dev;
|
||||
u32 drc_index;
|
||||
int rc;
|
||||
|
||||
dev = get_cpu_device(cpu);
|
||||
dn = dev->of_node;
|
||||
|
||||
rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
|
||||
|
||||
rc = dlpar_cpu_remove_by_index(drc_index);
|
||||
if (!rc)
|
||||
rc = dlpar_cpu_add(drc_index);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
|
||||
{
|
||||
u32 count, drc_index;
|
||||
|
|
|
@ -158,7 +158,7 @@ int print_insn_powerpc (unsigned long insn, unsigned long memaddr)
|
|||
dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7
|
||||
| PPC_OPCODE_POWER8 | PPC_OPCODE_POWER9 | PPC_OPCODE_HTM
|
||||
| PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2
|
||||
| PPC_OPCODE_VSX | PPC_OPCODE_VSX3),
|
||||
| PPC_OPCODE_VSX | PPC_OPCODE_VSX3);
|
||||
|
||||
/* Get the major opcode of the insn. */
|
||||
opcode = NULL;
|
||||
|
|
|
@ -1600,7 +1600,7 @@ static void aux_sdb_init(unsigned long sdb)
|
|||
|
||||
/*
|
||||
* aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling
|
||||
* @cpu: On which to allocate, -1 means current
|
||||
* @event: Event the buffer is setup for, event->cpu == -1 means current
|
||||
* @pages: Array of pointers to buffer pages passed from perf core
|
||||
* @nr_pages: Total pages
|
||||
* @snapshot: Flag for snapshot mode
|
||||
|
@ -1612,8 +1612,8 @@ static void aux_sdb_init(unsigned long sdb)
|
|||
*
|
||||
* Return the private AUX buffer structure if success or NULL if fails.
|
||||
*/
|
||||
static void *aux_buffer_setup(int cpu, void **pages, int nr_pages,
|
||||
bool snapshot)
|
||||
static void *aux_buffer_setup(struct perf_event *event, void **pages,
|
||||
int nr_pages, bool snapshot)
|
||||
{
|
||||
struct sf_buffer *sfb;
|
||||
struct aux_buffer *aux;
|
||||
|
|
|
@ -100,7 +100,7 @@ $(obj)/zoffset.h: $(obj)/compressed/vmlinux FORCE
|
|||
AFLAGS_header.o += -I$(objtree)/$(obj)
|
||||
$(obj)/header.o: $(obj)/zoffset.h
|
||||
|
||||
LDFLAGS_setup.elf := -T
|
||||
LDFLAGS_setup.elf := -m elf_i386 -T
|
||||
$(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE
|
||||
$(call if_changed,ld)
|
||||
|
||||
|
|
|
@ -627,6 +627,7 @@ ENTRY(interrupt_entry)
|
|||
|
||||
ret
|
||||
END(interrupt_entry)
|
||||
_ASM_NOKPROBE(interrupt_entry)
|
||||
|
||||
|
||||
/* Interrupt entry/exit. */
|
||||
|
@ -826,6 +827,7 @@ native_irq_return_ldt:
|
|||
jmp native_irq_return_iret
|
||||
#endif
|
||||
END(common_interrupt)
|
||||
_ASM_NOKPROBE(common_interrupt)
|
||||
|
||||
/*
|
||||
* APIC interrupts.
|
||||
|
@ -840,6 +842,7 @@ ENTRY(\sym)
|
|||
call \do_sym /* rdi points to pt_regs */
|
||||
jmp ret_from_intr
|
||||
END(\sym)
|
||||
_ASM_NOKPROBE(\sym)
|
||||
.endm
|
||||
|
||||
/* Make sure APIC interrupt handlers end up in the irqentry section: */
|
||||
|
@ -984,6 +987,7 @@ ENTRY(\sym)
|
|||
|
||||
jmp error_exit
|
||||
.endif
|
||||
_ASM_NOKPROBE(\sym)
|
||||
END(\sym)
|
||||
.endm
|
||||
|
||||
|
|
|
@ -77,10 +77,12 @@ static size_t buf_size(struct page *page)
|
|||
}
|
||||
|
||||
static void *
|
||||
bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
|
||||
bts_buffer_setup_aux(struct perf_event *event, void **pages,
|
||||
int nr_pages, bool overwrite)
|
||||
{
|
||||
struct bts_buffer *buf;
|
||||
struct page *page;
|
||||
int cpu = event->cpu;
|
||||
int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
|
||||
unsigned long offset;
|
||||
size_t size = nr_pages << PAGE_SHIFT;
|
||||
|
|
|
@ -1104,10 +1104,11 @@ static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
|
|||
* Return: Our private PT buffer structure.
|
||||
*/
|
||||
static void *
|
||||
pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot)
|
||||
pt_buffer_setup_aux(struct perf_event *event, void **pages,
|
||||
int nr_pages, bool snapshot)
|
||||
{
|
||||
struct pt_buffer *buf;
|
||||
int node, ret;
|
||||
int node, ret, cpu = event->cpu;
|
||||
|
||||
if (!nr_pages)
|
||||
return NULL;
|
||||
|
|
|
@ -387,6 +387,13 @@ void hyperv_cleanup(void)
|
|||
/* Reset our OS id */
|
||||
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
|
||||
|
||||
/*
|
||||
* Reset hypercall page reference before reset the page,
|
||||
* let hypercall operations fail safely rather than
|
||||
* panic the kernel for using invalid hypercall page
|
||||
*/
|
||||
hv_hypercall_pg = NULL;
|
||||
|
||||
/* Reset the hypercall page */
|
||||
hypercall_msr.as_uint64 = 0;
|
||||
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
||||
|
|
|
@ -411,7 +411,7 @@ SECTIONS
|
|||
* Per-cpu symbols which need to be offset from __per_cpu_load
|
||||
* for the boot processor.
|
||||
*/
|
||||
#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
|
||||
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
|
||||
INIT_PER_CPU(gdt_page);
|
||||
INIT_PER_CPU(irq_stack_union);
|
||||
|
||||
|
|
|
@ -47,7 +47,7 @@ $(obj)/pasyms.h: $(REALMODE_OBJS) FORCE
|
|||
targets += realmode.lds
|
||||
$(obj)/realmode.lds: $(obj)/pasyms.h
|
||||
|
||||
LDFLAGS_realmode.elf := --emit-relocs -T
|
||||
LDFLAGS_realmode.elf := -m elf_i386 --emit-relocs -T
|
||||
CPPFLAGS_realmode.lds += -P -C -I$(objtree)/$(obj)
|
||||
|
||||
targets += realmode.elf
|
||||
|
|
|
@ -2215,7 +2215,8 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
|
|||
|
||||
if (in_service_bfqq && in_service_bfqq != bfqq &&
|
||||
likely(in_service_bfqq != &bfqd->oom_bfqq) &&
|
||||
bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
|
||||
bfq_rq_close_to_sector(io_struct, request,
|
||||
bfqd->in_serv_last_pos) &&
|
||||
bfqq->entity.parent == in_service_bfqq->entity.parent &&
|
||||
bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
|
||||
new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
|
||||
|
@ -2755,6 +2756,8 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
|
|||
bfq_update_rate_reset(bfqd, rq);
|
||||
update_last_values:
|
||||
bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
|
||||
if (RQ_BFQQ(rq) == bfqd->in_service_queue)
|
||||
bfqd->in_serv_last_pos = bfqd->last_position;
|
||||
bfqd->last_dispatch = now_ns;
|
||||
}
|
||||
|
||||
|
|
|
@ -469,6 +469,9 @@ struct bfq_data {
|
|||
/* on-disk position of the last served request */
|
||||
sector_t last_position;
|
||||
|
||||
/* position of the last served request for the in-service queue */
|
||||
sector_t in_serv_last_pos;
|
||||
|
||||
/* time of last request completion (ns) */
|
||||
u64 last_completion;
|
||||
|
||||
|
|
|
@ -2124,21 +2124,29 @@ static int __init intel_opregion_present(void)
|
|||
return opregion;
|
||||
}
|
||||
|
||||
/* Check if the chassis-type indicates there is no builtin LCD panel */
|
||||
static bool dmi_is_desktop(void)
|
||||
{
|
||||
const char *chassis_type;
|
||||
unsigned long type;
|
||||
|
||||
chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
|
||||
if (!chassis_type)
|
||||
return false;
|
||||
|
||||
if (!strcmp(chassis_type, "3") || /* 3: Desktop */
|
||||
!strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
|
||||
!strcmp(chassis_type, "5") || /* 5: Pizza Box */
|
||||
!strcmp(chassis_type, "6") || /* 6: Mini Tower */
|
||||
!strcmp(chassis_type, "7") || /* 7: Tower */
|
||||
!strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
|
||||
if (kstrtoul(chassis_type, 10, &type) != 0)
|
||||
return false;
|
||||
|
||||
switch (type) {
|
||||
case 0x03: /* Desktop */
|
||||
case 0x04: /* Low Profile Desktop */
|
||||
case 0x05: /* Pizza Box */
|
||||
case 0x06: /* Mini Tower */
|
||||
case 0x07: /* Tower */
|
||||
case 0x10: /* Lunch Box */
|
||||
case 0x11: /* Main Server Chassis */
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
|
|
@ -1090,16 +1090,12 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
|
|||
kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
|
||||
}
|
||||
mapping_set_gfp_mask(filp->f_mapping, gfp);
|
||||
lo->lo_state = Lo_unbound;
|
||||
/* This is safe: open() is still holding a reference. */
|
||||
module_put(THIS_MODULE);
|
||||
blk_mq_unfreeze_queue(lo->lo_queue);
|
||||
|
||||
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
|
||||
lo_number = lo->lo_number;
|
||||
lo->lo_flags = 0;
|
||||
if (!part_shift)
|
||||
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
|
||||
loop_unprepare_queue(lo);
|
||||
out_unlock:
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
|
@ -1121,6 +1117,23 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
|
|||
/* Device is gone, no point in returning error */
|
||||
err = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* lo->lo_state is set to Lo_unbound here after above partscan has
|
||||
* finished.
|
||||
*
|
||||
* There cannot be anybody else entering __loop_clr_fd() as
|
||||
* lo->lo_backing_file is already cleared and Lo_rundown state
|
||||
* protects us from all the other places trying to change the 'lo'
|
||||
* device.
|
||||
*/
|
||||
mutex_lock(&loop_ctl_mutex);
|
||||
lo->lo_flags = 0;
|
||||
if (!part_shift)
|
||||
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
|
||||
lo->lo_state = Lo_unbound;
|
||||
mutex_unlock(&loop_ctl_mutex);
|
||||
|
||||
/*
|
||||
* Need not hold loop_ctl_mutex to fput backing file.
|
||||
* Calling fput holding loop_ctl_mutex triggers a circular
|
||||
|
|
|
@ -265,6 +265,7 @@
|
|||
/* #define ERRLOGMASK (CD_WARNING|CD_OPEN|CD_COUNT_TRACKS|CD_CLOSE) */
|
||||
/* #define ERRLOGMASK (CD_WARNING|CD_REG_UNREG|CD_DO_IOCTL|CD_OPEN|CD_CLOSE|CD_COUNT_TRACKS) */
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/major.h>
|
||||
|
@ -3693,9 +3694,9 @@ static struct ctl_table_header *cdrom_sysctl_header;
|
|||
|
||||
static void cdrom_sysctl_register(void)
|
||||
{
|
||||
static int initialized;
|
||||
static atomic_t initialized = ATOMIC_INIT(0);
|
||||
|
||||
if (initialized == 1)
|
||||
if (!atomic_add_unless(&initialized, 1, 1))
|
||||
return;
|
||||
|
||||
cdrom_sysctl_header = register_sysctl_table(cdrom_root_table);
|
||||
|
@ -3706,8 +3707,6 @@ static void cdrom_sysctl_register(void)
|
|||
cdrom_sysctl_settings.debug = debug;
|
||||
cdrom_sysctl_settings.lock = lockdoor;
|
||||
cdrom_sysctl_settings.check = check_media_type;
|
||||
|
||||
initialized = 1;
|
||||
}
|
||||
|
||||
static void cdrom_sysctl_unregister(void)
|
||||
|
|
|
@ -377,7 +377,7 @@ static __init int hpet_mmap_enable(char *str)
|
|||
pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
|
||||
return 1;
|
||||
}
|
||||
__setup("hpet_mmap", hpet_mmap_enable);
|
||||
__setup("hpet_mmap=", hpet_mmap_enable);
|
||||
|
||||
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
{
|
||||
|
|
|
@ -73,7 +73,7 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
|
|||
|
||||
if (!vi->busy) {
|
||||
vi->busy = true;
|
||||
init_completion(&vi->have_data);
|
||||
reinit_completion(&vi->have_data);
|
||||
register_buffer(vi, buf, size);
|
||||
}
|
||||
|
||||
|
|
|
@ -77,7 +77,7 @@ static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
|
|||
unsigned long m, n;
|
||||
u64 ret;
|
||||
|
||||
if (!rate || rate >= *parent_rate)
|
||||
if (!rate || (!clk_hw_can_set_rate_parent(hw) && rate >= *parent_rate))
|
||||
return *parent_rate;
|
||||
|
||||
if (fd->approximation)
|
||||
|
|
|
@ -65,15 +65,20 @@ int meson_aoclkc_probe(struct platform_device *pdev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Populate regmap and register all clks
|
||||
*/
|
||||
for (clkid = 0; clkid < data->num_clks; clkid++) {
|
||||
/* Populate regmap */
|
||||
for (clkid = 0; clkid < data->num_clks; clkid++)
|
||||
data->clks[clkid]->map = regmap;
|
||||
|
||||
/* Register all clks */
|
||||
for (clkid = 0; clkid < data->hw_data->num; clkid++) {
|
||||
if (!data->hw_data->hws[clkid])
|
||||
continue;
|
||||
|
||||
ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
dev_err(dev, "Clock registration failed\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
|
||||
|
|
|
@ -78,17 +78,17 @@ static struct rockchip_pll_rate_table rk3328_pll_rates[] = {
|
|||
|
||||
static struct rockchip_pll_rate_table rk3328_pll_frac_rates[] = {
|
||||
/* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
|
||||
RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134217),
|
||||
RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134218),
|
||||
/* vco = 1016064000 */
|
||||
RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671088),
|
||||
RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671089),
|
||||
/* vco = 983040000 */
|
||||
RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671088),
|
||||
RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671089),
|
||||
/* vco = 983040000 */
|
||||
RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671088),
|
||||
RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671089),
|
||||
/* vco = 860156000 */
|
||||
RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797894),
|
||||
RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797895),
|
||||
/* vco = 903168000 */
|
||||
RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066329),
|
||||
RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066330),
|
||||
/* vco = 819200000 */
|
||||
{ /* sentinel */ },
|
||||
};
|
||||
|
|
|
@ -911,8 +911,10 @@ static void __init acpi_cpufreq_boost_init(void)
|
|||
{
|
||||
int ret;
|
||||
|
||||
if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)))
|
||||
if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
|
||||
pr_debug("Boost capabilities not present in the processor\n");
|
||||
return;
|
||||
}
|
||||
|
||||
acpi_cpufreq_driver.set_boost = set_boost;
|
||||
acpi_cpufreq_driver.boost_enabled = boost_state(0);
|
||||
|
|
|
@ -80,8 +80,10 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
|
|||
|
||||
/* Find the TRNG device node and map it */
|
||||
trng = of_find_matching_node(NULL, ppc4xx_trng_match);
|
||||
if (!trng || !of_device_is_available(trng))
|
||||
if (!trng || !of_device_is_available(trng)) {
|
||||
of_node_put(trng);
|
||||
return;
|
||||
}
|
||||
|
||||
dev->trng_base = of_iomap(trng, 0);
|
||||
of_node_put(trng);
|
||||
|
|
|
@ -351,6 +351,7 @@ static struct pci_driver zip_driver = {
|
|||
|
||||
static struct crypto_alg zip_comp_deflate = {
|
||||
.cra_name = "deflate",
|
||||
.cra_driver_name = "deflate-cavium",
|
||||
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
|
||||
.cra_ctxsize = sizeof(struct zip_kernel_ctx),
|
||||
.cra_priority = 300,
|
||||
|
@ -365,6 +366,7 @@ static struct crypto_alg zip_comp_deflate = {
|
|||
|
||||
static struct crypto_alg zip_comp_lzs = {
|
||||
.cra_name = "lzs",
|
||||
.cra_driver_name = "lzs-cavium",
|
||||
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
|
||||
.cra_ctxsize = sizeof(struct zip_kernel_ctx),
|
||||
.cra_priority = 300,
|
||||
|
@ -384,7 +386,7 @@ static struct scomp_alg zip_scomp_deflate = {
|
|||
.decompress = zip_scomp_decompress,
|
||||
.base = {
|
||||
.cra_name = "deflate",
|
||||
.cra_driver_name = "deflate-scomp",
|
||||
.cra_driver_name = "deflate-scomp-cavium",
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_priority = 300,
|
||||
}
|
||||
|
@ -397,7 +399,7 @@ static struct scomp_alg zip_scomp_lzs = {
|
|||
.decompress = zip_scomp_decompress,
|
||||
.base = {
|
||||
.cra_name = "lzs",
|
||||
.cra_driver_name = "lzs-scomp",
|
||||
.cra_driver_name = "lzs-scomp-cavium",
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_priority = 300,
|
||||
}
|
||||
|
|
|
@ -284,7 +284,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
|
|||
struct scatterlist *sg = d->sg;
|
||||
unsigned long now;
|
||||
|
||||
now = min(d->len, sg_dma_len(sg));
|
||||
now = min_t(size_t, d->len, sg_dma_len(sg));
|
||||
if (d->len != IMX_DMA_LENGTH_LOOP)
|
||||
d->len -= now;
|
||||
|
||||
|
|
|
@ -138,24 +138,25 @@ static void hidma_process_completed(struct hidma_chan *mchan)
|
|||
desc = &mdesc->desc;
|
||||
last_cookie = desc->cookie;
|
||||
|
||||
llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
|
||||
|
||||
spin_lock_irqsave(&mchan->lock, irqflags);
|
||||
if (llstat == DMA_COMPLETE) {
|
||||
mchan->last_success = last_cookie;
|
||||
result.result = DMA_TRANS_NOERROR;
|
||||
} else {
|
||||
result.result = DMA_TRANS_ABORTED;
|
||||
}
|
||||
|
||||
dma_cookie_complete(desc);
|
||||
spin_unlock_irqrestore(&mchan->lock, irqflags);
|
||||
|
||||
llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
|
||||
dmaengine_desc_get_callback(desc, &cb);
|
||||
|
||||
dma_run_dependencies(desc);
|
||||
|
||||
spin_lock_irqsave(&mchan->lock, irqflags);
|
||||
list_move(&mdesc->node, &mchan->free);
|
||||
|
||||
if (llstat == DMA_COMPLETE) {
|
||||
mchan->last_success = last_cookie;
|
||||
result.result = DMA_TRANS_NOERROR;
|
||||
} else
|
||||
result.result = DMA_TRANS_ABORTED;
|
||||
|
||||
spin_unlock_irqrestore(&mchan->lock, irqflags);
|
||||
|
||||
dmaengine_desc_callback_invoke(&cb, &result);
|
||||
|
@ -415,6 +416,7 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
|
|||
if (!mdesc)
|
||||
return NULL;
|
||||
|
||||
mdesc->desc.flags = flags;
|
||||
hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
|
||||
src, dest, len, flags,
|
||||
HIDMA_TRE_MEMCPY);
|
||||
|
@ -447,6 +449,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
|
|||
if (!mdesc)
|
||||
return NULL;
|
||||
|
||||
mdesc->desc.flags = flags;
|
||||
hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
|
||||
value, dest, len, flags,
|
||||
HIDMA_TRE_MEMSET);
|
||||
|
|
|
@ -636,7 +636,10 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
|
|||
|
||||
sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
|
||||
dma_desc = sgreq->dma_desc;
|
||||
dma_desc->bytes_transferred += sgreq->req_len;
|
||||
/* if we dma for long enough the transfer count will wrap */
|
||||
dma_desc->bytes_transferred =
|
||||
(dma_desc->bytes_transferred + sgreq->req_len) %
|
||||
dma_desc->bytes_requested;
|
||||
|
||||
/* Callback need to be call */
|
||||
if (!dma_desc->cb_count)
|
||||
|
|
|
@ -546,19 +546,24 @@ EXPORT_SYMBOL_GPL(cper_estatus_check_header);
|
|||
int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
|
||||
{
|
||||
struct acpi_hest_generic_data *gdata;
|
||||
unsigned int data_len, gedata_len;
|
||||
unsigned int data_len, record_size;
|
||||
int rc;
|
||||
|
||||
rc = cper_estatus_check_header(estatus);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
data_len = estatus->data_length;
|
||||
|
||||
apei_estatus_for_each_section(estatus, gdata) {
|
||||
gedata_len = acpi_hest_get_error_length(gdata);
|
||||
if (gedata_len > data_len - acpi_hest_get_size(gdata))
|
||||
if (sizeof(struct acpi_hest_generic_data) > data_len)
|
||||
return -EINVAL;
|
||||
data_len -= acpi_hest_get_record_size(gdata);
|
||||
|
||||
record_size = acpi_hest_get_record_size(gdata);
|
||||
if (record_size > data_len)
|
||||
return -EINVAL;
|
||||
|
||||
data_len -= record_size;
|
||||
}
|
||||
if (data_len)
|
||||
return -EINVAL;
|
||||
|
|
|
@ -340,6 +340,11 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
|
|||
paddr = in->phys_addr;
|
||||
size = in->num_pages * EFI_PAGE_SIZE;
|
||||
|
||||
if (novamap()) {
|
||||
in->virt_addr = in->phys_addr;
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* Make the mapping compatible with 64k pages: this allows
|
||||
* a 4k page size kernel to kexec a 64k page size kernel and
|
||||
|
|
|
@ -34,6 +34,7 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
|
|||
|
||||
static int __section(.data) __nokaslr;
|
||||
static int __section(.data) __quiet;
|
||||
static int __section(.data) __novamap;
|
||||
|
||||
int __pure nokaslr(void)
|
||||
{
|
||||
|
@ -43,6 +44,10 @@ int __pure is_quiet(void)
|
|||
{
|
||||
return __quiet;
|
||||
}
|
||||
int __pure novamap(void)
|
||||
{
|
||||
return __novamap;
|
||||
}
|
||||
|
||||
#define EFI_MMAP_NR_SLACK_SLOTS 8
|
||||
|
||||
|
@ -482,6 +487,11 @@ efi_status_t efi_parse_options(char const *cmdline)
|
|||
__chunk_size = -1UL;
|
||||
}
|
||||
|
||||
if (!strncmp(str, "novamap", 7)) {
|
||||
str += strlen("novamap");
|
||||
__novamap = 1;
|
||||
}
|
||||
|
||||
/* Group words together, delimited by "," */
|
||||
while (*str && *str != ' ' && *str != ',')
|
||||
str++;
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
|
||||
extern int __pure nokaslr(void);
|
||||
extern int __pure is_quiet(void);
|
||||
extern int __pure novamap(void);
|
||||
|
||||
#define pr_efi(sys_table, msg) do { \
|
||||
if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg); \
|
||||
|
|
|
@ -327,6 +327,9 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
|
|||
if (status == EFI_SUCCESS) {
|
||||
efi_set_virtual_address_map_t *svam;
|
||||
|
||||
if (novamap())
|
||||
return EFI_SUCCESS;
|
||||
|
||||
/* Install the new virtual address map */
|
||||
svam = sys_table->runtime->set_virtual_address_map;
|
||||
status = svam(runtime_entry_count * desc_size, desc_size,
|
||||
|
|
|
@ -94,7 +94,7 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
|
|||
|
||||
if (!(md->attribute & EFI_MEMORY_RUNTIME))
|
||||
continue;
|
||||
if (md->virt_addr == 0) {
|
||||
if (md->virt_addr == 0 && md->phys_addr != 0) {
|
||||
/* no virtual mapping has been installed by the stub */
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -888,14 +888,16 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
|
|||
if (trigger)
|
||||
omap_set_gpio_triggering(bank, offset, trigger);
|
||||
|
||||
/* For level-triggered GPIOs, the clearing must be done after
|
||||
* the HW source is cleared, thus after the handler has run */
|
||||
if (bank->level_mask & BIT(offset)) {
|
||||
omap_set_gpio_irqenable(bank, offset, 0);
|
||||
omap_clear_gpio_irqstatus(bank, offset);
|
||||
}
|
||||
|
||||
omap_set_gpio_irqenable(bank, offset, 1);
|
||||
|
||||
/*
|
||||
* For level-triggered GPIOs, clearing must be done after the source
|
||||
* is cleared, thus after the handler has run. OMAP4 needs this done
|
||||
* after enabing the interrupt to clear the wakeup status.
|
||||
*/
|
||||
if (bank->level_mask & BIT(offset))
|
||||
omap_clear_gpio_irqstatus(bank, offset);
|
||||
|
||||
raw_spin_unlock_irqrestore(&bank->lock, flags);
|
||||
}
|
||||
|
||||
|
|
|
@ -4368,7 +4368,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
|||
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
|
||||
struct dc_stream_state *stream_state)
|
||||
{
|
||||
stream_state->mode_changed = crtc_state->mode_changed;
|
||||
stream_state->mode_changed =
|
||||
crtc_state->mode_changed || crtc_state->active_changed;
|
||||
}
|
||||
|
||||
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
|
||||
|
@ -4389,10 +4390,22 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
|
|||
*/
|
||||
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
|
||||
struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
|
||||
struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
|
||||
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
|
||||
|
||||
if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
|
||||
if (drm_atomic_crtc_needs_modeset(new_crtc_state)
|
||||
&& dm_old_crtc_state->stream) {
|
||||
/*
|
||||
* CRC capture was enabled but not disabled.
|
||||
* Release the vblank reference.
|
||||
*/
|
||||
if (dm_new_crtc_state->crc_enabled) {
|
||||
drm_crtc_vblank_put(crtc);
|
||||
dm_new_crtc_state->crc_enabled = false;
|
||||
}
|
||||
|
||||
manage_dm_interrupts(adev, acrtc, false);
|
||||
}
|
||||
}
|
||||
/* Add check here for SoC's that support hardware cursor plane, to
|
||||
* unset legacy_cursor_update */
|
||||
|
|
|
@ -51,6 +51,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
|
|||
{
|
||||
struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
|
||||
struct dc_stream_state *stream_state = crtc_state->stream;
|
||||
bool enable;
|
||||
|
||||
enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
|
||||
|
||||
|
@ -65,28 +66,27 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
enable = (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO);
|
||||
|
||||
if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
|
||||
enable, enable))
|
||||
return -EINVAL;
|
||||
|
||||
/* When enabling CRC, we should also disable dithering. */
|
||||
if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
|
||||
if (dc_stream_configure_crc(stream_state->ctx->dc,
|
||||
stream_state,
|
||||
true, true)) {
|
||||
crtc_state->crc_enabled = true;
|
||||
dc_stream_set_dither_option(stream_state,
|
||||
DITHER_OPTION_TRUN8);
|
||||
}
|
||||
else
|
||||
return -EINVAL;
|
||||
} else {
|
||||
if (dc_stream_configure_crc(stream_state->ctx->dc,
|
||||
stream_state,
|
||||
false, false)) {
|
||||
crtc_state->crc_enabled = false;
|
||||
dc_stream_set_dither_option(stream_state,
|
||||
DITHER_OPTION_DEFAULT);
|
||||
}
|
||||
else
|
||||
return -EINVAL;
|
||||
}
|
||||
dc_stream_set_dither_option(stream_state,
|
||||
enable ? DITHER_OPTION_TRUN8
|
||||
: DITHER_OPTION_DEFAULT);
|
||||
|
||||
/*
|
||||
* Reading the CRC requires the vblank interrupt handler to be
|
||||
* enabled. Keep a reference until CRC capture stops.
|
||||
*/
|
||||
if (!crtc_state->crc_enabled && enable)
|
||||
drm_crtc_vblank_get(crtc);
|
||||
else if (crtc_state->crc_enabled && !enable)
|
||||
drm_crtc_vblank_put(crtc);
|
||||
|
||||
crtc_state->crc_enabled = enable;
|
||||
|
||||
*values_cnt = 3;
|
||||
/* Reset crc_skipped on dm state */
|
||||
|
@@ -958,6 +958,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
/* pplib is notified if disp_num changed */
dc->hwss.set_bandwidth(dc, context, true);

for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;

dc_release_state(dc->current_state);

dc->current_state = context;

@@ -2336,9 +2336,10 @@ static void dcn10_apply_ctx_for_surface(
}
}

if (!pipe_ctx->plane_state &&
old_pipe_ctx->plane_state &&
old_pipe_ctx->stream_res.tg == tg) {
if ((!pipe_ctx->plane_state ||
pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
old_pipe_ctx->plane_state &&
old_pipe_ctx->stream_res.tg == tg) {

dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
removed_pipe[i] = true;

@@ -3278,6 +3278,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
}
msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

@@ -2877,7 +2877,7 @@ int drm_fb_helper_fbdev_setup(struct drm_device *dev,
return 0;

err_drm_fb_helper_fini:
drm_fb_helper_fini(fb_helper);
drm_fb_helper_fbdev_teardown(dev);

return ret;
}

@@ -211,6 +211,9 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
format_modifier_count++;
}

if (format_modifier_count)
config->allow_fb_modifiers = true;

plane->modifier_count = format_modifier_count;
plane->modifiers = kmalloc_array(format_modifier_count,
sizeof(format_modifiers[0]),

@@ -750,7 +750,9 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
/* Disable the crtc to ensure a full modeset is
* performed whenever it's turned on again. */
if (crtc)
drm_crtc_force_disable(crtc);
drm_crtc_helper_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y,
crtc->primary->fb);
}

return 0;

@@ -300,6 +300,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
dev_dbg(rcdu->dev,
"connected entity %pOF is disabled, skipping\n",
entity);
of_node_put(entity);
return -ENODEV;
}

@@ -335,6 +336,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
dev_warn(rcdu->dev,
"no encoder found for endpoint %pOF, skipping\n",
ep->local_node);
of_node_put(entity);
return -ENODEV;
}

@@ -55,6 +55,9 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,

*vblank_time = output->vblank_hrtimer.node.expires;

if (!in_vblank_irq)
*vblank_time -= output->period_ns;

return true;
}

@@ -91,7 +91,10 @@ static bool check_generated_interrupt(struct ishtp_device *dev)
IPC_INT_FROM_ISH_TO_HOST_CHV_AB(pisr_val);
} else {
pisr_val = ish_reg_read(dev, IPC_REG_PISR_BXT);
interrupt_generated = IPC_INT_FROM_ISH_TO_HOST_BXT(pisr_val);
interrupt_generated = !!pisr_val;
/* only busy-clear bit is RW, others are RO */
if (pisr_val)
ish_reg_write(dev, IPC_REG_PISR_BXT, pisr_val);
}

return interrupt_generated;

@@ -843,11 +846,11 @@ int ish_hw_start(struct ishtp_device *dev)
{
ish_set_host_rdy(dev);

set_host_ready(dev);

/* After that we can enable ISH DMA operation and wakeup ISHFW */
ish_wakeup(dev);

set_host_ready(dev);

/* wait for FW-initiated reset flow */
if (!dev->recvd_hw_ready)
wait_event_interruptible_timeout(dev->wait_hw_ready,

@@ -623,7 +623,8 @@ int ishtp_cl_device_bind(struct ishtp_cl *cl)
spin_lock_irqsave(&cl->dev->device_list_lock, flags);
list_for_each_entry(cl_device, &cl->dev->device_list,
device_link) {
if (cl_device->fw_client->client_id == cl->fw_client_id) {
if (cl_device->fw_client &&
cl_device->fw_client->client_id == cl->fw_client_id) {
cl->device = cl_device;
rv = 0;
break;

@@ -683,6 +684,7 @@ void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
spin_lock_irqsave(&ishtp_dev->device_list_lock, flags);
list_for_each_entry_safe(cl_device, n, &ishtp_dev->device_list,
device_link) {
cl_device->fw_client = NULL;
if (warm_reset && cl_device->reference_count)
continue;
@@ -181,15 +181,15 @@ static void etm_free_aux(void *data)
schedule_work(&event_data->work);
}

static void *etm_setup_aux(int event_cpu, void **pages,
static void *etm_setup_aux(struct perf_event *event, void **pages,
int nr_pages, bool overwrite)
{
int cpu;
int cpu = event->cpu;
cpumask_t *mask;
struct coresight_device *sink;
struct etm_event_data *event_data = NULL;

event_data = alloc_event_data(event_cpu);
event_data = alloc_event_data(cpu);
if (!event_data)
return NULL;
INIT_WORK(&event_data->work, free_event_data);

@@ -54,7 +54,8 @@ static void etm4_os_unlock(struct etmv4_drvdata *drvdata)

static bool etm4_arch_supported(u8 arch)
{
switch (arch) {
/* Mask out the minor version number */
switch (arch & 0xf0) {
case ETM_ARCH_V4:
break;
default:

@@ -121,6 +121,17 @@ static int of_dev_node_match(struct device *dev, void *data)
return dev->of_node == data;
}

static int of_dev_or_parent_node_match(struct device *dev, void *data)
{
if (dev->of_node == data)
return 1;

if (dev->parent)
return dev->parent->of_node == data;

return 0;
}

/* must call put_device() when done with returned i2c_client device */
struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
{

@@ -145,7 +156,8 @@ struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
struct device *dev;
struct i2c_adapter *adapter;

dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
dev = bus_find_device(&i2c_bus_type, NULL, node,
of_dev_or_parent_node_match);
if (!dev)
return NULL;

@@ -423,18 +423,14 @@ static irqreturn_t pm8xxx_eoc_irq(int irq, void *d)
static struct pm8xxx_chan_info *
pm8xxx_get_channel(struct pm8xxx_xoadc *adc, u8 chan)
{
struct pm8xxx_chan_info *ch;
int i;

for (i = 0; i < adc->nchans; i++) {
ch = &adc->chans[i];
struct pm8xxx_chan_info *ch = &adc->chans[i];
if (ch->hwchan->amux_channel == chan)
break;
return ch;
}
if (i == adc->nchans)
return NULL;

return ch;
return NULL;
}

static int pm8xxx_read_channel_rsv(struct pm8xxx_xoadc *adc,

@@ -1904,8 +1904,10 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
}
mutex_unlock(&ep->com.mutex);

if (release)
if (release) {
close_complete_upcall(ep, -ECONNRESET);
release_ep_resources(ep);
}
c4iw_put_ep(&ep->com);
return 0;
}

@@ -3608,7 +3610,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
if (close) {
if (abrupt) {
set_bit(EP_DISC_ABORT, &ep->com.history);
close_complete_upcall(ep, -ECONNRESET);
ret = send_abort(ep);
} else {
set_bit(EP_DISC_CLOSE, &ep->com.history);

@@ -39,7 +39,7 @@

#include "mlx4_ib.h"

#define CM_CLEANUP_CACHE_TIMEOUT (5 * HZ)
#define CM_CLEANUP_CACHE_TIMEOUT (30 * HZ)

struct id_map_entry {
struct rb_node node;

@@ -373,7 +373,7 @@ static struct soc_button_info soc_button_PNP0C40[] = {
{ "home", 1, EV_KEY, KEY_LEFTMETA, false, true },
{ "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false },
{ "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false },
{ "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false },
{ "rotation_lock", 4, EV_KEY, KEY_ROTATE_LOCK_TOGGLE, false, false },
{ }
};

@@ -228,7 +228,8 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
if (dma != phys)
goto out_unmap;
}
kmemleak_ignore(table);
if (lvl == 2)
kmemleak_ignore(table);
return table;

out_unmap:

@@ -201,7 +201,7 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)

if (!fw) {
dev_err(dev, "firmware request failed\n");
goto out;
return;
}

/* handling firmware data is chip dependent */

@@ -214,9 +214,9 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)

mutex_unlock(&chip->lock);

out:
/* firmware should be released for other channel use */
release_firmware(chip->fw);
chip->fw = NULL;
}

static int lp55xx_request_firmware(struct lp55xx_chip *chip)
@@ -283,8 +283,12 @@ STORE(__cached_dev)
sysfs_strtoul_clamp(writeback_rate_update_seconds,
dc->writeback_rate_update_seconds,
1, WRITEBACK_RATE_UPDATE_SECS_MAX);
d_strtoul(writeback_rate_i_term_inverse);
d_strtoul_nonzero(writeback_rate_p_term_inverse);
sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
dc->writeback_rate_i_term_inverse,
1, UINT_MAX);
sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
dc->writeback_rate_p_term_inverse,
1, UINT_MAX);
d_strtoul_nonzero(writeback_rate_minimum);

sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

@@ -295,7 +299,9 @@ STORE(__cached_dev)
dc->io_disable = v ? 1 : 0;
}

d_strtoi_h(sequential_cutoff);
sysfs_strtoul_clamp(sequential_cutoff,
dc->sequential_cutoff,
0, UINT_MAX);
d_strtoi_h(readahead);

if (attr == &sysfs_clear_stats)

@@ -766,8 +772,17 @@ STORE(__bch_cache_set)
c->error_limit = strtoul_or_return(buf);

/* See count_io_errors() for why 88 */
if (attr == &sysfs_io_error_halflife)
c->error_decay = strtoul_or_return(buf) / 88;
if (attr == &sysfs_io_error_halflife) {
unsigned long v = 0;
ssize_t ret;

ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
if (!ret) {
c->error_decay = v / 88;
return size;
}
return ret;
}

if (attr == &sysfs_io_disable) {
v = strtoul_or_return(buf);

@@ -81,9 +81,16 @@ do { \

#define sysfs_strtoul_clamp(file, var, min, max) \
do { \
if (attr == &sysfs_ ## file) \
return strtoul_safe_clamp(buf, var, min, max) \
?: (ssize_t) size; \
if (attr == &sysfs_ ## file) { \
unsigned long v = 0; \
ssize_t ret; \
ret = strtoul_safe_clamp(buf, v, min, max); \
if (!ret) { \
var = v; \
return size; \
} \
return ret; \
} \
} while (0)

#define strtoul_or_return(cp) \

@@ -3283,6 +3283,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
as.argc = argc;
as.argv = argv;

/* make sure metadata and data are different devices */
if (!strcmp(argv[0], argv[1])) {
ti->error = "Error setting metadata or data device";
r = -EINVAL;
goto out_unlock;
}

/*
* Set default pool features.
*/

@@ -4167,6 +4174,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
tc->sort_bio_list = RB_ROOT;

if (argc == 3) {
if (!strcmp(argv[0], argv[2])) {
ti->error = "Error setting origin device";
r = -EINVAL;
goto bad_origin_dev;
}

r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
if (r) {
ti->error = "Error opening origin device";
@@ -1014,6 +1014,8 @@ static int mt9m111_probe(struct i2c_client *client,
mt9m111->rect.top = MT9M111_MIN_DARK_ROWS;
mt9m111->rect.width = MT9M111_MAX_WIDTH;
mt9m111->rect.height = MT9M111_MAX_HEIGHT;
mt9m111->width = mt9m111->rect.width;
mt9m111->height = mt9m111->rect.height;
mt9m111->fmt = &mt9m111_colour_fmts[0];
mt9m111->lastpage = -1;
mutex_init(&mt9m111->power_lock);

@@ -1101,6 +1101,9 @@ static int ov7740_probe(struct i2c_client *client,
if (ret)
return ret;

pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);

ret = ov7740_detect(ov7740);
if (ret)
goto error_detect;

@@ -1123,8 +1126,6 @@ static int ov7740_probe(struct i2c_client *client,
if (ret)
goto error_async_register;

pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
pm_runtime_idle(&client->dev);

return 0;

@@ -1134,6 +1135,8 @@ static int ov7740_probe(struct i2c_client *client,
error_init_controls:
ov7740_free_controls(ov7740);
error_detect:
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
ov7740_set_power(ov7740, 0);
media_entity_cleanup(&ov7740->subdev.entity);

@@ -702,7 +702,7 @@ static void mtk_jpeg_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb));
}

static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
static struct vb2_v4l2_buffer *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
enum v4l2_buf_type type)
{
if (V4L2_TYPE_IS_OUTPUT(type))

@@ -714,7 +714,7 @@ static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
struct vb2_buffer *vb;
struct vb2_v4l2_buffer *vb;
int ret = 0;

ret = pm_runtime_get_sync(ctx->jpeg->dev);

@@ -724,14 +724,14 @@ static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
return 0;
err:
while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_QUEUED);
v4l2_m2m_buf_done(vb, VB2_BUF_STATE_QUEUED);
return ret;
}

static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
{
struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
struct vb2_buffer *vb;
struct vb2_v4l2_buffer *vb;

/*
* STREAMOFF is an acknowledgment for source change event.

@@ -743,7 +743,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
struct mtk_jpeg_src_buf *src_buf;

vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
src_buf = mtk_jpeg_vb2_to_srcbuf(vb);
src_buf = mtk_jpeg_vb2_to_srcbuf(&vb->vb2_buf);
mtk_jpeg_set_queue_data(ctx, &src_buf->dec_param);
ctx->state = MTK_JPEG_RUNNING;
} else if (V4L2_TYPE_IS_OUTPUT(q->type)) {

@@ -751,7 +751,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
}

while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_ERROR);
v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);

pm_runtime_put_sync(ctx->jpeg->dev);
}

@@ -807,7 +807,7 @@ static void mtk_jpeg_device_run(void *priv)
{
struct mtk_jpeg_ctx *ctx = priv;
struct mtk_jpeg_dev *jpeg = ctx->jpeg;
struct vb2_buffer *src_buf, *dst_buf;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
unsigned long flags;
struct mtk_jpeg_src_buf *jpeg_src_buf;

@@ -817,11 +817,11 @@ static void mtk_jpeg_device_run(void *priv)

src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf);
jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);

if (jpeg_src_buf->flags & MTK_JPEG_BUF_FLAGS_LAST_FRAME) {
for (i = 0; i < dst_buf->num_planes; i++)
vb2_set_plane_payload(dst_buf, i, 0);
for (i = 0; i < dst_buf->vb2_buf.num_planes; i++)
vb2_set_plane_payload(&dst_buf->vb2_buf, i, 0);
buf_state = VB2_BUF_STATE_DONE;
goto dec_end;
}

@@ -833,8 +833,8 @@ static void mtk_jpeg_device_run(void *priv)
return;
}

mtk_jpeg_set_dec_src(ctx, src_buf, &bs);
if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, dst_buf, &fb))
mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs);
if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, &dst_buf->vb2_buf, &fb))
goto dec_end;

spin_lock_irqsave(&jpeg->hw_lock, flags);

@@ -849,8 +849,8 @@ static void mtk_jpeg_device_run(void *priv)
dec_end:
v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state);
v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state);
v4l2_m2m_buf_done(src_buf, buf_state);
v4l2_m2m_buf_done(dst_buf, buf_state);
v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
}

@@ -921,7 +921,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
{
struct mtk_jpeg_dev *jpeg = priv;
struct mtk_jpeg_ctx *ctx;
struct vb2_buffer *src_buf, *dst_buf;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct mtk_jpeg_src_buf *jpeg_src_buf;
enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
u32 dec_irq_ret;

@@ -938,7 +938,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)

src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf);
jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);

if (dec_irq_ret >= MTK_JPEG_DEC_RESULT_UNDERFLOW)
mtk_jpeg_dec_reset(jpeg->dec_reg_base);

@@ -948,15 +948,15 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
goto dec_end;
}

for (i = 0; i < dst_buf->num_planes; i++)
vb2_set_plane_payload(dst_buf, i,
for (i = 0; i < dst_buf->vb2_buf.num_planes; i++)
vb2_set_plane_payload(&dst_buf->vb2_buf, i,
jpeg_src_buf->dec_param.comp_size[i]);

buf_state = VB2_BUF_STATE_DONE;

dec_end:
v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state);
v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state);
v4l2_m2m_buf_done(src_buf, buf_state);
v4l2_m2m_buf_done(dst_buf, buf_state);
v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
return IRQ_HANDLED;
}
@@ -274,7 +274,7 @@ static void emmaprp_device_run(void *priv)
{
struct emmaprp_ctx *ctx = priv;
struct emmaprp_q_data *s_q_data, *d_q_data;
struct vb2_buffer *src_buf, *dst_buf;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct emmaprp_dev *pcdev = ctx->dev;
unsigned int s_width, s_height;
unsigned int d_width, d_height;

@@ -294,8 +294,8 @@ static void emmaprp_device_run(void *priv)
d_height = d_q_data->height;
d_size = d_width * d_height;

p_in = vb2_dma_contig_plane_dma_addr(src_buf, 0);
p_out = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
p_in = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
p_out = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
if (!p_in || !p_out) {
v4l2_err(&pcdev->v4l2_dev,
"Acquiring kernel pointers to buffers failed\n");

@@ -131,9 +131,13 @@ static int rvin_group_link_notify(struct media_link *link, u32 flags,
!is_media_entity_v4l2_video_device(link->sink->entity))
return 0;

/* If any entity is in use don't allow link changes. */
/*
* Don't allow link changes if any entity in the graph is
* streaming, modifying the CHSEL register fields can disrupt
* running streams.
*/
media_device_for_each_entity(entity, &group->mdev)
if (entity->use_count)
if (entity->stream_count)
return -EBUSY;

mutex_lock(&group->lock);

@@ -43,7 +43,7 @@ static void device_run(void *prv)
{
struct rga_ctx *ctx = prv;
struct rockchip_rga *rga = ctx->rga;
struct vb2_buffer *src, *dst;
struct vb2_v4l2_buffer *src, *dst;
unsigned long flags;

spin_lock_irqsave(&rga->ctrl_lock, flags);

@@ -53,8 +53,8 @@ static void device_run(void *prv)
src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

rga_buf_map(src);
rga_buf_map(dst);
rga_buf_map(&src->vb2_buf);
rga_buf_map(&dst->vb2_buf);

rga_hw_start(rga);

@@ -487,7 +487,7 @@ static void device_run(void *prv)
{
struct g2d_ctx *ctx = prv;
struct g2d_dev *dev = ctx->dev;
struct vb2_buffer *src, *dst;
struct vb2_v4l2_buffer *src, *dst;
unsigned long flags;
u32 cmd = 0;

@@ -502,10 +502,10 @@ static void device_run(void *prv)
spin_lock_irqsave(&dev->ctrl_lock, flags);

g2d_set_src_size(dev, &ctx->in);
g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(src, 0));
g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0));

g2d_set_dst_size(dev, &ctx->out);
g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(dst, 0));
g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0));

g2d_set_rop4(dev, ctx->rop);
g2d_set_flip(dev, ctx->flip);

@@ -793,14 +793,14 @@ static void skip(struct s5p_jpeg_buffer *buf, long len);
static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct s5p_jpeg_buffer jpeg_buffer;
unsigned int word;
int c, x, components;

jpeg_buffer.size = 2; /* Ls */
jpeg_buffer.data =
(unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sos + 2;
(unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sos + 2;
jpeg_buffer.curr = 0;

word = 0;

@@ -830,14 +830,14 @@ static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct s5p_jpeg_buffer jpeg_buffer;
unsigned int word;
int c, i, n, j;

for (j = 0; j < ctx->out_q.dht.n; ++j) {
jpeg_buffer.size = ctx->out_q.dht.len[j];
jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) +
jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
ctx->out_q.dht.marker[j];
jpeg_buffer.curr = 0;

@@ -889,13 +889,13 @@ static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx)
static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct s5p_jpeg_buffer jpeg_buffer;
int c, x, components;

jpeg_buffer.size = ctx->out_q.sof_len;
jpeg_buffer.data =
(unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sof;
(unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sof;
jpeg_buffer.curr = 0;

skip(&jpeg_buffer, 5); /* P, Y, X */

@@ -920,14 +920,14 @@ static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx)
static void exynos4_jpeg_parse_q_tbl(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct s5p_jpeg_buffer jpeg_buffer;
unsigned int word;
int c, i, j;

for (j = 0; j < ctx->out_q.dqt.n; ++j) {
jpeg_buffer.size = ctx->out_q.dqt.len[j];
jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) +
jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
ctx->out_q.dqt.marker[j];
jpeg_buffer.curr = 0;

@@ -1293,13 +1293,16 @@ static int s5p_jpeg_querycap(struct file *file, void *priv,
return 0;
}

static int enum_fmt(struct s5p_jpeg_fmt *sjpeg_formats, int n,
static int enum_fmt(struct s5p_jpeg_ctx *ctx,
struct s5p_jpeg_fmt *sjpeg_formats, int n,
struct v4l2_fmtdesc *f, u32 type)
{
int i, num = 0;
unsigned int fmt_ver_flag = ctx->jpeg->variant->fmt_ver_flag;

for (i = 0; i < n; ++i) {
if (sjpeg_formats[i].flags & type) {
if (sjpeg_formats[i].flags & type &&
sjpeg_formats[i].flags & fmt_ver_flag) {
/* index-th format of type type found ? */
if (num == f->index)
break;

@@ -1326,11 +1329,11 @@ static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);

if (ctx->mode == S5P_JPEG_ENCODE)
return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
SJPEG_FMT_FLAG_ENC_CAPTURE);

return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
SJPEG_FMT_FLAG_DEC_CAPTURE);
return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
SJPEG_FMT_FLAG_DEC_CAPTURE);
}

static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,

@@ -1339,11 +1342,11 @@ static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);

if (ctx->mode == S5P_JPEG_ENCODE)
return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
SJPEG_FMT_FLAG_ENC_OUTPUT);

return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
SJPEG_FMT_FLAG_DEC_OUTPUT);
return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
SJPEG_FMT_FLAG_DEC_OUTPUT);
}

static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx,
@@ -2072,15 +2075,15 @@ static void s5p_jpeg_device_run(void *priv)
{
struct s5p_jpeg_ctx *ctx = priv;
struct s5p_jpeg *jpeg = ctx->jpeg;
struct vb2_buffer *src_buf, *dst_buf;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
unsigned long src_addr, dst_addr, flags;

spin_lock_irqsave(&ctx->jpeg->slock, flags);

src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);

s5p_jpeg_reset(jpeg->regs);
s5p_jpeg_poweron(jpeg->regs);

@@ -2153,7 +2156,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct s5p_jpeg_fmt *fmt;
struct vb2_buffer *vb;
struct vb2_v4l2_buffer *vb;
struct s5p_jpeg_addr jpeg_addr = {};
u32 pix_size, padding_bytes = 0;

@@ -2172,7 +2175,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
}

jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);

if (fmt->colplanes == 2) {
jpeg_addr.cb = jpeg_addr.y + pix_size - padding_bytes;

@@ -2190,7 +2193,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct vb2_buffer *vb;
struct vb2_v4l2_buffer *vb;
unsigned int jpeg_addr = 0;

if (ctx->mode == S5P_JPEG_ENCODE)

@@ -2198,7 +2201,7 @@ static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
else
vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);

jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
if (jpeg->variant->version == SJPEG_EXYNOS5433 &&
ctx->mode == S5P_JPEG_DECODE)
jpeg_addr += ctx->out_q.sos;

@@ -2314,7 +2317,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct s5p_jpeg_fmt *fmt;
struct vb2_buffer *vb;
struct vb2_v4l2_buffer *vb;
struct s5p_jpeg_addr jpeg_addr = {};
u32 pix_size;

@@ -2328,7 +2331,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
fmt = ctx->cap_q.fmt;
}

jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);

if (fmt->colplanes == 2) {
jpeg_addr.cb = jpeg_addr.y + pix_size;

@@ -2346,7 +2349,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct vb2_buffer *vb;
struct vb2_v4l2_buffer *vb;
unsigned int jpeg_addr = 0;

if (ctx->mode == S5P_JPEG_ENCODE)

@@ -2354,7 +2357,7 @@ static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
else
vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);

jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
exynos3250_jpeg_jpgadr(jpeg->regs, jpeg_addr);
}
@@ -273,13 +273,13 @@ static void sh_veu_process(struct sh_veu_dev *veu,
static void sh_veu_device_run(void *priv)
{
struct sh_veu_dev *veu = priv;
struct vb2_buffer *src_buf, *dst_buf;
struct vb2_v4l2_buffer *src_buf, *dst_buf;

src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx);

if (src_buf && dst_buf)
sh_veu_process(veu, src_buf, dst_buf);
sh_veu_process(veu, &src_buf->vb2_buf, &dst_buf->vb2_buf);
}

/* ========== video ioctls ========== */

@@ -920,7 +920,7 @@ static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_reques
reg &= ~(1 << 5);
OMAP_MMC_WRITE(host, SDIO, reg);
/* Set maximum timeout */
OMAP_MMC_WRITE(host, CTO, 0xff);
OMAP_MMC_WRITE(host, CTO, 0xfd);
}

static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)

@@ -442,12 +442,20 @@ static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)

static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
{
static struct lock_class_key lock_key;
static struct lock_class_key request_key;
int err;

err = mv88e6xxx_g1_irq_setup_common(chip);
if (err)
return err;

/* These lock classes tells lockdep that global 1 irqs are in
* a different category than their parent GPIO, so it won't
* report false recursion.
*/
irq_set_lockdep_class(chip->irq, &lock_key, &request_key);

err = request_threaded_irq(chip->irq, NULL,
mv88e6xxx_g1_irq_thread_fn,
IRQF_ONESHOT,

@@ -119,7 +119,7 @@ static void enic_init_affinity_hint(struct enic *enic)

for (i = 0; i < enic->intr_count; i++) {
if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
(enic->msix[i].affinity_mask &&
(cpumask_available(enic->msix[i].affinity_mask) &&
!cpumask_empty(enic->msix[i].affinity_mask)))
continue;
if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,

@@ -148,7 +148,7 @@ static void enic_set_affinity_hint(struct enic *enic)
for (i = 0; i < enic->intr_count; i++) {
if (enic_is_err_intr(enic, i) ||
enic_is_notify_intr(enic, i) ||
!enic->msix[i].affinity_mask ||
!cpumask_available(enic->msix[i].affinity_mask) ||
cpumask_empty(enic->msix[i].affinity_mask))
continue;
err = irq_set_affinity_hint(enic->msix_entry[i].vector,

@@ -161,7 +161,7 @@ static void enic_set_affinity_hint(struct enic *enic)
for (i = 0; i < enic->wq_count; i++) {
int wq_intr = enic_msix_wq_intr(enic, i);

if (enic->msix[wq_intr].affinity_mask &&
if (cpumask_available(enic->msix[wq_intr].affinity_mask) &&
!cpumask_empty(enic->msix[wq_intr].affinity_mask))
netif_set_xps_queue(enic->netdev,
enic->msix[wq_intr].affinity_mask,

@@ -2106,7 +2106,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
if (strlen(netdev->name) < (IFNAMSIZ - 5))
snprintf(adapter->rx_ring->name,
sizeof(adapter->rx_ring->name) - 1,
"%s-rx-0", netdev->name);
"%.14s-rx-0", netdev->name);
else
memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,

@@ -2122,7 +2122,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
if (strlen(netdev->name) < (IFNAMSIZ - 5))
snprintf(adapter->tx_ring->name,
sizeof(adapter->tx_ring->name) - 1,
"%s-tx-0", netdev->name);
"%.14s-tx-0", netdev->name);
else
memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,

@@ -5286,8 +5286,13 @@ static void e1000_watchdog_task(struct work_struct *work)
/* 8000ES2LAN requires a Rx packet buffer work-around
* on link down event; reset the controller to flush
* the Rx packet buffer.
*
* If the link is lost the controller stops DMA, but
* if there is queued Tx work it cannot be done. So
* reset the controller to flush the Tx packet buffers.
*/
if (adapter->flags & FLAG_RX_NEEDS_RESTART)
if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
e1000_desc_unused(tx_ring) + 1 < tx_ring->count)
adapter->flags |= FLAG_RESTART_NOW;
else
pm_schedule_suspend(netdev->dev.parent,

@@ -5310,14 +5315,6 @@ static void e1000_watchdog_task(struct work_struct *work)
adapter->gotc_old = adapter->stats.gotc;
spin_unlock(&adapter->stats64_lock);

/* If the link is lost the controller stops DMA, but
* if there is queued Tx work it cannot be done. So
* reset the controller to flush the Tx packet buffers.
*/
if (!netif_carrier_ok(netdev) &&
(e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
adapter->flags |= FLAG_RESTART_NOW;

/* If reset is necessary, do it outside of interrupt context. */
if (adapter->flags & FLAG_RESTART_NOW) {
schedule_work(&adapter->reset_task);

@@ -7330,6 +7327,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

e1000_print_device_info(adapter);

dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);

if (pci_dev_run_wake(pdev))
pm_runtime_put_noidle(&pdev->dev);

@@ -1372,13 +1372,9 @@ static void mvpp2_port_reset(struct mvpp2_port *port)
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);

val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
~MVPP2_GMAC_PORT_RESET_MASK;
val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
MVPP2_GMAC_PORT_RESET_MASK;
writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
MVPP2_GMAC_PORT_RESET_MASK)
continue;
}

/* Change maximum receive size of the port */

@@ -4445,12 +4441,15 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
const struct phylink_link_state *state)
{
u32 an, ctrl0, ctrl2, ctrl4;
u32 old_ctrl2;

an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);

old_ctrl2 = ctrl2;

/* Force link down */
an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
an |= MVPP2_GMAC_FORCE_LINK_DOWN;

@@ -4523,6 +4522,12 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
MVPP2_GMAC_PORT_RESET_MASK)
continue;
}
}

static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
@@ -1797,7 +1797,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u64 node_guid;
int err = 0;

if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
return -EPERM;
if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
return -EINVAL;

@@ -1871,7 +1871,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
{
struct mlx5_vport *evport;

if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;

@@ -2044,19 +2044,24 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
u32 max_rate, u32 min_rate)
{
u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
fw_max_bw_share >= MLX5_MIN_BW_SHARE;
bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
struct mlx5_vport *evport;
u32 fw_max_bw_share;
u32 previous_min_rate;
u32 divider;
bool min_rate_supported;
bool max_rate_supported;
int err = 0;

if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;

fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
fw_max_bw_share >= MLX5_MIN_BW_SHARE;
max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);

if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
return -EOPNOTSUPP;

@@ -1988,7 +1988,7 @@ static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
int i;

for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
mlxsw_sp_port_hw_prio_stats[i].str, prio);
*p += ETH_GSTRING_LEN;
}

@@ -1999,7 +1999,7 @@ static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
int i;

for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
mlxsw_sp_port_hw_tc_stats[i].str, tc);
*p += ETH_GSTRING_LEN;
}

@@ -474,7 +474,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
struct dma_desc *p, struct sk_buff *skb)
{
struct skb_shared_hwtstamps shhwtstamp;
u64 ns;
u64 ns = 0;

if (!priv->hwts_tx_en)
return;

@@ -513,7 +513,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
{
struct skb_shared_hwtstamps *shhwtstamp = NULL;
struct dma_desc *desc = p;
u64 ns;
u64 ns = 0;

if (!priv->hwts_rx_en)
return;

@@ -558,8 +558,8 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
u32 snap_type_sel = 0;
u32 ts_master_en = 0;
u32 ts_event_en = 0;
u32 sec_inc = 0;
u32 value = 0;
u32 sec_inc;
bool xmac;

xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

@@ -105,7 +105,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
struct stmmac_priv *priv =
container_of(ptp, struct stmmac_priv, ptp_clock_ops);
unsigned long flags;
u64 ns;
u64 ns = 0;

spin_lock_irqsave(&priv->ptp_lock, flags);
stmmac_get_systime(priv, priv->ptpaddr, &ns);

@@ -147,9 +147,15 @@ int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask)
mmd_mask &= ~BIT(devad);

/* The link state is latched low so that momentary link
* drops can be detected. Do not double-read the status
* register if the link is down.
* drops can be detected. Do not double-read the status
* in polling mode to detect such short link drops.
*/
if (!phy_polling_mode(phydev)) {
val = phy_read_mmd(phydev, devad, MDIO_STAT1);
if (val < 0)
return val;
}

val = phy_read_mmd(phydev, devad, MDIO_STAT1);
if (val < 0)
return val;