Merge android-4.19.24 (cca7d2d) into msm-4.19
* refs/heads/tmp-cca7d2d: Linux 4.19.24 mm: proc: smaps_rollup: fix pss_locked calculation drm/i915: Prevent a race during I915_GEM_MMAP ioctl with WC set drm/i915: Block fbdev HPD processing during suspend drm/vkms: Fix license inconsistent drm: Use array_size() when creating lease dm thin: fix bug where bio that overwrites thin block ignores FUA dm crypt: don't overallocate the integrity tag space x86/a.out: Clear the dump structure initially md/raid1: don't clear bitmap bits on interrupted recovery. signal: Restore the stop PTRACE_EVENT_EXIT scsi: sd: fix entropy gathering for most rotational disks x86/platform/UV: Use efi_runtime_lock to serialise BIOS calls tracing/uprobes: Fix output for multiple string arguments s390/zcrypt: fix specification exception on z196 during ap probe alpha: Fix Eiger NR_IRQS to 128 alpha: fix page fault handling for r16-r18 targets Revert "mm: slowly shrink slabs with a relatively small number of objects" Revert "mm: don't reclaim inodes with many attached pages" Revert "nfsd4: return default lease period" Input: elantech - enable 3rd button support on Fujitsu CELSIUS H780 Input: bma150 - register input device after setting private data mmc: block: handle complete_work on separate workqueue mmc: sunxi: Filter out unsupported modes declared in the device tree kvm: vmx: Fix entry number check for add_atomic_switch_msr() x86/kvm/nVMX: read from MSR_IA32_VMX_PROCBASED_CTLS2 only when it is available riscv: Add pte bit to distinguish swap from invalid tools uapi: fix Alpha support ASoC: hdmi-codec: fix oops on re-probe ALSA: usb-audio: Fix implicit fb endpoint setup by quirk ALSA: hda - Add quirk for HP EliteBook 840 G5 perf/x86: Add check_period PMU callback perf/core: Fix impossible ring-buffer sizes warning ARM: OMAP5+: Fix inverted nirq pin interrupts with irq_set_type Input: elan_i2c - add ACPI ID for touchpad in Lenovo V330-15ISK Revert "Input: elan_i2c - add ACPI ID for touchpad in ASUS Aspire F5-573G" gpio: mxc: move gpio noirq 
suspend/resume to syscore phase CIFS: Do not assume one credit for async responses kvm: sev: Fail KVM_SEV_INIT if already initialized cifs: Limit memory used by lock request calls to a page drm/nouveau/falcon: avoid touching registers if engine is off drm/nouveau: Don't disable polling in fallback mode gpio: pl061: handle failed allocations ARM: dts: kirkwood: Fix polarity of GPIO fan lines ARM: dts: da850-lcdk: Correct the sound card name ARM: dts: da850-lcdk: Correct the audio codec regulators ARM: dts: da850-evm: Correct the sound card name ARM: dts: da850-evm: Correct the audio codec regulators drm/amdgpu: set WRITE_BURST_LENGTH to 64B to workaround SDMA1 hang nvme: pad fake subsys NQN vid and ssvid with zeros nvme-multipath: zero out ANA log buffer nvme-pci: fix out of bounds access in nvme_cqe_pending nvme-pci: use the same attributes when freeing host_mem_desc_bufs. drm/bridge: tc358767: fix output H/V syncs drm/bridge: tc358767: reject modes which require too much BW drm/bridge: tc358767: fix initial DP0/1_SRCCTRL value drm/bridge: tc358767: fix single lane configuration drm/bridge: tc358767: add defines for DP1_SRCCTRL & PHY_2LANE drm/bridge: tc358767: add bus flags cpufreq: check if policy is inactive early in __cpufreq_get() riscv: fix trace_sys_exit hook tools uapi: fix RISC-V 64-bit support perf test shell: Use a fallback to get the pathname in vfs_getname perf report: Fix wrong iteration count in --branch-history ACPI: NUMA: Use correct type for printing addresses on i386-PAE drm/amdgpu/sriov:Correct pfvf exchange logic ARM: fix the cockup in the previous patch ARM: ensure that processor vtables is not lost after boot ARM: spectre-v2: per-CPU vtables to work around big.Little systems ARM: add PROC_VTABLE and PROC_TABLE macros ARM: clean up per-processor check_bugs method call ARM: split out processor lookup ARM: make lookup_processor_type() non-__init ARM: 8810/1: vfp: Fix wrong assignement to ufp_exc ARM: 8797/1: spectre-v1.1: harden __copy_to_user 
ARM: 8796/1: spectre-v1,v1.1: provide helpers for address sanitization ARM: 8795/1: spectre-v1.1: use put_user() for __put_user() ARM: 8794/1: uaccess: Prevent speculative use of the current addr_limit ARM: 8793/1: signal: replace __put_user_error with __put_user ARM: 8792/1: oabi-compat: copy oabi events using __copy_to_user() ARM: 8791/1: vfp: use __copy_to_user() when saving VFP state ARM: 8790/1: signal: always use __copy_to_user to save iwmmxt context ARM: 8789/1: signal: copy registers using __copy_to_user() blk-mq: fix a hung issue when fsync eeprom: at24: add support for 24c2048 dt-bindings: eeprom: at24: add "atmel,24c2048" compatible string Conflicts: drivers/mmc/core/block.c include/linux/mmc/card.h Change-Id: I829d46ab020fcefca26c7d12e03c64c0ca7c3528 Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
This commit is contained in:
commit 64895402bf
97 changed files with 893 additions and 317 deletions
|
@ -27,6 +27,7 @@ Required properties:
|
|||
"atmel,24c256",
|
||||
"atmel,24c512",
|
||||
"atmel,24c1024",
|
||||
"atmel,24c2048",
|
||||
|
||||
If <manufacturer> is not "atmel", then a fallback must be used
|
||||
with the same <model> and "atmel" as manufacturer.
|
||||
|
|
2
Makefile
2
Makefile
|
@ -1,7 +1,7 @@
|
|||
# SPDX-License-Identifier: GPL-2.0
|
||||
VERSION = 4
|
||||
PATCHLEVEL = 19
|
||||
SUBLEVEL = 23
|
||||
SUBLEVEL = 24
|
||||
EXTRAVERSION =
|
||||
NAME = "People's Front"
|
||||
|
||||
|
|
|
@ -56,15 +56,15 @@
|
|||
|
||||
#elif defined(CONFIG_ALPHA_DP264) || \
|
||||
defined(CONFIG_ALPHA_LYNX) || \
|
||||
defined(CONFIG_ALPHA_SHARK) || \
|
||||
defined(CONFIG_ALPHA_EIGER)
|
||||
defined(CONFIG_ALPHA_SHARK)
|
||||
# define NR_IRQS 64
|
||||
|
||||
#elif defined(CONFIG_ALPHA_TITAN)
|
||||
#define NR_IRQS 80
|
||||
|
||||
#elif defined(CONFIG_ALPHA_RAWHIDE) || \
|
||||
defined(CONFIG_ALPHA_TAKARA)
|
||||
defined(CONFIG_ALPHA_TAKARA) || \
|
||||
defined(CONFIG_ALPHA_EIGER)
|
||||
# define NR_IRQS 128
|
||||
|
||||
#elif defined(CONFIG_ALPHA_WILDFIRE)
|
||||
|
|
|
@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
|
|||
/* Macro for exception fixup code to access integer registers. */
|
||||
#define dpf_reg(r) \
|
||||
(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
|
||||
(r) <= 18 ? (r)+8 : (r)-10])
|
||||
(r) <= 18 ? (r)+10 : (r)-10])
|
||||
|
||||
asmlinkage void
|
||||
do_page_fault(unsigned long address, unsigned long mmcsr,
|
||||
|
|
|
@ -94,6 +94,28 @@
|
|||
regulator-boot-on;
|
||||
};
|
||||
|
||||
baseboard_3v3: fixedregulator-3v3 {
|
||||
/* TPS73701DCQ */
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "baseboard_3v3";
|
||||
regulator-min-microvolt = <3300000>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
vin-supply = <&vbat>;
|
||||
regulator-always-on;
|
||||
regulator-boot-on;
|
||||
};
|
||||
|
||||
baseboard_1v8: fixedregulator-1v8 {
|
||||
/* TPS73701DCQ */
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "baseboard_1v8";
|
||||
regulator-min-microvolt = <1800000>;
|
||||
regulator-max-microvolt = <1800000>;
|
||||
vin-supply = <&vbat>;
|
||||
regulator-always-on;
|
||||
regulator-boot-on;
|
||||
};
|
||||
|
||||
backlight_lcd: backlight-regulator {
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "lcd_backlight_pwr";
|
||||
|
@ -105,7 +127,7 @@
|
|||
|
||||
sound {
|
||||
compatible = "simple-audio-card";
|
||||
simple-audio-card,name = "DA850/OMAP-L138 EVM";
|
||||
simple-audio-card,name = "DA850-OMAPL138 EVM";
|
||||
simple-audio-card,widgets =
|
||||
"Line", "Line In",
|
||||
"Line", "Line Out";
|
||||
|
@ -210,10 +232,9 @@
|
|||
|
||||
/* Regulators */
|
||||
IOVDD-supply = <&vdcdc2_reg>;
|
||||
/* Derived from VBAT: Baseboard 3.3V / 1.8V */
|
||||
AVDD-supply = <&vbat>;
|
||||
DRVDD-supply = <&vbat>;
|
||||
DVDD-supply = <&vbat>;
|
||||
AVDD-supply = <&baseboard_3v3>;
|
||||
DRVDD-supply = <&baseboard_3v3>;
|
||||
DVDD-supply = <&baseboard_1v8>;
|
||||
};
|
||||
tca6416: gpio@20 {
|
||||
compatible = "ti,tca6416";
|
||||
|
|
|
@ -39,9 +39,39 @@
|
|||
};
|
||||
};
|
||||
|
||||
vcc_5vd: fixedregulator-vcc_5vd {
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "vcc_5vd";
|
||||
regulator-min-microvolt = <5000000>;
|
||||
regulator-max-microvolt = <5000000>;
|
||||
regulator-boot-on;
|
||||
};
|
||||
|
||||
vcc_3v3d: fixedregulator-vcc_3v3d {
|
||||
/* TPS650250 - VDCDC1 */
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "vcc_3v3d";
|
||||
regulator-min-microvolt = <3300000>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
vin-supply = <&vcc_5vd>;
|
||||
regulator-always-on;
|
||||
regulator-boot-on;
|
||||
};
|
||||
|
||||
vcc_1v8d: fixedregulator-vcc_1v8d {
|
||||
/* TPS650250 - VDCDC2 */
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "vcc_1v8d";
|
||||
regulator-min-microvolt = <1800000>;
|
||||
regulator-max-microvolt = <1800000>;
|
||||
vin-supply = <&vcc_5vd>;
|
||||
regulator-always-on;
|
||||
regulator-boot-on;
|
||||
};
|
||||
|
||||
sound {
|
||||
compatible = "simple-audio-card";
|
||||
simple-audio-card,name = "DA850/OMAP-L138 LCDK";
|
||||
simple-audio-card,name = "DA850-OMAPL138 LCDK";
|
||||
simple-audio-card,widgets =
|
||||
"Line", "Line In",
|
||||
"Line", "Line Out";
|
||||
|
@ -221,6 +251,12 @@
|
|||
compatible = "ti,tlv320aic3106";
|
||||
reg = <0x18>;
|
||||
status = "okay";
|
||||
|
||||
/* Regulators */
|
||||
IOVDD-supply = <&vcc_3v3d>;
|
||||
AVDD-supply = <&vcc_3v3d>;
|
||||
DRVDD-supply = <&vcc_3v3d>;
|
||||
DVDD-supply = <&vcc_1v8d>;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@ -36,8 +36,8 @@
|
|||
compatible = "gpio-fan";
|
||||
pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
|
||||
pinctrl-names = "default";
|
||||
gpios = <&gpio1 14 GPIO_ACTIVE_LOW
|
||||
&gpio1 13 GPIO_ACTIVE_LOW>;
|
||||
gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
|
||||
&gpio1 13 GPIO_ACTIVE_HIGH>;
|
||||
gpio-fan,speed-map = <0 0
|
||||
3000 1
|
||||
6000 2>;
|
||||
|
|
|
@ -317,7 +317,8 @@
|
|||
|
||||
palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
|
||||
pinctrl-single,pins = <
|
||||
OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
|
||||
/* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
|
||||
OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
|
||||
>;
|
||||
};
|
||||
|
||||
|
@ -385,7 +386,8 @@
|
|||
|
||||
palmas: palmas@48 {
|
||||
compatible = "ti,palmas";
|
||||
interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
|
||||
/* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
|
||||
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
|
||||
reg = <0x48>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <2>;
|
||||
|
@ -651,7 +653,8 @@
|
|||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&twl6040_pins>;
|
||||
|
||||
interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
|
||||
/* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
|
||||
interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>;
|
||||
|
||||
/* audpwron gpio defined in the board specific dts */
|
||||
|
||||
|
|
|
@ -181,6 +181,13 @@
|
|||
OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */
|
||||
>;
|
||||
};
|
||||
|
||||
palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
|
||||
pinctrl-single,pins = <
|
||||
/* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
|
||||
OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
|
||||
>;
|
||||
};
|
||||
};
|
||||
|
||||
&omap5_pmx_core {
|
||||
|
@ -414,8 +421,11 @@
|
|||
|
||||
palmas: palmas@48 {
|
||||
compatible = "ti,palmas";
|
||||
interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
|
||||
reg = <0x48>;
|
||||
pinctrl-0 = <&palmas_sys_nirq_pins>;
|
||||
pinctrl-names = "default";
|
||||
/* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
|
||||
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <2>;
|
||||
ti,system-power-controller;
|
||||
|
|
|
@ -467,6 +467,17 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
|
|||
#endif
|
||||
.endm
|
||||
|
||||
.macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
|
||||
#ifdef CONFIG_CPU_SPECTRE
|
||||
sub \tmp, \limit, #1
|
||||
subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
|
||||
addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
|
||||
subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) }
|
||||
movlo \addr, #0 @ if (tmp < 0) addr = NULL
|
||||
csdb
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro uaccess_disable, tmp, isb=1
|
||||
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
|
||||
/*
|
||||
|
|
|
@ -111,6 +111,7 @@
|
|||
#include <linux/kernel.h>
|
||||
|
||||
extern unsigned int processor_id;
|
||||
struct proc_info_list *lookup_processor(u32 midr);
|
||||
|
||||
#ifdef CONFIG_CPU_CP15
|
||||
#define read_cpuid(reg) \
|
||||
|
|
|
@ -23,7 +23,7 @@ struct mm_struct;
|
|||
/*
|
||||
* Don't change this structure - ASM code relies on it.
|
||||
*/
|
||||
extern struct processor {
|
||||
struct processor {
|
||||
/* MISC
|
||||
* get data abort address/flags
|
||||
*/
|
||||
|
@ -79,9 +79,13 @@ extern struct processor {
|
|||
unsigned int suspend_size;
|
||||
void (*do_suspend)(void *);
|
||||
void (*do_resume)(void *);
|
||||
} processor;
|
||||
};
|
||||
|
||||
#ifndef MULTI_CPU
|
||||
static inline void init_proc_vtable(const struct processor *p)
|
||||
{
|
||||
}
|
||||
|
||||
extern void cpu_proc_init(void);
|
||||
extern void cpu_proc_fin(void);
|
||||
extern int cpu_do_idle(void);
|
||||
|
@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
|
|||
extern void cpu_do_suspend(void *);
|
||||
extern void cpu_do_resume(void *);
|
||||
#else
|
||||
#define cpu_proc_init processor._proc_init
|
||||
#define cpu_proc_fin processor._proc_fin
|
||||
#define cpu_reset processor.reset
|
||||
#define cpu_do_idle processor._do_idle
|
||||
#define cpu_dcache_clean_area processor.dcache_clean_area
|
||||
#define cpu_set_pte_ext processor.set_pte_ext
|
||||
#define cpu_do_switch_mm processor.switch_mm
|
||||
|
||||
/* These three are private to arch/arm/kernel/suspend.c */
|
||||
#define cpu_do_suspend processor.do_suspend
|
||||
#define cpu_do_resume processor.do_resume
|
||||
extern struct processor processor;
|
||||
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
|
||||
#include <linux/smp.h>
|
||||
/*
|
||||
* This can't be a per-cpu variable because we need to access it before
|
||||
* per-cpu has been initialised. We have a couple of functions that are
|
||||
* called in a pre-emptible context, and so can't use smp_processor_id()
|
||||
* there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
|
||||
* function pointers for these are identical across all CPUs.
|
||||
*/
|
||||
extern struct processor *cpu_vtable[];
|
||||
#define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f
|
||||
#define PROC_TABLE(f) cpu_vtable[0]->f
|
||||
static inline void init_proc_vtable(const struct processor *p)
|
||||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
*cpu_vtable[cpu] = *p;
|
||||
WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
|
||||
cpu_vtable[0]->dcache_clean_area);
|
||||
WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
|
||||
cpu_vtable[0]->set_pte_ext);
|
||||
}
|
||||
#else
|
||||
#define PROC_VTABLE(f) processor.f
|
||||
#define PROC_TABLE(f) processor.f
|
||||
static inline void init_proc_vtable(const struct processor *p)
|
||||
{
|
||||
processor = *p;
|
||||
}
|
||||
#endif
|
||||
|
||||
#define cpu_proc_init PROC_VTABLE(_proc_init)
|
||||
#define cpu_check_bugs PROC_VTABLE(check_bugs)
|
||||
#define cpu_proc_fin PROC_VTABLE(_proc_fin)
|
||||
#define cpu_reset PROC_VTABLE(reset)
|
||||
#define cpu_do_idle PROC_VTABLE(_do_idle)
|
||||
#define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area)
|
||||
#define cpu_set_pte_ext PROC_TABLE(set_pte_ext)
|
||||
#define cpu_do_switch_mm PROC_VTABLE(switch_mm)
|
||||
|
||||
/* These two are private to arch/arm/kernel/suspend.c */
|
||||
#define cpu_do_suspend PROC_VTABLE(do_suspend)
|
||||
#define cpu_do_resume PROC_VTABLE(do_resume)
|
||||
#endif
|
||||
|
||||
extern void cpu_resume(void);
|
||||
|
|
|
@ -121,8 +121,8 @@ extern void vfp_flush_hwstate(struct thread_info *);
|
|||
struct user_vfp;
|
||||
struct user_vfp_exc;
|
||||
|
||||
extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
|
||||
struct user_vfp_exc __user *);
|
||||
extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
|
||||
struct user_vfp_exc *);
|
||||
extern int vfp_restore_user_hwstate(struct user_vfp *,
|
||||
struct user_vfp_exc *);
|
||||
#endif
|
||||
|
|
|
@ -69,6 +69,14 @@ extern int __put_user_bad(void);
|
|||
static inline void set_fs(mm_segment_t fs)
|
||||
{
|
||||
current_thread_info()->addr_limit = fs;
|
||||
|
||||
/*
|
||||
* Prevent a mispredicted conditional call to set_fs from forwarding
|
||||
* the wrong address limit to access_ok under speculation.
|
||||
*/
|
||||
dsb(nsh);
|
||||
isb();
|
||||
|
||||
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
|
||||
}
|
||||
|
||||
|
@ -91,6 +99,32 @@ static inline void set_fs(mm_segment_t fs)
|
|||
#define __inttype(x) \
|
||||
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
|
||||
|
||||
/*
|
||||
* Sanitise a uaccess pointer such that it becomes NULL if addr+size
|
||||
* is above the current addr_limit.
|
||||
*/
|
||||
#define uaccess_mask_range_ptr(ptr, size) \
|
||||
((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
|
||||
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
|
||||
size_t size)
|
||||
{
|
||||
void __user *safe_ptr = (void __user *)ptr;
|
||||
unsigned long tmp;
|
||||
|
||||
asm volatile(
|
||||
" sub %1, %3, #1\n"
|
||||
" subs %1, %1, %0\n"
|
||||
" addhs %1, %1, #1\n"
|
||||
" subhss %1, %1, %2\n"
|
||||
" movlo %0, #0\n"
|
||||
: "+r" (safe_ptr), "=&r" (tmp)
|
||||
: "r" (size), "r" (current_thread_info()->addr_limit)
|
||||
: "cc");
|
||||
|
||||
csdb();
|
||||
return safe_ptr;
|
||||
}
|
||||
|
||||
/*
|
||||
* Single-value transfer routines. They automatically use the right
|
||||
* size if we just have the right pointer type. Note that the functions
|
||||
|
@ -362,6 +396,14 @@ do { \
|
|||
__pu_err; \
|
||||
})
|
||||
|
||||
#ifdef CONFIG_CPU_SPECTRE
|
||||
/*
|
||||
* When mitigating Spectre variant 1.1, all accessors need to include
|
||||
* verification of the address space.
|
||||
*/
|
||||
#define __put_user(x, ptr) put_user(x, ptr)
|
||||
|
||||
#else
|
||||
#define __put_user(x, ptr) \
|
||||
({ \
|
||||
long __pu_err = 0; \
|
||||
|
@ -369,12 +411,6 @@ do { \
|
|||
__pu_err; \
|
||||
})
|
||||
|
||||
#define __put_user_error(x, ptr, err) \
|
||||
({ \
|
||||
__put_user_switch((x), (ptr), (err), __put_user_nocheck); \
|
||||
(void) 0; \
|
||||
})
|
||||
|
||||
#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
|
||||
do { \
|
||||
unsigned long __pu_addr = (unsigned long)__pu_ptr; \
|
||||
|
@ -454,6 +490,7 @@ do { \
|
|||
: "r" (x), "i" (-EFAULT) \
|
||||
: "cc")
|
||||
|
||||
#endif /* !CONFIG_CPU_SPECTRE */
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
extern unsigned long __must_check
|
||||
|
|
|
@ -6,8 +6,8 @@
|
|||
void check_other_bugs(void)
|
||||
{
|
||||
#ifdef MULTI_CPU
|
||||
if (processor.check_bugs)
|
||||
processor.check_bugs();
|
||||
if (cpu_check_bugs)
|
||||
cpu_check_bugs();
|
||||
#endif
|
||||
}
|
||||
|
||||
|
|
|
@ -145,6 +145,9 @@ __mmap_switched_data:
|
|||
#endif
|
||||
.size __mmap_switched_data, . - __mmap_switched_data
|
||||
|
||||
__FINIT
|
||||
.text
|
||||
|
||||
/*
|
||||
* This provides a C-API version of __lookup_processor_type
|
||||
*/
|
||||
|
@ -156,9 +159,6 @@ ENTRY(lookup_processor_type)
|
|||
ldmfd sp!, {r4 - r6, r9, pc}
|
||||
ENDPROC(lookup_processor_type)
|
||||
|
||||
__FINIT
|
||||
.text
|
||||
|
||||
/*
|
||||
* Read processor ID register (CP#15, CR0), and look up in the linker-built
|
||||
* supported processor list. Note that we can't use the absolute addresses
|
||||
|
|
|
@ -124,6 +124,11 @@ EXPORT_SYMBOL(cold_boot);
|
|||
|
||||
#ifdef MULTI_CPU
|
||||
struct processor processor __ro_after_init;
|
||||
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
|
||||
struct processor *cpu_vtable[NR_CPUS] = {
|
||||
[0] = &processor,
|
||||
};
|
||||
#endif
|
||||
#endif
|
||||
#ifdef MULTI_TLB
|
||||
struct cpu_tlb_fns cpu_tlb __ro_after_init;
|
||||
|
@ -676,28 +681,33 @@ static void __init smp_build_mpidr_hash(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* locate processor in the list of supported processor types. The linker
|
||||
* builds this table for us from the entries in arch/arm/mm/proc-*.S
|
||||
*/
|
||||
struct proc_info_list *lookup_processor(u32 midr)
|
||||
{
|
||||
struct proc_info_list *list = lookup_processor_type(midr);
|
||||
|
||||
if (!list) {
|
||||
pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
|
||||
smp_processor_id(), midr);
|
||||
while (1)
|
||||
/* can't use cpu_relax() here as it may require MMU setup */;
|
||||
}
|
||||
|
||||
return list;
|
||||
}
|
||||
|
||||
static void __init setup_processor(void)
|
||||
{
|
||||
struct proc_info_list *list;
|
||||
|
||||
/*
|
||||
* locate processor in the list of supported processor
|
||||
* types. The linker builds this table for us from the
|
||||
* entries in arch/arm/mm/proc-*.S
|
||||
*/
|
||||
list = lookup_processor_type(read_cpuid_id());
|
||||
if (!list) {
|
||||
pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
|
||||
read_cpuid_id());
|
||||
while (1);
|
||||
}
|
||||
unsigned int midr = read_cpuid_id();
|
||||
struct proc_info_list *list = lookup_processor(midr);
|
||||
|
||||
cpu_name = list->cpu_name;
|
||||
__cpu_architecture = __get_cpu_architecture();
|
||||
|
||||
#ifdef MULTI_CPU
|
||||
processor = *list->proc;
|
||||
#endif
|
||||
init_proc_vtable(list->proc);
|
||||
#ifdef MULTI_TLB
|
||||
cpu_tlb = *list->tlb;
|
||||
#endif
|
||||
|
@ -709,7 +719,7 @@ static void __init setup_processor(void)
|
|||
#endif
|
||||
|
||||
pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
|
||||
cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
|
||||
list->cpu_name, midr, midr & 15,
|
||||
proc_arch[cpu_architecture()], get_cr());
|
||||
|
||||
snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
|
||||
|
|
|
@ -77,8 +77,6 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
|
|||
kframe->magic = IWMMXT_MAGIC;
|
||||
kframe->size = IWMMXT_STORAGE_SIZE;
|
||||
iwmmxt_task_copy(current_thread_info(), &kframe->storage);
|
||||
|
||||
err = __copy_to_user(frame, kframe, sizeof(*frame));
|
||||
} else {
|
||||
/*
|
||||
* For bug-compatibility with older kernels, some space
|
||||
|
@ -86,10 +84,14 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
|
|||
* Set the magic and size appropriately so that properly
|
||||
* written userspace can skip it reliably:
|
||||
*/
|
||||
__put_user_error(DUMMY_MAGIC, &frame->magic, err);
|
||||
__put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
|
||||
*kframe = (struct iwmmxt_sigframe) {
|
||||
.magic = DUMMY_MAGIC,
|
||||
.size = IWMMXT_STORAGE_SIZE,
|
||||
};
|
||||
}
|
||||
|
||||
err = __copy_to_user(frame, kframe, sizeof(*kframe));
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -135,17 +137,18 @@ static int restore_iwmmxt_context(char __user **auxp)
|
|||
|
||||
static int preserve_vfp_context(struct vfp_sigframe __user *frame)
|
||||
{
|
||||
const unsigned long magic = VFP_MAGIC;
|
||||
const unsigned long size = VFP_STORAGE_SIZE;
|
||||
struct vfp_sigframe kframe;
|
||||
int err = 0;
|
||||
|
||||
__put_user_error(magic, &frame->magic, err);
|
||||
__put_user_error(size, &frame->size, err);
|
||||
memset(&kframe, 0, sizeof(kframe));
|
||||
kframe.magic = VFP_MAGIC;
|
||||
kframe.size = VFP_STORAGE_SIZE;
|
||||
|
||||
err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
return err;
|
||||
|
||||
return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
|
||||
return __copy_to_user(frame, &kframe, sizeof(kframe));
|
||||
}
|
||||
|
||||
static int restore_vfp_context(char __user **auxp)
|
||||
|
@ -288,30 +291,35 @@ static int
|
|||
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
|
||||
{
|
||||
struct aux_sigframe __user *aux;
|
||||
struct sigcontext context;
|
||||
int err = 0;
|
||||
|
||||
__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
|
||||
__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
|
||||
__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
|
||||
__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
|
||||
__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
|
||||
__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
|
||||
__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
|
||||
__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
|
||||
__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
|
||||
__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
|
||||
__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
|
||||
__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
|
||||
__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
|
||||
__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
|
||||
__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
|
||||
__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
|
||||
__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
|
||||
context = (struct sigcontext) {
|
||||
.arm_r0 = regs->ARM_r0,
|
||||
.arm_r1 = regs->ARM_r1,
|
||||
.arm_r2 = regs->ARM_r2,
|
||||
.arm_r3 = regs->ARM_r3,
|
||||
.arm_r4 = regs->ARM_r4,
|
||||
.arm_r5 = regs->ARM_r5,
|
||||
.arm_r6 = regs->ARM_r6,
|
||||
.arm_r7 = regs->ARM_r7,
|
||||
.arm_r8 = regs->ARM_r8,
|
||||
.arm_r9 = regs->ARM_r9,
|
||||
.arm_r10 = regs->ARM_r10,
|
||||
.arm_fp = regs->ARM_fp,
|
||||
.arm_ip = regs->ARM_ip,
|
||||
.arm_sp = regs->ARM_sp,
|
||||
.arm_lr = regs->ARM_lr,
|
||||
.arm_pc = regs->ARM_pc,
|
||||
.arm_cpsr = regs->ARM_cpsr,
|
||||
|
||||
__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
|
||||
__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
|
||||
__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
|
||||
__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
|
||||
.trap_no = current->thread.trap_no,
|
||||
.error_code = current->thread.error_code,
|
||||
.fault_address = current->thread.address,
|
||||
.oldmask = set->sig[0],
|
||||
};
|
||||
|
||||
err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
|
||||
|
||||
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
|
||||
|
||||
|
@ -328,7 +336,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
|
|||
if (err == 0)
|
||||
err |= preserve_vfp_context(&aux->vfp);
|
||||
#endif
|
||||
__put_user_error(0, &aux->end_magic, err);
|
||||
err |= __put_user(0, &aux->end_magic);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -491,7 +499,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
|
|||
/*
|
||||
* Set uc.uc_flags to a value which sc.trap_no would never have.
|
||||
*/
|
||||
__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
|
||||
err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
|
||||
|
||||
err |= setup_sigframe(frame, regs, set);
|
||||
if (err == 0)
|
||||
|
@ -511,8 +519,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
|
|||
|
||||
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
|
||||
|
||||
__put_user_error(0, &frame->sig.uc.uc_flags, err);
|
||||
__put_user_error(NULL, &frame->sig.uc.uc_link, err);
|
||||
err |= __put_user(0, &frame->sig.uc.uc_flags);
|
||||
err |= __put_user(NULL, &frame->sig.uc.uc_link);
|
||||
|
||||
err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
|
||||
err |= setup_sigframe(&frame->sig, regs, set);
|
||||
|
|
|
@ -42,6 +42,7 @@
|
|||
#include <asm/mmu_context.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/procinfo.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
|
|||
#endif
|
||||
}
|
||||
|
||||
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
|
||||
static int secondary_biglittle_prepare(unsigned int cpu)
|
||||
{
|
||||
if (!cpu_vtable[cpu])
|
||||
cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
|
||||
|
||||
return cpu_vtable[cpu] ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
static void secondary_biglittle_init(void)
|
||||
{
|
||||
init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
|
||||
}
|
||||
#else
|
||||
static int secondary_biglittle_prepare(unsigned int cpu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void secondary_biglittle_init(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
int __cpu_up(unsigned int cpu, struct task_struct *idle)
|
||||
{
|
||||
int ret;
|
||||
|
@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
|
|||
if (!smp_ops.smp_boot_secondary)
|
||||
return -ENOSYS;
|
||||
|
||||
ret = secondary_biglittle_prepare(cpu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* We need to tell the secondary core where to find
|
||||
* its stack and the page tables.
|
||||
|
@ -359,6 +388,8 @@ asmlinkage void secondary_start_kernel(void)
|
|||
struct mm_struct *mm = &init_mm;
|
||||
unsigned int cpu;
|
||||
|
||||
secondary_biglittle_init();
|
||||
|
||||
/*
|
||||
* The identity mapping is uncached (strongly ordered), so
|
||||
* switch away from it before attempting any exclusive accesses.
|
||||
|
|
|
@ -277,6 +277,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
|
|||
int maxevents, int timeout)
|
||||
{
|
||||
struct epoll_event *kbuf;
|
||||
struct oabi_epoll_event e;
|
||||
mm_segment_t fs;
|
||||
long ret, err, i;
|
||||
|
||||
|
@ -295,8 +296,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
|
|||
set_fs(fs);
|
||||
err = 0;
|
||||
for (i = 0; i < ret; i++) {
|
||||
__put_user_error(kbuf[i].events, &events->events, err);
|
||||
__put_user_error(kbuf[i].data, &events->data, err);
|
||||
e.events = kbuf[i].events;
|
||||
e.data = kbuf[i].data;
|
||||
err = __copy_to_user(events, &e, sizeof(e));
|
||||
if (err)
|
||||
break;
|
||||
events++;
|
||||
}
|
||||
kfree(kbuf);
|
||||
|
|
|
@ -93,11 +93,7 @@ ENTRY(arm_copy_from_user)
|
|||
#ifdef CONFIG_CPU_SPECTRE
|
||||
get_thread_info r3
|
||||
ldr r3, [r3, #TI_ADDR_LIMIT]
|
||||
adds ip, r1, r2 @ ip=addr+size
|
||||
sub r3, r3, #1 @ addr_limit - 1
|
||||
cmpcc ip, r3 @ if (addr+size > addr_limit - 1)
|
||||
movcs r1, #0 @ addr = NULL
|
||||
csdb
|
||||
uaccess_mask_range_ptr r1, r2, r3, ip
|
||||
#endif
|
||||
|
||||
#include "copy_template.S"
|
||||
|
|
|
@ -94,6 +94,11 @@
|
|||
|
||||
ENTRY(__copy_to_user_std)
|
||||
WEAK(arm_copy_to_user)
|
||||
#ifdef CONFIG_CPU_SPECTRE
|
||||
get_thread_info r3
|
||||
ldr r3, [r3, #TI_ADDR_LIMIT]
|
||||
uaccess_mask_range_ptr r0, r2, r3, ip
|
||||
#endif
|
||||
|
||||
#include "copy_template.S"
|
||||
|
||||
|
@ -108,4 +113,3 @@ ENDPROC(__copy_to_user_std)
|
|||
rsb r0, r0, r2
|
||||
copy_abort_end
|
||||
.popsection
|
||||
|
||||
|
|
|
@ -152,7 +152,8 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
|
|||
n = __copy_to_user_std(to, from, n);
|
||||
uaccess_restore(ua_flags);
|
||||
} else {
|
||||
n = __copy_to_user_memcpy(to, from, n);
|
||||
n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
|
||||
from, n);
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
|
|
@ -393,7 +393,11 @@ static int __ref impd1_probe(struct lm_device *dev)
|
|||
sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
|
||||
GFP_KERNEL);
|
||||
chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
|
||||
mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id);
|
||||
mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
|
||||
"lm%x:00700", dev->id);
|
||||
if (!lookup || !chipname || !mmciname)
|
||||
return -ENOMEM;
|
||||
|
||||
lookup->dev_id = mmciname;
|
||||
/*
|
||||
* Offsets on GPIO block 1:
|
||||
|
|
|
@ -50,6 +50,9 @@
|
|||
#define OMAP4_NR_BANKS 4
|
||||
#define OMAP4_NR_IRQS 128
|
||||
|
||||
#define SYS_NIRQ1_EXT_SYS_IRQ_1 7
|
||||
#define SYS_NIRQ2_EXT_SYS_IRQ_2 119
|
||||
|
||||
static void __iomem *wakeupgen_base;
|
||||
static void __iomem *sar_base;
|
||||
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
|
||||
|
@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d)
|
|||
irq_chip_unmask_parent(d);
|
||||
}
|
||||
|
||||
/*
|
||||
* The sys_nirq pins bypass peripheral modules and are wired directly
|
||||
* to MPUSS wakeupgen. They get automatically inverted for GIC.
|
||||
*/
|
||||
static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
|
||||
{
|
||||
bool inverted = false;
|
||||
|
||||
switch (type) {
|
||||
case IRQ_TYPE_LEVEL_LOW:
|
||||
type &= ~IRQ_TYPE_LEVEL_MASK;
|
||||
type |= IRQ_TYPE_LEVEL_HIGH;
|
||||
inverted = true;
|
||||
break;
|
||||
case IRQ_TYPE_EDGE_FALLING:
|
||||
type &= ~IRQ_TYPE_EDGE_BOTH;
|
||||
type |= IRQ_TYPE_EDGE_RISING;
|
||||
inverted = true;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
|
||||
d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
|
||||
pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
|
||||
d->hwirq);
|
||||
|
||||
return irq_chip_set_type_parent(d, type);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
|
||||
|
||||
|
@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = {
|
|||
.irq_mask = wakeupgen_mask,
|
||||
.irq_unmask = wakeupgen_unmask,
|
||||
.irq_retrigger = irq_chip_retrigger_hierarchy,
|
||||
.irq_set_type = irq_chip_set_type_parent,
|
||||
.irq_set_type = wakeupgen_irq_set_type,
|
||||
.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
|
||||
#ifdef CONFIG_SMP
|
||||
.irq_set_affinity = irq_chip_set_affinity_parent,
|
||||
|
|
|
@ -274,6 +274,13 @@
|
|||
.endm
|
||||
|
||||
.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
|
||||
/*
|
||||
* If we are building for big.Little with branch predictor hardening,
|
||||
* we need the processor function tables to remain available after boot.
|
||||
*/
|
||||
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
|
||||
.section ".rodata"
|
||||
#endif
|
||||
.type \name\()_processor_functions, #object
|
||||
.align 2
|
||||
ENTRY(\name\()_processor_functions)
|
||||
|
@ -309,6 +316,9 @@ ENTRY(\name\()_processor_functions)
|
|||
.endif
|
||||
|
||||
.size \name\()_processor_functions, . - \name\()_processor_functions
|
||||
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
|
||||
.previous
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro define_cache_functions name:req
|
||||
|
|
|
@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
|
|||
case ARM_CPU_PART_CORTEX_A17:
|
||||
case ARM_CPU_PART_CORTEX_A73:
|
||||
case ARM_CPU_PART_CORTEX_A75:
|
||||
if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
|
||||
goto bl_error;
|
||||
per_cpu(harden_branch_predictor_fn, cpu) =
|
||||
harden_branch_predictor_bpiall;
|
||||
spectre_v2_method = "BPIALL";
|
||||
|
@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)
|
|||
|
||||
case ARM_CPU_PART_CORTEX_A15:
|
||||
case ARM_CPU_PART_BRAHMA_B15:
|
||||
if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
|
||||
goto bl_error;
|
||||
per_cpu(harden_branch_predictor_fn, cpu) =
|
||||
harden_branch_predictor_iciallu;
|
||||
spectre_v2_method = "ICIALLU";
|
||||
|
@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
|
|||
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
|
||||
if ((int)res.a0 != 0)
|
||||
break;
|
||||
if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
|
||||
goto bl_error;
|
||||
per_cpu(harden_branch_predictor_fn, cpu) =
|
||||
call_hvc_arch_workaround_1;
|
||||
processor.switch_mm = cpu_v7_hvc_switch_mm;
|
||||
cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
|
||||
spectre_v2_method = "hypervisor";
|
||||
break;
|
||||
|
||||
|
@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
|
|||
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
|
||||
if ((int)res.a0 != 0)
|
||||
break;
|
||||
if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
|
||||
goto bl_error;
|
||||
per_cpu(harden_branch_predictor_fn, cpu) =
|
||||
call_smc_arch_workaround_1;
|
||||
processor.switch_mm = cpu_v7_smc_switch_mm;
|
||||
cpu_do_switch_mm = cpu_v7_smc_switch_mm;
|
||||
spectre_v2_method = "firmware";
|
||||
break;
|
||||
|
||||
|
@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
|
|||
if (spectre_v2_method)
|
||||
pr_info("CPU%u: Spectre v2: using %s workaround\n",
|
||||
smp_processor_id(), spectre_v2_method);
|
||||
return;
|
||||
|
||||
bl_error:
|
||||
pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
|
||||
cpu);
|
||||
}
|
||||
#else
|
||||
static void cpu_v7_spectre_init(void)
|
||||
|
|
|
@ -553,12 +553,11 @@ void vfp_flush_hwstate(struct thread_info *thread)
|
|||
* Save the current VFP state into the provided structures and prepare
|
||||
* for entry into a new function (signal handler).
|
||||
*/
|
||||
int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
|
||||
struct user_vfp_exc __user *ufp_exc)
|
||||
int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
|
||||
struct user_vfp_exc *ufp_exc)
|
||||
{
|
||||
struct thread_info *thread = current_thread_info();
|
||||
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
|
||||
int err = 0;
|
||||
|
||||
/* Ensure that the saved hwstate is up-to-date. */
|
||||
vfp_sync_hwstate(thread);
|
||||
|
@ -567,22 +566,19 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
|
|||
* Copy the floating point registers. There can be unused
|
||||
* registers see asm/hwcap.h for details.
|
||||
*/
|
||||
err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
|
||||
sizeof(hwstate->fpregs));
|
||||
memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
|
||||
|
||||
/*
|
||||
* Copy the status and control register.
|
||||
*/
|
||||
__put_user_error(hwstate->fpscr, &ufp->fpscr, err);
|
||||
ufp->fpscr = hwstate->fpscr;
|
||||
|
||||
/*
|
||||
* Copy the exception registers.
|
||||
*/
|
||||
__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
|
||||
__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
|
||||
__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
|
||||
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
ufp_exc->fpexc = hwstate->fpexc;
|
||||
ufp_exc->fpinst = hwstate->fpinst;
|
||||
ufp_exc->fpinst2 = hwstate->fpinst2;
|
||||
|
||||
/* Ensure that VFP is disabled. */
|
||||
vfp_flush_hwstate(thread);
|
||||
|
|
|
@ -35,6 +35,12 @@
|
|||
#define _PAGE_SPECIAL _PAGE_SOFT
|
||||
#define _PAGE_TABLE _PAGE_PRESENT
|
||||
|
||||
/*
|
||||
* _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
|
||||
* distinguish them from swapped out pages
|
||||
*/
|
||||
#define _PAGE_PROT_NONE _PAGE_READ
|
||||
|
||||
#define _PAGE_PFN_SHIFT 10
|
||||
|
||||
/* Set of bits to preserve across pte_modify() */
|
||||
|
|
|
@ -44,7 +44,7 @@
|
|||
/* Page protection bits */
|
||||
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
|
||||
|
||||
#define PAGE_NONE __pgprot(0)
|
||||
#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
|
||||
#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
|
||||
#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
|
||||
#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
|
||||
|
@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
|
|||
|
||||
static inline int pmd_present(pmd_t pmd)
|
||||
{
|
||||
return (pmd_val(pmd) & _PAGE_PRESENT);
|
||||
return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
|
||||
}
|
||||
|
||||
static inline int pmd_none(pmd_t pmd)
|
||||
|
@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
|
|||
|
||||
static inline int pte_present(pte_t pte)
|
||||
{
|
||||
return (pte_val(pte) & _PAGE_PRESENT);
|
||||
return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
|
||||
}
|
||||
|
||||
static inline int pte_none(pte_t pte)
|
||||
|
@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
|
|||
*
|
||||
* Format of swap PTE:
|
||||
* bit 0: _PAGE_PRESENT (zero)
|
||||
* bit 1: reserved for future use (zero)
|
||||
* bit 1: _PAGE_PROT_NONE (zero)
|
||||
* bits 2 to 6: swap type
|
||||
* bits 7 to XLEN-1: swap offset
|
||||
*/
|
||||
|
|
|
@ -120,6 +120,6 @@ void do_syscall_trace_exit(struct pt_regs *regs)
|
|||
|
||||
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
|
||||
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
|
||||
trace_sys_exit(regs, regs->regs[0]);
|
||||
trace_sys_exit(regs, regs_return_value(regs));
|
||||
#endif
|
||||
}
|
||||
|
|
|
@ -2253,6 +2253,19 @@ void perf_check_microcode(void)
|
|||
x86_pmu.check_microcode();
|
||||
}
|
||||
|
||||
static int x86_pmu_check_period(struct perf_event *event, u64 value)
|
||||
{
|
||||
if (x86_pmu.check_period && x86_pmu.check_period(event, value))
|
||||
return -EINVAL;
|
||||
|
||||
if (value && x86_pmu.limit_period) {
|
||||
if (x86_pmu.limit_period(event, value) > value)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct pmu pmu = {
|
||||
.pmu_enable = x86_pmu_enable,
|
||||
.pmu_disable = x86_pmu_disable,
|
||||
|
@ -2277,6 +2290,7 @@ static struct pmu pmu = {
|
|||
.event_idx = x86_pmu_event_idx,
|
||||
.sched_task = x86_pmu_sched_task,
|
||||
.task_ctx_size = sizeof(struct x86_perf_task_context),
|
||||
.check_period = x86_pmu_check_period,
|
||||
};
|
||||
|
||||
void arch_perf_update_userpage(struct perf_event *event,
|
||||
|
|
|
@ -3465,6 +3465,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
|
|||
intel_pmu_lbr_sched_task(ctx, sched_in);
|
||||
}
|
||||
|
||||
static int intel_pmu_check_period(struct perf_event *event, u64 value)
|
||||
{
|
||||
return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
|
||||
}
|
||||
|
||||
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
|
||||
|
||||
PMU_FORMAT_ATTR(ldlat, "config1:0-15");
|
||||
|
@ -3545,6 +3550,8 @@ static __initconst const struct x86_pmu core_pmu = {
|
|||
.cpu_starting = intel_pmu_cpu_starting,
|
||||
.cpu_dying = intel_pmu_cpu_dying,
|
||||
.cpu_dead = intel_pmu_cpu_dead,
|
||||
|
||||
.check_period = intel_pmu_check_period,
|
||||
};
|
||||
|
||||
static struct attribute *intel_pmu_attrs[];
|
||||
|
@ -3589,6 +3596,8 @@ static __initconst const struct x86_pmu intel_pmu = {
|
|||
|
||||
.guest_get_msrs = intel_guest_get_msrs,
|
||||
.sched_task = intel_pmu_sched_task,
|
||||
|
||||
.check_period = intel_pmu_check_period,
|
||||
};
|
||||
|
||||
static __init void intel_clovertown_quirk(void)
|
||||
|
|
|
@ -644,6 +644,11 @@ struct x86_pmu {
|
|||
* Intel host/guest support (KVM)
|
||||
*/
|
||||
struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
|
||||
|
||||
/*
|
||||
* Check period value for PERF_EVENT_IOC_PERIOD ioctl.
|
||||
*/
|
||||
int (*check_period) (struct perf_event *event, u64 period);
|
||||
};
|
||||
|
||||
struct x86_perf_task_context {
|
||||
|
@ -855,7 +860,7 @@ static inline int amd_pmu_init(void)
|
|||
|
||||
#ifdef CONFIG_CPU_SUP_INTEL
|
||||
|
||||
static inline bool intel_pmu_has_bts(struct perf_event *event)
|
||||
static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
unsigned int hw_event, bts_event;
|
||||
|
@ -866,7 +871,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
|
|||
hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
|
||||
bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
|
||||
|
||||
return hw_event == bts_event && hwc->sample_period == 1;
|
||||
return hw_event == bts_event && period == 1;
|
||||
}
|
||||
|
||||
static inline bool intel_pmu_has_bts(struct perf_event *event)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
|
||||
return intel_pmu_has_bts_period(event, hwc->sample_period);
|
||||
}
|
||||
|
||||
int intel_pmu_save_and_restart(struct perf_event *event);
|
||||
|
|
|
@ -51,7 +51,7 @@ static unsigned long get_dr(int n)
|
|||
/*
|
||||
* fill in the user structure for a core dump..
|
||||
*/
|
||||
static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
|
||||
static void fill_dump(struct pt_regs *regs, struct user32 *dump)
|
||||
{
|
||||
u32 fs, gs;
|
||||
memset(dump, 0, sizeof(*dump));
|
||||
|
@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm)
|
|||
fs = get_fs();
|
||||
set_fs(KERNEL_DS);
|
||||
has_dumped = 1;
|
||||
|
||||
fill_dump(cprm->regs, &dump);
|
||||
|
||||
strncpy(dump.u_comm, current->comm, sizeof(current->comm));
|
||||
dump.u_ar0 = offsetof(struct user32, regs);
|
||||
dump.signal = cprm->siginfo->si_signo;
|
||||
dump_thread32(cprm->regs, &dump);
|
||||
|
||||
/*
|
||||
* If the size of the dump file exceeds the rlimit, then see
|
||||
|
|
|
@ -48,7 +48,8 @@ enum {
|
|||
BIOS_STATUS_SUCCESS = 0,
|
||||
BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
|
||||
BIOS_STATUS_EINVAL = -EINVAL,
|
||||
BIOS_STATUS_UNAVAIL = -EBUSY
|
||||
BIOS_STATUS_UNAVAIL = -EBUSY,
|
||||
BIOS_STATUS_ABORT = -EINTR,
|
||||
};
|
||||
|
||||
/* Address map parameters */
|
||||
|
@ -167,4 +168,9 @@ extern long system_serial_number;
|
|||
|
||||
extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
|
||||
|
||||
/*
|
||||
* EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
|
||||
*/
|
||||
extern struct semaphore __efi_uv_runtime_lock;
|
||||
|
||||
#endif /* _ASM_X86_UV_BIOS_H */
|
||||
|
|
|
@ -6256,6 +6256,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
|
|||
int asid, ret;
|
||||
|
||||
ret = -EBUSY;
|
||||
if (unlikely(sev->active))
|
||||
return ret;
|
||||
|
||||
asid = sev_asid_new();
|
||||
if (asid < 0)
|
||||
return ret;
|
||||
|
|
|
@ -2757,7 +2757,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
|
|||
if (!entry_only)
|
||||
j = find_msr(&m->host, msr);
|
||||
|
||||
if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
|
||||
if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
|
||||
(j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
|
||||
printk_once(KERN_WARNING "Not enough msr switch entries. "
|
||||
"Can't add msr %x\n", msr);
|
||||
return;
|
||||
|
@ -3601,9 +3602,11 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
|
|||
* secondary cpu-based controls. Do not include those that
|
||||
* depend on CPUID bits, they are added later by vmx_cpuid_update.
|
||||
*/
|
||||
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
|
||||
msrs->secondary_ctls_low,
|
||||
msrs->secondary_ctls_high);
|
||||
if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
|
||||
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
|
||||
msrs->secondary_ctls_low,
|
||||
msrs->secondary_ctls_high);
|
||||
|
||||
msrs->secondary_ctls_low = 0;
|
||||
msrs->secondary_ctls_high &=
|
||||
SECONDARY_EXEC_DESC |
|
||||
|
|
|
@ -29,7 +29,8 @@
|
|||
|
||||
struct uv_systab *uv_systab;
|
||||
|
||||
s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
|
||||
static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
|
||||
u64 a4, u64 a5)
|
||||
{
|
||||
struct uv_systab *tab = uv_systab;
|
||||
s64 ret;
|
||||
|
@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
|
|||
|
||||
return ret;
|
||||
}
|
||||
|
||||
s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
|
||||
{
|
||||
s64 ret;
|
||||
|
||||
if (down_interruptible(&__efi_uv_runtime_lock))
|
||||
return BIOS_STATUS_ABORT;
|
||||
|
||||
ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
|
||||
up(&__efi_uv_runtime_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(uv_bios_call);
|
||||
|
||||
s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
|
||||
|
@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
|
|||
unsigned long bios_flags;
|
||||
s64 ret;
|
||||
|
||||
if (down_interruptible(&__efi_uv_runtime_lock))
|
||||
return BIOS_STATUS_ABORT;
|
||||
|
||||
local_irq_save(bios_flags);
|
||||
ret = uv_bios_call(which, a1, a2, a3, a4, a5);
|
||||
ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
|
||||
local_irq_restore(bios_flags);
|
||||
|
||||
up(&__efi_uv_runtime_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -424,7 +424,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
|
|||
blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
|
||||
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
|
||||
|
||||
blk_mq_run_hw_queue(hctx, true);
|
||||
blk_mq_sched_restart(hctx);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -147,9 +147,9 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
|
|||
{
|
||||
struct acpi_srat_mem_affinity *p =
|
||||
(struct acpi_srat_mem_affinity *)header;
|
||||
pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
|
||||
(unsigned long)p->base_address,
|
||||
(unsigned long)p->length,
|
||||
pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
|
||||
(unsigned long long)p->base_address,
|
||||
(unsigned long long)p->length,
|
||||
p->proximity_domain,
|
||||
(p->flags & ACPI_SRAT_MEM_ENABLED) ?
|
||||
"enabled" : "disabled",
|
||||
|
|
|
@ -1542,17 +1542,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
|
|||
{
|
||||
unsigned int ret_freq = 0;
|
||||
|
||||
if (!cpufreq_driver->get)
|
||||
if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
|
||||
return ret_freq;
|
||||
|
||||
ret_freq = cpufreq_driver->get(policy->cpu);
|
||||
|
||||
/*
|
||||
* Updating inactive policies is invalid, so avoid doing that. Also
|
||||
* if fast frequency switching is used with the given policy, the check
|
||||
* If fast frequency switching is used with the given policy, the check
|
||||
* against policy->cur is pointless, so skip it in that case too.
|
||||
*/
|
||||
if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
|
||||
if (policy->fast_switch_enabled)
|
||||
return ret_freq;
|
||||
|
||||
if (ret_freq && policy->cur &&
|
||||
|
@ -1581,10 +1580,7 @@ unsigned int cpufreq_get(unsigned int cpu)
|
|||
|
||||
if (policy) {
|
||||
down_read(&policy->rwsem);
|
||||
|
||||
if (!policy_is_inactive(policy))
|
||||
ret_freq = __cpufreq_get(policy);
|
||||
|
||||
ret_freq = __cpufreq_get(policy);
|
||||
up_read(&policy->rwsem);
|
||||
|
||||
cpufreq_cpu_put(policy);
|
||||
|
|
|
@ -172,6 +172,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
|
|||
*/
|
||||
static DEFINE_SEMAPHORE(efi_runtime_lock);
|
||||
|
||||
/*
|
||||
* Expose the EFI runtime lock to the UV platform
|
||||
*/
|
||||
#ifdef CONFIG_X86_UV
|
||||
extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Calls the appropriate efi_runtime_service() with the appropriate
|
||||
* arguments.
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include <linux/irqchip/chained_irq.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/syscore_ops.h>
|
||||
#include <linux/gpio/driver.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
|
@ -550,33 +551,38 @@ static void mxc_gpio_restore_regs(struct mxc_gpio_port *port)
|
|||
writel(port->gpio_saved_reg.dr, port->base + GPIO_DR);
|
||||
}
|
||||
|
||||
static int __maybe_unused mxc_gpio_noirq_suspend(struct device *dev)
|
||||
static int mxc_gpio_syscore_suspend(void)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct mxc_gpio_port *port = platform_get_drvdata(pdev);
|
||||
struct mxc_gpio_port *port;
|
||||
|
||||
mxc_gpio_save_regs(port);
|
||||
clk_disable_unprepare(port->clk);
|
||||
/* walk through all ports */
|
||||
list_for_each_entry(port, &mxc_gpio_ports, node) {
|
||||
mxc_gpio_save_regs(port);
|
||||
clk_disable_unprepare(port->clk);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __maybe_unused mxc_gpio_noirq_resume(struct device *dev)
|
||||
static void mxc_gpio_syscore_resume(void)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct mxc_gpio_port *port = platform_get_drvdata(pdev);
|
||||
struct mxc_gpio_port *port;
|
||||
int ret;
|
||||
|
||||
ret = clk_prepare_enable(port->clk);
|
||||
if (ret)
|
||||
return ret;
|
||||
mxc_gpio_restore_regs(port);
|
||||
|
||||
return 0;
|
||||
/* walk through all ports */
|
||||
list_for_each_entry(port, &mxc_gpio_ports, node) {
|
||||
ret = clk_prepare_enable(port->clk);
|
||||
if (ret) {
|
||||
pr_err("mxc: failed to enable gpio clock %d\n", ret);
|
||||
return;
|
||||
}
|
||||
mxc_gpio_restore_regs(port);
|
||||
}
|
||||
}
|
||||
|
||||
static const struct dev_pm_ops mxc_gpio_dev_pm_ops = {
|
||||
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mxc_gpio_noirq_suspend, mxc_gpio_noirq_resume)
|
||||
static struct syscore_ops mxc_gpio_syscore_ops = {
|
||||
.suspend = mxc_gpio_syscore_suspend,
|
||||
.resume = mxc_gpio_syscore_resume,
|
||||
};
|
||||
|
||||
static struct platform_driver mxc_gpio_driver = {
|
||||
|
@ -584,7 +590,6 @@ static struct platform_driver mxc_gpio_driver = {
|
|||
.name = "gpio-mxc",
|
||||
.of_match_table = mxc_gpio_dt_ids,
|
||||
.suppress_bind_attrs = true,
|
||||
.pm = &mxc_gpio_dev_pm_ops,
|
||||
},
|
||||
.probe = mxc_gpio_probe,
|
||||
.id_table = mxc_gpio_devtype,
|
||||
|
@ -592,6 +597,8 @@ static struct platform_driver mxc_gpio_driver = {
|
|||
|
||||
static int __init gpio_mxc_init(void)
|
||||
{
|
||||
register_syscore_ops(&mxc_gpio_syscore_ops);
|
||||
|
||||
return platform_driver_register(&mxc_gpio_driver);
|
||||
}
|
||||
subsys_initcall(gpio_mxc_init);
|
||||
|
|
|
@ -1653,8 +1653,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
|
|||
|
||||
amdgpu_amdkfd_device_init(adev);
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
if (amdgpu_sriov_vf(adev)) {
|
||||
amdgpu_virt_init_data_exchange(adev);
|
||||
amdgpu_virt_release_full_gpu(adev, true);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2555,9 +2557,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
|||
goto failed;
|
||||
}
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
amdgpu_virt_init_data_exchange(adev);
|
||||
|
||||
amdgpu_fbdev_init(adev);
|
||||
|
||||
r = amdgpu_pm_sysfs_init(adev);
|
||||
|
@ -3269,6 +3268,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
|
|||
r = amdgpu_ib_ring_tests(adev);
|
||||
|
||||
error:
|
||||
amdgpu_virt_init_data_exchange(adev);
|
||||
amdgpu_virt_release_full_gpu(adev, true);
|
||||
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
|
||||
atomic_inc(&adev->vram_lost_counter);
|
||||
|
|
|
@ -174,7 +174,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
|
|||
return r;
|
||||
}
|
||||
/* Retrieve checksum from mailbox2 */
|
||||
if (req == IDH_REQ_GPU_INIT_ACCESS) {
|
||||
if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
|
||||
adev->virt.fw_reserve.checksum_key =
|
||||
RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
|
||||
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
|
||||
|
|
|
@ -71,7 +71,6 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
|
|||
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
|
||||
|
@ -89,6 +88,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
|
|||
static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
|
||||
};
|
||||
|
@ -96,6 +96,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
|
|||
static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
|
||||
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
|
||||
};
|
||||
|
|
|
@ -98,6 +98,8 @@
|
|||
#define DP0_STARTVAL 0x064c
|
||||
#define DP0_ACTIVEVAL 0x0650
|
||||
#define DP0_SYNCVAL 0x0654
|
||||
#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15)
|
||||
#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31)
|
||||
#define DP0_MISC 0x0658
|
||||
#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
|
||||
#define BPC_6 (0 << 5)
|
||||
|
@ -142,6 +144,8 @@
|
|||
#define DP0_LTLOOPCTRL 0x06d8
|
||||
#define DP0_SNKLTCTRL 0x06e4
|
||||
|
||||
#define DP1_SRCCTRL 0x07a0
|
||||
|
||||
/* PHY */
|
||||
#define DP_PHY_CTRL 0x0800
|
||||
#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
|
||||
|
@ -150,6 +154,7 @@
|
|||
#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
|
||||
#define PHY_RDY BIT(16) /* PHY Main Channels Ready */
|
||||
#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
|
||||
#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */
|
||||
#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
|
||||
#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
|
||||
|
||||
|
@ -540,6 +545,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
|
|||
unsigned long rate;
|
||||
u32 value;
|
||||
int ret;
|
||||
u32 dp_phy_ctrl;
|
||||
|
||||
rate = clk_get_rate(tc->refclk);
|
||||
switch (rate) {
|
||||
|
@ -564,7 +570,10 @@ static int tc_aux_link_setup(struct tc_data *tc)
|
|||
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
|
||||
tc_write(SYS_PLLPARAM, value);
|
||||
|
||||
tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
|
||||
dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
|
||||
if (tc->link.base.num_lanes == 2)
|
||||
dp_phy_ctrl |= PHY_2LANE;
|
||||
tc_write(DP_PHY_CTRL, dp_phy_ctrl);
|
||||
|
||||
/*
|
||||
* Initially PLLs are in bypass. Force PLL parameter update,
|
||||
|
@ -719,7 +728,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
|
|||
|
||||
tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
|
||||
|
||||
tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
|
||||
tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
|
||||
((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
|
||||
((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
|
||||
|
||||
tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
|
||||
DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
|
||||
|
@ -829,12 +840,11 @@ static int tc_main_link_setup(struct tc_data *tc)
|
|||
if (!tc->mode)
|
||||
return -EINVAL;
|
||||
|
||||
/* from excel file - DP0_SrcCtrl */
|
||||
tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
|
||||
DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
|
||||
DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
|
||||
/* from excel file - DP1_SrcCtrl */
|
||||
tc_write(0x07a0, 0x00003083);
|
||||
tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
|
||||
/* SSCG and BW27 on DP1 must be set to the same as on DP0 */
|
||||
tc_write(DP1_SRCCTRL,
|
||||
(tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
|
||||
((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
|
||||
|
||||
rate = clk_get_rate(tc->refclk);
|
||||
switch (rate) {
|
||||
|
@ -855,8 +865,11 @@ static int tc_main_link_setup(struct tc_data *tc)
|
|||
}
|
||||
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
|
||||
tc_write(SYS_PLLPARAM, value);
|
||||
|
||||
/* Setup Main Link */
|
||||
dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN;
|
||||
dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
|
||||
if (tc->link.base.num_lanes == 2)
|
||||
dp_phy_ctrl |= PHY_2LANE;
|
||||
tc_write(DP_PHY_CTRL, dp_phy_ctrl);
|
||||
msleep(100);
|
||||
|
||||
|
@ -1105,10 +1118,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
|
|||
static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
struct tc_data *tc = connector_to_tc(connector);
|
||||
u32 req, avail;
|
||||
u32 bits_per_pixel = 24;
|
||||
|
||||
/* DPI interface clock limitation: upto 154 MHz */
|
||||
if (mode->clock > 154000)
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
req = mode->clock * bits_per_pixel / 8;
|
||||
avail = tc->link.base.num_lanes * tc->link.base.rate;
|
||||
|
||||
if (req > avail)
|
||||
return MODE_BAD;
|
||||
|
||||
return MODE_OK;
|
||||
}
|
||||
|
||||
|
@ -1195,6 +1218,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
|
|||
|
||||
drm_display_info_set_bus_formats(&tc->connector.display_info,
|
||||
&bus_format, 1);
|
||||
tc->connector.display_info.bus_flags =
|
||||
DRM_BUS_FLAG_DE_HIGH |
|
||||
DRM_BUS_FLAG_PIXDATA_NEGEDGE |
|
||||
DRM_BUS_FLAG_SYNC_NEGEDGE;
|
||||
drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -521,7 +521,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
|
|||
|
||||
object_count = cl->object_count;
|
||||
|
||||
object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32));
|
||||
object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
|
||||
array_size(object_count, sizeof(__u32)));
|
||||
if (IS_ERR(object_ids))
|
||||
return PTR_ERR(object_ids);
|
||||
|
||||
|
|
|
@ -1821,6 +1821,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
__vma_matches(struct vm_area_struct *vma, struct file *filp,
|
||||
unsigned long addr, unsigned long size)
|
||||
{
|
||||
if (vma->vm_file != filp)
|
||||
return false;
|
||||
|
||||
return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
|
||||
* it is mapped to.
|
||||
|
@ -1879,7 +1889,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
|
|||
return -EINTR;
|
||||
}
|
||||
vma = find_vma(mm, addr);
|
||||
if (vma)
|
||||
if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
|
||||
vma->vm_page_prot =
|
||||
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
|
||||
else
|
||||
|
|
|
@ -209,6 +209,16 @@ struct intel_fbdev {
|
|||
unsigned long vma_flags;
|
||||
async_cookie_t cookie;
|
||||
int preferred_bpp;
|
||||
|
||||
/* Whether or not fbdev hpd processing is temporarily suspended */
|
||||
bool hpd_suspended : 1;
|
||||
/* Set when a hotplug was received while HPD processing was
|
||||
* suspended
|
||||
*/
|
||||
bool hpd_waiting : 1;
|
||||
|
||||
/* Protects hpd_suspended */
|
||||
struct mutex hpd_lock;
|
||||
};
|
||||
|
||||
struct intel_encoder {
|
||||
|
|
|
@ -677,6 +677,7 @@ int intel_fbdev_init(struct drm_device *dev)
|
|||
if (ifbdev == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_init(&ifbdev->hpd_lock);
|
||||
drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
|
||||
|
||||
if (!intel_fbdev_init_bios(dev, ifbdev))
|
||||
|
@ -750,6 +751,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
|
|||
intel_fbdev_destroy(ifbdev);
|
||||
}
|
||||
|
||||
/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
|
||||
* processing, fbdev will perform a full connector reprobe if a hotplug event
|
||||
* was received while HPD was suspended.
|
||||
*/
|
||||
static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
|
||||
{
|
||||
bool send_hpd = false;
|
||||
|
||||
mutex_lock(&ifbdev->hpd_lock);
|
||||
ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
|
||||
send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
|
||||
ifbdev->hpd_waiting = false;
|
||||
mutex_unlock(&ifbdev->hpd_lock);
|
||||
|
||||
if (send_hpd) {
|
||||
DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
|
||||
drm_fb_helper_hotplug_event(&ifbdev->helper);
|
||||
}
|
||||
}
|
||||
|
||||
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
@ -771,6 +792,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
|
|||
*/
|
||||
if (state != FBINFO_STATE_RUNNING)
|
||||
flush_work(&dev_priv->fbdev_suspend_work);
|
||||
|
||||
console_lock();
|
||||
} else {
|
||||
/*
|
||||
|
@ -798,17 +820,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
|
|||
|
||||
drm_fb_helper_set_suspend(&ifbdev->helper, state);
|
||||
console_unlock();
|
||||
|
||||
intel_fbdev_hpd_set_suspend(ifbdev, state);
|
||||
}
|
||||
|
||||
void intel_fbdev_output_poll_changed(struct drm_device *dev)
|
||||
{
|
||||
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
|
||||
bool send_hpd;
|
||||
|
||||
if (!ifbdev)
|
||||
return;
|
||||
|
||||
intel_fbdev_sync(ifbdev);
|
||||
if (ifbdev->vma || ifbdev->helper.deferred_setup)
|
||||
|
||||
mutex_lock(&ifbdev->hpd_lock);
|
||||
send_hpd = !ifbdev->hpd_suspended;
|
||||
ifbdev->hpd_waiting = true;
|
||||
mutex_unlock(&ifbdev->hpd_lock);
|
||||
|
||||
if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
|
||||
drm_fb_helper_hotplug_event(&ifbdev->helper);
|
||||
}
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include <engine/falcon.h>
|
||||
|
||||
#include <core/gpuobj.h>
|
||||
#include <subdev/mc.h>
|
||||
#include <subdev/timer.h>
|
||||
#include <engine/fifo.h>
|
||||
|
||||
|
@ -107,8 +108,10 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
|
|||
}
|
||||
}
|
||||
|
||||
nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
|
||||
nvkm_wr32(device, base + 0x014, 0xffffffff);
|
||||
if (nvkm_mc_enabled(device, engine->subdev.index)) {
|
||||
nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
|
||||
nvkm_wr32(device, base + 0x014, 0xffffffff);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -132,11 +132,12 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
|
|||
duty = nvkm_therm_update_linear(therm);
|
||||
break;
|
||||
case NVBIOS_THERM_FAN_OTHER:
|
||||
if (therm->cstate)
|
||||
if (therm->cstate) {
|
||||
duty = therm->cstate;
|
||||
else
|
||||
poll = false;
|
||||
} else {
|
||||
duty = nvkm_therm_update_linear_fallback(therm);
|
||||
poll = false;
|
||||
}
|
||||
break;
|
||||
}
|
||||
immd = false;
|
||||
|
|
|
@ -1,10 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
|
||||
#include "vkms_drv.h"
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
|
|
|
@ -1,9 +1,4 @@
|
|||
/*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <drm/drm_gem.h>
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0+ */
|
||||
|
||||
#ifndef _VKMS_DRV_H_
|
||||
#define _VKMS_DRV_H_
|
||||
|
||||
|
|
|
@ -1,10 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
|
||||
#include <linux/shmem_fs.h>
|
||||
|
||||
|
|
|
@ -1,10 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
|
||||
#include "vkms_drv.h"
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
|
|
|
@ -1,10 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
|
||||
#include "vkms_drv.h"
|
||||
#include <drm/drm_plane_helper.h>
|
||||
|
|
|
@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150)
|
|||
idev->close = bma150_irq_close;
|
||||
input_set_drvdata(idev, bma150);
|
||||
|
||||
bma150->input = idev;
|
||||
|
||||
error = input_register_device(idev);
|
||||
if (error) {
|
||||
input_free_device(idev);
|
||||
return error;
|
||||
}
|
||||
|
||||
bma150->input = idev;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
|
|||
|
||||
bma150_init_input_device(bma150, ipoll_dev->input);
|
||||
|
||||
bma150->input_polled = ipoll_dev;
|
||||
bma150->input = ipoll_dev->input;
|
||||
|
||||
error = input_register_polled_device(ipoll_dev);
|
||||
if (error) {
|
||||
input_free_polled_device(ipoll_dev);
|
||||
return error;
|
||||
}
|
||||
|
||||
bma150->input_polled = ipoll_dev;
|
||||
bma150->input = ipoll_dev->input;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -1336,7 +1336,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
|
|||
static const struct acpi_device_id elan_acpi_id[] = {
|
||||
{ "ELAN0000", 0 },
|
||||
{ "ELAN0100", 0 },
|
||||
{ "ELAN0501", 0 },
|
||||
{ "ELAN0600", 0 },
|
||||
{ "ELAN0602", 0 },
|
||||
{ "ELAN0605", 0 },
|
||||
|
@ -1346,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
|
|||
{ "ELAN060C", 0 },
|
||||
{ "ELAN0611", 0 },
|
||||
{ "ELAN0612", 0 },
|
||||
{ "ELAN0617", 0 },
|
||||
{ "ELAN0618", 0 },
|
||||
{ "ELAN061C", 0 },
|
||||
{ "ELAN061D", 0 },
|
||||
|
|
|
@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
|
|||
* Asus UX31 0x361f00 20, 15, 0e clickpad
|
||||
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
|
||||
* Avatar AVIU-145A2 0x361f00 ? clickpad
|
||||
* Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**)
|
||||
* Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**)
|
||||
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
|
||||
* Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
|
||||
* Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
|
||||
|
@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
|
|||
DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Fujitsu H780 also has a middle button */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
|
||||
},
|
||||
},
|
||||
#endif
|
||||
{ }
|
||||
};
|
||||
|
|
|
@ -932,7 +932,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
|
|||
if (IS_ERR(bip))
|
||||
return PTR_ERR(bip);
|
||||
|
||||
tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);
|
||||
tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
|
||||
|
||||
bip->bip_iter.bi_size = tag_len;
|
||||
bip->bip_iter.bi_sector = io->cc->start + io->sector;
|
||||
|
|
|
@ -257,6 +257,7 @@ struct pool {
|
|||
|
||||
spinlock_t lock;
|
||||
struct bio_list deferred_flush_bios;
|
||||
struct bio_list deferred_flush_completions;
|
||||
struct list_head prepared_mappings;
|
||||
struct list_head prepared_discards;
|
||||
struct list_head prepared_discards_pt2;
|
||||
|
@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
|
|||
mempool_free(m, &m->tc->pool->mapping_pool);
|
||||
}
|
||||
|
||||
static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
|
||||
{
|
||||
struct pool *pool = tc->pool;
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
* If the bio has the REQ_FUA flag set we must commit the metadata
|
||||
* before signaling its completion.
|
||||
*/
|
||||
if (!bio_triggers_commit(tc, bio)) {
|
||||
bio_endio(bio);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Complete bio with an error if earlier I/O caused changes to the
|
||||
* metadata that can't be committed, e.g, due to I/O errors on the
|
||||
* metadata device.
|
||||
*/
|
||||
if (dm_thin_aborted_changes(tc->td)) {
|
||||
bio_io_error(bio);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Batch together any bios that trigger commits and then issue a
|
||||
* single commit for them in process_deferred_bios().
|
||||
*/
|
||||
spin_lock_irqsave(&pool->lock, flags);
|
||||
bio_list_add(&pool->deferred_flush_completions, bio);
|
||||
spin_unlock_irqrestore(&pool->lock, flags);
|
||||
}
|
||||
|
||||
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
|
||||
{
|
||||
struct thin_c *tc = m->tc;
|
||||
|
@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
|
|||
*/
|
||||
if (bio) {
|
||||
inc_remap_and_issue_cell(tc, m->cell, m->data_block);
|
||||
bio_endio(bio);
|
||||
complete_overwrite_bio(tc, bio);
|
||||
} else {
|
||||
inc_all_io_entry(tc->pool, m->cell->holder);
|
||||
remap_and_issue(tc, m->cell->holder, m->data_block);
|
||||
|
@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool)
|
|||
{
|
||||
unsigned long flags;
|
||||
struct bio *bio;
|
||||
struct bio_list bios;
|
||||
struct bio_list bios, bio_completions;
|
||||
struct thin_c *tc;
|
||||
|
||||
tc = get_first_thin(pool);
|
||||
|
@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool)
|
|||
}
|
||||
|
||||
/*
|
||||
* If there are any deferred flush bios, we must commit
|
||||
* the metadata before issuing them.
|
||||
* If there are any deferred flush bios, we must commit the metadata
|
||||
* before issuing them or signaling their completion.
|
||||
*/
|
||||
bio_list_init(&bios);
|
||||
bio_list_init(&bio_completions);
|
||||
|
||||
spin_lock_irqsave(&pool->lock, flags);
|
||||
bio_list_merge(&bios, &pool->deferred_flush_bios);
|
||||
bio_list_init(&pool->deferred_flush_bios);
|
||||
|
||||
bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
|
||||
bio_list_init(&pool->deferred_flush_completions);
|
||||
spin_unlock_irqrestore(&pool->lock, flags);
|
||||
|
||||
if (bio_list_empty(&bios) &&
|
||||
if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
|
||||
!(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
|
||||
return;
|
||||
|
||||
if (commit(pool)) {
|
||||
bio_list_merge(&bios, &bio_completions);
|
||||
|
||||
while ((bio = bio_list_pop(&bios)))
|
||||
bio_io_error(bio);
|
||||
return;
|
||||
}
|
||||
pool->last_commit_jiffies = jiffies;
|
||||
|
||||
while ((bio = bio_list_pop(&bio_completions)))
|
||||
bio_endio(bio);
|
||||
|
||||
while ((bio = bio_list_pop(&bios)))
|
||||
generic_make_request(bio);
|
||||
}
|
||||
|
@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
|
|||
INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
|
||||
spin_lock_init(&pool->lock);
|
||||
bio_list_init(&pool->deferred_flush_bios);
|
||||
bio_list_init(&pool->deferred_flush_completions);
|
||||
INIT_LIST_HEAD(&pool->prepared_mappings);
|
||||
INIT_LIST_HEAD(&pool->prepared_discards);
|
||||
INIT_LIST_HEAD(&pool->prepared_discards_pt2);
|
||||
|
|
|
@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bio)
|
|||
reschedule_retry(r1_bio);
|
||||
}
|
||||
|
||||
static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
|
||||
{
|
||||
sector_t sync_blocks = 0;
|
||||
sector_t s = r1_bio->sector;
|
||||
long sectors_to_go = r1_bio->sectors;
|
||||
|
||||
/* make sure these bits don't get cleared. */
|
||||
do {
|
||||
md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
|
||||
s += sync_blocks;
|
||||
sectors_to_go -= sync_blocks;
|
||||
} while (sectors_to_go > 0);
|
||||
}
|
||||
|
||||
static void end_sync_write(struct bio *bio)
|
||||
{
|
||||
int uptodate = !bio->bi_status;
|
||||
|
@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *bio)
|
|||
struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
|
||||
|
||||
if (!uptodate) {
|
||||
sector_t sync_blocks = 0;
|
||||
sector_t s = r1_bio->sector;
|
||||
long sectors_to_go = r1_bio->sectors;
|
||||
/* make sure these bits doesn't get cleared. */
|
||||
do {
|
||||
md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
|
||||
s += sync_blocks;
|
||||
sectors_to_go -= sync_blocks;
|
||||
} while (sectors_to_go > 0);
|
||||
abort_sync_write(mddev, r1_bio);
|
||||
set_bit(WriteErrorSeen, &rdev->flags);
|
||||
if (!test_and_set_bit(WantReplacement, &rdev->flags))
|
||||
set_bit(MD_RECOVERY_NEEDED, &
|
||||
|
@ -2172,8 +2178,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
|
|||
(i == r1_bio->read_disk ||
|
||||
!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
|
||||
continue;
|
||||
if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
|
||||
if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
|
||||
abort_sync_write(mddev, r1_bio);
|
||||
continue;
|
||||
}
|
||||
|
||||
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
|
||||
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
|
||||
|
|
|
@ -13,7 +13,7 @@ config EEPROM_AT24
|
|||
ones like at24c64, 24lc02 or fm24c04:
|
||||
|
||||
24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
|
||||
24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
|
||||
24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024, 24c2048
|
||||
|
||||
Unless you like data loss puzzles, always be sure that any chip
|
||||
you configure as a 24c32 (32 kbit) or larger is NOT really a
|
||||
|
|
|
@ -173,6 +173,7 @@ AT24_CHIP_DATA(at24_data_24c128, 131072 / 8, AT24_FLAG_ADDR16);
|
|||
AT24_CHIP_DATA(at24_data_24c256, 262144 / 8, AT24_FLAG_ADDR16);
|
||||
AT24_CHIP_DATA(at24_data_24c512, 524288 / 8, AT24_FLAG_ADDR16);
|
||||
AT24_CHIP_DATA(at24_data_24c1024, 1048576 / 8, AT24_FLAG_ADDR16);
|
||||
AT24_CHIP_DATA(at24_data_24c2048, 2097152 / 8, AT24_FLAG_ADDR16);
|
||||
/* identical to 24c08 ? */
|
||||
AT24_CHIP_DATA(at24_data_INT3499, 8192 / 8, 0);
|
||||
|
||||
|
@ -199,6 +200,7 @@ static const struct i2c_device_id at24_ids[] = {
|
|||
{ "24c256", (kernel_ulong_t)&at24_data_24c256 },
|
||||
{ "24c512", (kernel_ulong_t)&at24_data_24c512 },
|
||||
{ "24c1024", (kernel_ulong_t)&at24_data_24c1024 },
|
||||
{ "24c2048", (kernel_ulong_t)&at24_data_24c2048 },
|
||||
{ "at24", 0 },
|
||||
{ /* END OF LIST */ }
|
||||
};
|
||||
|
@ -227,6 +229,7 @@ static const struct of_device_id at24_of_match[] = {
|
|||
{ .compatible = "atmel,24c256", .data = &at24_data_24c256 },
|
||||
{ .compatible = "atmel,24c512", .data = &at24_data_24c512 },
|
||||
{ .compatible = "atmel,24c1024", .data = &at24_data_24c1024 },
|
||||
{ .compatible = "atmel,24c2048", .data = &at24_data_24c2048 },
|
||||
{ /* END OF LIST */ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, at24_of_match);
|
||||
|
|
|
@ -2158,7 +2158,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
|
|||
if (waiting)
|
||||
wake_up(&mq->wait);
|
||||
else
|
||||
kblockd_schedule_work(&mq->complete_work);
|
||||
queue_work(mq->card->complete_wq, &mq->complete_work);
|
||||
|
||||
return;
|
||||
}
|
||||
|
@ -2972,6 +2972,13 @@ static int mmc_blk_probe(struct mmc_card *card)
|
|||
|
||||
mmc_fixup_device(card, mmc_blk_fixups);
|
||||
|
||||
card->complete_wq = alloc_workqueue("mmc_complete",
|
||||
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
|
||||
if (unlikely(!card->complete_wq)) {
|
||||
pr_err("Failed to create mmc completion workqueue");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
md = mmc_blk_alloc(card);
|
||||
if (IS_ERR(md))
|
||||
return PTR_ERR(md);
|
||||
|
@ -3042,6 +3049,7 @@ static void mmc_blk_remove(struct mmc_card *card)
|
|||
#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
|
||||
mmc_set_bus_resume_policy(card->host, 0);
|
||||
#endif
|
||||
destroy_workqueue(card->complete_wq);
|
||||
}
|
||||
|
||||
static int _mmc_blk_suspend(struct mmc_card *card)
|
||||
|
|
|
@ -1394,6 +1394,21 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
|
|||
if (ret)
|
||||
goto error_free_dma;
|
||||
|
||||
/*
|
||||
* If we don't support delay chains in the SoC, we can't use any
|
||||
* of the higher speed modes. Mask them out in case the device
|
||||
* tree specifies the properties for them, which gets added to
|
||||
* the caps by mmc_of_parse() above.
|
||||
*/
|
||||
if (!(host->cfg->clk_delays || host->use_new_timings)) {
|
||||
mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR |
|
||||
MMC_CAP_1_2V_DDR | MMC_CAP_UHS);
|
||||
mmc->caps2 &= ~MMC_CAP2_HS200;
|
||||
}
|
||||
|
||||
/* TODO: This driver doesn't support HS400 mode yet */
|
||||
mmc->caps2 &= ~MMC_CAP2_HS400;
|
||||
|
||||
ret = sunxi_mmc_init_host(host);
|
||||
if (ret)
|
||||
goto error_free_dma;
|
||||
|
|
|
@ -2095,7 +2095,7 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
|
|||
|
||||
/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
|
||||
off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
|
||||
"nqn.2014.08.org.nvmexpress:%4x%4x",
|
||||
"nqn.2014.08.org.nvmexpress:%04x%04x",
|
||||
le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
|
||||
memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
|
||||
off += sizeof(id->sn);
|
||||
|
|
|
@ -556,6 +556,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
|
|||
return 0;
|
||||
out_free_ana_log_buf:
|
||||
kfree(ctrl->ana_log_buf);
|
||||
ctrl->ana_log_buf = NULL;
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
@ -563,5 +564,6 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
|
|||
void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
|
||||
{
|
||||
kfree(ctrl->ana_log_buf);
|
||||
ctrl->ana_log_buf = NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -908,9 +908,11 @@ static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
|
|||
|
||||
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
|
||||
{
|
||||
if (++nvmeq->cq_head == nvmeq->q_depth) {
|
||||
if (nvmeq->cq_head == nvmeq->q_depth - 1) {
|
||||
nvmeq->cq_head = 0;
|
||||
nvmeq->cq_phase = !nvmeq->cq_phase;
|
||||
} else {
|
||||
nvmeq->cq_head++;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1727,8 +1729,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
|
|||
struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
|
||||
size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
|
||||
|
||||
dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
|
||||
le64_to_cpu(desc->addr));
|
||||
dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
|
||||
le64_to_cpu(desc->addr),
|
||||
DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
|
||||
}
|
||||
|
||||
kfree(dev->host_mem_desc_bufs);
|
||||
|
@ -1794,8 +1797,9 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
|
|||
while (--i >= 0) {
|
||||
size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
|
||||
|
||||
dma_free_coherent(dev->dev, size, bufs[i],
|
||||
le64_to_cpu(descs[i].addr));
|
||||
dma_free_attrs(dev->dev, size, bufs[i],
|
||||
le64_to_cpu(descs[i].addr),
|
||||
DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
|
||||
}
|
||||
|
||||
kfree(bufs);
|
||||
|
|
|
@ -249,7 +249,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr)
|
|||
static inline int ap_test_config_card_id(unsigned int id)
|
||||
{
|
||||
if (!ap_configuration) /* QCI not supported */
|
||||
return 1;
|
||||
/* only ids 0...3F may be probed */
|
||||
return id < 0x40 ? 1 : 0;
|
||||
return ap_test_config(ap_configuration->apm, id);
|
||||
}
|
||||
|
||||
|
|
|
@ -2881,9 +2881,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
|
|||
if (rot == 1) {
|
||||
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
|
||||
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
|
||||
} else {
|
||||
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
|
||||
blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
|
||||
}
|
||||
|
||||
if (sdkp->device->type == TYPE_ZBC) {
|
||||
|
@ -3020,6 +3017,15 @@ static int sd_revalidate_disk(struct gendisk *disk)
|
|||
if (sdkp->media_present) {
|
||||
sd_read_capacity(sdkp, buffer);
|
||||
|
||||
/*
|
||||
* set the default to rotational. All non-rotational devices
|
||||
* support the block characteristics VPD page, which will
|
||||
* cause this to be updated correctly and any device which
|
||||
* doesn't support it should be treated as rotational.
|
||||
*/
|
||||
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
|
||||
blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
|
||||
|
||||
if (scsi_device_supports_vpd(sdp)) {
|
||||
sd_read_block_provisioning(sdkp);
|
||||
sd_read_block_limits(sdkp);
|
||||
|
|
|
@ -1128,6 +1128,10 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
|
||||
PAGE_SIZE);
|
||||
max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
|
||||
PAGE_SIZE);
|
||||
max_num = (max_buf - sizeof(struct smb_hdr)) /
|
||||
sizeof(LOCKING_ANDX_RANGE);
|
||||
buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
|
||||
|
@ -1466,6 +1470,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
|
|||
if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
|
||||
return -EINVAL;
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
|
||||
PAGE_SIZE);
|
||||
max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
|
||||
PAGE_SIZE);
|
||||
max_num = (max_buf - sizeof(struct smb_hdr)) /
|
||||
sizeof(LOCKING_ANDX_RANGE);
|
||||
buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
|
||||
|
|
|
@ -128,6 +128,8 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
|
|||
if (max_buf < sizeof(struct smb2_lock_element))
|
||||
return -EINVAL;
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
|
||||
max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
|
||||
max_num = max_buf / sizeof(struct smb2_lock_element);
|
||||
buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
|
||||
if (!buf)
|
||||
|
@ -264,6 +266,8 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
|
||||
max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
|
||||
max_num = max_buf / sizeof(struct smb2_lock_element);
|
||||
buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
|
||||
if (!buf) {
|
||||
|
|
|
@ -2814,9 +2814,10 @@ smb2_echo_callback(struct mid_q_entry *mid)
|
|||
{
|
||||
struct TCP_Server_Info *server = mid->callback_data;
|
||||
struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
|
||||
unsigned int credits_received = 1;
|
||||
unsigned int credits_received = 0;
|
||||
|
||||
if (mid->mid_state == MID_RESPONSE_RECEIVED)
|
||||
if (mid->mid_state == MID_RESPONSE_RECEIVED
|
||||
|| mid->mid_state == MID_RESPONSE_MALFORMED)
|
||||
credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
|
||||
|
||||
DeleteMidQEntry(mid);
|
||||
|
@ -3073,7 +3074,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
|
|||
struct TCP_Server_Info *server = tcon->ses->server;
|
||||
struct smb2_sync_hdr *shdr =
|
||||
(struct smb2_sync_hdr *)rdata->iov[0].iov_base;
|
||||
unsigned int credits_received = 1;
|
||||
unsigned int credits_received = 0;
|
||||
struct smb_rqst rqst = { .rq_iov = rdata->iov,
|
||||
.rq_nvec = 2,
|
||||
.rq_pages = rdata->pages,
|
||||
|
@ -3112,6 +3113,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
|
|||
task_io_account_read(rdata->got_bytes);
|
||||
cifs_stats_bytes_read(tcon, rdata->got_bytes);
|
||||
break;
|
||||
case MID_RESPONSE_MALFORMED:
|
||||
credits_received = le16_to_cpu(shdr->CreditRequest);
|
||||
/* fall through */
|
||||
default:
|
||||
if (rdata->result != -ENODATA)
|
||||
rdata->result = -EIO;
|
||||
|
@ -3305,7 +3309,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
|
|||
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
|
||||
unsigned int written;
|
||||
struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
|
||||
unsigned int credits_received = 1;
|
||||
unsigned int credits_received = 0;
|
||||
|
||||
switch (mid->mid_state) {
|
||||
case MID_RESPONSE_RECEIVED:
|
||||
|
@ -3333,6 +3337,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
|
|||
case MID_RETRY_NEEDED:
|
||||
wdata->result = -EAGAIN;
|
||||
break;
|
||||
case MID_RESPONSE_MALFORMED:
|
||||
credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
|
||||
/* fall through */
|
||||
default:
|
||||
wdata->result = -EIO;
|
||||
break;
|
||||
|
|
|
@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
|
|||
return LRU_REMOVED;
|
||||
}
|
||||
|
||||
/*
|
||||
* Recently referenced inodes and inodes with many attached pages
|
||||
* get one more pass.
|
||||
*/
|
||||
if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
|
||||
/* recently referenced inodes get one more pass */
|
||||
if (inode->i_state & I_REFERENCED) {
|
||||
inode->i_state &= ~I_REFERENCED;
|
||||
spin_unlock(&inode->i_lock);
|
||||
return LRU_ROTATE;
|
||||
|
|
|
@ -1239,8 +1239,8 @@ static __net_init int nfsd_init_net(struct net *net)
|
|||
retval = nfsd_idmap_init(net);
|
||||
if (retval)
|
||||
goto out_idmap_error;
|
||||
nn->nfsd4_lease = 45; /* default lease time */
|
||||
nn->nfsd4_grace = 45;
|
||||
nn->nfsd4_lease = 90; /* default lease time */
|
||||
nn->nfsd4_grace = 90;
|
||||
nn->somebody_reclaimed = false;
|
||||
nn->clverifier_counter = prandom_u32();
|
||||
nn->clientid_counter = prandom_u32();
|
||||
|
|
|
@ -480,7 +480,7 @@ struct mem_size_stats {
|
|||
};
|
||||
|
||||
static void smaps_account(struct mem_size_stats *mss, struct page *page,
|
||||
bool compound, bool young, bool dirty)
|
||||
bool compound, bool young, bool dirty, bool locked)
|
||||
{
|
||||
int i, nr = compound ? 1 << compound_order(page) : 1;
|
||||
unsigned long size = nr * PAGE_SIZE;
|
||||
|
@ -507,24 +507,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
|
|||
else
|
||||
mss->private_clean += size;
|
||||
mss->pss += (u64)size << PSS_SHIFT;
|
||||
if (locked)
|
||||
mss->pss_locked += (u64)size << PSS_SHIFT;
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < nr; i++, page++) {
|
||||
int mapcount = page_mapcount(page);
|
||||
unsigned long pss = (PAGE_SIZE << PSS_SHIFT);
|
||||
|
||||
if (mapcount >= 2) {
|
||||
if (dirty || PageDirty(page))
|
||||
mss->shared_dirty += PAGE_SIZE;
|
||||
else
|
||||
mss->shared_clean += PAGE_SIZE;
|
||||
mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
|
||||
mss->pss += pss / mapcount;
|
||||
if (locked)
|
||||
mss->pss_locked += pss / mapcount;
|
||||
} else {
|
||||
if (dirty || PageDirty(page))
|
||||
mss->private_dirty += PAGE_SIZE;
|
||||
else
|
||||
mss->private_clean += PAGE_SIZE;
|
||||
mss->pss += PAGE_SIZE << PSS_SHIFT;
|
||||
mss->pss += pss;
|
||||
if (locked)
|
||||
mss->pss_locked += pss;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -547,6 +554,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
|
|||
{
|
||||
struct mem_size_stats *mss = walk->private;
|
||||
struct vm_area_struct *vma = walk->vma;
|
||||
bool locked = !!(vma->vm_flags & VM_LOCKED);
|
||||
struct page *page = NULL;
|
||||
|
||||
if (pte_present(*pte)) {
|
||||
|
@ -589,7 +597,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
|
|||
if (!page)
|
||||
return;
|
||||
|
||||
smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
|
||||
smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
||||
|
@ -598,6 +606,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
|
|||
{
|
||||
struct mem_size_stats *mss = walk->private;
|
||||
struct vm_area_struct *vma = walk->vma;
|
||||
bool locked = !!(vma->vm_flags & VM_LOCKED);
|
||||
struct page *page;
|
||||
|
||||
/* FOLL_DUMP will return -EFAULT on huge zero page */
|
||||
|
@ -612,7 +621,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
|
|||
/* pass */;
|
||||
else
|
||||
VM_BUG_ON_PAGE(1, page);
|
||||
smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
|
||||
smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
|
||||
}
|
||||
#else
|
||||
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
|
||||
|
@ -794,11 +803,8 @@ static void smap_gather_stats(struct vm_area_struct *vma,
|
|||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* mmap_sem is held in m_start */
|
||||
walk_page_vma(vma, &smaps_walk);
|
||||
if (vma->vm_flags & VM_LOCKED)
|
||||
mss->pss_locked += mss->pss;
|
||||
}
|
||||
|
||||
#define SEQ_PUT_DEC(str, val) \
|
||||
|
|
|
@ -396,6 +396,7 @@ struct mmc_card {
|
|||
struct notifier_block reboot_notify;
|
||||
enum mmc_pon_type pon_type;
|
||||
struct mmc_bkops_info bkops;
|
||||
struct workqueue_struct *complete_wq; /* Private workqueue */
|
||||
};
|
||||
|
||||
static inline bool mmc_large_sector(struct mmc_card *card)
|
||||
|
|
|
@ -449,6 +449,11 @@ struct pmu {
|
|||
* Filter events for PMU-specific reasons.
|
||||
*/
|
||||
int (*filter_match) (struct perf_event *event); /* optional */
|
||||
|
||||
/*
|
||||
* Check period value for PERF_EVENT_IOC_PERIOD ioctl.
|
||||
*/
|
||||
int (*check_period) (struct perf_event *event, u64 value); /* optional */
|
||||
};
|
||||
|
||||
enum perf_addr_filter_action_t {
|
||||
|
|
|
@ -5136,6 +5136,11 @@ static void __perf_event_period(struct perf_event *event,
|
|||
}
|
||||
}
|
||||
|
||||
static int perf_event_check_period(struct perf_event *event, u64 value)
|
||||
{
|
||||
return event->pmu->check_period(event, value);
|
||||
}
|
||||
|
||||
static int perf_event_period(struct perf_event *event, u64 __user *arg)
|
||||
{
|
||||
u64 value;
|
||||
|
@ -5152,6 +5157,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
|
|||
if (event->attr.freq && value > sysctl_perf_event_sample_rate)
|
||||
return -EINVAL;
|
||||
|
||||
if (perf_event_check_period(event, value))
|
||||
return -EINVAL;
|
||||
|
||||
event_function_call(event, __perf_event_period, &value);
|
||||
|
||||
return 0;
|
||||
|
@ -9539,6 +9547,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int perf_event_nop_int(struct perf_event *event, u64 value)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
|
||||
|
||||
static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
|
||||
|
@ -9839,6 +9852,9 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
|
|||
pmu->pmu_disable = perf_pmu_nop_void;
|
||||
}
|
||||
|
||||
if (!pmu->check_period)
|
||||
pmu->check_period = perf_event_nop_int;
|
||||
|
||||
if (!pmu->event_idx)
|
||||
pmu->event_idx = perf_event_idx_default;
|
||||
|
||||
|
|
|
@ -724,7 +724,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
|
|||
size = sizeof(struct ring_buffer);
|
||||
size += nr_pages * sizeof(void *);
|
||||
|
||||
if (order_base_2(size) >= MAX_ORDER)
|
||||
if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
|
||||
goto fail;
|
||||
|
||||
rb = kzalloc(size, GFP_KERNEL);
|
||||
|
|
|
@ -2438,9 +2438,12 @@ bool get_signal(struct ksignal *ksig)
|
|||
}
|
||||
|
||||
/* Has this task already been marked for death? */
|
||||
ksig->info.si_signo = signr = SIGKILL;
|
||||
if (signal_group_exit(signal))
|
||||
if (signal_group_exit(signal)) {
|
||||
ksig->info.si_signo = signr = SIGKILL;
|
||||
sigdelset(¤t->pending.signal, SIGKILL);
|
||||
recalc_sigpending();
|
||||
goto fatal;
|
||||
}
|
||||
|
||||
for (;;) {
|
||||
struct k_sigaction *ka;
|
||||
|
|
|
@ -141,7 +141,14 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
|
|||
|
||||
ret = strncpy_from_user(dst, src, maxlen);
|
||||
if (ret == maxlen)
|
||||
dst[--ret] = '\0';
|
||||
dst[ret - 1] = '\0';
|
||||
else if (ret >= 0)
|
||||
/*
|
||||
* Include the terminating null byte. In this case it
|
||||
* was copied by strncpy_from_user but not accounted
|
||||
* for in ret.
|
||||
*/
|
||||
ret++;
|
||||
|
||||
if (ret < 0) { /* Failed to fetch string */
|
||||
((u8 *)get_rloc_data(dest))[0] = '\0';
|
||||
|
|
10
mm/vmscan.c
10
mm/vmscan.c
|
@ -482,16 +482,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
|
|||
delta *= 4;
|
||||
do_div(delta, shrinker->seeks);
|
||||
|
||||
/*
|
||||
* Make sure we apply some minimal pressure on default priority
|
||||
* even on small cgroups. Stale objects are not only consuming memory
|
||||
* by themselves, but can also hold a reference to a dying cgroup,
|
||||
* preventing it from being reclaimed. A dying cgroup with all
|
||||
* corresponding structures like per-cpu stats and kmem caches
|
||||
* can be really big, so it may lead to a significant waste of memory.
|
||||
*/
|
||||
delta = max_t(unsigned long long, delta, min(freeable, batch_size));
|
||||
|
||||
total_scan += delta;
|
||||
if (total_scan < 0) {
|
||||
pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
|
||||
|
|
|
@ -924,6 +924,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
|
|||
SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
|
||||
SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
|
||||
SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
|
||||
SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
|
||||
SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
|
||||
SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
|
||||
SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
|
||||
|
|
|
@ -795,6 +795,8 @@ static int hdmi_codec_probe(struct platform_device *pdev)
|
|||
if (hcd->spdif)
|
||||
hcp->daidrv[i] = hdmi_spdif_dai;
|
||||
|
||||
dev_set_drvdata(dev, hcp);
|
||||
|
||||
ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv,
|
||||
dai_count);
|
||||
if (ret) {
|
||||
|
@ -802,8 +804,6 @@ static int hdmi_codec_probe(struct platform_device *pdev)
|
|||
__func__, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
dev_set_drvdata(dev, hcp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -377,6 +377,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* Setup an implicit feedback endpoint from a quirk. Returns 0 if no quirk
|
||||
* applies. Returns 1 if a quirk was found.
|
||||
*/
|
||||
static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
|
||||
struct usb_device *dev,
|
||||
struct usb_interface_descriptor *altsd,
|
||||
|
@ -447,7 +450,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
|
|||
|
||||
subs->data_endpoint->sync_master = subs->sync_endpoint;
|
||||
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int set_sync_endpoint(struct snd_usb_substream *subs,
|
||||
|
@ -486,6 +489,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
|
|||
if (err < 0)
|
||||
return err;
|
||||
|
||||
/* endpoint set by quirk */
|
||||
if (err > 0)
|
||||
return 0;
|
||||
|
||||
if (altsd->bNumEndpoints < 2)
|
||||
return 0;
|
||||
|
||||
|
|
25
tools/arch/riscv/include/uapi/asm/bitsperlong.h
Normal file
25
tools/arch/riscv/include/uapi/asm/bitsperlong.h
Normal file
|
@ -0,0 +1,25 @@
|
|||
/*
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
* Copyright (C) 2015 Regents of the University of California
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
|
||||
#define _UAPI_ASM_RISCV_BITSPERLONG_H
|
||||
|
||||
#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
|
||||
|
||||
#include <asm-generic/bitsperlong.h>
|
||||
|
||||
#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
|
|
@ -13,6 +13,10 @@
|
|||
#include "../../arch/mips/include/uapi/asm/bitsperlong.h"
|
||||
#elif defined(__ia64__)
|
||||
#include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
|
||||
#elif defined(__riscv)
|
||||
#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
|
||||
#elif defined(__alpha__)
|
||||
#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
|
||||
#else
|
||||
#include <asm-generic/bitsperlong.h>
|
||||
#endif
|
||||
|
|
|
@ -13,7 +13,8 @@ add_probe_vfs_getname() {
|
|||
local verbose=$1
|
||||
if [ $had_vfs_getname -eq 1 ] ; then
|
||||
line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
|
||||
perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string"
|
||||
perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
|
||||
perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
|
||||
fi
|
||||
}
|
||||
|
||||
|
|
|
@ -766,6 +766,7 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
|
|||
cnode->cycles_count += node->branch_flags.cycles;
|
||||
cnode->iter_count += node->nr_loop_iter;
|
||||
cnode->iter_cycles += node->iter_cycles;
|
||||
cnode->from_count++;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1345,10 +1346,10 @@ static int branch_to_str(char *bf, int bfsize,
|
|||
static int branch_from_str(char *bf, int bfsize,
|
||||
u64 branch_count,
|
||||
u64 cycles_count, u64 iter_count,
|
||||
u64 iter_cycles)
|
||||
u64 iter_cycles, u64 from_count)
|
||||
{
|
||||
int printed = 0, i = 0;
|
||||
u64 cycles;
|
||||
u64 cycles, v = 0;
|
||||
|
||||
cycles = cycles_count / branch_count;
|
||||
if (cycles) {
|
||||
|
@ -1357,14 +1358,16 @@ static int branch_from_str(char *bf, int bfsize,
|
|||
bf + printed, bfsize - printed);
|
||||
}
|
||||
|
||||
if (iter_count) {
|
||||
printed += count_pri64_printf(i++, "iter",
|
||||
iter_count,
|
||||
bf + printed, bfsize - printed);
|
||||
if (iter_count && from_count) {
|
||||
v = iter_count / from_count;
|
||||
if (v) {
|
||||
printed += count_pri64_printf(i++, "iter",
|
||||
v, bf + printed, bfsize - printed);
|
||||
|
||||
printed += count_pri64_printf(i++, "avg_cycles",
|
||||
iter_cycles / iter_count,
|
||||
bf + printed, bfsize - printed);
|
||||
printed += count_pri64_printf(i++, "avg_cycles",
|
||||
iter_cycles / iter_count,
|
||||
bf + printed, bfsize - printed);
|
||||
}
|
||||
}
|
||||
|
||||
if (i)
|
||||
|
@ -1377,6 +1380,7 @@ static int counts_str_build(char *bf, int bfsize,
|
|||
u64 branch_count, u64 predicted_count,
|
||||
u64 abort_count, u64 cycles_count,
|
||||
u64 iter_count, u64 iter_cycles,
|
||||
u64 from_count,
|
||||
struct branch_type_stat *brtype_stat)
|
||||
{
|
||||
int printed;
|
||||
|
@ -1389,7 +1393,8 @@ static int counts_str_build(char *bf, int bfsize,
|
|||
predicted_count, abort_count, brtype_stat);
|
||||
} else {
|
||||
printed = branch_from_str(bf, bfsize, branch_count,
|
||||
cycles_count, iter_count, iter_cycles);
|
||||
cycles_count, iter_count, iter_cycles,
|
||||
from_count);
|
||||
}
|
||||
|
||||
if (!printed)
|
||||
|
@ -1402,13 +1407,14 @@ static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
|
|||
u64 branch_count, u64 predicted_count,
|
||||
u64 abort_count, u64 cycles_count,
|
||||
u64 iter_count, u64 iter_cycles,
|
||||
u64 from_count,
|
||||
struct branch_type_stat *brtype_stat)
|
||||
{
|
||||
char str[256];
|
||||
|
||||
counts_str_build(str, sizeof(str), branch_count,
|
||||
predicted_count, abort_count, cycles_count,
|
||||
iter_count, iter_cycles, brtype_stat);
|
||||
iter_count, iter_cycles, from_count, brtype_stat);
|
||||
|
||||
if (fp)
|
||||
return fprintf(fp, "%s", str);
|
||||
|
@ -1422,6 +1428,7 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
|
|||
u64 branch_count, predicted_count;
|
||||
u64 abort_count, cycles_count;
|
||||
u64 iter_count, iter_cycles;
|
||||
u64 from_count;
|
||||
|
||||
branch_count = clist->branch_count;
|
||||
predicted_count = clist->predicted_count;
|
||||
|
@ -1429,11 +1436,12 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
|
|||
cycles_count = clist->cycles_count;
|
||||
iter_count = clist->iter_count;
|
||||
iter_cycles = clist->iter_cycles;
|
||||
from_count = clist->from_count;
|
||||
|
||||
return callchain_counts_printf(fp, bf, bfsize, branch_count,
|
||||
predicted_count, abort_count,
|
||||
cycles_count, iter_count, iter_cycles,
|
||||
&clist->brtype_stat);
|
||||
from_count, &clist->brtype_stat);
|
||||
}
|
||||
|
||||
static void free_callchain_node(struct callchain_node *node)
|
||||
|
|
|
@ -118,6 +118,7 @@ struct callchain_list {
|
|||
bool has_children;
|
||||
};
|
||||
u64 branch_count;
|
||||
u64 from_count;
|
||||
u64 predicted_count;
|
||||
u64 abort_count;
|
||||
u64 cycles_count;
|
||||
|
|
|
@ -1988,7 +1988,7 @@ static void save_iterations(struct iterations *iter,
|
|||
{
|
||||
int i;
|
||||
|
||||
iter->nr_loop_iter = nr;
|
||||
iter->nr_loop_iter++;
|
||||
iter->cycles = 0;
|
||||
|
||||
for (i = 0; i < nr; i++)
|
||||
|
|
Loading…
Reference in a new issue