This is the 4.19.103 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl5Cn0wACgkQONu9yGCS
 aT584xAAtePSlzTxst/jukREoyrpAfTM1BeovMdsZEBpKh+/F3n1udqHeo+iNAAN
 qSOig012aW2qP7b5/4CrEU9ZRTvd0AM4fog7ABLJVahMYMqoJgod8TRaE4v0nVut
 eRans6w3NbZJCZwdw2aiu5gwFfjwJLSUckBNmj4XVYdyfh7q0BgnZV5OY0V+zhuG
 1MWXaylbRqjguR/ZFk0UPAmRaqNKHbwfCJ1V0ygL9xQkJM0cUn7hX9/CqM4aYnm6
 m1oux4ektLAmF1XK4NiQEuRBMeFO74XlKcsZqQHf/b4FZfcPergcPwIj8ugtCHzJ
 kx2QgURDjgH4Tnu+Q0ScPrjj2kjU8rWmjqlcv1PcUyOWm+MR0OK9bW7TLEntMSF8
 HOEe9j6SsjQNIOoYh1YcMnuGjKNIZjl2L3VbDzpVN2GxZxwAutY6G68tV7sbA2pu
 wtsrAVOqdcjoo0ruRmwognBqQAdNdsbiBx7bgcNjVEXWL0N3Ddiv6CNYwnehA5Hq
 cvQwVQpFGP9ZGYUcCMbdwR+7kJzVy6V2S615M8GkE9FouOwTfV60zM/sZ1rFVt1J
 70zxfRX5ys19aTAVkbi6pHHCUJ0ZAiTgWujp5Hp4kPt7gEz01Ur0s1kI3b7b6iWh
 cuycRFULvqeXCApQacs//lOVDoUV20uFcL/zqOFM33v/+YzkyjA=
 =3D8z
 -----END PGP SIGNATURE-----

Merge 4.19.103 into android-4.19

Changes in 4.19.103
	Revert "drm/sun4i: dsi: Change the start delay calculation"
	ovl: fix lseek overflow on 32bit
	kernel/module: Fix memleak in module_add_modinfo_attrs()
	media: iguanair: fix endpoint sanity check
	ocfs2: fix oops when writing cloned file
	x86/cpu: Update cached HLE state on write to TSX_CTRL_CPUID_CLEAR
	udf: Allow writing to 'Rewritable' partitions
	printk: fix exclusive_console replaying
	iwlwifi: mvm: fix NVM check for 3168 devices
	sparc32: fix struct ipc64_perm type definition
	cls_rsvp: fix rsvp_policy
	gtp: use __GFP_NOWARN to avoid memalloc warning
	l2tp: Allow duplicate session creation with UDP
	net: hsr: fix possible NULL deref in hsr_handle_frame()
	net_sched: fix an OOB access in cls_tcindex
	net: stmmac: Delete txtimer in suspend()
	bnxt_en: Fix TC queue mapping.
	tcp: clear tp->total_retrans in tcp_disconnect()
	tcp: clear tp->delivered in tcp_disconnect()
	tcp: clear tp->data_segs{in|out} in tcp_disconnect()
	tcp: clear tp->segs_{in|out} in tcp_disconnect()
	rxrpc: Fix use-after-free in rxrpc_put_local()
	rxrpc: Fix insufficient receive notification generation
	rxrpc: Fix missing active use pinning of rxrpc_local object
	rxrpc: Fix NULL pointer deref due to call->conn being cleared on disconnect
	media: uvcvideo: Avoid cyclic entity chains due to malformed USB descriptors
	mfd: dln2: More sanity checking for endpoints
	ipc/msg.c: consolidate all xxxctl_down() functions
	tracing: Fix sched switch start/stop refcount racy updates
	rcu: Avoid data-race in rcu_gp_fqs_check_wake()
	brcmfmac: Fix memory leak in brcmf_usbdev_qinit
	usb: typec: tcpci: mask event interrupts when remove driver
	usb: gadget: legacy: set max_speed to super-speed
	usb: gadget: f_ncm: Use atomic_t to track in-flight request
	usb: gadget: f_ecm: Use atomic_t to track in-flight request
	ALSA: usb-audio: Fix endianess in descriptor validation
	ALSA: dummy: Fix PCM format loop in proc output
	mm/memory_hotplug: fix remove_memory() lockdep splat
	mm: move_pages: report the number of non-attempted pages
	media/v4l2-core: set pages dirty upon releasing DMA buffers
	media: v4l2-core: compat: ignore native command codes
	media: v4l2-rect.h: fix v4l2_rect_map_inside() top/left adjustments
	lib/test_kasan.c: fix memory leak in kmalloc_oob_krealloc_more()
	irqdomain: Fix a memory leak in irq_domain_push_irq()
	platform/x86: intel_scu_ipc: Fix interrupt support
	ALSA: hda: Add Clevo W65_67SB the power_save blacklist
	KVM: arm64: Correct PSTATE on exception entry
	KVM: arm/arm64: Correct CPSR on exception entry
	KVM: arm/arm64: Correct AArch32 SPSR on exception entry
	KVM: arm64: Only sign-extend MMIO up to register width
	MIPS: fix indentation of the 'RELOCS' message
	MIPS: boot: fix typo in 'vmlinux.lzma.its' target
	s390/mm: fix dynamic pagetable upgrade for hugetlbfs
	powerpc/xmon: don't access ASDR in VMs
	powerpc/pseries: Advance pfn if section is not present in lmb_is_removable()
	smb3: fix signing verification of large reads
	PCI: tegra: Fix return value check of pm_runtime_get_sync()
	mmc: spi: Toggle SPI polarity, do not hardcode it
	ACPI: video: Do not export a non working backlight interface on MSI MS-7721 boards
	ACPI / battery: Deal with design or full capacity being reported as -1
	ACPI / battery: Use design-cap for capacity calculations if full-cap is not available
	ACPI / battery: Deal better with neither design nor full capacity not being reported
	alarmtimer: Unregister wakeup source when module get fails
	ubifs: Reject unsupported ioctl flags explicitly
	ubifs: don't trigger assertion on invalid no-key filename
	ubifs: Fix FS_IOC_SETFLAGS unexpectedly clearing encrypt flag
	ubifs: Fix deadlock in concurrent bulk-read and writepage
	crypto: geode-aes - convert to skcipher API and make thread-safe
	PCI: keystone: Fix link training retries initiation
	mmc: sdhci-of-at91: fix memleak on clk_get failure
	hv_balloon: Balloon up according to request page number
	mfd: axp20x: Mark AXP20X_VBUS_IPSOUT_MGMT as volatile
	crypto: api - Check spawn->alg under lock in crypto_drop_spawn
	crypto: ccree - fix backlog memory leak
	crypto: ccree - fix pm wrongful error reporting
	crypto: ccree - fix PM race condition
	scripts/find-unused-docs: Fix massive false positives
	scsi: qla2xxx: Fix mtcp dump collection failure
	power: supply: ltc2941-battery-gauge: fix use-after-free
	ovl: fix wrong WARN_ON() in ovl_cache_update_ino()
	f2fs: choose hardlimit when softlimit is larger than hardlimit in f2fs_statfs_project()
	f2fs: fix miscounted block limit in f2fs_statfs_project()
	f2fs: code cleanup for f2fs_statfs_project()
	PM: core: Fix handling of devices deleted during system-wide resume
	of: Add OF_DMA_DEFAULT_COHERENT & select it on powerpc
	dm zoned: support zone sizes smaller than 128MiB
	dm space map common: fix to ensure new block isn't already in use
	dm crypt: fix benbi IV constructor crash if used in authenticated mode
	dm: fix potential for q->make_request_fn NULL pointer
	dm writecache: fix incorrect flush sequence when doing SSD mode commit
	padata: Remove broken queue flushing
	tracing: Annotate ftrace_graph_hash pointer with __rcu
	tracing: Annotate ftrace_graph_notrace_hash pointer with __rcu
	ftrace: Add comment to why rcu_dereference_sched() is open coded
	ftrace: Protect ftrace_graph_hash with ftrace_sync
	samples/bpf: Don't try to remove user's homedir on clean
	crypto: ccp - set max RSA modulus size for v3 platform devices as well
	crypto: pcrypt - Do not clear MAY_SLEEP flag in original request
	crypto: atmel-aes - Fix counter overflow in CTR mode
	crypto: api - Fix race condition in crypto_spawn_alg
	crypto: picoxcell - adjust the position of tasklet_init and fix missed tasklet_kill
	scsi: qla2xxx: Fix unbound NVME response length
	NFS: Fix memory leaks and corruption in readdir
	NFS: Directory page cache pages need to be locked when read
	jbd2_seq_info_next should increase position index
	Btrfs: fix missing hole after hole punching and fsync when using NO_HOLES
	btrfs: set trans->drity in btrfs_commit_transaction
	Btrfs: fix race between adding and putting tree mod seq elements and nodes
	ARM: tegra: Enable PLLP bypass during Tegra124 LP1
	iwlwifi: don't throw error when trying to remove IGTK
	mwifiex: fix unbalanced locking in mwifiex_process_country_ie()
	sunrpc: expiry_time should be seconds not timeval
	gfs2: move setting current->backing_dev_info
	gfs2: fix O_SYNC write handling
	drm/rect: Avoid division by zero
	media: rc: ensure lirc is initialized before registering input device
	tools/kvm_stat: Fix kvm_exit filter name
	xen/balloon: Support xend-based toolstack take two
	watchdog: fix UAF in reboot notifier handling in watchdog core code
	bcache: add readahead cache policy options via sysfs interface
	eventfd: track eventfd_signal() recursion depth
	aio: prevent potential eventfd recursion on poll
	KVM: x86: Refactor picdev_write() to prevent Spectre-v1/L1TF attacks
	KVM: x86: Refactor prefix decoding to prevent Spectre-v1/L1TF attacks
	KVM: x86: Protect pmu_intel.c from Spectre-v1/L1TF attacks
	KVM: x86: Protect DR-based index computations from Spectre-v1/L1TF attacks
	KVM: x86: Protect kvm_lapic_reg_write() from Spectre-v1/L1TF attacks
	KVM: x86: Protect kvm_hv_msr_[get|set]_crash_data() from Spectre-v1/L1TF attacks
	KVM: x86: Protect ioapic_write_indirect() from Spectre-v1/L1TF attacks
	KVM: x86: Protect MSR-based index computations in pmu.h from Spectre-v1/L1TF attacks
	KVM: x86: Protect ioapic_read_indirect() from Spectre-v1/L1TF attacks
	KVM: x86: Protect MSR-based index computations from Spectre-v1/L1TF attacks in x86.c
	KVM: x86: Protect x86_decode_insn from Spectre-v1/L1TF attacks
	KVM: x86: Protect MSR-based index computations in fixed_msr_to_seg_unit() from Spectre-v1/L1TF attacks
	KVM: x86: Fix potential put_fpu() w/o load_fpu() on MPX platform
	KVM: PPC: Book3S HV: Uninit vCPU if vcore creation fails
	KVM: PPC: Book3S PR: Free shared page if mmu initialization fails
	x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit
	KVM: x86: Don't let userspace set host-reserved cr4 bits
	KVM: x86: Free wbinvd_dirty_mask if vCPU creation fails
	KVM: s390: do not clobber registers during guest reset/store status
	clk: tegra: Mark fuse clock as critical
	drm/amd/dm/mst: Ignore payload update failures
	percpu: Separate decrypted varaibles anytime encryption can be enabled
	scsi: qla2xxx: Fix the endianness of the qla82xx_get_fw_size() return type
	scsi: csiostor: Adjust indentation in csio_device_reset
	scsi: qla4xxx: Adjust indentation in qla4xxx_mem_free
	scsi: ufs: Recheck bkops level if bkops is disabled
	phy: qualcomm: Adjust indentation in read_poll_timeout
	ext2: Adjust indentation in ext2_fill_super
	powerpc/44x: Adjust indentation in ibm4xx_denali_fixup_memsize
	drm: msm: mdp4: Adjust indentation in mdp4_dsi_encoder_enable
	NFC: pn544: Adjust indentation in pn544_hci_check_presence
	ppp: Adjust indentation into ppp_async_input
	net: smc911x: Adjust indentation in smc911x_phy_configure
	net: tulip: Adjust indentation in {dmfe, uli526x}_init_module
	IB/mlx5: Fix outstanding_pi index for GSI qps
	IB/core: Fix ODP get user pages flow
	nfsd: fix delay timer on 32-bit architectures
	nfsd: fix jiffies/time_t mixup in LRU list
	nfsd: Return the correct number of bytes written to the file
	ubi: fastmap: Fix inverted logic in seen selfcheck
	ubi: Fix an error pointer dereference in error handling code
	mfd: da9062: Fix watchdog compatible string
	mfd: rn5t618: Mark ADC control register volatile
	bonding/alb: properly access headers in bond_alb_xmit()
	net: dsa: bcm_sf2: Only 7278 supports 2Gb/sec IMP port
	net: mvneta: move rx_dropped and rx_errors in per-cpu stats
	net_sched: fix a resource leak in tcindex_set_parms()
	net: systemport: Avoid RBUF stuck in Wake-on-LAN mode
	net/mlx5: IPsec, Fix esp modify function attribute
	net/mlx5: IPsec, fix memory leak at mlx5_fpga_ipsec_delete_sa_ctx
	net: macb: Remove unnecessary alignment check for TSO
	net: macb: Limit maximum GEM TX length in TSO
	net: dsa: b53: Always use dev->vlan_enabled in b53_configure_vlan()
	ext4: fix deadlock allocating crypto bounce page from mempool
	btrfs: use bool argument in free_root_pointers()
	btrfs: free block groups after free'ing fs trees
	drm: atmel-hlcdc: enable clock before configuring timing engine
	drm/dp_mst: Remove VCPI while disabling topology mgr
	btrfs: flush write bio if we loop in extent_write_cache_pages
	KVM: x86/mmu: Apply max PA check for MMIO sptes to 32-bit KVM
	KVM: x86: Use gpa_t for cr2/gpa to fix TDP support on 32-bit KVM
	KVM: VMX: Add non-canonical check on writes to RTIT address MSRs
	KVM: nVMX: vmread should not set rflags to specify success in case of #PF
	KVM: Use vcpu-specific gva->hva translation when querying host page size
	KVM: Play nice with read-only memslots when querying host page size
	mm: zero remaining unavailable struct pages
	mm: return zero_resv_unavail optimization
	mm/page_alloc.c: fix uninitialized memmaps on a partially populated last section
	cifs: fail i/o on soft mounts if sessionsetup errors out
	x86/apic/msi: Plug non-maskable MSI affinity race
	clocksource: Prevent double add_timer_on() for watchdog_timer
	perf/core: Fix mlock accounting in perf_mmap()
	rxrpc: Fix service call disconnection
	Linux 4.19.103

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I0d7f09085c3541373e0fd6b2e3ffacc5e34f7d55
Greg Kroah-Hartman 2020-02-11 15:05:03 -08:00
commit 3389e56d31
212 changed files with 10300 additions and 1569 deletions


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 102
SUBLEVEL = 103
EXTRAVERSION =
NAME = "People's Front"


@@ -26,13 +26,25 @@
#include <asm/cputype.h>
/* arm64 compatibility macros */
#define PSR_AA32_MODE_FIQ FIQ_MODE
#define PSR_AA32_MODE_SVC SVC_MODE
#define PSR_AA32_MODE_ABT ABT_MODE
#define PSR_AA32_MODE_UND UND_MODE
#define PSR_AA32_T_BIT PSR_T_BIT
#define PSR_AA32_F_BIT PSR_F_BIT
#define PSR_AA32_I_BIT PSR_I_BIT
#define PSR_AA32_A_BIT PSR_A_BIT
#define PSR_AA32_E_BIT PSR_E_BIT
#define PSR_AA32_IT_MASK PSR_IT_MASK
#define PSR_AA32_GE_MASK 0x000f0000
#define PSR_AA32_DIT_BIT 0x00200000
#define PSR_AA32_PAN_BIT 0x00400000
#define PSR_AA32_SSBS_BIT 0x00800000
#define PSR_AA32_Q_BIT PSR_Q_BIT
#define PSR_AA32_V_BIT PSR_V_BIT
#define PSR_AA32_C_BIT PSR_C_BIT
#define PSR_AA32_Z_BIT PSR_Z_BIT
#define PSR_AA32_N_BIT PSR_N_BIT
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
@@ -53,6 +65,11 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
*__vcpu_spsr(vcpu) = v;
}
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
return spsr;
}
static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
u8 reg_num)
{
@@ -189,6 +206,11 @@ static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
}
static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
return false;
}
static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
{
return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;


@@ -26,6 +26,8 @@
struct kvm_decode {
unsigned long rt;
bool sign_extend;
/* Not used on 32-bit arm */
bool sixty_four;
};
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);


@@ -382,6 +382,14 @@ _pll_m_c_x_done:
pll_locked r1, r0, CLK_RESET_PLLC_BASE
pll_locked r1, r0, CLK_RESET_PLLX_BASE
tegra_get_soc_id TEGRA_APB_MISC_BASE, r1
cmp r1, #TEGRA30
beq 1f
ldr r1, [r0, #CLK_RESET_PLLP_BASE]
bic r1, r1, #(1<<31) @ disable PllP bypass
str r1, [r0, #CLK_RESET_PLLP_BASE]
1:
mov32 r7, TEGRA_TMRUS_BASE
ldr r1, [r7]
add r1, r1, #LOCK_DELAY
@@ -641,7 +649,10 @@ tegra30_switch_cpu_to_clk32k:
str r0, [r4, #PMC_PLLP_WB0_OVERRIDE]
/* disable PLLP, PLLA, PLLC and PLLX */
tegra_get_soc_id TEGRA_APB_MISC_BASE, r1
cmp r1, #TEGRA30
ldr r0, [r5, #CLK_RESET_PLLP_BASE]
orrne r0, r0, #(1 << 31) @ enable PllP bypass on fast cluster
bic r0, r0, #(1 << 30)
str r0, [r5, #CLK_RESET_PLLP_BASE]
ldr r0, [r5, #CLK_RESET_PLLA_BASE]


@@ -202,6 +202,38 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}
/*
* The layout of SPSR for an AArch32 state is different when observed from an
* AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
* view given an AArch64 view.
*
* In ARM DDI 0487E.a see:
*
* - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
* - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
* - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
*
* Which show the following differences:
*
* | Bit | AA64 | AA32 | Notes |
* +-----+------+------+-----------------------------|
* | 24 | DIT | J | J is RES0 in ARMv8 |
* | 21 | SS | DIT | SS doesn't exist in AArch32 |
*
* ... and all other bits are (currently) common.
*/
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
const unsigned long overlap = BIT(24) | BIT(21);
unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);
spsr &= ~overlap;
spsr |= dit << 21;
return spsr;
}
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
u32 mode;
@@ -261,6 +293,11 @@ static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}
static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
}
static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;


@@ -21,13 +21,11 @@
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
/*
* This is annoying. The mmio code requires this, even if we don't
* need any decoding. To be fixed.
*/
struct kvm_decode {
unsigned long rt;
bool sign_extend;
/* Width of the register accessed by the faulting instruction is 64-bits */
bool sixty_four;
};
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);


@@ -50,6 +50,7 @@
#define PSR_AA32_I_BIT 0x00000080
#define PSR_AA32_A_BIT 0x00000100
#define PSR_AA32_E_BIT 0x00000200
#define PSR_AA32_PAN_BIT 0x00400000
#define PSR_AA32_SSBS_BIT 0x00800000
#define PSR_AA32_DIT_BIT 0x01000000
#define PSR_AA32_Q_BIT 0x08000000


@@ -49,6 +49,7 @@
#define PSR_SSBS_BIT 0x00001000
#define PSR_PAN_BIT 0x00400000
#define PSR_UAO_BIT 0x00800000
#define PSR_DIT_BIT 0x01000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000


@@ -25,9 +25,6 @@
#include <asm/kvm_emulate.h>
#include <asm/esr.h>
#define PSTATE_FAULT_BITS_64 (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
PSR_I_BIT | PSR_D_BIT)
#define CURRENT_EL_SP_EL0_VECTOR 0x0
#define CURRENT_EL_SP_ELx_VECTOR 0x200
#define LOWER_EL_AArch64_VECTOR 0x400
@@ -61,6 +58,69 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
}
/*
* When an exception is taken, most PSTATE fields are left unchanged in the
* handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
* of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
* layouts, so we don't need to shuffle these for exceptions from AArch32 EL0.
*
* For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429.
* For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426.
*
* Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
* MSB to LSB.
*/
static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
{
unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
unsigned long old, new;
old = *vcpu_cpsr(vcpu);
new = 0;
new |= (old & PSR_N_BIT);
new |= (old & PSR_Z_BIT);
new |= (old & PSR_C_BIT);
new |= (old & PSR_V_BIT);
// TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests)
new |= (old & PSR_DIT_BIT);
// PSTATE.UAO is set to zero upon any exception to AArch64
// See ARM DDI 0487E.a, page D5-2579.
// PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0
// SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented
// See ARM DDI 0487E.a, page D5-2578.
new |= (old & PSR_PAN_BIT);
if (!(sctlr & SCTLR_EL1_SPAN))
new |= PSR_PAN_BIT;
// PSTATE.SS is set to zero upon any exception to AArch64
// See ARM DDI 0487E.a, page D2-2452.
// PSTATE.IL is set to zero upon any exception to AArch64
// See ARM DDI 0487E.a, page D1-2306.
// PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64
// See ARM DDI 0487E.a, page D13-3258
if (sctlr & SCTLR_ELx_DSSBS)
new |= PSR_SSBS_BIT;
// PSTATE.BTYPE is set to zero upon any exception to AArch64
// See ARM DDI 0487E.a, pages D1-2293 to D1-2294.
new |= PSR_D_BIT;
new |= PSR_A_BIT;
new |= PSR_I_BIT;
new |= PSR_F_BIT;
new |= PSR_MODE_EL1h;
return new;
}
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -70,7 +130,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
vcpu_write_spsr(vcpu, cpsr);
vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
@@ -105,7 +165,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
vcpu_write_spsr(vcpu, cpsr);
/*


@@ -123,7 +123,7 @@ $(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS
targets += vmlinux.its
targets += vmlinux.gz.its
targets += vmlinux.bz2.its
targets += vmlinux.lzmo.its
targets += vmlinux.lzma.its
targets += vmlinux.lzo.its
quiet_cmd_cpp_its_S = ITS $@


@@ -230,6 +230,7 @@ config PPC
select NEED_SG_DMA_LENGTH
select NO_BOOTMEM
select OF
select OF_DMA_DEFAULT_COHERENT if !NOT_COHERENT_CACHE
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
select OLD_SIGACTION if PPC32


@@ -2065,7 +2065,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
mutex_unlock(&kvm->lock);
if (!vcore)
goto free_vcpu;
goto uninit_vcpu;
spin_lock(&vcore->lock);
++vcore->num_threads;
@@ -2082,6 +2082,8 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
return vcpu;
uninit_vcpu:
kvm_vcpu_uninit(vcpu);
free_vcpu:
kmem_cache_free(kvm_vcpu_cache, vcpu);
out:


@@ -1772,10 +1772,12 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
err = kvmppc_mmu_init(vcpu);
if (err < 0)
goto uninit_vcpu;
goto free_shared_page;
return vcpu;
free_shared_page:
free_page((unsigned long)vcpu->arch.shared);
uninit_vcpu:
kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:


@@ -366,8 +366,10 @@ static bool lmb_is_removable(struct drmem_lmb *lmb)
for (i = 0; i < scns_per_block; i++) {
pfn = PFN_DOWN(phys_addr);
if (!pfn_present(pfn))
if (!pfn_present(pfn)) {
phys_addr += MIN_MEMORY_BLOCK_SIZE;
continue;
}
rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
phys_addr += MIN_MEMORY_BLOCK_SIZE;


@@ -1878,15 +1878,14 @@ static void dump_300_sprs(void)
printf("pidr = %.16lx tidr = %.16lx\n",
mfspr(SPRN_PID), mfspr(SPRN_TIDR));
printf("asdr = %.16lx psscr = %.16lx\n",
mfspr(SPRN_ASDR), hv ? mfspr(SPRN_PSSCR)
: mfspr(SPRN_PSSCR_PR));
printf("psscr = %.16lx\n",
hv ? mfspr(SPRN_PSSCR) : mfspr(SPRN_PSSCR_PR));
if (!hv)
return;
printf("ptcr = %.16lx\n",
mfspr(SPRN_PTCR));
printf("ptcr = %.16lx asdr = %.16lx\n",
mfspr(SPRN_PTCR), mfspr(SPRN_ASDR));
#endif
}


@@ -33,6 +33,8 @@
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#include <asm/setup.h>
#ifndef __ASSEMBLY__


@@ -2564,9 +2564,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
CR14_UNUSED_33 |
CR14_EXTERNAL_DAMAGE_SUBMASK;
/* make sure the new fpc will be lazily loaded */
save_fpu_regs();
current->thread.fpu.fpc = 0;
vcpu->run->s.regs.fpc = 0;
vcpu->arch.sie_block->gbea = 1;
vcpu->arch.sie_block->pp = 0;
vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
@@ -3994,7 +3992,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
switch (ioctl) {
case KVM_S390_STORE_STATUS:
idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_s390_vcpu_store_status(vcpu, arg);
r = kvm_s390_store_status_unloaded(vcpu, arg);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
case KVM_S390_SET_INITIAL_PSW: {


@@ -2,7 +2,7 @@
/*
* IBM System z Huge TLB Page Support for Kernel.
*
* Copyright IBM Corp. 2007,2016
* Copyright IBM Corp. 2007,2020
* Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
@@ -11,6 +11,9 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
/*
* If the bit selected by single-bit bitmask "a" is set within "x", move
@@ -267,3 +270,98 @@ static __init int setup_hugepagesz(char *opt)
return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
struct vm_unmapped_area_info info;
info.flags = 0;
info.length = len;
info.low_limit = current->mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
return vm_unmapped_area(&info);
}
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
unsigned long addr0, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
struct vm_unmapped_area_info info;
unsigned long addr;
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = current->mm->mmap_base;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
addr = vm_unmapped_area(&info);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
if (addr & ~PAGE_MASK) {
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = TASK_SIZE;
addr = vm_unmapped_area(&info);
}
return addr;
}
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int rc;
if (len & ~huge_page_mask(h))
return -EINVAL;
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED) {
if (prepare_hugepage_range(file, addr, len))
return -EINVAL;
goto check_asce_limit;
}
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)))
goto check_asce_limit;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
pgoff, flags);
else
addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
pgoff, flags);
if (addr & ~PAGE_MASK)
return addr;
check_asce_limit:
if (addr + len > current->mm->context.asce_limit &&
addr + len <= TASK_SIZE) {
rc = crst_table_upgrade(mm, addr + len);
if (rc)
return (unsigned long) rc;
}
return addr;
}


@@ -16,10 +16,10 @@
struct ipc64_perm
{
__kernel_key_t key;
__kernel_uid_t uid;
__kernel_gid_t gid;
__kernel_uid_t cuid;
__kernel_gid_t cgid;
__kernel_uid32_t uid;
__kernel_gid32_t gid;
__kernel_uid32_t cuid;
__kernel_gid32_t cgid;
#ifndef __arch64__
unsigned short __pad0;
#endif


@@ -448,6 +448,14 @@ static inline void ack_APIC_irq(void)
apic_eoi();
}
static inline bool lapic_vector_set_in_irr(unsigned int vector)
{
u32 irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
return !!(irr & (1U << (vector % 32)));
}
static inline unsigned default_get_apic_id(unsigned long x)
{
unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));


@@ -350,12 +350,12 @@ struct kvm_mmu {
void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err,
bool prefault);
void (*inject_page_fault)(struct kvm_vcpu *vcpu,
struct x86_exception *fault);
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
struct x86_exception *exception);
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa,
u32 access, struct x86_exception *exception);
gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
struct x86_exception *exception);
int (*sync_page)(struct kvm_vcpu *vcpu,
@@ -1354,7 +1354,7 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);


@@ -26,10 +26,8 @@
static struct irq_domain *msi_default_domain;
static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
static void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg)
{
struct irq_cfg *cfg = irqd_cfg(data);
msg->address_hi = MSI_ADDR_BASE_HI;
if (x2apic_enabled())
@@ -50,6 +48,127 @@ static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
MSI_DATA_VECTOR(cfg->vector);
}
static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
__irq_msi_compose_msg(irqd_cfg(data), msg);
}
static void irq_msi_update_msg(struct irq_data *irqd, struct irq_cfg *cfg)
{
struct msi_msg msg[2] = { [1] = { }, };
__irq_msi_compose_msg(cfg, msg);
irq_data_get_irq_chip(irqd)->irq_write_msi_msg(irqd, msg);
}
static int
msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
{
struct irq_cfg old_cfg, *cfg = irqd_cfg(irqd);
struct irq_data *parent = irqd->parent_data;
unsigned int cpu;
int ret;
/* Save the current configuration */
cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd));
old_cfg = *cfg;
/* Allocate a new target vector */
ret = parent->chip->irq_set_affinity(parent, mask, force);
if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
return ret;
/*
* For non-maskable and non-remapped MSI interrupts the migration
* to a different destination CPU and a different vector has to be
* done careful to handle the possible stray interrupt which can be
* caused by the non-atomic update of the address/data pair.
*
* Direct update is possible when:
* - The MSI is maskable (remapped MSI does not use this code path).
* The quirk bit is not set in this case.
* - The new vector is the same as the old vector
* - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
* - The new destination CPU is the same as the old destination CPU
*/
if (!irqd_msi_nomask_quirk(irqd) ||
cfg->vector == old_cfg.vector ||
old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
cfg->dest_apicid == old_cfg.dest_apicid) {
irq_msi_update_msg(irqd, cfg);
return ret;
}
/*
* Paranoia: Validate that the interrupt target is the local
* CPU.
*/
if (WARN_ON_ONCE(cpu != smp_processor_id())) {
irq_msi_update_msg(irqd, cfg);
return ret;
}
/*
* Redirect the interrupt to the new vector on the current CPU
* first. This might cause a spurious interrupt on this vector if
* the device raises an interrupt right between this update and the
* update to the final destination CPU.
*
* If the vector is in use then the installed device handler will
* denote it as spurious which is no harm as this is a rare event
* and interrupt handlers have to cope with spurious interrupts
* anyway. If the vector is unused, then it is marked so it won't
* trigger the 'No irq handler for vector' warning in do_IRQ().
*
* This requires to hold vector lock to prevent concurrent updates to
* the affected vector.
*/
lock_vector_lock();
/*
* Mark the new target vector on the local CPU if it is currently
* unused. Reuse the VECTOR_RETRIGGERED state which is also used in
* the CPU hotplug path for a similar purpose. This cannot be
* undone here as the current CPU has interrupts disabled and
* cannot handle the interrupt before the whole set_affinity()
* section is done. In the CPU unplug case, the current CPU is
* about to vanish and will not handle any interrupts anymore. The
* vector is cleaned up when the CPU comes online again.
*/
if (IS_ERR_OR_NULL(this_cpu_read(vector_irq[cfg->vector])))
this_cpu_write(vector_irq[cfg->vector], VECTOR_RETRIGGERED);
/* Redirect it to the new vector on the local CPU temporarily */
old_cfg.vector = cfg->vector;
irq_msi_update_msg(irqd, &old_cfg);
/* Now transition it to the target CPU */
irq_msi_update_msg(irqd, cfg);
/*
* All interrupts after this point are now targeted at the new
* vector/CPU.
*
* Drop vector lock before testing whether the temporary assignment
* to the local CPU was hit by an interrupt raised in the device,
* because the retrigger function acquires vector lock again.
*/
unlock_vector_lock();
/*
* Check whether the transition raced with a device interrupt and
* is pending in the local APICs IRR. It is safe to do this outside
* of vector lock as the irq_desc::lock of this interrupt is still
* held and interrupts are disabled: The check is not accessing the
* underlying vector store. It's just checking the local APIC's
* IRR.
*/
if (lapic_vector_set_in_irr(cfg->vector))
irq_data_get_irq_chip(irqd)->irq_retrigger(irqd);
return ret;
}
/*
* IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
* which implement the MSI or MSI-X Capability Structure.
@@ -61,6 +180,7 @@ static struct irq_chip pci_msi_controller = {
.irq_ack = irq_chip_ack_parent,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_compose_msi_msg = irq_msi_compose_msg,
.irq_set_affinity = msi_set_affinity,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -149,6 +269,8 @@ void __init arch_init_msi_domain(struct irq_domain *parent)
}
if (!msi_default_domain)
pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n");
else
msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
}
#ifdef CONFIG_IRQ_REMAP


@@ -115,11 +115,12 @@ void __init tsx_init(void)
tsx_disable();
/*
* tsx_disable() will change the state of the
* RTM CPUID bit. Clear it here since it is now
* expected to be not set.
* tsx_disable() will change the state of the RTM and HLE CPUID
* bits. Clear them here since they are now expected to be not
* set.
*/
setup_clear_cpu_cap(X86_FEATURE_RTM);
setup_clear_cpu_cap(X86_FEATURE_HLE);
} else if (tsx_ctrl_state == TSX_CTRL_ENABLE) {
/*
@@ -131,10 +132,10 @@ void __init tsx_init(void)
tsx_enable();
/*
* tsx_enable() will change the state of the
* RTM CPUID bit. Force it here since it is now
* expected to be set.
* tsx_enable() will change the state of the RTM and HLE CPUID
* bits. Force them here since they are now expected to be set.
*/
setup_force_cpu_cap(X86_FEATURE_RTM);
setup_force_cpu_cap(X86_FEATURE_HLE);
}
}


@@ -5164,16 +5164,28 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
ctxt->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
has_seg_override = true;
ctxt->seg_override = VCPU_SREG_ES;
break;
case 0x2e: /* CS override */
has_seg_override = true;
ctxt->seg_override = VCPU_SREG_CS;
break;
case 0x36: /* SS override */
has_seg_override = true;
ctxt->seg_override = VCPU_SREG_SS;
break;
case 0x3e: /* DS override */
has_seg_override = true;
ctxt->seg_override = (ctxt->b >> 3) & 3;
ctxt->seg_override = VCPU_SREG_DS;
break;
case 0x64: /* FS override */
has_seg_override = true;
ctxt->seg_override = VCPU_SREG_FS;
break;
case 0x65: /* GS override */
has_seg_override = true;
ctxt->seg_override = ctxt->b & 7;
ctxt->seg_override = VCPU_SREG_GS;
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
@@ -5257,10 +5269,15 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
}
break;
case Escape:
if (ctxt->modrm > 0xbf)
opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
else
if (ctxt->modrm > 0xbf) {
size_t size = ARRAY_SIZE(opcode.u.esc->high);
u32 index = array_index_nospec(
ctxt->modrm - 0xc0, size);
opcode = opcode.u.esc->high[index];
} else {
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
}
break;
case InstrDual:
if ((ctxt->modrm >> 6) == 3)


@@ -792,11 +792,12 @@ static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
u32 index, u64 *pdata)
{
struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
size_t size = ARRAY_SIZE(hv->hv_crash_param);
if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
if (WARN_ON_ONCE(index >= size))
return -EINVAL;
*pdata = hv->hv_crash_param[index];
*pdata = hv->hv_crash_param[array_index_nospec(index, size)];
return 0;
}
@@ -835,11 +836,12 @@ static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
u32 index, u64 data)
{
struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
size_t size = ARRAY_SIZE(hv->hv_crash_param);
if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
if (WARN_ON_ONCE(index >= size))
return -EINVAL;
hv->hv_crash_param[index] = data;
hv->hv_crash_param[array_index_nospec(index, size)] = data;
return 0;
}


@@ -460,10 +460,14 @@ static int picdev_write(struct kvm_pic *s,
switch (addr) {
case 0x20:
case 0x21:
pic_lock(s);
pic_ioport_write(&s->pics[0], addr, data);
pic_unlock(s);
break;
case 0xa0:
case 0xa1:
pic_lock(s);
pic_ioport_write(&s->pics[addr >> 7], addr, data);
pic_ioport_write(&s->pics[1], addr, data);
pic_unlock(s);
break;
case 0x4d0:


@@ -36,6 +36,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nospec.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
@@ -73,13 +74,14 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
default:
{
u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
u64 redir_content;
u64 redir_content = ~0ULL;
if (redir_index < IOAPIC_NUM_PINS)
redir_content =
ioapic->redirtbl[redir_index].bits;
else
redir_content = ~0ULL;
if (redir_index < IOAPIC_NUM_PINS) {
u32 index = array_index_nospec(
redir_index, IOAPIC_NUM_PINS);
redir_content = ioapic->redirtbl[index].bits;
}
result = (ioapic->ioregsel & 0x1) ?
(redir_content >> 32) & 0xffffffff :
@@ -297,6 +299,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
ioapic_debug("change redir index %x val %x\n", index, val);
if (index >= IOAPIC_NUM_PINS)
return;
index = array_index_nospec(index, IOAPIC_NUM_PINS);
e = &ioapic->redirtbl[index];
mask_before = e->fields.mask;
/* Preserve read-only fields */


@@ -1862,15 +1862,20 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
case APIC_LVTTHMR:
case APIC_LVTPC:
case APIC_LVT1:
case APIC_LVTERR:
case APIC_LVTERR: {
/* TODO: Check vector */
size_t size;
u32 index;
if (!kvm_apic_sw_enabled(apic))
val |= APIC_LVT_MASKED;
val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
size = ARRAY_SIZE(apic_lvt_mask);
index = array_index_nospec(
(reg - APIC_LVTT) >> 4, size);
val &= apic_lvt_mask[index];
kvm_lapic_set_reg(apic, reg, val);
break;
}
case APIC_LVTT:
if (!kvm_apic_sw_enabled(apic))


@@ -1184,12 +1184,12 @@ static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
}
static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
static int host_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn)
{
unsigned long page_size;
int i, ret = 0;
page_size = kvm_host_page_size(kvm, gfn);
page_size = kvm_host_page_size(vcpu, gfn);
for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
if (page_size >= KVM_HPAGE_SIZE(i))
@@ -1239,7 +1239,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
if (unlikely(*force_pt_level))
return PT_PAGE_TABLE_LEVEL;
host_level = host_mapping_level(vcpu->kvm, large_gfn);
host_level = host_mapping_level(vcpu, large_gfn);
if (host_level == PT_PAGE_TABLE_LEVEL)
return host_level;
@@ -3390,7 +3390,7 @@ static bool is_access_allowed(u32 fault_err_code, u64 spte)
* - true: let the vcpu to access on the same address again.
* - false: let the real page fault path to fix it.
*/
static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int level,
u32 error_code)
{
struct kvm_shadow_walk_iterator iterator;
@@ -3410,7 +3410,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
do {
u64 new_spte;
for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
if (!is_shadow_present_pte(spte) ||
iterator.level < level)
break;
@@ -3488,7 +3488,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
} while (true);
trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
spte, fault_handled);
walk_shadow_page_lockless_end(vcpu);
@@ -3496,10 +3496,11 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
}
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable);
gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
bool *writable);
static int make_mmu_pages_available(struct kvm_vcpu *vcpu);
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
static int nonpaging_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
gfn_t gfn, bool prefault)
{
int r;
@@ -3525,16 +3526,16 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
}
if (fast_page_fault(vcpu, v, level, error_code))
if (fast_page_fault(vcpu, gpa, level, error_code))
return RET_PF_RETRY;
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
return RET_PF_RETRY;
if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
if (handle_abnormal_pfn(vcpu, gpa, gfn, pfn, ACC_ALL, &r))
return r;
r = RET_PF_RETRY;
@@ -3545,7 +3546,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
goto out_unlock;
if (likely(!force_pt_level))
transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
r = __direct_map(vcpu, v, write, map_writable, level, pfn,
r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
prefault, false);
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
@@ -3838,7 +3839,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
u32 access, struct x86_exception *exception)
{
if (exception)
@@ -3846,7 +3847,7 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
return vaddr;
}
static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
u32 access,
struct x86_exception *exception)
{
@@ -4006,13 +4007,14 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
walk_shadow_page_lockless_end(vcpu);
}
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
u32 error_code, bool prefault)
{
gfn_t gfn = gva >> PAGE_SHIFT;
gfn_t gfn = gpa >> PAGE_SHIFT;
int r;
pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
/* Note, paging is disabled, ergo gva == gpa. */
pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
if (page_fault_handle_page_track(vcpu, error_code, gfn))
return RET_PF_EMULATE;
@@ -4024,11 +4026,12 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
return nonpaging_map(vcpu, gva & PAGE_MASK,
return nonpaging_map(vcpu, gpa & PAGE_MASK,
error_code, gfn, prefault);
}
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
gfn_t gfn)
{
struct kvm_arch_async_pf arch;
@@ -4037,7 +4040,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
arch.direct_map = vcpu->arch.mmu.direct_map;
arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
return kvm_setup_async_pf(vcpu, cr2_or_gpa,
kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
@@ -4054,7 +4058,8 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
}
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable)
gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
bool *writable)
{
struct kvm_memory_slot *slot;
bool async;
@@ -4074,12 +4079,12 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
return false; /* *pfn has correct page already */
if (!prefault && kvm_can_do_async_pf(vcpu)) {
trace_kvm_try_async_get_page(gva, gfn);
trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
if (kvm_find_async_pf_gfn(vcpu, gfn)) {
trace_kvm_async_pf_doublefault(gva, gfn);
trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
kvm_make_request(KVM_REQ_APF_HALT, vcpu);
return true;
} else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
} else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
return true;
}
@@ -4092,6 +4097,12 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
{
int r = 1;
#ifndef CONFIG_X86_64
/* A 64-bit CR2 should be impossible on 32-bit KVM. */
if (WARN_ON_ONCE(fault_address >> 32))
return -EFAULT;
#endif
vcpu->arch.l1tf_flush_l1d = true;
switch (vcpu->arch.apf.host_apf_reason) {
default:
@@ -4129,7 +4140,7 @@ check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num);
}
static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
bool prefault)
{
kvm_pfn_t pfn;
@@ -5307,7 +5318,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
return 0;
}
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
void *insn, int insn_len)
{
int r, emulation_type = 0;
@@ -5317,18 +5328,19 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
/* With shadow page tables, fault_address contains a GVA or nGPA. */
if (vcpu->arch.mmu.direct_map) {
vcpu->arch.gpa_available = true;
vcpu->arch.gpa_val = cr2;
vcpu->arch.gpa_val = cr2_or_gpa;
}
r = RET_PF_INVALID;
if (unlikely(error_code & PFERR_RSVD_MASK)) {
r = handle_mmio_page_fault(vcpu, cr2, direct);
r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
if (r == RET_PF_EMULATE)
goto emulate;
}
if (r == RET_PF_INVALID) {
r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
r = vcpu->arch.mmu.page_fault(vcpu, cr2_or_gpa,
lower_32_bits(error_code),
false);
WARN_ON(r == RET_PF_INVALID);
}
@@ -5347,7 +5359,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
*/
if (vcpu->arch.mmu.direct_map &&
(error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
return 1;
}
@@ -5362,7 +5374,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
* explicitly shadowing L1's page tables, i.e. unprotecting something
* for L1 isn't going to magically fix whatever issue cause L2 to fail.
*/
if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu))
if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
emulation_type = EMULTYPE_ALLOW_RETRY;
emulate:
/*
@@ -5375,7 +5387,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
if (unlikely(insn && !insn_len))
return 1;
er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
er = x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn, insn_len);
switch (er) {
case EMULATE_DONE:


@@ -249,13 +249,13 @@ TRACE_EVENT(
TRACE_EVENT(
fast_page_fault,
TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
u64 *sptep, u64 old_spte, bool retry),
TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),
TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, retry),
TP_STRUCT__entry(
__field(int, vcpu_id)
__field(gva_t, gva)
__field(gpa_t, cr2_or_gpa)
__field(u32, error_code)
__field(u64 *, sptep)
__field(u64, old_spte)
@@ -265,7 +265,7 @@ TRACE_EVENT(
TP_fast_assign(
__entry->vcpu_id = vcpu->vcpu_id;
__entry->gva = gva;
__entry->cr2_or_gpa = cr2_or_gpa;
__entry->error_code = error_code;
__entry->sptep = sptep;
__entry->old_spte = old_spte;
@@ -273,9 +273,9 @@ TRACE_EVENT(
__entry->retry = retry;
),
TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
TP_printk("vcpu %d gva %llx error_code %s sptep %p old %#llx"
" new %llx spurious %d fixed %d", __entry->vcpu_id,
__entry->gva, __print_flags(__entry->error_code, "|",
__entry->cr2_or_gpa, __print_flags(__entry->error_code, "|",
kvm_mmu_trace_pferr_flags), __entry->sptep,
__entry->old_spte, __entry->new_spte,
__spte_satisfied(old_spte), __spte_satisfied(new_spte)


@@ -194,11 +194,15 @@ static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
break;
case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
*seg = 1;
*unit = msr - MSR_MTRRfix16K_80000;
*unit = array_index_nospec(
msr - MSR_MTRRfix16K_80000,
MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1);
break;
case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
*seg = 2;
*unit = msr - MSR_MTRRfix4K_C0000;
*unit = array_index_nospec(
msr - MSR_MTRRfix4K_C0000,
MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1);
break;
default:
return false;


@@ -273,11 +273,11 @@ static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
}
/*
* Fetch a guest pte for a guest virtual address
* Fetch a guest pte for a guest virtual address, or for an L2's GPA.
*/
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gva_t addr, u32 access)
gpa_t addr, u32 access)
{
int ret;
pt_element_t pte;
@@ -478,7 +478,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
}
static int FNAME(walk_addr)(struct guest_walker *walker,
struct kvm_vcpu *vcpu, gva_t addr, u32 access)
struct kvm_vcpu *vcpu, gpa_t addr, u32 access)
{
return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
access);
@@ -593,7 +593,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
* If the guest tries to write a write-protected page, we need to
* emulate this operation, return 1 to indicate this case.
*/
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
struct guest_walker *gw,
int write_fault, int hlevel,
kvm_pfn_t pfn, bool map_writable, bool prefault,
@@ -747,7 +747,7 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
* Returns: 1 if we need to emulate the instruction, 0 otherwise, or
* a negative value on error.
*/
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
bool prefault)
{
int write_fault = error_code & PFERR_WRITE_MASK;
@@ -926,18 +926,19 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
spin_unlock(&vcpu->kvm->mmu_lock);
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access,
struct x86_exception *exception)
{
struct guest_walker walker;
gpa_t gpa = UNMAPPED_GVA;
int r;
r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);
r = FNAME(walk_addr)(&walker, vcpu, addr, access);
if (r) {
gpa = gfn_to_gpa(walker.gfn);
gpa |= vaddr & ~PAGE_MASK;
gpa |= addr & ~PAGE_MASK;
} else if (exception)
*exception = walker.fault;
@@ -945,7 +946,8 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
}
#if PTTYPE != PTTYPE_EPT
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
/* Note, gva_to_gpa_nested() is only used to translate L2 GVAs. */
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
u32 access,
struct x86_exception *exception)
{
@@ -953,6 +955,11 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
gpa_t gpa = UNMAPPED_GVA;
int r;
#ifndef CONFIG_X86_64
/* A 64-bit GVA should be impossible on 32-bit KVM. */
WARN_ON_ONCE(vaddr >> 32);
#endif
r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
if (r) {


@@ -2,6 +2,8 @@
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H
#include <linux/nospec.h>
#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu)
@@ -86,8 +88,12 @@ static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
u32 base)
{
if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
return &pmu->gp_counters[msr - base];
if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
u32 index = array_index_nospec(msr - base,
pmu->nr_arch_gp_counters);
return &pmu->gp_counters[index];
}
return NULL;
}
@@ -97,8 +103,12 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
int base = MSR_CORE_PERF_FIXED_CTR0;
if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
return &pmu->fixed_counters[msr - base];
if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
u32 index = array_index_nospec(msr - base,
pmu->nr_arch_fixed_counters);
return &pmu->fixed_counters[index];
}
return NULL;
}


@@ -87,10 +87,14 @@ static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
static unsigned intel_find_fixed_event(int idx)
{
if (idx >= ARRAY_SIZE(fixed_pmc_events))
u32 event;
size_t size = ARRAY_SIZE(fixed_pmc_events);
if (idx >= size)
return PERF_COUNT_HW_MAX;
return intel_arch_events[fixed_pmc_events[idx]].event_type;
event = fixed_pmc_events[array_index_nospec(idx, size)];
return intel_arch_events[event].event_type;
}
/* check if a PMC is enabled by comparing it with globl_ctrl bits. */
@@ -131,16 +135,20 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
bool fixed = idx & (1u << 30);
struct kvm_pmc *counters;
unsigned int num_counters;
idx &= ~(3u << 30);
if (!fixed && idx >= pmu->nr_arch_gp_counters)
if (fixed) {
counters = pmu->fixed_counters;
num_counters = pmu->nr_arch_fixed_counters;
} else {
counters = pmu->gp_counters;
num_counters = pmu->nr_arch_gp_counters;
}
if (idx >= num_counters)
return NULL;
if (fixed && idx >= pmu->nr_arch_fixed_counters)
return NULL;
counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
return &counters[idx];
return &counters[array_index_nospec(idx, num_counters)];
}
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)


@@ -8793,8 +8793,10 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
/* _system ok, nested_vmx_check_permission has verified cpl=0 */
if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
(is_long_mode(vcpu) ? 8 : 4),
&e))
&e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
}
nested_vmx_succeed(vcpu);

arch/x86/kvm/vmx/vmx.c: new file, 8033 lines (file diff suppressed because it is too large)


@@ -92,6 +92,8 @@ u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif
static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
#define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__
@@ -793,9 +795,38 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);
static u64 kvm_host_cr4_reserved_bits(struct cpuinfo_x86 *c)
{
u64 reserved_bits = CR4_RESERVED_BITS;
if (!cpu_has(c, X86_FEATURE_XSAVE))
reserved_bits |= X86_CR4_OSXSAVE;
if (!cpu_has(c, X86_FEATURE_SMEP))
reserved_bits |= X86_CR4_SMEP;
if (!cpu_has(c, X86_FEATURE_SMAP))
reserved_bits |= X86_CR4_SMAP;
if (!cpu_has(c, X86_FEATURE_FSGSBASE))
reserved_bits |= X86_CR4_FSGSBASE;
if (!cpu_has(c, X86_FEATURE_PKU))
reserved_bits |= X86_CR4_PKE;
if (!cpu_has(c, X86_FEATURE_LA57) &&
!(cpuid_ecx(0x7) & bit(X86_FEATURE_LA57)))
reserved_bits |= X86_CR4_LA57;
if (!cpu_has(c, X86_FEATURE_UMIP) && !kvm_x86_ops->umip_emulated())
reserved_bits |= X86_CR4_UMIP;
return reserved_bits;
}
static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
if (cr4 & CR4_RESERVED_BITS)
if (cr4 & cr4_reserved_bits)
return -EINVAL;
if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
@@ -961,9 +992,11 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
size_t size = ARRAY_SIZE(vcpu->arch.db);
switch (dr) {
case 0 ... 3:
vcpu->arch.db[dr] = val;
vcpu->arch.db[array_index_nospec(dr, size)] = val;
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
vcpu->arch.eff_db[dr] = val;
break;
@@ -1000,9 +1033,11 @@ EXPORT_SYMBOL_GPL(kvm_set_dr);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
size_t size = ARRAY_SIZE(vcpu->arch.db);
switch (dr) {
case 0 ... 3:
*val = vcpu->arch.db[dr];
*val = vcpu->arch.db[array_index_nospec(dr, size)];
break;
case 4:
/* fall through */
@@ -2269,7 +2304,10 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
default:
if (msr >= MSR_IA32_MC0_CTL &&
msr < MSR_IA32_MCx_CTL(bank_num)) {
u32 offset = msr - MSR_IA32_MC0_CTL;
u32 offset = array_index_nospec(
msr - MSR_IA32_MC0_CTL,
MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL);
/* only 0 or all 1s can be written to IA32_MCi_CTL
* some Linux kernels though clear bit 10 in bank 4 to
* work around a BIOS/GART TBL issue on AMD K8s, ignore
@@ -2681,7 +2719,10 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
default:
if (msr >= MSR_IA32_MC0_CTL &&
msr < MSR_IA32_MCx_CTL(bank_num)) {
u32 offset = msr - MSR_IA32_MC0_CTL;
u32 offset = array_index_nospec(
msr - MSR_IA32_MC0_CTL,
MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL);
data = vcpu->arch.mce_banks[offset];
break;
}
@@ -3234,6 +3275,9 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
if (vcpu->arch.st.steal.preempted)
return;
vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
@@ -5977,11 +6021,11 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
return r;
}
static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
bool write_fault_to_shadow_pgtable,
int emulation_type)
{
gpa_t gpa = cr2;
gpa_t gpa = cr2_or_gpa;
kvm_pfn_t pfn;
if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
@@ -5995,7 +6039,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
* Write permission should be allowed since only
* write access need to be emulated.
*/
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
/*
* If the mapping is invalid in guest, let cpu retry
@@ -6052,10 +6096,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
}
static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
unsigned long cr2, int emulation_type)
gpa_t cr2_or_gpa, int emulation_type)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa;
last_retry_eip = vcpu->arch.last_retry_eip;
last_retry_addr = vcpu->arch.last_retry_addr;
@@ -6084,14 +6128,14 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
if (x86_page_table_writing_insn(ctxt))
return false;
if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa)
return false;
vcpu->arch.last_retry_eip = ctxt->eip;
vcpu->arch.last_retry_addr = cr2;
vcpu->arch.last_retry_addr = cr2_or_gpa;
if (!vcpu->arch.mmu.direct_map)
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
@@ -6252,11 +6296,8 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
return false;
}
int x86_emulate_instruction(struct kvm_vcpu *vcpu,
unsigned long cr2,
int emulation_type,
void *insn,
int insn_len)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int emulation_type, void *insn, int insn_len)
{
int r;
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
@@ -6299,7 +6340,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
if (r != EMULATION_OK) {
if (emulation_type & EMULTYPE_TRAP_UD)
return EMULATE_FAIL;
if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt,
emulation_type))
return EMULATE_DONE;
if (ctxt->have_exception) {
@@ -6329,7 +6370,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
return EMULATE_DONE;
}
if (retry_instruction(ctxt, cr2, emulation_type))
if (retry_instruction(ctxt, cr2_or_gpa, emulation_type))
return EMULATE_DONE;
/* this is needed for vmware backdoor interface to work since it
@@ -6341,7 +6382,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
restart:
/* Save the faulting GPA (cr2) in the address field */
ctxt->exception.address = cr2;
ctxt->exception.address = cr2_or_gpa;
r = x86_emulate_insn(ctxt);
@@ -6349,7 +6390,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
return EMULATE_DONE;
if (r == EMULATION_FAILED) {
if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt,
emulation_type))
return EMULATE_DONE;
@@ -6753,7 +6794,7 @@ static void kvm_set_mmio_spte_mask(void)
* If reserved bit is not supported, clear the present bit to disable
* mmio page fault.
*/
if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52)
if (maxphyaddr == 52)
mask &= ~1ull;
kvm_mmu_set_mmio_spte_mask(mask, mask);
@@ -8225,6 +8266,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
vcpu_load(vcpu);
if (kvm_mpx_supported())
kvm_load_guest_fpu(vcpu);
kvm_apic_accept_events(vcpu);
if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
@@ -8233,6 +8276,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
else
mp_state->mp_state = vcpu->arch.mp_state;
if (kvm_mpx_supported())
kvm_put_guest_fpu(vcpu);
vcpu_put(vcpu);
return 0;
}
@@ -8654,7 +8699,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kvm_mmu_unload(vcpu);
vcpu_put(vcpu);
kvm_x86_ops->vcpu_free(vcpu);
kvm_arch_vcpu_free(vcpu);
}
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -8847,6 +8892,8 @@ int kvm_arch_hardware_setup(void)
if (r != 0)
return r;
cr4_reserved_bits = kvm_host_cr4_reserved_bits(&boot_cpu_data);
if (kvm_has_tsc_control) {
/*
* Make sure the user can only configure tsc_khz values that
@@ -9505,7 +9552,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
return;
vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
vcpu->arch.mmu.page_fault(vcpu, work->cr2_or_gpa, 0, true);
}
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
@@ -9588,7 +9635,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
{
struct x86_exception fault;
trace_kvm_async_pf_not_present(work->arch.token, work->gva);
trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
@@ -9616,7 +9663,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
work->arch.token = ~0; /* broadcast wakeup */
else
kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
trace_kvm_async_pf_ready(work->arch.token, work->gva);
trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
!apf_get_user(vcpu, &val)) {


@@ -284,7 +284,7 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
int page_num);
bool kvm_vector_hashing_enabled(void);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int emulation_type, void *insn, int insn_len);
#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \


@@ -649,10 +649,8 @@ EXPORT_SYMBOL_GPL(crypto_grab_spawn);
void crypto_drop_spawn(struct crypto_spawn *spawn)
{
if (!spawn->alg)
return;
down_write(&crypto_alg_sem);
if (spawn->alg)
list_del(&spawn->list);
up_write(&crypto_alg_sem);
}
@@ -661,22 +659,16 @@ EXPORT_SYMBOL_GPL(crypto_drop_spawn);
static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
{
struct crypto_alg *alg;
struct crypto_alg *alg2;
down_read(&crypto_alg_sem);
alg = spawn->alg;
alg2 = alg;
if (alg2)
alg2 = crypto_mod_get(alg2);
if (alg && !crypto_mod_get(alg)) {
alg->cra_flags |= CRYPTO_ALG_DYING;
alg = NULL;
}
up_read(&crypto_alg_sem);
if (!alg2) {
if (alg)
crypto_shoot_alg(alg);
return ERR_PTR(-EAGAIN);
}
return alg;
return alg ?: ERR_PTR(-EAGAIN);
}
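The return statement above uses the GNU C "Elvis" operator: a ?: b evaluates a once and yields it when non-NULL (non-zero), otherwise b. In other words:

    return alg ?: ERR_PTR(-EAGAIN);
    /* behaves exactly like the following, with alg evaluated only once: */
    return alg ? alg : ERR_PTR(-EAGAIN);

Note also that the failing crypto_mod_get() path sets CRYPTO_ALG_DYING under the read lock, so later lookups do not keep retrying an algorithm whose module is already on its way out.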
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,


@@ -349,13 +349,12 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
return len;
}
void crypto_shoot_alg(struct crypto_alg *alg)
static void crypto_shoot_alg(struct crypto_alg *alg)
{
down_write(&crypto_alg_sem);
alg->cra_flags |= CRYPTO_ALG_DYING;
up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_shoot_alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask)


@@ -79,7 +79,6 @@ void crypto_alg_tested(const char *name, int err);
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
struct crypto_alg *nalg);
void crypto_remove_final(struct list_head *list);
void crypto_shoot_alg(struct crypto_alg *alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
void *crypto_create_tfm(struct crypto_alg *alg,


@@ -130,7 +130,6 @@ static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
struct padata_priv *padata = pcrypt_request_padata(preq);
padata->info = err;
req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
padata_do_serial(padata);
}


@@ -51,6 +51,8 @@
#define PREFIX "ACPI: "
#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
#define ACPI_BATTERY_CAPACITY_VALID(capacity) \
((capacity) != 0 && (capacity) != ACPI_BATTERY_VALUE_UNKNOWN)
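The helper folds the two invalid cases into one test: ACPI_BATTERY_CAPACITY_VALID(0) and ACPI_BATTERY_CAPACITY_VALID(ACPI_BATTERY_VALUE_UNKNOWN) are both false, so a zero reading and the firmware's 0xFFFFFFFF "unknown" sentinel are now rejected consistently everywhere the capacity values are consumed below.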
#define ACPI_BATTERY_DEVICE_NAME "Battery"
@@ -205,7 +207,8 @@ static int acpi_battery_is_charged(struct acpi_battery *battery)
static bool acpi_battery_is_degraded(struct acpi_battery *battery)
{
return battery->full_charge_capacity && battery->design_capacity &&
return ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity) &&
battery->full_charge_capacity < battery->design_capacity;
}
@@ -227,7 +230,7 @@ static int acpi_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
int ret = 0;
int full_capacity = ACPI_BATTERY_VALUE_UNKNOWN, ret = 0;
struct acpi_battery *battery = to_acpi_battery(psy);
if (acpi_battery_present(battery)) {
@@ -276,14 +279,14 @@ static int acpi_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
if (!ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
ret = -ENODEV;
else
val->intval = battery->design_capacity * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_ENERGY_FULL:
if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity))
ret = -ENODEV;
else
val->intval = battery->full_charge_capacity * 1000;
@@ -296,11 +299,17 @@ static int acpi_battery_get_property(struct power_supply *psy,
val->intval = battery->capacity_now * 1000;
break;
case POWER_SUPPLY_PROP_CAPACITY:
if (battery->capacity_now && battery->full_charge_capacity)
val->intval = battery->capacity_now * 100/
battery->full_charge_capacity;
if (ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity))
full_capacity = battery->full_charge_capacity;
else if (ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
full_capacity = battery->design_capacity;
if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN ||
full_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
val->intval = 0;
val->intval = battery->capacity_now * 100/
full_capacity;
break;
case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
if (battery->state & ACPI_BATTERY_STATE_CRITICAL)
@@ -346,6 +355,20 @@ static enum power_supply_property charge_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
static enum power_supply_property charge_battery_full_cap_broken_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -807,12 +830,25 @@ static void __exit battery_hook_exit(void)
static int sysfs_add_battery(struct acpi_battery *battery)
{
struct power_supply_config psy_cfg = { .drv_data = battery, };
bool full_cap_broken = false;
if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
!ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
full_cap_broken = true;
if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) {
if (full_cap_broken) {
battery->bat_desc.properties =
charge_battery_full_cap_broken_props;
battery->bat_desc.num_properties =
ARRAY_SIZE(charge_battery_full_cap_broken_props);
} else {
battery->bat_desc.properties = charge_battery_props;
battery->bat_desc.num_properties =
ARRAY_SIZE(charge_battery_props);
} else if (battery->full_charge_capacity == 0) {
}
} else {
if (full_cap_broken) {
battery->bat_desc.properties =
energy_battery_full_cap_broken_props;
battery->bat_desc.num_properties =
@@ -822,6 +858,7 @@ static int sysfs_add_battery(struct acpi_battery *battery)
battery->bat_desc.num_properties =
ARRAY_SIZE(energy_battery_props);
}
}
battery->bat_desc.name = acpi_device_bid(battery->device);
battery->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY;


@@ -328,6 +328,11 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
},
},
/*
* Desktops which falsely report a backlight and which our heuristics
* for this do not catch.
*/
{
.callback = video_detect_force_none,
.ident = "Dell OptiPlex 9020M",
@@ -336,6 +341,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"),
},
},
{
.callback = video_detect_force_none,
.ident = "MSI MS-7721",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-7721"),
},
},
{ },
};


@@ -273,10 +273,38 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
device_links_read_unlock(idx);
}
static void dpm_wait_for_superior(struct device *dev, bool async)
static bool dpm_wait_for_superior(struct device *dev, bool async)
{
dpm_wait(dev->parent, async);
struct device *parent;
/*
* If the device is resumed asynchronously and the parent's callback
* deletes both the device and the parent itself, the parent object may
* be freed while this function is running, so avoid that by reference
* counting the parent once more unless the device has been deleted
* already (in which case return right away).
*/
mutex_lock(&dpm_list_mtx);
if (!device_pm_initialized(dev)) {
mutex_unlock(&dpm_list_mtx);
return false;
}
parent = get_device(dev->parent);
mutex_unlock(&dpm_list_mtx);
dpm_wait(parent, async);
put_device(parent);
dpm_wait_for_suppliers(dev, async);
/*
* If the parent's callback has deleted the device, attempting to resume
* it would be invalid, so avoid doing that then.
*/
return device_pm_initialized(dev);
}
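The fix follows the standard refcount-pin pattern: before blocking on an object that a concurrent callback may delete, take a reference under the lock that serializes deletion, and re-check validity after waking. A condensed sketch of that shape, with an invented wrapper name (get_device() and put_device() are NULL-safe, so a missing parent needs no special case):

    static bool wait_on_superior(struct device *dev)
    {
            struct device *parent;

            mutex_lock(&dpm_list_mtx);
            if (!device_pm_initialized(dev)) {
                    mutex_unlock(&dpm_list_mtx);
                    return false;               /* dev already deleted */
            }
            parent = get_device(dev->parent);   /* pin the parent */
            mutex_unlock(&dpm_list_mtx);

            dpm_wait(parent, true);
            put_device(parent);                 /* drop the pin */

            /* the parent's callback may have deleted dev meanwhile */
            return device_pm_initialized(dev);
    }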
static void dpm_wait_for_consumers(struct device *dev, bool async)
@@ -636,7 +664,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_noirq_suspended)
goto Out;
dpm_wait_for_superior(dev, async);
if (!dpm_wait_for_superior(dev, async))
goto Out;
skip_resume = dev_pm_may_skip_resume(dev);
@@ -837,7 +866,8 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_late_suspended)
goto Out;
dpm_wait_for_superior(dev, async);
if (!dpm_wait_for_superior(dev, async))
goto Out;
callback = dpm_subsys_resume_early_cb(dev, state, &info);
@@ -957,7 +987,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
}
dpm_wait_for_superior(dev, async);
if (!dpm_wait_for_superior(dev, async))
goto Complete;
dpm_watchdog_set(&wd, dev);
device_lock(dev);


@@ -799,7 +799,11 @@ static struct tegra_periph_init_data gate_clks[] = {
GATE("ahbdma", "hclk", 33, 0, tegra_clk_ahbdma, 0),
GATE("apbdma", "pclk", 34, 0, tegra_clk_apbdma, 0),
GATE("kbc", "clk_32k", 36, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_kbc, 0),
GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, 0),
/*
* Critical for RAM re-repair operation, which must occur on resume
* from LP1 system suspend and as part of CCPLEX cluster switching.
*/
GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, CLK_IS_CRITICAL),
GATE("fuse_burn", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse_burn, 0),
GATE("kfuse", "clk_m", 40, TEGRA_PERIPH_ON_APB, tegra_clk_kfuse, 0),
GATE("apbif", "clk_m", 107, TEGRA_PERIPH_ON_APB, tegra_clk_apbif, 0),


@@ -91,7 +91,6 @@
struct atmel_aes_caps {
bool has_dualbuff;
bool has_cfb64;
bool has_ctr32;
bool has_gcm;
bool has_xts;
bool has_authenc;
@@ -1016,8 +1015,9 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
struct scatterlist *src, *dst;
u32 ctr, blocks;
size_t datalen;
u32 ctr;
u16 blocks, start, end;
bool use_dma, fragmented = false;
/* Check for transfer completion. */
@@ -1029,27 +1029,17 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
datalen = req->nbytes - ctx->offset;
blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
ctr = be32_to_cpu(ctx->iv[3]);
if (dd->caps.has_ctr32) {
/* Check 32bit counter overflow. */
u32 start = ctr;
u32 end = start + blocks - 1;
if (end < start) {
ctr |= 0xffffffff;
datalen = AES_BLOCK_SIZE * -start;
fragmented = true;
}
} else {
/* Check 16bit counter overflow. */
u16 start = ctr & 0xffff;
u16 end = start + (u16)blocks - 1;
start = ctr & 0xffff;
end = start + blocks - 1;
if (blocks >> 16 || end < start) {
ctr |= 0xffff;
datalen = AES_BLOCK_SIZE * (0x10000 - start);
fragmented = true;
}
}
use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
/* Jump to offset. */
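A worked example of the 16-bit overflow check above: the low 16 bits of the big-endian counter form start, so with start = 0xfff0 and a 32-block request, end = start + 31 wraps to 0x000f, which is below start. The driver then truncates the first fragment to 0x10000 - 0xfff0 = 16 blocks (datalen = 16 * AES_BLOCK_SIZE), sets fragmented, and issues the remainder after the IV has been reloaded. The per-revision has_ctr32 capability disappears because all revisions are now handled with this single conservative 16-bit check.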
@@ -2553,7 +2543,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
dd->caps.has_dualbuff = 0;
dd->caps.has_cfb64 = 0;
dd->caps.has_ctr32 = 0;
dd->caps.has_gcm = 0;
dd->caps.has_xts = 0;
dd->caps.has_authenc = 0;
@@ -2564,7 +2553,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x500:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.has_xts = 1;
dd->caps.has_authenc = 1;
@@ -2573,7 +2561,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x200:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.max_burst_size = 4;
break;


@@ -590,6 +590,7 @@ const struct ccp_vdata ccpv3_platform = {
.setup = NULL,
.perform = &ccp3_actions,
.offset = 0,
.rsamax = CCP_RSA_MAX_WIDTH,
};
const struct ccp_vdata ccpv3 = {


@@ -131,6 +131,7 @@ struct cc_drvdata {
u32 axim_mon_offset;
u32 sig_offset;
u32 ver_offset;
bool pm_on;
};
struct cc_crypto_alg {


@@ -23,14 +23,8 @@ const struct dev_pm_ops ccree_pm = {
int cc_pm_suspend(struct device *dev)
{
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
int rc;
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
rc = cc_suspend_req_queue(drvdata);
if (rc) {
dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
return rc;
}
fini_cc_regs(drvdata);
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
cc_clk_off(drvdata);
@@ -59,13 +53,6 @@ int cc_pm_resume(struct device *dev)
/* check if tee fips error occurred during power down */
cc_tee_handle_fips_error(drvdata);
rc = cc_resume_req_queue(drvdata);
if (rc) {
dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
return rc;
}
/* must be after the queue resuming as it uses the HW queue*/
cc_init_hash_sram(drvdata);
cc_init_iv_sram(drvdata);
@@ -77,12 +64,10 @@ int cc_pm_get(struct device *dev)
int rc = 0;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
if (cc_req_queue_suspended(drvdata))
if (drvdata->pm_on)
rc = pm_runtime_get_sync(dev);
else
pm_runtime_get_noresume(dev);
return rc;
return (rc == 1 ? 0 : rc);
}
int cc_pm_put_suspend(struct device *dev)
@@ -90,14 +75,11 @@ int cc_pm_put_suspend(struct device *dev)
int rc = 0;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
if (!cc_req_queue_suspended(drvdata)) {
if (drvdata->pm_on) {
pm_runtime_mark_last_busy(dev);
rc = pm_runtime_put_autosuspend(dev);
} else {
/* Something wrong happens*/
dev_err(dev, "request to suspend already suspended queue");
rc = -EBUSY;
}
return rc;
}
@@ -108,7 +90,7 @@ int cc_pm_init(struct cc_drvdata *drvdata)
/* must be before the enabling to avoid redundant suspending */
pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(dev);
/* activate the PM module */
/* set us as active - note we won't do PM ops until cc_pm_go()! */
return pm_runtime_set_active(dev);
}
@@ -116,9 +98,11 @@ int cc_pm_init(struct cc_drvdata *drvdata)
void cc_pm_go(struct cc_drvdata *drvdata)
{
pm_runtime_enable(drvdata_to_dev(drvdata));
drvdata->pm_on = true;
}
void cc_pm_fini(struct cc_drvdata *drvdata)
{
pm_runtime_disable(drvdata_to_dev(drvdata));
drvdata->pm_on = false;
}


@@ -41,7 +41,6 @@ struct cc_req_mgr_handle {
#else
struct tasklet_struct comptask;
#endif
bool is_runtime_suspended;
};
struct cc_bl_item {
@@ -403,6 +402,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
spin_lock(&mgr->bl_lock);
list_del(&bli->list);
--mgr->bl_len;
kfree(bli);
}
spin_unlock(&mgr->bl_lock);
@@ -660,52 +660,3 @@ static void comp_handler(unsigned long devarg)
cc_proc_backlog(drvdata);
}
/*
* resume the queue configuration - no need to take the lock as this happens
* inside the spin lock protection
*/
#if defined(CONFIG_PM)
int cc_resume_req_queue(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
spin_lock_bh(&request_mgr_handle->hw_lock);
request_mgr_handle->is_runtime_suspended = false;
spin_unlock_bh(&request_mgr_handle->hw_lock);
return 0;
}
/*
* suspend the queue configuration. Since it is used for the runtime suspend
* only verify that the queue can be suspended.
*/
int cc_suspend_req_queue(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
/* lock the send_request */
spin_lock_bh(&request_mgr_handle->hw_lock);
if (request_mgr_handle->req_queue_head !=
request_mgr_handle->req_queue_tail) {
spin_unlock_bh(&request_mgr_handle->hw_lock);
return -EBUSY;
}
request_mgr_handle->is_runtime_suspended = true;
spin_unlock_bh(&request_mgr_handle->hw_lock);
return 0;
}
bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
return request_mgr_handle->is_runtime_suspended;
}
#endif


@@ -40,12 +40,4 @@ void complete_request(struct cc_drvdata *drvdata);
void cc_req_mgr_fini(struct cc_drvdata *drvdata);
#if defined(CONFIG_PM)
int cc_resume_req_queue(struct cc_drvdata *drvdata);
int cc_suspend_req_queue(struct cc_drvdata *drvdata);
bool cc_req_queue_suspended(struct cc_drvdata *drvdata);
#endif
#endif /*__REQUEST_MGR_H__*/


@@ -14,7 +14,7 @@
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/io.h>
#include <linux/delay.h>
@@ -28,12 +28,12 @@ static spinlock_t lock;
/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, void *value)
_writefield(u32 offset, const void *value)
{
int i;
for (i = 0; i < 4; i++)
iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
iowrite32(((const u32 *) value)[i], _iobase + offset + (i * 4));
}
/* Read a 128 bit field (either a writable key or IV) */
@@ -47,12 +47,12 @@ _readfield(u32 offset, void *value)
}
static int
do_crypt(void *src, void *dst, int len, u32 flags)
do_crypt(const void *src, void *dst, u32 len, u32 flags)
{
u32 status;
u32 counter = AES_OP_TIMEOUT;
iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
iowrite32(virt_to_phys((void *)src), _iobase + AES_SOURCEA_REG);
iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
iowrite32(len, _iobase + AES_LENA_REG);
@@ -69,16 +69,14 @@ do_crypt(void *src, void *dst, int len, u32 flags)
return counter ? 0 : 1;
}
static unsigned int
geode_aes_crypt(struct geode_aes_op *op)
static void
geode_aes_crypt(const struct geode_aes_tfm_ctx *tctx, const void *src,
void *dst, u32 len, u8 *iv, int mode, int dir)
{
u32 flags = 0;
unsigned long iflags;
int ret;
if (op->len == 0)
return 0;
/* If the source and destination are the same, then
* we need to turn on the coherent flags, otherwise
* we don't need to worry
@@ -86,32 +84,28 @@ geode_aes_crypt(struct geode_aes_op *op)
flags |= (AES_CTRL_DCA | AES_CTRL_SCA);
if (op->dir == AES_DIR_ENCRYPT)
if (dir == AES_DIR_ENCRYPT)
flags |= AES_CTRL_ENCRYPT;
/* Start the critical section */
spin_lock_irqsave(&lock, iflags);
if (op->mode == AES_MODE_CBC) {
if (mode == AES_MODE_CBC) {
flags |= AES_CTRL_CBC;
_writefield(AES_WRITEIV0_REG, op->iv);
_writefield(AES_WRITEIV0_REG, iv);
}
if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
flags |= AES_CTRL_WRKEY;
_writefield(AES_WRITEKEY0_REG, op->key);
}
_writefield(AES_WRITEKEY0_REG, tctx->key);
ret = do_crypt(op->src, op->dst, op->len, flags);
ret = do_crypt(src, dst, len, flags);
BUG_ON(ret);
if (op->mode == AES_MODE_CBC)
_readfield(AES_WRITEIV0_REG, op->iv);
if (mode == AES_MODE_CBC)
_readfield(AES_WRITEIV0_REG, iv);
spin_unlock_irqrestore(&lock, iflags);
return op->len;
}
/* CRYPTO-API Functions */
@@ -119,13 +113,13 @@ geode_aes_crypt(struct geode_aes_op *op)
static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
unsigned int len)
{
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
unsigned int ret;
op->keylen = len;
tctx->keylen = len;
if (len == AES_KEYSIZE_128) {
memcpy(op->key, key, len);
memcpy(tctx->key, key, len);
return 0;
}
@@ -138,132 +132,93 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
/*
* The requested key size is not supported by HW, do a fallback
*/
op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
tctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
tctx->fallback.cip->base.crt_flags |=
(tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(op->fallback.cip, key, len);
ret = crypto_cipher_setkey(tctx->fallback.cip, key, len);
if (ret) {
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
tfm->crt_flags |= (tctx->fallback.cip->base.crt_flags &
CRYPTO_TFM_RES_MASK);
}
return ret;
}
static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
unsigned int ret;
op->keylen = len;
tctx->keylen = len;
if (len == AES_KEYSIZE_128) {
memcpy(op->key, key, len);
memcpy(tctx->key, key, len);
return 0;
}
if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
/* not supported at all */
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
/*
* The requested key size is not supported by HW, do a fallback
*/
crypto_skcipher_clear_flags(op->fallback.blk, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(op->fallback.blk,
tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
ret = crypto_skcipher_setkey(op->fallback.blk, key, len);
if (ret) {
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
tfm->crt_flags |= crypto_skcipher_get_flags(op->fallback.blk) &
CRYPTO_TFM_RES_MASK;
}
crypto_skcipher_clear_flags(tctx->fallback.skcipher,
CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(tctx->fallback.skcipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
crypto_skcipher_set_flags(tfm,
crypto_skcipher_get_flags(tctx->fallback.skcipher) &
CRYPTO_TFM_RES_MASK);
return ret;
}
static int fallback_blk_dec(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk);
skcipher_request_set_tfm(req, op->fallback.blk);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
return crypto_skcipher_decrypt(req);
}
static int fallback_blk_enc(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk);
skcipher_request_set_tfm(req, op->fallback.blk);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
return crypto_skcipher_encrypt(req);
}
static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
if (unlikely(op->keylen != AES_KEYSIZE_128)) {
crypto_cipher_encrypt_one(op->fallback.cip, out, in);
if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
crypto_cipher_encrypt_one(tctx->fallback.cip, out, in);
return;
}
op->src = (void *) in;
op->dst = (void *) out;
op->mode = AES_MODE_ECB;
op->flags = 0;
op->len = AES_BLOCK_SIZE;
op->dir = AES_DIR_ENCRYPT;
geode_aes_crypt(op);
geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
AES_MODE_ECB, AES_DIR_ENCRYPT);
}
static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
if (unlikely(op->keylen != AES_KEYSIZE_128)) {
crypto_cipher_decrypt_one(op->fallback.cip, out, in);
if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
crypto_cipher_decrypt_one(tctx->fallback.cip, out, in);
return;
}
op->src = (void *) in;
op->dst = (void *) out;
op->mode = AES_MODE_ECB;
op->flags = 0;
op->len = AES_BLOCK_SIZE;
op->dir = AES_DIR_DECRYPT;
geode_aes_crypt(op);
geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
AES_MODE_ECB, AES_DIR_DECRYPT);
}
static int fallback_init_cip(struct crypto_tfm *tfm)
{
const char *name = crypto_tfm_alg_name(tfm);
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
op->fallback.cip = crypto_alloc_cipher(name, 0,
CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
tctx->fallback.cip = crypto_alloc_cipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback.cip)) {
if (IS_ERR(tctx->fallback.cip)) {
printk(KERN_ERR "Error allocating fallback algo %s\n", name);
return PTR_ERR(op->fallback.cip);
return PTR_ERR(tctx->fallback.cip);
}
return 0;
@@ -271,10 +226,9 @@ static int fallback_init_cip(struct crypto_tfm *tfm)
static void fallback_exit_cip(struct crypto_tfm *tfm)
{
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(op->fallback.cip);
op->fallback.cip = NULL;
crypto_free_cipher(tctx->fallback.cip);
}
static struct crypto_alg geode_alg = {
@@ -287,7 +241,7 @@ static struct crypto_alg geode_alg = {
.cra_init = fallback_init_cip,
.cra_exit = fallback_exit_cip,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct geode_aes_op),
.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
@@ -300,222 +254,126 @@ static struct crypto_alg geode_alg = {
}
};
static int
geode_cbc_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
static int geode_init_skcipher(struct crypto_skcipher *tfm)
{
struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err, ret;
const char *name = crypto_tfm_alg_name(&tfm->base);
struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
if (nbytes % AES_BLOCK_SIZE)
return -EINVAL;
if (unlikely(op->keylen != AES_KEYSIZE_128))
return fallback_blk_dec(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
op->iv = walk.iv;
while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_CBC;
op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
op->dir = AES_DIR_DECRYPT;
ret = geode_aes_crypt(op);
nbytes -= ret;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static int
geode_cbc_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err, ret;
if (nbytes % AES_BLOCK_SIZE)
return -EINVAL;
if (unlikely(op->keylen != AES_KEYSIZE_128))
return fallback_blk_enc(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
op->iv = walk.iv;
while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_CBC;
op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
op->dir = AES_DIR_ENCRYPT;
ret = geode_aes_crypt(op);
nbytes -= ret;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static int fallback_init_blk(struct crypto_tfm *tfm)
{
const char *name = crypto_tfm_alg_name(tfm);
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
op->fallback.blk = crypto_alloc_skcipher(name, 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback.blk)) {
tctx->fallback.skcipher =
crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ASYNC);
if (IS_ERR(tctx->fallback.skcipher)) {
printk(KERN_ERR "Error allocating fallback algo %s\n", name);
return PTR_ERR(op->fallback.blk);
return PTR_ERR(tctx->fallback.skcipher);
}
crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
crypto_skcipher_reqsize(tctx->fallback.skcipher));
return 0;
}
static void fallback_exit_blk(struct crypto_tfm *tfm)
static void geode_exit_skcipher(struct crypto_skcipher *tfm)
{
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
crypto_free_skcipher(op->fallback.blk);
op->fallback.blk = NULL;
crypto_free_skcipher(tctx->fallback.skcipher);
}
static struct crypto_alg geode_cbc_alg = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-geode",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY |
static int geode_skcipher_crypt(struct skcipher_request *req, int mode, int dir)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
struct skcipher_request *subreq = skcipher_request_ctx(req);
*subreq = *req;
skcipher_request_set_tfm(subreq, tctx->fallback.skcipher);
if (dir == AES_DIR_DECRYPT)
return crypto_skcipher_decrypt(subreq);
else
return crypto_skcipher_encrypt(subreq);
}
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) != 0) {
geode_aes_crypt(tctx, walk.src.virt.addr, walk.dst.virt.addr,
round_down(nbytes, AES_BLOCK_SIZE),
walk.iv, mode, dir);
err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
}
return err;
}
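Instead of the old SKCIPHER_REQUEST_ON_STACK fallback, the init path above reserved room for a nested request with crypto_skcipher_set_reqsize(), and the crypt path rewrites that sub-request in place. A generic sketch of the technique, with invented names:

    static int do_fallback(struct skcipher_request *req,
                           struct crypto_skcipher *fallback, bool enc)
    {
            /* space reserved via crypto_skcipher_set_reqsize() in init */
            struct skcipher_request *subreq = skcipher_request_ctx(req);

            *subreq = *req;         /* inherit src, dst, cryptlen, iv, flags */
            skcipher_request_set_tfm(subreq, fallback);
            return enc ? crypto_skcipher_encrypt(subreq) :
                         crypto_skcipher_decrypt(subreq);
    }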
static int geode_cbc_encrypt(struct skcipher_request *req)
{
return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_ENCRYPT);
}
static int geode_cbc_decrypt(struct skcipher_request *req)
{
return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_DECRYPT);
}
static int geode_ecb_encrypt(struct skcipher_request *req)
{
return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_ENCRYPT);
}
static int geode_ecb_decrypt(struct skcipher_request *req)
{
return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_DECRYPT);
}
static struct skcipher_alg geode_skcipher_algs[] = {
{
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cbc-aes-geode",
.base.cra_priority = 400,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_init = fallback_init_blk,
.cra_exit = fallback_exit_blk,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct geode_aes_op),
.cra_alignmask = 15,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = geode_setkey_blk,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
.base.cra_alignmask = 15,
.base.cra_module = THIS_MODULE,
.init = geode_init_skcipher,
.exit = geode_exit_skcipher,
.setkey = geode_setkey_skcipher,
.encrypt = geode_cbc_encrypt,
.decrypt = geode_cbc_decrypt,
.ivsize = AES_BLOCK_SIZE,
}
}
};
static int
geode_ecb_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err, ret;
if (nbytes % AES_BLOCK_SIZE)
return -EINVAL;
if (unlikely(op->keylen != AES_KEYSIZE_128))
return fallback_blk_dec(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_ECB;
op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
op->dir = AES_DIR_DECRYPT;
ret = geode_aes_crypt(op);
nbytes -= ret;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static int
geode_ecb_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err, ret;
if (nbytes % AES_BLOCK_SIZE)
return -EINVAL;
if (unlikely(op->keylen != AES_KEYSIZE_128))
return fallback_blk_enc(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_ECB;
op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
op->dir = AES_DIR_ENCRYPT;
ret = geode_aes_crypt(op);
nbytes -= ret;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static struct crypto_alg geode_ecb_alg = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-geode",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_init = fallback_init_blk,
.cra_exit = fallback_exit_blk,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct geode_aes_op),
.cra_alignmask = 15,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = geode_setkey_blk,
.ivsize = AES_BLOCK_SIZE,
}, {
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "ecb-aes-geode",
.base.cra_priority = 400,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
.base.cra_alignmask = 15,
.base.cra_module = THIS_MODULE,
.init = geode_init_skcipher,
.exit = geode_exit_skcipher,
.setkey = geode_setkey_skcipher,
.encrypt = geode_ecb_encrypt,
.decrypt = geode_ecb_decrypt,
}
}
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
},
};
static void geode_aes_remove(struct pci_dev *dev)
{
crypto_unregister_alg(&geode_alg);
crypto_unregister_alg(&geode_ecb_alg);
crypto_unregister_alg(&geode_cbc_alg);
crypto_unregister_skciphers(geode_skcipher_algs,
ARRAY_SIZE(geode_skcipher_algs));
pci_iounmap(dev, _iobase);
_iobase = NULL;
@@ -553,20 +411,14 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (ret)
goto eiomap;
ret = crypto_register_alg(&geode_ecb_alg);
ret = crypto_register_skciphers(geode_skcipher_algs,
ARRAY_SIZE(geode_skcipher_algs));
if (ret)
goto ealg;
ret = crypto_register_alg(&geode_cbc_alg);
if (ret)
goto eecb;
dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
return 0;
eecb:
crypto_unregister_alg(&geode_ecb_alg);
ealg:
crypto_unregister_alg(&geode_alg);


@@ -50,21 +50,10 @@
#define AES_OP_TIMEOUT 0x50000
struct geode_aes_op {
void *src;
void *dst;
u32 mode;
u32 dir;
u32 flags;
int len;
struct geode_aes_tfm_ctx {
u8 key[AES_KEYSIZE_128];
u8 *iv;
union {
struct crypto_skcipher *blk;
struct crypto_skcipher *skcipher;
struct crypto_cipher *cip;
} fallback;
u32 keylen;


@@ -1616,6 +1616,11 @@ static const struct of_device_id spacc_of_id_table[] = {
MODULE_DEVICE_TABLE(of, spacc_of_id_table);
#endif /* CONFIG_OF */
static void spacc_tasklet_kill(void *data)
{
tasklet_kill(data);
}
static int spacc_probe(struct platform_device *pdev)
{
int i, err, ret;
@@ -1659,6 +1664,14 @@ static int spacc_probe(struct platform_device *pdev)
return -ENXIO;
}
tasklet_init(&engine->complete, spacc_spacc_complete,
(unsigned long)engine);
ret = devm_add_action(&pdev->dev, spacc_tasklet_kill,
&engine->complete);
if (ret)
return ret;
if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
engine->name, engine)) {
dev_err(engine->dev, "failed to request IRQ\n");
@@ -1716,8 +1729,6 @@ static int spacc_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&engine->completed);
INIT_LIST_HEAD(&engine->in_progress);
engine->in_flight = 0;
tasklet_init(&engine->complete, spacc_spacc_complete,
(unsigned long)engine);
platform_set_drvdata(pdev, engine);
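The point of the reordering: devm release actions run in reverse order of registration, so registering spacc_tasklet_kill() before devm_request_irq() guarantees that on remove (or probe failure) the IRQ is freed first, after which nothing can schedule the tasklet, and only then is the tasklet killed. The old arrangement initialized the tasklet only after the IRQ was live, leaving a window where an interrupt could schedule a tasklet that was not yet initialized or was being torn down.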


@@ -248,7 +248,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
}
ret = drm_dp_update_payload_part1(mst_mgr);
/* It's OK for this to fail */
drm_dp_update_payload_part1(mst_mgr);
/* mst_mgr->payloads are VC payload notify MST branch using DPCD or
* AUX message. The sequence is slot 1-63 allocated sequence for each
@@ -257,9 +258,6 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
get_payload_table(aconnector, proposed_table);
if (ret)
return false;
return true;
}
@@ -310,7 +308,6 @@ bool dm_helpers_dp_mst_send_payload_allocation(
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_mgr *mst_mgr;
struct drm_dp_mst_port *mst_port;
int ret;
aconnector = stream->sink->priv;
@@ -324,10 +321,8 @@ bool dm_helpers_dp_mst_send_payload_allocation(
if (!mst_mgr->mst_state)
return false;
ret = drm_dp_update_payload_part2(mst_mgr);
if (ret)
return false;
/* It's OK for this to fail */
drm_dp_update_payload_part2(mst_mgr);
if (!enable)
drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);


@@ -79,7 +79,11 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
struct videomode vm;
unsigned long prate;
unsigned int cfg;
int div;
int div, ret;
ret = clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
if (ret)
return;
vm.vfront_porch = adj->crtc_vsync_start - adj->crtc_vdisplay;
vm.vback_porch = adj->crtc_vtotal - adj->crtc_vsync_end;
@@ -138,6 +142,8 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO |
ATMEL_HLCDC_GUARDTIME_MASK | ATMEL_HLCDC_MODE_MASK,
cfg);
clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
}
static enum drm_mode_status


@@ -2125,6 +2125,7 @@ static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
int ret = 0;
int i = 0;
struct drm_dp_mst_branch *mstb = NULL;
mutex_lock(&mgr->lock);
@@ -2185,10 +2186,21 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
/* this can fail if the device is gone */
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
mutex_lock(&mgr->payload_lock);
memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
mgr->payload_mask = 0;
set_bit(0, &mgr->payload_mask);
for (i = 0; i < mgr->max_payloads; i++) {
struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
if (vcpi) {
vcpi->vcpi = 0;
vcpi->num_slots = 0;
}
mgr->proposed_vcpis[i] = NULL;
}
mgr->vcpi_mask = 0;
mutex_unlock(&mgr->payload_lock);
}
out_unlock:

View file

@@ -52,7 +52,12 @@ EXPORT_SYMBOL(drm_rect_intersect);
static u32 clip_scaled(u32 src, u32 dst, u32 clip)
{
u64 tmp = mul_u32_u32(src, dst - clip);
u64 tmp;
if (dst == 0)
return 0;
tmp = mul_u32_u32(src, dst - clip);
/*
* Round toward 1.0 when clipping so that we don't accidentally

View file

@@ -357,8 +357,7 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100);
u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start;
u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1;
if (delay > mode->vtotal)
delay = delay % mode->vtotal;


@@ -1215,10 +1215,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
unsigned int i = 0;
struct page *pg;
if (num_pages < alloc_unit)
return 0;
for (i = 0; (i * alloc_unit) < num_pages; i++) {
for (i = 0; i < num_pages / alloc_unit; i++) {
if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
PAGE_SIZE)
return i * alloc_unit;
@@ -1252,7 +1249,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
}
return num_pages;
return i * alloc_unit;
}
static void balloon_up(struct work_struct *dummy)
@@ -1267,9 +1264,6 @@ static void balloon_up(struct work_struct *dummy)
long avail_pages;
unsigned long floor;
/* The host balloons pages in 2M granularity. */
WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);
/*
* We will attempt 2M allocations. However, if we fail to
* allocate 2M chunks, we will go back to 4k allocations.
@@ -1279,14 +1273,13 @@ static void balloon_up(struct work_struct *dummy)
avail_pages = si_mem_available();
floor = compute_balloon_floor();
/* Refuse to balloon below the floor, keep the 2M granularity. */
/* Refuse to balloon below the floor. */
if (avail_pages < num_pages || avail_pages - num_pages < floor) {
pr_warn("Balloon request will be partially fulfilled. %s\n",
avail_pages < num_pages ? "Not enough memory." :
"Balloon floor reached.");
num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
num_pages -= num_pages % PAGES_IN_2M;
}
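A worked example of the trimmed request: if the host asks for num_pages = 800,000 while si_mem_available() reports 1,000,000 pages and the computed floor is 300,000, then avail_pages - num_pages = 200,000 falls below the floor and the request is cut to 1,000,000 - 300,000 = 700,000 pages. The removed line used to round that down to a 2 MB multiple; the patch drops the assumption that the host balloons only in 2 MB units, so the tail of a request can now be satisfied with 4 KB allocations instead of being discarded.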
while (!done) {


@@ -689,7 +689,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
while (bcnt > 0) {
const size_t gup_num_pages = min_t(size_t,
(bcnt + BIT(page_shift) - 1) >> page_shift,
ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
PAGE_SIZE / sizeof(struct page *));
down_read(&owning_mm->mmap_sem);


@@ -507,8 +507,7 @@ int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
ret = ib_post_send(tx_qp, &cur_wr.wr, bad_wr);
if (ret) {
/* Undo the effect of adding the outstanding wr */
gsi->outstanding_pi = (gsi->outstanding_pi - 1) %
gsi->cap.max_send_wr;
gsi->outstanding_pi--;
goto err;
}
spin_unlock_irqrestore(&gsi->lock, flags);
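The technique here is to keep the producer index monotonically increasing and reduce it modulo the ring size only at the point of use; undoing a failed post is then a plain decrement, and differences between producer and consumer indexes stay meaningful across wrap-around. Roughly, assuming the driver indexes its ring the usual way (the array name is illustrative):

    /* post: wrap only when indexing the ring */
    slot = gsi->outstanding_pi % gsi->cap.max_send_wr;
    gsi->outstanding_wrs[slot] = ...;
    gsi->outstanding_pi++;

    /* undo on failure: no modulo, just step back */
    gsi->outstanding_pi--;

Wrapping outstanding_pi itself, as the old error path did, broke the outstanding-WR accounting once the index had wrapped.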


@@ -329,6 +329,9 @@ struct cached_dev {
*/
atomic_t has_dirty;
#define BCH_CACHE_READA_ALL 0
#define BCH_CACHE_READA_META_ONLY 1
unsigned int cache_readahead_policy;
struct bch_ratelimit writeback_rate;
struct delayed_work writeback_rate_update;


@@ -391,13 +391,20 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
goto skip;
/*
* Flag for bypass if the IO is for read-ahead or background,
* unless the read-ahead request is for metadata
* If the bio is for read-ahead or background IO, whether to bypass
* it or not depends on the following:
* - If the IO is for metadata, always cache it, no bypass.
* - If the IO is not metadata, check dc->cache_readahead_policy:
*     BCH_CACHE_READA_ALL: cache it, do not bypass
*     BCH_CACHE_READA_META_ONLY: do not cache it, bypass
* That is, a read-ahead request for metadata is always cached
* (e.g. for gfs2 or xfs).
*/
if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
!(bio->bi_opf & (REQ_META|REQ_PRIO)))
if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
(dc->cache_readahead_policy != BCH_CACHE_READA_ALL))
goto skip;
}
if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
bio_sectors(bio) & (c->sb.block_size - 1)) {
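Spelled out, the new rule gives these outcomes: a read-ahead or background bio carrying REQ_META or REQ_PRIO is always cached; a plain read-ahead bio is cached under the default "all" policy and bypassed under "meta-only". The old code unconditionally bypassed non-metadata read-ahead, which corresponds exactly to the "meta-only" behaviour.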


@@ -25,6 +25,12 @@ static const char * const bch_cache_modes[] = {
NULL
};
static const char * const bch_reada_cache_policies[] = {
"all",
"meta-only",
NULL
};
/* Default is -1; we skip past it for stop_when_cache_set_failed */
static const char * const bch_stop_on_failure_modes[] = {
"auto",
@@ -94,6 +100,7 @@ rw_attribute(congested_write_threshold_us);
rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(readahead_cache_policy);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
@@ -160,6 +167,11 @@ SHOW(__bch_cached_dev)
bch_cache_modes,
BDEV_CACHE_MODE(&dc->sb));
if (attr == &sysfs_readahead_cache_policy)
return bch_snprint_string_list(buf, PAGE_SIZE,
bch_reada_cache_policies,
dc->cache_readahead_policy);
if (attr == &sysfs_stop_when_cache_set_failed)
return bch_snprint_string_list(buf, PAGE_SIZE,
bch_stop_on_failure_modes,
@@ -324,6 +336,15 @@ STORE(__cached_dev)
}
}
if (attr == &sysfs_readahead_cache_policy) {
v = __sysfs_match_string(bch_reada_cache_policies, -1, buf);
if (v < 0)
return v;
if ((unsigned int) v != dc->cache_readahead_policy)
dc->cache_readahead_policy = v;
}
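Assuming the usual per-device bcache sysfs layout (the path here is illustrative), the new knob would be exercised with something like echo meta-only > /sys/block/bcache0/bcache/readahead_cache_policy and read back the same way; writing all (the default) caches every read-ahead request, while meta-only restores the older bypass-unless-metadata behaviour.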
if (attr == &sysfs_stop_when_cache_set_failed) {
v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
if (v < 0)
@@ -417,6 +438,7 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_data_csum,
#endif
&sysfs_cache_mode,
&sysfs_readahead_cache_policy,
&sysfs_stop_when_cache_set_failed,
&sysfs_writeback_metadata,
&sysfs_writeback_running,


@@ -482,8 +482,14 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
int log = ilog2(bs);
unsigned bs;
int log;
if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags))
bs = crypto_aead_blocksize(any_tfm_aead(cc));
else
bs = crypto_skcipher_blocksize(any_tfm(cc));
log = ilog2(bs);
/* we need to calculate how far we must shift the sector count
* to get the cipher block count, we use this shift in _gen */


@@ -447,7 +447,13 @@ static void writecache_notify_io(unsigned long error, void *context)
complete(&endio->c);
}
static void ssd_commit_flushed(struct dm_writecache *wc)
static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
wait_event(wc->bio_in_progress_wait[direction],
!atomic_read(&wc->bio_in_progress[direction]));
}
static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
struct dm_io_region region;
struct dm_io_request req;
@@ -493,17 +499,20 @@ static void ssd_commit_flushed(struct dm_writecache *wc)
writecache_notify_io(0, &endio);
wait_for_completion_io(&endio.c);
if (wait_for_ios)
writecache_wait_for_ios(wc, WRITE);
writecache_disk_flush(wc, wc->ssd_dev);
memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}
static void writecache_commit_flushed(struct dm_writecache *wc)
static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
if (WC_MODE_PMEM(wc))
wmb();
else
ssd_commit_flushed(wc);
ssd_commit_flushed(wc, wait_for_ios);
}
static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
@@ -527,12 +536,6 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
writecache_error(wc, r, "error flushing metadata: %d", r);
}
static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
wait_event(wc->bio_in_progress_wait[direction],
!atomic_read(&wc->bio_in_progress[direction]));
}
#define WFE_RETURN_FOLLOWING 1
#define WFE_LOWEST_SEQ 2
@@ -730,14 +733,12 @@ static void writecache_flush(struct dm_writecache *wc)
e = e2;
cond_resched();
}
writecache_commit_flushed(wc);
writecache_wait_for_ios(wc, WRITE);
writecache_commit_flushed(wc, true);
wc->seq_count++;
pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
writecache_flush_region(wc, &sb(wc)->seq_count, sizeof sb(wc)->seq_count);
writecache_commit_flushed(wc);
writecache_commit_flushed(wc, false);
wc->overwrote_committed = false;
@@ -761,7 +762,7 @@ static void writecache_flush(struct dm_writecache *wc)
}
if (need_flush_after_free)
writecache_commit_flushed(wc);
writecache_commit_flushed(wc, false);
}
static void writecache_flush_work(struct work_struct *work)
@@ -814,7 +815,7 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_
}
if (discarded_something)
writecache_commit_flushed(wc);
writecache_commit_flushed(wc, false);
}
static bool writecache_wait_for_writeback(struct dm_writecache *wc)
@@ -963,7 +964,7 @@ static void writecache_resume(struct dm_target *ti)
if (need_flush) {
writecache_flush_all_metadata(wc);
writecache_commit_flushed(wc);
writecache_commit_flushed(wc, false);
}
wc_unlock(wc);
@@ -1347,7 +1348,7 @@ static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *
wc->writeback_size--;
n_walked++;
if (unlikely(n_walked >= ENDIO_LATENCY)) {
writecache_commit_flushed(wc);
writecache_commit_flushed(wc, false);
wc_unlock(wc);
wc_lock(wc);
n_walked = 0;
@@ -1428,7 +1429,7 @@ static int writecache_endio_thread(void *data)
writecache_wait_for_ios(wc, READ);
}
writecache_commit_flushed(wc);
writecache_commit_flushed(wc, false);
wc_unlock(wc);
}
@@ -1759,10 +1760,10 @@ static int init_memory(struct dm_writecache *wc)
write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
writecache_flush_all_metadata(wc);
writecache_commit_flushed(wc);
writecache_commit_flushed(wc, false);
pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
writecache_commit_flushed(wc);
writecache_commit_flushed(wc, false);
return 0;
}


@@ -132,6 +132,7 @@ struct dmz_metadata {
sector_t zone_bitmap_size;
unsigned int zone_nr_bitmap_blocks;
unsigned int zone_bits_per_mblk;
unsigned int nr_bitmap_blocks;
unsigned int nr_map_blocks;
@@ -1165,7 +1166,10 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
/* Init */
zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
zmd->zone_nr_bitmap_blocks = zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT;
zmd->zone_nr_bitmap_blocks =
max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
zmd->zone_bits_per_mblk = min_t(sector_t, dev->zone_nr_blocks,
DMZ_BLOCK_SIZE_BITS);
/* Allocate zone array */
zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
@@ -1982,7 +1986,7 @@ int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
dmz_release_mblock(zmd, to_mblk);
dmz_release_mblock(zmd, from_mblk);
chunk_block += DMZ_BLOCK_SIZE_BITS;
chunk_block += zmd->zone_bits_per_mblk;
}
to_zone->weight = from_zone->weight;
@@ -2043,7 +2047,7 @@ int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
/* Set bits */
bit = chunk_block & DMZ_BLOCK_MASK_BITS;
nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
if (count) {
@@ -2122,7 +2126,7 @@ int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
/* Clear bits */
bit = chunk_block & DMZ_BLOCK_MASK_BITS;
nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
count = dmz_clear_bits((unsigned long *)mblk->data,
bit, nr_bits);
@@ -2182,6 +2186,7 @@ static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
{
struct dmz_mblock *mblk;
unsigned int bit, set_bit, nr_bits;
unsigned int zone_bits = zmd->zone_bits_per_mblk;
unsigned long *bitmap;
int n = 0;
@@ -2196,15 +2201,15 @@ static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
/* Get offset */
bitmap = (unsigned long *) mblk->data;
bit = chunk_block & DMZ_BLOCK_MASK_BITS;
nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
nr_bits = min(nr_blocks, zone_bits - bit);
if (set)
set_bit = find_next_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
set_bit = find_next_bit(bitmap, zone_bits, bit);
else
set_bit = find_next_zero_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
dmz_release_mblock(zmd, mblk);
n += set_bit - bit;
if (set_bit < DMZ_BLOCK_SIZE_BITS)
if (set_bit < zone_bits)
break;
nr_blocks -= nr_bits;
@ -2307,7 +2312,7 @@ static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
/* Count bits in this block */
bitmap = mblk->data;
bit = chunk_block & DMZ_BLOCK_MASK_BITS;
nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
n += dmz_count_bits(bitmap, bit, nr_bits);
dmz_release_mblock(zmd, mblk);
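
The two values introduced above are the heart of the fix: for a zone whose bitmap fits in less than one metadata block, the old shift rounded zone_nr_bitmap_blocks down to zero. A self-contained arithmetic check of both expressions (this assumes dm-zoned's 4 KiB metadata block; the 64 MiB zone size is just an example):

#include <stdio.h>

#define DMZ_BLOCK_SHIFT		12		/* 4 KiB metadata blocks */
#define DMZ_BLOCK_SIZE_BITS	(4096 * 8)	/* bits per metadata block */

int main(void)
{
	/* a 64 MiB zone of 4 KiB blocks: fewer blocks than one mblk of bits */
	unsigned long zone_nr_blocks = 16384;
	unsigned long bitmap_size = zone_nr_blocks >> 3;	/* bytes */

	unsigned long old_nr_bitmap_blocks = bitmap_size >> DMZ_BLOCK_SHIFT;
	unsigned long new_nr_bitmap_blocks =
		old_nr_bitmap_blocks ? old_nr_bitmap_blocks : 1;	/* max_t(..., 1, ...) */
	unsigned long bits_per_mblk = zone_nr_blocks < DMZ_BLOCK_SIZE_BITS ?
		zone_nr_blocks : DMZ_BLOCK_SIZE_BITS;			/* min_t(...) */

	/* the old code rounded down to 0 bitmap blocks for such zones */
	printf("old=%lu new=%lu bits_per_mblk=%lu\n",
	       old_nr_bitmap_blocks, new_nr_bitmap_blocks, bits_per_mblk);
	return 0;
}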

--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c

@@ -1822,6 +1822,7 @@ static void dm_init_normal_md_queue(struct mapped_device *md)
 	/*
 	 * Initialize aspects of queue that aren't relevant for blk-mq
 	 */
+	md->queue->backing_dev_info->congested_data = md;
 	md->queue->backing_dev_info->congested_fn = dm_any_congested;
 }
@@ -1920,7 +1921,12 @@ static struct mapped_device *alloc_dev(int minor)
 	if (!md->queue)
 		goto bad;
 	md->queue->queuedata = md;
-	md->queue->backing_dev_info->congested_data = md;
+	/*
+	 * default to bio-based required ->make_request_fn until DM
+	 * table is loaded and md->type established. If request-based
+	 * table is loaded: blk-mq will override accordingly.
+	 */
+	blk_queue_make_request(md->queue, dm_make_request);
 
 	md->disk = alloc_disk_node(1, md->numa_node_id);
 	if (!md->disk)
@@ -2332,7 +2338,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 	case DM_TYPE_BIO_BASED:
 	case DM_TYPE_DAX_BIO_BASED:
 		dm_init_normal_md_queue(md);
-		blk_queue_make_request(md->queue, dm_make_request);
 		break;
 	case DM_TYPE_NVME_BIO_BASED:
 		dm_init_normal_md_queue(md);

--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c

@@ -382,6 +382,33 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
 	return -ENOSPC;
 }
 
+int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
+				 dm_block_t begin, dm_block_t end, dm_block_t *b)
+{
+	int r;
+	uint32_t count;
+
+	do {
+		r = sm_ll_find_free_block(new_ll, begin, new_ll->nr_blocks, b);
+		if (r)
+			break;
+
+		/* double check this block wasn't used in the old transaction */
+		if (*b >= old_ll->nr_blocks)
+			count = 0;
+		else {
+			r = sm_ll_lookup(old_ll, *b, &count);
+			if (r)
+				break;
+
+			if (count)
+				begin = *b + 1;
+		}
+	} while (count);
+
+	return r;
+}
+
 static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
 			int (*mutator)(void *context, uint32_t old, uint32_t *new),
 			void *context, enum allocation_event *ev)
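
The loop above retries until it finds a block that is free in the new transaction and has a zero reference count in the old one, which is what stops a block from being handed out twice across a commit. A user-space sketch of the same search over two toy bitmaps (find_free(), find_common_free() and the bit layout are illustrative stand-ins for sm_ll_find_free_block()/sm_ll_lookup(), not kernel API):

#include <stdint.h>
#include <stdio.h>

/* toy stand-in: a block is "free" when its bit is 0 */
static int find_free(const uint8_t *map, int begin, int end)
{
	for (int b = begin; b < end; b++)
		if (!(map[b / 8] & (1 << (b % 8))))
			return b;
	return -1;	/* -ENOSPC in the kernel version */
}

/* free in the NEW map, and not referenced by the OLD transaction */
static int find_common_free(const uint8_t *old_map, const uint8_t *new_map,
			    int begin, int end)
{
	int b;

	do {
		b = find_free(new_map, begin, end);
		if (b < 0)
			return b;
		/* block still used by the old transaction? skip past it */
		begin = b + 1;
	} while (old_map[b / 8] & (1 << (b % 8)));

	return b;
}

int main(void)
{
	uint8_t old_map[] = { 0x0f };	/* blocks 0-3 used in old */
	uint8_t new_map[] = { 0x05 };	/* blocks 0 and 2 used in new */

	/* blocks 1 and 3 are free in new, but only 4+ are free in both */
	printf("%d\n", find_common_free(old_map, new_map, 0, 8));	/* 4 */
	return 0;
}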

--- a/drivers/md/persistent-data/dm-space-map-common.h
+++ b/drivers/md/persistent-data/dm-space-map-common.h

@@ -109,6 +109,8 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result);
 int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result);
 int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
 			  dm_block_t end, dm_block_t *result);
+int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
+				 dm_block_t begin, dm_block_t end, dm_block_t *result);
 int sm_ll_insert(struct ll_disk *ll, dm_block_t b, uint32_t ref_count, enum allocation_event *ev);
 int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev);
 int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev);

--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c

@@ -167,8 +167,10 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
 	enum allocation_event ev;
 	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
 
-	/* FIXME: we should loop round a couple of times */
-	r = sm_ll_find_free_block(&smd->old_ll, smd->begin, smd->old_ll.nr_blocks, b);
+	/*
+	 * Any block we allocate has to be free in both the old and current ll.
+	 */
+	r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b);
 	if (r)
 		return r;

--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c

@@ -448,7 +448,10 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
 	enum allocation_event ev;
 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
 
-	r = sm_ll_find_free_block(&smm->old_ll, smm->begin, smm->old_ll.nr_blocks, b);
+	/*
+	 * Any block we allocate has to be free in both the old and current ll.
+	 */
+	r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b);
 	if (r)
 		return r;

--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c

@@ -424,7 +424,7 @@ static int iguanair_probe(struct usb_interface *intf,
 	int ret, pipein, pipeout;
 	struct usb_host_interface *idesc;
 
-	idesc = intf->altsetting;
+	idesc = intf->cur_altsetting;
 	if (idesc->desc.bNumEndpoints < 2)
 		return -ENODEV;

--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c

@ -1874,23 +1874,28 @@ int rc_register_device(struct rc_dev *dev)
dev->registered = true;
if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
rc = rc_setup_rx_device(dev);
if (rc)
goto out_dev;
}
/* Ensure that the lirc kfifo is setup before we start the thread */
/*
* once the the input device is registered in rc_setup_rx_device,
* userspace can open the input device and rc_open() will be called
* as a result. This results in driver code being allowed to submit
* keycodes with rc_keydown, so lirc must be registered first.
*/
if (dev->allowed_protocols != RC_PROTO_BIT_CEC) {
rc = ir_lirc_register(dev);
if (rc < 0)
goto out_rx;
goto out_dev;
}
if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
rc = rc_setup_rx_device(dev);
if (rc)
goto out_lirc;
}
if (dev->driver_type == RC_DRIVER_IR_RAW) {
rc = ir_raw_event_register(dev);
if (rc < 0)
goto out_lirc;
goto out_rx;
}
dev_dbg(&dev->dev, "Registered rc%u (driver: %s)\n", dev->minor,
@ -1898,11 +1903,11 @@ int rc_register_device(struct rc_dev *dev)
return 0;
out_rx:
rc_free_rx_device(dev);
out_lirc:
if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
ir_lirc_unregister(dev);
out_rx:
rc_free_rx_device(dev);
out_dev:
device_del(&dev->dev);
out_rx_free:

--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c

@@ -1482,6 +1482,11 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
 			break;
 		if (forward == prev)
 			continue;
+		if (forward->chain.next || forward->chain.prev) {
+			uvc_trace(UVC_TRACE_DESCR, "Found reference to "
+				"entity %d already in chain.\n", forward->id);
+			return -EINVAL;
+		}
 
 		switch (UVC_ENTITY_TYPE(forward)) {
 		case UVC_VC_EXTENSION_UNIT:
@@ -1563,6 +1568,13 @@ static int uvc_scan_chain_backward(struct uvc_video_chain *chain,
 				return -1;
 			}
 
+			if (term->chain.next || term->chain.prev) {
+				uvc_trace(UVC_TRACE_DESCR, "Found reference to "
+					"entity %d already in chain.\n",
+					term->id);
+				return -EINVAL;
+			}
+
 			if (uvc_trace_param & UVC_TRACE_PROBE)
 				printk(KERN_CONT " %d", term->id);

--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c

@@ -1171,36 +1171,38 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	u32 aux_space;
 	int compatible_arg = 1;
 	long err = 0;
+	unsigned int ncmd;
 
 	/*
 	 * 1. When struct size is different, converts the command.
 	 */
 	switch (cmd) {
-	case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break;
-	case VIDIOC_S_FMT32: cmd = VIDIOC_S_FMT; break;
-	case VIDIOC_QUERYBUF32: cmd = VIDIOC_QUERYBUF; break;
-	case VIDIOC_G_FBUF32: cmd = VIDIOC_G_FBUF; break;
-	case VIDIOC_S_FBUF32: cmd = VIDIOC_S_FBUF; break;
-	case VIDIOC_QBUF32: cmd = VIDIOC_QBUF; break;
-	case VIDIOC_DQBUF32: cmd = VIDIOC_DQBUF; break;
-	case VIDIOC_ENUMSTD32: cmd = VIDIOC_ENUMSTD; break;
-	case VIDIOC_ENUMINPUT32: cmd = VIDIOC_ENUMINPUT; break;
-	case VIDIOC_TRY_FMT32: cmd = VIDIOC_TRY_FMT; break;
-	case VIDIOC_G_EXT_CTRLS32: cmd = VIDIOC_G_EXT_CTRLS; break;
-	case VIDIOC_S_EXT_CTRLS32: cmd = VIDIOC_S_EXT_CTRLS; break;
-	case VIDIOC_TRY_EXT_CTRLS32: cmd = VIDIOC_TRY_EXT_CTRLS; break;
-	case VIDIOC_DQEVENT32: cmd = VIDIOC_DQEVENT; break;
-	case VIDIOC_OVERLAY32: cmd = VIDIOC_OVERLAY; break;
-	case VIDIOC_STREAMON32: cmd = VIDIOC_STREAMON; break;
-	case VIDIOC_STREAMOFF32: cmd = VIDIOC_STREAMOFF; break;
-	case VIDIOC_G_INPUT32: cmd = VIDIOC_G_INPUT; break;
-	case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break;
-	case VIDIOC_G_OUTPUT32: cmd = VIDIOC_G_OUTPUT; break;
-	case VIDIOC_S_OUTPUT32: cmd = VIDIOC_S_OUTPUT; break;
-	case VIDIOC_CREATE_BUFS32: cmd = VIDIOC_CREATE_BUFS; break;
-	case VIDIOC_PREPARE_BUF32: cmd = VIDIOC_PREPARE_BUF; break;
-	case VIDIOC_G_EDID32: cmd = VIDIOC_G_EDID; break;
-	case VIDIOC_S_EDID32: cmd = VIDIOC_S_EDID; break;
+	case VIDIOC_G_FMT32: ncmd = VIDIOC_G_FMT; break;
+	case VIDIOC_S_FMT32: ncmd = VIDIOC_S_FMT; break;
+	case VIDIOC_QUERYBUF32: ncmd = VIDIOC_QUERYBUF; break;
+	case VIDIOC_G_FBUF32: ncmd = VIDIOC_G_FBUF; break;
+	case VIDIOC_S_FBUF32: ncmd = VIDIOC_S_FBUF; break;
+	case VIDIOC_QBUF32: ncmd = VIDIOC_QBUF; break;
+	case VIDIOC_DQBUF32: ncmd = VIDIOC_DQBUF; break;
+	case VIDIOC_ENUMSTD32: ncmd = VIDIOC_ENUMSTD; break;
+	case VIDIOC_ENUMINPUT32: ncmd = VIDIOC_ENUMINPUT; break;
+	case VIDIOC_TRY_FMT32: ncmd = VIDIOC_TRY_FMT; break;
+	case VIDIOC_G_EXT_CTRLS32: ncmd = VIDIOC_G_EXT_CTRLS; break;
+	case VIDIOC_S_EXT_CTRLS32: ncmd = VIDIOC_S_EXT_CTRLS; break;
+	case VIDIOC_TRY_EXT_CTRLS32: ncmd = VIDIOC_TRY_EXT_CTRLS; break;
+	case VIDIOC_DQEVENT32: ncmd = VIDIOC_DQEVENT; break;
+	case VIDIOC_OVERLAY32: ncmd = VIDIOC_OVERLAY; break;
+	case VIDIOC_STREAMON32: ncmd = VIDIOC_STREAMON; break;
+	case VIDIOC_STREAMOFF32: ncmd = VIDIOC_STREAMOFF; break;
+	case VIDIOC_G_INPUT32: ncmd = VIDIOC_G_INPUT; break;
+	case VIDIOC_S_INPUT32: ncmd = VIDIOC_S_INPUT; break;
+	case VIDIOC_G_OUTPUT32: ncmd = VIDIOC_G_OUTPUT; break;
+	case VIDIOC_S_OUTPUT32: ncmd = VIDIOC_S_OUTPUT; break;
+	case VIDIOC_CREATE_BUFS32: ncmd = VIDIOC_CREATE_BUFS; break;
+	case VIDIOC_PREPARE_BUF32: ncmd = VIDIOC_PREPARE_BUF; break;
+	case VIDIOC_G_EDID32: ncmd = VIDIOC_G_EDID; break;
+	case VIDIOC_S_EDID32: ncmd = VIDIOC_S_EDID; break;
+	default: ncmd = cmd; break;
 	}
 
 	/*
@@ -1209,11 +1211,11 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	 * argument into it.
 	 */
 	switch (cmd) {
-	case VIDIOC_OVERLAY:
-	case VIDIOC_STREAMON:
-	case VIDIOC_STREAMOFF:
-	case VIDIOC_S_INPUT:
-	case VIDIOC_S_OUTPUT:
+	case VIDIOC_OVERLAY32:
+	case VIDIOC_STREAMON32:
+	case VIDIOC_STREAMOFF32:
+	case VIDIOC_S_INPUT32:
+	case VIDIOC_S_OUTPUT32:
 		err = alloc_userspace(sizeof(unsigned int), 0, &new_p64);
 		if (!err && assign_in_user((unsigned int __user *)new_p64,
 					   (compat_uint_t __user *)p32))
@@ -1221,23 +1223,23 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_G_INPUT:
-	case VIDIOC_G_OUTPUT:
+	case VIDIOC_G_INPUT32:
+	case VIDIOC_G_OUTPUT32:
 		err = alloc_userspace(sizeof(unsigned int), 0, &new_p64);
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_G_EDID:
-	case VIDIOC_S_EDID:
+	case VIDIOC_G_EDID32:
+	case VIDIOC_S_EDID32:
 		err = alloc_userspace(sizeof(struct v4l2_edid), 0, &new_p64);
 		if (!err)
 			err = get_v4l2_edid32(new_p64, p32);
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_G_FMT:
-	case VIDIOC_S_FMT:
-	case VIDIOC_TRY_FMT:
+	case VIDIOC_G_FMT32:
+	case VIDIOC_S_FMT32:
+	case VIDIOC_TRY_FMT32:
 		err = bufsize_v4l2_format(p32, &aux_space);
 		if (!err)
 			err = alloc_userspace(sizeof(struct v4l2_format),
@@ -1250,7 +1252,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_CREATE_BUFS:
+	case VIDIOC_CREATE_BUFS32:
 		err = bufsize_v4l2_create(p32, &aux_space);
 		if (!err)
 			err = alloc_userspace(sizeof(struct v4l2_create_buffers),
@@ -1263,10 +1265,10 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_PREPARE_BUF:
-	case VIDIOC_QUERYBUF:
-	case VIDIOC_QBUF:
-	case VIDIOC_DQBUF:
+	case VIDIOC_PREPARE_BUF32:
+	case VIDIOC_QUERYBUF32:
+	case VIDIOC_QBUF32:
+	case VIDIOC_DQBUF32:
 		err = bufsize_v4l2_buffer(p32, &aux_space);
 		if (!err)
 			err = alloc_userspace(sizeof(struct v4l2_buffer),
@@ -1279,7 +1281,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_S_FBUF:
+	case VIDIOC_S_FBUF32:
 		err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
 				      &new_p64);
 		if (!err)
@@ -1287,13 +1289,13 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_G_FBUF:
+	case VIDIOC_G_FBUF32:
 		err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
 				      &new_p64);
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_ENUMSTD:
+	case VIDIOC_ENUMSTD32:
 		err = alloc_userspace(sizeof(struct v4l2_standard), 0,
 				      &new_p64);
 		if (!err)
@@ -1301,16 +1303,16 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_ENUMINPUT:
+	case VIDIOC_ENUMINPUT32:
 		err = alloc_userspace(sizeof(struct v4l2_input), 0, &new_p64);
 		if (!err)
 			err = get_v4l2_input32(new_p64, p32);
 		compatible_arg = 0;
 		break;
 
-	case VIDIOC_G_EXT_CTRLS:
-	case VIDIOC_S_EXT_CTRLS:
-	case VIDIOC_TRY_EXT_CTRLS:
+	case VIDIOC_G_EXT_CTRLS32:
+	case VIDIOC_S_EXT_CTRLS32:
+	case VIDIOC_TRY_EXT_CTRLS32:
 		err = bufsize_v4l2_ext_controls(p32, &aux_space);
 		if (!err)
 			err = alloc_userspace(sizeof(struct v4l2_ext_controls),
@@ -1322,7 +1324,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		}
 		compatible_arg = 0;
 		break;
-	case VIDIOC_DQEVENT:
+	case VIDIOC_DQEVENT32:
 		err = alloc_userspace(sizeof(struct v4l2_event), 0, &new_p64);
 		compatible_arg = 0;
 		break;
@@ -1340,9 +1342,9 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	 * Otherwise, it will pass the newly allocated @new_p64 argument.
 	 */
 	if (compatible_arg)
-		err = native_ioctl(file, cmd, (unsigned long)p32);
+		err = native_ioctl(file, ncmd, (unsigned long)p32);
 	else
-		err = native_ioctl(file, cmd, (unsigned long)new_p64);
+		err = native_ioctl(file, ncmd, (unsigned long)new_p64);
 	if (err == -ENOTTY)
 		return err;
@@ -1358,13 +1360,13 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	 * the blocks to maximum allowed value.
 	 */
 	switch (cmd) {
-	case VIDIOC_G_EXT_CTRLS:
-	case VIDIOC_S_EXT_CTRLS:
-	case VIDIOC_TRY_EXT_CTRLS:
+	case VIDIOC_G_EXT_CTRLS32:
+	case VIDIOC_S_EXT_CTRLS32:
+	case VIDIOC_TRY_EXT_CTRLS32:
 		if (put_v4l2_ext_controls32(file, new_p64, p32))
 			err = -EFAULT;
 		break;
-	case VIDIOC_S_EDID:
+	case VIDIOC_S_EDID32:
 		if (put_v4l2_edid32(new_p64, p32))
 			err = -EFAULT;
 		break;
@@ -1377,49 +1379,49 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	 * the original 32 bits structure.
 	 */
 	switch (cmd) {
-	case VIDIOC_S_INPUT:
-	case VIDIOC_S_OUTPUT:
-	case VIDIOC_G_INPUT:
-	case VIDIOC_G_OUTPUT:
+	case VIDIOC_S_INPUT32:
+	case VIDIOC_S_OUTPUT32:
+	case VIDIOC_G_INPUT32:
+	case VIDIOC_G_OUTPUT32:
 		if (assign_in_user((compat_uint_t __user *)p32,
 				   ((unsigned int __user *)new_p64)))
 			err = -EFAULT;
 		break;
 
-	case VIDIOC_G_FBUF:
+	case VIDIOC_G_FBUF32:
 		err = put_v4l2_framebuffer32(new_p64, p32);
 		break;
 
-	case VIDIOC_DQEVENT:
+	case VIDIOC_DQEVENT32:
 		err = put_v4l2_event32(new_p64, p32);
 		break;
 
-	case VIDIOC_G_EDID:
+	case VIDIOC_G_EDID32:
 		err = put_v4l2_edid32(new_p64, p32);
 		break;
 
-	case VIDIOC_G_FMT:
-	case VIDIOC_S_FMT:
-	case VIDIOC_TRY_FMT:
+	case VIDIOC_G_FMT32:
+	case VIDIOC_S_FMT32:
+	case VIDIOC_TRY_FMT32:
 		err = put_v4l2_format32(new_p64, p32);
 		break;
 
-	case VIDIOC_CREATE_BUFS:
+	case VIDIOC_CREATE_BUFS32:
 		err = put_v4l2_create32(new_p64, p32);
 		break;
 
-	case VIDIOC_PREPARE_BUF:
-	case VIDIOC_QUERYBUF:
-	case VIDIOC_QBUF:
-	case VIDIOC_DQBUF:
+	case VIDIOC_PREPARE_BUF32:
+	case VIDIOC_QUERYBUF32:
+	case VIDIOC_QBUF32:
+	case VIDIOC_DQBUF32:
		err = put_v4l2_buffer32(new_p64, p32);
 		break;
 
-	case VIDIOC_ENUMSTD:
+	case VIDIOC_ENUMSTD32:
 		err = put_v4l2_standard32(new_p64, p32);
 		break;
 
-	case VIDIOC_ENUMINPUT:
+	case VIDIOC_ENUMINPUT32:
 		err = put_v4l2_input32(new_p64, p32);
 		break;
 	}
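
The shape of this fix is easy to lose in the churn: every marshalling switch keeps keying off cmd, the untranslated 32-bit command, while only the final dispatch uses the translated ncmd, so a native command number arriving via the compat path gets no conversion and falls through to the native handler unchanged. A toy model of that control flow (CMD_FOO/CMD_FOO32 and both functions are made up, not a real ioctl ABI):

#include <stdio.h>

#define CMD_FOO32 0x1001	/* 32-bit ABI number (made up) */
#define CMD_FOO   0x2001	/* native ABI number (made up) */

static long native_ioctl(unsigned int cmd)
{
	return cmd == CMD_FOO ? 0 : -1;	/* -ENOTTY in the real code */
}

static long compat_ioctl(unsigned int cmd)
{
	unsigned int ncmd;

	switch (cmd) {
	case CMD_FOO32: ncmd = CMD_FOO; break;
	default:	ncmd = cmd;	break;	/* pass through untouched */
	}

	/*
	 * Marshalling decisions key off 'cmd' (the 32-bit number), so a
	 * caller passing the native CMD_FOO gets no conversion here and
	 * simply reaches the native handler unmodified.
	 */
	return native_ioctl(ncmd);
}

int main(void)
{
	printf("%ld %ld\n", compat_ioctl(CMD_FOO32), compat_ioctl(CMD_FOO));
	return 0;
}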

--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c

@@ -352,8 +352,11 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
 	BUG_ON(dma->sglen);
 
 	if (dma->pages) {
-		for (i = 0; i < dma->nr_pages; i++)
+		for (i = 0; i < dma->nr_pages; i++) {
+			if (dma->direction == DMA_FROM_DEVICE)
+				set_page_dirty_lock(dma->pages[i]);
 			put_page(dma->pages[i]);
+		}
 		kfree(dma->pages);
 		dma->pages = NULL;
 	}

--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c

@@ -128,7 +128,7 @@ static const struct regmap_range axp288_writeable_ranges[] = {
 static const struct regmap_range axp288_volatile_ranges[] = {
 	regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
 	regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
-	regmap_reg_range(AXP288_BC_DET_STAT, AXP288_BC_DET_STAT),
+	regmap_reg_range(AXP288_BC_DET_STAT, AXP20X_VBUS_IPSOUT_MGMT),
 	regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
 	regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
 	regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),

--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c

@@ -257,7 +257,7 @@ static const struct mfd_cell da9062_devs[] = {
 		.name		= "da9062-watchdog",
 		.num_resources	= ARRAY_SIZE(da9062_wdt_resources),
 		.resources	= da9062_wdt_resources,
-		.of_compatible  = "dlg,da9062-wdt",
+		.of_compatible  = "dlg,da9062-watchdog",
 	},
 	{
 		.name		= "da9062-thermal",

--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c

@@ -725,6 +725,8 @@ static int dln2_probe(struct usb_interface *interface,
 		      const struct usb_device_id *usb_id)
 {
 	struct usb_host_interface *hostif = interface->cur_altsetting;
+	struct usb_endpoint_descriptor *epin;
+	struct usb_endpoint_descriptor *epout;
 	struct device *dev = &interface->dev;
 	struct dln2_dev *dln2;
 	int ret;
@@ -734,12 +736,19 @@ static int dln2_probe(struct usb_interface *interface,
 	    hostif->desc.bNumEndpoints < 2)
 		return -ENODEV;
 
+	epin = &hostif->endpoint[0].desc;
+	epout = &hostif->endpoint[1].desc;
+	if (!usb_endpoint_is_bulk_out(epout))
+		return -ENODEV;
+	if (!usb_endpoint_is_bulk_in(epin))
+		return -ENODEV;
+
 	dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL);
 	if (!dln2)
 		return -ENOMEM;
 
-	dln2->ep_out = hostif->endpoint[0].desc.bEndpointAddress;
-	dln2->ep_in = hostif->endpoint[1].desc.bEndpointAddress;
+	dln2->ep_out = epout->bEndpointAddress;
+	dln2->ep_in = epin->bEndpointAddress;
 	dln2->usb_dev = usb_get_dev(interface_to_usbdev(interface));
 	dln2->interface = interface;
 	usb_set_intfdata(interface, dln2);
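
The pattern here is the standard defence against malformed USB descriptors: read the endpoint layout from cur_altsetting, verify type and direction with the usb_endpoint_is_bulk_*() helpers, and only then cache the endpoint addresses. A hedged sketch of the same check for a hypothetical two-endpoint device (the expected layout is an assumption, not a statement about DLN2 hardware):

#include <linux/usb.h>

/* illustrative only: expects endpoint 0 = bulk IN, endpoint 1 = bulk OUT */
static int example_check_endpoints(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	const struct usb_endpoint_descriptor *epin, *epout;

	if (alt->desc.bNumEndpoints < 2)
		return -ENODEV;

	epin = &alt->endpoint[0].desc;
	epout = &alt->endpoint[1].desc;

	/* reject interfaces whose endpoints don't match what we expect */
	if (!usb_endpoint_is_bulk_in(epin) || !usb_endpoint_is_bulk_out(epout))
		return -ENODEV;

	return 0;
}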

--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c

@@ -32,6 +32,7 @@ static bool rn5t618_volatile_reg(struct device *dev, unsigned int reg)
 	case RN5T618_WATCHDOGCNT:
 	case RN5T618_DCIRQ:
 	case RN5T618_ILIMDATAH ... RN5T618_AIN0DATAL:
+	case RN5T618_ADCCNT3:
 	case RN5T618_IR_ADC1 ... RN5T618_IR_ADC3:
 	case RN5T618_IR_GPR:
 	case RN5T618_IR_GPF:

--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c

@@ -1154,17 +1154,22 @@ static void mmc_spi_initsequence(struct mmc_spi_host *host)
 	 * SPI protocol. Another is that when chipselect is released while
 	 * the card returns BUSY status, the clock must issue several cycles
 	 * with chipselect high before the card will stop driving its output.
+	 *
+	 * SPI_CS_HIGH means "asserted" here. In some cases like when using
+	 * GPIOs for chip select, SPI_CS_HIGH is set but this will be logically
+	 * inverted by gpiolib, so if we want to ascertain to drive it high
+	 * we should toggle the default with an XOR as we do here.
 	 */
-	host->spi->mode |= SPI_CS_HIGH;
+	host->spi->mode ^= SPI_CS_HIGH;
 	if (spi_setup(host->spi) != 0) {
 		/* Just warn; most cards work without it. */
 		dev_warn(&host->spi->dev,
 				"can't change chip-select polarity\n");
-		host->spi->mode &= ~SPI_CS_HIGH;
+		host->spi->mode ^= SPI_CS_HIGH;
 	} else {
 		mmc_spi_readbytes(host, 18);
 
-		host->spi->mode &= ~SPI_CS_HIGH;
+		host->spi->mode ^= SPI_CS_HIGH;
 		if (spi_setup(host->spi) != 0) {
 			/* Wot, we can't get the same setup we had before? */
 			dev_err(&host->spi->dev,
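
The new comment is the whole argument for the change: with GPIO chip selects, gpiolib may already have inverted the meaning of SPI_CS_HIGH, so the driver cannot know the flag's "normal" state. XOR flips the bit relative to whatever it currently is, and a second XOR restores it exactly, whereas the old |= / &= ~ pair forced the bit and destroyed the initial state. A small stand-alone demonstration:

#include <assert.h>
#include <stdio.h>

#define SPI_CS_HIGH 0x04	/* same bit value as in linux/spi/spi.h */

int main(void)
{
	/* try both possible starting polarities */
	for (unsigned mode0 = 0; mode0 <= SPI_CS_HIGH; mode0 += SPI_CS_HIGH) {
		unsigned mode = mode0;

		mode ^= SPI_CS_HIGH;	/* invert for the init sequence */
		mode ^= SPI_CS_HIGH;	/* invert back when done */
		assert(mode == mode0);	/* original polarity restored */

		/* the old code forced the bit, losing the initial state */
		mode = mode0;
		mode |= SPI_CS_HIGH;
		mode &= ~SPI_CS_HIGH;
		printf("start %#x -> forced sequence ends at %#x\n",
		       mode0, mode);	/* always 0, even if it began set */
	}
	return 0;
}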

--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c

@@ -332,19 +332,22 @@ static int sdhci_at91_probe(struct platform_device *pdev)
 	priv->mainck = devm_clk_get(&pdev->dev, "baseclk");
 	if (IS_ERR(priv->mainck)) {
 		dev_err(&pdev->dev, "failed to get baseclk\n");
-		return PTR_ERR(priv->mainck);
+		ret = PTR_ERR(priv->mainck);
+		goto sdhci_pltfm_free;
 	}
 
 	priv->hclock = devm_clk_get(&pdev->dev, "hclock");
 	if (IS_ERR(priv->hclock)) {
 		dev_err(&pdev->dev, "failed to get hclock\n");
-		return PTR_ERR(priv->hclock);
+		ret = PTR_ERR(priv->hclock);
+		goto sdhci_pltfm_free;
 	}
 
 	priv->gck = devm_clk_get(&pdev->dev, "multclk");
 	if (IS_ERR(priv->gck)) {
 		dev_err(&pdev->dev, "failed to get multclk\n");
-		return PTR_ERR(priv->gck);
+		ret = PTR_ERR(priv->gck);
+		goto sdhci_pltfm_free;
 	}
 
 	ret = sdhci_at91_set_clks_presets(&pdev->dev);

--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c

@@ -73,7 +73,7 @@ static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
 		return 0;
 
 	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
-		if (test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
+		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
 			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
 			ret = -EINVAL;
 		}
@@ -1146,7 +1146,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 	struct rb_node *tmp_rb;
 	int ret, i, j, free_peb_count, used_peb_count, vol_count;
 	int scrub_peb_count, erase_peb_count;
-	unsigned long *seen_pebs = NULL;
+	unsigned long *seen_pebs;
 
 	fm_raw = ubi->fm_buf;
 	memset(ubi->fm_buf, 0, ubi->fm_size);
@@ -1160,7 +1160,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 	dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
 	if (!dvbuf) {
 		ret = -ENOMEM;
-		goto out_kfree;
+		goto out_free_avbuf;
 	}
 
 	avhdr = ubi_get_vid_hdr(avbuf);
@@ -1169,7 +1169,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 	seen_pebs = init_seen(ubi);
 	if (IS_ERR(seen_pebs)) {
 		ret = PTR_ERR(seen_pebs);
-		goto out_kfree;
+		goto out_free_dvbuf;
 	}
 
 	spin_lock(&ubi->volumes_lock);
@@ -1337,7 +1337,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
 	if (ret) {
 		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
-		goto out_kfree;
+		goto out_free_seen;
 	}
 
 	for (i = 0; i < new_fm->used_blocks; i++) {
@@ -1359,7 +1359,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 		if (ret) {
 			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
 				new_fm->e[i]->pnum);
-			goto out_kfree;
+			goto out_free_seen;
 		}
 	}
@@ -1369,7 +1369,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 		if (ret) {
 			ubi_err(ubi, "unable to write fastmap to PEB %i!",
 				new_fm->e[i]->pnum);
-			goto out_kfree;
+			goto out_free_seen;
 		}
 	}
@@ -1379,10 +1379,13 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 	ret = self_check_seen(ubi, seen_pebs);
 	dbg_bld("fastmap written!");
 
-out_kfree:
-	ubi_free_vid_buf(avbuf);
-	ubi_free_vid_buf(dvbuf);
+out_free_seen:
 	free_seen(seen_pebs);
+out_free_dvbuf:
+	ubi_free_vid_buf(dvbuf);
+out_free_avbuf:
+	ubi_free_vid_buf(avbuf);
+
 out:
 	return ret;
 }
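
Replacing the catch-all out_kfree with one label per resource, in reverse allocation order, is the idiomatic kernel unwind shape: each failure site jumps to the label that frees exactly what has been allocated so far, never more. A minimal stand-alone sketch of the pattern (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct ctx { void *a, *b, *c; };

/* one label per resource: a failure frees exactly what was allocated */
static int example_setup(struct ctx *ctx)
{
	ctx->a = malloc(16);
	if (!ctx->a)
		goto out;
	ctx->b = malloc(16);
	if (!ctx->b)
		goto out_free_a;
	ctx->c = malloc(16);
	if (!ctx->c)
		goto out_free_b;

	return 0;		/* success: caller owns a, b and c */

out_free_b:
	free(ctx->b);
out_free_a:
	free(ctx->a);
out:
	return -1;		/* -ENOMEM in kernel code */
}

int main(void)
{
	struct ctx ctx;

	printf("%d\n", example_setup(&ctx));
	return 0;
}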

--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c

@@ -1399,26 +1399,31 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 	bool do_tx_balance = true;
 	u32 hash_index = 0;
 	const u8 *hash_start = NULL;
-	struct ipv6hdr *ip6hdr;
 
 	skb_reset_mac_header(skb);
 	eth_data = eth_hdr(skb);
 
 	switch (ntohs(skb->protocol)) {
 	case ETH_P_IP: {
-		const struct iphdr *iph = ip_hdr(skb);
+		const struct iphdr *iph;
 
 		if (is_broadcast_ether_addr(eth_data->h_dest) ||
-		    iph->daddr == ip_bcast ||
-		    iph->protocol == IPPROTO_IGMP) {
+		    !pskb_network_may_pull(skb, sizeof(*iph))) {
+			do_tx_balance = false;
+			break;
+		}
+		iph = ip_hdr(skb);
+		if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) {
 			do_tx_balance = false;
 			break;
 		}
 		hash_start = (char *)&(iph->daddr);
 		hash_size = sizeof(iph->daddr);
-	}
 		break;
-	case ETH_P_IPV6:
+	}
+	case ETH_P_IPV6: {
+		const struct ipv6hdr *ip6hdr;
+
 		/* IPv6 doesn't really use broadcast mac address, but leave
 		 * that here just in case.
 		 */
@@ -1435,7 +1440,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			break;
 		}
 
-		/* Additianally, DAD probes should not be tx-balanced as that
+		if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
+			do_tx_balance = false;
+			break;
+		}
+		/* Additionally, DAD probes should not be tx-balanced as that
 		 * will lead to false positives for duplicate addresses and
 		 * prevent address configuration from working.
 		 */
@@ -1445,17 +1454,26 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			do_tx_balance = false;
 			break;
 		}
 
-		hash_start = (char *)&(ipv6_hdr(skb)->daddr);
-		hash_size = sizeof(ipv6_hdr(skb)->daddr);
+		hash_start = (char *)&ip6hdr->daddr;
+		hash_size = sizeof(ip6hdr->daddr);
 		break;
-	case ETH_P_IPX:
-		if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
+	}
+	case ETH_P_IPX: {
+		const struct ipxhdr *ipxhdr;
+
+		if (pskb_network_may_pull(skb, sizeof(*ipxhdr))) {
+			do_tx_balance = false;
+			break;
+		}
+		ipxhdr = (struct ipxhdr *)skb_network_header(skb);
+
+		if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) {
 			/* something is wrong with this packet */
 			do_tx_balance = false;
 			break;
 		}
 
-		if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) {
+		if (ipxhdr->ipx_type != IPX_TYPE_NCP) {
 			/* The only protocol worth balancing in
 			 * this family since it has an "ARP" like
 			 * mechanism
@@ -1464,9 +1482,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			break;
 		}
 
+		eth_data = eth_hdr(skb);
 		hash_start = (char *)eth_data->h_dest;
 		hash_size = ETH_ALEN;
 		break;
+	}
 	case ETH_P_ARP:
 		do_tx_balance = false;
 		if (bond_info->rlb_enabled)
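
Each case block now follows the same rule: confirm the header bytes are actually in the linear skb area before dereferencing them, and re-read any header pointers afterwards, since a pull can reallocate the data. A hedged sketch of the idiom using the long-standing pskb_may_pull() helper (the handler itself is made up):

#include <linux/ip.h>
#include <linux/skbuff.h>

/* illustrative rx path: refuse to look at an IPv4 header we can't pull */
static int example_handle_ipv4(struct sk_buff *skb)
{
	const struct iphdr *iph;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iph)))
		return -EINVAL;	/* header not in the linear area */

	/* only valid after the pull: pskb_may_pull() may reallocate data */
	iph = ip_hdr(skb);

	return iph->protocol;
}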

--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c

@ -655,7 +655,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
b53_do_vlan_op(dev, VTA_CMD_CLEAR);
}
b53_enable_vlan(dev, false, dev->vlan_filtering_enabled);
b53_enable_vlan(dev, dev->vlan_enabled, dev->vlan_filtering_enabled);
b53_for_each_port(dev, i)
b53_write16(dev, B53_VLAN_PAGE,

--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c

@@ -72,7 +72,9 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 
 		/* Force link status for IMP port */
 		reg = core_readl(priv, offset);
-		reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G);
+		reg |= (MII_SW_OR | LINK_STS);
+		if (priv->type == BCM7278_DEVICE_ID)
+			reg |= GMII_SPEED_UP_2G;
 		core_writel(priv, reg, offset);
 
 		/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */

--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c

@@ -2716,6 +2716,9 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
 
 	umac_reset(priv);
 
+	/* Disable the UniMAC RX/TX */
+	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
+
 	/* We may have been suspended and never received a WOL event that
 	 * would turn off MPD detection, take care of that now
 	 */

--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c

@@ -5861,7 +5861,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
 	int tcs, i;
 
 	tcs = netdev_get_num_tc(dev);
-	if (tcs > 1) {
+	if (tcs) {
 		int i, off, count;
 
 		for (i = 0; i < tcs; i++) {

--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c

@@ -66,7 +66,11 @@
 /* Max length of transmit frame must be a multiple of 8 bytes */
 #define MACB_TX_LEN_ALIGN	8
 #define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
-#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
+/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a
+ * false amba_error in TX path from the DMA assuming there is not enough
+ * space in the SRAM (16KB) even when there is.
+ */
+#define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)
 
 #define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
 #define MACB_NETIF_LSO		NETIF_F_TSO
@@ -1654,16 +1658,14 @@ static netdev_features_t macb_features_check(struct sk_buff *skb,
 
 	/* Validate LSO compatibility */
 
-	/* there is only one buffer */
-	if (!skb_is_nonlinear(skb))
+	/* there is only one buffer or protocol is not UDP */
+	if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
 		return features;
 
 	/* length of header */
 	hdrlen = skb_transport_offset(skb);
-	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-		hdrlen += tcp_hdrlen(skb);
 
-	/* For LSO:
+	/* For UFO only:
 	 * When software supplies two or more payload buffers all payload buffers
 	 * apart from the last must be a multiple of 8 bytes in size.
 	 */

--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c

@@ -2230,7 +2230,8 @@ static int __init dmfe_init_module(void)
 	case DMFE_1M_HPNA:
 		dmfe_media_mode = mode;
 		break;
-	default:dmfe_media_mode = DMFE_AUTO;
+	default:
+		dmfe_media_mode = DMFE_AUTO;
 		break;
 	}

--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c

@@ -385,6 +385,8 @@ struct mvneta_pcpu_stats {
 	struct	u64_stats_sync syncp;
 	u64	rx_packets;
 	u64	rx_bytes;
+	u64	rx_dropped;
+	u64	rx_errors;
 	u64	tx_packets;
 	u64	tx_bytes;
 };
@@ -701,6 +703,8 @@ mvneta_get_stats64(struct net_device *dev,
 		struct mvneta_pcpu_stats *cpu_stats;
 		u64 rx_packets;
 		u64 rx_bytes;
+		u64 rx_dropped;
+		u64 rx_errors;
 		u64 tx_packets;
 		u64 tx_bytes;
@@ -709,19 +713,20 @@ mvneta_get_stats64(struct net_device *dev,
 			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
 			rx_packets = cpu_stats->rx_packets;
 			rx_bytes   = cpu_stats->rx_bytes;
+			rx_dropped = cpu_stats->rx_dropped;
+			rx_errors  = cpu_stats->rx_errors;
 			tx_packets = cpu_stats->tx_packets;
 			tx_bytes   = cpu_stats->tx_bytes;
 		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
 
 		stats->rx_packets += rx_packets;
 		stats->rx_bytes   += rx_bytes;
+		stats->rx_dropped += rx_dropped;
+		stats->rx_errors  += rx_errors;
 		stats->tx_packets += tx_packets;
 		stats->tx_bytes   += tx_bytes;
 	}
 
-	stats->rx_errors	= dev->stats.rx_errors;
-	stats->rx_dropped	= dev->stats.rx_dropped;
-
 	stats->tx_dropped	= dev->stats.tx_dropped;
 }
@@ -1698,8 +1703,14 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
 static void mvneta_rx_error(struct mvneta_port *pp,
 			    struct mvneta_rx_desc *rx_desc)
 {
+	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 	u32 status = rx_desc->status;
 
+	/* update per-cpu counter */
+	u64_stats_update_begin(&stats->syncp);
+	stats->rx_errors++;
+	u64_stats_update_end(&stats->syncp);
+
 	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
 	case MVNETA_RXD_ERR_CRC:
 		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
@@ -1960,7 +1971,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 			/* Check errors only for FIRST descriptor */
 			if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
 				mvneta_rx_error(pp, rx_desc);
-				dev->stats.rx_errors++;
 				/* leave the descriptor untouched */
 				continue;
 			}
@@ -1971,11 +1981,17 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 				skb_size = max(rx_copybreak, rx_header_size);
 				rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
 				if (unlikely(!rxq->skb)) {
+					struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
 					netdev_err(dev,
 						   "Can't allocate skb on queue %d\n",
 						   rxq->id);
-					dev->stats.rx_dropped++;
+
 					rxq->skb_alloc_err++;
+
+					u64_stats_update_begin(&stats->syncp);
+					stats->rx_dropped++;
+					u64_stats_update_end(&stats->syncp);
 					continue;
 				}
 				copy_size = min(skb_size, rx_bytes);
@@ -2135,7 +2151,6 @@ static int mvneta_rx_hwbm(struct napi_struct *napi,
 			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
 					      rx_desc->buf_phys_addr);
 err_drop_frame:
-			dev->stats.rx_errors++;
 			mvneta_rx_error(pp, rx_desc);
 			/* leave the descriptor untouched */
 			continue;
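
Moving rx_dropped and rx_errors into mvneta_pcpu_stats puts them under the same u64_stats_sync seqcount as the existing counters, so 64-bit reads are torn-free on 32-bit systems and writers touch only CPU-local memory. The general shape of that pattern, sketched for a hypothetical driver (struct and function names are illustrative):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_errors;
};

/* writer side: always on the local CPU, e.g. from NAPI poll */
static void example_count_rx_error(struct example_pcpu_stats __percpu *stats)
{
	struct example_pcpu_stats *s = this_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	s->rx_errors++;
	u64_stats_update_end(&s->syncp);
}

/* reader side: retry if a writer was mid-update on that CPU */
static u64 example_read_rx_errors(struct example_pcpu_stats __percpu *stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct example_pcpu_stats *s = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 errors;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			errors = s->rx_errors;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		total += errors;
	}
	return total;
}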

--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c

@ -848,6 +848,7 @@ void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
mutex_lock(&fpga_xfrm->lock);
if (!--fpga_xfrm->num_rules) {
mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
kfree(fpga_xfrm->sa_ctx);
fpga_xfrm->sa_ctx = NULL;
}
mutex_unlock(&fpga_xfrm->lock);
@ -1472,7 +1473,7 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
return 0;
if (!mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
return -EOPNOTSUPP;
}

--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

@@ -4513,6 +4513,7 @@ int stmmac_suspend(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct stmmac_priv *priv = netdev_priv(ndev);
+	u32 chan;
 
 	if (!ndev || !netif_running(ndev))
 		return 0;
@@ -4527,6 +4528,9 @@ int stmmac_suspend(struct device *dev)
 
 	stmmac_disable_all_queues(priv);
 
+	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+		del_timer_sync(&priv->tx_queue[chan].txtimer);
+
 	/* Stop TX/RX DMA */
 	stmmac_stop_all_dma(priv);

--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c

@@ -772,12 +772,12 @@ static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
 	int i;
 
 	gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
-				       GFP_KERNEL);
+				       GFP_KERNEL | __GFP_NOWARN);
 	if (gtp->addr_hash == NULL)
 		return -ENOMEM;
 
 	gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
-				      GFP_KERNEL);
+				      GFP_KERNEL | __GFP_NOWARN);
 	if (gtp->tid_hash == NULL)
 		goto err1;
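
__GFP_NOWARN matters here because the hash size is user-controlled: an oversized allocation is an expected failure the caller already turns into -ENOMEM, so the allocator's warn-with-backtrace is pure noise. The idiom in isolation (the function name is illustrative):

#include <linux/slab.h>
#include <linux/types.h>

/* user-controlled count: fail quietly, the caller handles NULL */
static struct hlist_head *example_alloc_table(size_t hsize)
{
	return kmalloc_array(hsize, sizeof(struct hlist_head),
			     GFP_KERNEL | __GFP_NOWARN);
}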

--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c

@ -441,6 +441,7 @@ brcmf_usbdev_qinit(struct list_head *q, int qsize)
usb_free_urb(req->urb);
list_del(q->next);
}
kfree(reqs);
return NULL;
}

View file

@@ -286,7 +286,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
 	int regulatory_type;
 
 	/* Checking for required sections */
-	if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
+	if (mvm->trans->cfg->nvm_type == IWL_NVM) {
 		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
 		    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
 			IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");

--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c

@@ -3045,6 +3045,10 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
 	igtk_cmd.sta_id = cpu_to_le32(sta_id);
 
 	if (remove_key) {
+		/* This is a valid situation for IGTK */
+		if (sta_id == IWL_MVM_INVALID_STA)
+			return 0;
+
 		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
 	} else {
 		struct ieee80211_key_seq seq;
@@ -3352,9 +3356,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
 		      keyconf->keyidx, sta_id);
 
-	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
-			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
-			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
+	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
 		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
 
 	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {

Some files were not shown because too many files have changed in this diff.