This is the 4.19.68 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl1iS0cACgkQONu9yGCS
aT7zbhAAyPU6KNVLa1Pj/xQf8puJ3v+FTws0X9Ii9zyWCNNuTXSi8mf4arP8oMjH
NsYvhCGBHIQO0l3kFJRnOMLp0pPZPPUgHLvFWQljKWRABUOzMLjCWolnjegIPo8E
cUwYkwx5T5oZtYH7ScxfQLiQUJB28L65+gi+/LBqANcEYL6WaSa2+UIBeVUzbMTt
tQw6TAmE6f/kGAbmiQFeIdHj6Q9MrQyGBwTdhVSe+OENlZnZq8pxbwy3GXwEBuOP
rtmUfZrSxdUmWBa7+/oW14TBe/c6j2LT0tVoyZdUFGZNbOUNv4vEImXghd28YSuv
fppeSkom5di+RDH+B+LCNm+rV5vbfQqGTMuwBT6do2EDuQQ5KqS5kR8tULI4GKVl
pNejWeK2qcNNloC7imH+4rHIy9AFewKz1ixbGolSaPXyCYfYEBx1xfpw4npng2+X
aWJuk7/DnEEdzeKu9msdycQpf0aT5vkJqquBZQTzd5HAMlgqAF/sjvKjIsaUPNu1
wDc+tyyAF0lObR7aswuhvttZ/8yczMW6pOJiM/XAfFn/a+Y7V2FRedGcIsxyeKOw
YMNoW6VskIunShpbKhxVjPViI27NM0o8vmwCKmRQ6FRpxOX6vViNj+uQOjGQxfez
N5g9jqxLsq9YGVQfoseS2Taao8JkBrTOW0wiJtE3/nwt/FaeHpU=
=M37u
-----END PGP SIGNATURE-----

Merge 4.19.68 into android-4.19-q

Changes in 4.19.68
    sh: kernel: hw_breakpoint: Fix missing break in switch statement
    seq_file: fix problem when seeking mid-record
    mm/hmm: fix bad subpage pointer in try_to_unmap_one
    mm: mempolicy: make the behavior consistent when MPOL_MF_MOVE* and MPOL_MF_STRICT were specified
    mm: mempolicy: handle vma with unmovable pages mapped correctly in mbind
    mm/memcontrol.c: fix use after free in mem_cgroup_iter()
    mm/usercopy: use memory range to be accessed for wraparound check
    Revert "pwm: Set class for exported channels in sysfs"
    cpufreq: schedutil: Don't skip freq update when limits change
    xtensa: add missing isync to the cpu_reset TLB code
    ALSA: hda/realtek - Add quirk for HP Envy x360
    ALSA: usb-audio: Fix a stack buffer overflow bug in check_input_term
    ALSA: usb-audio: Fix an OOB bug in parse_audio_mixer_unit
    ALSA: hda - Apply workaround for another AMD chip 1022:1487
    ALSA: hda - Fix a memory leak bug
    ALSA: hda - Add a generic reboot_notify
    ALSA: hda - Let all conexant codec enter D3 when rebooting
    HID: holtek: test for sanity of intfdata
    HID: hiddev: avoid opening a disconnected device
    HID: hiddev: do cleanup in failure of opening a device
    Input: kbtab - sanity check for endpoint type
    Input: iforce - add sanity checks
    net: usb: pegasus: fix improper read if get_registers() fail
    netfilter: ebtables: also count base chain policies
    riscv: Make __fstate_clean() work correctly.
    clk: at91: generated: Truncate divisor to GENERATED_MAX_DIV + 1
    clk: sprd: Select REGMAP_MMIO to avoid compile errors
    clk: renesas: cpg-mssr: Fix reset control race condition
    xen/pciback: remove set but not used variable 'old_state'
    irqchip/gic-v3-its: Free unused vpt_page when alloc vpe table fail
    irqchip/irq-imx-gpcv2: Forward irq type to parent
    perf header: Fix divide by zero error if f_header.attr_size==0
    perf header: Fix use of unitialized value warning
    libata: zpodd: Fix small read overflow in zpodd_get_mech_type()
    drm/bridge: lvds-encoder: Fix build error while CONFIG_DRM_KMS_HELPER=m
    Btrfs: fix deadlock between fiemap and transaction commits
    scsi: hpsa: correct scsi command status issue after reset
    scsi: qla2xxx: Fix possible fcport null-pointer dereferences
    drm/amdgpu: fix a potential information leaking bug
    ata: libahci: do not complain in case of deferred probe
    kbuild: modpost: handle KBUILD_EXTRA_SYMBOLS only for external modules
    kbuild: Check for unknown options with cc-option usage in Kconfig and clang
    arm64/efi: fix variable 'si' set but not used
    arm64: unwind: Prohibit probing on return_address()
    arm64/mm: fix variable 'pud' set but not used
    IB/core: Add mitigation for Spectre V1
    IB/mlx5: Fix MR registration flow to use UMR properly
    IB/mad: Fix use-after-free in ib mad completion handling
    drm: msm: Fix add_gpu_components
    drm/exynos: fix missing decrement of retry counter
    Revert "kmemleak: allow to coexist with fault injection"
    ocfs2: remove set but not used variable 'last_hash'
    asm-generic: fix -Wtype-limits compiler warnings
    arm64: KVM: regmap: Fix unexpected switch fall-through
    KVM: arm/arm64: Sync ICH_VMCR_EL2 back when about to block
    staging: comedi: dt3000: Fix signed integer overflow 'divider * base'
    staging: comedi: dt3000: Fix rounding up of timer divisor
    iio: adc: max9611: Fix temperature reading in probe
    USB: core: Fix races in character device registration and deregistraion
    usb: gadget: udc: renesas_usb3: Fix sysfs interface of "role"
    usb: cdc-acm: make sure a refcount is taken early enough
    USB: CDC: fix sanity checks in CDC union parser
    USB: serial: option: add D-Link DWM-222 device ID
    USB: serial: option: Add support for ZTE MF871A
    USB: serial: option: add the BroadMobi BM818 card
    USB: serial: option: Add Motorola modem UARTs
    drm/i915/cfl: Add a new CFL PCI ID.
    dm: disable DISCARD if the underlying storage no longer supports it
    arm64: ftrace: Ensure module ftrace trampoline is coherent with I-side
    netfilter: conntrack: Use consistent ct id hash calculation
    Input: psmouse - fix build error of multiple definition
    iommu/amd: Move iommu_init_pci() to .init section
    bnx2x: Fix VF's VLAN reconfiguration in reload.
    bonding: Add vlan tx offload to hw_enc_features
    net: dsa: Check existence of .port_mdb_add callback before calling it
    net/mlx4_en: fix a memory leak bug
    net/packet: fix race in tpacket_snd()
    sctp: fix memleak in sctp_send_reset_streams
    sctp: fix the transport error_count check
    team: Add vlan tx offload to hw_enc_features
    tipc: initialise addr_trail_end when setting node addresses
    xen/netback: Reset nr_frags before freeing skb
    net/mlx5e: Only support tx/rx pause setting for port owner
    net/mlx5e: Use flow keys dissector to parse packets for ARFS
    mmc: sdhci-of-arasan: Do now show error message in case of deffered probe
    Linux 4.19.68

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ib17849ac5ecc11f8bd998c994891ee12d6528da3
commit f3e9c9b0c0

90 changed files with 550 additions and 315 deletions

Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 67
+SUBLEVEL = 68
 EXTRAVERSION =
 NAME = "People's Front"

@@ -94,7 +94,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
     ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)

 #define alloc_screen_info(x...)    &screen_info
-#define free_screen_info(x...)
+
+static inline void free_screen_info(efi_system_table_t *sys_table_arg,
+                                    struct screen_info *si)
+{
+}

 /* redeclare as 'hidden' so the compiler will generate relative references */
 extern struct screen_info screen_info __attribute__((__visibility__("hidden")));

@@ -419,8 +419,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                  PMD_TYPE_SECT)

 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
-#define pud_sect(pud)           (0)
-#define pud_table(pud)          (1)
+static inline bool pud_sect(pud_t pud) { return false; }
+static inline bool pud_table(pud_t pud) { return true; }
 #else
 #define pud_sect(pud)           ((pud_val(pud) & PUD_TYPE_MASK) == \
                                  PUD_TYPE_SECT)

@@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)

     if (offset < -SZ_128M || offset >= SZ_128M) {
 #ifdef CONFIG_ARM64_MODULE_PLTS
-        struct plt_entry trampoline;
+        struct plt_entry trampoline, *dst;
         struct module *mod;

         /*
@@ -104,24 +104,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
          * is added in the future, but for now, the pr_err() below
          * deals with a theoretical issue only.
          */
+        dst = mod->arch.ftrace_trampoline;
         trampoline = get_plt_entry(addr);
-        if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-                               &trampoline)) {
-            if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-                                   &(struct plt_entry){})) {
+        if (!plt_entries_equal(dst, &trampoline)) {
+            if (!plt_entries_equal(dst, &(struct plt_entry){})) {
                 pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
                 return -EINVAL;
             }

             /* point the trampoline to our ftrace entry point */
             module_disable_ro(mod);
-            *mod->arch.ftrace_trampoline = trampoline;
+            *dst = trampoline;
             module_enable_ro(mod, true);

-            /* update trampoline before patching in the branch */
-            smp_wmb();
+            /*
+             * Ensure updated trampoline is visible to instruction
+             * fetch before we patch in the branch.
+             */
+            __flush_icache_range((unsigned long)&dst[0],
+                                 (unsigned long)&dst[1]);
         }
-        addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
+        addr = (unsigned long)dst;
 #else /* CONFIG_ARM64_MODULE_PLTS */
         return -EINVAL;
 #endif /* CONFIG_ARM64_MODULE_PLTS */

@@ -11,6 +11,7 @@

 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>

 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
@@ -32,6 +33,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
         return 0;
     }
 }
+NOKPROBE_SYMBOL(save_return_addr);

 void *return_address(unsigned int level)
 {
@@ -55,3 +57,4 @@ void *return_address(unsigned int level)
     return NULL;
 }
 EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);

@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
@@ -85,6 +86,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)

     return 0;
 }
+NOKPROBE_SYMBOL(unwind_frame);

 void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
                  int (*fn)(struct stackframe *, void *), void *data)
@@ -99,6 +101,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
             break;
     }
 }
+NOKPROBE_SYMBOL(walk_stackframe);

 #ifdef CONFIG_STACKTRACE
 struct stack_trace_data {

@@ -189,13 +189,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
     switch (spsr_idx) {
     case KVM_SPSR_SVC:
         write_sysreg_el1(v, spsr);
+        break;
     case KVM_SPSR_ABT:
         write_sysreg(v, spsr_abt);
+        break;
     case KVM_SPSR_UND:
         write_sysreg(v, spsr_und);
+        break;
     case KVM_SPSR_IRQ:
         write_sysreg(v, spsr_irq);
+        break;
     case KVM_SPSR_FIQ:
         write_sysreg(v, spsr_fiq);
+        break;
     }
 }

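Note: this hunk and the sh hw_breakpoint hunk further below both close classic missing-`break` fall-through bugs. A minimal userspace sketch of the failure mode (hypothetical register array, not the kernel code):

#include <stdio.h>

/* Without the breaks, writing index 1 would fall through and
 * also clobber entries 2 and 3. */
static void write_reg(int idx, unsigned long v, unsigned long regs[4])
{
    switch (idx) {
    case 0:
        regs[0] = v;
        break;  /* the fix: stop here instead of falling through */
    case 1:
        regs[1] = v;
        break;
    case 2:
        regs[2] = v;
        break;
    case 3:
        regs[3] = v;
        break;
    }
}

int main(void)
{
    unsigned long regs[4] = {0, 0, 0, 0};

    write_reg(1, 0xdeadbeef, regs);
    /* With the breaks only regs[1] changes; without them, 1..3 all would. */
    printf("%lx %lx %lx %lx\n", regs[0], regs[1], regs[2], regs[3]);
    return 0;
}
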
@@ -23,7 +23,7 @@ extern void __fstate_restore(struct task_struct *restore_from);

 static inline void __fstate_clean(struct pt_regs *regs)
 {
-    regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
+    regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
 }

 static inline void fstate_save(struct task_struct *task,

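The riscv one-liner replaces `|=` with `=`: OR-ing the cleared value back into sstatus can never clear SR_FS bits that are already set. A standalone sketch with illustrative constants (the real SR_FS encoding may differ):

#include <stdio.h>

#define SR_FS       0x6000UL    /* two-bit FS field, assumed position */
#define SR_FS_CLEAN 0x4000UL
#define SR_FS_DIRTY 0x6000UL

int main(void)
{
    unsigned long sstatus = SR_FS_DIRTY;

    /* Buggy: old bits survive the OR, so the field stays dirty. */
    unsigned long buggy = sstatus | ((sstatus & ~SR_FS) | SR_FS_CLEAN);
    /* Fixed: mask the field out, then set the new state. */
    unsigned long fixed = (sstatus & ~SR_FS) | SR_FS_CLEAN;

    printf("buggy=%#lx fixed=%#lx\n", buggy & SR_FS, fixed & SR_FS);
    return 0;
}
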
@@ -160,6 +160,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
     switch (sh_type) {
     case SH_BREAKPOINT_READ:
         *gen_type = HW_BREAKPOINT_R;
+        break;
     case SH_BREAKPOINT_WRITE:
         *gen_type = HW_BREAKPOINT_W;
         break;

@@ -515,6 +515,7 @@ void cpu_reset(void)
               "add  %2, %2, %7\n\t"
               "addi %0, %0, -1\n\t"
               "bnez %0, 1b\n\t"
+              "isync\n\t"
               /* Jump to identity mapping */
               "jx   %3\n"
               "2:\n\t"

@@ -313,6 +313,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
             hpriv->phys[port] = NULL;
             rc = 0;
             break;
+        case -EPROBE_DEFER:
+            /* Do not complain yet */
+            break;

         default:
             dev_err(dev,

@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
     unsigned int ret;
     struct rm_feature_desc *desc;
     struct ata_taskfile tf;
-    static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
+    static const char cdb[ATAPI_CDB_LEN] = {  GPCMD_GET_CONFIGURATION,
             2,      /* only 1 feature descriptor requested */
             0, 3,   /* 3, removable medium feature */
             0, 0, 0,/* reserved */

@@ -153,6 +153,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
             continue;

         div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
+        if (div > GENERATED_MAX_DIV + 1)
+            div = GENERATED_MAX_DIV + 1;

         clk_generated_best_diff(req, parent, parent_rate, div,
                     &best_diff, &best_rate);

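The at91 change clamps the computed divisor so that a very low requested rate cannot produce a divider the generated clock cannot express. A sketch of the arithmetic, with an assumed GENERATED_MAX_DIV:

#include <stdio.h>

#define GENERATED_MAX_DIV 255   /* assumed value, for illustration only */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
    unsigned long parent_rate = 100000000;  /* 100 MHz parent */
    unsigned long req_rate = 1000;          /* absurdly low request */
    unsigned long div = DIV_ROUND_CLOSEST(parent_rate, req_rate);

    if (div > GENERATED_MAX_DIV + 1)        /* the fix: truncate */
        div = GENERATED_MAX_DIV + 1;
    printf("div=%lu\n", div);               /* 256, not 100000 */
    return 0;
}
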
@@ -535,17 +535,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
     unsigned int reg = id / 32;
     unsigned int bit = id % 32;
     u32 bitmask = BIT(bit);
-    unsigned long flags;
-    u32 value;

     dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);

     /* Reset module */
-    spin_lock_irqsave(&priv->rmw_lock, flags);
-    value = readl(priv->base + SRCR(reg));
-    value |= bitmask;
-    writel(value, priv->base + SRCR(reg));
-    spin_unlock_irqrestore(&priv->rmw_lock, flags);
+    writel(bitmask, priv->base + SRCR(reg));

     /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
     udelay(35);
@@ -562,16 +556,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
     unsigned int reg = id / 32;
     unsigned int bit = id % 32;
     u32 bitmask = BIT(bit);
-    unsigned long flags;
-    u32 value;

     dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);

-    spin_lock_irqsave(&priv->rmw_lock, flags);
-    value = readl(priv->base + SRCR(reg));
-    value |= bitmask;
-    writel(value, priv->base + SRCR(reg));
-    spin_unlock_irqrestore(&priv->rmw_lock, flags);
+    writel(bitmask, priv->base + SRCR(reg));

     return 0;
 }

@@ -2,6 +2,7 @@ config SPRD_COMMON_CLK
     tristate "Clock support for Spreadtrum SoCs"
     depends on ARCH_SPRD || COMPILE_TEST
     default ARCH_SPRD
+    select REGMAP_MMIO

 if SPRD_COMMON_CLK

@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
     thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
     bank = (*pos & GENMASK_ULL(61, 60)) >> 60;

-    data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+    data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
     if (!data)
         return -ENOMEM;

@@ -46,6 +46,7 @@ config DRM_DUMB_VGA_DAC
 config DRM_LVDS_ENCODER
     tristate "Transparent parallel to LVDS encoder support"
     depends on OF
+    select DRM_KMS_HELPER
     select DRM_PANEL_BRIDGE
     help
       Support for transparent parallel to LVDS encoders that don't require

@@ -108,12 +108,12 @@ static inline int scaler_reset(struct scaler_context *scaler)
     scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
     do {
         cpu_relax();
-    } while (retry > 1 &&
+    } while (--retry > 1 &&
          scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
     do {
         cpu_relax();
         scaler_write(1, SCALER_INT_EN);
-    } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+    } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);

     return retry ? 0 : -EIO;
 }

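Note the `--retry` fix above: without the decrement the loop condition never changes, so a wedged device would make the reset loop spin forever. A trivial sketch of the difference:

#include <stdio.h>

int main(void)
{
    int retry = 10;
    int hw_busy = 1;    /* pretend the soft-reset bit never clears */

    do {
        /* cpu_relax() in the driver */
    } while (--retry > 1 && hw_busy);   /* terminates once retry hits 1 */

    printf("left loop with retry=%d\n", retry);
    return 0;
}
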
@@ -1284,7 +1284,8 @@ static int add_gpu_components(struct device *dev,
     if (!np)
         return 0;

-    drm_of_component_match_add(dev, matchptr, compare_of, np);
+    if (of_device_is_available(np))
+        drm_of_component_match_add(dev, matchptr, compare_of, np);

     of_node_put(np);

@@ -126,9 +126,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,

     /* Locate the boot interface, to receive the LED change events */
     struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
+    struct hid_device *boot_hid;
+    struct hid_input *boot_hid_input;

-    struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
-    struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
+    if (unlikely(boot_interface == NULL))
+        return -ENODEV;
+
+    boot_hid = usb_get_intfdata(boot_interface);
+    boot_hid_input = list_first_entry(&boot_hid->inputs,
         struct hid_input, list);

     return boot_hid_input->input->event(boot_hid_input->input, type, code,

@@ -297,6 +297,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
     spin_unlock_irq(&list->hiddev->list_lock);

     mutex_lock(&hiddev->existancelock);
+    /*
+     * recheck exist with existance lock held to
+     * avoid opening a disconnected device
+     */
+    if (!list->hiddev->exist) {
+        res = -ENODEV;
+        goto bail_unlock;
+    }
     if (!list->hiddev->open++)
         if (list->hiddev->exist) {
             struct hid_device *hid = hiddev->hid;
@@ -313,6 +321,10 @@ static int hiddev_open(struct inode *inode, struct file *file)
     hid_hw_power(hid, PM_HINT_NORMAL);
 bail_unlock:
     mutex_unlock(&hiddev->existancelock);
+
+    spin_lock_irq(&list->hiddev->list_lock);
+    list_del(&list->node);
+    spin_unlock_irq(&list->hiddev->list_lock);
 bail:
     file->private_data = NULL;
     vfree(list);

@@ -483,7 +483,7 @@ static int max9611_init(struct max9611_dev *max9611)
     if (ret)
         return ret;

-    regval = ret & MAX9611_TEMP_MASK;
+    regval &= MAX9611_TEMP_MASK;

     if ((regval > MAX9611_TEMP_MAX_POS &&
          regval < MAX9611_TEMP_MIN_NEG) ||

@@ -3182,18 +3182,18 @@ static int ib_mad_port_open(struct ib_device *device,
     if (has_smi)
         cq_size *= 2;

-    port_priv->pd = ib_alloc_pd(device, 0);
-    if (IS_ERR(port_priv->pd)) {
-        dev_err(&device->dev, "Couldn't create ib_mad PD\n");
-        ret = PTR_ERR(port_priv->pd);
-        goto error3;
-    }
-
     port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
             IB_POLL_WORKQUEUE);
     if (IS_ERR(port_priv->cq)) {
         dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
         ret = PTR_ERR(port_priv->cq);
         goto error3;
     }

+    port_priv->pd = ib_alloc_pd(device, 0);
+    if (IS_ERR(port_priv->pd)) {
+        dev_err(&device->dev, "Couldn't create ib_mad PD\n");
+        ret = PTR_ERR(port_priv->pd);
+        goto error4;
+    }
+
@@ -3236,11 +3236,11 @@
 error7:
     destroy_mad_qp(&port_priv->qp_info[0]);
 error6:
-    ib_dealloc_pd(port_priv->pd);
-error4:
     ib_free_cq(port_priv->cq);
     cleanup_recv_queue(&port_priv->qp_info[1]);
     cleanup_recv_queue(&port_priv->qp_info[0]);
+error4:
+    ib_dealloc_pd(port_priv->pd);
 error3:
     kfree(port_priv);

@@ -3270,8 +3270,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
     destroy_workqueue(port_priv->wq);
     destroy_mad_qp(&port_priv->qp_info[1]);
     destroy_mad_qp(&port_priv->qp_info[0]);
-    ib_dealloc_pd(port_priv->pd);
     ib_free_cq(port_priv->cq);
+    ib_dealloc_pd(port_priv->pd);
     cleanup_recv_queue(&port_priv->qp_info[1]);
     cleanup_recv_queue(&port_priv->qp_info[0]);
     /* XXX: Handle deallocation of MAD registration tables */

@@ -49,6 +49,7 @@
 #include <linux/sched.h>
 #include <linux/semaphore.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>

 #include <linux/uaccess.h>

@@ -868,11 +869,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)

     if (get_user(id, arg))
         return -EFAULT;
+    if (id >= IB_UMAD_MAX_AGENTS)
+        return -EINVAL;

     mutex_lock(&file->port->file_mutex);
     mutex_lock(&file->mutex);

-    if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+    id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
+    if (!__get_agent(file, id)) {
         ret = -EINVAL;
         goto out;
     }

@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
-{
-    return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
-}
-
 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
 {
     return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
 }

-static bool use_umr(struct mlx5_ib_dev *dev, int order)
-{
-    return order <= mr_cache_max_order(dev) &&
-        umr_can_modify_entity_size(dev);
-}
-
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
     int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -1305,7 +1295,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
     struct mlx5_ib_dev *dev = to_mdev(pd->device);
     struct mlx5_ib_mr *mr = NULL;
-    bool populate_mtts = false;
+    bool use_umr;
     struct ib_umem *umem;
     int page_shift;
     int npages;
@@ -1338,29 +1328,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
     if (err < 0)
         return ERR_PTR(err);

-    if (use_umr(dev, order)) {
+    use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
+          (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
+           !MLX5_CAP_GEN(dev->mdev, atomic));
+
+    if (order <= mr_cache_max_order(dev) && use_umr) {
         mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
                      page_shift, order, access_flags);
         if (PTR_ERR(mr) == -EAGAIN) {
             mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
             mr = NULL;
         }
-        populate_mtts = false;
     } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
         if (access_flags & IB_ACCESS_ON_DEMAND) {
             err = -EINVAL;
             pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
             goto error;
         }
-        populate_mtts = true;
+        use_umr = false;
     }

     if (!mr) {
-        if (!umr_can_modify_entity_size(dev))
-            populate_mtts = true;
         mutex_lock(&dev->slow_path_mutex);
         mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
-                page_shift, access_flags, populate_mtts);
+                page_shift, access_flags, !use_umr);
         mutex_unlock(&dev->slow_path_mutex);
     }

@@ -1378,7 +1369,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
     update_odp_mr(mr);
 #endif

-    if (!populate_mtts) {
+    if (use_umr) {
         int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

         if (access_flags & IB_ACCESS_ON_DEMAND)

@@ -141,7 +141,12 @@ static int iforce_usb_probe(struct usb_interface *intf,
         return -ENODEV;

     epirq = &interface->endpoint[0].desc;
+    if (!usb_endpoint_is_int_in(epirq))
+        return -ENODEV;
+
     epout = &interface->endpoint[1].desc;
+    if (!usb_endpoint_is_int_out(epout))
+        return -ENODEV;

     if (!(iforce = kzalloc(sizeof(struct iforce) + 32, GFP_KERNEL)))
         goto fail;

@@ -161,7 +161,8 @@ struct trackpoint_data {
 #ifdef CONFIG_MOUSE_PS2_TRACKPOINT
 int trackpoint_detect(struct psmouse *psmouse, bool set_properties);
 #else
-inline int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
+static inline int trackpoint_detect(struct psmouse *psmouse,
+                                    bool set_properties)
 {
     return -ENOSYS;
 }

@@ -116,6 +116,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
     if (intf->cur_altsetting->desc.bNumEndpoints < 1)
         return -ENODEV;

+    endpoint = &intf->cur_altsetting->endpoint[0].desc;
+    if (!usb_endpoint_is_int_in(endpoint))
+        return -ENODEV;
+
     kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
     input_dev = input_allocate_device();
     if (!kbtab || !input_dev)
@@ -154,8 +158,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
     input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
     input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);

-    endpoint = &intf->cur_altsetting->endpoint[0].desc;
-
     usb_fill_int_urb(kbtab->irq, dev,
              usb_rcvintpipe(dev, endpoint->bEndpointAddress),
              kbtab->data, 8,

@@ -1710,7 +1710,7 @@ static const struct attribute_group *amd_iommu_groups[] = {
     NULL,
 };

-static int iommu_init_pci(struct amd_iommu *iommu)
+static int __init iommu_init_pci(struct amd_iommu *iommu)
 {
     int cap_ptr = iommu->cap_ptr;
     u32 range, misc, low, high;

@@ -2883,7 +2883,7 @@ static int its_vpe_init(struct its_vpe *vpe)

     if (!its_alloc_vpe_table(vpe_id)) {
         its_vpe_id_free(vpe_id);
-        its_free_pending_table(vpe->vpt_page);
+        its_free_pending_table(vpt_page);
         return -ENOMEM;
     }

@@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
     .irq_unmask     = imx_gpcv2_irq_unmask,
     .irq_set_wake       = imx_gpcv2_irq_set_wake,
     .irq_retrigger      = irq_chip_retrigger_hierarchy,
+    .irq_set_type       = irq_chip_set_type_parent,
 #ifdef CONFIG_SMP
     .irq_set_affinity   = irq_chip_set_affinity_parent,
 #endif

@@ -130,6 +130,7 @@ struct mapped_device {
 };

 int md_in_flight(struct mapped_device *md);
+void disable_discard(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);

@@ -295,11 +295,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
     }

     if (unlikely(error == BLK_STS_TARGET)) {
-        if (req_op(clone) == REQ_OP_WRITE_SAME &&
-            !clone->q->limits.max_write_same_sectors)
+        if (req_op(clone) == REQ_OP_DISCARD &&
+            !clone->q->limits.max_discard_sectors)
+            disable_discard(tio->md);
+        else if (req_op(clone) == REQ_OP_WRITE_SAME &&
+             !clone->q->limits.max_write_same_sectors)
             disable_write_same(tio->md);
-        if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
-            !clone->q->limits.max_write_zeroes_sectors)
+        else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+             !clone->q->limits.max_write_zeroes_sectors)
             disable_write_zeroes(tio->md);
     }

@@ -910,6 +910,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
     }
 }

+void disable_discard(struct mapped_device *md)
+{
+    struct queue_limits *limits = dm_get_queue_limits(md);
+
+    /* device doesn't really support DISCARD, disable it */
+    limits->max_discard_sectors = 0;
+    blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
+}
+
 void disable_write_same(struct mapped_device *md)
 {
     struct queue_limits *limits = dm_get_queue_limits(md);
@@ -935,11 +944,14 @@ static void clone_endio(struct bio *bio)
     dm_endio_fn endio = tio->ti->type->end_io;

     if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
-        if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-            !bio->bi_disk->queue->limits.max_write_same_sectors)
+        if (bio_op(bio) == REQ_OP_DISCARD &&
+            !bio->bi_disk->queue->limits.max_discard_sectors)
+            disable_discard(md);
+        else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+             !bio->bi_disk->queue->limits.max_write_same_sectors)
             disable_write_same(md);
-        if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-            !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+        else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+             !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
             disable_write_zeroes(md);
     }

@@ -788,7 +788,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)

     ret = mmc_of_parse(host->mmc);
     if (ret) {
-        dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
+        if (ret != -EPROBE_DEFER)
+            dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
         goto unreg_clk;
     }

@@ -1102,6 +1102,8 @@ static void bond_compute_features(struct bonding *bond)
 done:
     bond_dev->vlan_features = vlan_features;
     bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+                    NETIF_F_HW_VLAN_CTAG_TX |
+                    NETIF_F_HW_VLAN_STAG_TX |
                     NETIF_F_GSO_UDP_L4;
     bond_dev->gso_max_segs = gso_max_segs;
     netif_set_gso_max_size(bond_dev, gso_max_size);

@@ -3058,12 +3058,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
     /* if VF indicate to PF this function is going down (PF will delete sp
      * elements and clear initializations
      */
-    if (IS_VF(bp))
+    if (IS_VF(bp)) {
+        bnx2x_clear_vlan_info(bp);
         bnx2x_vfpf_close_vf(bp);
-    else if (unload_mode != UNLOAD_RECOVERY)
+    } else if (unload_mode != UNLOAD_RECOVERY) {
         /* if this is a normal/close unload need to clean up chip*/
         bnx2x_chip_cleanup(bp, unload_mode, keep_link);
-    else {
+    } else {
         /* Send the UNLOAD_REQUEST to the MCP */
         bnx2x_send_unload_req(bp, unload_mode);

@@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
 void bnx2x_disable_close_the_gate(struct bnx2x *bp);
 int bnx2x_init_hw_func_cnic(struct bnx2x *bp);

+void bnx2x_clear_vlan_info(struct bnx2x *bp);
+
 /**
  * bnx2x_sp_event - handle ramrods completion.
  *

@@ -8488,11 +8488,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
     return rc;
 }

+void bnx2x_clear_vlan_info(struct bnx2x *bp)
+{
+    struct bnx2x_vlan_entry *vlan;
+
+    /* Mark that hw forgot all entries */
+    list_for_each_entry(vlan, &bp->vlan_reg, link)
+        vlan->hw = false;
+
+    bp->vlan_cnt = 0;
+}
+
 static int bnx2x_del_all_vlans(struct bnx2x *bp)
 {
     struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
     unsigned long ramrod_flags = 0, vlan_flags = 0;
-    struct bnx2x_vlan_entry *vlan;
     int rc;

     __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8501,10 +8511,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
     if (rc)
         return rc;

-    /* Mark that hw forgot all entries */
-    list_for_each_entry(vlan, &bp->vlan_reg, link)
-        vlan->hw = false;
-    bp->vlan_cnt = 0;
+    bnx2x_clear_vlan_info(bp);

     return 0;
 }

@@ -1190,7 +1190,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
     err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
     if (err) {
         en_err(priv, "Failed to allocate RSS indirection QP\n");
-        goto rss_err;
+        goto qp_alloc_err;
     }

     rss_map->indir_qp->event = mlx4_en_sqp_event;
@@ -1244,6 +1244,7 @@
                MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
     mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
     mlx4_qp_free(mdev->dev, rss_map->indir_qp);
+qp_alloc_err:
     kfree(rss_map->indir_qp);
     rss_map->indir_qp = NULL;
 rss_err:

@@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
     return &arfs_t->rules_hash[bucket_idx];
 }

-static u8 arfs_get_ip_proto(const struct sk_buff *skb)
-{
-    return (skb->protocol == htons(ETH_P_IP)) ?
-        ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
-}
-
 static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
                      u8 ip_proto, __be16 etype)
 {
@@ -599,31 +593,9 @@ static void arfs_handle_work(struct work_struct *work)
     arfs_may_expire_flow(priv);
 }

-/* return L4 destination port from ip4/6 packets */
-static __be16 arfs_get_dst_port(const struct sk_buff *skb)
-{
-    char *transport_header;
-
-    transport_header = skb_transport_header(skb);
-    if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
-        return ((struct tcphdr *)transport_header)->dest;
-    return ((struct udphdr *)transport_header)->dest;
-}
-
-/* return L4 source port from ip4/6 packets */
-static __be16 arfs_get_src_port(const struct sk_buff *skb)
-{
-    char *transport_header;
-
-    transport_header = skb_transport_header(skb);
-    if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
-        return ((struct tcphdr *)transport_header)->source;
-    return ((struct udphdr *)transport_header)->source;
-}
-
 static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
                      struct arfs_table *arfs_t,
-                     const struct sk_buff *skb,
+                     const struct flow_keys *fk,
                      u16 rxq, u32 flow_id)
 {
     struct arfs_rule *rule;
@@ -638,19 +610,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
     INIT_WORK(&rule->arfs_work, arfs_handle_work);

     tuple = &rule->tuple;
-    tuple->etype = skb->protocol;
+    tuple->etype = fk->basic.n_proto;
+    tuple->ip_proto = fk->basic.ip_proto;
     if (tuple->etype == htons(ETH_P_IP)) {
-        tuple->src_ipv4 = ip_hdr(skb)->saddr;
-        tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+        tuple->src_ipv4 = fk->addrs.v4addrs.src;
+        tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
     } else {
-        memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+        memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
                sizeof(struct in6_addr));
-        memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+        memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
                sizeof(struct in6_addr));
     }
-    tuple->ip_proto = arfs_get_ip_proto(skb);
-    tuple->src_port = arfs_get_src_port(skb);
-    tuple->dst_port = arfs_get_dst_port(skb);
+    tuple->src_port = fk->ports.src;
+    tuple->dst_port = fk->ports.dst;

     rule->flow_id = flow_id;
     rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
@@ -661,37 +633,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
     return rule;
 }

-static bool arfs_cmp_ips(struct arfs_tuple *tuple,
-             const struct sk_buff *skb)
+static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
 {
-    if (tuple->etype == htons(ETH_P_IP) &&
-        tuple->src_ipv4 == ip_hdr(skb)->saddr &&
-        tuple->dst_ipv4 == ip_hdr(skb)->daddr)
-        return true;
-    if (tuple->etype == htons(ETH_P_IPV6) &&
-        (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
-             sizeof(struct in6_addr))) &&
-        (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
-             sizeof(struct in6_addr))))
-        return true;
+    if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
+        return false;
+    if (tuple->etype != fk->basic.n_proto)
+        return false;
+    if (tuple->etype == htons(ETH_P_IP))
+        return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
+               tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
+    if (tuple->etype == htons(ETH_P_IPV6))
+        return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
+                   sizeof(struct in6_addr)) &&
+               !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
+                   sizeof(struct in6_addr));
     return false;
 }

 static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
-                    const struct sk_buff *skb)
+                    const struct flow_keys *fk)
 {
     struct arfs_rule *arfs_rule;
     struct hlist_head *head;
-    __be16 src_port = arfs_get_src_port(skb);
-    __be16 dst_port = arfs_get_dst_port(skb);

-    head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+    head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
     hlist_for_each_entry(arfs_rule, head, hlist) {
-        if (arfs_rule->tuple.src_port == src_port &&
-            arfs_rule->tuple.dst_port == dst_port &&
-            arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+        if (arfs_cmp(&arfs_rule->tuple, fk))
             return arfs_rule;
-        }
     }

     return NULL;
@@ -704,20 +672,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
     struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
     struct arfs_table *arfs_t;
     struct arfs_rule *arfs_rule;
+    struct flow_keys fk;
+
+    if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+        return -EPROTONOSUPPORT;

-    if (skb->protocol != htons(ETH_P_IP) &&
-        skb->protocol != htons(ETH_P_IPV6))
+    if (fk.basic.n_proto != htons(ETH_P_IP) &&
+        fk.basic.n_proto != htons(ETH_P_IPV6))
         return -EPROTONOSUPPORT;

     if (skb->encapsulation)
         return -EPROTONOSUPPORT;

-    arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+    arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
     if (!arfs_t)
         return -EPROTONOSUPPORT;

     spin_lock_bh(&arfs->arfs_lock);
-    arfs_rule = arfs_find_rule(arfs_t, skb);
+    arfs_rule = arfs_find_rule(arfs_t, &fk);
     if (arfs_rule) {
         if (arfs_rule->rxq == rxq_index) {
             spin_unlock_bh(&arfs->arfs_lock);
@@ -725,8 +697,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
         }
         arfs_rule->rxq = rxq_index;
     } else {
-        arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
-                        rxq_index, flow_id);
+        arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
         if (!arfs_rule) {
             spin_unlock_bh(&arfs->arfs_lock);
             return -ENOMEM;

@@ -1083,6 +1083,9 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
     struct mlx5_core_dev *mdev = priv->mdev;
     int err;

+    if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+        return -EOPNOTSUPP;
+
     if (pauseparam->autoneg)
         return -EINVAL;

@@ -1011,6 +1011,8 @@ static void __team_compute_features(struct team *team)

     team->dev->vlan_features = vlan_features;
     team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+                     NETIF_F_HW_VLAN_CTAG_TX |
+                     NETIF_F_HW_VLAN_STAG_TX |
                      NETIF_F_GSO_UDP_L4;
     team->dev->hard_header_len = max_hard_header_len;

@@ -285,7 +285,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
 {
     int i;
-    __u8 tmp;
+    __u8 tmp = 0;
     __le16 retdatai;
     int ret;

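The pegasus fix initialises `tmp` because get_registers() can fail without ever writing it, after which the old code read an indeterminate byte. A minimal userspace sketch of the pattern:

#include <stdio.h>

static int get_byte(int fail, unsigned char *out)
{
    if (fail)
        return -1;  /* error path leaves *out untouched */
    *out = 0x42;
    return 0;
}

int main(void)
{
    unsigned char tmp = 0;  /* the fix: a defined value on failure */

    if (get_byte(1, &tmp) < 0)
        printf("read failed, tmp=%u (still well defined)\n", tmp);
    return 0;
}
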
@@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
             skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
             nskb = xenvif_alloc_skb(0);
             if (unlikely(nskb == NULL)) {
+                skb_shinfo(skb)->nr_frags = 0;
                 kfree_skb(skb);
                 xenvif_tx_err(queue, &txreq, extra_count, idx);
                 if (net_ratelimit())
@@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,

         if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
             /* Failure in xenvif_set_skb_gso is fatal. */
+            skb_shinfo(skb)->nr_frags = 0;
             kfree_skb(skb);
             kfree_skb(nskb);
             break;

@@ -263,7 +263,6 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
     export->pwm = pwm;
     mutex_init(&export->lock);

-    export->child.class = parent->class;
     export->child.release = pwm_export_release;
     export->child.parent = parent;
     export->child.devt = MKDEV(0, 0);

@@ -2320,6 +2320,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
     case IOACCEL2_SERV_RESPONSE_COMPLETE:
         switch (c2->error_data.status) {
         case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+            if (cmd)
+                cmd->result = 0;
             break;
         case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
             cmd->result |= SAM_STAT_CHECK_CONDITION;
@@ -2479,8 +2481,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,

     /* check for good status */
     if (likely(c2->error_data.serv_response == 0 &&
-            c2->error_data.status == 0))
+            c2->error_data.status == 0)) {
+        cmd->result = 0;
         return hpsa_cmd_free_and_done(h, c, cmd);
+    }

     /*
      * Any RAID offload error results in retry which will use
@@ -5617,6 +5621,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
     }
     c = cmd_tagged_alloc(h, cmd);

+    /*
+     * This is necessary because the SML doesn't zero out this field during
+     * error recovery.
+     */
+    cmd->result = 0;
+
     /*
      * Call alternate submit routine for I/O accelerated commands.
      * Retries always go down the normal I/O path.

@@ -4732,7 +4732,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
         ql_log(ql_log_warn, vha, 0xd049,
             "Failed to allocate ct_sns request.\n");
         kfree(fcport);
-        fcport = NULL;
+        return NULL;
     }
     INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
     INIT_LIST_HEAD(&fcport->gnl_entry);

@@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
 static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
                 unsigned int flags)
 {
-    int divider, base, prescale;
+    unsigned int divider, base, prescale;

-    /* This function needs improvment */
+    /* This function needs improvement */
     /* Don't know if divider==0 works. */

     for (prescale = 0; prescale < 16; prescale++) {
@@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
         divider = (*nanosec) / base;
         break;
     case CMDF_ROUND_UP:
-        divider = (*nanosec) / base;
+        divider = DIV_ROUND_UP(*nanosec, base);
         break;
     }
     if (divider < 65536) {
@@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
     }

     prescale = 15;
-    base = timer_base * (1 << prescale);
+    base = timer_base * (prescale + 1);
     divider = 65535;
     *nanosec = divider * base;
     return (prescale << 16) | (divider);

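The comedi change swaps plain division for DIV_ROUND_UP in the CMDF_ROUND_UP case; integer division truncates toward zero, which silently rounds the timer period down instead of up. A tiny demonstration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned int nanosec = 1001, base = 100;

    printf("truncated:  %u\n", nanosec / base);              /* 10 */
    printf("rounded up: %u\n", DIV_ROUND_UP(nanosec, base)); /* 11 */
    return 0;
}
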
@@ -1333,10 +1333,6 @@ static int acm_probe(struct usb_interface *intf,
     tty_port_init(&acm->port);
     acm->port.ops = &acm_port_ops;

-    minor = acm_alloc_minor(acm);
-    if (minor < 0)
-        goto alloc_fail1;
-
     ctrlsize = usb_endpoint_maxp(epctrl);
     readsize = usb_endpoint_maxp(epread) *
                 (quirks == SINGLE_RX_URB ? 1 : 2);
@@ -1344,6 +1340,13 @@ static int acm_probe(struct usb_interface *intf,
     acm->writesize = usb_endpoint_maxp(epwrite) * 20;
     acm->control = control_interface;
     acm->data = data_interface;
+
+    usb_get_intf(acm->control); /* undone in destruct() */
+
+    minor = acm_alloc_minor(acm);
+    if (minor < 0)
+        goto alloc_fail1;
+
     acm->minor = minor;
     acm->dev = usb_dev;
     if (h.usb_cdc_acm_descriptor)
@@ -1490,7 +1493,6 @@ static int acm_probe(struct usb_interface *intf,
     usb_driver_claim_interface(&acm_driver, data_interface, acm);
     usb_set_intfdata(data_interface, acm);

-    usb_get_intf(control_interface);
     tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
             &control_interface->dev);
     if (IS_ERR(tty_dev)) {

@@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf,
         intf->minor = minor;
         break;
     }
-    up_write(&minor_rwsem);
-    if (intf->minor < 0)
+    if (intf->minor < 0) {
+        up_write(&minor_rwsem);
         return -EXFULL;
+    }

     /* create a usb class device for this usb interface */
     snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf,
             MKDEV(USB_MAJOR, minor), class_driver,
             "%s", kbasename(name));
     if (IS_ERR(intf->usb_dev)) {
-        down_write(&minor_rwsem);
         usb_minors[minor] = NULL;
         intf->minor = -1;
-        up_write(&minor_rwsem);
         retval = PTR_ERR(intf->usb_dev);
     }
+    up_write(&minor_rwsem);
     return retval;
 }
 EXPORT_SYMBOL_GPL(usb_register_dev);
@@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf,
         return;

     dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
+    device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));

     down_write(&minor_rwsem);
     usb_minors[intf->minor] = NULL;
     up_write(&minor_rwsem);

-    device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
     intf->usb_dev = NULL;
     intf->minor = -1;
     destroy_usb_class();

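The usb_register_dev() change above widens the minor_rwsem critical section so allocating a slot and handling the failure path happen atomically, instead of dropping the lock between them. A userspace sketch of the locking pattern (not the kernel code; names are invented):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static void *minors[16];

/* Keep the table scan, the update and the failure check inside
 * one write-locked section, mirroring the fix. */
static int register_slot(void *dev)
{
    int i, slot = -1;

    pthread_rwlock_wrlock(&lock);
    for (i = 0; i < 16; i++) {
        if (!minors[i]) {
            minors[i] = dev;
            slot = i;
            break;
        }
    }
    if (slot < 0) {
        pthread_rwlock_unlock(&lock);
        return -1;      /* table full; nothing published */
    }
    pthread_rwlock_unlock(&lock);
    return slot;
}

int main(void)
{
    int dev;
    printf("slot=%d\n", register_slot(&dev));
    return 0;
}
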
|
@ -2211,14 +2211,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
|
|||
(struct usb_cdc_dmm_desc *)buffer;
|
||||
break;
|
||||
case USB_CDC_MDLM_TYPE:
|
||||
if (elength < sizeof(struct usb_cdc_mdlm_desc *))
|
||||
if (elength < sizeof(struct usb_cdc_mdlm_desc))
|
||||
goto next_desc;
|
||||
if (desc)
|
||||
return -EINVAL;
|
||||
desc = (struct usb_cdc_mdlm_desc *)buffer;
|
||||
break;
|
||||
case USB_CDC_MDLM_DETAIL_TYPE:
|
||||
if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
|
||||
if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
|
||||
goto next_desc;
|
||||
if (detail)
|
||||
return -EINVAL;
|
||||
|
|
|
@@ -19,6 +19,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/sys_soc.h>
 #include <linux/uaccess.h>
 #include <linux/usb/ch9.h>
@@ -2378,9 +2379,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
     if (usb3->forced_b_device)
         return -EBUSY;

-    if (!strncmp(buf, "host", strlen("host")))
+    if (sysfs_streq(buf, "host"))
         new_mode_is_host = true;
-    else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+    else if (sysfs_streq(buf, "peripheral"))
         new_mode_is_host = false;
     else
         return -EINVAL;

@@ -968,6 +968,11 @@
     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
     { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },

+    /* Motorola devices */
+    { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) },    /* mdm6600 */
+    { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) },    /* mdm9600 */
+    { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) },    /* mdm ram dl */
+    { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) },    /* mdm qc dl */

     { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
     { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
@@ -1549,6 +1554,7 @@
     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
       .driver_info = RSVD(2) },
     { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) },   /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
+    { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1952,11 +1958,15 @@
       .driver_info = RSVD(4) },
     { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),            /* D-Link DWM-222 */
       .driver_info = RSVD(4) },
+    { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff),            /* D-Link DWM-222 A2 */
+      .driver_info = RSVD(4) },
     { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) },    /* D-Link DWM-152/C1 */
     { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) },    /* D-Link DWM-156/C1 */
     { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) },    /* D-Link DWM-156/A3 */
     { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff),            /* Olicard 600 */
       .driver_info = RSVD(4) },
+    { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff),            /* BroadMobi BM818 */
+      .driver_info = RSVD(4) },
     { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },          /* OLICARD300 - MT6225 */
     { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
     { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },

@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
 {
     int err;
     u16 old_value;
-    pci_power_t new_state, old_state;
+    pci_power_t new_state;

     err = pci_read_config_word(dev, offset, &old_value);
     if (err)
         goto out;

-    old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
     new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);

     new_value &= PM_OK_BITS;

@@ -1485,7 +1485,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
         goto out;
     }

-    trans = btrfs_attach_transaction(root);
+    trans = btrfs_join_transaction_nostart(root);
     if (IS_ERR(trans)) {
         if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
             ret = PTR_ERR(trans);

@@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
     [TRANS_STATE_COMMIT_START]  = (__TRANS_START | __TRANS_ATTACH),
     [TRANS_STATE_COMMIT_DOING]  = (__TRANS_START |
                        __TRANS_ATTACH |
-                       __TRANS_JOIN),
+                       __TRANS_JOIN |
+                       __TRANS_JOIN_NOSTART),
     [TRANS_STATE_UNBLOCKED]     = (__TRANS_START |
                        __TRANS_ATTACH |
                        __TRANS_JOIN |
-                       __TRANS_JOIN_NOLOCK),
+                       __TRANS_JOIN_NOLOCK |
+                       __TRANS_JOIN_NOSTART),
     [TRANS_STATE_COMPLETED]     = (__TRANS_START |
                        __TRANS_ATTACH |
                        __TRANS_JOIN |
-                       __TRANS_JOIN_NOLOCK),
+                       __TRANS_JOIN_NOLOCK |
+                       __TRANS_JOIN_NOSTART),
 };

 void btrfs_put_transaction(struct btrfs_transaction *transaction)
@@ -531,7 +534,8 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
         ret = join_transaction(fs_info, type);
         if (ret == -EBUSY) {
             wait_current_trans(fs_info);
-            if (unlikely(type == TRANS_ATTACH))
+            if (unlikely(type == TRANS_ATTACH ||
+                     type == TRANS_JOIN_NOSTART))
                 ret = -ENOENT;
         }
     } while (ret == -EBUSY);
@@ -647,6 +651,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root
                  BTRFS_RESERVE_NO_FLUSH, true);
 }

+/*
+ * Similar to regular join but it never starts a transaction when none is
+ * running or after waiting for the current one to finish.
+ */
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
+{
+    return start_transaction(root, 0, TRANS_JOIN_NOSTART,
+                 BTRFS_RESERVE_NO_FLUSH, true);
+}
+
 /*
  * btrfs_attach_transaction() - catch the running transaction
  *

@@ -97,11 +97,13 @@ struct btrfs_transaction {
 #define __TRANS_JOIN        (1U << 11)
 #define __TRANS_JOIN_NOLOCK (1U << 12)
 #define __TRANS_DUMMY       (1U << 13)
+#define __TRANS_JOIN_NOSTART    (1U << 14)

 #define TRANS_START     (__TRANS_START | __TRANS_FREEZABLE)
 #define TRANS_ATTACH        (__TRANS_ATTACH)
 #define TRANS_JOIN      (__TRANS_JOIN | __TRANS_FREEZABLE)
 #define TRANS_JOIN_NOLOCK   (__TRANS_JOIN_NOLOCK)
+#define TRANS_JOIN_NOSTART  (__TRANS_JOIN_NOSTART)

 #define TRANS_EXTWRITERS    (__TRANS_START | __TRANS_ATTACH)

@@ -187,6 +189,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
                     int min_factor);
 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
                     struct btrfs_root *root);

@@ -3833,7 +3833,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
     u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
     int low_bucket = 0, bucket, high_bucket;
     struct ocfs2_xattr_bucket *search;
-    u32 last_hash;
     u64 blkno, lower_blkno = 0;

     search = ocfs2_xattr_bucket_new(inode);
@@ -3877,8 +3876,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
         if (xh->xh_count)
             xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];

-        last_hash = le32_to_cpu(xe->xe_name_hash);
-
         /* record lower_blkno which may be the insert place. */
         lower_blkno = blkno;

@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
         }
         if (seq_has_overflowed(m))
             goto Eoverflow;
+        p = m->op->next(m, p, &m->index);
         if (pos + m->count > offset) {
             m->from = offset - pos;
             m->count -= m->from;
@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
         }
         pos += m->count;
         m->count = 0;
-        p = m->op->next(m, p, &m->index);
         if (pos == offset)
             break;
     }

include/asm-generic/getorder.h
@@ -7,24 +7,6 @@
 #include <linux/compiler.h>
 #include <linux/log2.h>

-/*
- * Runtime evaluation of get_order()
- */
-static inline __attribute_const__
-int __get_order(unsigned long size)
-{
-	int order;
-
-	size--;
-	size >>= PAGE_SHIFT;
-#if BITS_PER_LONG == 32
-	order = fls(size);
-#else
-	order = fls64(size);
-#endif
-	return order;
-}
-
 /**
  * get_order - Determine the allocation order of a memory size
  * @size: The size for which to get the order
@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
  * to hold an object of the specified size.
  *
  * The result is undefined if the size is 0.
- *
- * This function may be used to initialise variables with compile time
- * evaluations of constants.
  */
-#define get_order(n)						\
-(								\
-	__builtin_constant_p(n) ? (				\
-		((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT :	\
-		(((n) < (1UL << PAGE_SHIFT)) ? 0 :		\
-		 ilog2((n) - 1) - PAGE_SHIFT + 1)		\
-	) :							\
-	__get_order(n)						\
-)
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+	if (__builtin_constant_p(size)) {
+		if (!size)
+			return BITS_PER_LONG - PAGE_SHIFT;
+
+		if (size < (1UL << PAGE_SHIFT))
+			return 0;
+
+		return ilog2((size) - 1) - PAGE_SHIFT + 1;
+	}
+
+	size--;
+	size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+	return fls(size);
+#else
+	return fls64(size);
+#endif
+}

 #endif	/* __ASSEMBLY__ */

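For illustration, a user-space sketch of the rounding rule the new inline implements (example code, not part of the patch; it assumes 4 KiB pages, i.e. PAGE_SHIFT == 12):

#include <stdio.h>

#define PAGE_SHIFT 12

/* get_order(s) is the smallest n with 2^n * PAGE_SIZE >= s;
 * the result for s == 0 is undefined, as the kernel comment says. */
static int order_of(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;	/* full pages minus one */
	while (size) {				/* fls() equivalent */
		size >>= 1;
		order++;
	}
	return order;
}

int main(void)
{
	/* 4096 -> 0, 4097 -> 1, 8192 -> 1, 8193 -> 2, 65536 -> 4 */
	unsigned long sizes[] = { 4096, 4097, 8192, 8193, 65536 };

	for (int i = 0; i < 5; i++)
		printf("get_order(%lu) = %d\n", sizes[i], order_of(sizes[i]));
	return 0;
}
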
include/drm/i915_pciids.h
@@ -386,6 +386,7 @@
 	INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
 	INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
 	INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
+	INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \
 	INTEL_VGA_DEVICE(0x3E9A, info)  /* SRV GT2 */

 /* CFL H */

include/kvm/arm_vgic.h
@@ -361,6 +361,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);

 void kvm_vgic_load(struct kvm_vcpu *vcpu);
 void kvm_vgic_put(struct kvm_vcpu *vcpu);
+void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);

 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)	((k)->arch.vgic.initialized)

kernel/sched/cpufreq_schedutil.c
@@ -44,6 +44,7 @@ struct sugov_policy {
 	struct task_struct	*thread;
 	bool			work_in_progress;

+	bool			limits_changed;
 	bool			need_freq_update;
 };

@@ -94,8 +95,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 	    !cpufreq_this_cpu_can_update(sg_policy->policy))
 		return false;

-	if (unlikely(sg_policy->need_freq_update))
+	if (unlikely(sg_policy->limits_changed)) {
+		sg_policy->limits_changed = false;
+		sg_policy->need_freq_update = true;
 		return true;
+	}

 	/* No need to recalculate next freq for min_rate_limit_us
 	 * at least. However we might still decide to further rate
@@ -458,7 +462,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
 {
 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
-		sg_policy->need_freq_update = true;
+		sg_policy->limits_changed = true;
 }

 static void sugov_update_single(struct update_util_data *hook, u64 time,
@@ -478,7 +482,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	if (!sugov_should_update_freq(sg_policy, time))
 		return;

-	busy = sugov_cpu_is_busy(sg_cpu);
+	/* Limits may have changed, don't skip frequency update */
+	busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);

 	util = sugov_get_util(sg_cpu);
 	max = sg_cpu->max;
@@ -897,6 +902,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 	sg_policy->last_freq_update_time	= 0;
 	sg_policy->next_freq			= 0;
 	sg_policy->work_in_progress		= false;
+	sg_policy->limits_changed		= false;
 	sg_policy->need_freq_update		= false;
 	sg_policy->cached_raw_freq		= 0;

@@ -948,7 +954,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
 		mutex_unlock(&sg_policy->work_lock);
 	}

-	sg_policy->need_freq_update = true;
+	sg_policy->limits_changed = true;
 }

 struct cpufreq_governor schedutil_gov = {

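The patch separates "the policy limits changed" (raised by sugov_limits()) from "a frequency re-evaluation is required" (consumed by the update path), so that the busy-CPU shortcut can no longer swallow a pending limits change. A condensed single-threaded model of that handshake, purely illustrative and not the kernel code:

#include <stdbool.h>
#include <stdio.h>

static bool limits_changed;	/* set when policy limits are updated */
static bool need_freq_update;	/* consumed by frequency selection */

static bool should_update_freq(void)
{
	if (limits_changed) {
		limits_changed = false;
		need_freq_update = true;	/* force a recomputation */
		return true;
	}
	return false;	/* rate limiting elided in this model */
}

static void update_single(bool cpu_busy)
{
	if (!should_update_freq())
		return;

	/* Limits may have changed, don't skip the frequency update. */
	bool busy = !need_freq_update && cpu_busy;

	printf("recompute freq (busy shortcut %s)\n",
	       busy ? "taken" : "bypassed");
	need_freq_update = false;
}

int main(void)
{
	limits_changed = true;	/* as if sugov_limits() had run */
	update_single(true);	/* a busy CPU still gets the update */
	return 0;
}
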
mm/kmemleak.c
@@ -126,7 +126,7 @@
 /* GFP bitmask for kmemleak internal allocations */
 #define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
 				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
-				 __GFP_NOWARN | __GFP_NOFAIL)
+				 __GFP_NOWARN)

 /* scanning area inside a memory block */
 struct kmemleak_scan_area {

mm/memcontrol.c
@@ -1037,26 +1037,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
 		css_put(&prev->css);
 }

-static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
+					struct mem_cgroup *dead_memcg)
 {
-	struct mem_cgroup *memcg = dead_memcg;
 	struct mem_cgroup_reclaim_iter *iter;
 	struct mem_cgroup_per_node *mz;
 	int nid;
 	int i;

-	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
-		for_each_node(nid) {
-			mz = mem_cgroup_nodeinfo(memcg, nid);
-			for (i = 0; i <= DEF_PRIORITY; i++) {
-				iter = &mz->iter[i];
-				cmpxchg(&iter->position,
-					dead_memcg, NULL);
-			}
+	for_each_node(nid) {
+		mz = mem_cgroup_nodeinfo(from, nid);
+		for (i = 0; i <= DEF_PRIORITY; i++) {
+			iter = &mz->iter[i];
+			cmpxchg(&iter->position,
+				dead_memcg, NULL);
 		}
 	}
 }

+static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+{
+	struct mem_cgroup *memcg = dead_memcg;
+	struct mem_cgroup *last;
+
+	do {
+		__invalidate_reclaim_iterators(memcg, dead_memcg);
+		last = memcg;
+	} while ((memcg = parent_mem_cgroup(memcg)));
+
+	/*
+	 * When cgruop1 non-hierarchy mode is used,
+	 * parent_mem_cgroup() does not walk all the way up to the
+	 * cgroup root (root_mem_cgroup). So we have to handle
+	 * dead_memcg from cgroup root separately.
+	 */
+	if (last != root_mem_cgroup)
+		__invalidate_reclaim_iterators(root_mem_cgroup,
+						dead_memcg);
+}
+
 /**
  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
  * @memcg: hierarchy root

mm/mempolicy.c
@@ -403,7 +403,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 	},
 };

-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags);

 struct queue_pages {
@@ -429,11 +429,14 @@ static inline bool queue_pages_required(struct page *page,
 }

 /*
- * queue_pages_pmd() has three possible return values:
- * 1 - pages are placed on the right node or queued successfully.
- * 0 - THP was split.
- * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
- *        page was already on a node that does not follow the policy.
+ * queue_pages_pmd() has four possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * 2 - THP was split.
+ * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
+ *        existing page was already on a node that does not follow the
+ *        policy.
  */
 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
@@ -451,23 +454,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 	if (is_huge_zero_page(page)) {
 		spin_unlock(ptl);
 		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
+		ret = 2;
 		goto out;
 	}
-	if (!queue_pages_required(page, qp)) {
-		ret = 1;
+	if (!queue_pages_required(page, qp))
 		goto unlock;
-	}
-
-	ret = 1;
+
 	flags = qp->flags;
 	/* go to thp migration */
 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-		if (!vma_migratable(walk->vma)) {
-			ret = -EIO;
+		if (!vma_migratable(walk->vma) ||
+		    migrate_page_add(page, qp->pagelist, flags)) {
+			ret = 1;
 			goto unlock;
 		}
-
-		migrate_page_add(page, qp->pagelist, flags);
 	} else
 		ret = -EIO;
 unlock:
@@ -479,6 +479,13 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 /*
  * Scan through pages checking if pages follow certain conditions,
  * and move them to the pagelist if they do.
+ *
+ * queue_pages_pte_range() has three possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
+ *        on a node that does not follow the policy.
  */
 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 			unsigned long end, struct mm_walk *walk)
@@ -488,17 +495,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
 	int ret;
+	bool has_unmovable = false;
 	pte_t *pte;
 	spinlock_t *ptl;

 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
-		if (ret > 0)
-			return 0;
-		else if (ret < 0)
+		if (ret != 2)
 			return ret;
 	}
+	/* THP was split, fall through to pte walk */

 	if (pmd_trans_unstable(pmd))
 		return 0;
@@ -519,14 +526,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		if (!queue_pages_required(page, qp))
 			continue;
 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-			if (!vma_migratable(vma))
+			/* MPOL_MF_STRICT must be specified if we get here */
+			if (!vma_migratable(vma)) {
+				has_unmovable = true;
 				break;
-			migrate_page_add(page, qp->pagelist, flags);
+			}
+
+			/*
+			 * Do not abort immediately since there may be
+			 * temporary off LRU pages in the range.  Still
+			 * need migrate other LRU pages.
+			 */
+			if (migrate_page_add(page, qp->pagelist, flags))
+				has_unmovable = true;
 		} else
 			break;
 	}
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
+
+	if (has_unmovable)
+		return 1;
+
 	return addr != end ? -EIO : 0;
 }

@@ -639,7 +660,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags,) it's isolated and queued to the pagelist which is
- * passed via @private.)
+ * passed via @private.
+ *
+ * queue_pages_range() has three possible return values:
+ * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ *     specified.
+ * 0 - queue pages successfully or no misplaced page.
+ * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
 */
 static int
 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
@@ -927,7 +954,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 /*
  * page migration, thp tail pages can be passed.
  */
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags)
 {
 	struct page *head = compound_head(page);
@@ -940,8 +967,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 		mod_node_page_state(page_pgdat(head),
 			NR_ISOLATED_ANON + page_is_file_cache(head),
 			hpage_nr_pages(head));
+	} else if (flags & MPOL_MF_STRICT) {
+		/*
+		 * Non-movable page may reach here.  And, there may be
+		 * temporary off LRU pages or non-LRU movable pages.
+		 * Treat them as unmovable pages since they can't be
+		 * isolated, so they can't be moved at the moment.  It
+		 * should return -EIO for this case too.
+		 */
+		return -EIO;
 	}
 }
+
+	return 0;
+}
@@ -1144,9 +1182,10 @@ static struct page *new_page(struct page *page, unsigned long start)
 }
 #else

-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags)
 {
+	return -EIO;
 }

 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
@@ -1169,6 +1208,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 	struct mempolicy *new;
 	unsigned long end;
 	int err;
+	int ret;
 	LIST_HEAD(pagelist);

 	if (flags & ~(unsigned long)MPOL_MF_VALID)
@@ -1230,10 +1270,15 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (err)
 		goto mpol_out;

-	err = queue_pages_range(mm, start, end, nmask,
+	ret = queue_pages_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
-	if (!err)
-		err = mbind_range(mm, start, end, new);
+
+	if (ret < 0) {
+		err = -EIO;
+		goto up_out;
+	}
+
+	err = mbind_range(mm, start, end, new);

 	if (!err) {
 		int nr_failed = 0;
@@ -1246,13 +1291,14 @@ static long do_mbind(unsigned long start, unsigned long len,
 			putback_movable_pages(&pagelist);
 		}

-		if (nr_failed && (flags & MPOL_MF_STRICT))
+		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
 			err = -EIO;
 	} else
 		putback_movable_pages(&pagelist);

+up_out:
 	up_write(&mm->mmap_sem);
- mpol_out:
+mpol_out:
 	mpol_put(new);
 	return err;
 }

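The user-visible effect sits at the mbind(2) boundary: with MPOL_MF_MOVE* plus MPOL_MF_STRICT, a page that can neither satisfy the policy nor be migrated must now surface as EIO instead of being silently skipped. A minimal user-space sketch of such a call (the node number and sizes are assumptions for the example; build against libnuma's numaif.h):

#include <errno.h>
#include <numaif.h>		/* mbind() */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096 * 16;
	unsigned long nodemask = 1UL << 0;	/* bind to node 0 (assumed) */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 0, len);	/* fault the pages in first */

	/* After these fixes, an unmovable page in the range yields EIO
	 * rather than being ignored when both flags are set. */
	if (mbind(buf, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
		  MPOL_MF_MOVE | MPOL_MF_STRICT) != 0)
		printf("mbind failed: %s\n", strerror(errno));
	return 0;
}
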
mm/rmap.c
@@ -1467,7 +1467,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			/*
 			 * No need to invalidate here it will synchronize on
 			 * against the special swap migration pte.
+			 *
+			 * The assignment to subpage above was computed from a
+			 * swap PTE which results in an invalid pointer.
+			 * Since only PAGE_SIZE pages can currently be
+			 * migrated, just set it to page. This will need to be
+			 * changed when hugepage migrations to device private
+			 * memory are supported.
 			 */
+			subpage = page;
 			goto discard;
 		}

mm/usercopy.c
@@ -151,7 +151,7 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
 				       bool to_user)
 {
 	/* Reject if object wraps past end of memory. */
-	if (ptr + n < ptr)
+	if (ptr + (n - 1) < ptr)
 		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

 	/* Reject if NULL or ZERO-allocation. */

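The old test added the full length, so an object whose last byte sits at the very top of the address space wrapped to 0 and was falsely rejected; checking ptr + (n - 1) tests the last byte the copy actually touches. A user-space sketch of the arithmetic (the values are examples; unsigned wraparound is well defined in C):

#include <stdio.h>

int main(void)
{
	unsigned long ptr = ~0UL - 15;	/* 16-byte object ending at the top */
	unsigned long n = 16;

	/* old check: ptr + 16 wraps to 0, so 0 < ptr -> false positive */
	printf("old check rejects: %d\n", ptr + n < ptr);
	/* new check: ptr + 15 == ~0UL, no wrap, object is accepted */
	printf("new check rejects: %d\n", ptr + (n - 1) < ptr);
	return 0;
}
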
net/bridge/netfilter/ebtables.c
@@ -1779,20 +1779,28 @@ static int compat_calc_entry(const struct ebt_entry *e,
 	return 0;
 }

+static int ebt_compat_init_offsets(unsigned int number)
+{
+	if (number > INT_MAX)
+		return -EINVAL;
+
+	/* also count the base chain policies */
+	number += NF_BR_NUMHOOKS;
+
+	return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
+}
+
 static int compat_table_info(const struct ebt_table_info *info,
 			     struct compat_ebt_replace *newinfo)
 {
 	unsigned int size = info->entries_size;
 	const void *entries = info->entries;
+	int ret;

 	newinfo->entries_size = size;
-	if (info->nentries) {
-		int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
-						 info->nentries);
-		if (ret)
-			return ret;
-	}
+	ret = ebt_compat_init_offsets(info->nentries);
+	if (ret)
+		return ret;

 	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
 							entries, newinfo);
@@ -2241,11 +2249,9 @@ static int compat_do_replace(struct net *net, void __user *user,

 	xt_compat_lock(NFPROTO_BRIDGE);

-	if (tmp.nentries) {
-		ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
-		if (ret < 0)
-			goto out_unlock;
-	}
+	ret = ebt_compat_init_offsets(tmp.nentries);
+	if (ret < 0)
+		goto out_unlock;

 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
 	if (ret < 0)

net/dsa/switch.c
@@ -127,6 +127,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
 {
 	int port;

+	if (!ds->ops->port_mdb_add)
+		return;
+
 	for_each_set_bit(port, bitmap, ds->num_ports)
 		ds->ops->port_mdb_add(ds, port, mdb);
 }

net/netfilter/nf_conntrack_core.c
@@ -431,13 +431,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
  * table location, we assume id gets exposed to userspace.
  *
  * Following nf_conn items do not change throughout lifetime
- * of the nf_conn after it has been committed to main hash table:
+ * of the nf_conn:
  *
  * 1. nf_conn address
- * 2. nf_conn->ext address
- * 3. nf_conn->master address (normally NULL)
- * 4. tuple
- * 5. the associated net namespace
+ * 2. nf_conn->master address (normally NULL)
+ * 3. the associated net namespace
+ * 4. the original direction tuple
  */
 u32 nf_ct_get_id(const struct nf_conn *ct)
 {
@@ -447,9 +446,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct)
 	net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));

 	a = (unsigned long)ct;
-	b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
-	c = (unsigned long)ct->ext;
-	d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
+	b = (unsigned long)ct->master;
+	c = (unsigned long)nf_ct_net(ct);
+	d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				   sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
 				   &ct_id_seed);
 #ifdef CONFIG_64BIT
 	return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);

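The id is now derived only from values that stay constant for the whole lifetime of the entry (entry address, master address, net namespace, original-direction tuple), instead of ct->ext, which can be reallocated. A user-space analogy of the calculation's shape; mix64() is a stand-in for the kernel's keyed siphash, and every field value here is made up:

#include <stdint.h>
#include <stdio.h>

static uint64_t mix64(uint64_t x)	/* NOT siphash; illustration only */
{
	x ^= x >> 33; x *= 0xff51afd7ed558ccdULL;
	x ^= x >> 33; x *= 0xc4ceb9fe1a85ec53ULL;
	return x ^ (x >> 33);
}

struct conn {
	struct conn *master;
	void *net;
	uint64_t orig_tuple_hash;	/* stands in for siphash(&tuple, ...) */
};

static uint32_t conn_id(const struct conn *ct)
{
	uint64_t a = (uint64_t)(uintptr_t)ct;		/* 1. entry address */
	uint64_t b = (uint64_t)(uintptr_t)ct->master;	/* 2. master address */
	uint64_t c = (uint64_t)(uintptr_t)ct->net;	/* 3. net namespace */
	uint64_t d = ct->orig_tuple_hash;		/* 4. original tuple */

	return (uint32_t)mix64(a ^ mix64(b ^ mix64(c ^ d)));
}

int main(void)
{
	struct conn ct = { 0, (void *)0x1234, 0xdeadbeefULL };

	printf("id = %u\n", (unsigned)conn_id(&ct));
	return 0;
}
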
net/packet/af_packet.c
@@ -2616,6 +2616,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)

 	mutex_lock(&po->pg_vec_lock);

+	/* packet_sendmsg() check on tx_ring.pg_vec was lockless,
+	 * we need to confirm it under protection of pg_vec_lock.
+	 */
+	if (unlikely(!po->tx_ring.pg_vec)) {
+		err = -EBUSY;
+		goto out;
+	}
 	if (likely(saddr == NULL)) {
 		dev	= packet_cached_dev_get(po);
 		proto	= po->num;

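The shape of this fix is the classic check-then-confirm pattern: a lockless read may race with concurrent teardown, so it has to be re-validated once pg_vec_lock is held. A condensed sketch of that pattern (the names are illustrative, not the kernel's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *ring;	/* may be torn down by another thread */

static int send_on_ring(void)
{
	if (!ring)			/* lockless hint; may be stale */
		return -1;

	pthread_mutex_lock(&lock);
	if (!ring) {			/* confirm under the lock */
		pthread_mutex_unlock(&lock);
		return -1;		/* -EBUSY in the kernel code */
	}
	/* ... safe to use the ring here ... */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("%d\n", send_on_ring());
	return 0;
}
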
net/sctp/sm_sideeffect.c
@@ -561,7 +561,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
 	 */
 	if (net->sctp.pf_enable &&
 	    (transport->state == SCTP_ACTIVE) &&
-	    (asoc->pf_retrans < transport->pathmaxrxt) &&
+	    (transport->error_count < transport->pathmaxrxt) &&
 	    (transport->error_count > asoc->pf_retrans)) {

 		sctp_assoc_control_transport(asoc, transport,

net/sctp/stream.c
@@ -416,6 +416,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
 		nstr_list[i] = htons(str_list[i]);

 	if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
+		kfree(nstr_list);
 		retval = -EAGAIN;
 		goto out;
 	}

net/tipc/addr.c
@@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
 		tipc_set_node_id(net, node_id);
 	}
 	tn->trial_addr = addr;
+	tn->addr_trial_end = jiffies;
 	pr_info("32-bit node address hash set to %x\n", addr);
 }

scripts/Kconfig.include
@@ -20,7 +20,7 @@ success = $(if-success,$(1),y,n)

 # $(cc-option,<flag>)
 # Return y if the compiler supports <flag>, n otherwise
-cc-option = $(success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null)
+cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null)

 # $(ld-option,<flag>)
 # Return y if the linker supports <flag>, n otherwise

scripts/Makefile.modpost
@@ -75,7 +75,7 @@ modpost = scripts/mod/modpost \
 	$(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \
 	$(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
 	$(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
-	$(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
+	$(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
 	$(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
 	$(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \
 	$(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \

sound/pci/hda/hda_generic.c
@@ -6033,6 +6033,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
 }
 EXPORT_SYMBOL_GPL(snd_hda_gen_free);

+/**
+ * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
+ * @codec: the HDA codec
+ *
+ * This can be put as patch_ops reboot_notify function.
+ */
+void snd_hda_gen_reboot_notify(struct hda_codec *codec)
+{
+	/* Make the codec enter D3 to avoid spurious noises from the internal
+	 * speaker during (and after) reboot
+	 */
+	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+	snd_hda_codec_write(codec, codec->core.afg, 0,
+			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+	msleep(10);
+}
+EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
+
 #ifdef CONFIG_PM
 /**
  * snd_hda_gen_check_power_status - check the loopback power save state
@@ -6060,6 +6078,7 @@ static const struct hda_codec_ops generic_patch_ops = {
 	.init = snd_hda_gen_init,
 	.free = snd_hda_gen_free,
 	.unsol_event = snd_hda_jack_unsol_event,
+	.reboot_notify = snd_hda_gen_reboot_notify,
 #ifdef CONFIG_PM
 	.check_power_status = snd_hda_gen_check_power_status,
 #endif
@@ -6082,7 +6101,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)

 	err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
 	if (err < 0)
-		return err;
+		goto error;

 	err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
 	if (err < 0)

sound/pci/hda/hda_generic.h
@@ -336,6 +336,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
 				  struct auto_pin_cfg *cfg);
 int snd_hda_gen_build_controls(struct hda_codec *codec);
 int snd_hda_gen_build_pcms(struct hda_codec *codec);
+void snd_hda_gen_reboot_notify(struct hda_codec *codec);

 /* standard jack event callbacks */
 void snd_hda_gen_hp_automute(struct hda_codec *codec,

sound/pci/hda/hda_intel.c
@@ -2655,6 +2655,9 @@ static const struct pci_device_id azx_ids[] = {
 	/* AMD, X370 & co */
 	{ PCI_DEVICE(0x1022, 0x1457),
 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
+	/* AMD, X570 & co */
+	{ PCI_DEVICE(0x1022, 0x1487),
+	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
 	/* AMD Stoney */
 	{ PCI_DEVICE(0x1022, 0x157a),
 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |

sound/pci/hda/patch_conexant.c
@@ -176,23 +176,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
 {
 	struct conexant_spec *spec = codec->spec;

-	switch (codec->core.vendor_id) {
-	case 0x14f12008: /* CX8200 */
-	case 0x14f150f2: /* CX20722 */
-	case 0x14f150f4: /* CX20724 */
-		break;
-	default:
-		return;
-	}
-
 	/* Turn the problematic codec into D3 to avoid spurious noises
 	   from the internal speaker during (and after) reboot */
 	cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
-
-	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
-	snd_hda_codec_write(codec, codec->core.afg, 0,
-			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-	msleep(10);
+	snd_hda_gen_reboot_notify(codec);
 }

 static void cx_auto_free(struct hda_codec *codec)

sound/pci/hda/patch_realtek.c
@@ -868,15 +868,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
 		alc_shutup(codec);
 }

-/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
-static void alc_d3_at_reboot(struct hda_codec *codec)
-{
-	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
-	snd_hda_codec_write(codec, codec->core.afg, 0,
-			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-	msleep(10);
-}
-
 #define alc_free	snd_hda_gen_free

 #ifdef CONFIG_PM
@@ -5111,7 +5102,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
 	struct alc_spec *spec = codec->spec;

 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
-		spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
+		spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
 		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
 		codec->power_save_node = 0; /* avoid click noises */
 		snd_hda_apply_pincfgs(codec, pincfgs);
@@ -6851,6 +6842,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+	SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),

sound/usb/mixer.c
@@ -83,6 +83,7 @@ struct mixer_build {
 	unsigned char *buffer;
 	unsigned int buflen;
 	DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
+	DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
 	struct usb_audio_term oterm;
 	const struct usbmix_name_map *map;
 	const struct usbmix_selector_map *selector_map;
@@ -759,6 +760,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
 		return -EINVAL;
 	if (!desc->bNrInPins)
 		return -EINVAL;
+	if (desc->bLength < sizeof(*desc) + desc->bNrInPins)
+		return -EINVAL;

 	switch (state->mixer->protocol) {
 	case UAC_VERSION_1:
@@ -788,16 +791,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
  * parse the source unit recursively until it reaches to a terminal
  * or a branched unit.
  */
-static int check_input_term(struct mixer_build *state, int id,
+static int __check_input_term(struct mixer_build *state, int id,
 			    struct usb_audio_term *term)
 {
 	int protocol = state->mixer->protocol;
 	int err;
 	void *p1;
+	unsigned char *hdr;

-	memset(term, 0, sizeof(*term));
-	while ((p1 = find_audio_control_unit(state, id)) != NULL) {
-		unsigned char *hdr = p1;
+	for (;;) {
+		/* a loop in the terminal chain? */
+		if (test_and_set_bit(id, state->termbitmap))
+			return -EINVAL;
+
+		p1 = find_audio_control_unit(state, id);
+		if (!p1)
+			break;
+
+		hdr = p1;
 		term->id = id;

 		if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) {
@@ -815,7 +827,7 @@ static int check_input_term(struct mixer_build *state, int id,

 			/* call recursively to verify that the
 			 * referenced clock entity is valid */
-			err = check_input_term(state, d->bCSourceID, term);
+			err = __check_input_term(state, d->bCSourceID, term);
 			if (err < 0)
 				return err;
@@ -849,7 +861,7 @@ static int check_input_term(struct mixer_build *state, int id,
 		case UAC2_CLOCK_SELECTOR: {
 			struct uac_selector_unit_descriptor *d = p1;
 			/* call recursively to retrieve the channel info */
-			err = check_input_term(state, d->baSourceID[0], term);
+			err = __check_input_term(state, d->baSourceID[0], term);
 			if (err < 0)
 				return err;
 			term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -912,7 +924,7 @@ static int check_input_term(struct mixer_build *state, int id,

 			/* call recursively to verify that the
 			 * referenced clock entity is valid */
-			err = check_input_term(state, d->bCSourceID, term);
+			err = __check_input_term(state, d->bCSourceID, term);
 			if (err < 0)
 				return err;
@@ -963,7 +975,7 @@ static int check_input_term(struct mixer_build *state, int id,
 		case UAC3_CLOCK_SELECTOR: {
 			struct uac_selector_unit_descriptor *d = p1;
 			/* call recursively to retrieve the channel info */
-			err = check_input_term(state, d->baSourceID[0], term);
+			err = __check_input_term(state, d->baSourceID[0], term);
 			if (err < 0)
 				return err;
 			term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -979,7 +991,7 @@ static int check_input_term(struct mixer_build *state, int id,
 				return -EINVAL;

 			/* call recursively to retrieve the channel info */
-			err = check_input_term(state, d->baSourceID[0], term);
+			err = __check_input_term(state, d->baSourceID[0], term);
 			if (err < 0)
 				return err;
@@ -997,6 +1009,15 @@ static int check_input_term(struct mixer_build *state, int id,
 	return -ENODEV;
 }

+
+static int check_input_term(struct mixer_build *state, int id,
+			    struct usb_audio_term *term)
+{
+	memset(term, 0, sizeof(*term));
+	memset(state->termbitmap, 0, sizeof(state->termbitmap));
+	return __check_input_term(state, id, term);
+}
+
 /*
  * Feature Unit
  */

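The termbitmap turns the recursive descent over unit descriptors into a cycle-safe walk: every visited unit id is marked once, and seeing an id twice aborts with -EINVAL instead of recursing forever on a crafted descriptor chain. A self-contained sketch of the idea on a toy unit graph (illustrative only, not the driver code):

#include <stdio.h>
#include <string.h>

#define MAX_ID 64

static int next_unit[MAX_ID];	/* toy "source unit" chain; -1 = terminal */

static int walk(int id, unsigned char *seen)
{
	for (;;) {
		if (seen[id]++)		/* a loop in the chain? */
			return -1;	/* -EINVAL in the driver */
		if (next_unit[id] < 0)
			return id;	/* reached a terminal */
		id = next_unit[id];
	}
}

int main(void)
{
	unsigned char seen[MAX_ID];

	memset(next_unit, -1, sizeof(next_unit));
	next_unit[1] = 2;
	next_unit[2] = 1;	/* malicious descriptor: 1 -> 2 -> 1 */

	memset(seen, 0, sizeof(seen));
	printf("walk(1) = %d\n", walk(1, seen));	/* -1: cycle detected */
	return 0;
}
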
tools/perf/util/header.c
@@ -3285,6 +3285,13 @@ int perf_session__read_header(struct perf_session *session)
 			   data->file.path);
 	}

+	if (f_header.attr_size == 0) {
+		pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
+		       "Was the 'perf record' command properly terminated?\n",
+		       data->file.path);
+		return -EINVAL;
+	}
+
 	nr_attrs = f_header.attrs.size / f_header.attr_size;
 	lseek(fd, f_header.attrs.offset, SEEK_SET);

@@ -3365,7 +3372,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
 	size += sizeof(struct perf_event_header);
 	size += ids * sizeof(u64);

-	ev = malloc(size);
+	ev = zalloc(size);

 	if (ev == NULL)
 		return -ENOMEM;

virt/kvm/arm/arm.c
@@ -338,6 +338,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
 	kvm_timer_schedule(vcpu);
+	/*
+	 * If we're about to block (most likely because we've just hit a
+	 * WFI), we need to sync back the state of the GIC CPU interface
+	 * so that we have the lastest PMR and group enables. This ensures
+	 * that kvm_arch_vcpu_runnable has up-to-date data to decide
+	 * whether we have pending interrupts.
+	 */
+	preempt_disable();
+	kvm_vgic_vmcr_sync(vcpu);
+	preempt_enable();
+
 	kvm_vgic_v4_enable_doorbell(vcpu);
 }

virt/kvm/arm/vgic/vgic-v2.c
@@ -495,10 +495,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
 		       kvm_vgic_global_state.vctrl_base + GICH_APR);
 }

-void vgic_v2_put(struct kvm_vcpu *vcpu)
+void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

 	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
+}
+
+void vgic_v2_put(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+
+	vgic_v2_vmcr_sync(vcpu);
 	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
 }

virt/kvm/arm/vgic/vgic-v3.c
@@ -674,12 +674,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 		__vgic_v3_activate_traps(vcpu);
 }

-void vgic_v3_put(struct kvm_vcpu *vcpu)
+void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

 	if (likely(cpu_if->vgic_sre))
 		cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
+}
+
+void vgic_v3_put(struct kvm_vcpu *vcpu)
+{
+	vgic_v3_vmcr_sync(vcpu);

 	kvm_call_hyp(__vgic_v3_save_aprs, vcpu);

virt/kvm/arm/vgic/vgic.c
@@ -902,6 +902,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
 		vgic_v3_put(vcpu);
 }

+void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
+{
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+		return;
+
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_vmcr_sync(vcpu);
+	else
+		vgic_v3_vmcr_sync(vcpu);
+}
+
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

virt/kvm/arm/vgic/vgic.h
@@ -204,6 +204,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
 void vgic_v2_init_lrs(void);
 void vgic_v2_load(struct kvm_vcpu *vcpu);
 void vgic_v2_put(struct kvm_vcpu *vcpu);
+void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);

 void vgic_v2_save_state(struct kvm_vcpu *vcpu);
 void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
@@ -234,6 +235,7 @@ bool vgic_v3_check_base(struct kvm *kvm);

 void vgic_v3_load(struct kvm_vcpu *vcpu);
 void vgic_v3_put(struct kvm_vcpu *vcpu);
+void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);

 bool vgic_has_its(struct kvm *kvm);
 int kvm_vgic_register_its_device(void);