This is the 4.19.92 stable release
-----BEGIN PGP SIGNATURE----- iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl4La9gACgkQONu9yGCS aT6hlA//TDpj9rdEwkaKyg/Ge4TCOJSOiwlp2/5lg2Sroiuizz527hVybGOOYAHl gMA2Syt73PWStyfgl5B3AimcBvPADX8h/b1KiSoIdHFkq5rPFyneB6aEj+5jSK1V 63UnnTV0T49wt0Jvs6nN0FxI4ZCXbfjzaSVz4BGIflz6h9UUkPAu91CJTKtPmrAp pliH20cMOykxyS/KfKa6zDcpIfU0k+DxL5U0Y5F1YRDKc1iPg8e6I3cNLgwKSja6 21BgdoTyZdvbC85HxSY7V6Dswp4YQPBY3y8crp8npZ9apbYV7eNU3L1+WVQvxpFg kahhyjalqwqkKq+cTEsIFj7cjPksSlH/qytTS+lnN3BScXbFPp8GdzIazhQNSCv3 S/7T51CcvNoVcs9Qeu+nwyvx+H1LH4MYO4C7RYWZhPnMcA+/MxvT5WXNKfjf2ekM N5h8xNATllzDuDkX+zVwW8i80SCyhVqQIKbXLn8ugGYW3G5TNdy8Ysh0kdrq26Y+ LAELsbQhK/Kt8WF+XNBpb9LLbeUGn1GTwhnbEuD7IKI+bVxnmsGk8QUu3h+a9xFh lI7bsj8Ku9T+59/9xqAnoStEto+0tdTPB9Cx1jNdWlLiVdkewiDKiUbloFpDFS1n L3SvqB68DC/IznQcK970g3aIx9zbkb2KZRdj2Fu7apaY5D9q85I= =W+5k -----END PGP SIGNATURE----- Merge 4.19.92 into android-4.19-q Changes in 4.19.92 af_packet: set defaule value for tmo fjes: fix missed check in fjes_acpi_add mod_devicetable: fix PHY module format net: dst: Force 4-byte alignment of dst_metrics net: gemini: Fix memory leak in gmac_setup_txqs net: hisilicon: Fix a BUG trigered by wrong bytes_compl net: nfc: nci: fix a possible sleep-in-atomic-context bug in nci_uart_tty_receive() net: qlogic: Fix error paths in ql_alloc_large_buffers() net: usb: lan78xx: Fix suspend/resume PHY register access error qede: Disable hardware gro when xdp prog is installed qede: Fix multicast mac configuration sctp: fully initialize v4 addr in some functions selftests: forwarding: Delete IPv6 address at the end btrfs: don't double lock the subvol_sem for rename exchange btrfs: do not call synchronize_srcu() in inode_tree_del Btrfs: fix missing data checksums after replaying a log tree btrfs: send: remove WARN_ON for readonly mount btrfs: abort transaction after failed inode updates in create_subvol btrfs: skip log replay on orphaned roots btrfs: do not leak reloc root if we fail to read the fs root btrfs: handle ENOENT in btrfs_uuid_tree_iterate Btrfs: fix removal logic of the tree mod log that leads to use-after-free issues ALSA: pcm: Avoid possible info leaks from PCM stream buffers ALSA: hda/ca0132 - Keep power on during processing DSP response ALSA: hda/ca0132 - Avoid endless loop ALSA: hda/ca0132 - Fix work handling in delayed HP detection drm: mst: Fix query_payload ack reply struct drm/panel: Add missing drm_panel_init() in panel drivers drm/bridge: analogix-anx78xx: silence -EPROBE_DEFER warnings iio: light: bh1750: Resolve compiler warning and make code more readable drm/amdgpu: grab the id mgr lock while accessing passid_mapping spi: Add call to spi_slave_abort() function when spidev driver is released staging: rtl8192u: fix multiple memory leaks on error path staging: rtl8188eu: fix possible null dereference rtlwifi: prevent memory leak in rtl_usb_probe libertas: fix a potential NULL pointer dereference ath10k: fix backtrace on coredump IB/iser: bound protection_sg size by data_sg size media: am437x-vpfe: Setting STD to current value is not an error media: i2c: ov2659: fix s_stream return value media: ov6650: Fix crop rectangle alignment not passed back media: i2c: ov2659: Fix missing 720p register config media: ov6650: Fix stored frame format not in sync with hardware media: ov6650: Fix stored crop rectangle not in sync with hardware tools/power/cpupower: Fix initializer override in hsw_ext_cstates media: venus: core: Fix msm8996 frequency table ath10k: fix offchannel tx failure when no ath10k_mac_tx_frm_has_freq pinctrl: devicetree: Avoid taking direct 
reference to device name string drm/amdkfd: fix a potential NULL pointer dereference (v2) selftests/bpf: Correct path to include msg + path media: venus: Fix occasionally failures to suspend usb: renesas_usbhs: add suspend event support in gadget mode hwrng: omap3-rom - Call clk_disable_unprepare() on exit only if not idled regulator: max8907: Fix the usage of uninitialized variable in max8907_regulator_probe() media: flexcop-usb: fix NULL-ptr deref in flexcop_usb_transfer_init() media: cec-funcs.h: add status_req checks drm/bridge: dw-hdmi: Refuse DDC/CI transfers on the internal I2C controller samples: pktgen: fix proc_cmd command result check logic block: Fix writeback throttling W=1 compiler warnings mwifiex: pcie: Fix memory leak in mwifiex_pcie_init_evt_ring drm/drm_vblank: Change EINVAL by the correct errno media: cx88: Fix some error handling path in 'cx8800_initdev()' media: ti-vpe: vpe: Fix Motion Vector vpdma stride media: ti-vpe: vpe: fix a v4l2-compliance warning about invalid pixel format media: ti-vpe: vpe: fix a v4l2-compliance failure about frame sequence number media: ti-vpe: vpe: Make sure YUYV is set as default format media: ti-vpe: vpe: fix a v4l2-compliance failure causing a kernel panic media: ti-vpe: vpe: ensure buffers are cleaned up properly in abort cases media: ti-vpe: vpe: fix a v4l2-compliance failure about invalid sizeimage syscalls/x86: Use the correct function type in SYSCALL_DEFINE0 drm/amd/display: Fix dongle_caps containing stale information. extcon: sm5502: Reset registers during initialization x86/mm: Use the correct function type for native_set_fixmap() ath10k: Correct error handling of dma_map_single() drm/bridge: dw-hdmi: Restore audio when setting a mode perf test: Report failure for mmap events perf report: Add warning when libunwind not compiled in usb: usbfs: Suppress problematic bind and unbind uevents. 
iio: adc: max1027: Reset the device at probe time Bluetooth: missed cpu_to_le16 conversion in hci_init4_req Bluetooth: Workaround directed advertising bug in Broadcom controllers Bluetooth: hci_core: fix init for HCI_USER_CHANNEL bpf/stackmap: Fix deadlock with rq_lock in bpf_get_stack() x86/mce: Lower throttling MCE messages' priority to warning perf tests: Disable bp_signal testing for arm64 drm/gma500: fix memory disclosures due to uninitialized bytes rtl8xxxu: fix RTL8723BU connection failure issue after warm reboot ipmi: Don't allow device module unload when in use x86/ioapic: Prevent inconsistent state when moving an interrupt media: smiapp: Register sensor after enabling runtime PM on the device md/bitmap: avoid race window between md_bitmap_resize and bitmap_file_clear_bit arm64: psci: Reduce the waiting time for cpu_psci_cpu_kill() i40e: initialize ITRN registers with correct values net: phy: dp83867: enable robust auto-mdix drm/tegra: sor: Use correct SOR index on Tegra210 spi: sprd: adi: Add missing lock protection when rebooting ACPI: button: Add DMI quirk for Medion Akoya E2215T RDMA/qedr: Fix memory leak in user qp and mr gpu: host1x: Allocate gather copy for host1x net: dsa: LAN9303: select REGMAP when LAN9303 enable phy: qcom-usb-hs: Fix extcon double register after power cycle s390/time: ensure get_clock_monotonic() returns monotonic values s390/mm: add mm_pxd_folded() checks to pxd_free() net: hns3: add struct netdev_queue debug info for TX timeout libata: Ensure ata_port probe has completed before detach loop: fix no-unmap write-zeroes request behavior pinctrl: sh-pfc: sh7734: Fix duplicate TCLK1_B iio: dln2-adc: fix iio_triggered_buffer_postenable() position libbpf: Fix error handling in bpf_map__reuse_fd() Bluetooth: Fix advertising duplicated flags pinctrl: amd: fix __iomem annotation in amd_gpio_irq_handler() ixgbe: protect TX timestamping from API misuse media: rcar_drif: fix a memory disclosure media: v4l2-core: fix touch support in v4l_g_fmt nvmem: imx-ocotp: reset error status on probe rfkill: allocate static minor bnx2x: Fix PF-VF communication over multi-cos queues. 
spi: img-spfi: fix potential double release ALSA: timer: Limit max amount of slave instances rtlwifi: fix memory leak in rtl92c_set_fw_rsvdpagepkt() perf probe: Fix to find range-only function instance perf probe: Fix to list probe event with correct line number perf jevents: Fix resource leak in process_mapfile() and main() perf probe: Walk function lines in lexical blocks perf probe: Fix to probe an inline function which has no entry pc perf probe: Fix to show ranges of variables in functions without entry_pc perf probe: Fix to show inlined function callsite without entry_pc libsubcmd: Use -O0 with DEBUG=1 perf probe: Fix to probe a function which has no entry pc perf tools: Splice events onto evlist even on error drm/amdgpu: disallow direct upload save restore list from gfx driver drm/amdgpu: fix potential double drop fence reference xen/gntdev: Use select for DMA_SHARED_BUFFER perf parse: If pmu configuration fails free terms perf probe: Skip overlapped location on searching variables perf probe: Return a better scope DIE if there is no best scope perf probe: Fix to show calling lines of inlined functions perf probe: Skip end-of-sequence and non statement lines perf probe: Filter out instances except for inlined subroutine and subprogram ath10k: fix get invalid tx rate for Mesh metric fsi: core: Fix small accesses and unaligned offsets via sysfs media: pvrusb2: Fix oops on tear-down when radio support is not present soundwire: intel: fix PDI/stream mapping for Bulk crypto: atmel - Fix authenc support when it is set to m ice: delay less media: si470x-i2c: add missed operations in remove EDAC/ghes: Fix grain calculation spi: pxa2xx: Add missed security checks ASoC: rt5677: Mark reg RT5677_PWR_ANLG2 as volatile iio: dac: ad5446: Add support for new AD5600 DAC ASoC: Intel: kbl_rt5663_rt5514_max98927: Add dmic format constraint s390/disassembler: don't hide instruction addresses nvme: Discard workaround for non-conformant devices parport: load lowlevel driver if ports not found bcache: fix static checker warning in bcache_device_free() cpufreq: Register drivers only after CPU devices have been registered x86/crash: Add a forward declaration of struct kimage tracing: use kvcalloc for tgid_map array allocation tracing/kprobe: Check whether the non-suffixed symbol is notrace bcache: fix deadlock in bcache_allocator iwlwifi: mvm: fix unaligned read of rx_pkt_status ASoC: wm8904: fix regcache handling spi: tegra20-slink: add missed clk_unprepare tun: fix data-race in gro_normal_list() crypto: virtio - deal with unsupported input sizes mmc: tmio: Add MMC_CAP_ERASE to allow erase/discard/trim requests btrfs: don't prematurely free work in end_workqueue_fn() btrfs: don't prematurely free work in run_ordered_work() ASoC: wm2200: add missed operations in remove and probe failure spi: st-ssc4: add missed pm_runtime_disable ASoC: wm5100: add missed pm_runtime_disable ASoC: Intel: bytcr_rt5640: Update quirk for Acer Switch 10 SW5-012 2-in-1 x86/insn: Add some Intel instructions to the opcode map brcmfmac: remove monitor interface when detaching iwlwifi: check kasprintf() return value fbtft: Make sure string is NULL terminated net: ethernet: ti: ale: clean ale tbl on init and intf restart crypto: sun4i-ss - Fix 64-bit size_t warnings crypto: sun4i-ss - Fix 64-bit size_t warnings on sun4i-ss-hash.c mac80211: consider QoS Null frames for STA_NULLFUNC_ACKED crypto: vmx - Avoid weird build failures libtraceevent: Fix memory leakage in copy_filter_type mips: fix build when "48 bits virtual memory" is enabled 
drm/amdgpu: fix bad DMA from INTERRUPT_CNTL2 net: phy: initialise phydev speed and duplex sanely btrfs: don't prematurely free work in reada_start_machine_worker() btrfs: don't prematurely free work in scrub_missing_raid56_worker() Revert "mmc: sdhci: Fix incorrect switch to HS mode" mmc: mediatek: fix CMD_TA to 2 for MT8173 HS200/HS400 mode can: kvaser_usb: kvaser_usb_leaf: Fix some info-leaks to USB devices usb: xhci: Fix build warning seen with CONFIG_PM=n drm/amdgpu: fix uninitialized variable pasid_mapping_needed s390/ftrace: fix endless recursion in function_graph tracer btrfs: return error pointer from alloc_test_extent_buffer usbip: Fix receive error in vhci-hcd when using scatter-gather usbip: Fix error path of vhci_recv_ret_submit() cpufreq: Avoid leaving stale IRQ work items during CPU offline USB: EHCI: Do not return -EPIPE when hub is disconnected intel_th: pci: Add Comet Lake PCH-V support intel_th: pci: Add Elkhart Lake SOC support platform/x86: hp-wmi: Make buffer for HPWMI_FEATURE2_QUERY 128 bytes staging: comedi: gsc_hpdi: check dma_alloc_coherent() return value ext4: fix ext4_empty_dir() for directories with holes ext4: check for directory entries too close to block end ext4: unlock on error in ext4_expand_extra_isize() KVM: arm64: Ensure 'params' is initialised when looking up sys register x86/MCE/AMD: Do not use rdmsr_safe_on_cpu() in smca_configure() x86/MCE/AMD: Allow Reserved types to be overwritten in smca_banks[] powerpc/vcpu: Assume dedicated processors as non-preempt powerpc/irq: fix stack overflow verification mmc: sdhci-msm: Correct the offset and value for DDR_CONFIG register mmc: sdhci-of-esdhc: Revert "mmc: sdhci-of-esdhc: add erratum A-009204 support" mmc: sdhci: Update the tuning failed messages to pr_debug level mmc: sdhci-of-esdhc: fix P2020 errata handling mmc: sdhci: Workaround broken command queuing on Intel GLK mmc: sdhci: Add a quirk for broken command queuing nbd: fix shutdown and recv work deadlock v2 perf probe: Fix to show function entry line as probe-able Linux 4.19.92 Signed-off-by: Greg Kroah-Hartman <gregkh@google.com> Change-Id: If5b3537932bcabaffceef591cf1a4c1b71d7874e
This commit is contained in:
commit
3e66813e88
212 changed files with 1397 additions and 510 deletions
2
Makefile
2
Makefile
|
@ -1,7 +1,7 @@
|
|||
# SPDX-License-Identifier: GPL-2.0
|
||||
VERSION = 4
|
||||
PATCHLEVEL = 19
|
||||
SUBLEVEL = 91
|
||||
SUBLEVEL = 92
|
||||
EXTRAVERSION =
|
||||
NAME = "People's Front"
|
||||
|
||||
|
|
|
@ -84,7 +84,8 @@ static void cpu_psci_cpu_die(unsigned int cpu)
|
|||
|
||||
static int cpu_psci_cpu_kill(unsigned int cpu)
|
||||
{
|
||||
int err, i;
|
||||
int err;
|
||||
unsigned long start, end;
|
||||
|
||||
if (!psci_ops.affinity_info)
|
||||
return 0;
|
||||
|
@ -94,16 +95,18 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
|
|||
* while it is dying. So, try again a few times.
|
||||
*/
|
||||
|
||||
for (i = 0; i < 10; i++) {
|
||||
start = jiffies;
|
||||
end = start + msecs_to_jiffies(100);
|
||||
do {
|
||||
err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
|
||||
if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
|
||||
pr_info("CPU%d killed.\n", cpu);
|
||||
pr_info("CPU%d killed (polled %d ms)\n", cpu,
|
||||
jiffies_to_msecs(jiffies - start));
|
||||
return 0;
|
||||
}
|
||||
|
||||
msleep(10);
|
||||
pr_info("Retrying again to check for CPU kill\n");
|
||||
}
|
||||
usleep_range(100, 1000);
|
||||
} while (time_before(jiffies, end));
|
||||
|
||||
pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
|
||||
cpu, err);
|
||||
|
|
|
@ -2174,8 +2174,11 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
|
|||
if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
|
||||
return NULL;
|
||||
|
||||
if (!index_to_params(id, ¶ms))
|
||||
return NULL;
|
||||
|
||||
table = get_target_table(vcpu->arch.target, true, &num);
|
||||
r = find_reg_by_id(id, ¶ms, table, num);
|
||||
r = find_reg(¶ms, table, num);
|
||||
if (!r)
|
||||
r = find_reg(¶ms, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
|
||||
|
||||
|
|
|
@ -18,10 +18,12 @@
|
|||
#include <asm/fixmap.h>
|
||||
|
||||
#define __ARCH_USE_5LEVEL_HACK
|
||||
#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
|
||||
#if CONFIG_PGTABLE_LEVELS == 2
|
||||
#include <asm-generic/pgtable-nopmd.h>
|
||||
#elif !(defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_MIPS_VA_BITS_48))
|
||||
#elif CONFIG_PGTABLE_LEVELS == 3
|
||||
#include <asm-generic/pgtable-nopud.h>
|
||||
#else
|
||||
#include <asm-generic/5level-fixup.h>
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -216,6 +218,9 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
|
|||
return pgd_val(pgd);
|
||||
}
|
||||
|
||||
#define pgd_phys(pgd) virt_to_phys((void *)pgd_val(pgd))
|
||||
#define pgd_page(pgd) (pfn_to_page(pgd_phys(pgd) >> PAGE_SHIFT))
|
||||
|
||||
static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
|
||||
{
|
||||
return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
|
||||
|
|
|
@ -53,10 +53,12 @@
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_PPC_PSERIES
|
||||
DECLARE_STATIC_KEY_FALSE(shared_processor);
|
||||
|
||||
#define vcpu_is_preempted vcpu_is_preempted
|
||||
static inline bool vcpu_is_preempted(int cpu)
|
||||
{
|
||||
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
|
||||
if (!static_branch_unlikely(&shared_processor))
|
||||
return false;
|
||||
return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
|
||||
}
|
||||
|
|
|
@ -634,8 +634,6 @@ void __do_irq(struct pt_regs *regs)
|
|||
|
||||
trace_irq_entry(regs);
|
||||
|
||||
check_stack_overflow();
|
||||
|
||||
/*
|
||||
* Query the platform PIC for the interrupt & ack it.
|
||||
*
|
||||
|
@ -667,6 +665,8 @@ void do_IRQ(struct pt_regs *regs)
|
|||
irqtp = hardirq_ctx[raw_smp_processor_id()];
|
||||
sirqtp = softirq_ctx[raw_smp_processor_id()];
|
||||
|
||||
check_stack_overflow();
|
||||
|
||||
/* Already there ? */
|
||||
if (unlikely(curtp == irqtp || curtp == sirqtp)) {
|
||||
__do_irq(regs);
|
||||
|
|
|
@ -75,6 +75,9 @@
|
|||
#include "pseries.h"
|
||||
#include "../../../../drivers/pci/pci.h"
|
||||
|
||||
DEFINE_STATIC_KEY_FALSE(shared_processor);
|
||||
EXPORT_SYMBOL_GPL(shared_processor);
|
||||
|
||||
int CMO_PrPSP = -1;
|
||||
int CMO_SecPSP = -1;
|
||||
unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
|
||||
|
@ -761,6 +764,10 @@ static void __init pSeries_setup_arch(void)
|
|||
|
||||
if (firmware_has_feature(FW_FEATURE_LPAR)) {
|
||||
vpa_init(boot_cpuid);
|
||||
|
||||
if (lppaca_shared_proc(get_lppaca()))
|
||||
static_branch_enable(&shared_processor);
|
||||
|
||||
ppc_md.power_save = pseries_lpar_idle;
|
||||
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
|
||||
#ifdef CONFIG_PCI_IOV
|
||||
|
|
|
@ -56,7 +56,12 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
|
|||
crst_table_init(table, _REGION2_ENTRY_EMPTY);
|
||||
return (p4d_t *) table;
|
||||
}
|
||||
#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)
|
||||
|
||||
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
|
||||
{
|
||||
if (!mm_p4d_folded(mm))
|
||||
crst_table_free(mm, (unsigned long *) p4d);
|
||||
}
|
||||
|
||||
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
|
||||
{
|
||||
|
@ -65,7 +70,12 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
|
|||
crst_table_init(table, _REGION3_ENTRY_EMPTY);
|
||||
return (pud_t *) table;
|
||||
}
|
||||
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
|
||||
|
||||
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
|
||||
{
|
||||
if (!mm_pud_folded(mm))
|
||||
crst_table_free(mm, (unsigned long *) pud);
|
||||
}
|
||||
|
||||
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
|
||||
{
|
||||
|
@ -83,6 +93,8 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
|
|||
|
||||
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
|
||||
{
|
||||
if (mm_pmd_folded(mm))
|
||||
return;
|
||||
pgtable_pmd_page_dtor(virt_to_page(pmd));
|
||||
crst_table_free(mm, (unsigned long *) pmd);
|
||||
}
|
||||
|
|
|
@ -10,8 +10,9 @@
|
|||
#ifndef _ASM_S390_TIMEX_H
|
||||
#define _ASM_S390_TIMEX_H
|
||||
|
||||
#include <asm/lowcore.h>
|
||||
#include <linux/preempt.h>
|
||||
#include <linux/time64.h>
|
||||
#include <asm/lowcore.h>
|
||||
|
||||
/* The value of the TOD clock for 1.1.1970. */
|
||||
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
|
||||
|
@ -186,15 +187,18 @@ extern unsigned char tod_clock_base[16] __aligned(8);
|
|||
/**
|
||||
* get_clock_monotonic - returns current time in clock rate units
|
||||
*
|
||||
* The caller must ensure that preemption is disabled.
|
||||
* The clock and tod_clock_base get changed via stop_machine.
|
||||
* Therefore preemption must be disabled when calling this
|
||||
* function, otherwise the returned value is not guaranteed to
|
||||
* be monotonic.
|
||||
* Therefore preemption must be disabled, otherwise the returned
|
||||
* value is not guaranteed to be monotonic.
|
||||
*/
|
||||
static inline unsigned long long get_tod_clock_monotonic(void)
|
||||
{
|
||||
return get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
|
||||
unsigned long long tod;
|
||||
|
||||
preempt_disable_notrace();
|
||||
tod = get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
|
||||
preempt_enable_notrace();
|
||||
return tod;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -462,10 +462,11 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
|
|||
ptr += sprintf(ptr, "%%c%i", value);
|
||||
else if (operand->flags & OPERAND_VR)
|
||||
ptr += sprintf(ptr, "%%v%i", value);
|
||||
else if (operand->flags & OPERAND_PCREL)
|
||||
ptr += sprintf(ptr, "%lx", (signed int) value
|
||||
+ addr);
|
||||
else if (operand->flags & OPERAND_SIGNED)
|
||||
else if (operand->flags & OPERAND_PCREL) {
|
||||
void *pcrel = (void *)((int)value + addr);
|
||||
|
||||
ptr += sprintf(ptr, "%px", pcrel);
|
||||
} else if (operand->flags & OPERAND_SIGNED)
|
||||
ptr += sprintf(ptr, "%i", value);
|
||||
else
|
||||
ptr += sprintf(ptr, "%u", value);
|
||||
|
@ -537,7 +538,7 @@ void show_code(struct pt_regs *regs)
|
|||
else
|
||||
*ptr++ = ' ';
|
||||
addr = regs->psw.addr + start - 32;
|
||||
ptr += sprintf(ptr, "%016lx: ", addr);
|
||||
ptr += sprintf(ptr, "%px: ", (void *)addr);
|
||||
if (start + opsize >= end)
|
||||
break;
|
||||
for (i = 0; i < opsize; i++)
|
||||
|
@ -565,7 +566,7 @@ void print_fn_code(unsigned char *code, unsigned long len)
|
|||
opsize = insn_length(*code);
|
||||
if (opsize > len)
|
||||
break;
|
||||
ptr += sprintf(ptr, "%p: ", code);
|
||||
ptr += sprintf(ptr, "%px: ", code);
|
||||
for (i = 0; i < opsize; i++)
|
||||
ptr += sprintf(ptr, "%02x", code[i]);
|
||||
*ptr++ = '\t';
|
||||
|
|
|
@ -134,7 +134,7 @@ enum {
|
|||
GPIO_FN_EX_WAIT1, GPIO_FN_SD1_DAT0_A, GPIO_FN_DREQ2, GPIO_FN_CAN1_TX_C,
|
||||
GPIO_FN_ET0_LINK_C, GPIO_FN_ET0_ETXD5_A,
|
||||
GPIO_FN_EX_WAIT0, GPIO_FN_TCLK1_B,
|
||||
GPIO_FN_RD_WR, GPIO_FN_TCLK0,
|
||||
GPIO_FN_RD_WR, GPIO_FN_TCLK0, GPIO_FN_CAN_CLK_B, GPIO_FN_ET0_ETXD4,
|
||||
GPIO_FN_EX_CS5, GPIO_FN_SD1_CMD_A, GPIO_FN_ATADIR, GPIO_FN_QSSL_B,
|
||||
GPIO_FN_ET0_ETXD3_A,
|
||||
GPIO_FN_EX_CS4, GPIO_FN_SD1_WP_A, GPIO_FN_ATAWR, GPIO_FN_QMI_QIO1_B,
|
||||
|
|
|
@ -2,6 +2,8 @@
|
|||
#ifndef _ASM_X86_CRASH_H
|
||||
#define _ASM_X86_CRASH_H
|
||||
|
||||
struct kimage;
|
||||
|
||||
int crash_load_segments(struct kimage *image);
|
||||
int crash_copy_backup_region(struct kimage *image);
|
||||
int crash_setup_memmap_entries(struct kimage *image,
|
||||
|
|
|
@ -182,10 +182,10 @@
|
|||
* macros to work correctly.
|
||||
*/
|
||||
#ifndef SYSCALL_DEFINE0
|
||||
#define SYSCALL_DEFINE0(sname) \
|
||||
SYSCALL_METADATA(_##sname, 0); \
|
||||
#define SYSCALL_DEFINE0(sname) \
|
||||
SYSCALL_METADATA(_##sname, 0); \
|
||||
asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\
|
||||
ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
|
||||
ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
|
||||
asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
|
||||
#endif
|
||||
|
||||
|
|
|
@ -1724,9 +1724,10 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
|
|||
|
||||
static inline bool ioapic_irqd_mask(struct irq_data *data)
|
||||
{
|
||||
/* If we are moving the irq we need to mask it */
|
||||
/* If we are moving the IRQ we need to mask it */
|
||||
if (unlikely(irqd_is_setaffinity_pending(data))) {
|
||||
mask_ioapic_irq(data);
|
||||
if (!irqd_irq_masked(data))
|
||||
mask_ioapic_irq(data);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
@ -1763,7 +1764,9 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
|
|||
*/
|
||||
if (!io_apic_level_ack_pending(data->chip_data))
|
||||
irq_move_masked_irq(data);
|
||||
unmask_ioapic_irq(data);
|
||||
/* If the IRQ is masked in the core, leave it: */
|
||||
if (!irqd_irq_masked(data))
|
||||
unmask_ioapic_irq(data);
|
||||
}
|
||||
}
|
||||
#else
|
||||
|
|
|
@ -228,10 +228,10 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
|
|||
}
|
||||
|
||||
/* Return early if this bank was already initialized. */
|
||||
if (smca_banks[bank].hwid)
|
||||
if (smca_banks[bank].hwid && smca_banks[bank].hwid->hwid_mcatype != 0)
|
||||
return;
|
||||
|
||||
if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
|
||||
if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
|
||||
pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -185,7 +185,7 @@ static void therm_throt_process(bool new_event, int event, int level)
|
|||
/* if we just entered the thermal event */
|
||||
if (new_event) {
|
||||
if (event == THERMAL_THROTTLING_EVENT)
|
||||
pr_crit("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
|
||||
pr_warn("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
|
||||
this_cpu,
|
||||
level == CORE_LEVEL ? "Core" : "Package",
|
||||
state->count);
|
||||
|
|
|
@ -333,7 +333,7 @@ AVXcode: 1
|
|||
06: CLTS
|
||||
07: SYSRET (o64)
|
||||
08: INVD
|
||||
09: WBINVD
|
||||
09: WBINVD | WBNOINVD (F3)
|
||||
0a:
|
||||
0b: UD2 (1B)
|
||||
0c:
|
||||
|
@ -364,7 +364,7 @@ AVXcode: 1
|
|||
# a ModR/M byte.
|
||||
1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
|
||||
1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
|
||||
1c:
|
||||
1c: Grp20 (1A),(1C)
|
||||
1d:
|
||||
1e:
|
||||
1f: NOP Ev
|
||||
|
@ -792,6 +792,8 @@ f3: Grp17 (1A)
|
|||
f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
|
||||
f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
|
||||
f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
|
||||
f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3)
|
||||
f9: MOVDIRI My,Gy
|
||||
EndTable
|
||||
|
||||
Table: 3-byte opcode 2 (0x0f 0x3a)
|
||||
|
@ -943,9 +945,9 @@ GrpTable: Grp6
|
|||
EndTable
|
||||
|
||||
GrpTable: Grp7
|
||||
0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
|
||||
1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
|
||||
2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
|
||||
0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B)
|
||||
1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B)
|
||||
2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B)
|
||||
3: LIDT Ms
|
||||
4: SMSW Mw/Rv
|
||||
5: rdpkru (110),(11B) | wrpkru (111),(11B)
|
||||
|
@ -1020,7 +1022,7 @@ GrpTable: Grp15
|
|||
3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
|
||||
4: XSAVE | ptwrite Ey (F3),(11B)
|
||||
5: XRSTOR | lfence (11B)
|
||||
6: XSAVEOPT | clwb (66) | mfence (11B)
|
||||
6: XSAVEOPT | clwb (66) | mfence (11B) | TPAUSE Rd (66),(11B) | UMONITOR Rv (F3),(11B) | UMWAIT Rd (F2),(11B)
|
||||
7: clflush | clflushopt (66) | sfence (11B)
|
||||
EndTable
|
||||
|
||||
|
@ -1051,6 +1053,10 @@ GrpTable: Grp19
|
|||
6: vscatterpf1qps/d Wx (66),(ev)
|
||||
EndTable
|
||||
|
||||
GrpTable: Grp20
|
||||
0: cldemote Mb
|
||||
EndTable
|
||||
|
||||
# AMD's Prefetch Group
|
||||
GrpTable: GrpP
|
||||
0: PREFETCH
|
||||
|
|
|
@ -660,8 +660,8 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
|
|||
fixmaps_set++;
|
||||
}
|
||||
|
||||
void native_set_fixmap(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys,
|
||||
pgprot_t flags)
|
||||
void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
|
||||
phys_addr_t phys, pgprot_t flags)
|
||||
{
|
||||
/* Sanitize 'prot' against any unsupported bits: */
|
||||
pgprot_val(flags) &= __default_kernel_pte_mask;
|
||||
|
|
|
@ -91,6 +91,17 @@ static const struct dmi_system_id lid_blacklst[] = {
|
|||
DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/*
|
||||
* Medion Akoya E2215T, notification of the LID device only
|
||||
* happens on close, not on open and _LID always returns closed.
|
||||
*/
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "E2215T MD60198"),
|
||||
},
|
||||
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
|
|
|
@ -6726,6 +6726,9 @@ void ata_host_detach(struct ata_host *host)
|
|||
{
|
||||
int i;
|
||||
|
||||
/* Ensure ata_port probe has completed */
|
||||
async_synchronize_full();
|
||||
|
||||
for (i = 0; i < host->n_ports; i++)
|
||||
ata_port_detach(host->ports[i]);
|
||||
|
||||
|
|
|
@ -416,18 +416,20 @@ static int lo_read_transfer(struct loop_device *lo, struct request *rq,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
|
||||
static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
|
||||
int mode)
|
||||
{
|
||||
/*
|
||||
* We use punch hole to reclaim the free space used by the
|
||||
* image a.k.a. discard. However we do not support discard if
|
||||
* encryption is enabled, because it may give an attacker
|
||||
* useful information.
|
||||
* We use fallocate to manipulate the space mappings used by the image
|
||||
* a.k.a. discard/zerorange. However we do not support this if
|
||||
* encryption is enabled, because it may give an attacker useful
|
||||
* information.
|
||||
*/
|
||||
struct file *file = lo->lo_backing_file;
|
||||
int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
|
||||
int ret;
|
||||
|
||||
mode |= FALLOC_FL_KEEP_SIZE;
|
||||
|
||||
if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
|
||||
ret = -EOPNOTSUPP;
|
||||
goto out;
|
||||
|
@ -596,9 +598,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
|
|||
switch (req_op(rq)) {
|
||||
case REQ_OP_FLUSH:
|
||||
return lo_req_flush(lo, rq);
|
||||
case REQ_OP_DISCARD:
|
||||
case REQ_OP_WRITE_ZEROES:
|
||||
return lo_discard(lo, rq, pos);
|
||||
/*
|
||||
* If the caller doesn't want deallocation, call zeroout to
|
||||
* write zeroes the range. Otherwise, punch them out.
|
||||
*/
|
||||
return lo_fallocate(lo, rq, pos,
|
||||
(rq->cmd_flags & REQ_NOUNMAP) ?
|
||||
FALLOC_FL_ZERO_RANGE :
|
||||
FALLOC_FL_PUNCH_HOLE);
|
||||
case REQ_OP_DISCARD:
|
||||
return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
|
||||
case REQ_OP_WRITE:
|
||||
if (lo->transfer)
|
||||
return lo_write_transfer(lo, rq, pos);
|
||||
|
|
|
@ -1247,10 +1247,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
|
|||
mutex_unlock(&nbd->config_lock);
|
||||
ret = wait_event_interruptible(config->recv_wq,
|
||||
atomic_read(&config->recv_threads) == 0);
|
||||
if (ret) {
|
||||
if (ret)
|
||||
sock_shutdown(nbd);
|
||||
flush_workqueue(nbd->recv_workq);
|
||||
}
|
||||
flush_workqueue(nbd->recv_workq);
|
||||
|
||||
mutex_lock(&nbd->config_lock);
|
||||
nbd_bdev_reset(bdev);
|
||||
/* user requested, ignore socket errors */
|
||||
|
|
|
@ -121,7 +121,8 @@ static int omap3_rom_rng_remove(struct platform_device *pdev)
|
|||
{
|
||||
cancel_delayed_work_sync(&idle_work);
|
||||
hwrng_unregister(&omap3_rom_rng_ops);
|
||||
clk_disable_unprepare(rng_clk);
|
||||
if (!rng_idle)
|
||||
clk_disable_unprepare(rng_clk);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -447,6 +447,8 @@ enum ipmi_stat_indexes {
|
|||
|
||||
#define IPMI_IPMB_NUM_SEQ 64
|
||||
struct ipmi_smi {
|
||||
struct module *owner;
|
||||
|
||||
/* What interface number are we? */
|
||||
int intf_num;
|
||||
|
||||
|
@ -1139,6 +1141,11 @@ int ipmi_create_user(unsigned int if_num,
|
|||
if (rv)
|
||||
goto out_kfree;
|
||||
|
||||
if (!try_module_get(intf->owner)) {
|
||||
rv = -ENODEV;
|
||||
goto out_kfree;
|
||||
}
|
||||
|
||||
/* Note that each existing user holds a refcount to the interface. */
|
||||
kref_get(&intf->refcount);
|
||||
|
||||
|
@ -1269,6 +1276,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
|
|||
}
|
||||
|
||||
kref_put(&intf->refcount, intf_free);
|
||||
module_put(intf->owner);
|
||||
}
|
||||
|
||||
int ipmi_destroy_user(struct ipmi_user *user)
|
||||
|
@ -2384,7 +2392,7 @@ static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
|
|||
* been recently fetched, this will just use the cached data. Otherwise
|
||||
* it will run a new fetch.
|
||||
*
|
||||
* Except for the first time this is called (in ipmi_register_smi()),
|
||||
* Except for the first time this is called (in ipmi_add_smi()),
|
||||
* this will always return good data;
|
||||
*/
|
||||
static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
|
||||
|
@ -3304,10 +3312,11 @@ static void redo_bmc_reg(struct work_struct *work)
|
|||
kref_put(&intf->refcount, intf_free);
|
||||
}
|
||||
|
||||
int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
|
||||
void *send_info,
|
||||
struct device *si_dev,
|
||||
unsigned char slave_addr)
|
||||
int ipmi_add_smi(struct module *owner,
|
||||
const struct ipmi_smi_handlers *handlers,
|
||||
void *send_info,
|
||||
struct device *si_dev,
|
||||
unsigned char slave_addr)
|
||||
{
|
||||
int i, j;
|
||||
int rv;
|
||||
|
@ -3333,7 +3342,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
|
|||
return rv;
|
||||
}
|
||||
|
||||
|
||||
intf->owner = owner;
|
||||
intf->bmc = &intf->tmp_bmc;
|
||||
INIT_LIST_HEAD(&intf->bmc->intfs);
|
||||
mutex_init(&intf->bmc->dyn_mutex);
|
||||
|
@ -3440,7 +3449,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
|
|||
|
||||
return rv;
|
||||
}
|
||||
EXPORT_SYMBOL(ipmi_register_smi);
|
||||
EXPORT_SYMBOL(ipmi_add_smi);
|
||||
|
||||
static void deliver_smi_err_response(struct ipmi_smi *intf,
|
||||
struct ipmi_smi_msg *msg,
|
||||
|
|
|
@ -2498,6 +2498,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
|
|||
if (cpufreq_disabled())
|
||||
return -ENODEV;
|
||||
|
||||
/*
|
||||
* The cpufreq core depends heavily on the availability of device
|
||||
* structure, make sure they are available before proceeding further.
|
||||
*/
|
||||
if (!get_cpu_device(0))
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
if (!driver_data || !driver_data->verify || !driver_data->init ||
|
||||
!(driver_data->setpolicy || driver_data->target_index ||
|
||||
driver_data->target) ||
|
||||
|
|
|
@ -148,7 +148,7 @@ struct atmel_aes_xts_ctx {
|
|||
u32 key2[AES_KEYSIZE_256 / sizeof(u32)];
|
||||
};
|
||||
|
||||
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
|
||||
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
|
||||
struct atmel_aes_authenc_ctx {
|
||||
struct atmel_aes_base_ctx base;
|
||||
struct atmel_sha_authenc_ctx *auth;
|
||||
|
@ -160,7 +160,7 @@ struct atmel_aes_reqctx {
|
|||
u32 lastc[AES_BLOCK_SIZE / sizeof(u32)];
|
||||
};
|
||||
|
||||
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
|
||||
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
|
||||
struct atmel_aes_authenc_reqctx {
|
||||
struct atmel_aes_reqctx base;
|
||||
|
||||
|
@ -489,7 +489,7 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
|
|||
return (dd->flags & AES_FLAGS_ENCRYPT);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
|
||||
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
|
||||
static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
|
||||
#endif
|
||||
|
||||
|
@ -518,7 +518,7 @@ static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
|
|||
|
||||
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
|
||||
{
|
||||
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
|
||||
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
|
||||
if (dd->ctx->is_aead)
|
||||
atmel_aes_authenc_complete(dd, err);
|
||||
#endif
|
||||
|
@ -1983,7 +1983,7 @@ static struct crypto_alg aes_xts_alg = {
|
|||
}
|
||||
};
|
||||
|
||||
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
|
||||
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
|
||||
/* authenc aead functions */
|
||||
|
||||
static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
|
||||
|
@ -2470,7 +2470,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
|
|||
{
|
||||
int i;
|
||||
|
||||
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
|
||||
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
|
||||
if (dd->caps.has_authenc)
|
||||
for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
|
||||
crypto_unregister_aead(&aes_authenc_algs[i]);
|
||||
|
@ -2517,7 +2517,7 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
|
|||
goto err_aes_xts_alg;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
|
||||
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
|
||||
if (dd->caps.has_authenc) {
|
||||
for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
|
||||
err = crypto_register_aead(&aes_authenc_algs[i]);
|
||||
|
@ -2529,7 +2529,7 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
|
|||
|
||||
return 0;
|
||||
|
||||
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
|
||||
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
|
||||
/* i = ARRAY_SIZE(aes_authenc_algs); */
|
||||
err_aes_authenc_alg:
|
||||
for (j = 0; j < i; j++)
|
||||
|
@ -2720,7 +2720,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
|
|||
|
||||
atmel_aes_get_cap(aes_dd);
|
||||
|
||||
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
|
||||
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
|
||||
if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
|
||||
err = -EPROBE_DEFER;
|
||||
goto iclk_unprepare;
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
#ifndef __ATMEL_AUTHENC_H__
|
||||
#define __ATMEL_AUTHENC_H__
|
||||
|
||||
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
|
||||
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
|
||||
|
||||
#include <crypto/authenc.h>
|
||||
#include <crypto/hash.h>
|
||||
|
|
|
@ -2215,7 +2215,7 @@ static struct ahash_alg sha_hmac_algs[] = {
|
|||
},
|
||||
};
|
||||
|
||||
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
|
||||
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
|
||||
/* authenc functions */
|
||||
|
||||
static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd);
|
||||
|
|
|
@ -81,7 +81,8 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq)
|
|||
oi = 0;
|
||||
oo = 0;
|
||||
do {
|
||||
todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
|
||||
todo = min(rx_cnt, ileft);
|
||||
todo = min_t(size_t, todo, (mi.length - oi) / 4);
|
||||
if (todo) {
|
||||
ileft -= todo;
|
||||
writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
|
||||
|
@ -96,7 +97,8 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq)
|
|||
rx_cnt = SS_RXFIFO_SPACES(spaces);
|
||||
tx_cnt = SS_TXFIFO_SPACES(spaces);
|
||||
|
||||
todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
|
||||
todo = min(tx_cnt, oleft);
|
||||
todo = min_t(size_t, todo, (mo.length - oo) / 4);
|
||||
if (todo) {
|
||||
oleft -= todo;
|
||||
readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
|
||||
|
@ -220,7 +222,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
|
|||
* todo is the number of consecutive 4byte word that we
|
||||
* can read from current SG
|
||||
*/
|
||||
todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
|
||||
todo = min(rx_cnt, ileft / 4);
|
||||
todo = min_t(size_t, todo, (mi.length - oi) / 4);
|
||||
if (todo && !ob) {
|
||||
writesl(ss->base + SS_RXFIFO, mi.addr + oi,
|
||||
todo);
|
||||
|
@ -234,8 +237,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
|
|||
* we need to be able to write all buf in one
|
||||
* pass, so it is why we min() with rx_cnt
|
||||
*/
|
||||
todo = min3(rx_cnt * 4 - ob, ileft,
|
||||
mi.length - oi);
|
||||
todo = min(rx_cnt * 4 - ob, ileft);
|
||||
todo = min_t(size_t, todo, mi.length - oi);
|
||||
memcpy(buf + ob, mi.addr + oi, todo);
|
||||
ileft -= todo;
|
||||
oi += todo;
|
||||
|
@ -255,7 +258,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
|
|||
spaces = readl(ss->base + SS_FCSR);
|
||||
rx_cnt = SS_RXFIFO_SPACES(spaces);
|
||||
tx_cnt = SS_TXFIFO_SPACES(spaces);
|
||||
dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
|
||||
dev_dbg(ss->dev,
|
||||
"%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
|
||||
mode,
|
||||
oi, mi.length, ileft, areq->cryptlen, rx_cnt,
|
||||
oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);
|
||||
|
@ -263,7 +267,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
|
|||
if (!tx_cnt)
|
||||
continue;
|
||||
/* todo in 4bytes word */
|
||||
todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
|
||||
todo = min(tx_cnt, oleft / 4);
|
||||
todo = min_t(size_t, todo, (mo.length - oo) / 4);
|
||||
if (todo) {
|
||||
readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
|
||||
oleft -= todo * 4;
|
||||
|
@ -287,7 +292,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
|
|||
* no more than remaining buffer
|
||||
* no need to test against oleft
|
||||
*/
|
||||
todo = min(mo.length - oo, obl - obo);
|
||||
todo = min_t(size_t,
|
||||
mo.length - oo, obl - obo);
|
||||
memcpy(mo.addr + oo, bufo + obo, todo);
|
||||
oleft -= todo;
|
||||
obo += todo;
|
||||
|
|
|
@ -276,8 +276,8 @@ static int sun4i_hash(struct ahash_request *areq)
|
|||
*/
|
||||
while (op->len < 64 && i < end) {
|
||||
/* how many bytes we can read from current SG */
|
||||
in_r = min3(mi.length - in_i, end - i,
|
||||
64 - op->len);
|
||||
in_r = min(end - i, 64 - op->len);
|
||||
in_r = min_t(size_t, mi.length - in_i, in_r);
|
||||
memcpy(op->buf + op->len, mi.addr + in_i, in_r);
|
||||
op->len += in_r;
|
||||
i += in_r;
|
||||
|
@ -297,8 +297,8 @@ static int sun4i_hash(struct ahash_request *areq)
|
|||
}
|
||||
if (mi.length - in_i > 3 && i < end) {
|
||||
/* how many bytes we can read from current SG */
|
||||
in_r = min3(mi.length - in_i, areq->nbytes - i,
|
||||
((mi.length - in_i) / 4) * 4);
|
||||
in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i);
|
||||
in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r);
|
||||
/* how many bytes we can write in the device*/
|
||||
todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
|
||||
writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
|
||||
|
@ -324,8 +324,8 @@ static int sun4i_hash(struct ahash_request *areq)
|
|||
if ((areq->nbytes - i) < 64) {
|
||||
while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
|
||||
/* how many bytes we can read from current SG */
|
||||
in_r = min3(mi.length - in_i, areq->nbytes - i,
|
||||
64 - op->len);
|
||||
in_r = min(areq->nbytes - i, 64 - op->len);
|
||||
in_r = min_t(size_t, mi.length - in_i, in_r);
|
||||
memcpy(op->buf + op->len, mi.addr + in_i, in_r);
|
||||
op->len += in_r;
|
||||
i += in_r;
|
||||
|
|
|
@ -117,8 +117,6 @@ virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
|
|||
*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
|
||||
break;
|
||||
default:
|
||||
pr_err("virtio_crypto: Unsupported key length: %d\n",
|
||||
key_len);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
|
@ -498,6 +496,11 @@ static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
|
|||
/* Use the first data virtqueue as default */
|
||||
struct data_queue *data_vq = &vcrypto->data_vq[0];
|
||||
|
||||
if (!req->nbytes)
|
||||
return 0;
|
||||
if (req->nbytes % AES_BLOCK_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
vc_req->dataq = data_vq;
|
||||
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
|
||||
vc_sym_req->ablkcipher_ctx = ctx;
|
||||
|
@ -518,6 +521,11 @@ static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
|
|||
/* Use the first data virtqueue as default */
|
||||
struct data_queue *data_vq = &vcrypto->data_vq[0];
|
||||
|
||||
if (!req->nbytes)
|
||||
return 0;
|
||||
if (req->nbytes % AES_BLOCK_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
vc_req->dataq = data_vq;
|
||||
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
|
||||
vc_sym_req->ablkcipher_ctx = ctx;
|
||||
|
|
|
@ -3,13 +3,13 @@ obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
|
|||
vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
|
||||
|
||||
ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
|
||||
TARGET := linux-ppc64le
|
||||
override flavour := linux-ppc64le
|
||||
else
|
||||
TARGET := linux-ppc64
|
||||
override flavour := linux-ppc64
|
||||
endif
|
||||
|
||||
quiet_cmd_perl = PERL $@
|
||||
cmd_perl = $(PERL) $(<) $(TARGET) > $(@)
|
||||
cmd_perl = $(PERL) $(<) $(flavour) > $(@)
|
||||
|
||||
targets += aesp8-ppc.S ghashp8-ppc.S
|
||||
|
||||
|
|
|
@ -210,6 +210,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
|
|||
/* Cleans the error report buffer */
|
||||
memset(e, 0, sizeof (*e));
|
||||
e->error_count = 1;
|
||||
e->grain = 1;
|
||||
strcpy(e->label, "unknown label");
|
||||
e->msg = pvt->msg;
|
||||
e->other_detail = pvt->other_detail;
|
||||
|
@ -305,7 +306,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
|
|||
|
||||
/* Error grain */
|
||||
if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
|
||||
e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK);
|
||||
e->grain = ~mem_err->physical_addr_mask + 1;
|
||||
|
||||
/* Memory error location, mapped on e->location */
|
||||
p = e->location;
|
||||
|
@ -412,8 +413,13 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
|
|||
if (p > pvt->other_detail)
|
||||
*(p - 1) = '\0';
|
||||
|
||||
/* Sanity-check driver-supplied grain value. */
|
||||
if (WARN_ON_ONCE(!e->grain))
|
||||
e->grain = 1;
|
||||
|
||||
grain_bits = fls_long(e->grain - 1);
|
||||
|
||||
/* Generate the trace event */
|
||||
grain_bits = fls_long(e->grain);
|
||||
snprintf(pvt->detail_location, sizeof(pvt->detail_location),
|
||||
"APEI location: %s %s", e->location, e->other_detail);
|
||||
trace_mc_event(type, e->msg, e->label, e->error_count,
|
||||
|
|
|
@ -69,6 +69,10 @@ struct sm5502_muic_info {
|
|||
/* Default value of SM5502 register to bring up MUIC device. */
|
||||
static struct reg_data sm5502_reg_data[] = {
|
||||
{
|
||||
.reg = SM5502_REG_RESET,
|
||||
.val = SM5502_REG_RESET_MASK,
|
||||
.invert = true,
|
||||
}, {
|
||||
.reg = SM5502_REG_CONTROL,
|
||||
.val = SM5502_REG_CONTROL_MASK_INT_MASK,
|
||||
.invert = false,
|
||||
|
|
|
@ -241,6 +241,8 @@ enum sm5502_reg {
|
|||
#define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
|
||||
| (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
|
||||
|
||||
#define SM5502_REG_RESET_MASK (0x1)
|
||||
|
||||
/* SM5502 Interrupts */
|
||||
enum sm5502_irq {
|
||||
/* INT1 */
|
||||
|
|
|
@ -552,6 +552,31 @@ static int fsi_slave_scan(struct fsi_slave *slave)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static unsigned long aligned_access_size(size_t offset, size_t count)
|
||||
{
|
||||
unsigned long offset_unit, count_unit;
|
||||
|
||||
/* Criteria:
|
||||
*
|
||||
* 1. Access size must be less than or equal to the maximum access
|
||||
* width or the highest power-of-two factor of offset
|
||||
* 2. Access size must be less than or equal to the amount specified by
|
||||
* count
|
||||
*
|
||||
* The access width is optimal if we can calculate 1 to be strictly
|
||||
* equal while still satisfying 2.
|
||||
*/
|
||||
|
||||
/* Find 1 by the bottom bit of offset (with a 4 byte access cap) */
|
||||
offset_unit = BIT(__builtin_ctzl(offset | 4));
|
||||
|
||||
/* Find 2 by the top bit of count */
|
||||
count_unit = BIT(8 * sizeof(unsigned long) - 1 - __builtin_clzl(count));
|
||||
|
||||
/* Constrain the maximum access width to the minimum of both criteria */
|
||||
return BIT(__builtin_ctzl(offset_unit | count_unit));
|
||||
}
|
||||
|
||||
static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
|
||||
struct kobject *kobj, struct bin_attribute *attr, char *buf,
|
||||
loff_t off, size_t count)
|
||||
|
@ -567,8 +592,7 @@ static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
|
|||
return -EINVAL;
|
||||
|
||||
for (total_len = 0; total_len < count; total_len += read_len) {
|
||||
read_len = min_t(size_t, count, 4);
|
||||
read_len -= off & 0x3;
|
||||
read_len = aligned_access_size(off, count - total_len);
|
||||
|
||||
rc = fsi_slave_read(slave, off, buf + total_len, read_len);
|
||||
if (rc)
|
||||
|
@ -595,8 +619,7 @@ static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
|
|||
return -EINVAL;
|
||||
|
||||
for (total_len = 0; total_len < count; total_len += write_len) {
|
||||
write_len = min_t(size_t, count, 4);
|
||||
write_len -= off & 0x3;
|
||||
write_len = aligned_access_size(off, count - total_len);
|
||||
|
||||
rc = fsi_slave_write(slave, off, buf + total_len, write_len);
|
||||
if (rc)
|
||||
|
|
|
@ -138,6 +138,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
|
|||
}
|
||||
|
||||
dma_fence_put(fence);
|
||||
fence = NULL;
|
||||
|
||||
r = amdgpu_bo_kmap(vram_obj, &vram_map);
|
||||
if (r) {
|
||||
|
@ -183,6 +184,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
|
|||
}
|
||||
|
||||
dma_fence_put(fence);
|
||||
fence = NULL;
|
||||
|
||||
r = amdgpu_bo_kmap(gtt_obj[i], >t_map);
|
||||
if (r) {
|
||||
|
|
|
@ -700,10 +700,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
|
|||
id->oa_base != job->oa_base ||
|
||||
id->oa_size != job->oa_size);
|
||||
bool vm_flush_needed = job->vm_needs_flush;
|
||||
bool pasid_mapping_needed = id->pasid != job->pasid ||
|
||||
!id->pasid_mapping ||
|
||||
!dma_fence_is_signaled(id->pasid_mapping);
|
||||
struct dma_fence *fence = NULL;
|
||||
bool pasid_mapping_needed = false;
|
||||
unsigned patch_offset = 0;
|
||||
int r;
|
||||
|
||||
|
@ -713,6 +711,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
|
|||
pasid_mapping_needed = true;
|
||||
}
|
||||
|
||||
mutex_lock(&id_mgr->lock);
|
||||
if (id->pasid != job->pasid || !id->pasid_mapping ||
|
||||
!dma_fence_is_signaled(id->pasid_mapping))
|
||||
pasid_mapping_needed = true;
|
||||
mutex_unlock(&id_mgr->lock);
|
||||
|
||||
gds_switch_needed &= !!ring->funcs->emit_gds_switch;
|
||||
vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
|
||||
job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
|
||||
|
@ -752,9 +756,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
|
|||
}
|
||||
|
||||
if (pasid_mapping_needed) {
|
||||
mutex_lock(&id_mgr->lock);
|
||||
id->pasid = job->pasid;
|
||||
dma_fence_put(id->pasid_mapping);
|
||||
id->pasid_mapping = dma_fence_get(fence);
|
||||
mutex_unlock(&id_mgr->lock);
|
||||
}
|
||||
dma_fence_put(fence);
|
||||
|
||||
|
|
|
@ -2187,7 +2187,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
|
|||
* And it's needed by gfxoff feature.
|
||||
*/
|
||||
if (adev->gfx.rlc.is_rlc_v2_1) {
|
||||
gfx_v9_1_init_rlc_save_restore_list(adev);
|
||||
if (adev->asic_type == CHIP_VEGA12)
|
||||
gfx_v9_1_init_rlc_save_restore_list(adev);
|
||||
gfx_v9_0_enable_save_restore_machine(adev);
|
||||
}
|
||||
|
||||
|
|
|
@ -62,7 +62,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
|
|||
u64 wptr_off;
|
||||
|
||||
si_ih_disable_interrupts(adev);
|
||||
WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
|
||||
/* set dummy read address to dummy page address */
|
||||
WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
|
||||
interrupt_cntl = RREG32(INTERRUPT_CNTL);
|
||||
interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
|
||||
interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
|
||||
|
|
|
@ -62,6 +62,11 @@ int kfd_interrupt_init(struct kfd_dev *kfd)
|
|||
}
|
||||
|
||||
kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
|
||||
if (unlikely(!kfd->ih_wq)) {
|
||||
kfifo_free(&kfd->ih_fifo);
|
||||
dev_err(kfd_chardev(), "Failed to allocate KFD IH workqueue\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
spin_lock_init(&kfd->interrupt_lock);
|
||||
|
||||
INIT_WORK(&kfd->interrupt_work, interrupt_wq);
|
||||
|
|
|
@ -1950,7 +1950,7 @@ static bool dp_active_dongle_validate_timing(
|
|||
break;
|
||||
}
|
||||
|
||||
if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
|
||||
if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
|
||||
dongle_caps->extendedCapValid == false)
|
||||
return true;
|
||||
|
||||
|
|
|
@ -2172,6 +2172,7 @@ static void get_active_converter_info(
|
|||
uint8_t data, struct dc_link *link)
|
||||
{
|
||||
union dp_downstream_port_present ds_port = { .byte = data };
|
||||
memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps));
|
||||
|
||||
/* decode converter info*/
|
||||
if (!ds_port.fields.PORT_PRESENT) {
|
||||
|
|
|
@ -725,7 +725,9 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx)
|
|||
/* 1.0V digital core power regulator */
|
||||
pdata->dvdd10 = devm_regulator_get(dev, "dvdd10");
|
||||
if (IS_ERR(pdata->dvdd10)) {
|
||||
DRM_ERROR("DVDD10 regulator not found\n");
|
||||
if (PTR_ERR(pdata->dvdd10) != -EPROBE_DEFER)
|
||||
DRM_ERROR("DVDD10 regulator not found\n");
|
||||
|
||||
return PTR_ERR(pdata->dvdd10);
|
||||
}
|
||||
|
||||
|
@ -1341,7 +1343,9 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
|
|||
|
||||
err = anx78xx_init_pdata(anx78xx);
|
||||
if (err) {
|
||||
DRM_ERROR("Failed to initialize pdata: %d\n", err);
|
||||
if (err != -EPROBE_DEFER)
|
||||
DRM_ERROR("Failed to initialize pdata: %d\n", err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
@ -39,6 +39,7 @@
|
|||
|
||||
#include <media/cec-notifier.h>
|
||||
|
||||
#define DDC_CI_ADDR 0x37
|
||||
#define DDC_SEGMENT_ADDR 0x30
|
||||
|
||||
#define HDMI_EDID_LEN 512
|
||||
|
@ -320,6 +321,15 @@ static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap,
|
|||
u8 addr = msgs[0].addr;
|
||||
int i, ret = 0;
|
||||
|
||||
if (addr == DDC_CI_ADDR)
|
||||
/*
|
||||
* The internal I2C controller does not support the multi-byte
|
||||
* read and write operations needed for DDC/CI.
|
||||
* TOFIX: Blacklist the DDC/CI address until we filter out
|
||||
* unsupported I2C operations.
|
||||
*/
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
dev_dbg(hdmi->dev, "xfer: num: %d, addr: %#x\n", num, addr);
|
||||
|
||||
for (i = 0; i < num; i++) {
|
||||
|
@ -1747,7 +1757,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
|
|||
|
||||
/* HDMI Initialization Step E - Configure audio */
|
||||
hdmi_clk_regenerator_update_pixel_clock(hdmi);
|
||||
hdmi_enable_audio_clk(hdmi, true);
|
||||
hdmi_enable_audio_clk(hdmi, hdmi->audio_enable);
|
||||
}
|
||||
|
||||
/* not for DVI mode */
|
||||
|
|
|
@ -1572,7 +1572,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
|
|||
unsigned int flags, pipe, high_pipe;
|
||||
|
||||
if (!dev->irq_enabled)
|
||||
return -EINVAL;
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
|
||||
return -EINVAL;
|
||||
|
@ -1813,7 +1813,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
|
|||
return -EINVAL;
|
||||
|
||||
if (!dev->irq_enabled)
|
||||
return -EINVAL;
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id);
|
||||
if (!crtc)
|
||||
|
@ -1871,7 +1871,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
|
|||
return -EINVAL;
|
||||
|
||||
if (!dev->irq_enabled)
|
||||
return -EINVAL;
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id);
|
||||
if (!crtc)
|
||||
|
|
|
@ -139,6 +139,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
|
|||
s32 freq_error, min_error = 100000;
|
||||
|
||||
memset(best_clock, 0, sizeof(*best_clock));
|
||||
memset(&clock, 0, sizeof(clock));
|
||||
|
||||
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
|
||||
for (clock.n = limit->n.min; clock.n <= limit->n.max;
|
||||
|
@ -195,6 +196,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
|
|||
int err = target;
|
||||
|
||||
memset(best_clock, 0, sizeof(*best_clock));
|
||||
memset(&clock, 0, sizeof(clock));
|
||||
|
||||
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
|
||||
for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
|
||||
|
|
|
@@ -427,6 +427,7 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
return PTR_ERR(ts->dsi);
}

drm_panel_init(&ts->base);
ts->base.dev = dev;
ts->base.funcs = &rpi_touchscreen_funcs;

@@ -380,6 +380,7 @@ static int st7789v_probe(struct spi_device *spi)
spi_set_drvdata(spi, ctx);
ctx->spi = spi;

drm_panel_init(&ctx->panel);
ctx->panel.dev = &spi->dev;
ctx->panel.funcs = &st7789v_drm_funcs;

@@ -2922,6 +2922,11 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor)
* earlier
*/
sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index;
} else {
if (sor->soc->supports_edp)
sor->index = 0;
else
sor->index = 1;
}

return 0;
@@ -447,7 +447,8 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
return err;
}

static inline int copy_gathers(struct host1x_job *job, struct device *dev)
static inline int copy_gathers(struct device *host, struct host1x_job *job,
struct device *dev)
{
struct host1x_firewall fw;
size_t size = 0;

@@ -470,12 +471,12 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
* Try a non-blocking allocation from a higher priority pools first,
* as awaiting for the allocation here is a major performance hit.
*/
job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
GFP_NOWAIT);

/* the higher priority allocation failed, try the generic-blocking */
if (!job->gather_copy_mapped)
job->gather_copy_mapped = dma_alloc_wc(dev, size,
job->gather_copy_mapped = dma_alloc_wc(host, size,
&job->gather_copy,
GFP_KERNEL);
if (!job->gather_copy_mapped)

@@ -523,7 +524,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
goto out;

if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
err = copy_gathers(job, dev);
err = copy_gathers(host->dev, job, dev);
if (err)
goto out;
}

@@ -584,7 +585,7 @@ void host1x_job_unpin(struct host1x_job *job)
job->num_unpins = 0;

if (job->gather_copy_size)
dma_free_wc(job->channel->dev, job->gather_copy_size,
dma_free_wc(host->dev, job->gather_copy_size,
job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);
@@ -180,6 +180,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x06a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Comet Lake PCH-V */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa3a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Ice Lake NNPI */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),

@@ -205,6 +210,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Elkhart Lake */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{ 0 },
};
@@ -527,6 +527,10 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
u16 conflict;
unsigned int trigger_chan;

ret = iio_triggered_buffer_postenable(indio_dev);
if (ret)
return ret;

mutex_lock(&dln2->mutex);

/* Enable ADC */

@@ -540,6 +544,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
(int)conflict);
ret = -EBUSY;
}
iio_triggered_buffer_predisable(indio_dev);
return ret;
}

@@ -553,6 +558,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
mutex_unlock(&dln2->mutex);
if (ret < 0) {
dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
iio_triggered_buffer_predisable(indio_dev);
return ret;
}
} else {

@@ -560,12 +566,12 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
mutex_unlock(&dln2->mutex);
}

return iio_triggered_buffer_postenable(indio_dev);
return 0;
}

static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
int ret;
int ret, ret2;
struct dln2_adc *dln2 = iio_priv(indio_dev);

mutex_lock(&dln2->mutex);

@@ -580,12 +586,14 @@ static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
ret = dln2_adc_set_port_enabled(dln2, false, NULL);

mutex_unlock(&dln2->mutex);
if (ret < 0) {
if (ret < 0)
dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
return ret;
}

return iio_triggered_buffer_predisable(indio_dev);
ret2 = iio_triggered_buffer_predisable(indio_dev);
if (ret == 0)
ret = ret2;

return ret;
}

static const struct iio_buffer_setup_ops dln2_adc_buffer_setup_ops = {
@@ -460,6 +460,14 @@ static int max1027_probe(struct spi_device *spi)
goto fail_dev_register;
}

/* Internal reset */
st->reg = MAX1027_RST_REG;
ret = spi_write(st->spi, &st->reg, 1);
if (ret < 0) {
dev_err(&indio_dev->dev, "Failed to reset the ADC\n");
return ret;
}

/* Disable averaging */
st->reg = MAX1027_AVG_REG;
ret = spi_write(st->spi, &st->reg, 1);

@@ -59,8 +59,8 @@ config AD5446
help
Say yes here to build support for Analog Devices AD5300, AD5301, AD5310,
AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453,
AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5601, AD5602, AD5611, AD5612,
AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs
AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5600, AD5601, AD5602, AD5611,
AD5612, AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs
as well as Texas Instruments DAC081S101, DAC101S101, DAC121S101.

To compile this driver as a module, choose M here: the

@@ -328,6 +328,7 @@ enum ad5446_supported_spi_device_ids {
ID_AD5541A,
ID_AD5512A,
ID_AD5553,
ID_AD5600,
ID_AD5601,
ID_AD5611,
ID_AD5621,

@@ -382,6 +383,10 @@ static const struct ad5446_chip_info ad5446_spi_chip_info[] = {
.channel = AD5446_CHANNEL(14, 16, 0),
.write = ad5446_write,
},
[ID_AD5600] = {
.channel = AD5446_CHANNEL(16, 16, 0),
.write = ad5446_write,
},
[ID_AD5601] = {
.channel = AD5446_CHANNEL_POWERDOWN(8, 16, 6),
.write = ad5446_write,

@@ -449,6 +454,7 @@ static const struct spi_device_id ad5446_spi_ids[] = {
{"ad5542a", ID_AD5541A}, /* ad5541a and ad5542a are compatible */
{"ad5543", ID_AD5541A}, /* ad5541a and ad5543 are compatible */
{"ad5553", ID_AD5553},
{"ad5600", ID_AD5600},
{"ad5601", ID_AD5601},
{"ad5611", ID_AD5611},
{"ad5621", ID_AD5621},
@@ -62,9 +62,9 @@ struct bh1750_chip_info {

u16 int_time_low_mask;
u16 int_time_high_mask;
}
};

static const bh1750_chip_info_tbl[] = {
static const struct bh1750_chip_info bh1750_chip_info_tbl[] = {
[BH1710] = { 140, 1022, 300, 400, 250000000, 2, 0x001F, 0x03E0 },
[BH1721] = { 140, 1020, 300, 400, 250000000, 2, 0x0010, 0x03E0 },
[BH1750] = { 31, 254, 69, 1740, 57500000, 1, 0x001F, 0x00E0 },
@@ -1701,6 +1701,14 @@ static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
if (qp->urq.umem)
ib_umem_release(qp->urq.umem);
qp->urq.umem = NULL;

if (rdma_protocol_roce(&dev->ibdev, 1)) {
qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
} else {
kfree(qp->usq.pbl_tbl);
kfree(qp->urq.pbl_tbl);
}
}

static int qedr_create_user_qp(struct qedr_dev *dev,

@@ -2809,8 +2817,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)

dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);

if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
if (mr->type != QEDR_MR_DMA)
free_mr_info(dev, &mr->info);

/* it could be user registered memory. */
if (mr->umem)

@@ -646,6 +646,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
if (ib_conn->pi_support) {
u32 sig_caps = ib_conn->device->ib_device->attrs.sig_prot_cap;

shost->sg_prot_tablesize = shost->sg_tablesize;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
SHOST_DIX_GUARD_CRC);
@@ -377,7 +377,10 @@ static int bch_allocator_thread(void *arg)
if (!fifo_full(&ca->free_inc))
goto retry_invalidate;

bch_prio_write(ca);
if (bch_prio_write(ca, false) < 0) {
ca->invalidate_needs_gc = 1;
wake_up_gc(ca->set);
}
}
}
out:

@@ -959,7 +959,7 @@ bool bch_cached_dev_error(struct cached_dev *dc);
__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);

void bch_prio_write(struct cache *ca);
int bch_prio_write(struct cache *ca, bool wait);
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);

extern struct workqueue_struct *bcache_wq;

@@ -525,12 +525,29 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op,
closure_sync(cl);
}

void bch_prio_write(struct cache *ca)
int bch_prio_write(struct cache *ca, bool wait)
{
int i;
struct bucket *b;
struct closure cl;

pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu",
fifo_used(&ca->free[RESERVE_PRIO]),
fifo_used(&ca->free[RESERVE_NONE]),
fifo_used(&ca->free_inc));

/*
* Pre-check if there are enough free buckets. In the non-blocking
* scenario it's better to fail early rather than starting to allocate
* buckets and do a cleanup later in case of failure.
*/
if (!wait) {
size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) +
fifo_used(&ca->free[RESERVE_NONE]);
if (prio_buckets(ca) > avail)
return -ENOMEM;
}

closure_init_stack(&cl);

lockdep_assert_held(&ca->set->bucket_lock);

@@ -540,9 +557,6 @@ void bch_prio_write(struct cache *ca)
atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
&ca->meta_sectors_written);

//pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
// fifo_used(&ca->free_inc), fifo_used(&ca->unused));

for (i = prio_buckets(ca) - 1; i >= 0; --i) {
long bucket;
struct prio_set *p = ca->disk_buckets;

@@ -560,7 +574,7 @@ void bch_prio_write(struct cache *ca)
p->magic = pset_magic(&ca->sb);
p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);

bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
BUG_ON(bucket == -1);

mutex_unlock(&ca->set->bucket_lock);

@@ -589,6 +603,7 @@ void bch_prio_write(struct cache *ca)

ca->prio_last_buckets[i] = ca->prio_buckets[i];
}
return 0;
}

static void prio_read(struct cache *ca, uint64_t bucket)
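The comment in the bch_prio_write() hunk above spells out the reasoning: in the non-blocking case, check up front that enough free buckets exist, so a failure needs no cleanup. A minimal stand-alone C sketch of that fail-early pattern (hypothetical names, not the kernel code itself):

#include <errno.h>
#include <stddef.h>

struct bucket_pool {
	size_t free_prio;	/* free buckets in the priority reserve */
	size_t free_none;	/* free buckets in the general reserve */
};

/* Reserve 'needed' buckets; when 'wait' is 0, fail early with -ENOMEM
 * instead of starting an allocation that might have to be rolled back. */
static int reserve_buckets(struct bucket_pool *pool, size_t needed, int wait)
{
	if (!wait) {
		size_t avail = pool->free_prio + pool->free_none;

		if (needed > avail)
			return -ENOMEM;
	}

	/* ... allocate the buckets here, blocking if 'wait' is set ... */
	return 0;
}

int main(void)
{
	struct bucket_pool pool = { .free_prio = 2, .free_none = 1 };

	/* asking for 8 buckets without waiting fails before any work is done */
	return reserve_buckets(&pool, 8, 0) == -ENOMEM ? 0 : 1;
}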
@@ -747,20 +762,28 @@ static inline int idx_to_first_minor(int idx)

static void bcache_device_free(struct bcache_device *d)
{
struct gendisk *disk = d->disk;

lockdep_assert_held(&bch_register_lock);

pr_info("%s stopped", d->disk->disk_name);
if (disk)
pr_info("%s stopped", disk->disk_name);
else
pr_err("bcache device (NULL gendisk) stopped");

if (d->c)
bcache_device_detach(d);
if (d->disk && d->disk->flags & GENHD_FL_UP)
del_gendisk(d->disk);
if (d->disk && d->disk->queue)
blk_cleanup_queue(d->disk->queue);
if (d->disk) {

if (disk) {
if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);

if (disk->queue)
blk_cleanup_queue(disk->queue);

ida_simple_remove(&bcache_device_idx,
first_minor_to_idx(d->disk->first_minor));
put_disk(d->disk);
first_minor_to_idx(disk->first_minor));
put_disk(disk);
}

bioset_exit(&d->bio_split);

@@ -1876,7 +1899,7 @@ static int run_cache_set(struct cache_set *c)

mutex_lock(&c->bucket_lock);
for_each_cache(ca, c, i)
bch_prio_write(ca);
bch_prio_write(ca, true);
mutex_unlock(&c->bucket_lock);

err = "cannot allocate new UUID bucket";
@@ -2132,6 +2132,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
memcpy(page_address(store.sb_page),
page_address(bitmap->storage.sb_page),
sizeof(bitmap_super_t));
spin_lock_irq(&bitmap->counts.lock);
md_bitmap_file_unmap(&bitmap->storage);
bitmap->storage = store;

@@ -2147,7 +2148,6 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
blocks = min(old_counts.chunks << old_counts.chunkshift,
chunks << chunkshift);

spin_lock_irq(&bitmap->counts.lock);
/* For cluster raid, need to pre-allocate bitmap */
if (mddev_is_clustered(bitmap->mddev)) {
unsigned long page;
@@ -419,10 +419,14 @@ static struct sensor_register ov2659_720p[] = {
{ REG_TIMING_YINC, 0x11 },
{ REG_TIMING_VERT_FORMAT, 0x80 },
{ REG_TIMING_HORIZ_FORMAT, 0x00 },
{ 0x370a, 0x12 },
{ 0x3a03, 0xe8 },
{ 0x3a09, 0x6f },
{ 0x3a0b, 0x5d },
{ 0x3a15, 0x9a },
{ REG_VFIFO_READ_START_H, 0x00 },
{ REG_VFIFO_READ_START_L, 0x80 },
{ REG_ISP_CTRL02, 0x00 },
{ REG_NULL, 0x00 },
};

@@ -1203,11 +1207,15 @@ static int ov2659_s_stream(struct v4l2_subdev *sd, int on)
goto unlock;
}

ov2659_set_pixel_clock(ov2659);
ov2659_set_frame_size(ov2659);
ov2659_set_format(ov2659);
ov2659_set_streaming(ov2659, 1);
ov2659->streaming = on;
ret = ov2659_set_pixel_clock(ov2659);
if (!ret)
ret = ov2659_set_frame_size(ov2659);
if (!ret)
ret = ov2659_set_format(ov2659);
if (!ret) {
ov2659_set_streaming(ov2659, 1);
ov2659->streaming = on;
}

unlock:
mutex_unlock(&ov2659->lock);
@@ -469,38 +469,39 @@ static int ov6650_set_selection(struct v4l2_subdev *sd,
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
struct v4l2_rect rect = sel->r;
int ret;

if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;

v4l_bound_align_image(&rect.width, 2, W_CIF, 1,
&rect.height, 2, H_CIF, 1, 0);
v4l_bound_align_image(&rect.left, DEF_HSTRT << 1,
(DEF_HSTRT << 1) + W_CIF - (__s32)rect.width, 1,
&rect.top, DEF_VSTRT << 1,
(DEF_VSTRT << 1) + H_CIF - (__s32)rect.height, 1,
0);
v4l_bound_align_image(&sel->r.width, 2, W_CIF, 1,
&sel->r.height, 2, H_CIF, 1, 0);
v4l_bound_align_image(&sel->r.left, DEF_HSTRT << 1,
(DEF_HSTRT << 1) + W_CIF - (__s32)sel->r.width, 1,
&sel->r.top, DEF_VSTRT << 1,
(DEF_VSTRT << 1) + H_CIF - (__s32)sel->r.height,
1, 0);

ret = ov6650_reg_write(client, REG_HSTRT, rect.left >> 1);
ret = ov6650_reg_write(client, REG_HSTRT, sel->r.left >> 1);
if (!ret) {
priv->rect.left = rect.left;
priv->rect.width += priv->rect.left - sel->r.left;
priv->rect.left = sel->r.left;
ret = ov6650_reg_write(client, REG_HSTOP,
(rect.left + rect.width) >> 1);
(sel->r.left + sel->r.width) >> 1);
}
if (!ret) {
priv->rect.width = rect.width;
ret = ov6650_reg_write(client, REG_VSTRT, rect.top >> 1);
priv->rect.width = sel->r.width;
ret = ov6650_reg_write(client, REG_VSTRT, sel->r.top >> 1);
}
if (!ret) {
priv->rect.top = rect.top;
priv->rect.height += priv->rect.top - sel->r.top;
priv->rect.top = sel->r.top;
ret = ov6650_reg_write(client, REG_VSTOP,
(rect.top + rect.height) >> 1);
(sel->r.top + sel->r.height) >> 1);
}
if (!ret)
priv->rect.height = rect.height;
priv->rect.height = sel->r.height;

return ret;
}

@@ -614,7 +615,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code);
return -EINVAL;
}
priv->code = code;

if (code == MEDIA_BUS_FMT_Y8_1X8 ||
code == MEDIA_BUS_FMT_SBGGR8_1X8) {

@@ -640,7 +640,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
dev_dbg(&client->dev, "max resolution: CIF\n");
coma_mask |= COMA_QCIF;
}
priv->half_scale = half_scale;

clkrc = CLKRC_12MHz;
mclk = 12000000;

@@ -658,8 +657,13 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
if (!ret)
ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
if (!ret)
if (!ret) {
priv->half_scale = half_scale;

ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
}
if (!ret)
priv->code = code;

if (!ret) {
mf->colorspace = priv->colorspace;
@@ -3108,19 +3108,23 @@ static int smiapp_probe(struct i2c_client *client,
if (rval < 0)
goto out_media_entity_cleanup;

rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
if (rval < 0)
goto out_media_entity_cleanup;

pm_runtime_set_active(&client->dev);
pm_runtime_get_noresume(&client->dev);
pm_runtime_enable(&client->dev);

rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
if (rval < 0)
goto out_disable_runtime_pm;

pm_runtime_set_autosuspend_delay(&client->dev, 1000);
pm_runtime_use_autosuspend(&client->dev);
pm_runtime_put_autosuspend(&client->dev);

return 0;

out_disable_runtime_pm:
pm_runtime_disable(&client->dev);

out_media_entity_cleanup:
media_entity_cleanup(&sensor->src->sd.entity);
@@ -1312,7 +1312,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
core = cx88_core_get(dev->pci);
if (!core) {
err = -EINVAL;
goto fail_free;
goto fail_disable;
}
dev->core = core;

@@ -1358,7 +1358,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
cc->step, cc->default_value);
if (!vc) {
err = core->audio_hdl.error;
goto fail_core;
goto fail_irq;
}
vc->priv = (void *)cc;
}

@@ -1372,7 +1372,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
cc->step, cc->default_value);
if (!vc) {
err = core->video_hdl.error;
goto fail_core;
goto fail_irq;
}
vc->priv = (void *)cc;
if (vc->id == V4L2_CID_CHROMA_AGC)

@@ -1535,11 +1535,14 @@ static int cx8800_initdev(struct pci_dev *pci_dev,

fail_unreg:
cx8800_unregister_video(dev);
free_irq(pci_dev->irq, dev);
mutex_unlock(&core->lock);
fail_irq:
free_irq(pci_dev->irq, dev);
fail_core:
core->v4ldev = NULL;
cx88_core_put(core, dev->pci);
fail_disable:
pci_disable_device(pci_dev);
fail_free:
kfree(dev);
return err;
@@ -1848,6 +1848,10 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
return -ENODATA;

/* if trying to set the same std then nothing to do */
if (vpfe_standards[vpfe->std_index].std_id == std_id)
return 0;

/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
vpfe_err(vpfe, "%s device busy\n", __func__);

@@ -430,10 +430,11 @@ static const struct venus_resources msm8916_res = {
};

static const struct freq_tbl msm8996_freq_table[] = {
{ 1944000, 490000000 }, /* 4k UHD @ 60 */
{ 972000, 320000000 }, /* 4k UHD @ 30 */
{ 489600, 150000000 }, /* 1080p @ 60 */
{ 244800, 75000000 }, /* 1080p @ 30 */
{ 1944000, 520000000 }, /* 4k UHD @ 60 (decode only) */
{ 972000, 520000000 }, /* 4k UHD @ 30 */
{ 489600, 346666667 }, /* 1080p @ 60 */
{ 244800, 150000000 }, /* 1080p @ 30 */
{ 108000, 75000000 }, /* 720p @ 30 */
};

static const struct reg_val msm8996_reg_preset[] = {
@@ -1484,6 +1484,7 @@ static int venus_suspend_3xx(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
struct device *dev = core->dev;
u32 ctrl_status;
bool val;
int ret;

@@ -1499,6 +1500,10 @@ static int venus_suspend_3xx(struct venus_core *core)
return -EINVAL;
}

ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
goto power_off;

/*
* Power collapse sequence for Venus 3xx and 4xx versions:
* 1. Check for ARM9 and video core to be idle by checking WFI bit

@@ -1523,6 +1528,7 @@ static int venus_suspend_3xx(struct venus_core *core)
if (ret)
return ret;

power_off:
mutex_lock(&hdev->lock);

ret = venus_power_off(hdev);

@@ -912,6 +912,7 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
{
struct rcar_drif_sdr *sdr = video_drvdata(file);

memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
f->fmt.sdr.buffersize = sdr->fmt->buffersize;
@@ -60,6 +60,7 @@ struct vpdma_data_format {
* line stride of source and dest
* buffers should be 16 byte aligned
*/
#define VPDMA_MAX_STRIDE 65520 /* Max line stride 16 byte aligned */
#define VPDMA_DTD_DESC_SIZE 32 /* 8 words */
#define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */

@@ -352,20 +352,25 @@ enum {
};

/* find our format description corresponding to the passed v4l2_format */
static struct vpe_fmt *find_format(struct v4l2_format *f)
static struct vpe_fmt *__find_format(u32 fourcc)
{
struct vpe_fmt *fmt;
unsigned int k;

for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
fmt = &vpe_formats[k];
if (fmt->fourcc == f->fmt.pix.pixelformat)
if (fmt->fourcc == fourcc)
return fmt;
}

return NULL;
}

static struct vpe_fmt *find_format(struct v4l2_format *f)
{
return __find_format(f->fmt.pix.pixelformat);
}

/*
* there is one vpe_dev structure in the driver, it is shared by
* all instances.

@@ -1027,11 +1032,14 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
dma_addr_t dma_addr;
u32 flags = 0;
u32 offset = 0;
u32 stride;

if (port == VPE_PORT_MV_OUT) {
vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
dma_addr = ctx->mv_buf_dma[mv_buf_selector];
q_data = &ctx->q_data[Q_DATA_SRC];
stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3,
VPDMA_STRIDE_ALIGN);
} else {
/* to incorporate interleaved formats */
int plane = fmt->coplanar ? p_data->vb_part : 0;

@@ -1058,6 +1066,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
}
/* Apply the offset */
dma_addr += offset;
stride = q_data->bytesperline[VPE_LUMA];
}

if (q_data->flags & Q_DATA_FRAME_1D)

@@ -1069,7 +1078,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
MAX_W, MAX_H);

vpdma_add_out_dtd(&ctx->desc_list, q_data->width,
q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
stride, &q_data->c_rect,
vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
MAX_OUT_HEIGHT_REG1, p_data->channel, flags);
}

@@ -1088,10 +1097,13 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
dma_addr_t dma_addr;
u32 flags = 0;
u32 offset = 0;
u32 stride;

if (port == VPE_PORT_MV_IN) {
vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
dma_addr = ctx->mv_buf_dma[mv_buf_selector];
stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3,
VPDMA_STRIDE_ALIGN);
} else {
/* to incorporate interleaved formats */
int plane = fmt->coplanar ? p_data->vb_part : 0;

@@ -1118,6 +1130,7 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
}
/* Apply the offset */
dma_addr += offset;
stride = q_data->bytesperline[VPE_LUMA];

if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) {
/*

@@ -1153,10 +1166,10 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12)
frame_height /= 2;

vpdma_add_in_dtd(&ctx->desc_list, q_data->width,
q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
vpdma_fmt, dma_addr, p_data->channel, field, flags, frame_width,
frame_height, 0, 0);
vpdma_add_in_dtd(&ctx->desc_list, q_data->width, stride,
&q_data->c_rect, vpdma_fmt, dma_addr,
p_data->channel, field, flags, frame_width,
frame_height, 0, 0);
}

/*

@@ -1405,9 +1418,6 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
/* the previous dst mv buffer becomes the next src mv buffer */
ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;

if (ctx->aborting)
goto finished;

s_vb = ctx->src_vbs[0];
d_vb = ctx->dst_vb;

@@ -1418,6 +1428,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
d_vb->timecode = s_vb->timecode;

d_vb->sequence = ctx->sequence;
s_vb->sequence = ctx->sequence;

d_q_data = &ctx->q_data[Q_DATA_DST];
if (d_q_data->flags & Q_IS_INTERLACED) {

@@ -1471,6 +1482,9 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
ctx->src_vbs[0] = NULL;
ctx->dst_vb = NULL;

if (ctx->aborting)
goto finished;

ctx->bufs_completed++;
if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) {
device_run(ctx);

@@ -1583,9 +1597,9 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
unsigned int stride = 0;

if (!fmt || !(fmt->types & type)) {
vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
pix->pixelformat);
return -EINVAL;
fmt = __find_format(V4L2_PIX_FMT_YUYV);
}

if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE

@@ -1632,7 +1646,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
&pix->height, MIN_H, MAX_H, H_ALIGN,
S_ALIGN);

if (!pix->num_planes)
if (!pix->num_planes || pix->num_planes > 2)
pix->num_planes = fmt->coplanar ? 2 : 1;
else if (pix->num_planes > 1 && !fmt->coplanar)
pix->num_planes = 1;

@@ -1671,6 +1685,10 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
if (stride > plane_fmt->bytesperline)
plane_fmt->bytesperline = stride;

plane_fmt->bytesperline = clamp_t(u32, plane_fmt->bytesperline,
stride,
VPDMA_MAX_STRIDE);

plane_fmt->bytesperline = ALIGN(plane_fmt->bytesperline,
VPDMA_STRIDE_ALIGN);

@@ -2291,7 +2309,7 @@ static int vpe_open(struct file *file)
v4l2_ctrl_handler_setup(hdl);

s_q_data = &ctx->q_data[Q_DATA_SRC];
s_q_data->fmt = &vpe_formats[2];
s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV);
s_q_data->width = 1920;
s_q_data->height = 1080;
s_q_data->nplanes = 1;

@@ -2369,6 +2387,12 @@ static int vpe_release(struct file *file)

mutex_lock(&dev->dev_mutex);
free_mv_buffers(ctx);

vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);

vpdma_free_desc_list(&ctx->desc_list);
vpdma_free_desc_buf(&ctx->mmr_adb);
@@ -485,6 +485,8 @@ static int si470x_i2c_remove(struct i2c_client *client)
video_unregister_device(&radio->videodev);
kfree(radio);

v4l2_ctrl_handler_free(&radio->hdl);
v4l2_device_unregister(&radio->v4l2_dev);
return 0;
}

@@ -503,7 +503,13 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
static int flexcop_usb_init(struct flexcop_usb *fc_usb)
{
/* use the alternate setting with the larges buffer */
usb_set_interface(fc_usb->udev,0,1);
int ret = usb_set_interface(fc_usb->udev, 0, 1);

if (ret) {
err("set interface failed.");
return ret;
}

switch (fc_usb->udev->speed) {
case USB_SPEED_LOW:
err("cannot handle USB speed because it is too slow.");

@@ -916,8 +916,12 @@ static void pvr2_v4l2_internal_check(struct pvr2_channel *chp)
pvr2_v4l2_dev_disassociate_parent(vp->dev_video);
pvr2_v4l2_dev_disassociate_parent(vp->dev_radio);
if (!list_empty(&vp->dev_video->devbase.fh_list) ||
!list_empty(&vp->dev_radio->devbase.fh_list))
(vp->dev_radio &&
!list_empty(&vp->dev_radio->devbase.fh_list))) {
pvr2_trace(PVR2_TRACE_STRUCT,
"pvr2_v4l2 internal_check exit-empty id=%p", vp);
return;
}
pvr2_v4l2_destroy_no_lock(vp);
}

@@ -953,7 +957,8 @@ static int pvr2_v4l2_release(struct file *file)
kfree(fhp);
if (vp->channel.mc_head->disconnect_flag &&
list_empty(&vp->dev_video->devbase.fh_list) &&
list_empty(&vp->dev_radio->devbase.fh_list)) {
(!vp->dev_radio ||
list_empty(&vp->dev_radio->devbase.fh_list))) {
pvr2_v4l2_destroy_no_lock(vp);
}
return 0;
@@ -1415,10 +1415,26 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
return ret;
}

static void v4l_pix_format_touch(struct v4l2_pix_format *p)
{
/*
* The v4l2_pix_format structure contains fields that make no sense for
* touch. Set them to default values in this case.
*/

p->field = V4L2_FIELD_NONE;
p->colorspace = V4L2_COLORSPACE_RAW;
p->flags = 0;
p->ycbcr_enc = 0;
p->quantization = 0;
p->xfer_func = 0;
}

static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
int ret = check_fmt(file, p->type);

if (ret)

@@ -1456,6 +1472,8 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
if (vfd->vfl_type == VFL_TYPE_TOUCH)
v4l_pix_format_touch(&p->fmt.pix);
return ret;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);

@@ -1491,21 +1509,6 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
}

static void v4l_pix_format_touch(struct v4l2_pix_format *p)
{
/*
* The v4l2_pix_format structure contains fields that make no sense for
* touch. Set them to default values in this case.
*/

p->field = V4L2_FIELD_NONE;
p->colorspace = V4L2_COLORSPACE_RAW;
p->flags = 0;
p->ycbcr_enc = 0;
p->quantization = 0;
p->xfer_func = 0;
}

static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
@@ -228,6 +228,7 @@
#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */

#define MSDC_PATCH_BIT1_CMDTA (0x7 << 3) /* RW */
#define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */

#define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */

@@ -1673,6 +1674,7 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)

/* select EMMC50 PAD CMD tune */
sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0));
sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2);

if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
mmc->ios.timing == MMC_TIMING_UHS_SDR104)
@@ -108,7 +108,7 @@

#define CORE_PWRSAVE_DLL BIT(3)

#define DDR_CONFIG_POR_VAL 0x80040853
#define DDR_CONFIG_POR_VAL 0x80040873

#define INVALID_TUNING_PHASE -1

@@ -157,8 +157,9 @@ struct sdhci_msm_offset {
u32 core_ddr_200_cfg;
u32 core_vendor_spec3;
u32 core_dll_config_2;
u32 core_dll_config_3;
u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
u32 core_ddr_config;
u32 core_ddr_config_2;
};

static const struct sdhci_msm_offset sdhci_msm_v5_offset = {

@@ -186,8 +187,8 @@ static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
.core_ddr_200_cfg = 0x224,
.core_vendor_spec3 = 0x250,
.core_dll_config_2 = 0x254,
.core_ddr_config = 0x258,
.core_ddr_config_2 = 0x25c,
.core_dll_config_3 = 0x258,
.core_ddr_config = 0x25c,
};

static const struct sdhci_msm_offset sdhci_msm_mci_offset = {

@@ -216,8 +217,8 @@ static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
.core_ddr_200_cfg = 0x184,
.core_vendor_spec3 = 0x1b0,
.core_dll_config_2 = 0x1b4,
.core_ddr_config = 0x1b8,
.core_ddr_config_2 = 0x1bc,
.core_ddr_config_old = 0x1b8,
.core_ddr_config = 0x1bc,
};

struct sdhci_msm_variant_ops {

@@ -260,6 +261,7 @@ struct sdhci_msm_host {
const struct sdhci_msm_offset *offset;
bool use_cdr;
u32 transfer_mode;
bool updated_ddr_cfg;
};

static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)

@@ -931,8 +933,10 @@ static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
struct mmc_host *mmc = host->mmc;
u32 dll_status, config;
u32 dll_status, config, ddr_cfg_offset;
int ret;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
const struct sdhci_msm_offset *msm_offset =
sdhci_priv_msm_offset(host);

@@ -945,8 +949,11 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
* bootloaders. In the future, if this changes, then the desired
* values will need to be programmed appropriately.
*/
writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr +
msm_offset->core_ddr_config);
if (msm_host->updated_ddr_cfg)
ddr_cfg_offset = msm_offset->core_ddr_config;
else
ddr_cfg_offset = msm_offset->core_ddr_config_old;
writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset);

if (mmc->ios.enhanced_strobe) {
config = readl_relaxed(host->ioaddr +

@@ -1862,6 +1869,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_offset->core_vendor_spec_capabilities0);
}

if (core_major == 1 && core_minor >= 0x49)
msm_host->updated_ddr_cfg = true;

/*
* Power on reset state may trigger power irq if previous status of
* PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
@@ -648,9 +648,6 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc"))
mdelay(5);

if (mask & SDHCI_RESET_ALL) {
val = sdhci_readl(host, ESDHC_TBCTL);
val &= ~ESDHC_TB_EN;

@@ -926,8 +923,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;

if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
}

if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
@@ -30,6 +30,7 @@
#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/sdhci-pci-data.h>
#include <linux/acpi.h>
#include <linux/dmi.h>

#include "cqhci.h"

@@ -732,11 +733,18 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}

static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
{
return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
dmi_match(DMI_BIOS_VENDOR, "LENOVO");
}

static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
int ret = byt_emmc_probe_slot(slot);

slot->host->mmc->caps2 |= MMC_CAP2_CQE;
if (!glk_broken_cqhci(slot))
slot->host->mmc->caps2 |= MMC_CAP2_CQE;

if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES,
@@ -1713,9 +1713,7 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
else if (timing == MMC_TIMING_UHS_SDR12)
ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
else if (timing == MMC_TIMING_SD_HS ||
timing == MMC_TIMING_MMC_HS ||
timing == MMC_TIMING_UHS_SDR25)
else if (timing == MMC_TIMING_UHS_SDR25)
ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
else if (timing == MMC_TIMING_UHS_SDR50)
ctrl_2 |= SDHCI_CTRL_UHS_SDR50;

@@ -2246,8 +2244,8 @@ static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
sdhci_send_tuning(host, opcode);

if (!host->tuning_done) {
pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
mmc_hostname(host->mmc));
pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
mmc_hostname(host->mmc));
sdhci_abort_tuning(host, opcode);
return;
}

@@ -3551,6 +3549,9 @@ int sdhci_setup_host(struct sdhci_host *host)
mmc_hostname(mmc), host->version);
}

if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
mmc->caps2 &= ~MMC_CAP2_CQE;

if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
host->flags |= SDHCI_USE_SDMA;
else if (!(host->caps & SDHCI_CAN_DO_SDMA))

@@ -391,6 +391,8 @@ struct sdhci_host {
#define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
/* Controller reports inverted write-protect state */
#define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
/* Controller has unusable command queue engine */
#define SDHCI_QUIRK_BROKEN_CQE (1<<17)
/* Controller does not like fast PIO transfers */
#define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
/* Controller has to be forced to use block size of 2048 bytes */
@@ -1267,7 +1267,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
return ret;
}

mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps |= MMC_CAP_ERASE | MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = pdata->max_segs ? : 32;
mmc->max_blk_size = 512;

@@ -608,7 +608,7 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
struct kvaser_cmd *cmd;
int err;

cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return -ENOMEM;

@@ -1140,7 +1140,7 @@ static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv)
struct kvaser_cmd *cmd;
int rc;

cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;

@@ -1206,7 +1206,7 @@ static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv)
struct kvaser_cmd *cmd;
int rc;

cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;

@@ -66,6 +66,7 @@ config NET_DSA_REALTEK_SMI
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
select REGMAP
---help---
This enables support for the SMSC/Microchip LAN9303 3 port ethernet
switch chips.
@@ -2394,15 +2394,21 @@ static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
/* send the ramrod on all the queues of the PF */
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
int tx_idx;

/* Set the appropriate Queue object */
q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

/* Update the Queue state */
rc = bnx2x_queue_state_change(bp, &q_params);
if (rc) {
BNX2X_ERR("Failed to configure Tx switching\n");
return rc;
for (tx_idx = FIRST_TX_COS_INDEX;
tx_idx < fp->max_cos; tx_idx++) {
q_params.params.update.cid_index = tx_idx;

/* Update the Queue state */
rc = bnx2x_queue_state_change(bp, &q_params);
if (rc) {
BNX2X_ERR("Failed to configure Tx switching\n");
return rc;
}
}
}

@@ -577,6 +577,8 @@ static int gmac_setup_txqs(struct net_device *netdev)

if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
dev_warn(geth->dev, "TX queue base is not aligned\n");
dma_free_coherent(geth->dev, len * sizeof(*desc_ring),
desc_ring, port->txq_dma_base);
kfree(skb_tab);
return -ENOMEM;
}

@@ -456,9 +456,9 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);

hip04_set_xmit_desc(priv, phys);
priv->tx_head = TX_NEXT(tx_head);
count++;
netdev_sent_queue(ndev, skb->len);
priv->tx_head = TX_NEXT(tx_head);

stats->tx_bytes += skb->len;
stats->tx_packets++;

@@ -1474,6 +1474,9 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
time_after(jiffies,
(trans_start + ndev->watchdog_timeo))) {
timeout_queue = i;
netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
q->state,
jiffies_to_msecs(jiffies - trans_start));
break;
}
}
@@ -3441,14 +3441,14 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
q_vector->rx.target_itr =
ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
q_vector->rx.target_itr);
q_vector->rx.target_itr >> 1);
q_vector->rx.current_itr = q_vector->rx.target_itr;

q_vector->tx.next_update = jiffies + 1;
q_vector->tx.target_itr =
ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
q_vector->tx.target_itr);
q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;

wr32(hw, I40E_PFINT_RATEN(vector - 1),

@@ -3553,11 +3553,11 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
/* set the ITR configuration */
q_vector->rx.next_update = jiffies + 1;
q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->tx.next_update = jiffies + 1;
q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;

i40e_enable_misc_int_causes(pf);

@@ -10735,7 +10735,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)

/* associate no queues to the misc vector */
wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);

i40e_flush(hw);

@@ -911,7 +911,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (ice_sq_done(hw, cq))
break;

mdelay(1);
udelay(ICE_CTL_Q_SQ_CMD_USEC);
total_delay++;
} while (total_delay < cq->sq_cmd_timeout);
@@ -30,8 +30,9 @@ enum ice_ctl_q {
ICE_CTL_Q_ADMIN,
};

/* Control Queue default settings */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */
/* Control Queue timeout settings - max delay 250ms */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */

struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to dma head */
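The comments in the hunk above carry the arithmetic: 2500 polls at 100 microseconds each gives the same 250 ms ceiling as the previous 250 x 1 ms loop, only checked at a finer granularity. A trivial stand-alone C check of that budget (illustrative only, not kernel code):

#include <stdio.h>

#define ICE_CTL_Q_SQ_CMD_TIMEOUT	2500	/* poll iterations */
#define ICE_CTL_Q_SQ_CMD_USEC		100	/* microseconds per poll */

int main(void)
{
	unsigned int max_wait_us = ICE_CTL_Q_SQ_CMD_TIMEOUT * ICE_CTL_Q_SQ_CMD_USEC;

	/* prints: max wait: 250000 us (250 ms) */
	printf("max wait: %u us (%u ms)\n", max_wait_us, max_wait_us / 1000);
	return 0;
}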
@@ -8551,7 +8551,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,

if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
adapter->ptp_clock) {
if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
&adapter->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;

@@ -1230,7 +1230,7 @@ qede_configure_mcast_filtering(struct net_device *ndev,
netif_addr_lock_bh(ndev);

mc_count = netdev_mc_count(ndev);
if (mc_count < 64) {
if (mc_count <= 64) {
netdev_for_each_mc_addr(ha, ndev) {
ether_addr_copy(temp, ha->addr);
temp += ETH_ALEN;

@@ -1362,6 +1362,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
rxq->rx_buf_seg_size = roundup_pow_of_two(size);
} else {
rxq->rx_buf_seg_size = PAGE_SIZE;
edev->ndev->features &= ~NETIF_F_GRO_HW;
}

/* Allocate the parallel driver ring for Rx buffers */

@@ -1406,6 +1407,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
}
}

edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
if (!edev->gro_disable)
qede_set_tpa_param(rxq);
err:

@@ -1606,8 +1608,6 @@ static void qede_init_fp(struct qede_dev *edev)
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
edev->ndev->name, queue_id);
}

edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
}

static int qede_set_real_num_queues(struct qede_dev *edev)
@@ -2757,6 +2757,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
int err;

for (i = 0; i < qdev->num_large_buffers; i++) {
lrg_buf_cb = &qdev->lrg_buf[i];
memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));

skb = netdev_alloc_skb(qdev->ndev,
qdev->lrg_buffer_len);
if (unlikely(!skb)) {

@@ -2767,11 +2770,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
ql_free_large_buffers(qdev);
return -ENOMEM;
} else {

lrg_buf_cb = &qdev->lrg_buf[i];
memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
lrg_buf_cb->index = i;
lrg_buf_cb->skb = skb;
/*
* We save some space to copy the ethhdr from first
* buffer

@@ -2793,6 +2792,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
return -ENOMEM;
}

lrg_buf_cb->skb = skb;
dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
dma_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -

@@ -793,6 +793,7 @@ EXPORT_SYMBOL_GPL(cpsw_ale_start);
void cpsw_ale_stop(struct cpsw_ale *ale)
{
del_timer_sync(&ale->timer);
cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
}
EXPORT_SYMBOL_GPL(cpsw_ale_stop);

@@ -877,6 +878,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS;
}

cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
return ale;
}
EXPORT_SYMBOL_GPL(cpsw_ale_create);
@@ -181,6 +181,9 @@ static int fjes_acpi_add(struct acpi_device *device)
/* create platform_device */
plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
ARRAY_SIZE(fjes_resource));
if (IS_ERR(plat_dev))
return PTR_ERR(plat_dev);

device->driver_data = plat_dev;

return 0;

@@ -86,6 +86,10 @@
#define DP83867_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8)
#define DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT 8

/* CFG3 bits */
#define DP83867_CFG3_INT_OE BIT(7)
#define DP83867_CFG3_ROBUST_AUTO_MDIX BIT(9)

/* CFG4 bits */
#define DP83867_CFG4_PORT_MIRROR_EN BIT(0)

@@ -331,12 +335,13 @@ static int dp83867_config_init(struct phy_device *phydev)
return ret;
}

val = phy_read(phydev, DP83867_CFG3);
/* Enable Interrupt output INT_OE in CFG3 register */
if (phy_interrupt_is_valid(phydev)) {
val = phy_read(phydev, DP83867_CFG3);
val |= BIT(7);
phy_write(phydev, DP83867_CFG3, val);
}
if (phy_interrupt_is_valid(phydev))
val |= DP83867_CFG3_INT_OE;

val |= DP83867_CFG3_ROBUST_AUTO_MDIX;
phy_write(phydev, DP83867_CFG3, val);

if (dp83867->port_mirroring != DP83867_PORT_MIRROING_KEEP)
dp83867_config_port_mirroring(phydev);
Some files were not shown because too many files have changed in this diff.