Merge android-4.19.113 (248555d) into msm-4.19
* refs/heads/tmp-248555d:
  Linux 4.19.113
  staging: greybus: loopback_test: fix potential path truncations
  staging: greybus: loopback_test: fix potential path truncation
  drm/bridge: dw-hdmi: fix AVI frame colorimetry
  arm64: smp: fix crash_smp_send_stop() behaviour
  arm64: smp: fix smp_send_stop() behaviour
  ALSA: hda/realtek: Fix pop noise on ALC225
  Revert "ipv6: Fix handling of LLA with VRF and sockets bound to VRF"
  Revert "vrf: mark skb for multicast or link-local as enslaved to VRF"
  futex: Unbreak futex hashing
  futex: Fix inode life-time issue
  kbuild: Disable -Wpointer-to-enum-cast
  iio: light: vcnl4000: update sampling periods for vcnl4200
  USB: cdc-acm: fix rounding error in TIOCSSERIAL
  USB: cdc-acm: fix close_delay and closing_wait units in TIOCSSERIAL
  x86/mm: split vmalloc_sync_all()
  page-flags: fix a crash at SetPageError(THP_SWAP)
  mm, slub: prevent kmalloc_node crashes and memory leaks
  mm: slub: be more careful about the double cmpxchg of freelist
  memcg: fix NULL pointer dereference in __mem_cgroup_usage_unregister_event
  drm/lease: fix WARNING in idr_destroy
  drm/amd/amdgpu: Fix GPR read from debugfs (v2)
  btrfs: fix log context list corruption after rename whiteout error
  xhci: Do not open code __print_symbolic() in xhci trace events
  rtc: max8907: add missing select REGMAP_IRQ
  intel_th: pci: Add Elkhart Lake CPU support
  intel_th: Fix user-visible error codes
  staging/speakup: fix get_word non-space look-ahead
  staging: greybus: loopback_test: fix poll-mask build breakage
  staging: rtl8188eu: Add device id for MERCUSYS MW150US v2
  mmc: sdhci-of-at91: fix cd-gpios for SAMA5D2
  mmc: rtsx_pci: Fix support for speed-modes that relies on tuning
  iio: adc: at91-sama5d2_adc: fix differential channels in triggered mode
  iio: magnetometer: ak8974: Fix negative raw values in sysfs
  iio: trigger: stm32-timer: disable master mode when stopping
  iio: st_sensors: remap SMO8840 to LIS2DH12
  ALSA: pcm: oss: Remove WARNING from snd_pcm_plug_alloc() checks
  ALSA: pcm: oss: Avoid plugin buffer overflow
  ALSA: seq: oss: Fix running status after receiving sysex
  ALSA: seq: virmidi: Fix running status after receiving sysex
  ALSA: line6: Fix endless MIDI read loop
  usb: xhci: apply XHCI_SUSPEND_DELAY to AMD XHCI controller 1022:145c
  USB: serial: pl2303: add device-id for HP LD381
  usb: host: xhci-plat: add a shutdown
  USB: serial: option: add ME910G1 ECM composition 0x110b
  usb: quirks: add NO_LPM quirk for RTL8153 based ethernet adapters
  USB: Disable LPM on WD19's Realtek Hub
  parse-maintainers: Mark as executable
  block, bfq: fix overwrite of bfq_group pointer in bfq_find_set_group()
  xenbus: req->err should be updated before req->state
  xenbus: req->body should be updated before req->state
  drm/amd/display: fix dcc swath size calculations on dcn1
  drm/amd/display: Clear link settings on MST disable connector
  riscv: avoid the PIC offset of static percpu data in module beyond 2G limits
  dm integrity: use dm_bio_record and dm_bio_restore
  dm bio record: save/restore bi_end_io and bi_integrity
  altera-stapl: altera_get_note: prevent write beyond end of 'key'
  drivers/perf: arm_pmu_acpi: Fix incorrect checking of gicc pointer
  drm/exynos: dsi: fix workaround for the legacy clock name
  drm/exynos: dsi: propagate error value and silence meaningless warning
  spi/zynqmp: remove entry that causes a cs glitch
  spi: pxa2xx: Add CS control clock quirk
  ARM: dts: dra7: Add "dma-ranges" property to PCIe RC DT nodes
  powerpc: Include .BTF section
  spi: qup: call spi_qup_pm_resume_runtime before suspending
  drm/mediatek: Find the cursor plane instead of hard coding it
  ANDROID: ABI: Update ABI with CONFIG_SOC_BUS enabled
  ANDROID: GKI: Add CONFIG_SOC_BUS to gki_defconfig
  ANDROID: kbuild: do not merge .section..* into .section in modules
  ANDROID: scsi: ufs: add ->map_sg_crypto() variant op
  ANDROID: GKI: Update ABI after fixing vm_event_item diffs
  ANDROID: GKI: mm: vmstat: add pageoutclean
  ANDROID: GKI: mm: add struct/enum fields for SPECULATIVE_PAGE_FAULTS
  ANDROID: GKI: Update ABI after fixing mm diffs
  ANDROID: GKI: Add write_pending and max_writes fields to swap_info_struct
  ANDROID: GKI: memblock: Add memblock_overlaps_memory() to fix ABI diff
  ANDROID: GKI: net: remove conditional members causing ABI diffs
  ANDROID: GKI: mm: introduce NR_UNRECLAIMABLE_PAGES
  ANDROID: GKI: Update ABI
  ANDROID: GKI: sound: soc: Resolve ABI diff for struct snd_compr_stream
  ANDROID: GKI: sound: pcm: Add field hw_no_buffer to snd_pcm_substream
  ANDROID: GKI: ALSA: core: Add snd_soc_card_change_online_state() API
  ANDROID: GKI: SoC: core: Introduce macro SOC_SINGLE_MULTI_EXT
  ANDROID: GKI: ALSA: PCM: User control API implementation
  ANDROID: GKI: ALSA: PCM: volume API implementation
  ANDROID: GKI: kernel: tick-sched: Add API to get the next wakeup for a CPU
  ANDROID: GKI: extcon: Add extcon_register_blocking_notifier API.
  UPSTREAM: bpf: Explicitly memset some bpf info structures declared on the stack
  UPSTREAM: bpf: Explicitly memset the bpf_attr structure
  ANDROID: ABI: Update abi after enabling CONFIG_USB_PHY
  ANDROID: GKI: Enable CONFIG_USB_PHY for usb drivers like dwc3
  UPSTREAM: driver core: Add device link support for SYNC_STATE_ONLY flag
  ANDROID: Conflict fix for merging 4.19.112

Conflicts:
  arch/arm64/kernel/smp.c
  drivers/extcon/extcon.c
  include/linux/extcon.h
  include/linux/mm.h
  include/linux/mm_types.h
  include/linux/vm_event_item.h
  include/sound/core.h
  kernel/time/tick-sched.c
  mm/vmstat.c
  sound/core/init.c
  sound/soc/soc-core.c

Change-Id: Ibffc219e0859b7d5c9580c930664eea5b822a704
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
This commit is contained in:
commit c161b2d152
88 changed files with 42415 additions and 41903 deletions
Makefile | 2
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 112
SUBLEVEL = 113
EXTRAVERSION =
NAME = "People's Front"
abi_gki_aarch64.xml | 83524
File diff suppressed because it is too large.
@@ -324,6 +324,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x20013000 0x13000 0 0xffed000>;
dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
@@ -376,6 +377,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x30013000 0x13000 0 0xffed000>;
dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
@@ -371,7 +371,6 @@ CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_EXTCON=y
CONFIG_IIO=y
CONFIG_PWM=y
CONFIG_QCOM_PDC=y
@@ -970,11 +970,29 @@ void tick_broadcast(const struct cpumask *mask)
}
#endif

/*
* The number of CPUs online, not counting this CPU (which may not be
* fully online and so not counted in num_online_cpus()).
*/
static inline unsigned int num_other_online_cpus(void)
{
unsigned int this_cpu_online = cpu_online(smp_processor_id());

return num_online_cpus() - this_cpu_online;
}

static inline unsigned int num_other_active_cpus(void)
{
unsigned int this_cpu_active = cpu_active(smp_processor_id());

return num_active_cpus() - this_cpu_active;
}

void smp_send_stop(void)
{
unsigned long timeout;

if (num_online_cpus() > 1) {
if (num_other_online_cpus()) {
cpumask_t mask;

cpumask_copy(&mask, cpu_online_mask);

@@ -987,10 +1005,10 @@ void smp_send_stop(void)

/* Wait up to one second for other CPUs to stop */
timeout = USEC_PER_SEC;
while (num_active_cpus() > 1 && timeout--)
while (num_other_active_cpus() && timeout--)
udelay(1);

if (num_active_cpus() > 1)
if (num_other_active_cpus())
pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
cpumask_pr_args(cpu_online_mask));

@@ -1013,7 +1031,11 @@ void crash_smp_send_stop(void)

cpus_stopped = 1;

if (num_online_cpus() == 1) {
/*
* If this cpu is the only one alive at this point in time, online or
* not, there are no stop messages to be sent around, so just back out.
*/
if (num_other_online_cpus() == 0) {
sdei_mask_local_cpu();
return;
}

@@ -1021,7 +1043,7 @@ void crash_smp_send_stop(void)
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);

atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());

pr_crit("SMP: stopping secondary CPUs\n");
smp_cross_call(&mask, IPI_CPU_CRASH_STOP);
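The hunks above replace raw num_online_cpus() checks with helpers that subtract the calling CPU explicitly, so the stop paths also behave when the caller is not (or not yet) counted as online. A minimal user-space sketch of that counting idea, with a hypothetical online[] array standing in for the kernel's CPU masks:

    #include <stdio.h>

    /* Hypothetical stand-in for the kernel's online mask, not the real API. */
    static int online[4] = { 1, 1, 1, 0 };   /* CPU 3 is still booting */

    static unsigned int count_online(void)
    {
        unsigned int n = 0;
        for (int i = 0; i < 4; i++)
            n += online[i];
        return n;
    }

    /* Count every online CPU except the caller; if the caller is not marked
     * online yet, nothing is subtracted and all online CPUs are "others". */
    static unsigned int num_other_online_cpus(unsigned int this_cpu)
    {
        return count_online() - (unsigned int)online[this_cpu];
    }

    int main(void)
    {
        printf("others seen from booting CPU 3: %u\n", num_other_online_cpus(3));
        printf("others seen from online CPU 0:  %u\n", num_other_online_cpus(0));
        return 0;
    }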
@@ -322,6 +322,12 @@ SECTIONS
*(.branch_lt)
}

#ifdef CONFIG_DEBUG_INFO_BTF
.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {
*(.BTF)
}
#endif

.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
__start_opd = .;
KEEP(*(.opd))
|
@ -16,6 +16,10 @@
|
|||
#include <linux/err.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/moduleloader.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/sizes.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/sections.h>
|
||||
|
||||
static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
|
||||
{
|
||||
|
@ -394,3 +398,15 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
|
||||
#define VMALLOC_MODULE_START \
|
||||
max(PFN_ALIGN((unsigned long)&_end - SZ_2G), VMALLOC_START)
|
||||
void *module_alloc(unsigned long size)
|
||||
{
|
||||
return __vmalloc_node_range(size, 1, VMALLOC_MODULE_START,
|
||||
VMALLOC_END, GFP_KERNEL,
|
||||
PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
|
||||
__builtin_return_address(0));
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -273,7 +273,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
|
|||
return pmd_k;
|
||||
}
|
||||
|
||||
void vmalloc_sync_all(void)
|
||||
static void vmalloc_sync(void)
|
||||
{
|
||||
unsigned long address;
|
||||
|
||||
|
@ -300,6 +300,16 @@ void vmalloc_sync_all(void)
|
|||
}
|
||||
}
|
||||
|
||||
void vmalloc_sync_mappings(void)
|
||||
{
|
||||
vmalloc_sync();
|
||||
}
|
||||
|
||||
void vmalloc_sync_unmappings(void)
|
||||
{
|
||||
vmalloc_sync();
|
||||
}
|
||||
|
||||
/*
|
||||
* 32-bit:
|
||||
*
|
||||
|
@ -402,11 +412,23 @@ static void dump_pagetable(unsigned long address)
|
|||
|
||||
#else /* CONFIG_X86_64: */
|
||||
|
||||
void vmalloc_sync_all(void)
|
||||
void vmalloc_sync_mappings(void)
|
||||
{
|
||||
/*
|
||||
* 64-bit mappings might allocate new p4d/pud pages
|
||||
* that need to be propagated to all tasks' PGDs.
|
||||
*/
|
||||
sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
|
||||
}
|
||||
|
||||
void vmalloc_sync_unmappings(void)
|
||||
{
|
||||
/*
|
||||
* Unmappings never allocate or free p4d/pud pages.
|
||||
* No work is required here.
|
||||
*/
|
||||
}
|
||||
|
||||
/*
|
||||
* 64-bit:
|
||||
*
|
||||
|
|
|
@@ -525,12 +525,13 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
*/
entity = &bfqg->entity;
for_each_entity(entity) {
bfqg = container_of(entity, struct bfq_group, entity);
if (bfqg != bfqd->root_group) {
parent = bfqg_parent(bfqg);
struct bfq_group *curr_bfqg = container_of(entity,
struct bfq_group, entity);
if (curr_bfqg != bfqd->root_group) {
parent = bfqg_parent(curr_bfqg);
if (!parent)
parent = bfqd->root_group;
bfq_group_set_parent(bfqg, parent);
bfq_group_set_parent(curr_bfqg, parent);
}
}
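The bfq hunk above stops reusing the loop cursor bfqg as a scratch variable inside for_each_entity() and introduces a local curr_bfqg, so the pointer the caller passed in is not silently overwritten. A small hedged illustration of the general pattern, using generic C rather than the BFQ structures:

    #include <stdio.h>

    struct node { int id; struct node *parent; };

    /* Walk to the root. Writing the intermediate result into the parameter
     * itself would lose the node the caller handed us; a local cursor
     * (the role curr_bfqg plays above) keeps it intact. */
    static const struct node *find_root(const struct node *n)
    {
        const struct node *cur = n;      /* local cursor */

        while (cur->parent)
            cur = cur->parent;
        return cur;                      /* n is still the original node */
    }

    int main(void)
    {
        struct node root = { 0, NULL };
        struct node mid  = { 1, &root };
        struct node leaf = { 2, &mid };

        printf("leaf %d has root %d\n", leaf.id, find_root(&leaf)->id);
        return 0;
    }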
@@ -201,7 +201,7 @@ static int ghes_estatus_pool_expand(unsigned long len)
* New allocation must be visible in all pgd before it can be found by
* an NMI allocating from the pool.
*/
vmalloc_sync_all();
vmalloc_sync_mappings();

return gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
}
@@ -713,7 +713,7 @@ static void __device_links_queue_sync_state(struct device *dev,
return;

list_for_each_entry(link, &dev->links.consumers, s_node) {
if (link->flags & DL_FLAG_STATELESS)
if (!(link->flags & DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_ACTIVE)
return;
@@ -694,11 +694,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
ssize_t result = 0;
uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

if (size & 3 || *pos & 3)
if (size > 4096 || size & 3 || *pos & 3)
return -EINVAL;

/* decode offset */
offset = *pos & GENMASK_ULL(11, 0);
offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
se = (*pos & GENMASK_ULL(19, 12)) >> 12;
sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
@@ -729,7 +729,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
while (size) {
uint32_t value;

value = data[offset++];
value = data[result >> 2];
r = put_user(value, (uint32_t *)buf);
if (r) {
result = r;
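The debugfs fix above bounds the read size and decodes the register index from the low bits of *pos, shifting right by 2 because the file position is in bytes while the GPR data is indexed in 32-bit words. A rough user-space sketch of that bitfield decoding, assuming the same field layout as the hunk:

    #include <stdio.h>

    #define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
        unsigned long long pos = (3ULL << 28) | (2ULL << 20) | (1ULL << 12) | 0x40;

        unsigned int offset = (unsigned int)((pos & GENMASK_ULL(11, 0)) >> 2); /* bytes -> dword index */
        unsigned int se     = (unsigned int)((pos & GENMASK_ULL(19, 12)) >> 12);
        unsigned int sh     = (unsigned int)((pos & GENMASK_ULL(27, 20)) >> 20);
        unsigned int cu     = (unsigned int)((pos & GENMASK_ULL(35, 28)) >> 28);

        printf("offset=%u se=%u sh=%u cu=%u\n", offset, se, sh, cu);
        return 0;
    }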
|
@ -419,6 +419,7 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
|
|||
dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
|
||||
dc_sink_release(aconnector->dc_sink);
|
||||
aconnector->dc_sink = NULL;
|
||||
aconnector->dc_link->cur_link_settings.lane_count = 0;
|
||||
}
|
||||
|
||||
drm_connector_unregister(connector);
|
||||
|
|
|
@ -684,8 +684,8 @@ static void hubbub1_det_request_size(
|
|||
|
||||
hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
|
||||
|
||||
swath_bytes_horz_wc = height * blk256_height * bpe;
|
||||
swath_bytes_vert_wc = width * blk256_width * bpe;
|
||||
swath_bytes_horz_wc = width * blk256_height * bpe;
|
||||
swath_bytes_vert_wc = height * blk256_width * bpe;
|
||||
|
||||
*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
|
||||
false : /* full 256B request */
|
||||
|
|
|
@ -1364,28 +1364,34 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
|
|||
frame.colorspace = HDMI_COLORSPACE_RGB;
|
||||
|
||||
/* Set up colorimetry */
|
||||
switch (hdmi->hdmi_data.enc_out_encoding) {
|
||||
case V4L2_YCBCR_ENC_601:
|
||||
if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
|
||||
frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
|
||||
else
|
||||
if (!hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
|
||||
switch (hdmi->hdmi_data.enc_out_encoding) {
|
||||
case V4L2_YCBCR_ENC_601:
|
||||
if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
|
||||
frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
|
||||
else
|
||||
frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
|
||||
frame.extended_colorimetry =
|
||||
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
|
||||
break;
|
||||
case V4L2_YCBCR_ENC_709:
|
||||
if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
|
||||
frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
|
||||
else
|
||||
frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
|
||||
frame.extended_colorimetry =
|
||||
HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
|
||||
break;
|
||||
default: /* Carries no data */
|
||||
frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
|
||||
frame.extended_colorimetry =
|
||||
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
frame.colorimetry = HDMI_COLORIMETRY_NONE;
|
||||
frame.extended_colorimetry =
|
||||
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
|
||||
break;
|
||||
case V4L2_YCBCR_ENC_709:
|
||||
if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
|
||||
frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
|
||||
else
|
||||
frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
|
||||
frame.extended_colorimetry =
|
||||
HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
|
||||
break;
|
||||
default: /* Carries no data */
|
||||
frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
|
||||
frame.extended_colorimetry =
|
||||
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
|
||||
break;
|
||||
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
|
||||
}
|
||||
|
||||
frame.scan_mode = HDMI_SCAN_MODE_NONE;
|
||||
|
|
|
@ -545,10 +545,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
|
|||
}
|
||||
|
||||
DRM_DEBUG_LEASE("Creating lease\n");
|
||||
/* lessee will take the ownership of leases */
|
||||
lessee = drm_lease_create(lessor, &leases);
|
||||
|
||||
if (IS_ERR(lessee)) {
|
||||
ret = PTR_ERR(lessee);
|
||||
idr_destroy(&leases);
|
||||
goto out_leases;
|
||||
}
|
||||
|
||||
|
@ -583,7 +585,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
|
|||
|
||||
out_leases:
|
||||
put_unused_fd(fd);
|
||||
idr_destroy(&leases);
|
||||
|
||||
DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
|
||||
return ret;
|
||||
|
|
|
@ -1722,8 +1722,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
|
|||
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
|
||||
dsi->supplies);
|
||||
if (ret) {
|
||||
dev_info(dev, "failed to get regulators: %d\n", ret);
|
||||
return -EPROBE_DEFER;
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_info(dev, "failed to get regulators: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
dsi->clks = devm_kcalloc(dev,
|
||||
|
@ -1736,9 +1737,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
|
|||
dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
|
||||
if (IS_ERR(dsi->clks[i])) {
|
||||
if (strcmp(clk_names[i], "sclk_mipi") == 0) {
|
||||
strcpy(clk_names[i], OLD_SCLK_MIPI_CLK_NAME);
|
||||
i--;
|
||||
continue;
|
||||
dsi->clks[i] = devm_clk_get(dev,
|
||||
OLD_SCLK_MIPI_CLK_NAME);
|
||||
if (!IS_ERR(dsi->clks[i]))
|
||||
continue;
|
||||
}
|
||||
|
||||
dev_info(dev, "failed to get the clock: %s\n",
|
||||
|
|
|
@ -506,10 +506,18 @@ static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
|
|||
|
||||
static int mtk_drm_crtc_init(struct drm_device *drm,
|
||||
struct mtk_drm_crtc *mtk_crtc,
|
||||
struct drm_plane *primary,
|
||||
struct drm_plane *cursor, unsigned int pipe)
|
||||
unsigned int pipe)
|
||||
{
|
||||
int ret;
|
||||
struct drm_plane *primary = NULL;
|
||||
struct drm_plane *cursor = NULL;
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < mtk_crtc->layer_nr; i++) {
|
||||
if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
|
||||
primary = &mtk_crtc->planes[i];
|
||||
else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
|
||||
cursor = &mtk_crtc->planes[i];
|
||||
}
|
||||
|
||||
ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
|
||||
&mtk_crtc_funcs, NULL);
|
||||
|
@ -622,9 +630,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
|
|||
goto unprepare;
|
||||
}
|
||||
|
||||
ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
|
||||
mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
|
||||
NULL, pipe);
|
||||
ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe);
|
||||
if (ret < 0)
|
||||
goto unprepare;
|
||||
drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
|
||||
|
|
|
@ -491,7 +491,7 @@ static int msc_configure(struct msc *msc)
|
|||
lockdep_assert_held(&msc->buf_mutex);
|
||||
|
||||
if (msc->mode > MSC_MODE_MULTI)
|
||||
return -ENOTSUPP;
|
||||
return -EINVAL;
|
||||
|
||||
if (msc->mode == MSC_MODE_MULTI)
|
||||
msc_buffer_clear_hw_header(msc);
|
||||
|
@ -942,7 +942,7 @@ static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
|
|||
} else if (msc->mode == MSC_MODE_MULTI) {
|
||||
ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
|
||||
} else {
|
||||
ret = -ENOTSUPP;
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
if (!ret) {
|
||||
|
@ -1165,7 +1165,7 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
|
|||
if (ret >= 0)
|
||||
*ppos = iter->offset;
|
||||
} else {
|
||||
ret = -ENOTSUPP;
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
put_count:
|
||||
|
|
|
@ -210,6 +210,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
|
|||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
|
||||
.driver_data = (kernel_ulong_t)&intel_th_2x,
|
||||
},
|
||||
{
|
||||
/* Elkhart Lake CPU */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529),
|
||||
.driver_data = (kernel_ulong_t)&intel_th_2x,
|
||||
},
|
||||
{
|
||||
/* Elkhart Lake */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),
|
||||
|
|
|
@ -107,7 +107,7 @@ MODULE_DEVICE_TABLE(of, st_accel_of_match);
|
|||
|
||||
#ifdef CONFIG_ACPI
|
||||
static const struct acpi_device_id st_accel_acpi_match[] = {
|
||||
{"SMO8840", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
|
||||
{"SMO8840", (kernel_ulong_t)LIS2DH12_ACCEL_DEV_NAME},
|
||||
{"SMO8A90", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
|
||||
{ },
|
||||
};
|
||||
|
|
|
@ -731,6 +731,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
|
|||
|
||||
for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
|
||||
struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit);
|
||||
u32 cor;
|
||||
|
||||
if (!chan)
|
||||
continue;
|
||||
|
@ -739,6 +740,20 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
|
|||
chan->type == IIO_PRESSURE)
|
||||
continue;
|
||||
|
||||
if (state) {
|
||||
cor = at91_adc_readl(st, AT91_SAMA5D2_COR);
|
||||
|
||||
if (chan->differential)
|
||||
cor |= (BIT(chan->channel) |
|
||||
BIT(chan->channel2)) <<
|
||||
AT91_SAMA5D2_COR_DIFF_OFFSET;
|
||||
else
|
||||
cor &= ~(BIT(chan->channel) <<
|
||||
AT91_SAMA5D2_COR_DIFF_OFFSET);
|
||||
|
||||
at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
|
||||
}
|
||||
|
||||
if (state) {
|
||||
at91_adc_writel(st, AT91_SAMA5D2_CHER,
|
||||
BIT(chan->channel));
|
||||
|
|
|
@ -150,9 +150,10 @@ static int vcnl4200_init(struct vcnl4000_data *data)
|
|||
data->al_scale = 24000;
|
||||
data->vcnl4200_al.reg = VCNL4200_AL_DATA;
|
||||
data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
|
||||
/* Integration time is 50ms, but the experiments show 54ms in total. */
|
||||
data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
|
||||
data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
|
||||
/* Default wait time is 50ms, add 20% tolerance. */
|
||||
data->vcnl4200_al.sampling_rate = ktime_set(0, 60000 * 1000);
|
||||
/* Default wait time is 4.8ms, add 20% tolerance. */
|
||||
data->vcnl4200_ps.sampling_rate = ktime_set(0, 5760 * 1000);
|
||||
data->vcnl4200_al.last_measurement = ktime_set(0, 0);
|
||||
data->vcnl4200_ps.last_measurement = ktime_set(0, 0);
|
||||
mutex_init(&data->vcnl4200_al.lock);
|
||||
|
|
|
@@ -563,7 +563,7 @@ static int ak8974_read_raw(struct iio_dev *indio_dev,
* We read all axes and discard all but one, for optimized
* reading, use the triggered buffer.
*/
*val = le16_to_cpu(hw_values[chan->address]);
*val = (s16)le16_to_cpu(hw_values[chan->address]);

ret = IIO_VAL_INT;
}
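The one-line ak8974 change above casts the 16-bit register value to s16 before it is widened, so negative field readings keep their sign instead of showing up as large positive numbers in sysfs. A tiny sketch of the difference, assuming the raw value is already in CPU byte order:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t raw = 0xFFF6;            /* -10 as 16-bit two's complement */
        int val_unsigned = raw;           /* widens to 65526: sign lost */
        int val_signed   = (int16_t)raw;  /* widens to -10: sign preserved */

        printf("without cast: %d, with (s16) cast: %d\n",
               val_unsigned, val_signed);
        return 0;
    }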
@ -161,7 +161,8 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void stm32_timer_stop(struct stm32_timer_trigger *priv)
|
||||
static void stm32_timer_stop(struct stm32_timer_trigger *priv,
|
||||
struct iio_trigger *trig)
|
||||
{
|
||||
u32 ccer, cr1;
|
||||
|
||||
|
@ -179,6 +180,12 @@ static void stm32_timer_stop(struct stm32_timer_trigger *priv)
|
|||
regmap_write(priv->regmap, TIM_PSC, 0);
|
||||
regmap_write(priv->regmap, TIM_ARR, 0);
|
||||
|
||||
/* Force disable master mode */
|
||||
if (stm32_timer_is_trgo2_name(trig->name))
|
||||
regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2, 0);
|
||||
else
|
||||
regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS, 0);
|
||||
|
||||
/* Make sure that registers are updated */
|
||||
regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
|
||||
}
|
||||
|
@ -197,7 +204,7 @@ static ssize_t stm32_tt_store_frequency(struct device *dev,
|
|||
return ret;
|
||||
|
||||
if (freq == 0) {
|
||||
stm32_timer_stop(priv);
|
||||
stm32_timer_stop(priv, trig);
|
||||
} else {
|
||||
ret = stm32_timer_start(priv, trig, freq);
|
||||
if (ret)
|
||||
|
|
|
@ -20,8 +20,13 @@
|
|||
struct dm_bio_details {
|
||||
struct gendisk *bi_disk;
|
||||
u8 bi_partno;
|
||||
int __bi_remaining;
|
||||
unsigned long bi_flags;
|
||||
struct bvec_iter bi_iter;
|
||||
bio_end_io_t *bi_end_io;
|
||||
#if defined(CONFIG_BLK_DEV_INTEGRITY)
|
||||
struct bio_integrity_payload *bi_integrity;
|
||||
#endif
|
||||
};
|
||||
|
||||
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
|
||||
|
@ -30,6 +35,11 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
|
|||
bd->bi_partno = bio->bi_partno;
|
||||
bd->bi_flags = bio->bi_flags;
|
||||
bd->bi_iter = bio->bi_iter;
|
||||
bd->__bi_remaining = atomic_read(&bio->__bi_remaining);
|
||||
bd->bi_end_io = bio->bi_end_io;
|
||||
#if defined(CONFIG_BLK_DEV_INTEGRITY)
|
||||
bd->bi_integrity = bio_integrity(bio);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
|
||||
|
@ -38,6 +48,11 @@ static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
|
|||
bio->bi_partno = bd->bi_partno;
|
||||
bio->bi_flags = bd->bi_flags;
|
||||
bio->bi_iter = bd->bi_iter;
|
||||
atomic_set(&bio->__bi_remaining, bd->__bi_remaining);
|
||||
bio->bi_end_io = bd->bi_end_io;
|
||||
#if defined(CONFIG_BLK_DEV_INTEGRITY)
|
||||
bio->bi_integrity = bd->bi_integrity;
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -6,6 +6,8 @@
|
|||
* This file is released under the GPL.
|
||||
*/
|
||||
|
||||
#include "dm-bio-record.h"
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/device-mapper.h>
|
||||
|
@ -276,11 +278,7 @@ struct dm_integrity_io {
|
|||
|
||||
struct completion *completion;
|
||||
|
||||
struct gendisk *orig_bi_disk;
|
||||
u8 orig_bi_partno;
|
||||
bio_end_io_t *orig_bi_end_io;
|
||||
struct bio_integrity_payload *orig_bi_integrity;
|
||||
struct bvec_iter orig_bi_iter;
|
||||
struct dm_bio_details bio_details;
|
||||
};
|
||||
|
||||
struct journal_completion {
|
||||
|
@ -1254,14 +1252,9 @@ static void integrity_end_io(struct bio *bio)
|
|||
{
|
||||
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
|
||||
|
||||
bio->bi_iter = dio->orig_bi_iter;
|
||||
bio->bi_disk = dio->orig_bi_disk;
|
||||
bio->bi_partno = dio->orig_bi_partno;
|
||||
if (dio->orig_bi_integrity) {
|
||||
bio->bi_integrity = dio->orig_bi_integrity;
|
||||
dm_bio_restore(&dio->bio_details, bio);
|
||||
if (bio->bi_integrity)
|
||||
bio->bi_opf |= REQ_INTEGRITY;
|
||||
}
|
||||
bio->bi_end_io = dio->orig_bi_end_io;
|
||||
|
||||
if (dio->completion)
|
||||
complete(dio->completion);
|
||||
|
@ -1347,7 +1340,7 @@ static void integrity_metadata(struct work_struct *w)
|
|||
}
|
||||
}
|
||||
|
||||
__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
|
||||
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
|
||||
unsigned pos;
|
||||
char *mem, *checksums_ptr;
|
||||
|
||||
|
@ -1391,7 +1384,7 @@ static void integrity_metadata(struct work_struct *w)
|
|||
if (likely(checksums != checksums_onstack))
|
||||
kfree(checksums);
|
||||
} else {
|
||||
struct bio_integrity_payload *bip = dio->orig_bi_integrity;
|
||||
struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
|
||||
|
||||
if (bip) {
|
||||
struct bio_vec biv;
|
||||
|
@ -1795,20 +1788,13 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
|
|||
} else
|
||||
dio->completion = NULL;
|
||||
|
||||
dio->orig_bi_iter = bio->bi_iter;
|
||||
|
||||
dio->orig_bi_disk = bio->bi_disk;
|
||||
dio->orig_bi_partno = bio->bi_partno;
|
||||
dm_bio_record(&dio->bio_details, bio);
|
||||
bio_set_dev(bio, ic->dev->bdev);
|
||||
|
||||
dio->orig_bi_integrity = bio_integrity(bio);
|
||||
bio->bi_integrity = NULL;
|
||||
bio->bi_opf &= ~REQ_INTEGRITY;
|
||||
|
||||
dio->orig_bi_end_io = bio->bi_end_io;
|
||||
bio->bi_end_io = integrity_end_io;
|
||||
|
||||
bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
|
||||
|
||||
generic_make_request(bio);
|
||||
|
||||
if (need_sync_io) {
|
||||
|
|
|
@ -2126,8 +2126,8 @@ static int altera_execute(struct altera_state *astate,
|
|||
return status;
|
||||
}
|
||||
|
||||
static int altera_get_note(u8 *p, s32 program_size,
|
||||
s32 *offset, char *key, char *value, int length)
|
||||
static int altera_get_note(u8 *p, s32 program_size, s32 *offset,
|
||||
char *key, char *value, int keylen, int vallen)
|
||||
/*
|
||||
* Gets key and value of NOTE fields in the JBC file.
|
||||
* Can be called in two modes: if offset pointer is NULL,
|
||||
|
@ -2184,7 +2184,7 @@ static int altera_get_note(u8 *p, s32 program_size,
|
|||
&p[note_table + (8 * i) + 4])];
|
||||
|
||||
if (value != NULL)
|
||||
strlcpy(value, value_ptr, length);
|
||||
strlcpy(value, value_ptr, vallen);
|
||||
|
||||
}
|
||||
}
|
||||
|
@ -2203,13 +2203,13 @@ static int altera_get_note(u8 *p, s32 program_size,
|
|||
strlcpy(key, &p[note_strings +
|
||||
get_unaligned_be32(
|
||||
&p[note_table + (8 * i)])],
|
||||
length);
|
||||
keylen);
|
||||
|
||||
if (value != NULL)
|
||||
strlcpy(value, &p[note_strings +
|
||||
get_unaligned_be32(
|
||||
&p[note_table + (8 * i) + 4])],
|
||||
length);
|
||||
vallen);
|
||||
|
||||
*offset = i + 1;
|
||||
}
|
||||
|
@ -2463,7 +2463,7 @@ int altera_init(struct altera_config *config, const struct firmware *fw)
|
|||
__func__, (format_version == 2) ? "Jam STAPL" :
|
||||
"pre-standardized Jam 1.1");
|
||||
while (altera_get_note((u8 *)fw->data, fw->size,
|
||||
&offset, key, value, 256) == 0)
|
||||
&offset, key, value, 32, 256) == 0)
|
||||
printk(KERN_INFO "%s: NOTE \"%s\" = \"%s\"\n",
|
||||
__func__, key, value);
|
||||
}
|
||||
|
|
|
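The altera-stapl change above gives altera_get_note() separate keylen and vallen bounds instead of one shared length, because the key buffer at the call site is only 32 bytes while the value buffer is 256. A hedged user-space sketch of the bounded-copy idea, using snprintf() in place of the kernel's strlcpy():

    #include <stddef.h>
    #include <stdio.h>

    /* Copy a NOTE key/value pair with independent bounds, mimicking the
     * strlcpy(key, ..., keylen) / strlcpy(value, ..., vallen) split. */
    static void copy_note(char *key, size_t keylen, char *value, size_t vallen,
                          const char *src_key, const char *src_value)
    {
        snprintf(key, keylen, "%s", src_key);     /* truncates safely at keylen */
        snprintf(value, vallen, "%s", src_value); /* truncates safely at vallen */
    }

    int main(void)
    {
        char key[32], value[256];

        copy_note(key, sizeof(key), value, sizeof(value),
                  "a_rather_long_note_key_that_would_overflow_32_bytes",
                  "some value");
        printf("key=\"%s\" value=\"%s\"\n", key, value);
        return 0;
    }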
@ -369,6 +369,6 @@ static const struct pcr_ops rts522a_pcr_ops = {
|
|||
void rts522a_init_params(struct rtsx_pcr *pcr)
|
||||
{
|
||||
rts5227_init_params(pcr);
|
||||
|
||||
pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
|
||||
pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
|
||||
}
|
||||
|
|
|
@ -623,6 +623,7 @@ static const struct pcr_ops rts524a_pcr_ops = {
|
|||
void rts524a_init_params(struct rtsx_pcr *pcr)
|
||||
{
|
||||
rts5249_init_params(pcr);
|
||||
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
|
||||
pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
|
||||
pcr->option.ltr_l1off_snooze_sspwrgate =
|
||||
LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
|
||||
|
@ -731,6 +732,7 @@ static const struct pcr_ops rts525a_pcr_ops = {
|
|||
void rts525a_init_params(struct rtsx_pcr *pcr)
|
||||
{
|
||||
rts5249_init_params(pcr);
|
||||
pcr->tx_initial_phase = SET_CLOCK_PHASE(25, 29, 11);
|
||||
pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
|
||||
pcr->option.ltr_l1off_snooze_sspwrgate =
|
||||
LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
|
||||
|
|
|
@ -712,7 +712,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
|
|||
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
|
||||
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
|
||||
pcr->aspm_en = ASPM_L1_EN;
|
||||
pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
|
||||
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
|
||||
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
|
||||
|
||||
pcr->ic_version = rts5260_get_ic_version(pcr);
|
||||
|
|
|
@ -618,19 +618,22 @@ static int sd_change_phase(struct realtek_pci_sdmmc *host,
|
|||
u8 sample_point, bool rx)
|
||||
{
|
||||
struct rtsx_pcr *pcr = host->pcr;
|
||||
|
||||
u16 SD_VP_CTL = 0;
|
||||
dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n",
|
||||
__func__, rx ? "RX" : "TX", sample_point);
|
||||
|
||||
rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
|
||||
if (rx)
|
||||
if (rx) {
|
||||
SD_VP_CTL = SD_VPRX_CTL;
|
||||
rtsx_pci_write_register(pcr, SD_VPRX_CTL,
|
||||
PHASE_SELECT_MASK, sample_point);
|
||||
else
|
||||
} else {
|
||||
SD_VP_CTL = SD_VPTX_CTL;
|
||||
rtsx_pci_write_register(pcr, SD_VPTX_CTL,
|
||||
PHASE_SELECT_MASK, sample_point);
|
||||
rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
|
||||
rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET,
|
||||
}
|
||||
rtsx_pci_write_register(pcr, SD_VP_CTL, PHASE_NOT_RESET, 0);
|
||||
rtsx_pci_write_register(pcr, SD_VP_CTL, PHASE_NOT_RESET,
|
||||
PHASE_NOT_RESET);
|
||||
rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0);
|
||||
rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
|
||||
|
|
|
@ -126,7 +126,8 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
|
|||
{
|
||||
sdhci_reset(host, mask);
|
||||
|
||||
if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
|
||||
if ((host->mmc->caps & MMC_CAP_NONREMOVABLE)
|
||||
|| mmc_gpio_get_cd(host->mmc) >= 0)
|
||||
sdhci_at91_set_force_card_detect(host);
|
||||
}
|
||||
|
||||
|
@ -405,8 +406,11 @@ static int sdhci_at91_probe(struct platform_device *pdev)
|
|||
* detection procedure using the SDMCC_CD signal is bypassed.
|
||||
* This bit is reset when a software reset for all command is performed
|
||||
* so we need to implement our own reset function to set back this bit.
|
||||
*
|
||||
* WA: SAMA5D2 doesn't drive CMD if using CD GPIO line.
|
||||
*/
|
||||
if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
|
||||
if ((host->mmc->caps & MMC_CAP_NONREMOVABLE)
|
||||
|| mmc_gpio_get_cd(host->mmc) >= 0)
|
||||
sdhci_at91_set_force_card_detect(host);
|
||||
|
||||
pm_runtime_put_autosuspend(&pdev->dev);
|
||||
|
|
|
@ -993,23 +993,24 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
|
|||
struct sk_buff *skb)
|
||||
{
|
||||
int orig_iif = skb->skb_iif;
|
||||
bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
|
||||
bool is_ndisc = ipv6_ndisc_frame(skb);
|
||||
bool need_strict;
|
||||
|
||||
/* loopback, multicast & non-ND link-local traffic; do not push through
|
||||
* packet taps again. Reset pkt_type for upper layers to process skb
|
||||
/* loopback traffic; do not push through packet taps again.
|
||||
* Reset pkt_type for upper layers to process skb
|
||||
*/
|
||||
if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
|
||||
if (skb->pkt_type == PACKET_LOOPBACK) {
|
||||
skb->dev = vrf_dev;
|
||||
skb->skb_iif = vrf_dev->ifindex;
|
||||
IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
|
||||
if (skb->pkt_type == PACKET_LOOPBACK)
|
||||
skb->pkt_type = PACKET_HOST;
|
||||
skb->pkt_type = PACKET_HOST;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* if packet is NDISC then keep the ingress interface */
|
||||
if (!is_ndisc) {
|
||||
/* if packet is NDISC or addressed to multicast or link-local
|
||||
* then keep the ingress interface
|
||||
*/
|
||||
need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
|
||||
if (!ipv6_ndisc_frame(skb) && !need_strict) {
|
||||
vrf_rx_stats(vrf_dev, skb->len);
|
||||
skb->dev = vrf_dev;
|
||||
skb->skb_iif = vrf_dev->ifindex;
|
||||
|
|
|
@ -27,8 +27,6 @@ static int arm_pmu_acpi_register_irq(int cpu)
|
|||
int gsi, trigger;
|
||||
|
||||
gicc = acpi_cpu_get_madt_gicc(cpu);
|
||||
if (WARN_ON(!gicc))
|
||||
return -EINVAL;
|
||||
|
||||
gsi = gicc->performance_interrupt;
|
||||
|
||||
|
@ -67,11 +65,10 @@ static void arm_pmu_acpi_unregister_irq(int cpu)
|
|||
int gsi;
|
||||
|
||||
gicc = acpi_cpu_get_madt_gicc(cpu);
|
||||
if (!gicc)
|
||||
return;
|
||||
|
||||
gsi = gicc->performance_interrupt;
|
||||
acpi_unregister_gsi(gsi);
|
||||
if (gsi)
|
||||
acpi_unregister_gsi(gsi);
|
||||
}
|
||||
|
||||
static int arm_pmu_acpi_parse_irqs(void)
|
||||
|
|
|
@@ -314,6 +314,7 @@ config RTC_DRV_MAX6900
config RTC_DRV_MAX8907
	tristate "Maxim MAX8907"
	depends on MFD_MAX8907 || COMPILE_TEST
	select REGMAP_IRQ
	help
	  If you say yes here you will get support for the
	  RTC of Maxim MAX8907 PMIC.
@ -456,6 +456,14 @@ int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba,
|
|||
return ufshcd_prepare_lrbp_crypto_spec(hba, cmd, lrbp);
|
||||
}
|
||||
|
||||
int ufshcd_map_sg_crypto(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
|
||||
{
|
||||
if (hba->crypto_vops && hba->crypto_vops->map_sg_crypto)
|
||||
return hba->crypto_vops->map_sg_crypto(hba, lrbp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba,
|
||||
struct scsi_cmnd *cmd,
|
||||
struct ufshcd_lrb *lrbp)
|
||||
|
|
|
@ -80,6 +80,8 @@ int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba,
|
|||
struct scsi_cmnd *cmd,
|
||||
struct ufshcd_lrb *lrbp);
|
||||
|
||||
int ufshcd_map_sg_crypto(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
|
||||
|
||||
int ufshcd_complete_lrbp_crypto(struct ufs_hba *hba,
|
||||
struct scsi_cmnd *cmd,
|
||||
struct ufshcd_lrb *lrbp);
|
||||
|
@ -133,6 +135,12 @@ static inline int ufshcd_prepare_lrbp_crypto(struct ufs_hba *hba,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline int ufshcd_map_sg_crypto(struct ufs_hba *hba,
|
||||
struct ufshcd_lrb *lrbp)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline bool ufshcd_lrbp_crypto_enabled(struct ufshcd_lrb *lrbp)
|
||||
{
|
||||
return false;
|
||||
|
|
|
@ -3336,7 +3336,7 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
|
|||
lrbp->utr_descriptor_ptr->prd_table_length = 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return ufshcd_map_sg_crypto(hba, lrbp);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -419,6 +419,7 @@ struct ufs_hba_crypto_variant_ops {
|
|||
int (*prepare_lrbp_crypto)(struct ufs_hba *hba,
|
||||
struct scsi_cmnd *cmd,
|
||||
struct ufshcd_lrb *lrbp);
|
||||
int (*map_sg_crypto)(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
|
||||
int (*complete_lrbp_crypto)(struct ufs_hba *hba,
|
||||
struct scsi_cmnd *cmd,
|
||||
struct ufshcd_lrb *lrbp);
|
||||
|
|
|
@ -76,6 +76,10 @@ MODULE_ALIAS("platform:pxa2xx-spi");
|
|||
#define LPSS_CAPS_CS_EN_SHIFT 9
|
||||
#define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT)
|
||||
|
||||
#define LPSS_PRIV_CLOCK_GATE 0x38
|
||||
#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3
|
||||
#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3
|
||||
|
||||
struct lpss_config {
|
||||
/* LPSS offset from drv_data->ioaddr */
|
||||
unsigned offset;
|
||||
|
@ -92,6 +96,8 @@ struct lpss_config {
|
|||
unsigned cs_sel_shift;
|
||||
unsigned cs_sel_mask;
|
||||
unsigned cs_num;
|
||||
/* Quirks */
|
||||
unsigned cs_clk_stays_gated : 1;
|
||||
};
|
||||
|
||||
/* Keep these sorted with enum pxa_ssp_type */
|
||||
|
@ -162,6 +168,7 @@ static const struct lpss_config lpss_platforms[] = {
|
|||
.tx_threshold_hi = 56,
|
||||
.cs_sel_shift = 8,
|
||||
.cs_sel_mask = 3 << 8,
|
||||
.cs_clk_stays_gated = true,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -389,6 +396,22 @@ static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
|
|||
else
|
||||
value |= LPSS_CS_CONTROL_CS_HIGH;
|
||||
__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
|
||||
if (config->cs_clk_stays_gated) {
|
||||
u32 clkgate;
|
||||
|
||||
/*
|
||||
* Changing CS alone when dynamic clock gating is on won't
|
||||
* actually flip CS at that time. This ruins SPI transfers
|
||||
* that specify delays, or have no data. Toggle the clock mode
|
||||
* to force on briefly to poke the CS pin to move.
|
||||
*/
|
||||
clkgate = __lpss_ssp_read_priv(drv_data, LPSS_PRIV_CLOCK_GATE);
|
||||
value = (clkgate & ~LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK) |
|
||||
LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON;
|
||||
|
||||
__lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, value);
|
||||
__lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, clkgate);
|
||||
}
|
||||
}
|
||||
|
||||
static void cs_assert(struct spi_device *spi)
|
||||
|
|
|
@ -1190,6 +1190,11 @@ static int spi_qup_suspend(struct device *device)
|
|||
struct spi_qup *controller = spi_master_get_devdata(master);
|
||||
int ret;
|
||||
|
||||
if (pm_runtime_suspended(device)) {
|
||||
ret = spi_qup_pm_resume_runtime(device);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
ret = spi_master_suspend(master);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -1198,10 +1203,8 @@ static int spi_qup_suspend(struct device *device)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (!pm_runtime_suspended(device)) {
|
||||
clk_disable_unprepare(controller->cclk);
|
||||
clk_disable_unprepare(controller->iclk);
|
||||
}
|
||||
clk_disable_unprepare(controller->cclk);
|
||||
clk_disable_unprepare(controller->iclk);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -403,9 +403,6 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
|
|||
|
||||
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
|
||||
|
||||
/* Dummy generic FIFO entry */
|
||||
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
|
||||
|
||||
/* Manually start the generic FIFO command */
|
||||
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
|
||||
zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include <signal.h>
|
||||
|
||||
#define MAX_NUM_DEVICES 10
|
||||
#define MAX_SYSFS_PREFIX 0x80
|
||||
#define MAX_SYSFS_PATH 0x200
|
||||
#define CSV_MAX_LINE 0x1000
|
||||
#define SYSFS_MAX_INT 0x20
|
||||
|
@ -69,7 +70,7 @@ struct loopback_results {
|
|||
};
|
||||
|
||||
struct loopback_device {
|
||||
char name[MAX_SYSFS_PATH];
|
||||
char name[MAX_STR_LEN];
|
||||
char sysfs_entry[MAX_SYSFS_PATH];
|
||||
char debugfs_entry[MAX_SYSFS_PATH];
|
||||
struct loopback_results results;
|
||||
|
@ -95,8 +96,8 @@ struct loopback_test {
|
|||
int stop_all;
|
||||
int poll_count;
|
||||
char test_name[MAX_STR_LEN];
|
||||
char sysfs_prefix[MAX_SYSFS_PATH];
|
||||
char debugfs_prefix[MAX_SYSFS_PATH];
|
||||
char sysfs_prefix[MAX_SYSFS_PREFIX];
|
||||
char debugfs_prefix[MAX_SYSFS_PREFIX];
|
||||
struct timespec poll_timeout;
|
||||
struct loopback_device devices[MAX_NUM_DEVICES];
|
||||
struct loopback_results aggregate_results;
|
||||
|
@ -645,7 +646,7 @@ int find_loopback_devices(struct loopback_test *t)
|
|||
static int open_poll_files(struct loopback_test *t)
|
||||
{
|
||||
struct loopback_device *dev;
|
||||
char buf[MAX_STR_LEN];
|
||||
char buf[MAX_SYSFS_PATH + MAX_STR_LEN];
|
||||
char dummy;
|
||||
int fds_idx = 0;
|
||||
int i;
|
||||
|
@ -663,7 +664,7 @@ static int open_poll_files(struct loopback_test *t)
|
|||
goto err;
|
||||
}
|
||||
read(t->fds[fds_idx].fd, &dummy, 1);
|
||||
t->fds[fds_idx].events = EPOLLERR|EPOLLPRI;
|
||||
t->fds[fds_idx].events = POLLERR | POLLPRI;
|
||||
t->fds[fds_idx].revents = 0;
|
||||
fds_idx++;
|
||||
}
|
||||
|
@ -756,7 +757,7 @@ static int wait_for_complete(struct loopback_test *t)
|
|||
}
|
||||
|
||||
for (i = 0; i < t->poll_count; i++) {
|
||||
if (t->fds[i].revents & EPOLLPRI) {
|
||||
if (t->fds[i].revents & POLLPRI) {
|
||||
/* Dummy read to clear the event */
|
||||
read(t->fds[i].fd, &dummy, 1);
|
||||
number_of_events++;
|
||||
|
@ -915,10 +916,10 @@ int main(int argc, char *argv[])
|
|||
t.iteration_max = atoi(optarg);
|
||||
break;
|
||||
case 'S':
|
||||
snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", optarg);
|
||||
snprintf(t.sysfs_prefix, MAX_SYSFS_PREFIX, "%s", optarg);
|
||||
break;
|
||||
case 'D':
|
||||
snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", optarg);
|
||||
snprintf(t.debugfs_prefix, MAX_SYSFS_PREFIX, "%s", optarg);
|
||||
break;
|
||||
case 'm':
|
||||
t.mask = atol(optarg);
|
||||
|
@ -969,10 +970,10 @@ int main(int argc, char *argv[])
|
|||
}
|
||||
|
||||
if (!strcmp(t.sysfs_prefix, ""))
|
||||
snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", sysfs_prefix);
|
||||
snprintf(t.sysfs_prefix, MAX_SYSFS_PREFIX, "%s", sysfs_prefix);
|
||||
|
||||
if (!strcmp(t.debugfs_prefix, ""))
|
||||
snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", debugfs_prefix);
|
||||
snprintf(t.debugfs_prefix, MAX_SYSFS_PREFIX, "%s", debugfs_prefix);
|
||||
|
||||
ret = find_loopback_devices(&t);
|
||||
if (ret)
|
||||
|
|
|
@@ -38,6 +38,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
{USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
{USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
{} /* Terminating entry */
@@ -561,7 +561,7 @@ static u_long get_word(struct vc_data *vc)
return 0;
} else if (tmpx < vc->vc_cols - 2 &&
(ch == SPACE || ch == 0 || (ch < 0x100 && IS_WDLM(ch))) &&
get_char(vc, (u_short *)&tmp_pos + 1, &temp) > SPACE) {
get_char(vc, (u_short *)tmp_pos + 1, &temp) > SPACE) {
tmp_pos += 2;
tmpx++;
} else {
@@ -914,10 +914,10 @@ static int get_serial_info(struct acm *acm, struct serial_struct __user *info)
memset(&tmp, 0, sizeof(tmp));
tmp.xmit_fifo_size = acm->writesize;
tmp.baud_base = le32_to_cpu(acm->line.dwDTERate);
tmp.close_delay = acm->port.close_delay / 10;
tmp.close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
tmp.closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE :
acm->port.closing_wait / 10;
jiffies_to_msecs(acm->port.closing_wait) / 10;

if (copy_to_user(info, &tmp, sizeof(tmp)))
return -EFAULT;
@@ -930,27 +930,35 @@ static int set_serial_info(struct acm *acm,
{
struct serial_struct new_serial;
unsigned int closing_wait, close_delay;
unsigned int old_closing_wait, old_close_delay;
int retval = 0;

if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
return -EFAULT;

close_delay = new_serial.close_delay * 10;
close_delay = msecs_to_jiffies(new_serial.close_delay * 10);
closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
ASYNC_CLOSING_WAIT_NONE :
msecs_to_jiffies(new_serial.closing_wait * 10);

/* we must redo the rounding here, so that the values match */
old_close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
old_closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE :
jiffies_to_msecs(acm->port.closing_wait) / 10;

mutex_lock(&acm->port.mutex);

if (!capable(CAP_SYS_ADMIN)) {
if ((close_delay != acm->port.close_delay) ||
(closing_wait != acm->port.closing_wait))
if ((new_serial.close_delay != old_close_delay) ||
(new_serial.closing_wait != old_closing_wait)) {
if (!capable(CAP_SYS_ADMIN))
retval = -EPERM;
else
retval = -EOPNOTSUPP;
} else {
acm->port.close_delay = close_delay;
acm->port.closing_wait = closing_wait;
}
else {
acm->port.close_delay = close_delay;
acm->port.closing_wait = closing_wait;
}
} else
retval = -EOPNOTSUPP;

mutex_unlock(&acm->port.mutex);
return retval;
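In the cdc-acm hunks above, close_delay and closing_wait travel through TIOCSSERIAL in hundredths of a second but are stored in jiffies, so the driver converts with msecs_to_jiffies()/jiffies_to_msecs() and re-rounds the stored values before comparing them with user input. A small user-space model of that round-trip, assuming HZ=250 and simplified conversion helpers for illustration only:

    #include <stdio.h>

    #define HZ 250  /* assumed tick rate for this sketch */

    static unsigned long msecs_to_jiffies(unsigned int ms)
    {
        return ((unsigned long)ms * HZ + 999) / 1000;  /* round up, as the kernel does */
    }

    static unsigned int jiffies_to_msecs(unsigned long j)
    {
        return (unsigned int)(j * 1000 / HZ);
    }

    int main(void)
    {
        unsigned int user_cs = 3;                          /* centiseconds from userspace */
        unsigned long stored = msecs_to_jiffies(user_cs * 10);
        unsigned int echoed  = jiffies_to_msecs(stored) / 10;

        /* Comparing user input against the re-rounded value, not raw jiffies,
         * is what keeps an unchanged setting from looking like a change. */
        printf("user=%u cs, stored=%lu jiffies, echoed=%u cs\n",
               user_cs, stored, echoed);
        return 0;
    }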
@@ -378,6 +378,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },

/* Realtek hub in Dell WD19 (Type-C) */
{ USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },

/* Generic RTL8153 based ethernet adapters */
{ USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },

/* Action Semiconductor flash disk */
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
@@ -128,7 +128,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_AMD_PLL_FIX;

if (pdev->vendor == PCI_VENDOR_ID_AMD &&
(pdev->device == 0x15e0 ||
(pdev->device == 0x145c ||
pdev->device == 0x15e0 ||
pdev->device == 0x15e1 ||
pdev->device == 0x43bb))
xhci->quirks |= XHCI_SUSPEND_DELAY;
@@ -492,6 +492,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
static struct platform_driver usb_xhci_driver = {
.probe = xhci_plat_probe,
.remove = xhci_plat_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xhci-hcd",
.pm = &xhci_plat_pm_ops,
@@ -289,23 +289,12 @@ DECLARE_EVENT_CLASS(xhci_log_urb,
),
TP_printk("ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x",
__entry->epnum, __entry->dir_in ? "in" : "out",
({ char *s;
switch (__entry->type) {
case USB_ENDPOINT_XFER_INT:
s = "intr";
break;
case USB_ENDPOINT_XFER_CONTROL:
s = "control";
break;
case USB_ENDPOINT_XFER_BULK:
s = "bulk";
break;
case USB_ENDPOINT_XFER_ISOC:
s = "isoc";
break;
default:
s = "UNKNOWN";
} s; }), __entry->urb, __entry->pipe, __entry->slot_id,
__print_symbolic(__entry->type,
{ USB_ENDPOINT_XFER_INT, "intr" },
{ USB_ENDPOINT_XFER_CONTROL, "control" },
{ USB_ENDPOINT_XFER_BULK, "bulk" },
{ USB_ENDPOINT_XFER_ISOC, "isoc" }),
__entry->urb, __entry->pipe, __entry->slot_id,
__entry->actual, __entry->length, __entry->num_mapped_sgs,
__entry->num_sgs, __entry->stream, __entry->flags
)
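The xhci tracing hunk replaces an open-coded switch inside TP_printk() with __print_symbolic(), which maps a value to a name through a table so user-space trace tools can decode the event as well. A rough user-space analogue of that value-to-string table, with a hypothetical helper name; the numeric values follow the usual USB endpoint transfer-type encoding:

    #include <stdio.h>

    struct sym { unsigned int val; const char *name; };

    static const char *print_symbolic(unsigned int val,
                                      const struct sym *tab, unsigned int n)
    {
        for (unsigned int i = 0; i < n; i++)
            if (tab[i].val == val)
                return tab[i].name;
        return "UNKNOWN";
    }

    int main(void)
    {
        /* Mirrors the { USB_ENDPOINT_XFER_*, "..." } pairs in the trace event. */
        static const struct sym xfer_names[] = {
            { 0, "control" }, { 1, "isoc" }, { 2, "bulk" }, { 3, "intr" },
        };

        printf("type 2 -> %s\n", print_symbolic(2, xfer_names, 4));
        return 0;
    }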
@@ -1183,6 +1183,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110b, 0xff), /* Telit ME910G1 (ECM) */
.driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
@@ -93,6 +93,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD381_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
@@ -124,6 +124,7 @@
#define HP_LM920_PRODUCT_ID 0x026b
#define HP_TD620_PRODUCT_ID 0x0956
#define HP_LD960_PRODUCT_ID 0x0b39
#define HP_LD381_PRODUCT_ID 0x0f7f
#define HP_LCM220_PRODUCT_ID 0x3139
#define HP_LCM960_PRODUCT_ID 0x3239
#define HP_LD220_PRODUCT_ID 0x3524
@@ -313,6 +313,8 @@ static int process_msg(void)
req->msg.type = state.msg.type;
req->msg.len = state.msg.len;
req->body = state.body;
/* write body, then update state */
virt_wmb();
req->state = xb_req_state_got_reply;
req->cb(req);
} else
@@ -395,6 +397,8 @@ static int process_writes(void)
if (state.req->state == xb_req_state_aborted)
kfree(state.req);
else {
/* write err, then update state */
virt_wmb();
state.req->state = xb_req_state_got_reply;
wake_up(&state.req->wq);
}
@@ -191,8 +191,11 @@ static bool xenbus_ok(void)

static bool test_reply(struct xb_req_data *req)
{
if (req->state == xb_req_state_got_reply || !xenbus_ok())
if (req->state == xb_req_state_got_reply || !xenbus_ok()) {
/* read req->state before all other fields */
virt_rmb();
return true;
}

/* Make sure to reread req->state each time. */
barrier();
@@ -202,7 +205,7 @@ static bool test_reply(struct xb_req_data *req)

static void *read_reply(struct xb_req_data *req)
{
while (req->state != xb_req_state_got_reply) {
do {
wait_event(req->wq, test_reply(req));

if (!xenbus_ok())
@@ -216,7 +219,7 @@ static void *read_reply(struct xb_req_data *req)
if (req->err)
return ERR_PTR(req->err);

}
} while (req->state != xb_req_state_got_reply);

return req->body;
}
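The two xenbus fixes above pair a write barrier on the producer side (fill in req->body or req->err, then virt_wmb(), then set req->state) with a read barrier on the consumer side (virt_rmb() after observing the state), so a waiter that sees xb_req_state_got_reply is guaranteed to also see the completed reply fields. A hedged sketch of the same publish/consume ordering using C11 release/acquire atomics instead of the kernel barriers; the structure and field names are illustrative only:

    #include <stdatomic.h>
    #include <stdio.h>

    struct req {
        int body;              /* payload written before publishing */
        _Atomic int state;     /* 0 = pending, 1 = got_reply */
    };

    static void producer(struct req *r, int reply)
    {
        r->body = reply;                                   /* write body ... */
        atomic_store_explicit(&r->state, 1,
                              memory_order_release);       /* ... then update state */
    }

    static int consumer(struct req *r)
    {
        while (atomic_load_explicit(&r->state,
                                    memory_order_acquire) != 1)
            ;                                              /* spin until published */
        return r->body;        /* acquire pairs with release: body is visible */
    }

    int main(void)
    {
        struct req r = { 0, 0 };

        producer(&r, 42);
        printf("reply body = %d\n", consumer(&r));
        return 0;
    }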
@ -10015,6 +10015,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
|
|||
ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx);
|
||||
if (ret)
|
||||
commit_transaction = true;
|
||||
} else if (sync_log) {
|
||||
mutex_lock(&root->log_mutex);
|
||||
list_del(&ctx.list);
|
||||
mutex_unlock(&root->log_mutex);
|
||||
}
|
||||
if (commit_transaction) {
|
||||
ret = btrfs_commit_transaction(trans);
|
||||
|
|
|
@@ -137,6 +137,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_sb = sb;
inode->i_blkbits = sb->s_blocksize_bits;
inode->i_flags = 0;
atomic64_set(&inode->i_sequence, 0);
atomic_set(&inode->i_count, 1);
inode->i_op = &empty_iops;
inode->i_fop = &no_open_fops;
@@ -665,6 +665,7 @@ struct inode {
struct rcu_head i_rcu;
};
atomic64_t i_version;
atomic64_t i_sequence; /* see futex */
atomic_t i_count;
atomic_t i_dio_count;
atomic_t i_writecount;
@@ -29,23 +29,26 @@ struct task_struct;

union futex_key {
struct {
u64 i_seq;
unsigned long pgoff;
struct inode *inode;
int offset;
unsigned int offset;
} shared;
struct {
union {
struct mm_struct *mm;
u64 __tmp;
};
unsigned long address;
struct mm_struct *mm;
int offset;
unsigned int offset;
} private;
struct {
u64 ptr;
unsigned long word;
void *ptr;
int offset;
unsigned int offset;
} both;
};

#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } }

#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
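The futex hunk above swaps the raw struct inode pointer in the shared key for a 64-bit i_seq taken from the new inode->i_sequence counter, so a recycled inode allocation can never alias a stale key. A small hedged sketch of why a never-reused sequence number is a safer lookup key than a pointer; fake_inode and new_inode() here are illustrative stand-ins, not the kernel API:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static uint64_t next_seq = 1;        /* monotonically increasing, never reused */

    struct fake_inode { uint64_t i_seq; };

    static struct fake_inode *new_inode(void)
    {
        struct fake_inode *ino = malloc(sizeof(*ino));
        ino->i_seq = next_seq++;         /* unique for the inode's whole lifetime */
        return ino;
    }

    int main(void)
    {
        struct fake_inode *a = new_inode();
        uint64_t key_a = a->i_seq;
        uintptr_t addr_a = (uintptr_t)a; /* remembered before the inode goes away */

        free(a);                              /* inode freed while a waiter still holds key_a */
        struct fake_inode *b = new_inode();   /* allocator may hand back the same memory */

        printf("same pointer possible: %d, same i_seq: %d\n",
               addr_a == (uintptr_t)b, key_a == b->i_seq);
        return 0;
    }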
@ -398,8 +398,8 @@ struct vm_fault {
|
|||
* These entries are required when handling speculative page fault.
|
||||
* This way the page handling is done using consistent field values.
|
||||
*/
|
||||
unsigned long vma_flags;
|
||||
pgprot_t vma_page_prot;
|
||||
unsigned long vma_flags; /* Speculative Page Fault field */
|
||||
pgprot_t vma_page_prot; /* Speculative Page Fault field */
|
||||
};
|
||||
|
||||
/* page entry size for vm->huge_fault() */
|
||||
|
|
|
@ -336,7 +336,7 @@ struct vm_area_struct {
|
|||
#endif
|
||||
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
|
||||
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
|
||||
seqcount_t vm_sequence;
|
||||
seqcount_t vm_sequence; /* Speculative page fault field */
|
||||
atomic_t vm_ref_count; /* see vma_get(), vma_put() */
|
||||
#endif
|
||||
} __randomize_layout;
|
||||
|
@ -359,7 +359,7 @@ struct mm_struct {
|
|||
struct rb_root mm_rb;
|
||||
u64 vmacache_seqnum; /* per-thread vmacache */
|
||||
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
|
||||
rwlock_t mm_rb_lock;
|
||||
rwlock_t mm_rb_lock; /* Speculative page fault field */
|
||||
#endif
|
||||
#ifdef CONFIG_MMU
|
||||
unsigned long (*get_unmapped_area) (struct file *filp,
|
||||
|
|
|
@@ -272,7 +272,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
@@ -111,8 +111,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PGPGOUTCLEAN, PSWPIN, PSWPOUT,
     SWAP_RA_HIT,
 #endif
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
-    SPECULATIVE_PGFAULT_ANON,
-    SPECULATIVE_PGFAULT_FILE,
+    SPECULATIVE_PGFAULT_ANON, /* Speculative page fault field */
+    SPECULATIVE_PGFAULT_FILE, /* Speculative page fault field */
 #endif
     NR_VM_EVENT_ITEMS
 };
@@ -115,8 +115,9 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
 
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                                unsigned long pgoff);
-void vmalloc_sync_all(void);
+void vmalloc_sync_mappings(void);
+void vmalloc_sync_unmappings(void);
 
 /*
  * Lowlevel-APIs (not for driver use!)
  */
@@ -113,9 +113,7 @@ struct net {
 #if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
     struct netns_ieee802154_lowpan ieee802154_lowpan;
 #endif
-#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
     struct netns_sctp sctp;
-#endif
 #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
     struct netns_dccp dccp;
 #endif
@@ -22,9 +22,7 @@ struct netns_nf {
 #ifdef CONFIG_NETFILTER_FAMILY_ARP
     struct nf_hook_entries __rcu *hooks_arp[NF_ARP_NUMHOOKS];
 #endif
-#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
     struct nf_hook_entries __rcu *hooks_bridge[NF_INET_NUMHOOKS];
-#endif
 #if IS_ENABLED(CONFIG_DECNET)
     struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS];
 #endif
@@ -11,11 +11,8 @@ struct netns_xt {
     struct list_head tables[NFPROTO_NUMPROTO];
     bool notrack_deprecated_warning;
     bool clusterip_deprecated_warning;
-#if defined(CONFIG_BRIDGE_NF_EBTABLES) || \
-    defined(CONFIG_BRIDGE_NF_EBTABLES_MODULE)
     struct ebt_table *broute_table;
     struct ebt_table *frame_filter;
     struct ebt_table *frame_nat;
-#endif
 };
 #endif
@@ -97,6 +97,27 @@ config GKI_HIDDEN_VIDEOBUF2_CONFIGS
     help
       Dummy config option used to enable all the hidden v4l2 media module.
 
+config GKI_HIDDEN_USB_CONFIGS
+    bool "Hiddel USB configurations needed for GKI"
+    select USB_PHY
+    help
+      Dummy config option used to enable all USB related hidden configs.
+      These configurations are usually only selected by another config
+      option or a combination of them.
+
+      If you are not building a kernel to be used for a variety of
+      out-of-kernel build USB drivers, say N here.
+
+config GKI_HIDDEN_SOC_BUS_CONFIGS
+    bool "Hidden SoC bus configuration needed for GKI"
+    select SOC_BUS
+    help
+      Dummy config option used to enable SOC_BUS hidden Kconfig.
+      The configuration is required for SoCs to register themselves to the bus.
+
+      If you are not building a kernel to be used for a variety of SoCs and
+      out-of-tree drivers, say N here.
+
 # Atrocities needed for
 # a) building GKI modules in separate tree, or
 # b) building drivers that are not modularizable
@@ -116,6 +137,8 @@ config GKI_HACKS_TO_FIX
     select GKI_HIDDEN_VIRTUAL_CONFIGS
     select GKI_HIDDEN_SOC_PM_CONFIGS
     select GKI_HIDDEN_VIDEOBUF2_CONFIGS
+    select GKI_HIDDEN_USB_CONFIGS
+    select GKI_HIDDEN_SOC_BUS_CONFIGS
     help
       Dummy config option used to enable core functionality used by
       modules that may not be selectable in this config.
@@ -2387,7 +2387,7 @@ int btf_get_info_by_fd(const struct btf *btf,
                        union bpf_attr __user *uattr)
 {
     struct bpf_btf_info __user *uinfo;
-    struct bpf_btf_info info = {};
+    struct bpf_btf_info info;
     u32 info_copy, btf_copy;
     void __user *ubtf;
     u32 uinfo_len;
@@ -2396,6 +2396,7 @@ int btf_get_info_by_fd(const struct btf *btf,
     uinfo_len = attr->info.info_len;
 
     info_copy = min_t(u32, uinfo_len, sizeof(info));
+    memset(&info, 0, sizeof(info));
     if (copy_from_user(&info, uinfo, info_copy))
         return -EFAULT;
 
@@ -1958,7 +1958,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                                    union bpf_attr __user *uattr)
 {
     struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
-    struct bpf_prog_info info = {};
+    struct bpf_prog_info info;
     u32 info_len = attr->info.info_len;
     char __user *uinsns;
     u32 ulen;
@@ -1969,6 +1969,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
         return err;
     info_len = min_t(u32, sizeof(info), info_len);
 
+    memset(&info, 0, sizeof(info));
     if (copy_from_user(&info, uinfo, info_len))
         return -EFAULT;
 
@@ -2136,7 +2137,7 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map,
                                   union bpf_attr __user *uattr)
 {
     struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
-    struct bpf_map_info info = {};
+    struct bpf_map_info info;
     u32 info_len = attr->info.info_len;
     int err;
 
@@ -2145,6 +2146,7 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map,
         return err;
     info_len = min_t(u32, sizeof(info), info_len);
 
+    memset(&info, 0, sizeof(info));
     info.type = map->map_type;
     info.id = map->id;
     info.key_size = map->key_size;
@@ -2372,7 +2374,7 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
 
 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
 {
-    union bpf_attr attr = {};
+    union bpf_attr attr;
     int err;
 
     if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
@@ -2384,6 +2386,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
     size = min_t(u32, size, sizeof(attr));
 
     /* copy attributes from user space, may be less than sizeof(bpf_attr) */
+    memset(&attr, 0, sizeof(attr));
     if (copy_from_user(&attr, uattr, size) != 0)
         return -EFAULT;
 
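The pattern in these bpf hunks is the same everywhere: drop the "= {}" initializer and clear the structure explicitly with memset() right before the size-capped copy, so padding bytes are zeroed too and a short copy cannot hand stale stack bytes back to userspace. Below is a minimal userspace sketch of that idea under stated assumptions: demo_info and demo_fill_info are hypothetical names, and memcpy() stands in for the kernel's copy_from_user().

#include <string.h>
#include <stdint.h>

/* Hypothetical reply structure with implicit padding between members. */
struct demo_info {
    uint8_t  type;      /* 3 padding bytes typically follow on common ABIs */
    uint32_t id;
    uint64_t key_size;
};

/* Fill 'out' from a caller-supplied buffer that may be shorter than the
 * struct: zero everything first (members and padding), then copy only
 * what the caller actually provided. */
static void demo_fill_info(struct demo_info *out, const void *src, size_t src_len)
{
    size_t n = src_len < sizeof(*out) ? src_len : sizeof(*out);

    memset(out, 0, sizeof(*out));   /* clears members *and* padding */
    memcpy(out, src, n);            /* partial, size-capped copy    */
}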
@@ -395,9 +395,9 @@ static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
  */
 static struct futex_hash_bucket *hash_futex(union futex_key *key)
 {
-    u32 hash = jhash2((u32*)&key->both.word,
-                      (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
+    u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
                       key->both.offset);
 
     return &futex_queues[hash & (futex_hashsize - 1)];
 }
@@ -439,7 +439,7 @@ static void get_futex_key_refs(union futex_key *key)
 
     switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
     case FUT_OFF_INODE:
-        ihold(key->shared.inode); /* implies smp_mb(); (B) */
+        smp_mb(); /* explicit smp_mb(); (B) */
         break;
     case FUT_OFF_MMSHARED:
         futex_get_mm(key); /* implies smp_mb(); (B) */
@@ -473,7 +473,6 @@ static void drop_futex_key_refs(union futex_key *key)
 
     switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
     case FUT_OFF_INODE:
-        iput(key->shared.inode);
         break;
     case FUT_OFF_MMSHARED:
         mmdrop(key->private.mm);
@@ -481,6 +480,46 @@ static void drop_futex_key_refs(union futex_key *key)
     }
 }
 
+/*
+ * Generate a machine wide unique identifier for this inode.
+ *
+ * This relies on u64 not wrapping in the life-time of the machine; which with
+ * 1ns resolution means almost 585 years.
+ *
+ * This further relies on the fact that a well formed program will not unmap
+ * the file while it has a (shared) futex waiting on it. This mapping will have
+ * a file reference which pins the mount and inode.
+ *
+ * If for some reason an inode gets evicted and read back in again, it will get
+ * a new sequence number and will _NOT_ match, even though it is the exact same
+ * file.
+ *
+ * It is important that match_futex() will never have a false-positive, esp.
+ * for PI futexes that can mess up the state. The above argues that false-negatives
+ * are only possible for malformed programs.
+ */
+static u64 get_inode_sequence_number(struct inode *inode)
+{
+    static atomic64_t i_seq;
+    u64 old;
+
+    /* Does the inode already have a sequence number? */
+    old = atomic64_read(&inode->i_sequence);
+    if (likely(old))
+        return old;
+
+    for (;;) {
+        u64 new = atomic64_add_return(1, &i_seq);
+        if (WARN_ON_ONCE(!new))
+            continue;
+
+        old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
+        if (old)
+            return old;
+        return new;
+    }
+}
+
 /**
  * get_futex_key() - Get parameters which are the keys for a futex
  * @uaddr: virtual address of the futex
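The allocation scheme added above is a lockless install-once counter: read the cached per-inode value, and if it is still zero, race to install a fresh non-zero value with a single compare-and-swap, then keep whichever value won so every caller agrees. A rough userspace analogue using C11 atomics is sketched below; the demo_ names are illustrative and nothing here is kernel API.

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t demo_global_seq;   /* analogue of the static i_seq */

/* Per-object slot, analogue of inode->i_sequence; 0 means "unset". */
struct demo_obj {
    _Atomic uint64_t sequence;
};

static uint64_t demo_get_sequence(struct demo_obj *obj)
{
    uint64_t old = atomic_load_explicit(&obj->sequence, memory_order_relaxed);

    if (old)                               /* fast path: already assigned */
        return old;

    for (;;) {
        /* fetch_add returns the previous value; +1 gives the new one. */
        uint64_t new = atomic_fetch_add(&demo_global_seq, 1) + 1;
        uint64_t expected = 0;

        if (!new)                          /* 0 is reserved for "unset"   */
            continue;
        /* Install 'new' only if nobody beat us to it; otherwise keep the
         * winner's value so all callers observe the same identifier. */
        if (atomic_compare_exchange_strong(&obj->sequence, &expected, new))
            return new;
        return expected;                   /* lost the race: expected now holds the winner */
    }
}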
@@ -493,9 +532,15 @@ static void drop_futex_key_refs(union futex_key *key)
  *
  * The key words are stored in @key on success.
  *
- * For shared mappings, it's (page->index, file_inode(vma->vm_file),
- * offset_within_page). For private mappings, it's (uaddr, current->mm).
- * We can usually work out the index without swapping in the page.
+ * For shared mappings (when @fshared), the key is:
+ *   ( inode->i_sequence, page->index, offset_within_page )
+ * [ also see get_inode_sequence_number() ]
+ *
+ * For private mappings (or when !@fshared), the key is:
+ *   ( current->mm, address, 0 )
+ *
+ * This allows (cross process, where applicable) identification of the futex
+ * without keeping the page pinned for the duration of the FUTEX_WAIT.
  *
  * lock_page() might sleep, the caller should not hold a spinlock.
  */
@@ -635,8 +680,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
         key->private.mm = mm;
         key->private.address = address;
 
-        get_futex_key_refs(key); /* implies smp_mb(); (B) */
-
     } else {
         struct inode *inode;
 
@@ -668,40 +711,14 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
             goto again;
         }
 
-        /*
-         * Take a reference unless it is about to be freed. Previously
-         * this reference was taken by ihold under the page lock
-         * pinning the inode in place so i_lock was unnecessary. The
-         * only way for this check to fail is if the inode was
-         * truncated in parallel which is almost certainly an
-         * application bug. In such a case, just retry.
-         *
-         * We are not calling into get_futex_key_refs() in file-backed
-         * cases, therefore a successful atomic_inc return below will
-         * guarantee that get_futex_key() will still imply smp_mb(); (B).
-         */
-        if (!atomic_inc_not_zero(&inode->i_count)) {
-            rcu_read_unlock();
-            put_page(page);
-
-            goto again;
-        }
-
-        /* Should be impossible but lets be paranoid for now */
-        if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
-            err = -EFAULT;
-            rcu_read_unlock();
-            iput(inode);
-
-            goto out;
-        }
-
         key->both.offset |= FUT_OFF_INODE; /* inode-based key */
-        key->shared.inode = inode;
+        key->shared.i_seq = get_inode_sequence_number(inode);
         key->shared.pgoff = basepage_index(tail);
         rcu_read_unlock();
     }
 
+    get_futex_key_refs(key); /* implies smp_mb(); (B) */
+
 out:
     put_page(page);
     return err;
@@ -552,7 +552,7 @@ NOKPROBE_SYMBOL(notify_die);
 
 int register_die_notifier(struct notifier_block *nb)
 {
-    vmalloc_sync_all();
+    vmalloc_sync_mappings();
     return atomic_notifier_chain_register(&die_chain, nb);
 }
 EXPORT_SYMBOL_GPL(register_die_notifier);
@@ -1426,3 +1426,4 @@ ktime_t *get_next_event_cpu(unsigned int cpu)
 {
     return &(per_cpu(tick_cpu_device, cpu).evtdev->next_event);
 }
+EXPORT_SYMBOL_GPL(get_next_event_cpu);
@@ -3759,7 +3759,7 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
     struct mem_cgroup_thresholds *thresholds;
     struct mem_cgroup_threshold_ary *new;
     unsigned long usage;
-    int i, j, size;
+    int i, j, size, entries;
 
     mutex_lock(&memcg->thresholds_lock);
 
@@ -3779,14 +3779,20 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
     __mem_cgroup_threshold(memcg, type == _MEMSWAP);
 
     /* Calculate new number of threshold */
-    size = 0;
+    size = entries = 0;
     for (i = 0; i < thresholds->primary->size; i++) {
         if (thresholds->primary->entries[i].eventfd != eventfd)
             size++;
+        else
+            entries++;
     }
 
     new = thresholds->spare;
 
+    /* If no items related to eventfd have been cleared, nothing to do */
+    if (!entries)
+        goto unlock;
+
     /* Set thresholds array to NULL if we don't have thresholds */
     if (!size) {
         kfree(new);
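The memcg fix counts, in one pass, how many thresholds will be kept (size) and how many actually match the eventfd being unregistered (entries), and bails out before touching the spare array when nothing matched — the case that previously led to the NULL dereference. A generic sketch of that "partition, then early-out on no-op" shape follows; the demo_ names are placeholders, not memcg API.

#include <stddef.h>

struct demo_entry {
    void *eventfd;
    unsigned long threshold;
};

/* Return how many entries would remain after removing 'eventfd', or -1
 * when no entry matches, so the caller never rebuilds or frees its spare
 * array for a no-op unregister. */
static long demo_count_remaining(const struct demo_entry *tbl, size_t n,
                                 const void *eventfd)
{
    size_t keep = 0, removed = 0;

    for (size_t i = 0; i < n; i++) {
        if (tbl[i].eventfd != eventfd)
            keep++;       /* entry survives the unregister */
        else
            removed++;    /* entry belongs to this eventfd */
    }

    return removed ? (long)keep : -1;
}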
mm/nommu.c (10 changes)
@@ -446,10 +446,14 @@ void vm_unmap_aliases(void)
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 
 /*
- * Implement a stub for vmalloc_sync_all() if the architecture chose not to
- * have one.
+ * Implement a stub for vmalloc_sync_[un]mapping() if the architecture
+ * chose not to have one.
  */
-void __weak vmalloc_sync_all(void)
+void __weak vmalloc_sync_mappings(void)
 {
 }
 
+void __weak vmalloc_sync_unmappings(void)
+{
+}
+
mm/slub.c (32 changes)
@@ -1971,8 +1971,6 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
 
     if (node == NUMA_NO_NODE)
         searchnode = numa_mem_id();
-    else if (!node_present_pages(node))
-        searchnode = node_to_mem_node(node);
 
     object = get_partial_node(s, get_node(s, searchnode), c, flags);
     if (object || node != NUMA_NO_NODE)
@@ -2569,17 +2567,27 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
     struct page *page;
 
     page = c->page;
-    if (!page)
+    if (!page) {
+        /*
+         * if the node is not online or has no normal memory, just
+         * ignore the node constraint
+         */
+        if (unlikely(node != NUMA_NO_NODE &&
+                     !node_state(node, N_NORMAL_MEMORY)))
+            node = NUMA_NO_NODE;
         goto new_slab;
+    }
 redo:
 
     if (unlikely(!node_match(page, node))) {
-        int searchnode = node;
-
-        if (node != NUMA_NO_NODE && !node_present_pages(node))
-            searchnode = node_to_mem_node(node);
-
-        if (unlikely(!node_match(page, searchnode))) {
+        /*
+         * same as above but node_match() being false already
+         * implies node != NUMA_NO_NODE
+         */
+        if (!node_state(node, N_NORMAL_MEMORY)) {
+            node = NUMA_NO_NODE;
+            goto redo;
+        } else {
             stat(s, ALLOC_NODE_MISMATCH);
             deactivate_slab(s, page, c->freelist, c);
             goto new_slab;
@@ -3004,11 +3012,13 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
     barrier();
 
     if (likely(page == c->page)) {
-        set_freepointer(s, tail_obj, c->freelist);
+        void **freelist = READ_ONCE(c->freelist);
+
+        set_freepointer(s, tail_obj, freelist);
 
         if (unlikely(!this_cpu_cmpxchg_double(
                 s->cpu_slab->freelist, s->cpu_slab->tid,
-                c->freelist, tid,
+                freelist, tid,
                 head, next_tid(tid)))) {
 
             note_cmpxchg_failure("slab_free", s, tid);
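The do_slab_free() change snapshots c->freelist once with READ_ONCE() and uses that same snapshot both as the value linked into the freed object and as the "old" value of the cmpxchg, so the compare is never made against a fresher value than the one actually stored. A simplified userspace analogue of that snapshot-then-CAS discipline is sketched below with C11 atomics on a single pointer; it is not the kernel's paired freelist/tid cmpxchg_double, and the demo_ names are placeholders.

#include <stdatomic.h>

struct demo_node {
    struct demo_node *next;
};

/* Lock-free push: the same snapshot that is written into node->next is
 * the value the CAS compares against, exactly once per retry. */
static void demo_push(_Atomic(struct demo_node *) *head, struct demo_node *node)
{
    struct demo_node *old = atomic_load_explicit(head, memory_order_relaxed);

    do {
        node->next = old;   /* link against the snapshot...                 */
    } while (!atomic_compare_exchange_weak(head, &old, node));
                            /* ...and compare against that same snapshot;
                             * on failure 'old' is refreshed and we retry.  */
}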
mm/vmalloc.c (11 changes)
@@ -2460,7 +2460,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
      * First make sure the mappings are removed from all page-tables
      * before they are freed.
      */
-    vmalloc_sync_all();
+    vmalloc_sync_unmappings();
 
     /*
      * In this function, newly allocated vm_struct has VM_UNINITIALIZED
@@ -3010,16 +3010,19 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 EXPORT_SYMBOL(remap_vmalloc_range);
 
 /*
- * Implement a stub for vmalloc_sync_all() if the architecture chose not to
- * have one.
+ * Implement stubs for vmalloc_sync_[un]mappings () if the architecture chose
+ * not to have one.
  *
  * The purpose of this function is to make sure the vmalloc area
  * mappings are identical in all page-tables in the system.
  */
-void __weak vmalloc_sync_all(void)
+void __weak vmalloc_sync_mappings(void)
 {
 }
 
+void __weak vmalloc_sync_unmappings(void)
+{
+}
 
 static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
 {
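The mm/nommu.c and mm/vmalloc.c hunks rely on the weak-symbol mechanism: generic code ships empty __weak definitions of vmalloc_sync_mappings()/vmalloc_sync_unmappings(), and an architecture that needs real work simply defines a normal, strong symbol with the same name, which the linker picks over the stub. A minimal two-file illustration of that linkage pattern outside the kernel (all names are placeholders):

/* lib.c — generic code ships an empty, overridable default. */
__attribute__((weak)) void demo_sync_mappings(void)
{
    /* default: nothing to sync */
}

/* main.c — a separate object that needs real work defines the strong
 * symbol; at link time it silently replaces the weak stub above. */
#include <stdio.h>

void demo_sync_mappings(void)          /* strong definition wins at link */
{
    puts("arch-specific page-table sync");
}

int main(void)
{
    demo_sync_mappings();              /* prints the arch-specific message */
    return 0;
}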
@@ -734,7 +734,6 @@ static void tcp_v6_init_req(struct request_sock *req,
                             const struct sock *sk_listener,
                             struct sk_buff *skb)
 {
-    bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
     struct inet_request_sock *ireq = inet_rsk(req);
     const struct ipv6_pinfo *np = inet6_sk(sk_listener);
 
@@ -742,7 +741,7 @@ static void tcp_v6_init_req(struct request_sock *req,
     ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 
     /* So that link locals have meaning */
-    if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
+    if (!sk_listener->sk_bound_dev_if &&
         ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
         ireq->ir_iif = tcp_v6_iif(skb);
 
@@ -71,5 +71,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, format)
 KBUILD_CFLAGS += $(call cc-disable-warning, sign-compare)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-zero-length)
 KBUILD_CFLAGS += $(call cc-disable-warning, uninitialized)
+KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
 endif
 endif
@@ -12,11 +12,11 @@ SECTIONS {
         *(.eh_frame)
     }
 
-    .bss : { *(.bss .bss[.0-9a-zA-Z_]*) }
-    .data : { *(.data .data[.0-9a-zA-Z_]*) }
-    .rela.data : { *(.rela.data .rela.data[.0-9a-zA-Z_]*) }
-    .rela.rodata : { *(.rela.rodata .rela.rodata[.0-9a-zA-Z_]*) }
-    .rela.text : { *(.rela.text .rela.text[.0-9a-zA-Z_]*) }
-    .rodata : { *(.rodata .rodata[.0-9a-zA-Z_]*) }
-    .text : { *(.text .text[.0-9a-zA-Z_]*) }
+    .bss : { *(.bss .bss.[0-9a-zA-Z_]*) }
+    .data : { *(.data .data.[0-9a-zA-Z_]*) }
+    .rela.data : { *(.rela.data .rela.data.[0-9a-zA-Z_]*) }
+    .rela.rodata : { *(.rela.rodata .rela.rodata.[0-9a-zA-Z_]*) }
+    .rela.text : { *(.rela.text .rela.text.[0-9a-zA-Z_]*) }
+    .rodata : { *(.rodata .rodata.[0-9a-zA-Z_]*) }
+    .text : { *(.text .text.[0-9a-zA-Z_]*) }
 }
scripts/parse-maintainers.pl (0 changes, normal file → executable file)
@@ -111,7 +111,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames)
     while (plugin->next) {
         if (plugin->dst_frames)
             frames = plugin->dst_frames(plugin, frames);
-        if (snd_BUG_ON((snd_pcm_sframes_t)frames <= 0))
+        if ((snd_pcm_sframes_t)frames <= 0)
             return -ENXIO;
         plugin = plugin->next;
         err = snd_pcm_plugin_alloc(plugin, frames);
@@ -123,7 +123,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames)
     while (plugin->prev) {
         if (plugin->src_frames)
             frames = plugin->src_frames(plugin, frames);
-        if (snd_BUG_ON((snd_pcm_sframes_t)frames <= 0))
+        if ((snd_pcm_sframes_t)frames <= 0)
            return -ENXIO;
         plugin = plugin->prev;
         err = snd_pcm_plugin_alloc(plugin, frames);
@@ -209,6 +209,8 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p
     if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
         plugin = snd_pcm_plug_last(plug);
         while (plugin && drv_frames > 0) {
+            if (drv_frames > plugin->buf_frames)
+                drv_frames = plugin->buf_frames;
             plugin_prev = plugin->prev;
             if (plugin->src_frames)
                 drv_frames = plugin->src_frames(plugin, drv_frames);
@@ -220,6 +222,8 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p
             plugin_next = plugin->next;
             if (plugin->dst_frames)
                 drv_frames = plugin->dst_frames(plugin, drv_frames);
+            if (drv_frames > plugin->buf_frames)
+                drv_frames = plugin->buf_frames;
             plugin = plugin_next;
         }
     } else
@@ -248,11 +252,15 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc
                 if (frames < 0)
                     return frames;
             }
+            if (frames > plugin->buf_frames)
+                frames = plugin->buf_frames;
             plugin = plugin_next;
         }
     } else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
         plugin = snd_pcm_plug_last(plug);
         while (plugin) {
+            if (frames > plugin->buf_frames)
+                frames = plugin->buf_frames;
             plugin_prev = plugin->prev;
             if (plugin->src_frames) {
                 frames = plugin->src_frames(plugin, frames);
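All of the snd_pcm_plug_* hunks apply the same guard: as the frame count is translated across the plugin chain, it is clamped to each plugin's buf_frames so no later consumer is ever asked to handle more frames than that plugin's buffer holds. A stripped-down sketch of that walk is shown below; the demo types and function are placeholders, not the ALSA API.

#include <stddef.h>

struct demo_plugin {
    struct demo_plugin *next;
    long buf_frames;                                      /* plugin buffer capacity */
    long (*dst_frames)(const struct demo_plugin *, long); /* optional converter     */
};

/* Translate 'frames' along the chain, never letting the running value
 * exceed any plugin's buffer capacity. */
static long demo_translate_frames(const struct demo_plugin *plugin, long frames)
{
    while (plugin && frames > 0) {
        if (frames > plugin->buf_frames)
            frames = plugin->buf_frames;        /* clamp before converting */
        if (plugin->dst_frames)
            frames = plugin->dst_frames(plugin, frames);
        plugin = plugin->next;
    }
    return frames;
}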
@@ -615,6 +615,7 @@ send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq
     len = snd_seq_oss_timer_start(dp->timer);
     if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
         snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev);
+        snd_midi_event_reset_decode(mdev->coder);
     } else {
         len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev);
         if (len > 0)
@@ -95,6 +95,7 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
         if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
             continue;
         snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)snd_rawmidi_receive, vmidi->substream);
+        snd_midi_event_reset_decode(vmidi->parser);
     } else {
         len = snd_midi_event_decode(vmidi->parser, msg, sizeof(msg), ev);
         if (len > 0)
@@ -7828,6 +7828,8 @@ static int patch_alc269(struct hda_codec *codec)
         spec->gen.mixer_nid = 0;
         break;
     case 0x10ec0225:
+        codec->power_save_node = 1;
+        /* fall through */
     case 0x10ec0295:
     case 0x10ec0299:
         spec->codec_variant = ALC269_TYPE_ALC225;
@@ -320,7 +320,7 @@ static void line6_data_received(struct urb *urb)
             line6_midibuf_read(mb, line6->buffer_message,
                                LINE6_MIDI_MESSAGE_MAXLEN);
 
-        if (done == 0)
+        if (done <= 0)
             break;
 
         line6->message_length = done;
@@ -163,7 +163,7 @@ int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
         int midi_length_prev =
             midibuf_message_length(this->command_prev);
 
-        if (midi_length_prev > 0) {
+        if (midi_length_prev > 1) {
             midi_length = midi_length_prev - 1;
             repeat = 1;
         } else