This is the 4.19.31 stable release
-----BEGIN PGP SIGNATURE----- iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlyWhJcACgkQONu9yGCS aT6XzxAAzP2QGzC4SVPgcFH1woF/d8Cz0zQ81mLXzjXtEPm39fZCM2hbBnxkXLu1 peFyrKNk6/c9541D9gsQCQT6Fu+H6u1bJKcIezlKJ2xyB/MsU1hXkjZrTJYW3RRs gimy1EGdood2el1ubEBZiaspazoeRzBqtg1Nsmr4V0l+RT8HwtKKw+0+Nxixfp59 NoVkqTpPI5mL0FiH2R9ogcfg3SvgMZOsOhOBjdPvSjiJJsbvIWcW48MCs95XSUpF R+l/fWn+oiFCcIqBaFheujuqZMvVrUHZHaWAPMuoR/c3Cdf0lTBokdv6UM9c0nv3 61jX5r5ImRI/dfQANN5mbB1YKcs5xOI+I7QZHQ2q4clsWrWyLapXW4clrAZJ6z5t UVeVbuLV2y5PL9GJyBcXpyY0BOf4e2gZURaPY3C5McNwgybNoiR0ZePqKb8ZhZyh jYOYRoBjJJpZoVTSt6MNX95NTvGaSAtqKMu1s3IeMfpwCfQKBPMOuBHr/dUqSC6I U0xxjk/71C15dSPVcTVJT/lmcKc6TXgoagnfbn8GBtDOAjBNsYyUJLQI+db1ERCe 9MEB9k1Z87ROQ5jQCQmWsewOVAtFZBEvSszFmpKv3zTe8M2oFpXG56zckdiumwHU nSfeZTTeWzsFJd30MioEnGYm3ZwKwZx7wi0x4B4WWvBfSpp20Us= =xtLx -----END PGP SIGNATURE----- Merge 4.19.31 into android-4.19 Changes in 4.19.31 media: videobuf2-v4l2: drop WARN_ON in vb2_warn_zero_bytesused() 9p: use inode->i_lock to protect i_size_write() under 32-bit 9p/net: fix memory leak in p9_client_create ASoC: fsl_esai: fix register setting issue in RIGHT_J mode ASoC: codecs: pcm186x: fix wrong usage of DECLARE_TLV_DB_SCALE() ASoC: codecs: pcm186x: Fix energysense SLEEP bit iio: adc: exynos-adc: Fix NULL pointer exception on unbind mei: hbm: clean the feature flags on link reset mei: bus: move hw module get/put to probe/release stm class: Fix an endless loop in channel allocation crypto: caam - fix hash context DMA unmap size crypto: ccree - fix missing break in switch statement crypto: caam - fixed handling of sg list crypto: caam - fix DMA mapping of stack memory crypto: ccree - fix free of unallocated mlli buffer crypto: ccree - unmap buffer before copying IV crypto: ccree - don't copy zero size ciphertext crypto: cfb - add missing 'chunksize' property crypto: cfb - remove bogus memcpy() with src == dest crypto: ahash - fix another early termination in hash walk crypto: rockchip - fix scatterlist nents error crypto: rockchip - update new iv to device in multiple operations drm/imx: ignore plane updates on disabled crtcs gpu: ipu-v3: Fix i.MX51 CSI control registers offset drm/imx: imx-ldb: add missing of_node_puts gpu: ipu-v3: Fix CSI offsets for imx53 ASoC: rt5682: Correct the setting while select ASRC clk for AD/DA filter clocksource: timer-ti-dm: Fix pwm dmtimer usage of fck reparenting KVM: arm/arm64: vgic: Make vgic_dist->lpi_list_lock a raw_spinlock arm64: dts: rockchip: fix graph_port warning on rk3399 bob kevin and excavator s390/dasd: fix using offset into zero size array error Input: pwm-vibra - prevent unbalanced regulator Input: pwm-vibra - stop regulator after disabling pwm, not before ARM: dts: Configure clock parent for pwm vibra ARM: OMAP2+: Variable "reg" in function omap4_dsi_mux_pads() could be uninitialized ASoC: dapm: fix out-of-bounds accesses to DAPM lookup tables ASoC: rsnd: fixup rsnd_ssi_master_clk_start() user count check KVM: arm/arm64: Reset the VCPU without preemption and vcpu state loaded arm/arm64: KVM: Allow a VCPU to fully reset itself arm/arm64: KVM: Don't panic on failure to properly reset system registers KVM: arm/arm64: vgic: Always initialize the group of private IRQs KVM: arm64: Forbid kprobing of the VHE world-switch code ASoC: samsung: Prevent clk_get_rate() calls in atomic context ARM: OMAP2+: fix lack of timer interrupts on CPU1 after hotplug Input: cap11xx - switch to using set_brightness_blocking() Input: ps2-gpio - flush TX work when closing port Input: matrix_keypad - use flush_delayed_work() mac80211: call 
drv_ibss_join() on restart mac80211: Fix Tx aggregation session tear down with ITXQs netfilter: compat: initialize all fields in xt_init blk-mq: insert rq with DONTPREP to hctx dispatch list when requeue ipvs: fix dependency on nf_defrag_ipv6 floppy: check_events callback should not return a negative number xprtrdma: Make sure Send CQ is allocated on an existing compvec NFS: Don't use page_file_mapping after removing the page mm/gup: fix gup_pmd_range() for dax Revert "mm: use early_pfn_to_nid in page_ext_init" scsi: qla2xxx: Fix panic from use after free in qla2x00_async_tm_cmd net: dsa: bcm_sf2: potential array overflow in bcm_sf2_sw_suspend() x86/CPU: Add Icelake model number mm: page_alloc: fix ref bias in page_frag_alloc() for 1-byte allocs net: hns: Fix object reference leaks in hns_dsaf_roce_reset() i2c: cadence: Fix the hold bit setting i2c: bcm2835: Clear current buffer pointers and counts after a transfer auxdisplay: ht16k33: fix potential user-after-free on module unload Input: st-keyscan - fix potential zalloc NULL dereference clk: sunxi-ng: v3s: Fix TCON reset de-assert bit kallsyms: Handle too long symbols in kallsyms.c clk: sunxi: A31: Fix wrong AHB gate number esp: Skip TX bytes accounting when sending from a request socket ARM: 8824/1: fix a migrating irq bug when hotplug cpu bpf: only adjust gso_size on bytestream protocols bpf: fix lockdep false positive in stackmap af_key: unconditionally clone on broadcast ARM: 8835/1: dma-mapping: Clear DMA ops on teardown assoc_array: Fix shortcut creation keys: Fix dependency loop between construction record and auth key scsi: libiscsi: Fix race between iscsi_xmit_task and iscsi_complete_task net: systemport: Fix reception of BPDUs net: dsa: bcm_sf2: Do not assume DSA master supports WoL pinctrl: meson: meson8b: fix the sdxc_a data 1..3 pins qmi_wwan: apply SET_DTR quirk to Sierra WP7607 net: mv643xx_eth: disable clk on error path in mv643xx_eth_shared_probe() xfrm: Fix inbound traffic via XFRM interfaces across network namespaces mailbox: bcm-flexrm-mailbox: Fix FlexRM ring flush timeout issue ASoC: topology: free created components in tplg load error qed: Fix iWARP buffer size provided for syn packet processing. qed: Fix iWARP syn packet mac address validation. ARM: dts: armada-xp: fix Armada XP boards NAND description arm64: Relax GIC version check during early boot ARM: tegra: Restore DT ABI on Tegra124 Chromebooks net: marvell: mvneta: fix DMA debug warning mm: handle lru_add_drain_all for UP properly tmpfs: fix link accounting when a tmpfile is linked in ixgbe: fix older devices that do not support IXGBE_MRQC_L3L4TXSWEN ARCv2: lib: memcpy: fix doing prefetchw outside of buffer ARC: uacces: remove lp_start, lp_end from clobber list ARCv2: support manual regfile save on interrupts ARCv2: don't assume core 0x54 has dual issue phonet: fix building with clang mac80211_hwsim: propagate genlmsg_reply return code bpf, lpm: fix lookup bug in map_delete_elem net: thunderx: make CFG_DONE message to run through generic send-ack sequence net: thunderx: add nicvf_send_msg_to_pf result check for set_rx_mode_task nfp: bpf: fix code-gen bug on BPF_ALU | BPF_XOR | BPF_K nfp: bpf: fix ALU32 high bits clearance bug bnxt_en: Fix typo in firmware message timeout logic. bnxt_en: Wait longer for the firmware message response to complete. net: set static variable an initial value in atl2_probe() selftests: fib_tests: sleep after changing carrier. again. 
tmpfs: fix uninitialized return value in shmem_link stm class: Prevent division by zero nfit: acpi_nfit_ctl(): Check out_obj->type in the right place acpi/nfit: Fix bus command validation nfit/ars: Attempt a short-ARS whenever the ARS state is idle at boot nfit/ars: Attempt short-ARS even in the no_init_ars case libnvdimm/label: Clear 'updating' flag after label-set update libnvdimm, pfn: Fix over-trim in trim_pfn_device() libnvdimm/pmem: Honor force_raw for legacy pmem regions libnvdimm: Fix altmap reservation size calculation fix cgroup_do_mount() handling of failure exits crypto: aead - set CRYPTO_TFM_NEED_KEY if ->setkey() fails crypto: aegis - fix handling chunked inputs crypto: arm/crct10dif - revert to C code for short inputs crypto: arm64/aes-neonbs - fix returning final keystream block crypto: arm64/crct10dif - revert to C code for short inputs crypto: hash - set CRYPTO_TFM_NEED_KEY if ->setkey() fails crypto: morus - fix handling chunked inputs crypto: pcbc - remove bogus memcpy()s with src == dest crypto: skcipher - set CRYPTO_TFM_NEED_KEY if ->setkey() fails crypto: testmgr - skip crc32c context test for ahash algorithms crypto: x86/aegis - fix handling chunked inputs and MAY_SLEEP crypto: x86/aesni-gcm - fix crash on empty plaintext crypto: x86/morus - fix handling chunked inputs and MAY_SLEEP crypto: arm64/aes-ccm - fix logical bug in AAD MAC handling crypto: arm64/aes-ccm - fix bugs in non-NEON fallback routine CIFS: Do not reset lease state to NONE on lease break CIFS: Do not skip SMB2 message IDs on send failures CIFS: Fix read after write for files with read caching tracing: Use strncpy instead of memcpy for string keys in hist triggers tracing: Do not free iter->trace in fail path of tracing_open_pipe() tracing/perf: Use strndup_user() instead of buggy open-coded version xen: fix dom0 boot on huge systems ACPI / device_sysfs: Avoid OF modalias creation for removed device mmc: sdhci-esdhc-imx: fix HS400 timing issue mmc:fix a bug when max_discard is 0 netfilter: ipt_CLUSTERIP: fix warning unused variable cn spi: ti-qspi: Fix mmap read when more than one CS in use spi: pxa2xx: Setup maximum supported DMA transfer length regulator: s2mps11: Fix steps for buck7, buck8 and LDO35 regulator: max77620: Initialize values for DT properties regulator: s2mpa01: Fix step values for some LDOs clocksource/drivers/exynos_mct: Move one-shot check from tick clear to ISR clocksource/drivers/exynos_mct: Clear timer interrupt when shutdown clocksource/drivers/arch_timer: Workaround for Allwinner A64 timer instability s390/setup: fix early warning messages s390/virtio: handle find on invalid queue gracefully scsi: virtio_scsi: don't send sc payload with tmfs scsi: aacraid: Fix performance issue on logical drives scsi: sd: Optimal I/O size should be a multiple of physical block size scsi: target/iscsi: Avoid iscsit_release_commands_from_conn() deadlock scsi: qla2xxx: Fix LUN discovery if loop id is not assigned yet by firmware fs/devpts: always delete dcache dentry-s in dput() splice: don't merge into linked buffers ovl: During copy up, first copy up data and then xattrs ovl: Do not lose security.capability xattr over metadata file copy-up m68k: Add -ffreestanding to CFLAGS Btrfs: setup a nofs context for memory allocation at btrfs_create_tree() Btrfs: setup a nofs context for memory allocation at __btrfs_set_acl btrfs: ensure that a DUP or RAID1 block group has exactly two stripes Btrfs: fix corruption reading shared and compressed extents after hole punching soc: qcom: rpmh: Avoid accessing 
freed memory from batch API libertas_tf: don't set URB_ZERO_PACKET on IN USB transfer irqchip/gic-v3-its: Avoid parsing _indirect_ twice for Device table irqchip/brcmstb-l2: Use _irqsave locking variants in non-interrupt code x86/kprobes: Prohibit probing on optprobe template code cpufreq: kryo: Release OPP tables on module removal cpufreq: tegra124: add missing of_node_put() cpufreq: pxa2xx: remove incorrect __init annotation ext4: fix check of inode in swap_inode_boot_loader ext4: cleanup pagecache before swap i_data ext4: update quota information while swapping boot loader inode ext4: add mask of ext4 flags to swap ext4: fix crash during online resizing PCI/ASPM: Use LTR if already enabled by platform PCI/DPC: Fix print AER status in DPC event handling PCI: dwc: skip MSI init if MSIs have been explicitly disabled IB/hfi1: Close race condition on user context disable and close cxl: Wrap iterations over afu slices inside 'afu_list_lock' ext2: Fix underflow in ext2_max_size() clk: uniphier: Fix update register for CPU-gear clk: clk-twl6040: Fix imprecise external abort for pdmclk clk: samsung: exynos5: Fix possible NULL pointer exception on platform_device_alloc() failure clk: samsung: exynos5: Fix kfree() of const memory on setting driver_override clk: ingenic: Fix round_rate misbehaving with non-integer dividers clk: ingenic: Fix doc of ingenic_cgu_div_info usb: chipidea: tegra: Fix missed ci_hdrc_remove_device() usb: typec: tps6598x: handle block writes separately with plain-I2C adapters dmaengine: usb-dmac: Make DMAC system sleep callbacks explicit mm: hwpoison: fix thp split handing in soft_offline_in_use_page() mm/vmalloc: fix size check for remap_vmalloc_range_partial() mm/memory.c: do_fault: avoid usage of stale vm_area_struct kernel/sysctl.c: add missing range check in do_proc_dointvec_minmax_conv device property: Fix the length used in PROPERTY_ENTRY_STRING() intel_th: Don't reference unassigned outputs parport_pc: fix find_superio io compare code, should use equal test. i2c: tegra: fix maximum transfer size media: i2c: ov5640: Fix post-reset delay gpio: pca953x: Fix dereference of irq data in shutdown can: flexcan: FLEXCAN_IFLAG_MB: add () around macro argument drm/i915: Relax mmap VMA check bpf: only test gso type on gso packets serial: uartps: Fix stuck ISR if RX disabled with non-empty FIFO serial: 8250_of: assume reg-shift of 2 for mrvl,mmp-uart serial: 8250_pci: Fix number of ports for ACCES serial cards serial: 8250_pci: Have ACCES cards that use the four port Pericom PI7C9X7954 chip use the pci_pericom_setup() jbd2: clear dirty flag when revoking a buffer from an older transaction jbd2: fix compile warning when using JBUFFER_TRACE selinux: add the missing walk_size + len check in selinux_sctp_bind_connect security/selinux: fix SECURITY_LSM_NATIVE_LABELS on reused superblock powerpc/32: Clear on-stack exception marker upon exception return powerpc/wii: properly disable use of BATs when requested. 
powerpc/powernv: Make opal log only readable by root powerpc/83xx: Also save/restore SPRG4-7 during suspend powerpc/powernv: Don't reprogram SLW image on every KVM guest entry/exit powerpc: Fix 32-bit KVM-PR lockup and host crash with MacOS guest powerpc/ptrace: Simplify vr_get/set() to avoid GCC warning powerpc/hugetlb: Don't do runtime allocation of 16G pages in LPAR configuration powerpc/traps: fix recoverability of machine check handling on book3s/32 powerpc/traps: Fix the message printed when stack overflows ARM: s3c24xx: Fix boolean expressions in osiris_dvs_notify arm64: Fix HCR.TGE status for NMI contexts arm64: debug: Ensure debug handlers check triggering exception level arm64: KVM: Fix architecturally invalid reset value for FPEXC32_EL2 ipmi_si: fix use-after-free of resource->name dm: fix to_sector() for 32bit dm integrity: limit the rate of error messages mfd: sm501: Fix potential NULL pointer dereference cpcap-charger: generate events for userspace NFS: Fix I/O request leakages NFS: Fix an I/O request leakage in nfs_do_recoalesce NFS: Don't recoalesce on error in nfs_pageio_complete_mirror() nfsd: fix performance-limiting session calculation nfsd: fix memory corruption caused by readdir nfsd: fix wrong check in write_v4_end_grace() NFSv4.1: Reinitialise sequence results before retransmitting a request svcrpc: fix UDP on servers with lots of threads PM / wakeup: Rework wakeup source timer cancellation bcache: never writeback a discard operation stable-kernel-rules.rst: add link to networking patch queue vt: perform safe console erase in the right order x86/unwind/orc: Fix ORC unwind table alignment perf intel-pt: Fix CYC timestamp calculation after OVF perf tools: Fix split_kallsyms_for_kcore() for trampoline symbols perf auxtrace: Define auxtrace record alignment perf intel-pt: Fix overlap calculation for padding perf/x86/intel/uncore: Fix client IMC events return huge result perf intel-pt: Fix divide by zero when TSC is not available md: Fix failed allocation of md_register_thread tpm/tpm_crb: Avoid unaligned reads in crb_recv() tpm: Unify the send callback behaviour rcu: Do RCU GP kthread self-wakeup from softirq and interrupt media: imx: prpencvf: Stop upstream before disabling IDMA channel media: lgdt330x: fix lock status reporting media: uvcvideo: Avoid NULL pointer dereference at the end of streaming media: vimc: Add vimc-streamer for stream control media: imx: csi: Disable CSI immediately after last EOF media: imx: csi: Stop upstream before disabling IDMA channel drm/fb-helper: generic: Fix drm_fbdev_client_restore() drm/radeon/evergreen_cs: fix missing break in switch statement drm/amd/powerplay: correct power reading on fiji drm/amd/display: don't call dm_pp_ function from an fpu block KVM: Call kvm_arch_memslots_updated() before updating memslots KVM: x86/mmu: Detect MMIO generation wrap in any address space KVM: x86/mmu: Do not cache MMIO accesses while memslots are in flux KVM: nVMX: Sign extend displacements of VMX instr's mem operands KVM: nVMX: Apply addr size mask to effective address for VMX instructions KVM: nVMX: Ignore limit checks on VMX instructions using flat segments bcache: use (REQ_META|REQ_PRIO) to indicate bio for metadata s390/setup: fix boot crash for machine without EDAT-1 Linux 4.19.31 Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit bb418a146a
346 changed files with 3060 additions and 1578 deletions
@ -44,6 +44,8 @@ stable kernels.

| Implementor    | Component       | Erratum ID      | Kconfig                     |
+----------------+-----------------+-----------------+-----------------------------+
| Allwinner      | A64/R18         | UNKNOWN1        | SUN50I_ERRATUM_UNKNOWN1     |
|                |                 |                 |                             |
| ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319        |
| ARM            | Cortex-A53      | #827319         | ARM64_ERRATUM_827319        |
| ARM            | Cortex-A53      | #824069         | ARM64_ERRATUM_824069        |

@ -38,6 +38,9 @@ Procedure for submitting patches to the -stable tree

 - If the patch covers files in net/ or drivers/net please follow netdev stable
   submission guidelines as described in
   :ref:`Documentation/networking/netdev-FAQ.rst <netdev-FAQ>`
   after first checking the stable networking queue at
   https://patchwork.ozlabs.org/bundle/davem/stable/?series=&submitter=&state=*&q=&archive=
   to ensure the requested patch is not already queued up.
 - Security patches should not be handled (solely) by the -stable review
   process but should follow the procedures in
   :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`.

Makefile

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 30
SUBLEVEL = 31
EXTRAVERSION =
NAME = "People's Front"

@ -420,6 +420,14 @@ config ARC_HAS_ACCL_REGS
	  (also referred to as r58:r59). These can also be used by gcc as GPR so
	  kernel needs to save/restore per process

config ARC_IRQ_NO_AUTOSAVE
	bool "Disable hardware autosave regfile on interrupts"
	default n
	help
	  On HS cores, taken interrupt auto saves the regfile on stack.
	  This is programmable and can be optionally disabled in which case
	  software INTERRUPT_PROLOGUE/EPILGUE do the needed work

endif	# ISA_ARCV2

endmenu	 # "ARC CPU Configuration"

@ -151,6 +151,14 @@ struct bcr_isa_arcv2 {
#endif
};

struct bcr_uarch_build_arcv2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int pad:8, prod:8, maj:8, min:8;
#else
	unsigned int min:8, maj:8, prod:8, pad:8;
#endif
};

struct bcr_mpy {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;

@ -17,6 +17,33 @@
	;
	; Now manually save: r12, sp, fp, gp, r25

#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
.ifnc \called_from, exception
	st.as	r9, [sp, -10]	; save r9 in it's final stack slot
	sub	sp, sp, 12	; skip JLI, LDI, EI

	PUSH	lp_count
	PUSHAX	lp_start
	PUSHAX	lp_end
	PUSH	blink

	PUSH	r11
	PUSH	r10

	sub	sp, sp, 4	; skip r9

	PUSH	r8
	PUSH	r7
	PUSH	r6
	PUSH	r5
	PUSH	r4
	PUSH	r3
	PUSH	r2
	PUSH	r1
	PUSH	r0
.endif
#endif

#ifdef CONFIG_ARC_HAS_ACCL_REGS
	PUSH	r59
	PUSH	r58

@ -86,6 +113,33 @@
	POP	r59
#endif

#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
.ifnc \called_from, exception
	POP	r0
	POP	r1
	POP	r2
	POP	r3
	POP	r4
	POP	r5
	POP	r6
	POP	r7
	POP	r8
	POP	r9
	POP	r10
	POP	r11

	POP	blink
	POPAX	lp_end
	POPAX	lp_start

	POP	r9
	mov	lp_count, r9

	add	sp, sp, 12	; skip JLI, LDI, EI
	ld.as	r9, [sp, -10]	; reload r9 which got clobbered
.endif
#endif

.endm

/*------------------------------------------------------------------------*/

@ -207,7 +207,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
	*/
	"=&r" (tmp), "+r" (to), "+r" (from)
	:
	: "lp_count", "lp_start", "lp_end", "memory");
	: "lp_count", "memory");

	return n;
}

@ -433,7 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
	*/
	"=&r" (tmp), "+r" (to), "+r" (from)
	:
	: "lp_count", "lp_start", "lp_end", "memory");
	: "lp_count", "memory");

	return n;
}

@ -653,7 +653,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
	"	.previous			\n"
	: "+r"(d_char), "+r"(res)
	: "i"(0)
	: "lp_count", "lp_start", "lp_end", "memory");
	: "lp_count", "memory");

	return res;
}

@ -686,7 +686,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
	"	.previous			\n"
	: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
	: "g"(-EFAULT), "r"(count)
	: "lp_count", "lp_start", "lp_end", "memory");
	: "lp_count", "memory");

	return res;
}

@ -209,7 +209,9 @@ restore_regs:
	;####### Return from Intr #######

debug_marker_l1:
	bbit1.nt	r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
	; bbit1.nt	r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
	btst	r0, STATUS_DE_BIT	; Z flag set if bit clear
	bnz	.Lintr_ret_to_delay_slot	; branch if STATUS_DE_BIT set

.Lisr_ret_fast_path:
	; Handle special case #1: (Entry via Exception, Return via IRQ)

@ -49,11 +49,13 @@ void arc_init_IRQ(void)

	*(unsigned int *)&ictrl = 0;

#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE
	ictrl.save_nr_gpr_pairs = 6;	/* r0 to r11 (r12 saved manually) */
	ictrl.save_blink = 1;
	ictrl.save_lp_regs = 1;		/* LP_COUNT, LP_START, LP_END */
	ictrl.save_u_to_u = 0;		/* user ctxt saved on kernel stack */
	ictrl.save_idx_regs = 1;	/* JLI, LDI, EI */
#endif

	WRITE_AUX(AUX_IRQ_CTRL, ictrl);

@ -196,13 +196,29 @@ static void read_arc_build_cfg_regs(void)
		cpu->bpu.num_pred = 2048 << bpu.pte;

	if (cpu->core.family >= 0x54) {
		unsigned int exec_ctrl;

		READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
		cpu->extn.dual_enb = !(exec_ctrl & 1);
		struct bcr_uarch_build_arcv2 uarch;

		/* dual issue always present for this core */
		cpu->extn.dual = 1;
		/*
		 * The first 0x54 core (uarch maj:min 0:1 or 0:2) was
		 * dual issue only (HS4x). But next uarch rev (1:0)
		 * allows it be configured for single issue (HS3x)
		 * Ensure we fiddle with dual issue only on HS4x
		 */
		READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);

		if (uarch.prod == 4) {
			unsigned int exec_ctrl;

			/* dual issue hardware always present */
			cpu->extn.dual = 1;

			READ_BCR(AUX_EXEC_CTRL, exec_ctrl);

			/* dual issue hardware enabled ? */
			cpu->extn.dual_enb = !(exec_ctrl & 1);

		}
	}
}

@ -25,15 +25,11 @@
#endif

#ifdef CONFIG_ARC_HAS_LL64
# define PREFETCH_READ(RX)	prefetch    [RX, 56]
# define PREFETCH_WRITE(RX)	prefetchw   [RX, 64]
# define LOADX(DST,RX)		ldd.ab	DST, [RX, 8]
# define STOREX(SRC,RX)		std.ab	SRC, [RX, 8]
# define ZOLSHFT		5
# define ZOLAND			0x1F
#else
# define PREFETCH_READ(RX)	prefetch    [RX, 28]
# define PREFETCH_WRITE(RX)	prefetchw   [RX, 32]
# define LOADX(DST,RX)		ld.ab	DST, [RX, 4]
# define STOREX(SRC,RX)		st.ab	SRC, [RX, 4]
# define ZOLSHFT		4

@ -41,8 +37,6 @@
#endif

ENTRY_CFI(memcpy)
	prefetch [r1]		; Prefetch the read location
	prefetchw [r0]		; Prefetch the write location
	mov.f	0, r2
;;; if size is zero
	jz.d	[blink]

@ -72,8 +66,6 @@ ENTRY_CFI(memcpy)
	lpnz	@.Lcopy32_64bytes
	;; LOOP START
	LOADX (r6, r1)
	PREFETCH_READ (r1)
	PREFETCH_WRITE (r3)
	LOADX (r8, r1)
	LOADX (r10, r1)
	LOADX (r4, r1)

@ -117,9 +109,7 @@ ENTRY_CFI(memcpy)
	lpnz	@.Lcopy8bytes_1
	;; LOOP START
	ld.ab	r6, [r1, 4]
	prefetch [r1, 28]	;Prefetch the next read location
	ld.ab	r8, [r1,4]
	prefetchw [r3, 32]	;Prefetch the next write location

	SHIFT_1	(r7, r6, 24)
	or	r7, r7, r5

@ -162,9 +152,7 @@ ENTRY_CFI(memcpy)
	lpnz	@.Lcopy8bytes_2
	;; LOOP START
	ld.ab	r6, [r1, 4]
	prefetch [r1, 28]	;Prefetch the next read location
	ld.ab	r8, [r1,4]
	prefetchw [r3, 32]	;Prefetch the next write location

	SHIFT_1	(r7, r6, 16)
	or	r7, r7, r5

@ -204,9 +192,7 @@ ENTRY_CFI(memcpy)
	lpnz	@.Lcopy8bytes_3
	;; LOOP START
	ld.ab	r6, [r1, 4]
	prefetch [r1, 28]	;Prefetch the next read location
	ld.ab	r8, [r1,4]
	prefetchw [r3, 32]	;Prefetch the next write location

	SHIFT_1	(r7, r6, 8)
	or	r7, r7, r5

@ -9,6 +9,7 @@ menuconfig ARC_SOC_HSDK
	bool "ARC HS Development Kit SOC"
	depends on ISA_ARCV2
	select ARC_HAS_ACCL_REGS
	select ARC_IRQ_NO_AUTOSAVE
	select CLK_HSDK
	select RESET_HSDK
	select MIGHT_HAVE_PCI

@ -1444,6 +1444,7 @@ config NR_CPUS
config HOTPLUG_CPU
	bool "Support for hot-pluggable CPUs"
	depends on SMP
	select GENERIC_IRQ_MIGRATION
	help
	  Say Y here to experiment with turning CPUs off and on. CPUs
	  can be controlled through /sys/devices/system/cpu.

|
@ -144,30 +144,32 @@
|
|||
status = "okay";
|
||||
};
|
||||
|
||||
nand@d0000 {
|
||||
nand-controller@d0000 {
|
||||
status = "okay";
|
||||
label = "pxa3xx_nand-0";
|
||||
num-cs = <1>;
|
||||
marvell,nand-keep-config;
|
||||
nand-on-flash-bbt;
|
||||
|
||||
partitions {
|
||||
compatible = "fixed-partitions";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
nand@0 {
|
||||
reg = <0>;
|
||||
label = "pxa3xx_nand-0";
|
||||
nand-rb = <0>;
|
||||
nand-on-flash-bbt;
|
||||
|
||||
partition@0 {
|
||||
label = "U-Boot";
|
||||
reg = <0 0x800000>;
|
||||
};
|
||||
partition@800000 {
|
||||
label = "Linux";
|
||||
reg = <0x800000 0x800000>;
|
||||
};
|
||||
partition@1000000 {
|
||||
label = "Filesystem";
|
||||
reg = <0x1000000 0x3f000000>;
|
||||
partitions {
|
||||
compatible = "fixed-partitions";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
|
||||
partition@0 {
|
||||
label = "U-Boot";
|
||||
reg = <0 0x800000>;
|
||||
};
|
||||
partition@800000 {
|
||||
label = "Linux";
|
||||
reg = <0x800000 0x800000>;
|
||||
};
|
||||
partition@1000000 {
|
||||
label = "Filesystem";
|
||||
reg = <0x1000000 0x3f000000>;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
@ -160,12 +160,15 @@
|
|||
status = "okay";
|
||||
};
|
||||
|
||||
nand@d0000 {
|
||||
nand-controller@d0000 {
|
||||
status = "okay";
|
||||
label = "pxa3xx_nand-0";
|
||||
num-cs = <1>;
|
||||
marvell,nand-keep-config;
|
||||
nand-on-flash-bbt;
|
||||
|
||||
nand@0 {
|
||||
reg = <0>;
|
||||
label = "pxa3xx_nand-0";
|
||||
nand-rb = <0>;
|
||||
nand-on-flash-bbt;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@ -81,49 +81,52 @@
|
|||
|
||||
};
|
||||
|
||||
nand@d0000 {
|
||||
nand-controller@d0000 {
|
||||
status = "okay";
|
||||
label = "pxa3xx_nand-0";
|
||||
num-cs = <1>;
|
||||
marvell,nand-keep-config;
|
||||
nand-on-flash-bbt;
|
||||
|
||||
partitions {
|
||||
compatible = "fixed-partitions";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
nand@0 {
|
||||
reg = <0>;
|
||||
label = "pxa3xx_nand-0";
|
||||
nand-rb = <0>;
|
||||
nand-on-flash-bbt;
|
||||
|
||||
partition@0 {
|
||||
label = "u-boot";
|
||||
reg = <0x00000000 0x000e0000>;
|
||||
read-only;
|
||||
};
|
||||
partitions {
|
||||
compatible = "fixed-partitions";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
|
||||
partition@e0000 {
|
||||
label = "u-boot-env";
|
||||
reg = <0x000e0000 0x00020000>;
|
||||
read-only;
|
||||
};
|
||||
partition@0 {
|
||||
label = "u-boot";
|
||||
reg = <0x00000000 0x000e0000>;
|
||||
read-only;
|
||||
};
|
||||
|
||||
partition@100000 {
|
||||
label = "u-boot-env2";
|
||||
reg = <0x00100000 0x00020000>;
|
||||
read-only;
|
||||
};
|
||||
partition@e0000 {
|
||||
label = "u-boot-env";
|
||||
reg = <0x000e0000 0x00020000>;
|
||||
read-only;
|
||||
};
|
||||
|
||||
partition@120000 {
|
||||
label = "zImage";
|
||||
reg = <0x00120000 0x00400000>;
|
||||
};
|
||||
partition@100000 {
|
||||
label = "u-boot-env2";
|
||||
reg = <0x00100000 0x00020000>;
|
||||
read-only;
|
||||
};
|
||||
|
||||
partition@520000 {
|
||||
label = "initrd";
|
||||
reg = <0x00520000 0x00400000>;
|
||||
};
|
||||
partition@120000 {
|
||||
label = "zImage";
|
||||
reg = <0x00120000 0x00400000>;
|
||||
};
|
||||
|
||||
partition@e00000 {
|
||||
label = "boot";
|
||||
reg = <0x00e00000 0x3f200000>;
|
||||
partition@520000 {
|
||||
label = "initrd";
|
||||
reg = <0x00520000 0x00400000>;
|
||||
};
|
||||
|
||||
partition@e00000 {
|
||||
label = "boot";
|
||||
reg = <0x00e00000 0x3f200000>;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
@ -644,6 +644,17 @@
|
|||
};
|
||||
};
|
||||
|
||||
/* Configure pwm clock source for timers 8 & 9 */
|
||||
&timer8 {
|
||||
assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
|
||||
assigned-clock-parents = <&sys_clkin_ck>;
|
||||
};
|
||||
|
||||
&timer9 {
|
||||
assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>;
|
||||
assigned-clock-parents = <&sys_clkin_ck>;
|
||||
};
|
||||
|
||||
/*
|
||||
* As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
|
||||
* uart1 wakeirq.
|
||||
|
|
|
@ -13,10 +13,25 @@
|
|||
stdout-path = "serial0:115200n8";
|
||||
};
|
||||
|
||||
memory@80000000 {
|
||||
/*
|
||||
* Note that recent version of the device tree compiler (starting with
|
||||
* version 1.4.2) warn about this node containing a reg property, but
|
||||
* missing a unit-address. However, the bootloader on these Chromebook
|
||||
* devices relies on the full name of this node to be exactly /memory.
|
||||
* Adding the unit-address causes the bootloader to create a /memory
|
||||
* node and write the memory bank configuration to that node, which in
|
||||
* turn leads the kernel to believe that the device has 2 GiB of
|
||||
* memory instead of the amount detected by the bootloader.
|
||||
*
|
||||
* The name of this node is effectively ABI and must not be changed.
|
||||
*/
|
||||
memory {
|
||||
device_type = "memory";
|
||||
reg = <0x0 0x80000000 0x0 0x80000000>;
|
||||
};
|
||||
|
||||
/delete-node/ memory@80000000;
|
||||
|
||||
host1x@50000000 {
|
||||
hdmi@54280000 {
|
||||
status = "okay";
|
||||
|
|
|
@ -124,10 +124,10 @@ ENTRY(crc_t10dif_pmull)
|
|||
vext.8 q10, qzr, q0, #4
|
||||
|
||||
// receive the initial 64B data, xor the initial crc value
|
||||
vld1.64 {q0-q1}, [arg2, :128]!
|
||||
vld1.64 {q2-q3}, [arg2, :128]!
|
||||
vld1.64 {q4-q5}, [arg2, :128]!
|
||||
vld1.64 {q6-q7}, [arg2, :128]!
|
||||
vld1.64 {q0-q1}, [arg2]!
|
||||
vld1.64 {q2-q3}, [arg2]!
|
||||
vld1.64 {q4-q5}, [arg2]!
|
||||
vld1.64 {q6-q7}, [arg2]!
|
||||
CPU_LE( vrev64.8 q0, q0 )
|
||||
CPU_LE( vrev64.8 q1, q1 )
|
||||
CPU_LE( vrev64.8 q2, q2 )
|
||||
|
@ -167,7 +167,7 @@ CPU_LE( vrev64.8 q7, q7 )
|
|||
_fold_64_B_loop:
|
||||
|
||||
.macro fold64, reg1, reg2
|
||||
vld1.64 {q11-q12}, [arg2, :128]!
|
||||
vld1.64 {q11-q12}, [arg2]!
|
||||
|
||||
vmull.p64 q8, \reg1\()h, d21
|
||||
vmull.p64 \reg1, \reg1\()l, d20
|
||||
|
@ -238,7 +238,7 @@ _16B_reduction_loop:
|
|||
vmull.p64 q7, d15, d21
|
||||
veor.8 q7, q7, q8
|
||||
|
||||
vld1.64 {q0}, [arg2, :128]!
|
||||
vld1.64 {q0}, [arg2]!
|
||||
CPU_LE( vrev64.8 q0, q0 )
|
||||
vswp d0, d1
|
||||
veor.8 q7, q7, q0
|
||||
|
@ -335,7 +335,7 @@ _less_than_128:
|
|||
vmov.i8 q0, #0
|
||||
vmov s3, arg1_low32 // get the initial crc value
|
||||
|
||||
vld1.64 {q7}, [arg2, :128]!
|
||||
vld1.64 {q7}, [arg2]!
|
||||
CPU_LE( vrev64.8 q7, q7 )
|
||||
vswp d14, d15
|
||||
veor.8 q7, q7, q0
|
||||
|
|
|
@ -35,26 +35,15 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
|
|||
unsigned int length)
|
||||
{
|
||||
u16 *crc = shash_desc_ctx(desc);
|
||||
unsigned int l;
|
||||
|
||||
if (!may_use_simd()) {
|
||||
*crc = crc_t10dif_generic(*crc, data, length);
|
||||
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
|
||||
kernel_neon_begin();
|
||||
*crc = crc_t10dif_pmull(*crc, data, length);
|
||||
kernel_neon_end();
|
||||
} else {
|
||||
if (unlikely((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
|
||||
l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
|
||||
((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
|
||||
|
||||
*crc = crc_t10dif_generic(*crc, data, l);
|
||||
|
||||
length -= l;
|
||||
data += l;
|
||||
}
|
||||
if (length > 0) {
|
||||
kernel_neon_begin();
|
||||
*crc = crc_t10dif_pmull(*crc, data, length);
|
||||
kernel_neon_end();
|
||||
}
|
||||
*crc = crc_t10dif_generic(*crc, data, length);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -25,7 +25,6 @@
|
|||
#ifndef __ASSEMBLY__
|
||||
struct irqaction;
|
||||
struct pt_regs;
|
||||
extern void migrate_irqs(void);
|
||||
|
||||
extern void asm_do_IRQ(unsigned int, struct pt_regs *);
|
||||
void handle_IRQ(unsigned int, struct pt_regs *);
|
||||
|
|
|
@ -48,6 +48,7 @@
|
|||
#define KVM_REQ_SLEEP \
|
||||
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
|
||||
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
|
||||
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
|
||||
|
||||
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
|
||||
|
||||
|
@ -147,6 +148,13 @@ struct kvm_cpu_context {
|
|||
|
||||
typedef struct kvm_cpu_context kvm_cpu_context_t;
|
||||
|
||||
struct vcpu_reset_state {
|
||||
unsigned long pc;
|
||||
unsigned long r0;
|
||||
bool be;
|
||||
bool reset;
|
||||
};
|
||||
|
||||
struct kvm_vcpu_arch {
|
||||
struct kvm_cpu_context ctxt;
|
||||
|
||||
|
@ -186,6 +194,8 @@ struct kvm_vcpu_arch {
|
|||
/* Cache some mmu pages needed inside spinlock regions */
|
||||
struct kvm_mmu_memory_cache mmu_page_cache;
|
||||
|
||||
struct vcpu_reset_state reset_state;
|
||||
|
||||
/* Detect first run of a vcpu */
|
||||
bool has_run_once;
|
||||
};
|
||||
|
|
|
@ -31,7 +31,6 @@
|
|||
#include <linux/smp.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/ratelimit.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/kallsyms.h>
|
||||
|
@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void)
|
|||
return nr_irqs;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
static bool migrate_one_irq(struct irq_desc *desc)
|
||||
{
|
||||
struct irq_data *d = irq_desc_get_irq_data(desc);
|
||||
const struct cpumask *affinity = irq_data_get_affinity_mask(d);
|
||||
struct irq_chip *c;
|
||||
bool ret = false;
|
||||
|
||||
/*
|
||||
* If this is a per-CPU interrupt, or the affinity does not
|
||||
* include this CPU, then we have nothing to do.
|
||||
*/
|
||||
if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
|
||||
return false;
|
||||
|
||||
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
|
||||
affinity = cpu_online_mask;
|
||||
ret = true;
|
||||
}
|
||||
|
||||
c = irq_data_get_irq_chip(d);
|
||||
if (!c->irq_set_affinity)
|
||||
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
|
||||
else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
|
||||
cpumask_copy(irq_data_get_affinity_mask(d), affinity);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* The current CPU has been marked offline. Migrate IRQs off this CPU.
|
||||
* If the affinity settings do not allow other CPUs, force them onto any
|
||||
* available CPU.
|
||||
*
|
||||
* Note: we must iterate over all IRQs, whether they have an attached
|
||||
* action structure or not, as we need to get chained interrupts too.
|
||||
*/
|
||||
void migrate_irqs(void)
|
||||
{
|
||||
unsigned int i;
|
||||
struct irq_desc *desc;
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
for_each_irq_desc(i, desc) {
|
||||
bool affinity_broken;
|
||||
|
||||
raw_spin_lock(&desc->lock);
|
||||
affinity_broken = migrate_one_irq(desc);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
|
||||
if (affinity_broken)
|
||||
pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
|
||||
i, smp_processor_id());
|
||||
}
|
||||
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
#endif /* CONFIG_HOTPLUG_CPU */
|
||||
|
|
|
@ -254,7 +254,7 @@ int __cpu_disable(void)
|
|||
/*
|
||||
* OK - migrate IRQs away from this CPU
|
||||
*/
|
||||
migrate_irqs();
|
||||
irq_migrate_all_off_this_cpu();
|
||||
|
||||
/*
|
||||
* Flush user cache and TLB mappings, and then remove this CPU
|
||||
|
|
|
@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
|
|||
reset_coproc_regs(vcpu, table, num);
|
||||
|
||||
for (num = 1; num < NR_CP15_REGS; num++)
|
||||
if (vcpu_cp15(vcpu, num) == 0x42424242)
|
||||
panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
|
||||
WARN(vcpu_cp15(vcpu, num) == 0x42424242,
|
||||
"Didn't reset vcpu_cp15(vcpu, %zi)", num);
|
||||
}
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
#include <asm/cputype.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_coproc.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
|
||||
#include <kvm/arm_arch_timer.h>
|
||||
|
||||
|
@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
|||
/* Reset CP15 registers */
|
||||
kvm_reset_coprocs(vcpu);
|
||||
|
||||
/*
|
||||
* Additional reset state handling that PSCI may have imposed on us.
|
||||
* Must be done after all the sys_reg reset.
|
||||
*/
|
||||
if (READ_ONCE(vcpu->arch.reset_state.reset)) {
|
||||
unsigned long target_pc = vcpu->arch.reset_state.pc;
|
||||
|
||||
/* Gracefully handle Thumb2 entry point */
|
||||
if (target_pc & 1) {
|
||||
target_pc &= ~1UL;
|
||||
vcpu_set_thumb(vcpu);
|
||||
}
|
||||
|
||||
/* Propagate caller endianness */
|
||||
if (vcpu->arch.reset_state.be)
|
||||
kvm_vcpu_set_be(vcpu);
|
||||
|
||||
*vcpu_pc(vcpu) = target_pc;
|
||||
vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
|
||||
|
||||
vcpu->arch.reset_state.reset = false;
|
||||
}
|
||||
|
||||
/* Reset arch_timer context */
|
||||
return kvm_timer_vcpu_reset(vcpu);
|
||||
}
|
||||
|
|
|
@ -152,6 +152,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
|
|||
mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
|
||||
(cx->mpu_logic_state == PWRDM_POWER_OFF);
|
||||
|
||||
/* Enter broadcast mode for periodic timers */
|
||||
tick_broadcast_enable();
|
||||
|
||||
/* Enter broadcast mode for one-shot timers */
|
||||
tick_broadcast_enter();
|
||||
|
||||
/*
|
||||
|
@ -218,15 +222,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
|
|||
return index;
|
||||
}
|
||||
|
||||
/*
|
||||
* For each cpu, setup the broadcast timer because local timers
|
||||
* stops for the states above C1.
|
||||
*/
|
||||
static void omap_setup_broadcast_timer(void *arg)
|
||||
{
|
||||
tick_broadcast_enable();
|
||||
}
|
||||
|
||||
static struct cpuidle_driver omap4_idle_driver = {
|
||||
.name = "omap4_idle",
|
||||
.owner = THIS_MODULE,
|
||||
|
@ -319,8 +314,5 @@ int __init omap4_idle_init(void)
|
|||
if (!cpu_clkdm[0] || !cpu_clkdm[1])
|
||||
return -ENODEV;
|
||||
|
||||
/* Configure the broadcast timer on each cpu */
|
||||
on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
|
||||
|
||||
return cpuidle_register(idle_driver, cpu_online_mask);
|
||||
}
|
||||
|
|
|
@ -83,6 +83,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
|
|||
u32 enable_mask, enable_shift;
|
||||
u32 pipd_mask, pipd_shift;
|
||||
u32 reg;
|
||||
int ret;
|
||||
|
||||
if (dsi_id == 0) {
|
||||
enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
|
||||
|
@ -98,7 +99,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
|
|||
return -ENODEV;
|
||||
}
|
||||
|
||||
regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, ®);
|
||||
ret = regmap_read(omap4_dsi_mux_syscon,
|
||||
OMAP4_DSIPHY_SYSCON_OFFSET,
|
||||
®);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
reg &= ~enable_mask;
|
||||
reg &= ~pipd_mask;
|
||||
|
|
|
@ -65,16 +65,16 @@ static int osiris_dvs_notify(struct notifier_block *nb,
|
|||
|
||||
switch (val) {
|
||||
case CPUFREQ_PRECHANGE:
|
||||
if (old_dvs & !new_dvs ||
|
||||
cur_dvs & !new_dvs) {
|
||||
if ((old_dvs && !new_dvs) ||
|
||||
(cur_dvs && !new_dvs)) {
|
||||
pr_debug("%s: exiting dvs\n", __func__);
|
||||
cur_dvs = false;
|
||||
gpio_set_value(OSIRIS_GPIO_DVS, 1);
|
||||
}
|
||||
break;
|
||||
case CPUFREQ_POSTCHANGE:
|
||||
if (!old_dvs & new_dvs ||
|
||||
!cur_dvs & new_dvs) {
|
||||
if ((!old_dvs && new_dvs) ||
|
||||
(!cur_dvs && new_dvs)) {
|
||||
pr_debug("entering dvs\n");
|
||||
cur_dvs = true;
|
||||
gpio_set_value(OSIRIS_GPIO_DVS, 0);
|
||||
|
|
|
@ -2400,4 +2400,6 @@ void arch_teardown_dma_ops(struct device *dev)
|
|||
return;
|
||||
|
||||
arm_teardown_iommu_dma_ops(dev);
|
||||
/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
|
||||
set_dma_ops(dev, NULL);
|
||||
}
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
backlight = <&backlight>;
|
||||
power-supply = <&pp3300_disp>;
|
||||
|
||||
ports {
|
||||
port {
|
||||
panel_in_edp: endpoint {
|
||||
remote-endpoint = <&edp_out_panel>;
|
||||
};
|
||||
|
|
|
@ -43,7 +43,7 @@
|
|||
backlight = <&backlight>;
|
||||
power-supply = <&pp3300_disp>;
|
||||
|
||||
ports {
|
||||
port {
|
||||
panel_in_edp: endpoint {
|
||||
remote-endpoint = <&edp_out_panel>;
|
||||
};
|
||||
|
|
|
@ -50,7 +50,7 @@
|
|||
pinctrl-0 = <&lcd_panel_reset>;
|
||||
power-supply = <&vcc3v3_s0>;
|
||||
|
||||
ports {
|
||||
port {
|
||||
panel_in_edp: endpoint {
|
||||
remote-endpoint = <&edp_out_panel>;
|
||||
};
|
||||
|
|
|
@ -74,12 +74,13 @@ ENTRY(ce_aes_ccm_auth_data)
|
|||
beq 10f
|
||||
ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
|
||||
b 7b
|
||||
8: mov w7, w8
|
||||
8: cbz w8, 91f
|
||||
mov w7, w8
|
||||
add w8, w8, #16
|
||||
9: ext v1.16b, v1.16b, v1.16b, #1
|
||||
adds w7, w7, #1
|
||||
bne 9b
|
||||
eor v0.16b, v0.16b, v1.16b
|
||||
91: eor v0.16b, v0.16b, v1.16b
|
||||
st1 {v0.16b}, [x0]
|
||||
10: str w8, [x3]
|
||||
ret
|
||||
|
|
|
@ -125,7 +125,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
|
|||
abytes -= added;
|
||||
}
|
||||
|
||||
while (abytes > AES_BLOCK_SIZE) {
|
||||
while (abytes >= AES_BLOCK_SIZE) {
|
||||
__aes_arm64_encrypt(key->key_enc, mac, mac,
|
||||
num_rounds(key));
|
||||
crypto_xor(mac, in, AES_BLOCK_SIZE);
|
||||
|
@ -139,8 +139,6 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
|
|||
num_rounds(key));
|
||||
crypto_xor(mac, in, abytes);
|
||||
*macp = abytes;
|
||||
} else {
|
||||
*macp = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -971,18 +971,22 @@ CPU_LE( rev x8, x8 )
|
|||
|
||||
8: next_ctr v0
|
||||
st1 {v0.16b}, [x24]
|
||||
cbz x23, 0f
|
||||
cbz x23, .Lctr_done
|
||||
|
||||
cond_yield_neon 98b
|
||||
b 99b
|
||||
|
||||
0: frame_pop
|
||||
.Lctr_done:
|
||||
frame_pop
|
||||
ret
|
||||
|
||||
/*
|
||||
* If we are handling the tail of the input (x6 != NULL), return the
|
||||
* final keystream block back to the caller.
|
||||
*/
|
||||
0: cbz x25, 8b
|
||||
st1 {v0.16b}, [x25]
|
||||
b 8b
|
||||
1: cbz x25, 8b
|
||||
st1 {v1.16b}, [x25]
|
||||
b 8b
|
||||
|
|
|
@ -36,26 +36,13 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
|
|||
unsigned int length)
|
||||
{
|
||||
u16 *crc = shash_desc_ctx(desc);
|
||||
unsigned int l;
|
||||
|
||||
if (unlikely((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
|
||||
l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
|
||||
((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
|
||||
|
||||
*crc = crc_t10dif_generic(*crc, data, l);
|
||||
|
||||
length -= l;
|
||||
data += l;
|
||||
}
|
||||
|
||||
if (length > 0) {
|
||||
if (may_use_simd()) {
|
||||
kernel_neon_begin();
|
||||
*crc = crc_t10dif_pmull(*crc, data, length);
|
||||
kernel_neon_end();
|
||||
} else {
|
||||
*crc = crc_t10dif_generic(*crc, data, length);
|
||||
}
|
||||
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
|
||||
kernel_neon_begin();
|
||||
*crc = crc_t10dif_pmull(*crc, data, length);
|
||||
kernel_neon_end();
|
||||
} else {
|
||||
*crc = crc_t10dif_generic(*crc, data, length);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -17,8 +17,12 @@
|
|||
#define __ASM_HARDIRQ_H
|
||||
|
||||
#include <linux/cache.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/threads.h>
|
||||
#include <asm/barrier.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/sysreg.h>
|
||||
|
||||
#define NR_IPI 7
|
||||
|
||||
|
@ -37,6 +41,33 @@ u64 smp_irq_stat_cpu(unsigned int cpu);
|
|||
|
||||
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
|
||||
|
||||
struct nmi_ctx {
|
||||
u64 hcr;
|
||||
};
|
||||
|
||||
DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
|
||||
|
||||
#define arch_nmi_enter() \
|
||||
do { \
|
||||
if (is_kernel_in_hyp_mode()) { \
|
||||
struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
|
||||
nmi_ctx->hcr = read_sysreg(hcr_el2); \
|
||||
if (!(nmi_ctx->hcr & HCR_TGE)) { \
|
||||
write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2); \
|
||||
isb(); \
|
||||
} \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define arch_nmi_exit() \
|
||||
do { \
|
||||
if (is_kernel_in_hyp_mode()) { \
|
||||
struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
|
||||
if (!(nmi_ctx->hcr & HCR_TGE)) \
|
||||
write_sysreg(nmi_ctx->hcr, hcr_el2); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
static inline void ack_bad_irq(unsigned int irq)
|
||||
{
|
||||
extern unsigned long irq_err_count;
|
||||
|
|
|
@ -48,6 +48,7 @@
|
|||
#define KVM_REQ_SLEEP \
|
||||
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
|
||||
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
|
||||
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
|
||||
|
||||
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
|
||||
|
||||
|
@ -206,6 +207,13 @@ struct kvm_cpu_context {
|
|||
|
||||
typedef struct kvm_cpu_context kvm_cpu_context_t;
|
||||
|
||||
struct vcpu_reset_state {
|
||||
unsigned long pc;
|
||||
unsigned long r0;
|
||||
bool be;
|
||||
bool reset;
|
||||
};
|
||||
|
||||
struct kvm_vcpu_arch {
|
||||
struct kvm_cpu_context ctxt;
|
||||
|
||||
|
@ -295,6 +303,9 @@ struct kvm_vcpu_arch {
|
|||
/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
|
||||
u64 vsesr_el2;
|
||||
|
||||
/* Additional reset state */
|
||||
struct vcpu_reset_state reset_state;
|
||||
|
||||
/* True when deferrable sysregs are loaded on the physical CPU,
|
||||
* see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
|
||||
bool sysregs_loaded_on_cpu;
|
||||
|
|
|
@ -522,8 +522,7 @@ set_hcr:
|
|||
/* GICv3 system register access */
|
||||
mrs x0, id_aa64pfr0_el1
|
||||
ubfx x0, x0, #24, #4
|
||||
cmp x0, #1
|
||||
b.ne 3f
|
||||
cbz x0, 3f
|
||||
|
||||
mrs_s x0, SYS_ICC_SRE_EL2
|
||||
orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
|
||||
|
|
|
@ -33,6 +33,9 @@
|
|||
|
||||
unsigned long irq_err_count;
|
||||
|
||||
/* Only access this in an NMI enter/exit */
|
||||
DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
|
||||
|
||||
DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
|
||||
|
||||
int arch_show_interrupts(struct seq_file *p, int prec)
|
||||
|
|
|
@ -244,27 +244,33 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
|
|||
|
||||
static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
|
||||
{
|
||||
if (user_mode(regs))
|
||||
return DBG_HOOK_ERROR;
|
||||
|
||||
kgdb_handle_exception(1, SIGTRAP, 0, regs);
|
||||
return 0;
|
||||
return DBG_HOOK_HANDLED;
|
||||
}
|
||||
NOKPROBE_SYMBOL(kgdb_brk_fn)
|
||||
|
||||
static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
|
||||
{
|
||||
if (user_mode(regs))
|
||||
return DBG_HOOK_ERROR;
|
||||
|
||||
compiled_break = 1;
|
||||
kgdb_handle_exception(1, SIGTRAP, 0, regs);
|
||||
|
||||
return 0;
|
||||
return DBG_HOOK_HANDLED;
|
||||
}
|
||||
NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
|
||||
|
||||
static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
|
||||
{
|
||||
if (!kgdb_single_step)
|
||||
if (user_mode(regs) || !kgdb_single_step)
|
||||
return DBG_HOOK_ERROR;
|
||||
|
||||
kgdb_handle_exception(1, SIGTRAP, 0, regs);
|
||||
return 0;
|
||||
return DBG_HOOK_HANDLED;
|
||||
}
|
||||
NOKPROBE_SYMBOL(kgdb_step_brk_fn);
|
||||
|
||||
|
|
|
@ -450,6 +450,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
|
|||
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
|
||||
int retval;
|
||||
|
||||
if (user_mode(regs))
|
||||
return DBG_HOOK_ERROR;
|
||||
|
||||
/* return error if this is not our step */
|
||||
retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
|
||||
|
||||
|
@ -466,6 +469,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
|
|||
int __kprobes
|
||||
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
|
||||
{
|
||||
if (user_mode(regs))
|
||||
return DBG_HOOK_ERROR;
|
||||
|
||||
kprobe_handler(regs);
|
||||
return DBG_HOOK_HANDLED;
|
||||
}
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include <kvm/arm_psci.h>
|
||||
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/kprobes.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_host.h>
|
||||
|
@ -107,6 +108,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
|
|||
|
||||
write_sysreg(kvm_get_hyp_vector(), vbar_el1);
|
||||
}
|
||||
NOKPROBE_SYMBOL(activate_traps_vhe);
|
||||
|
||||
static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
|
@ -146,6 +148,7 @@ static void deactivate_traps_vhe(void)
|
|||
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
|
||||
write_sysreg(vectors, vbar_el1);
|
||||
}
|
||||
NOKPROBE_SYMBOL(deactivate_traps_vhe);
|
||||
|
||||
static void __hyp_text __deactivate_traps_nvhe(void)
|
||||
{
|
||||
|
@ -529,6 +532,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
|
|||
|
||||
return exit_code;
|
||||
}
|
||||
NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
|
||||
|
||||
/* Switch to the guest for legacy non-VHE systems */
|
||||
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
|
||||
|
@ -636,6 +640,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
|
|||
read_sysreg_el2(esr), read_sysreg_el2(far),
|
||||
read_sysreg(hpfar_el2), par, vcpu);
|
||||
}
|
||||
NOKPROBE_SYMBOL(__hyp_call_panic_vhe);
|
||||
|
||||
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
|
||||
{
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include <linux/compiler.h>
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
#include <asm/kprobes.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_hyp.h>
|
||||
|
@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
|
|||
{
|
||||
__sysreg_save_common_state(ctxt);
|
||||
}
|
||||
NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
|
||||
|
||||
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
__sysreg_save_common_state(ctxt);
|
||||
__sysreg_save_el2_return_state(ctxt);
|
||||
}
|
||||
NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
|
||||
|
||||
static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
|
@ -171,12 +174,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
|
|||
{
|
||||
__sysreg_restore_common_state(ctxt);
|
||||
}
|
||||
NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
|
||||
|
||||
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
__sysreg_restore_common_state(ctxt);
|
||||
__sysreg_restore_el2_return_state(ctxt);
|
||||
}
|
||||
NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
|
||||
|
||||
void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
|
|
|
@ -31,6 +31,7 @@
|
|||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_coproc.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
|
||||
/*
|
||||
|
@ -99,16 +100,33 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
|
|||
* This function finds the right table above and sets the registers on
|
||||
* the virtual CPU struct to their architecturally defined reset
|
||||
* values.
|
||||
*
|
||||
* Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
|
||||
* ioctl or as part of handling a request issued by another VCPU in the PSCI
|
||||
* handling code. In the first case, the VCPU will not be loaded, and in the
|
||||
* second case the VCPU will be loaded. Because this function operates purely
|
||||
* on the memory-backed valus of system registers, we want to do a full put if
|
||||
* we were loaded (handling a request) and load the values back at the end of
|
||||
* the function. Otherwise we leave the state alone. In both cases, we
|
||||
* disable preemption around the vcpu reset as we would otherwise race with
|
||||
* preempt notifiers which also call put/load.
|
||||
*/
|
||||
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
const struct kvm_regs *cpu_reset;
|
||||
int ret = -EINVAL;
|
||||
bool loaded;
|
||||
|
||||
preempt_disable();
|
||||
loaded = (vcpu->cpu != -1);
|
||||
if (loaded)
|
||||
kvm_arch_vcpu_put(vcpu);
|
||||
|
||||
switch (vcpu->arch.target) {
|
||||
default:
|
||||
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
|
||||
if (!cpu_has_32bit_el1())
|
||||
return -EINVAL;
|
||||
goto out;
|
||||
cpu_reset = &default_regs_reset32;
|
||||
} else {
|
||||
cpu_reset = &default_regs_reset;
|
||||
|
@ -123,6 +141,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
|||
/* Reset system registers */
|
||||
kvm_reset_sys_regs(vcpu);
|
||||
|
||||
/*
|
||||
* Additional reset state handling that PSCI may have imposed on us.
|
||||
* Must be done after all the sys_reg reset.
|
||||
*/
|
||||
if (vcpu->arch.reset_state.reset) {
|
||||
unsigned long target_pc = vcpu->arch.reset_state.pc;
|
||||
|
||||
/* Gracefully handle Thumb2 entry point */
|
||||
if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
|
||||
target_pc &= ~1UL;
|
||||
vcpu_set_thumb(vcpu);
|
||||
}
|
||||
|
||||
/* Propagate caller endianness */
|
||||
if (vcpu->arch.reset_state.be)
|
||||
kvm_vcpu_set_be(vcpu);
|
||||
|
||||
*vcpu_pc(vcpu) = target_pc;
|
||||
vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
|
||||
|
||||
vcpu->arch.reset_state.reset = false;
|
||||
}
|
||||
|
||||
/* Reset PMU */
|
||||
kvm_pmu_vcpu_reset(vcpu);
|
||||
|
||||
|
@ -131,5 +172,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
|||
vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
|
||||
|
||||
/* Reset timer */
|
||||
return kvm_timer_vcpu_reset(vcpu);
|
||||
ret = kvm_timer_vcpu_reset(vcpu);
|
||||
out:
|
||||
if (loaded)
|
||||
kvm_arch_vcpu_load(vcpu, smp_processor_id());
|
||||
preempt_enable();
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@@ -1456,7 +1456,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {

{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};

static bool trap_dbgidr(struct kvm_vcpu *vcpu,

@@ -2586,7 +2586,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
table = get_target_table(vcpu->arch.target, true, &num);
reset_sys_reg_descs(vcpu, table, num);

for (num = 1; num < NR_SYS_REGS; num++)
if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
panic("Didn't reset __vcpu_sys_reg(%zi)", num);
for (num = 1; num < NR_SYS_REGS; num++) {
if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
"Didn't reset __vcpu_sys_reg(%zi)\n", num))
break;
}
}

@@ -58,7 +58,10 @@ cpuflags-$(CONFIG_M5206e) := $(call cc-option,-mcpu=5206e,-m5200)
cpuflags-$(CONFIG_M5206) := $(call cc-option,-mcpu=5206,-m5200)

KBUILD_AFLAGS += $(cpuflags-y)
KBUILD_CFLAGS += $(cpuflags-y) -pipe
KBUILD_CFLAGS += $(cpuflags-y)

KBUILD_CFLAGS += -pipe -ffreestanding

ifdef CONFIG_MMU
# without -fno-strength-reduce the 53c7xx.c driver fails ;-(
KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2

@@ -1131,7 +1131,7 @@ static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}

@@ -35,6 +35,14 @@ static inline int hstate_get_psize(struct hstate *hstate)
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static inline bool gigantic_page_supported(void)
{
/*
* We use gigantic page reservation with hypervisor assist in some cases.
* We cannot use runtime allocation of gigantic pages on those platforms.
* This is hash translation mode LPARs.
*/
if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
return false;

return true;
}
#endif

@@ -822,7 +822,7 @@ struct kvm_vcpu_arch {
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_exit(void) {}

@@ -23,6 +23,8 @@ extern int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
unsigned long *flags, unsigned long *status,
int count);

void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val);

void pnv_tm_init(void);
#else
static inline void powernv_set_nmmu_ptcr(unsigned long ptcr) { }

@ -745,6 +745,9 @@ fast_exception_return:
|
|||
mtcr r10
|
||||
lwz r10,_LINK(r11)
|
||||
mtlr r10
|
||||
/* Clear the exception_marker on the stack to avoid confusing stacktrace */
|
||||
li r10, 0
|
||||
stw r10, 8(r11)
|
||||
REST_GPR(10, r11)
|
||||
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
|
||||
mtspr SPRN_NRI, r0
|
||||
|
@ -982,6 +985,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
|
|||
mtcrf 0xFF,r10
|
||||
mtlr r11
|
||||
|
||||
/* Clear the exception_marker on the stack to avoid confusing stacktrace */
|
||||
li r10, 0
|
||||
stw r10, 8(r1)
|
||||
/*
|
||||
* Once we put values in SRR0 and SRR1, we are in a state
|
||||
* where exceptions are not recoverable, since taking an
|
||||
|
@ -1021,6 +1027,9 @@ exc_exit_restart_end:
|
|||
mtlr r11
|
||||
lwz r10,_CCR(r1)
|
||||
mtcrf 0xff,r10
|
||||
/* Clear the exception_marker on the stack to avoid confusing stacktrace */
|
||||
li r10, 0
|
||||
stw r10, 8(r1)
|
||||
REST_2GPRS(9, r1)
|
||||
.globl exc_exit_restart
|
||||
exc_exit_restart:
|
||||
|
|
|
@ -180,7 +180,7 @@ static void __giveup_fpu(struct task_struct *tsk)
|
|||
|
||||
save_fpu(tsk);
|
||||
msr = tsk->thread.regs->msr;
|
||||
msr &= ~MSR_FP;
|
||||
msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
|
||||
#ifdef CONFIG_VSX
|
||||
if (cpu_has_feature(CPU_FTR_VSX))
|
||||
msr &= ~MSR_VSX;
|
||||
|
|
|
@ -561,6 +561,7 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
|
|||
/*
|
||||
* Copy out only the low-order word of vrsave.
|
||||
*/
|
||||
int start, end;
|
||||
union {
|
||||
elf_vrreg_t reg;
|
||||
u32 word;
|
||||
|
@ -569,8 +570,10 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
|
|||
|
||||
vrsave.word = target->thread.vrsave;
|
||||
|
||||
start = 33 * sizeof(vector128);
|
||||
end = start + sizeof(vrsave);
|
||||
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
|
||||
33 * sizeof(vector128), -1);
|
||||
start, end);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -608,6 +611,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
|
|||
/*
|
||||
* We use only the first word of vrsave.
|
||||
*/
|
||||
int start, end;
|
||||
union {
|
||||
elf_vrreg_t reg;
|
||||
u32 word;
|
||||
|
@ -616,8 +620,10 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
|
|||
|
||||
vrsave.word = target->thread.vrsave;
|
||||
|
||||
start = 33 * sizeof(vector128);
|
||||
end = start + sizeof(vrsave);
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
|
||||
33 * sizeof(vector128), -1);
|
||||
start, end);
|
||||
if (!ret)
|
||||
target->thread.vrsave = vrsave.word;
|
||||
}
|
||||
|
|
|
@ -767,15 +767,15 @@ void machine_check_exception(struct pt_regs *regs)
|
|||
if (check_io_access(regs))
|
||||
goto bail;
|
||||
|
||||
/* Must die if the interrupt is not recoverable */
|
||||
if (!(regs->msr & MSR_RI))
|
||||
nmi_panic(regs, "Unrecoverable Machine check");
|
||||
|
||||
if (!nested)
|
||||
nmi_exit();
|
||||
|
||||
die("Machine check", regs, SIGBUS);
|
||||
|
||||
/* Must die if the interrupt is not recoverable */
|
||||
if (!(regs->msr & MSR_RI))
|
||||
nmi_panic(regs, "Unrecoverable Machine check");
|
||||
|
||||
return;
|
||||
|
||||
bail:
|
||||
|
@ -1545,8 +1545,8 @@ void alignment_exception(struct pt_regs *regs)
|
|||
|
||||
void StackOverflow(struct pt_regs *regs)
|
||||
{
|
||||
printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
|
||||
current, regs->gpr[1]);
|
||||
pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n",
|
||||
current->comm, task_pid_nr(current), regs->gpr[1]);
|
||||
debugger(regs);
|
||||
show_regs(regs);
|
||||
panic("kernel stack overflow");
|
||||
|
|
|
@ -26,13 +26,13 @@
|
|||
#define SS_MSR 0x74
|
||||
#define SS_SDR1 0x78
|
||||
#define SS_LR 0x7c
|
||||
#define SS_SPRG 0x80 /* 4 SPRGs */
|
||||
#define SS_DBAT 0x90 /* 8 DBATs */
|
||||
#define SS_IBAT 0xd0 /* 8 IBATs */
|
||||
#define SS_TB 0x110
|
||||
#define SS_CR 0x118
|
||||
#define SS_GPREG 0x11c /* r12-r31 */
|
||||
#define STATE_SAVE_SIZE 0x16c
|
||||
#define SS_SPRG 0x80 /* 8 SPRGs */
|
||||
#define SS_DBAT 0xa0 /* 8 DBATs */
|
||||
#define SS_IBAT 0xe0 /* 8 IBATs */
|
||||
#define SS_TB 0x120
|
||||
#define SS_CR 0x128
|
||||
#define SS_GPREG 0x12c /* r12-r31 */
|
||||
#define STATE_SAVE_SIZE 0x17c
|
||||
|
||||
.section .data
|
||||
.align 5
|
||||
|
@ -103,6 +103,16 @@ _GLOBAL(mpc83xx_enter_deep_sleep)
|
|||
stw r7, SS_SPRG+12(r3)
|
||||
stw r8, SS_SDR1(r3)
|
||||
|
||||
mfspr r4, SPRN_SPRG4
|
||||
mfspr r5, SPRN_SPRG5
|
||||
mfspr r6, SPRN_SPRG6
|
||||
mfspr r7, SPRN_SPRG7
|
||||
|
||||
stw r4, SS_SPRG+16(r3)
|
||||
stw r5, SS_SPRG+20(r3)
|
||||
stw r6, SS_SPRG+24(r3)
|
||||
stw r7, SS_SPRG+28(r3)
|
||||
|
||||
mfspr r4, SPRN_DBAT0U
|
||||
mfspr r5, SPRN_DBAT0L
|
||||
mfspr r6, SPRN_DBAT1U
|
||||
|
@ -493,6 +503,16 @@ mpc83xx_deep_resume:
|
|||
mtspr SPRN_IBAT7U, r6
|
||||
mtspr SPRN_IBAT7L, r7
|
||||
|
||||
lwz r4, SS_SPRG+16(r3)
|
||||
lwz r5, SS_SPRG+20(r3)
|
||||
lwz r6, SS_SPRG+24(r3)
|
||||
lwz r7, SS_SPRG+28(r3)
|
||||
|
||||
mtspr SPRN_SPRG4, r4
|
||||
mtspr SPRN_SPRG5, r5
|
||||
mtspr SPRN_SPRG6, r6
|
||||
mtspr SPRN_SPRG7, r7
|
||||
|
||||
lwz r4, SS_SPRG+0(r3)
|
||||
lwz r5, SS_SPRG+4(r3)
|
||||
lwz r6, SS_SPRG+8(r3)
|
||||
|
|
|
@ -83,6 +83,10 @@ unsigned long __init wii_mmu_mapin_mem2(unsigned long top)
|
|||
/* MEM2 64MB@0x10000000 */
|
||||
delta = wii_hole_start + wii_hole_size;
|
||||
size = top - delta;
|
||||
|
||||
if (__map_without_bats)
|
||||
return delta;
|
||||
|
||||
for (bl = 128<<10; bl < max_size; bl <<= 1) {
|
||||
if (bl * 2 > size)
|
||||
break;
|
||||
|
|
|
@ -458,7 +458,8 @@ EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
|
|||
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
|
||||
|
||||
void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
|
||||
{
|
||||
u64 pir = get_hard_smp_processor_id(cpu);
|
||||
|
||||
|
@ -481,20 +482,6 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
|
|||
{
|
||||
unsigned long srr1;
|
||||
u32 idle_states = pnv_get_supported_cpuidle_states();
|
||||
u64 lpcr_val;
|
||||
|
||||
/*
|
||||
* We don't want to take decrementer interrupts while we are
|
||||
* offline, so clear LPCR:PECE1. We keep PECE2 (and
|
||||
* LPCR_PECE_HVEE on P9) enabled as to let IPIs in.
|
||||
*
|
||||
* If the CPU gets woken up by a special wakeup, ensure that
|
||||
* the SLW engine sets LPCR with decrementer bit cleared, else
|
||||
* the CPU will come back to the kernel due to a spurious
|
||||
* wakeup.
|
||||
*/
|
||||
lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
|
||||
pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
|
||||
|
||||
__ppc64_runlatch_off();
|
||||
|
||||
|
@ -526,16 +513,6 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
|
|||
|
||||
__ppc64_runlatch_on();
|
||||
|
||||
/*
|
||||
* Re-enable decrementer interrupts in LPCR.
|
||||
*
|
||||
* Further, we want stop states to be woken up by decrementer
|
||||
* for non-hotplug cases. So program the LPCR via stop api as
|
||||
* well.
|
||||
*/
|
||||
lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
|
||||
pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
|
||||
|
||||
return srr1;
|
||||
}
|
||||
#endif
|
||||
|
|
|
@@ -98,7 +98,7 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj,
}

static struct bin_attribute opal_msglog_attr = {
.attr = {.name = "msglog", .mode = 0444},
.attr = {.name = "msglog", .mode = 0400},
.read = opal_msglog_read
};

@ -39,6 +39,7 @@
|
|||
#include <asm/cpuidle.h>
|
||||
#include <asm/kexec.h>
|
||||
#include <asm/reg.h>
|
||||
#include <asm/powernv.h>
|
||||
|
||||
#include "powernv.h"
|
||||
|
||||
|
@ -153,6 +154,7 @@ static void pnv_smp_cpu_kill_self(void)
|
|||
{
|
||||
unsigned int cpu;
|
||||
unsigned long srr1, wmask;
|
||||
u64 lpcr_val;
|
||||
|
||||
/* Standard hot unplug procedure */
|
||||
/*
|
||||
|
@ -174,6 +176,19 @@ static void pnv_smp_cpu_kill_self(void)
|
|||
if (cpu_has_feature(CPU_FTR_ARCH_207S))
|
||||
wmask = SRR1_WAKEMASK_P8;
|
||||
|
||||
/*
|
||||
* We don't want to take decrementer interrupts while we are
|
||||
* offline, so clear LPCR:PECE1. We keep PECE2 (and
|
||||
* LPCR_PECE_HVEE on P9) enabled so as to let IPIs in.
|
||||
*
|
||||
* If the CPU gets woken up by a special wakeup, ensure that
|
||||
* the SLW engine sets LPCR with decrementer bit cleared, else
|
||||
* the CPU will come back to the kernel due to a spurious
|
||||
* wakeup.
|
||||
*/
|
||||
lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
|
||||
pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
|
||||
|
||||
while (!generic_check_cpu_restart(cpu)) {
|
||||
/*
|
||||
* Clear IPI flag, since we don't handle IPIs while
|
||||
|
@ -246,6 +261,16 @@ static void pnv_smp_cpu_kill_self(void)
|
|||
|
||||
}
|
||||
|
||||
/*
|
||||
* Re-enable decrementer interrupts in LPCR.
|
||||
*
|
||||
* Further, we want stop states to be woken up by decrementer
|
||||
* for non-hotplug cases. So program the LPCR via stop api as
|
||||
* well.
|
||||
*/
|
||||
lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
|
||||
pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
|
||||
|
||||
DBG("CPU%d coming online...\n", cpu);
|
||||
}
|
||||
|
||||
|
|
|
@@ -865,7 +865,7 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot) {}

@ -303,7 +303,7 @@ early_param("vmalloc", parse_vmalloc);
|
|||
|
||||
void *restart_stack __section(.data);
|
||||
|
||||
static void __init setup_lowcore(void)
|
||||
static void __init setup_lowcore_dat_off(void)
|
||||
{
|
||||
struct lowcore *lc;
|
||||
|
||||
|
@ -314,19 +314,16 @@ static void __init setup_lowcore(void)
|
|||
lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
|
||||
lc->restart_psw.mask = PSW_KERNEL_BITS;
|
||||
lc->restart_psw.addr = (unsigned long) restart_int_handler;
|
||||
lc->external_new_psw.mask = PSW_KERNEL_BITS |
|
||||
PSW_MASK_DAT | PSW_MASK_MCHECK;
|
||||
lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
|
||||
lc->external_new_psw.addr = (unsigned long) ext_int_handler;
|
||||
lc->svc_new_psw.mask = PSW_KERNEL_BITS |
|
||||
PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
|
||||
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
|
||||
lc->svc_new_psw.addr = (unsigned long) system_call;
|
||||
lc->program_new_psw.mask = PSW_KERNEL_BITS |
|
||||
PSW_MASK_DAT | PSW_MASK_MCHECK;
|
||||
lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
|
||||
lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
|
||||
lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
|
||||
lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
|
||||
lc->io_new_psw.mask = PSW_KERNEL_BITS |
|
||||
PSW_MASK_DAT | PSW_MASK_MCHECK;
|
||||
lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
|
||||
lc->io_new_psw.addr = (unsigned long) io_int_handler;
|
||||
lc->clock_comparator = clock_comparator_max;
|
||||
lc->kernel_stack = ((unsigned long) &init_thread_union)
|
||||
|
@ -388,6 +385,16 @@ static void __init setup_lowcore(void)
|
|||
lowcore_ptr[0] = lc;
|
||||
}
|
||||
|
||||
static void __init setup_lowcore_dat_on(void)
|
||||
{
|
||||
__ctl_clear_bit(0, 28);
|
||||
S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
|
||||
S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
|
||||
S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
|
||||
S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
|
||||
__ctl_set_bit(0, 28);
|
||||
}
|
||||
|
||||
static struct resource code_resource = {
|
||||
.name = "Kernel code",
|
||||
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
|
||||
|
@ -946,7 +953,7 @@ void __init setup_arch(char **cmdline_p)
|
|||
#endif
|
||||
|
||||
setup_resources();
|
||||
setup_lowcore();
|
||||
setup_lowcore_dat_off();
|
||||
smp_fill_possible_mask();
|
||||
cpu_detect_mhz_feature();
|
||||
cpu_init();
|
||||
|
@ -959,6 +966,12 @@ void __init setup_arch(char **cmdline_p)
|
|||
*/
|
||||
paging_init();
|
||||
|
||||
/*
|
||||
* After paging_init created the kernel page table, the new PSWs
|
||||
* in lowcore can now run with DAT enabled.
|
||||
*/
|
||||
setup_lowcore_dat_on();
|
||||
|
||||
/* Setup default console */
|
||||
conmode_default();
|
||||
set_preferred_console();
|
||||
|
|
|
@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_process_ad(
|
|||
}
|
||||
|
||||
static void crypto_aegis128_aesni_process_crypt(
|
||||
struct aegis_state *state, struct aead_request *req,
|
||||
struct aegis_state *state, struct skcipher_walk *walk,
|
||||
const struct aegis_crypt_ops *ops)
|
||||
{
|
||||
struct skcipher_walk walk;
|
||||
u8 *src, *dst;
|
||||
unsigned int chunksize, base;
|
||||
while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
|
||||
ops->crypt_blocks(state,
|
||||
round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
|
||||
walk->src.virt.addr, walk->dst.virt.addr);
|
||||
skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
ops->skcipher_walk_init(&walk, req, false);
|
||||
|
||||
while (walk.nbytes) {
|
||||
src = walk.src.virt.addr;
|
||||
dst = walk.dst.virt.addr;
|
||||
chunksize = walk.nbytes;
|
||||
|
||||
ops->crypt_blocks(state, chunksize, src, dst);
|
||||
|
||||
base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1);
|
||||
src += base;
|
||||
dst += base;
|
||||
chunksize &= AEGIS128_BLOCK_SIZE - 1;
|
||||
|
||||
if (chunksize > 0)
|
||||
ops->crypt_tail(state, chunksize, src, dst);
|
||||
|
||||
skcipher_walk_done(&walk, 0);
|
||||
if (walk->nbytes) {
|
||||
ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
|
||||
walk->dst.virt.addr);
|
||||
skcipher_walk_done(walk, 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req,
|
|||
{
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
struct aegis_state state;
|
||||
|
||||
ops->skcipher_walk_init(&walk, req, true);
|
||||
|
||||
kernel_fpu_begin();
|
||||
|
||||
crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
|
||||
crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
|
||||
crypto_aegis128_aesni_process_crypt(&state, req, ops);
|
||||
crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
|
||||
crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
|
||||
|
||||
kernel_fpu_end();
|
||||
|
|
|
@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_process_ad(
|
|||
}
|
||||
|
||||
static void crypto_aegis128l_aesni_process_crypt(
|
||||
struct aegis_state *state, struct aead_request *req,
|
||||
struct aegis_state *state, struct skcipher_walk *walk,
|
||||
const struct aegis_crypt_ops *ops)
|
||||
{
|
||||
struct skcipher_walk walk;
|
||||
u8 *src, *dst;
|
||||
unsigned int chunksize, base;
|
||||
while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) {
|
||||
ops->crypt_blocks(state, round_down(walk->nbytes,
|
||||
AEGIS128L_BLOCK_SIZE),
|
||||
walk->src.virt.addr, walk->dst.virt.addr);
|
||||
skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
ops->skcipher_walk_init(&walk, req, false);
|
||||
|
||||
while (walk.nbytes) {
|
||||
src = walk.src.virt.addr;
|
||||
dst = walk.dst.virt.addr;
|
||||
chunksize = walk.nbytes;
|
||||
|
||||
ops->crypt_blocks(state, chunksize, src, dst);
|
||||
|
||||
base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1);
|
||||
src += base;
|
||||
dst += base;
|
||||
chunksize &= AEGIS128L_BLOCK_SIZE - 1;
|
||||
|
||||
if (chunksize > 0)
|
||||
ops->crypt_tail(state, chunksize, src, dst);
|
||||
|
||||
skcipher_walk_done(&walk, 0);
|
||||
if (walk->nbytes) {
|
||||
ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
|
||||
walk->dst.virt.addr);
|
||||
skcipher_walk_done(walk, 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt(struct aead_request *req,
|
|||
{
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
struct aegis_state state;
|
||||
|
||||
ops->skcipher_walk_init(&walk, req, true);
|
||||
|
||||
kernel_fpu_begin();
|
||||
|
||||
crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv);
|
||||
crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen);
|
||||
crypto_aegis128l_aesni_process_crypt(&state, req, ops);
|
||||
crypto_aegis128l_aesni_process_crypt(&state, &walk, ops);
|
||||
crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
|
||||
|
||||
kernel_fpu_end();
|
||||
|
|
|
@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_process_ad(
|
|||
}
|
||||
|
||||
static void crypto_aegis256_aesni_process_crypt(
|
||||
struct aegis_state *state, struct aead_request *req,
|
||||
struct aegis_state *state, struct skcipher_walk *walk,
|
||||
const struct aegis_crypt_ops *ops)
|
||||
{
|
||||
struct skcipher_walk walk;
|
||||
u8 *src, *dst;
|
||||
unsigned int chunksize, base;
|
||||
while (walk->nbytes >= AEGIS256_BLOCK_SIZE) {
|
||||
ops->crypt_blocks(state,
|
||||
round_down(walk->nbytes, AEGIS256_BLOCK_SIZE),
|
||||
walk->src.virt.addr, walk->dst.virt.addr);
|
||||
skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
ops->skcipher_walk_init(&walk, req, false);
|
||||
|
||||
while (walk.nbytes) {
|
||||
src = walk.src.virt.addr;
|
||||
dst = walk.dst.virt.addr;
|
||||
chunksize = walk.nbytes;
|
||||
|
||||
ops->crypt_blocks(state, chunksize, src, dst);
|
||||
|
||||
base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1);
|
||||
src += base;
|
||||
dst += base;
|
||||
chunksize &= AEGIS256_BLOCK_SIZE - 1;
|
||||
|
||||
if (chunksize > 0)
|
||||
ops->crypt_tail(state, chunksize, src, dst);
|
||||
|
||||
skcipher_walk_done(&walk, 0);
|
||||
if (walk->nbytes) {
|
||||
ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
|
||||
walk->dst.virt.addr);
|
||||
skcipher_walk_done(walk, 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(struct aead_request *req,
|
|||
{
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
struct aegis_state state;
|
||||
|
||||
ops->skcipher_walk_init(&walk, req, true);
|
||||
|
||||
kernel_fpu_begin();
|
||||
|
||||
crypto_aegis256_aesni_init(&state, ctx->key, req->iv);
|
||||
crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen);
|
||||
crypto_aegis256_aesni_process_crypt(&state, req, ops);
|
||||
crypto_aegis256_aesni_process_crypt(&state, &walk, ops);
|
||||
crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
|
||||
|
||||
kernel_fpu_end();
|
||||
|
|
|
@ -830,11 +830,14 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
|
|||
scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
|
||||
}
|
||||
|
||||
src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
|
||||
scatterwalk_start(&src_sg_walk, src_sg);
|
||||
if (req->src != req->dst) {
|
||||
dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen);
|
||||
scatterwalk_start(&dst_sg_walk, dst_sg);
|
||||
if (left) {
|
||||
src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
|
||||
scatterwalk_start(&src_sg_walk, src_sg);
|
||||
if (req->src != req->dst) {
|
||||
dst_sg = scatterwalk_ffwd(dst_start, req->dst,
|
||||
req->assoclen);
|
||||
scatterwalk_start(&dst_sg_walk, dst_sg);
|
||||
}
|
||||
}
|
||||
|
||||
kernel_fpu_begin();
|
||||
|
|
|
@ -85,31 +85,20 @@ static void crypto_morus1280_glue_process_ad(
|
|||
|
||||
static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state,
|
||||
struct morus1280_ops ops,
|
||||
struct aead_request *req)
|
||||
struct skcipher_walk *walk)
|
||||
{
|
||||
struct skcipher_walk walk;
|
||||
u8 *cursor_src, *cursor_dst;
|
||||
unsigned int chunksize, base;
|
||||
while (walk->nbytes >= MORUS1280_BLOCK_SIZE) {
|
||||
ops.crypt_blocks(state, walk->src.virt.addr,
|
||||
walk->dst.virt.addr,
|
||||
round_down(walk->nbytes,
|
||||
MORUS1280_BLOCK_SIZE));
|
||||
skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
ops.skcipher_walk_init(&walk, req, false);
|
||||
|
||||
while (walk.nbytes) {
|
||||
cursor_src = walk.src.virt.addr;
|
||||
cursor_dst = walk.dst.virt.addr;
|
||||
chunksize = walk.nbytes;
|
||||
|
||||
ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
|
||||
|
||||
base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1);
|
||||
cursor_src += base;
|
||||
cursor_dst += base;
|
||||
chunksize &= MORUS1280_BLOCK_SIZE - 1;
|
||||
|
||||
if (chunksize > 0)
|
||||
ops.crypt_tail(state, cursor_src, cursor_dst,
|
||||
chunksize);
|
||||
|
||||
skcipher_walk_done(&walk, 0);
|
||||
if (walk->nbytes) {
|
||||
ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
|
||||
walk->nbytes);
|
||||
skcipher_walk_done(walk, 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -147,12 +136,15 @@ static void crypto_morus1280_glue_crypt(struct aead_request *req,
|
|||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct morus1280_state state;
|
||||
struct skcipher_walk walk;
|
||||
|
||||
ops.skcipher_walk_init(&walk, req, true);
|
||||
|
||||
kernel_fpu_begin();
|
||||
|
||||
ctx->ops->init(&state, &ctx->key, req->iv);
|
||||
crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
|
||||
crypto_morus1280_glue_process_crypt(&state, ops, req);
|
||||
crypto_morus1280_glue_process_crypt(&state, ops, &walk);
|
||||
ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
|
||||
|
||||
kernel_fpu_end();
|
||||
|
|
|
@ -85,31 +85,19 @@ static void crypto_morus640_glue_process_ad(
|
|||
|
||||
static void crypto_morus640_glue_process_crypt(struct morus640_state *state,
|
||||
struct morus640_ops ops,
|
||||
struct aead_request *req)
|
||||
struct skcipher_walk *walk)
|
||||
{
|
||||
struct skcipher_walk walk;
|
||||
u8 *cursor_src, *cursor_dst;
|
||||
unsigned int chunksize, base;
|
||||
while (walk->nbytes >= MORUS640_BLOCK_SIZE) {
|
||||
ops.crypt_blocks(state, walk->src.virt.addr,
|
||||
walk->dst.virt.addr,
|
||||
round_down(walk->nbytes, MORUS640_BLOCK_SIZE));
|
||||
skcipher_walk_done(walk, walk->nbytes % MORUS640_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
ops.skcipher_walk_init(&walk, req, false);
|
||||
|
||||
while (walk.nbytes) {
|
||||
cursor_src = walk.src.virt.addr;
|
||||
cursor_dst = walk.dst.virt.addr;
|
||||
chunksize = walk.nbytes;
|
||||
|
||||
ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
|
||||
|
||||
base = chunksize & ~(MORUS640_BLOCK_SIZE - 1);
|
||||
cursor_src += base;
|
||||
cursor_dst += base;
|
||||
chunksize &= MORUS640_BLOCK_SIZE - 1;
|
||||
|
||||
if (chunksize > 0)
|
||||
ops.crypt_tail(state, cursor_src, cursor_dst,
|
||||
chunksize);
|
||||
|
||||
skcipher_walk_done(&walk, 0);
|
||||
if (walk->nbytes) {
|
||||
ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
|
||||
walk->nbytes);
|
||||
skcipher_walk_done(walk, 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -143,12 +131,15 @@ static void crypto_morus640_glue_crypt(struct aead_request *req,
|
|||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct morus640_state state;
|
||||
struct skcipher_walk walk;
|
||||
|
||||
ops.skcipher_walk_init(&walk, req, true);
|
||||
|
||||
kernel_fpu_begin();
|
||||
|
||||
ctx->ops->init(&state, &ctx->key, req->iv);
|
||||
crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
|
||||
crypto_morus640_glue_process_crypt(&state, ops, req);
|
||||
crypto_morus640_glue_process_crypt(&state, ops, &walk);
|
||||
ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
|
||||
|
||||
kernel_fpu_end();
|
||||
|
|
|
@@ -740,6 +740,7 @@ static int uncore_pmu_event_init(struct perf_event *event)
/* fixed counters have event field hardcoded to zero */
hwc->config = 0ULL;
} else if (is_freerunning_event(event)) {
hwc->config = event->attr.config;
if (!check_valid_freerunning_event(box, event))
return -EINVAL;
event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;

@ -285,8 +285,8 @@ static inline
|
|||
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
|
||||
struct perf_event *event)
|
||||
{
|
||||
unsigned int type = uncore_freerunning_type(event->attr.config);
|
||||
unsigned int idx = uncore_freerunning_idx(event->attr.config);
|
||||
unsigned int type = uncore_freerunning_type(event->hw.config);
|
||||
unsigned int idx = uncore_freerunning_idx(event->hw.config);
|
||||
struct intel_uncore_pmu *pmu = box->pmu;
|
||||
|
||||
return pmu->type->freerunning[type].counter_base +
|
||||
|
@ -360,7 +360,7 @@ static inline
|
|||
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
|
||||
struct perf_event *event)
|
||||
{
|
||||
unsigned int type = uncore_freerunning_type(event->attr.config);
|
||||
unsigned int type = uncore_freerunning_type(event->hw.config);
|
||||
|
||||
return box->pmu->type->freerunning[type].bits;
|
||||
}
|
||||
|
@ -368,7 +368,7 @@ unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
|
|||
static inline int uncore_num_freerunning(struct intel_uncore_box *box,
|
||||
struct perf_event *event)
|
||||
{
|
||||
unsigned int type = uncore_freerunning_type(event->attr.config);
|
||||
unsigned int type = uncore_freerunning_type(event->hw.config);
|
||||
|
||||
return box->pmu->type->freerunning[type].num_counters;
|
||||
}
|
||||
|
@ -382,8 +382,8 @@ static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
|
|||
static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
|
||||
struct perf_event *event)
|
||||
{
|
||||
unsigned int type = uncore_freerunning_type(event->attr.config);
|
||||
unsigned int idx = uncore_freerunning_idx(event->attr.config);
|
||||
unsigned int type = uncore_freerunning_type(event->hw.config);
|
||||
unsigned int idx = uncore_freerunning_idx(event->hw.config);
|
||||
|
||||
return (type < uncore_num_freerunning_types(box, event)) &&
|
||||
(idx < uncore_num_freerunning(box, event));
|
||||
|
|
|
@ -444,9 +444,11 @@ static int snb_uncore_imc_event_init(struct perf_event *event)
|
|||
|
||||
/* must be done before validate_group */
|
||||
event->hw.event_base = base;
|
||||
event->hw.config = cfg;
|
||||
event->hw.idx = idx;
|
||||
|
||||
/* Convert to standard encoding format for freerunning counters */
|
||||
event->hw.config = ((cfg - 1) << 8) | 0x10ff;
|
||||
|
||||
/* no group validation needed, we have free running counters */
|
||||
|
||||
return 0;
|
||||
|
|
|
@@ -55,6 +55,8 @@

#define INTEL_FAM6_CANNONLAKE_MOBILE 0x66

#define INTEL_FAM6_ICELAKE_MOBILE 0x7E

/* "Small Core" Processors (Atom) */

#define INTEL_FAM6_ATOM_PINEVIEW 0x1C

@@ -1194,7 +1194,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

@ -141,6 +141,11 @@ asm (
|
|||
|
||||
void optprobe_template_func(void);
|
||||
STACK_FRAME_NON_STANDARD(optprobe_template_func);
|
||||
NOKPROBE_SYMBOL(optprobe_template_func);
|
||||
NOKPROBE_SYMBOL(optprobe_template_entry);
|
||||
NOKPROBE_SYMBOL(optprobe_template_val);
|
||||
NOKPROBE_SYMBOL(optprobe_template_call);
|
||||
NOKPROBE_SYMBOL(optprobe_template_end);
|
||||
|
||||
#define TMPL_MOVE_IDX \
|
||||
((long)optprobe_template_val - (long)optprobe_template_entry)
|
||||
|
|
|
@ -5774,13 +5774,30 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
|
|||
return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
|
||||
}
|
||||
|
||||
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
|
||||
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
|
||||
{
|
||||
gen &= MMIO_GEN_MASK;
|
||||
|
||||
/*
|
||||
* The very rare case: if the generation-number is round,
|
||||
* Shift to eliminate the "update in-progress" flag, which isn't
|
||||
* included in the spte's generation number.
|
||||
*/
|
||||
gen >>= 1;
|
||||
|
||||
/*
|
||||
* Generation numbers are incremented in multiples of the number of
|
||||
* address spaces in order to provide unique generations across all
|
||||
* address spaces. Strip what is effectively the address space
|
||||
* modifier prior to checking for a wrap of the MMIO generation so
|
||||
* that a wrap in any address space is detected.
|
||||
*/
|
||||
gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
|
||||
|
||||
/*
|
||||
* The very rare case: if the MMIO generation number has wrapped,
|
||||
* zap all shadow pages.
|
||||
*/
|
||||
if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
|
||||
if (unlikely(gen == 0)) {
|
||||
kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
|
||||
kvm_mmu_invalidate_zap_all_pages(kvm);
|
||||
}
|
||||
|
|
|
@ -8184,25 +8184,50 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
|
|||
/* Addr = segment_base + offset */
|
||||
/* offset = base + [index * scale] + displacement */
|
||||
off = exit_qualification; /* holds the displacement */
|
||||
if (addr_size == 1)
|
||||
off = (gva_t)sign_extend64(off, 31);
|
||||
else if (addr_size == 0)
|
||||
off = (gva_t)sign_extend64(off, 15);
|
||||
if (base_is_valid)
|
||||
off += kvm_register_read(vcpu, base_reg);
|
||||
if (index_is_valid)
|
||||
off += kvm_register_read(vcpu, index_reg)<<scaling;
|
||||
vmx_get_segment(vcpu, &s, seg_reg);
|
||||
*ret = s.base + off;
|
||||
|
||||
/*
|
||||
* The effective address, i.e. @off, of a memory operand is truncated
|
||||
* based on the address size of the instruction. Note that this is
|
||||
* the *effective address*, i.e. the address prior to accounting for
|
||||
* the segment's base.
|
||||
*/
|
||||
if (addr_size == 1) /* 32 bit */
|
||||
*ret &= 0xffffffff;
|
||||
off &= 0xffffffff;
|
||||
else if (addr_size == 0) /* 16 bit */
|
||||
off &= 0xffff;
|
||||
|
||||
/* Checks for #GP/#SS exceptions. */
|
||||
exn = false;
|
||||
if (is_long_mode(vcpu)) {
|
||||
/*
|
||||
* The virtual/linear address is never truncated in 64-bit
|
||||
* mode, e.g. a 32-bit address size can yield a 64-bit virtual
|
||||
* address when using FS/GS with a non-zero base.
|
||||
*/
|
||||
*ret = s.base + off;
|
||||
|
||||
/* Long mode: #GP(0)/#SS(0) if the memory address is in a
|
||||
* non-canonical form. This is the only check on the memory
|
||||
* destination for long mode!
|
||||
*/
|
||||
exn = is_noncanonical_address(*ret, vcpu);
|
||||
} else if (is_protmode(vcpu)) {
|
||||
/*
|
||||
* When not in long mode, the virtual/linear address is
|
||||
* unconditionally truncated to 32 bits regardless of the
|
||||
* address size.
|
||||
*/
|
||||
*ret = (s.base + off) & 0xffffffff;
|
||||
|
||||
/* Protected mode: apply checks for segment validity in the
|
||||
* following order:
|
||||
* - segment type check (#GP(0) may be thrown)
|
||||
|
@ -8226,10 +8251,16 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
|
|||
/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
|
||||
*/
|
||||
exn = (s.unusable != 0);
|
||||
/* Protected mode: #GP(0)/#SS(0) if the memory
|
||||
* operand is outside the segment limit.
|
||||
|
||||
/*
|
||||
* Protected mode: #GP(0)/#SS(0) if the memory operand is
|
||||
* outside the segment limit. All CPUs that support VMX ignore
|
||||
* limit checks for flat segments, i.e. segments with base==0,
|
||||
* limit==0xffffffff and of type expand-up data or code.
|
||||
*/
|
||||
exn = exn || (off + sizeof(u64) > s.limit);
|
||||
if (!(s.base == 0 && s.limit == 0xffffffff &&
|
||||
((s.type & 8) || !(s.type & 4))))
|
||||
exn = exn || (off + sizeof(u64) > s.limit);
|
||||
}
|
||||
if (exn) {
|
||||
kvm_queue_exception_e(vcpu,
|
||||
|
|
|
@ -9108,13 +9108,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
|
||||
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
|
||||
{
|
||||
/*
|
||||
* memslots->generation has been incremented.
|
||||
* mmio generation may have reached its maximum value.
|
||||
*/
|
||||
kvm_mmu_invalidate_mmio_sptes(kvm, slots);
|
||||
kvm_mmu_invalidate_mmio_sptes(kvm, gen);
|
||||
}
|
||||
|
||||
int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
||||
|
|
|
@ -181,6 +181,11 @@ static inline bool emul_is_noncanonical_address(u64 la,
|
|||
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
|
||||
gva_t gva, gfn_t gfn, unsigned access)
|
||||
{
|
||||
u64 gen = kvm_memslots(vcpu->kvm)->generation;
|
||||
|
||||
if (unlikely(gen & 1))
|
||||
return;
|
||||
|
||||
/*
|
||||
* If this is a shadow nested page table, the "GVA" is
|
||||
* actually a nGPA.
|
||||
|
@ -188,7 +193,7 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
|
|||
vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
|
||||
vcpu->arch.access = access;
|
||||
vcpu->arch.mmio_gfn = gfn;
|
||||
vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
|
||||
vcpu->arch.mmio_gen = gen;
|
||||
}
|
||||
|
||||
static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
|
||||
|
|
|
@ -2106,10 +2106,10 @@ void __init xen_relocate_p2m(void)
|
|||
pt = early_memremap(pt_phys, PAGE_SIZE);
|
||||
clear_page(pt);
|
||||
for (idx_pte = 0;
|
||||
idx_pte < min(n_pte, PTRS_PER_PTE);
|
||||
idx_pte++) {
|
||||
set_pte(pt + idx_pte,
|
||||
pfn_pte(p2m_pfn, PAGE_KERNEL));
|
||||
idx_pte < min(n_pte, PTRS_PER_PTE);
|
||||
idx_pte++) {
|
||||
pt[idx_pte] = pfn_pte(p2m_pfn,
|
||||
PAGE_KERNEL);
|
||||
p2m_pfn++;
|
||||
}
|
||||
n_pte -= PTRS_PER_PTE;
|
||||
|
@ -2117,8 +2117,7 @@ void __init xen_relocate_p2m(void)
|
|||
make_lowmem_page_readonly(__va(pt_phys));
|
||||
pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
|
||||
PFN_DOWN(pt_phys));
|
||||
set_pmd(pmd + idx_pt,
|
||||
__pmd(_PAGE_TABLE | pt_phys));
|
||||
pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
|
||||
pt_phys += PAGE_SIZE;
|
||||
}
|
||||
n_pt -= PTRS_PER_PMD;
|
||||
|
@ -2126,7 +2125,7 @@ void __init xen_relocate_p2m(void)
|
|||
make_lowmem_page_readonly(__va(pmd_phys));
|
||||
pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
|
||||
PFN_DOWN(pmd_phys));
|
||||
set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
|
||||
pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
|
||||
pmd_phys += PAGE_SIZE;
|
||||
}
|
||||
n_pmd -= PTRS_PER_PUD;
|
||||
|
|
|
@ -701,12 +701,20 @@ static void blk_mq_requeue_work(struct work_struct *work)
|
|||
spin_unlock_irq(&q->requeue_lock);
|
||||
|
||||
list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
|
||||
if (!(rq->rq_flags & RQF_SOFTBARRIER))
|
||||
if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
|
||||
continue;
|
||||
|
||||
rq->rq_flags &= ~RQF_SOFTBARRIER;
|
||||
list_del_init(&rq->queuelist);
|
||||
blk_mq_sched_insert_request(rq, true, false, false);
|
||||
/*
|
||||
* If RQF_DONTPREP, rq has contained some driver specific
|
||||
* data, so insert it to hctx dispatch list to avoid any
|
||||
* merge.
|
||||
*/
|
||||
if (rq->rq_flags & RQF_DONTPREP)
|
||||
blk_mq_request_bypass_insert(rq, false);
|
||||
else
|
||||
blk_mq_sched_insert_request(rq, true, false, false);
|
||||
}
|
||||
|
||||
while (!list_empty(&rq_list)) {
|
||||
|
|
|
@ -61,8 +61,10 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
|
|||
else
|
||||
err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
|
||||
|
||||
if (err)
|
||||
if (unlikely(err)) {
|
||||
crypto_aead_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
||||
return err;
|
||||
}
|
||||
|
||||
crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
||||
return 0;
|
||||
|
|
|
@ -290,19 +290,19 @@ static void crypto_aegis128_process_crypt(struct aegis_state *state,
|
|||
const struct aegis128_ops *ops)
|
||||
{
|
||||
struct skcipher_walk walk;
|
||||
u8 *src, *dst;
|
||||
unsigned int chunksize;
|
||||
|
||||
ops->skcipher_walk_init(&walk, req, false);
|
||||
|
||||
while (walk.nbytes) {
|
||||
src = walk.src.virt.addr;
|
||||
dst = walk.dst.virt.addr;
|
||||
chunksize = walk.nbytes;
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
|
||||
ops->crypt_chunk(state, dst, src, chunksize);
|
||||
if (nbytes < walk.total)
|
||||
nbytes = round_down(nbytes, walk.stride);
|
||||
|
||||
skcipher_walk_done(&walk, 0);
|
||||
ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
|
||||
nbytes);
|
||||
|
||||
skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -353,19 +353,19 @@ static void crypto_aegis128l_process_crypt(struct aegis_state *state,
|
|||
const struct aegis128l_ops *ops)
|
||||
{
|
||||
struct skcipher_walk walk;
|
||||
u8 *src, *dst;
|
||||
unsigned int chunksize;
|
||||
|
||||
ops->skcipher_walk_init(&walk, req, false);
|
||||
|
||||
while (walk.nbytes) {
|
||||
src = walk.src.virt.addr;
|
||||
dst = walk.dst.virt.addr;
|
||||
chunksize = walk.nbytes;
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
|
||||
ops->crypt_chunk(state, dst, src, chunksize);
|
||||
if (nbytes < walk.total)
|
||||
nbytes = round_down(nbytes, walk.stride);
|
||||
|
||||
skcipher_walk_done(&walk, 0);
|
||||
ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
|
||||
nbytes);
|
||||
|
||||
skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -303,19 +303,19 @@ static void crypto_aegis256_process_crypt(struct aegis_state *state,
|
|||
const struct aegis256_ops *ops)
|
||||
{
|
||||
struct skcipher_walk walk;
|
||||
u8 *src, *dst;
|
||||
unsigned int chunksize;
|
||||
|
||||
ops->skcipher_walk_init(&walk, req, false);
|
||||
|
||||
while (walk.nbytes) {
|
||||
src = walk.src.virt.addr;
|
||||
dst = walk.dst.virt.addr;
|
||||
chunksize = walk.nbytes;
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
|
||||
ops->crypt_chunk(state, dst, src, chunksize);
|
||||
if (nbytes < walk.total)
|
||||
nbytes = round_down(nbytes, walk.stride);
|
||||
|
||||
skcipher_walk_done(&walk, 0);
|
||||
ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
|
||||
nbytes);
|
||||
|
||||
skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -86,17 +86,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
|
|||
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
|
||||
{
|
||||
unsigned int alignmask = walk->alignmask;
|
||||
unsigned int nbytes = walk->entrylen;
|
||||
|
||||
walk->data -= walk->offset;
|
||||
|
||||
if (nbytes && walk->offset & alignmask && !err) {
|
||||
walk->offset = ALIGN(walk->offset, alignmask + 1);
|
||||
nbytes = min(nbytes,
|
||||
((unsigned int)(PAGE_SIZE)) - walk->offset);
|
||||
walk->entrylen -= nbytes;
|
||||
if (walk->entrylen && (walk->offset & alignmask) && !err) {
|
||||
unsigned int nbytes;
|
||||
|
||||
walk->offset = ALIGN(walk->offset, alignmask + 1);
|
||||
nbytes = min(walk->entrylen,
|
||||
(unsigned int)(PAGE_SIZE - walk->offset));
|
||||
if (nbytes) {
|
||||
walk->entrylen -= nbytes;
|
||||
walk->data += walk->offset;
|
||||
return nbytes;
|
||||
}
|
||||
|
@ -116,7 +116,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
if (nbytes) {
|
||||
if (walk->entrylen) {
|
||||
walk->offset = 0;
|
||||
walk->pg++;
|
||||
return hash_walk_next(walk);
|
||||
|
@ -190,6 +190,21 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static void ahash_set_needkey(struct crypto_ahash *tfm)
|
||||
{
|
||||
const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
|
||||
|
||||
if (tfm->setkey != ahash_nosetkey &&
|
||||
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
|
||||
crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
||||
}
|
||||
|
||||
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
|
@ -201,20 +216,16 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
|
|||
else
|
||||
err = tfm->setkey(tfm, key, keylen);
|
||||
|
||||
if (err)
|
||||
if (unlikely(err)) {
|
||||
ahash_set_needkey(tfm);
|
||||
return err;
|
||||
}
|
||||
|
||||
crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
|
||||
|
||||
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static inline unsigned int ahash_align_buffer_size(unsigned len,
|
||||
unsigned long mask)
|
||||
{
|
||||
|
@ -467,8 +478,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
|
|||
|
||||
if (alg->setkey) {
|
||||
hash->setkey = alg->setkey;
|
||||
if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
|
||||
crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
|
||||
ahash_set_needkey(hash);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
crypto/cfb.c
|
@ -77,12 +77,14 @@ static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
|
|||
do {
|
||||
crypto_cfb_encrypt_one(tfm, iv, dst);
|
||||
crypto_xor(dst, src, bsize);
|
||||
memcpy(iv, dst, bsize);
|
||||
iv = dst;
|
||||
|
||||
src += bsize;
|
||||
dst += bsize;
|
||||
} while ((nbytes -= bsize) >= bsize);
|
||||
|
||||
memcpy(walk->iv, iv, bsize);
|
||||
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
|
@ -162,7 +164,7 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
|
|||
const unsigned int bsize = crypto_cfb_bsize(tfm);
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u8 *src = walk->src.virt.addr;
|
||||
u8 *iv = walk->iv;
|
||||
u8 * const iv = walk->iv;
|
||||
u8 tmp[MAX_CIPHER_BLOCKSIZE];
|
||||
|
||||
do {
|
||||
|
@ -172,8 +174,6 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
|
|||
src += bsize;
|
||||
} while ((nbytes -= bsize) >= bsize);
|
||||
|
||||
memcpy(walk->iv, iv, bsize);
|
||||
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
|
@ -298,6 +298,12 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
|
|||
inst->alg.base.cra_blocksize = 1;
|
||||
inst->alg.base.cra_alignmask = alg->cra_alignmask;
|
||||
|
||||
/*
|
||||
* To simplify the implementation, configure the skcipher walk to only
|
||||
* give a partial block at the very end, never earlier.
|
||||
*/
|
||||
inst->alg.chunksize = alg->cra_blocksize;
|
||||
|
||||
inst->alg.ivsize = alg->cra_blocksize;
|
||||
inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
|
||||
inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
|
||||
|
|
|
@ -366,18 +366,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state,
|
|||
const struct morus1280_ops *ops)
|
||||
{
|
||||
struct skcipher_walk walk;
|
||||
u8 *dst;
|
||||
const u8 *src;
|
||||
|
||||
ops->skcipher_walk_init(&walk, req, false);
|
||||
|
||||
while (walk.nbytes) {
|
||||
src = walk.src.virt.addr;
|
||||
dst = walk.dst.virt.addr;
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
|
||||
ops->crypt_chunk(state, dst, src, walk.nbytes);
|
||||
if (nbytes < walk.total)
|
||||
nbytes = round_down(nbytes, walk.stride);
|
||||
|
||||
skcipher_walk_done(&walk, 0);
|
||||
ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
|
||||
nbytes);
|
||||
|
||||
skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -365,18 +365,19 @@ static void crypto_morus640_process_crypt(struct morus640_state *state,
|
|||
const struct morus640_ops *ops)
|
||||
{
|
||||
struct skcipher_walk walk;
|
||||
u8 *dst;
|
||||
const u8 *src;
|
||||
|
||||
ops->skcipher_walk_init(&walk, req, false);
|
||||
|
||||
while (walk.nbytes) {
|
||||
src = walk.src.virt.addr;
|
||||
dst = walk.dst.virt.addr;
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
|
||||
ops->crypt_chunk(state, dst, src, walk.nbytes);
|
||||
if (nbytes < walk.total)
|
||||
nbytes = round_down(nbytes, walk.stride);
|
||||
|
||||
skcipher_walk_done(&walk, 0);
|
||||
ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
|
||||
nbytes);
|
||||
|
||||
skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -51,7 +51,7 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
|
|||
unsigned int nbytes = walk->nbytes;
|
||||
u8 *src = walk->src.virt.addr;
|
||||
u8 *dst = walk->dst.virt.addr;
|
||||
u8 *iv = walk->iv;
|
||||
u8 * const iv = walk->iv;
|
||||
|
||||
do {
|
||||
crypto_xor(iv, src, bsize);
|
||||
|
@ -72,7 +72,7 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
|
|||
int bsize = crypto_cipher_blocksize(tfm);
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u8 *src = walk->src.virt.addr;
|
||||
u8 *iv = walk->iv;
|
||||
u8 * const iv = walk->iv;
|
||||
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];
|
||||
|
||||
do {
|
||||
|
@ -84,8 +84,6 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
|
|||
src += bsize;
|
||||
} while ((nbytes -= bsize) >= bsize);
|
||||
|
||||
memcpy(walk->iv, iv, bsize);
|
||||
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
|
@ -121,7 +119,7 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
|
|||
unsigned int nbytes = walk->nbytes;
|
||||
u8 *src = walk->src.virt.addr;
|
||||
u8 *dst = walk->dst.virt.addr;
|
||||
u8 *iv = walk->iv;
|
||||
u8 * const iv = walk->iv;
|
||||
|
||||
do {
|
||||
crypto_cipher_decrypt_one(tfm, dst, src);
|
||||
|
@ -132,8 +130,6 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
|
|||
dst += bsize;
|
||||
} while ((nbytes -= bsize) >= bsize);
|
||||
|
||||
memcpy(walk->iv, iv, bsize);
|
||||
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
|
@ -144,7 +140,7 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
|
|||
int bsize = crypto_cipher_blocksize(tfm);
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u8 *src = walk->src.virt.addr;
|
||||
u8 *iv = walk->iv;
|
||||
u8 * const iv = walk->iv;
|
||||
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32));
|
||||
|
||||
do {
|
||||
|
@ -156,8 +152,6 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
|
|||
src += bsize;
|
||||
} while ((nbytes -= bsize) >= bsize);
|
||||
|
||||
memcpy(walk->iv, iv, bsize);
|
||||
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
|
|
|
@ -53,6 +53,13 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
|
|||
return err;
|
||||
}
|
||||
|
||||
static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg)
|
||||
{
|
||||
if (crypto_shash_alg_has_setkey(alg) &&
|
||||
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
|
||||
crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
||||
}
|
||||
|
||||
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
|
@ -65,8 +72,10 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
|
|||
else
|
||||
err = shash->setkey(tfm, key, keylen);
|
||||
|
||||
if (err)
|
||||
if (unlikely(err)) {
|
||||
shash_set_needkey(tfm, shash);
|
||||
return err;
|
||||
}
|
||||
|
||||
crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
||||
return 0;
|
||||
|
@ -368,7 +377,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
|
|||
crt->final = shash_async_final;
|
||||
crt->finup = shash_async_finup;
|
||||
crt->digest = shash_async_digest;
|
||||
crt->setkey = shash_async_setkey;
|
||||
if (crypto_shash_alg_has_setkey(alg))
|
||||
crt->setkey = shash_async_setkey;
|
||||
|
||||
crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
|
||||
CRYPTO_TFM_NEED_KEY);
|
||||
|
@ -390,9 +400,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
|
|||
|
||||
hash->descsize = alg->descsize;
|
||||
|
||||
if (crypto_shash_alg_has_setkey(alg) &&
|
||||
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
|
||||
crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
|
||||
shash_set_needkey(hash, alg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -584,6 +584,12 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
|
|||
return crypto_alg_extsize(alg);
|
||||
}
|
||||
|
||||
static void skcipher_set_needkey(struct crypto_skcipher *tfm)
|
||||
{
|
||||
if (tfm->keysize)
|
||||
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
||||
}
|
||||
|
||||
static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
|
@ -597,8 +603,10 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
|
|||
err = crypto_blkcipher_setkey(blkcipher, key, keylen);
|
||||
crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
if (err)
|
||||
if (unlikely(err)) {
|
||||
skcipher_set_needkey(tfm);
|
||||
return err;
|
||||
}
|
||||
|
||||
crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
||||
return 0;
|
||||
|
@ -676,8 +684,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
|
|||
skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
|
||||
skcipher->keysize = calg->cra_blkcipher.max_keysize;
|
||||
|
||||
if (skcipher->keysize)
|
||||
crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
|
||||
skcipher_set_needkey(skcipher);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -697,8 +704,10 @@ static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
|
|||
crypto_skcipher_set_flags(tfm,
|
||||
crypto_ablkcipher_get_flags(ablkcipher) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
if (err)
|
||||
if (unlikely(err)) {
|
||||
skcipher_set_needkey(tfm);
|
||||
return err;
|
||||
}
|
||||
|
||||
crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
||||
return 0;
|
||||
|
@ -775,8 +784,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
|
|||
sizeof(struct ablkcipher_request);
|
||||
skcipher->keysize = calg->cra_ablkcipher.max_keysize;
|
||||
|
||||
if (skcipher->keysize)
|
||||
crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
|
||||
skcipher_set_needkey(skcipher);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -819,8 +827,10 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
|||
else
|
||||
err = cipher->setkey(tfm, key, keylen);
|
||||
|
||||
if (err)
|
||||
if (unlikely(err)) {
|
||||
skcipher_set_needkey(tfm);
|
||||
return err;
|
||||
}
|
||||
|
||||
crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
|
||||
return 0;
|
||||
|
@ -852,8 +862,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
|
|||
skcipher->ivsize = alg->ivsize;
|
||||
skcipher->keysize = alg->max_keysize;
|
||||
|
||||
if (skcipher->keysize)
|
||||
crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
|
||||
skcipher_set_needkey(skcipher);
|
||||
|
||||
if (alg->exit)
|
||||
skcipher->base.exit = crypto_skcipher_exit_tfm;
|
||||
|
|
|
@ -1894,14 +1894,21 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
|
|||
|
||||
err = alg_test_hash(desc, driver, type, mask);
|
||||
if (err)
|
||||
goto out;
|
||||
return err;
|
||||
|
||||
tfm = crypto_alloc_shash(driver, type, mask);
|
||||
if (IS_ERR(tfm)) {
|
||||
if (PTR_ERR(tfm) == -ENOENT) {
|
||||
/*
|
||||
* This crc32c implementation is only available through
|
||||
* ahash API, not the shash API, so the remaining part
|
||||
* of the test is not applicable to it.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
|
||||
"%ld\n", driver, PTR_ERR(tfm));
|
||||
err = PTR_ERR(tfm);
|
||||
goto out;
|
||||
return PTR_ERR(tfm);
|
||||
}
|
||||
|
||||
do {
|
||||
|
@ -1928,7 +1935,6 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
|
|||
|
||||
crypto_free_shash(tfm);
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
@@ -12648,6 +12648,31 @@ static const struct cipher_testvec aes_cfb_tv_template[] = {
 			  "\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
 			  "\x20\x31\x62\x3d\x55\xb1\xe4\x71",
 		.len	= 64,
+		.also_non_np = 1,
+		.np	= 2,
+		.tap	= { 31, 33 },
+	}, { /* > 16 bytes, not a multiple of 16 bytes */
+		.key	= "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+			  "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+		.klen	= 16,
+		.iv	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.ptext	= "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+			  "\xae",
+		.ctext	= "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
+			  "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
+			  "\xc8",
+		.len	= 17,
+	}, { /* < 16 bytes */
+		.key	= "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+			  "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+		.klen	= 16,
+		.iv	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.ptext	= "\x6b\xc1\xbe\xe2\x2e\x40\x9f",
+		.ctext	= "\x3b\x3f\xd9\x2e\xb7\x2d\xad",
+		.len	= 7,
 	},
 };
 
@@ -202,11 +202,15 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
 {
 	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
 	const union acpi_object *of_compatible, *obj;
+	acpi_status status;
 	int len, count;
 	int i, nval;
 	char *c;
 
-	acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+	status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
 	/* DT strings are all in lower case */
 	for (c = buf.pointer; *c != '\0'; c++)
 		*c = tolower(*c);
@@ -397,7 +397,7 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
 	if (call_pkg) {
 		int i;
 
-		if (nfit_mem->family != call_pkg->nd_family)
+		if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
 			return -ENOTTY;
 
 		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
@@ -406,6 +406,10 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
 		return call_pkg->nd_command;
 	}
 
+	/* In the !call_pkg case, bus commands == bus functions */
+	if (!nfit_mem)
+		return cmd;
+
 	/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
 	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
 		return cmd;
@@ -436,17 +440,18 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 	if (cmd_rc)
 		*cmd_rc = -EINVAL;
 
+	if (cmd == ND_CMD_CALL)
+		call_pkg = buf;
+	func = cmd_to_func(nfit_mem, cmd, call_pkg);
+	if (func < 0)
+		return func;
+
 	if (nvdimm) {
 		struct acpi_device *adev = nfit_mem->adev;
 
 		if (!adev)
 			return -ENOTTY;
 
-		if (cmd == ND_CMD_CALL)
-			call_pkg = buf;
-		func = cmd_to_func(nfit_mem, cmd, call_pkg);
-		if (func < 0)
-			return func;
 		dimm_name = nvdimm_name(nvdimm);
 		cmd_name = nvdimm_cmd_name(cmd);
 		cmd_mask = nvdimm_cmd_mask(nvdimm);
@@ -457,12 +462,9 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 	} else {
 		struct acpi_device *adev = to_acpi_dev(acpi_desc);
 
-		func = cmd;
 		cmd_name = nvdimm_bus_cmd_name(cmd);
 		cmd_mask = nd_desc->cmd_mask;
-		dsm_mask = cmd_mask;
-		if (cmd == ND_CMD_CALL)
-			dsm_mask = nd_desc->bus_dsm_mask;
+		dsm_mask = nd_desc->bus_dsm_mask;
 		desc = nd_cmd_bus_desc(cmd);
 		guid = to_nfit_uuid(NFIT_DEV_BUS);
 		handle = adev->handle;
@@ -533,6 +535,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		return -EINVAL;
 	}
 
+	if (out_obj->type != ACPI_TYPE_BUFFER) {
+		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
+			dimm_name, cmd_name, out_obj->type);
+		rc = -EINVAL;
+		goto out;
+	}
+
 	if (call_pkg) {
 		call_pkg->nd_fw_size = out_obj->buffer.length;
 		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
@@ -551,13 +560,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		return 0;
 	}
 
-	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
-		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
-			dimm_name, cmd_name, out_obj->type);
-		rc = -EINVAL;
-		goto out;
-	}
-
 	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
 		cmd_name, out_obj->buffer.length);
 	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
@@ -2890,14 +2892,16 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
 {
 	int rc;
 
-	if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state))
+	if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
 		return acpi_nfit_register_region(acpi_desc, nfit_spa);
 
 	set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
-	set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
+	if (!no_init_ars)
+		set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
 
 	switch (acpi_nfit_query_poison(acpi_desc)) {
 	case 0:
+	case -ENOSPC:
 	case -EAGAIN:
 		rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
 		/* shouldn't happen, try again later */
@@ -2922,7 +2926,6 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
 		break;
 	case -EBUSY:
 	case -ENOMEM:
-	case -ENOSPC:
 		/*
 		 * BIOS was using ARS, wait for it to complete (or
 		 * resources to become available) and then perform our
@@ -509,7 +509,7 @@ static int ht16k33_remove(struct i2c_client *client)
 	struct ht16k33_priv *priv = i2c_get_clientdata(client);
 	struct ht16k33_fbdev *fbdev = &priv->fbdev;
 
-	cancel_delayed_work(&fbdev->work);
+	cancel_delayed_work_sync(&fbdev->work);
 	unregister_framebuffer(fbdev->info);
 	framebuffer_release(fbdev->info);
 	free_page((unsigned long) fbdev->buffer);
@@ -119,7 +119,6 @@ void wakeup_source_drop(struct wakeup_source *ws)
 	if (!ws)
 		return;
 
-	del_timer_sync(&ws->timer);
 	__pm_relax(ws);
 }
 EXPORT_SYMBOL_GPL(wakeup_source_drop);
@@ -206,6 +205,13 @@ void wakeup_source_remove(struct wakeup_source *ws)
 	list_del_rcu(&ws->entry);
 	raw_spin_unlock_irqrestore(&events_lock, flags);
 	synchronize_srcu(&wakeup_srcu);
+
+	del_timer_sync(&ws->timer);
+	/*
+	 * Clear timer.function to make wakeup_source_not_registered() treat
+	 * this wakeup source as not registered.
+	 */
+	ws->timer.function = NULL;
 }
 EXPORT_SYMBOL_GPL(wakeup_source_remove);
 
@@ -4084,7 +4084,7 @@ static unsigned int floppy_check_events(struct gendisk *disk,
 
 	if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
 		if (lock_fdc(drive))
-			return -EINTR;
+			return 0;
 		poll_drive(false, 0);
 		process_fd_request();
 	}
@@ -2085,6 +2085,11 @@ static int try_smi_init(struct smi_info *new_smi)
 	WARN_ON(new_smi->io.dev->init_name != NULL);
 
  out_err:
+	if (rv && new_smi->io.io_cleanup) {
+		new_smi->io.io_cleanup(&new_smi->io);
+		new_smi->io.io_cleanup = NULL;
+	}
+
 	kfree(init_name);
 	return rv;
 }
Some files were not shown because too many files have changed in this diff.