This is the 4.19.87 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl3jdysACgkQONu9yGCS
 aT49mhAAxl50oRh0bSk/SikbVCn7kRaoRNlCBsPtvtvbDDIpyREIzMRmIbH2OZjS
 ji1umUu8iMj+HXW72dWNqPo2K337UzcNRsUXWAdJH8Et+ao+xOUV1jps/Zr3D9ca
 2Cw6iTn2QFhKQythMlCxb30sf+kN4cAU1XY3M8xEWXB+4nAqc/aFW7mRAP1jusRb
 1DAW+xqPiwCbaag1v5OzumAOGBhpmTcX8sfEYM+3DKcPgGL1jPyYeWlXA26nih8/
 LQR6r2tAb454pipV0uApJ2u7V5nNxprcrfUNDmAfap2q/eF1w5pBbZoS5sqpf1eZ
 2ycZ36w0ThE7lJKvNrfjq13Su+bGtpENxHlwesNPbsvz0F4xoEkelYSCE1gJBaHX
 CZvq1Dhrk5DBvkCCElV8+CJxxuhMUZwzOwrz2iBLdPnpCpSgj8uNPLXMJno6D9fH
 PZMCcBFf4WnCUBc06fB7qG+Z7y0TeAuLsNQmK3zYQoEc1gz4Yk5aFo7NgTyajqbD
 YKINVP2Wj11TDBssolIA58EYcc2J38As54wuOvYtwy+k/mVkvZVhCPOtI+h3UYm4
 lX2ROCHTzt+Av5qFlM8aSBcIlm1qihzSHEdnyqX2EZvUGC4C5Mc5/Eml3QJxAnVh
 SzUfLZGzzfntpnWn2cJZ/EA/p6hXujG5k95LEwtAnxxYBFpLTKc=
 =d8kA
 -----END PGP SIGNATURE-----

Merge 4.19.87 into android-4.19

Changes in 4.19.87
	mlxsw: spectrum_router: Fix determining underlay for a GRE tunnel
	net/mlx4_en: fix mlx4 ethtool -N insertion
	net/mlx4_en: Fix wrong limitation for number of TX rings
	net: rtnetlink: prevent underflows in do_setvfinfo()
	net/sched: act_pedit: fix WARN() in the traffic path
	net: sched: ensure opts_len <= IP_TUNNEL_OPTS_MAX in act_tunnel_key
	sfc: Only cancel the PPS workqueue if it exists
	net/mlx5e: Fix set vf link state error flow
	net/mlxfw: Verify FSM error code translation doesn't exceed array size
	net/mlx5: Fix auto group size calculation
	vhost/vsock: split packets to send using multiple buffers
	gpio: max77620: Fixup debounce delays
	tools: gpio: Correctly add make dependencies for gpio_utils
	nbd:fix memory leak in nbd_get_socket()
	virtio_console: allocate inbufs in add_port() only if it is needed
	Revert "fs: ocfs2: fix possible null-pointer dereferences in ocfs2_xa_prepare_entry()"
	mm/ksm.c: don't WARN if page is still mapped in remove_stable_node()
	drm/amd/powerplay: issue no PPSMC_MSG_GetCurrPkgPwr on unsupported ASICs
	drm/i915/pmu: "Frequency" is reported as accumulated cycles
	drm/i915/userptr: Try to acquire the page lock around set_page_dirty()
	mwifiex: Fix NL80211_TX_POWER_LIMITED
	ALSA: isight: fix leak of reference to firewire unit in error path of .probe callback
	crypto: testmgr - fix sizeof() on COMP_BUF_SIZE
	printk: lock/unlock console only for new logbuf entries
	printk: fix integer overflow in setup_log_buf()
	pinctrl: madera: Fix uninitialized variable bug in madera_mux_set_mux
	PCI: cadence: Write MSI data with 32bits
	gfs2: Fix marking bitmaps non-full
	pty: fix compat ioctls
	synclink_gt(): fix compat_ioctl()
	powerpc: Fix signedness bug in update_flash_db()
	powerpc/boot: Fix opal console in boot wrapper
	powerpc/boot: Disable vector instructions
	powerpc/eeh: Fix null deref for devices removed during EEH
	powerpc/eeh: Fix use of EEH_PE_KEEP on wrong field
	EDAC, thunderx: Fix memory leak in thunderx_l2c_threaded_isr()
	mt76: do not store aggregation sequence number for null-data frames
	mt76x0: phy: fix restore phase in mt76x0_phy_recalibrate_after_assoc
	brcmsmac: AP mode: update beacon when TIM changes
	ath10k: set probe request oui during driver start
	ath10k: allocate small size dma memory in ath10k_pci_diag_write_mem
	skd: fixup usage of legacy IO API
	cdrom: don't attempt to fiddle with cdo->capability
	spi: sh-msiof: fix deferred probing
	mmc: mediatek: fill the actual clock for mmc debugfs
	mmc: mediatek: fix cannot receive new request when msdc_cmd_is_ready fail
	PCI: mediatek: Fix class type for MT7622 to PCI_CLASS_BRIDGE_PCI
	btrfs: defrag: use btrfs_mod_outstanding_extents in cluster_pages_for_defrag
	btrfs: handle error of get_old_root
	gsmi: Fix bug in append_to_eventlog sysfs handler
	misc: mic: fix a DMA pool free failure
	w1: IAD Register is yet readable trough iad sys file. Fix snprintf (%u for unsigned, count for max size).
	m68k: fix command-line parsing when passed from u-boot
	scsi: hisi_sas: Feed back linkrate(max/min) when re-attached
	scsi: hisi_sas: Fix the race between IO completion and timeout for SMP/internal IO
	scsi: hisi_sas: Free slot later in slot_complete_vx_hw()
	RDMA/bnxt_re: Avoid NULL check after accessing the pointer
	RDMA/bnxt_re: Fix qp async event reporting
	RDMA/bnxt_re: Avoid resource leak in case the NQ registration fails
	pinctrl: sunxi: Fix a memory leak in 'sunxi_pinctrl_build_state()'
	pwm: lpss: Only set update bit if we are actually changing the settings
	amiflop: clean up on errors during setup
	qed: Align local and global PTT to propagate through the APIs.
	scsi: ips: fix missing break in switch
	nfp: bpf: protect against mis-initializing atomic counters
	KVM: nVMX: reset cache/shadows when switching loaded VMCS
	KVM: nVMX: move check_vmentry_postreqs() call to nested_vmx_enter_non_root_mode()
	KVM/x86: Fix invvpid and invept register operand size in 64-bit mode
	clk: tegra: Fixes for MBIST work around
	scsi: isci: Use proper enumerated type in atapi_d2h_reg_frame_handler
	scsi: isci: Change sci_controller_start_task's return type to sci_status
	scsi: bfa: Avoid implicit enum conversion in bfad_im_post_vendor_event
	scsi: iscsi_tcp: Explicitly cast param in iscsi_sw_tcp_host_get_param
	crypto: ccree - avoid implicit enum conversion
	nvmet: avoid integer overflow in the discard code
	nvmet-fcloop: suppress a compiler warning
	nvme-pci: fix hot removal during error handling
	PCI: mediatek: Fixup MSI enablement logic by enabling MSI before clocks
	clk: mmp2: fix the clock id for sdh2_clk and sdh3_clk
	clk: at91: audio-pll: fix audio pmc type
	ASoC: tegra_sgtl5000: fix device_node refcounting
	scsi: dc395x: fix dma API usage in srb_done
	scsi: dc395x: fix DMA API usage in sg_update_list
	scsi: zorro_esp: Limit DMA transfers to 65535 bytes
	net: dsa: mv88e6xxx: Fix 88E6141/6341 2500mbps SERDES speed
	net: fix warning in af_unix
	net: ena: Fix Kconfig dependency on X86
	xfs: fix use-after-free race in xfs_buf_rele
	xfs: clear ail delwri queued bufs on unmount of shutdown fs
	kprobes, x86/ptrace.h: Make regs_get_kernel_stack_nth() not fault on bad stack
	ACPI / scan: Create platform device for INT33FE ACPI nodes
	PM / Domains: Deal with multiple states but no governor in genpd
	ALSA: i2c/cs8427: Fix int to char conversion
	macintosh/windfarm_smu_sat: Fix debug output
	PCI: vmd: Detach resources after stopping root bus
	USB: misc: appledisplay: fix backlight update_status return code
	usbip: tools: fix atoi() on non-null terminated string
	sctp: use sk_wmem_queued to check for writable space
	dm raid: avoid bitmap with raid4/5/6 journal device
	selftests/bpf: fix file resource leak in load_kallsyms
	SUNRPC: Fix a compile warning for cmpxchg64()
	sunrpc: safely reallow resvport min/max inversion
	atm: zatm: Fix empty body Clang warnings
	s390/perf: Return error when debug_register fails
	swiotlb: do not panic on mapping failures
	spi: omap2-mcspi: Set FIFO DMA trigger level to word length
	x86/intel_rdt: Prevent pseudo-locking from using stale pointers
	sparc: Fix parport build warnings.
	scsi: hisi_sas: Fix NULL pointer dereference
	powerpc/pseries: Export raw per-CPU VPA data via debugfs
	powerpc/mm/radix: Fix off-by-one in split mapping logic
	powerpc/mm/radix: Fix overuse of small pages in splitting logic
	powerpc/mm/radix: Fix small page at boundary when splitting
	powerpc/64s/radix: Fix radix__flush_tlb_collapsed_pmd double flushing pmd
	selftests/bpf: fix return value comparison for tests in test_libbpf.sh
	tools: bpftool: fix completion for "bpftool map update"
	ceph: fix dentry leak in ceph_readdir_prepopulate
	ceph: only allow punch hole mode in fallocate
	rtc: s35390a: Change buf's type to u8 in s35390a_init
	RISC-V: Avoid corrupting the upper 32-bit of phys_addr_t in ioremap
	thermal: armada: fix a test in probe()
	f2fs: fix to spread clear_cold_data()
	f2fs: spread f2fs_set_inode_flags()
	mISDN: Fix type of switch control variable in ctrl_teimanager
	qlcnic: fix a return in qlcnic_dcb_get_capability()
	net: ethernet: ti: cpsw: unsync mcast entries while switch promisc mode
	mfd: arizona: Correct calling of runtime_put_sync
	mfd: mc13xxx-core: Fix PMIC shutdown when reading ADC values
	mfd: intel_soc_pmic_bxtwc: Chain power button IRQs as well
	mfd: max8997: Enale irq-wakeup unconditionally
	net: socionext: Stop PHY before resetting netsec
	fs/cifs: fix uninitialised variable warnings
	spi: uniphier: fix incorrect property items
	selftests/ftrace: Fix to test kprobe $comm arg only if available
	selftests: watchdog: fix message when /dev/watchdog open fails
	selftests: watchdog: Fix error message.
	selftests: kvm: Fix -Wformat warnings
	selftests: fix warning: "_GNU_SOURCE" redefined
	thermal: rcar_thermal: fix duplicate IRQ request
	thermal: rcar_thermal: Prevent hardware access during system suspend
	net: ethernet: cadence: fix socket buffer corruption problem
	bpf: devmap: fix wrong interface selection in notifier_call
	bpf, btf: fix a missing check bug in btf_parse
	powerpc/process: Fix flush_all_to_thread for SPE
	sparc64: Rework xchg() definition to avoid warnings.
	arm64: lib: use C string functions with KASAN enabled
	fs/ocfs2/dlm/dlmdebug.c: fix a sleep-in-atomic-context bug in dlm_print_one_mle()
	mm/page-writeback.c: fix range_cyclic writeback vs writepages deadlock
	tools/testing/selftests/vm/gup_benchmark.c: fix 'write' flag usage
	mm: thp: fix MADV_DONTNEED vs migrate_misplaced_transhuge_page race condition
	macsec: update operstate when lower device changes
	macsec: let the administrator set UP state even if lowerdev is down
	block: fix the DISCARD request merge
	i2c: uniphier-f: make driver robust against concurrency
	i2c: uniphier-f: fix occasional timeout error
	i2c: uniphier-f: fix race condition when IRQ is cleared
	um: Make line/tty semantics use true write IRQ
	vfs: avoid problematic remapping requests into partial EOF block
	ipv4/igmp: fix v1/v2 switchback timeout based on rfc3376, 8.12
	powerpc/xmon: Relax frame size for clang
	selftests/powerpc/ptrace: Fix out-of-tree build
	selftests/powerpc/signal: Fix out-of-tree build
	selftests/powerpc/switch_endian: Fix out-of-tree build
	selftests/powerpc/cache_shape: Fix out-of-tree build
	block: call rq_qos_exit() after queue is frozen
	mm/gup_benchmark.c: prevent integer overflow in ioctl
	linux/bitmap.h: handle constant zero-size bitmaps correctly
	linux/bitmap.h: fix type of nbits in bitmap_shift_right()
	lib/bitmap.c: fix remaining space computation in bitmap_print_to_pagebuf
	hfsplus: fix BUG on bnode parent update
	hfs: fix BUG on bnode parent update
	hfsplus: prevent btree data loss on ENOSPC
	hfs: prevent btree data loss on ENOSPC
	hfsplus: fix return value of hfsplus_get_block()
	hfs: fix return value of hfs_get_block()
	hfsplus: update timestamps on truncate()
	hfs: update timestamp on truncate()
	fs/hfs/extent.c: fix array out of bounds read of array extent
	kernel/panic.c: do not append newline to the stack protector panic string
	mm/memory_hotplug: make add_memory() take the device_hotplug_lock
	mm/memory_hotplug: fix online/offline_pages called w.o. mem_hotplug_lock
	powerpc/powernv: hold device_hotplug_lock when calling device_online()
	igb: shorten maximum PHC timecounter update interval
	fm10k: ensure completer aborts are marked as non-fatal after a resume
	net: hns3: bugfix for buffer not free problem during resetting
	net: hns3: bugfix for reporting unknown vector0 interrupt repeatly problem
	net: hns3: bugfix for is_valid_csq_clean_head()
	net: hns3: bugfix for hclge_mdio_write and hclge_mdio_read
	ntb_netdev: fix sleep time mismatch
	ntb: intel: fix return value for ndev_vec_mask()
	irq/matrix: Fix memory overallocation
	nvme-pci: fix conflicting p2p resource adds
	arm64: makefile fix build of .i file in external module case
	tools/power turbosat: fix AMD APIC-id output
	mm: handle no memcg case in memcg_kmem_charge() properly
	ocfs2: without quota support, avoid calling quota recovery
	ocfs2: don't use iocb when EIOCBQUEUED returns
	ocfs2: don't put and assigning null to bh allocated outside
	ocfs2: fix clusters leak in ocfs2_defrag_extent()
	net: do not abort bulk send on BQL status
	sched/topology: Fix off by one bug
	sched/fair: Don't increase sd->balance_interval on newidle balance
	openvswitch: fix linking without CONFIG_NF_CONNTRACK_LABELS
	ARM: dts: imx6sx-sdb: Fix enet phy regulator
	clk: sunxi-ng: enable so-said LDOs for A64 SoC's pll-mipi clock
	soc: bcm: brcmstb: Fix re-entry point with a THUMB2_KERNEL
	audit: print empty EXECVE args
	sock_diag: fix autoloading of the raw_diag module
	net: bpfilter: fix iptables failure if bpfilter_umh is disabled
	nds32: Fix bug in bitfield.h
	media: ov13858: Check for possible null pointer
	btrfs: avoid link error with CONFIG_NO_AUTO_INLINE
	wil6210: fix debugfs memory access alignment
	wil6210: fix L2 RX status handling
	wil6210: fix RGF_CAF_ICR address for Talyn-MB
	wil6210: fix locking in wmi_call
	ath10k: snoc: fix unbalanced clock error handling
	wlcore: Fix the return value in case of error in 'wlcore_vendor_cmd_smart_config_start()'
	rtl8xxxu: Fix missing break in switch
	brcmsmac: never log "tid x is not agg'able" by default
	wireless: airo: potential buffer overflow in sprintf()
	rtlwifi: rtl8192de: Fix misleading REG_MCUFWDL information
	net: dsa: bcm_sf2: Turn on PHY to allow successful registration
	scsi: mpt3sas: Fix Sync cache command failure during driver unload
	scsi: mpt3sas: Don't modify EEDPTagMode field setting on SAS3.5 HBA devices
	scsi: mpt3sas: Fix driver modifying persistent data in Manufacturing page11
	scsi: megaraid_sas: Fix msleep granularity
	scsi: megaraid_sas: Fix goto labels in error handling
	scsi: lpfc: fcoe: Fix link down issue after 1000+ link bounces
	scsi: lpfc: Fix odd recovery in duplicate FLOGIs in point-to-point
	scsi: lpfc: Correct loss of fc4 type on remote port address change
	usb: typec: tcpm: charge current handling for sink during hard reset
	dlm: fix invalid free
	dlm: don't leak kernel pointer to userspace
	vrf: mark skb for multicast or link-local as enslaved to VRF
	clk: tegra20: Turn EMC clock gate into divider
	ACPICA: Use %d for signed int print formatting instead of %u
	net: bcmgenet: return correct value 'ret' from bcmgenet_power_down
	of: unittest: allow base devicetree to have symbol metadata
	of: unittest: initialize args before calling of_*parse_*()
	tools: bpftool: pass an argument to silence open_obj_pinned()
	cfg80211: Prevent regulatory restore during STA disconnect in concurrent interfaces
	pinctrl: qcom: spmi-gpio: fix gpio-hog related boot issues
	pinctrl: bcm2835: Use define directive for BCM2835_PINCONF_PARAM_PULL
	pinctrl: lpc18xx: Use define directive for PIN_CONFIG_GPIO_PIN_INT
	pinctrl: zynq: Use define directive for PIN_CONFIG_IO_STANDARD
	PCI: keystone: Use quirk to limit MRRS for K2G
	nvme-pci: fix surprise removal
	spi: omap2-mcspi: Fix DMA and FIFO event trigger size mismatch
	i2c: uniphier-f: fix timeout error after reading 8 bytes
	mm/memory_hotplug: Do not unlock when fails to take the device_hotplug_lock
	ipv6: Fix handling of LLA with VRF and sockets bound to VRF
	cfg80211: call disconnect_wk when AP stops
	mm/page_io.c: do not free shared swap slots
	Bluetooth: Fix invalid-free in bcsp_close()
	KVM: MMU: Do not treat ZONE_DEVICE pages as being reserved
	ath10k: Fix a NULL-ptr-deref bug in ath10k_usb_alloc_urb_from_pipe
	ath9k_hw: fix uninitialized variable data
	md/raid10: prevent access of uninitialized resync_pages offset
	mm/memory_hotplug: don't access uninitialized memmaps in shrink_zone_span()
	net: phy: dp83867: fix speed 10 in sgmii mode
	net: phy: dp83867: increase SGMII autoneg timer duration
	ocfs2: remove ocfs2_is_o2cb_active()
	ARM: 8904/1: skip nomap memblocks while finding the lowmem/highmem boundary
	ARC: perf: Accommodate big-endian CPU
	x86/insn: Fix awk regexp warnings
	x86/speculation: Fix incorrect MDS/TAA mitigation status
	x86/speculation: Fix redundant MDS mitigation message
	nbd: prevent memory leak
	y2038: futex: Move compat implementation into futex.c
	futex: Prevent robust futex exit race
	ALSA: usb-audio: Fix NULL dereference at parsing BADD
	nfc: port100: handle command failure cleanly
	media: vivid: Set vid_cap_streaming and vid_out_streaming to true
	media: vivid: Fix wrong locking that causes race conditions on streaming stop
	media: usbvision: Fix races among open, close, and disconnect
	cpufreq: Add NULL checks to show() and store() methods of cpufreq
	media: uvcvideo: Fix error path in control parsing failure
	media: b2c2-flexcop-usb: add sanity checking
	media: cxusb: detect cxusb_ctrl_msg error in query
	media: imon: invalid dereference in imon_touch_event
	virtio_ring: fix return code on DMA mapping fails
	USBIP: add config dependency for SGL_ALLOC
	usbip: tools: fix fd leakage in the function of read_attr_usbip_status
	usbip: Fix uninitialized symbol 'nents' in stub_recv_cmd_submit()
	usb-serial: cp201x: support Mark-10 digital force gauge
	USB: chaoskey: fix error case of a timeout
	appledisplay: fix error handling in the scheduled work
	USB: serial: mos7840: add USB ID to support Moxa UPort 2210
	USB: serial: mos7720: fix remote wakeup
	USB: serial: mos7840: fix remote wakeup
	USB: serial: option: add support for DW5821e with eSIM support
	USB: serial: option: add support for Foxconn T77W968 LTE modules
	staging: comedi: usbduxfast: usbduxfast_ai_cmdtest rounding error
	powerpc/64s: support nospectre_v2 cmdline option
	powerpc/book3s64: Fix link stack flush on context switch
	KVM: PPC: Book3S HV: Flush link stack on guest exit to host kernel
	PM / devfreq: Fix kernel oops on governor module load
	Linux 4.19.87

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Id8c8d4cd92227f8f46c48c05440e09da957fa687
commit 2700cf837e
Greg Kroah-Hartman, 2019-12-01 09:53:43 +01:00
323 changed files with 3055 additions and 1591 deletions

@ -265,8 +265,11 @@ time with the option "mds=". The valid arguments for this option are:
============ =============================================================
Not specifying this option is equivalent to "mds=full".
Not specifying this option is equivalent to "mds=full". For processors
that are affected by both TAA (TSX Asynchronous Abort) and MDS,
specifying just "mds=off" without an accompanying "tsx_async_abort=off"
will have no effect as the same mitigation is used for both
vulnerabilities.
Mitigation selection guide
--------------------------


@ -174,7 +174,10 @@ the option "tsx_async_abort=". The valid arguments for this option are:
CPU is not vulnerable to cross-thread TAA attacks.
============ =============================================================
Not specifying this option is equivalent to "tsx_async_abort=full".
Not specifying this option is equivalent to "tsx_async_abort=full". For
processors that are affected by both TAA and MDS, specifying just
"tsx_async_abort=off" without an accompanying "mds=off" will have no
effect as the same mitigation is used for both vulnerabilities.
The kernel command line also allows to control the TSX feature using the
parameter "tsx=" on CPUs which support TSX control. MSR_IA32_TSX_CTRL is used


@ -2368,6 +2368,12 @@
SMT on vulnerable CPUs
off - Unconditionally disable MDS mitigation
On TAA-affected machines, mds=off can be prevented by
an active TAA mitigation as both vulnerabilities are
mitigated with the same mechanism so in order to disable
this mitigation, you need to specify tsx_async_abort=off
too.
Not specifying this option is equivalent to
mds=full.
@ -4792,6 +4798,11 @@
vulnerable to cross-thread TAA attacks.
off - Unconditionally disable TAA mitigation
On MDS-affected machines, tsx_async_abort=off can be
prevented by an active MDS mitigation as both vulnerabilities
are mitigated with the same mechanism so in order to disable
this mitigation, you need to specify mds=off too.
Not specifying this option is equivalent to
tsx_async_abort=full. On CPUs which are MDS affected
and deploy MDS mitigation, TAA mitigation is not
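
As a usage illustration (not part of the patch): per the documentation text quoted above, on a CPU affected by both MDS and TAA the shared buffer-clearing mitigation is only fully disabled when both switches are passed on the kernel command line, for example:

	mds=off tsx_async_abort=off

Passing only one of the two leaves the mitigation active for both vulnerabilities.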


@ -5,18 +5,20 @@ UniPhier SoCs have SCSSI which supports SPI single channel.
Required properties:
- compatible: should be "socionext,uniphier-scssi"
- reg: address and length of the spi master registers
- #address-cells: must be <1>, see spi-bus.txt
- #size-cells: must be <0>, see spi-bus.txt
- clocks: A phandle to the clock for the device.
- resets: A phandle to the reset control for the device.
- interrupts: a single interrupt specifier
- pinctrl-names: should be "default"
- pinctrl-0: pin control state for the default mode
- clocks: a phandle to the clock for the device
- resets: a phandle to the reset control for the device
Example:
spi0: spi@54006000 {
compatible = "socionext,uniphier-scssi";
reg = <0x54006000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 39 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0>;
clocks = <&peri_clk 11>;
resets = <&peri_rst 11>;
};


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 86
SUBLEVEL = 87
EXTRAVERSION =
NAME = "People's Front"


@ -490,8 +490,8 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
/* loop thru all available h/w condition indexes */
for (j = 0; j < cc_bcr.c; j++) {
write_aux_reg(ARC_REG_CC_INDEX, j);
cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0));
cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1));
/* See if it has been mapped to a perf event_id */
for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {


@ -115,7 +115,9 @@
regulator-name = "enet_3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
gpio = <&gpio2 6 GPIO_ACTIVE_LOW>;
regulator-boot-on;
regulator-always-on;
};
reg_pcie_gpio: regulator-pcie-gpio {
@ -178,6 +180,7 @@
phy-supply = <&reg_enet_3v3>;
phy-mode = "rgmii";
phy-handle = <&ethphy1>;
phy-reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
status = "okay";
mdio {
@ -371,6 +374,8 @@
MX6SX_PAD_RGMII1_RD3__ENET1_RX_DATA_3 0x3081
MX6SX_PAD_RGMII1_RX_CTL__ENET1_RX_EN 0x3081
MX6SX_PAD_ENET2_RX_CLK__ENET2_REF_CLK_25M 0x91
/* phy reset */
MX6SX_PAD_ENET2_CRS__GPIO2_IO_7 0x10b0
>;
};


@ -1195,6 +1195,9 @@ void __init adjust_lowmem_bounds(void)
phys_addr_t block_start = reg->base;
phys_addr_t block_end = reg->base + reg->size;
if (memblock_is_nomap(reg))
continue;
if (reg->base < vmalloc_limit) {
if (block_end > lowmem_limit)
/*


@ -161,6 +161,7 @@ archclean:
$(Q)$(MAKE) $(clean)=$(boot)
$(Q)$(MAKE) $(clean)=$(boot)/dts
ifeq ($(KBUILD_EXTMOD),)
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
# In order to do that, we should use the archprepare target, but we can't since
# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
@ -170,6 +171,7 @@ archclean:
prepare: vdso_prepare
vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
endif
define archhelp
echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'


@ -102,5 +102,5 @@ __init void process_uboot_commandline(char *commandp, int size)
}
parse_uboot_commandline(commandp, len);
commandp[size - 1] = 0;
commandp[len - 1] = 0;
}


@ -692,8 +692,8 @@
#define PFM_CTL_offKU1 13 /* Enable user mode event counting for PFMC1 */
#define PFM_CTL_offKU2 14 /* Enable user mode event counting for PFMC2 */
#define PFM_CTL_offSEL0 15 /* The event selection for PFMC0 */
#define PFM_CTL_offSEL1 21 /* The event selection for PFMC1 */
#define PFM_CTL_offSEL2 27 /* The event selection for PFMC2 */
#define PFM_CTL_offSEL1 16 /* The event selection for PFMC1 */
#define PFM_CTL_offSEL2 22 /* The event selection for PFMC2 */
/* bit 28:31 reserved */
#define PFM_CTL_mskEN0 ( 0x01 << PFM_CTL_offEN0 )


@ -32,8 +32,8 @@ else
endif
BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -Os -msoft-float -pipe \
-fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
-fno-strict-aliasing -Os -msoft-float -mno-altivec -mno-vsx \
-pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
-D$(compress-y)
ifdef CONFIG_PPC64_BOOT_WRAPPER


@ -13,8 +13,6 @@
#include <libfdt.h>
#include "../include/asm/opal-api.h"
#ifdef CONFIG_PPC64_BOOT_WRAPPER
/* Global OPAL struct used by opal-call.S */
struct opal {
u64 base;
@ -101,9 +99,3 @@ int opal_console_init(void *devp, struct serial_console_data *scdp)
return 0;
}
#else
int opal_console_init(void *devp, struct serial_console_data *scdp)
{
return -1;
}
#endif /* __powerpc64__ */


@ -146,8 +146,11 @@ void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
/* Patch sites */
extern s32 patch__call_flush_count_cache;
extern s32 patch__flush_count_cache_return;
extern s32 patch__flush_link_stack_return;
extern s32 patch__call_kvm_flush_link_stack;
extern s32 patch__memset_nocache, patch__memcpy_nocache;
extern long flush_count_cache;
extern long kvm_flush_link_stack;
#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */


@ -81,6 +81,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
// Software required to flush count cache on context switch
#define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull
// Software required to flush link stack on context switch
#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
// Features enabled by default
#define SEC_FTR_DEFAULT \


@ -281,6 +281,10 @@ static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn,
struct pci_driver *driver;
enum pci_ers_result new_result;
if (!edev->pdev) {
eeh_edev_info(edev, "no device");
return;
}
device_lock(&edev->pdev->dev);
if (eeh_edev_actionable(edev)) {
driver = eeh_pcid_get(edev->pdev);


@ -379,7 +379,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
while (parent) {
if (!(parent->type & EEH_PE_INVALID))
break;
parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP);
parent->type &= ~EEH_PE_INVALID;
parent = parent->parent;
}


@ -533,6 +533,7 @@ flush_count_cache:
/* Save LR into r9 */
mflr r9
// Flush the link stack
.rept 64
bl .+4
.endr
@ -542,6 +543,11 @@ flush_count_cache:
.balign 32
/* Restore LR */
1: mtlr r9
// If we're just flushing the link stack, return here
3: nop
patch_site 3b patch__flush_link_stack_return
li r9,0x7fff
mtctr r9


@ -575,12 +575,11 @@ void flush_all_to_thread(struct task_struct *tsk)
if (tsk->thread.regs) {
preempt_disable();
BUG_ON(tsk != current);
save_all(tsk);
#ifdef CONFIG_SPE
if (tsk->thread.regs->msr & MSR_SPE)
tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif
save_all(tsk);
preempt_enable();
}


@ -24,11 +24,12 @@ enum count_cache_flush_type {
COUNT_CACHE_FLUSH_HW = 0x4,
};
static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
static bool link_stack_flush_enabled;
bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;
#ifdef CONFIG_PPC_FSL_BOOK3E
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
static bool no_spectrev2;
#endif
@ -106,7 +107,7 @@ static __init int barrier_nospec_debugfs_init(void)
device_initcall(barrier_nospec_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#ifdef CONFIG_PPC_FSL_BOOK3E
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
static int __init handle_nospectre_v2(char *p)
{
no_spectrev2 = true;
@ -114,6 +115,9 @@ static int __init handle_nospectre_v2(char *p)
return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);
#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_PPC_FSL_BOOK3E
void setup_spectre_v2(void)
{
if (no_spectrev2 || cpu_mitigations_off())
@ -201,11 +205,19 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
if (ccd)
seq_buf_printf(&s, "Indirect branch cache disabled");
if (link_stack_flush_enabled)
seq_buf_printf(&s, ", Software link stack flush");
} else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
seq_buf_printf(&s, "Mitigation: Software count cache flush");
if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
seq_buf_printf(&s, " (hardware accelerated)");
if (link_stack_flush_enabled)
seq_buf_printf(&s, ", Software link stack flush");
} else if (btb_flush_enabled) {
seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
} else {
@ -366,18 +378,49 @@ static __init int stf_barrier_debugfs_init(void)
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
static void no_count_cache_flush(void)
{
count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
pr_info("count-cache-flush: software flush disabled.\n");
}
static void toggle_count_cache_flush(bool enable)
{
if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) &&
!security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK))
enable = false;
if (!enable) {
patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
pr_info("count-cache-flush: software flush disabled.\n");
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP);
#endif
pr_info("link-stack-flush: software flush disabled.\n");
link_stack_flush_enabled = false;
no_count_cache_flush();
return;
}
// This enables the branch from _switch to flush_count_cache
patch_branch_site(&patch__call_flush_count_cache,
(u64)&flush_count_cache, BRANCH_SET_LINK);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
// This enables the branch from guest_exit_cont to kvm_flush_link_stack
patch_branch_site(&patch__call_kvm_flush_link_stack,
(u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
#endif
pr_info("link-stack-flush: software flush enabled.\n");
link_stack_flush_enabled = true;
// If we just need to flush the link stack, patch an early return
if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
no_count_cache_flush();
return;
}
if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
pr_info("count-cache-flush: full software flush sequence enabled.\n");
@ -391,7 +434,26 @@ static void toggle_count_cache_flush(bool enable)
void setup_count_cache_flush(void)
{
toggle_count_cache_flush(true);
bool enable = true;
if (no_spectrev2 || cpu_mitigations_off()) {
if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");
enable = false;
}
/*
* There's no firmware feature flag/hypervisor bit to tell us we need to
* flush the link stack on context switch. So we set it here if we see
* either of the Spectre v2 mitigations that aim to protect userspace.
*/
if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
toggle_count_cache_flush(enable);
}
#ifdef CONFIG_DEBUG_FS


@ -18,6 +18,7 @@
*/
#include <asm/ppc_asm.h>
#include <asm/code-patching-asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
@ -1559,6 +1560,10 @@ mc_cont:
1:
#endif /* CONFIG_KVM_XICS */
/* Possibly flush the link stack here. */
1: nop
patch_site 1b patch__call_kvm_flush_link_stack
/* For hash guest, read the guest SLB and save it away */
ld r5, VCPU_KVM(r9)
lbz r0, KVM_RADIX(r5)
@ -2107,6 +2112,29 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mtlr r0
blr
.balign 32
.global kvm_flush_link_stack
kvm_flush_link_stack:
/* Save LR into r0 */
mflr r0
/* Flush the link stack. On Power8 it's up to 32 entries in size. */
.rept 32
bl .+4
.endr
/* And on Power9 it's up to 64. */
BEGIN_FTR_SECTION
.rept 32
bl .+4
.endr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
/* Restore LR */
mtlr r0
blr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Softpatch interrupt for transactional memory emulation cases


@ -294,15 +294,15 @@ static int __meminit create_physical_mapping(unsigned long start,
}
if (split_text_mapping && (mapping_size == PUD_SIZE) &&
(addr <= __pa_symbol(__init_begin)) &&
(addr + mapping_size) >= __pa_symbol(_stext)) {
(addr < __pa_symbol(__init_begin)) &&
(addr + mapping_size) > __pa_symbol(__init_begin)) {
max_mapping_size = PMD_SIZE;
goto retry;
}
if (split_text_mapping && (mapping_size == PMD_SIZE) &&
(addr <= __pa_symbol(__init_begin)) &&
(addr + mapping_size) >= __pa_symbol(_stext)) {
(addr < __pa_symbol(__init_begin)) &&
(addr + mapping_size) > __pa_symbol(__init_begin)) {
mapping_size = PAGE_SIZE;
psize = mmu_virtual_psize;
}


@ -1072,7 +1072,6 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
goto local;
}
_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
goto local;
} else {
local:
_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);


@ -244,9 +244,11 @@ static int memtrace_online(void)
* we need to online the memory ourselves.
*/
if (!memhp_auto_online) {
lock_device_hotplug();
walk_memory_range(PFN_DOWN(ent->start),
PFN_UP(ent->start + ent->size - 1),
NULL, online_mem_block);
unlock_device_hotplug();
}
/*


@ -664,7 +664,7 @@ static int update_flash_db(void)
db_set_64(db, &os_area_db_id_rtc_diff, saved_params.rtc_diff);
count = os_area_flash_write(db, sizeof(struct os_area_db), pos);
if (count < sizeof(struct os_area_db)) {
if (count < 0 || count < sizeof(struct os_area_db)) {
pr_debug("%s: os_area_flash_write failed %zd\n", __func__,
count);
error = count < 0 ? count : -EIO;


@ -676,7 +676,7 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
nid = memory_add_physaddr_to_nid(lmb->base_addr);
/* Add the memory */
rc = add_memory(nid, lmb->base_addr, block_sz);
rc = __add_memory(nid, lmb->base_addr, block_sz);
if (rc) {
invalidate_lmb_associativity_index(lmb);
return rc;


@ -48,6 +48,7 @@
#include <asm/kexec.h>
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>
#include <asm/debugfs.h>
#include "pseries.h"
@ -1032,3 +1033,56 @@ static int __init reserve_vrma_context_id(void)
return 0;
}
machine_device_initcall(pseries, reserve_vrma_context_id);
#ifdef CONFIG_DEBUG_FS
/* debugfs file interface for vpa data */
static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len,
loff_t *pos)
{
int cpu = (long)filp->private_data;
struct lppaca *lppaca = &lppaca_of(cpu);
return simple_read_from_buffer(buf, len, pos, lppaca,
sizeof(struct lppaca));
}
static const struct file_operations vpa_fops = {
.open = simple_open,
.read = vpa_file_read,
.llseek = default_llseek,
};
static int __init vpa_debugfs_init(void)
{
char name[16];
long i;
static struct dentry *vpa_dir;
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return 0;
vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root);
if (!vpa_dir) {
pr_warn("%s: can't create vpa root dir\n", __func__);
return -ENOMEM;
}
/* set up the per-cpu vpa file*/
for_each_possible_cpu(i) {
struct dentry *d;
sprintf(name, "cpu-%ld", i);
d = debugfs_create_file(name, 0400, vpa_dir, (void *)i,
&vpa_fops);
if (!d) {
pr_warn("%s: can't create per-cpu vpa file\n",
__func__);
return -ENOMEM;
}
}
return 0;
}
machine_arch_initcall(pseries, vpa_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
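
As a usage illustration (not part of the patch, and the exact path is an assumption): with the hunk above applied, the raw per-CPU VPA can be read from userspace through debugfs. Assuming debugfs is mounted at /sys/kernel/debug and the "vpa" directory lands under the usual powerpc debugfs root, something like:

	hexdump -C /sys/kernel/debug/powerpc/vpa/cpu-0 | head

dumps the first bytes of CPU 0's lppaca.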


@ -13,6 +13,12 @@ UBSAN_SANITIZE := n
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
ifdef CONFIG_CC_IS_CLANG
# clang stores addresses on the stack causing the frame size to blow
# out. See https://github.com/ClangBuiltLinux/linux/issues/252
KBUILD_CFLAGS += -Wframe-larger-than=4096
endif
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
obj-y += xmon.o nonstdio.o spr_access.o


@ -42,7 +42,7 @@ static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
/* Page-align mappings */
offset = addr & (~PAGE_MASK);
addr &= PAGE_MASK;
addr -= offset;
size = PAGE_ALIGN(size + offset);
area = get_vm_area_caller(size, VM_IOREMAP, caller);


@ -2045,14 +2045,17 @@ static int __init init_cpum_sampling_pmu(void)
}
sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
if (!sfdbg)
if (!sfdbg) {
pr_err("Registering for s390dbf failed\n");
return -ENOMEM;
}
debug_register_view(sfdbg, &debug_sprintf_view);
err = register_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
if (err) {
pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
debug_unregister(sfdbg);
goto out;
}
@ -2061,6 +2064,7 @@ static int __init init_cpum_sampling_pmu(void)
pr_cpumsf_err(RS_INIT_FAILURE_PERF);
unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
debug_unregister(sfdbg);
goto out;
}


@ -52,7 +52,12 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
return val;
}
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define xchg(ptr,x) \
({ __typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr))) \
__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
__ret; \
})
void __xchg_called_with_bad_pointer(void);
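
A brief aside on the xchg() rework above (an illustrative sketch, not part of the patch; the variable and function names below are made up): the old definition was a bare cast of __xchg()'s result, so call sites that discard the return value triggered "value computed is not used" warnings; the statement-expression form assigns to a local __ret first, keeping the returned type while avoiding the warning at such call sites:

	/* Illustration only: a caller that intentionally ignores xchg()'s result. */
	static unsigned long pending_flag;

	static void clear_pending(void)
	{
		/* With the old macro this line warned "value computed is not used". */
		xchg(&pending_flag, 0UL);
	}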


@ -21,6 +21,7 @@
*/
#define HAS_DMA
#ifdef CONFIG_PARPORT_PC_FIFO
static DEFINE_SPINLOCK(dma_spin_lock);
#define claim_dma_lock() \
@ -31,6 +32,7 @@ static DEFINE_SPINLOCK(dma_spin_lock);
#define release_dma_lock(__flags) \
spin_unlock_irqrestore(&dma_spin_lock, __flags);
#endif
static struct sparc_ebus_info {
struct ebus_dma_info info;


@ -261,7 +261,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data)
if (err == 0) {
spin_unlock(&line->lock);
return IRQ_NONE;
} else if (err < 0) {
} else if ((err < 0) && (err != -EAGAIN)) {
line->head = line->buffer;
line->tail = line->buffer;
}
@ -284,7 +284,7 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
if (err)
return err;
if (output)
err = um_request_irq(driver->write_irq, fd, IRQ_NONE,
err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
line_write_interrupt, IRQF_SHARED,
driver->write_irq_name, data);
return err;


@ -236,24 +236,52 @@ static inline int regs_within_kernel_stack(struct pt_regs *regs,
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
/**
* regs_get_kernel_stack_nth_addr() - get the address of the Nth entry on stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns the address of the @n th entry of the
* kernel stack which is specified by @regs. If the @n th entry is NOT in
* the kernel stack, this returns NULL.
*/
static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n)
{
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
addr += n;
if (regs_within_kernel_stack(regs, (unsigned long)addr))
return addr;
else
return NULL;
}
/* To avoid include hell, we can't include uaccess.h */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
* is specified by @regs. If the @n th entry is NOT in the kernel stack,
* is specified by @regs. If the @n th entry is NOT in the kernel stack
* this returns 0.
*/
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
unsigned int n)
{
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
addr += n;
if (regs_within_kernel_stack(regs, (unsigned long)addr))
return *addr;
else
return 0;
unsigned long *addr;
unsigned long val;
long ret;
addr = regs_get_kernel_stack_nth_addr(regs, n);
if (addr) {
ret = probe_kernel_read(&val, addr, sizeof(val));
if (!ret)
return val;
}
return 0;
}
#define arch_has_single_step() (1)


@ -39,6 +39,7 @@ static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init mds_print_mitigation(void);
static void __init taa_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
@ -108,6 +109,12 @@ void __init check_bugs(void)
mds_select_mitigation();
taa_select_mitigation();
/*
* As MDS and TAA mitigations are inter-related, print MDS
* mitigation until after TAA mitigation selection is done.
*/
mds_print_mitigation();
arch_smt_update();
#ifdef CONFIG_X86_32
@ -245,6 +252,12 @@ static void __init mds_select_mitigation(void)
(mds_nosmt || cpu_mitigations_auto_nosmt()))
cpu_smt_disable(false);
}
}
static void __init mds_print_mitigation(void)
{
if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
return;
pr_info("%s\n", mds_strings[mds_mitigation]);
}
@ -304,8 +317,12 @@ static void __init taa_select_mitigation(void)
return;
}
/* TAA mitigation is turned off on the cmdline (tsx_async_abort=off) */
if (taa_mitigation == TAA_MITIGATION_OFF)
/*
* TAA mitigation via VERW is turned off if both
* tsx_async_abort=off and mds=off are specified.
*/
if (taa_mitigation == TAA_MITIGATION_OFF &&
mds_mitigation == MDS_MITIGATION_OFF)
goto out;
if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
@ -339,6 +356,15 @@ static void __init taa_select_mitigation(void)
if (taa_nosmt || cpu_mitigations_auto_nosmt())
cpu_smt_disable(false);
/*
* Update MDS mitigation, if necessary, as the mds_user_clear is
* now enabled for TAA mitigation.
*/
if (mds_mitigation == MDS_MITIGATION_OFF &&
boot_cpu_has_bug(X86_BUG_MDS)) {
mds_mitigation = MDS_MITIGATION_FULL;
mds_select_mitigation();
}
out:
pr_info("%s\n", taa_strings[taa_mitigation]);
}


@ -610,6 +610,13 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
cancel_delayed_work(&d->cqm_limbo);
}
/*
* rdt_domain "d" is going to be freed below, so clear
* its pointer from pseudo_lock_region struct.
*/
if (d->plr)
d->plr->d = NULL;
kfree(d->ctrl_val);
kfree(d->mbps_val);
kfree(d->rmid_busy_llc);


@ -408,8 +408,16 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
for_each_alloc_enabled_rdt_resource(r)
seq_printf(s, "%s:uninitialized\n", r->name);
} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
seq_printf(s, "%s:%d=%x\n", rdtgrp->plr->r->name,
rdtgrp->plr->d->id, rdtgrp->plr->cbm);
if (!rdtgrp->plr->d) {
rdt_last_cmd_clear();
rdt_last_cmd_puts("Cache domain offline\n");
ret = -ENODEV;
} else {
seq_printf(s, "%s:%d=%x\n",
rdtgrp->plr->r->name,
rdtgrp->plr->d->id,
rdtgrp->plr->cbm);
}
} else {
closid = rdtgrp->closid;
for_each_alloc_enabled_rdt_resource(r) {


@ -1116,6 +1116,11 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
goto out;
}
if (!plr->d) {
ret = -ENODEV;
goto out;
}
plr->thread_done = 0;
cpu = cpumask_first(&plr->d->cpu_mask);
if (!cpu_online(cpu)) {
@ -1429,6 +1434,11 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
plr = rdtgrp->plr;
if (!plr->d) {
mutex_unlock(&rdtgroup_mutex);
return -ENODEV;
}
/*
* Task is required to run with affinity to the cpus associated
* with the pseudo-locked region. If this is not the case the task


@ -268,17 +268,27 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
struct seq_file *s, void *v)
{
struct rdtgroup *rdtgrp;
struct cpumask *mask;
int ret = 0;
rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (rdtgrp) {
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
cpumask_pr_args(&rdtgrp->plr->d->cpu_mask));
else
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
if (!rdtgrp->plr->d) {
rdt_last_cmd_clear();
rdt_last_cmd_puts("Cache domain offline\n");
ret = -ENODEV;
} else {
mask = &rdtgrp->plr->d->cpu_mask;
seq_printf(s, is_cpu_list(of) ?
"%*pbl\n" : "%*pb\n",
cpumask_pr_args(mask));
}
} else {
seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
cpumask_pr_args(&rdtgrp->cpu_mask));
}
} else {
ret = -ENOENT;
}
@ -1286,6 +1296,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
struct rdt_resource *r;
struct rdt_domain *d;
unsigned int size;
int ret = 0;
bool sep;
u32 ctrl;
@ -1296,11 +1307,18 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
}
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->r->name);
size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
rdtgrp->plr->d,
rdtgrp->plr->cbm);
seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
if (!rdtgrp->plr->d) {
rdt_last_cmd_clear();
rdt_last_cmd_puts("Cache domain offline\n");
ret = -ENODEV;
} else {
seq_printf(s, "%*s:", max_name_width,
rdtgrp->plr->r->name);
size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
rdtgrp->plr->d,
rdtgrp->plr->cbm);
seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
}
goto out;
}
@ -1330,7 +1348,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
out:
rdtgroup_kn_unlock(of->kn);
return 0;
return ret;
}
/* rdtgroup information files for one cache resource. */


@ -3261,7 +3261,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
* here.
*/
if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
level == PT_PAGE_TABLE_LEVEL &&
!kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
PageTransCompoundMap(pfn_to_page(pfn)) &&
!mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
unsigned long mask;
@ -5709,9 +5709,9 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
* the guest, and the guest page table is using 4K page size
* mapping if the indirect sp has level = 1.
*/
if (sp->role.direct &&
!kvm_is_reserved_pfn(pfn) &&
PageTransCompoundMap(pfn_to_page(pfn))) {
if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
!kvm_is_zone_device_pfn(pfn) &&
PageTransCompoundMap(pfn_to_page(pfn))) {
drop_spte(kvm, sptep);
need_tlb_flush = 1;
goto restart;


@ -2079,7 +2079,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
return -1;
}
static inline void __invvpid(int ext, u16 vpid, gva_t gva)
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
struct {
u64 vpid : 16;
@ -2094,7 +2094,7 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva)
BUG_ON(error);
}
static inline void __invept(int ext, u64 eptp, gpa_t gpa)
static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
struct {
u64 eptp, gpa;
@ -11013,6 +11013,10 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
vmx->loaded_vmcs = vmcs;
vmx_vcpu_load(vcpu, cpu);
put_cpu();
vm_entry_controls_reset_shadow(vmx);
vm_exit_controls_reset_shadow(vmx);
vmx_segment_cache_clear(vmx);
}
/*
@ -12690,6 +12694,9 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
if (from_vmentry && check_vmentry_postreqs(vcpu, vmcs12, exit_qual))
return EXIT_REASON_INVALID_STATE;
enter_guest_mode(vcpu);
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
@ -12699,7 +12706,6 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
vmx_segment_cache_clear(vmx);
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vcpu->arch.tsc_offset += vmcs12->tsc_offset;
@ -12833,13 +12839,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
*/
skip_emulated_instruction(vcpu);
ret = check_vmentry_postreqs(vcpu, vmcs12, &exit_qual);
if (ret) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, exit_qual);
return 1;
}
/*
* We're finally done with prerequisite checking, and can start with
* the nested entry.
@ -13530,9 +13529,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
}
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
vm_entry_controls_reset_shadow(vmx);
vm_exit_controls_reset_shadow(vmx);
vmx_segment_cache_clear(vmx);
/* Update any VMCS fields that might have changed while L2 ran */
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);


@ -69,7 +69,7 @@ BEGIN {
lprefix1_expr = "\\((66|!F3)\\)"
lprefix2_expr = "\\(F3\\)"
lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
lprefix_expr = "\\((66|F2|F3)\\)"
max_lprefix = 4
@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
return add_flags(imm, mod)
}
/^[0-9a-f]+\:/ {
/^[0-9a-f]+:/ {
if (NR == 1)
next
# get index


@ -786,6 +786,9 @@ void blk_cleanup_queue(struct request_queue *q)
* prevent that q->request_fn() gets invoked after draining finished.
*/
blk_freeze_queue(q);
rq_qos_exit(q);
spin_lock_irq(lock);
queue_flag_set(QUEUE_FLAG_DEAD, q);
spin_unlock_irq(lock);


@ -672,6 +672,31 @@ static void blk_account_io_merge(struct request *req)
part_stat_unlock();
}
}
/*
* Two cases of handling DISCARD merge:
* If max_discard_segments > 1, the driver takes every bio
* as a range and send them to controller together. The ranges
* needn't to be contiguous.
* Otherwise, the bios/requests will be handled as same as
* others which should be contiguous.
*/
static inline bool blk_discard_mergable(struct request *req)
{
if (req_op(req) == REQ_OP_DISCARD &&
queue_max_discard_segments(req->q) > 1)
return true;
return false;
}
enum elv_merge blk_try_req_merge(struct request *req, struct request *next)
{
if (blk_discard_mergable(req))
return ELEVATOR_DISCARD_MERGE;
else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
return ELEVATOR_BACK_MERGE;
return ELEVATOR_NO_MERGE;
}
/*
* For non-mq, this has to be called with the request spinlock acquired.
@ -689,12 +714,6 @@ static struct request *attempt_merge(struct request_queue *q,
if (req_op(req) != req_op(next))
return NULL;
/*
* not contiguous
*/
if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
return NULL;
if (rq_data_dir(req) != rq_data_dir(next)
|| req->rq_disk != next->rq_disk
|| req_no_special_merge(next))
@ -723,11 +742,19 @@ static struct request *attempt_merge(struct request_queue *q,
* counts here. Handle DISCARDs separately, as they
* have separate settings.
*/
if (req_op(req) == REQ_OP_DISCARD) {
switch (blk_try_req_merge(req, next)) {
case ELEVATOR_DISCARD_MERGE:
if (!req_attempt_discard_merge(q, req, next))
return NULL;
} else if (!ll_merge_requests_fn(q, req, next))
break;
case ELEVATOR_BACK_MERGE:
if (!ll_merge_requests_fn(q, req, next))
return NULL;
break;
default:
return NULL;
}
/*
* If failfast settings disagree or any of the two is already
@ -855,8 +882,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
if (req_op(rq) == REQ_OP_DISCARD &&
queue_max_discard_segments(rq->q) > 1) {
if (blk_discard_mergable(rq)) {
return ELEVATOR_DISCARD_MERGE;
} else if (blk_rq_pos(rq) + blk_rq_sectors(rq) ==
bio->bi_iter.bi_sector) {


@ -997,8 +997,6 @@ void blk_unregister_queue(struct gendisk *disk)
kobject_del(&q->kobj);
blk_trace_remove_sysfs(disk_to_dev(disk));
rq_qos_exit(q);
mutex_lock(&q->sysfs_lock);
if (q->request_fn || (q->mq_ops && q->elevator))
elv_unregister_queue(q);


@ -1400,8 +1400,8 @@ static int test_comp(struct crypto_comp *tfm,
int ilen;
unsigned int dlen = COMP_BUF_SIZE;
memset(output, 0, sizeof(COMP_BUF_SIZE));
memset(decomp_output, 0, sizeof(COMP_BUF_SIZE));
memset(output, 0, COMP_BUF_SIZE);
memset(decomp_output, 0, COMP_BUF_SIZE);
ilen = ctemplate[i].inlen;
ret = crypto_comp_compress(tfm, ctemplate[i].input,
@ -1445,7 +1445,7 @@ static int test_comp(struct crypto_comp *tfm,
int ilen;
unsigned int dlen = COMP_BUF_SIZE;
memset(decomp_output, 0, sizeof(COMP_BUF_SIZE));
memset(decomp_output, 0, COMP_BUF_SIZE);
ilen = dtemplate[i].inlen;
ret = crypto_comp_decompress(tfm, dtemplate[i].input,


@ -228,7 +228,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
if (node < 0)
node = memory_add_physaddr_to_nid(info->start_addr);
result = add_memory(node, info->start_addr, info->length);
result = __add_memory(node, info->start_addr, info->length);
/*
* If the memory block has been used by the kernel, add_memory()


@ -1550,6 +1550,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
*/
static const struct acpi_device_id i2c_multi_instantiate_ids[] = {
{"BSG1160", },
{"INT33FE", },
{}
};


@ -126,7 +126,7 @@ static unsigned long dummy[2] = {0,0};
#define zin_n(r) inl(zatm_dev->base+r*4)
#define zin(r) inl(zatm_dev->base+uPD98401_##r*4)
#define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4)
#define zwait while (zin(CMR) & uPD98401_BUSY)
#define zwait() do {} while (zin(CMR) & uPD98401_BUSY)
/* RX0, RX1, TX0, TX1 */
static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 };
@ -140,7 +140,7 @@ static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */
static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
{
zwait;
zwait();
zout(value,CER);
zout(uPD98401_IND_ACC | uPD98401_IA_BALL |
(uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
@ -149,10 +149,10 @@ static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr)
{
zwait;
zwait();
zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW |
(uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
zwait;
zwait();
return zin(CER);
}
@ -241,7 +241,7 @@ static void refill_pool(struct atm_dev *dev,int pool)
}
if (first) {
spin_lock_irqsave(&zatm_dev->lock, flags);
zwait;
zwait();
zout(virt_to_bus(first),CER);
zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count,
CMR);
@ -508,9 +508,9 @@ static int open_rx_first(struct atm_vcc *vcc)
}
if (zatm_vcc->pool < 0) return -EMSGSIZE;
spin_lock_irqsave(&zatm_dev->lock, flags);
zwait;
zwait();
zout(uPD98401_OPEN_CHAN,CMR);
zwait;
zwait();
DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
spin_unlock_irqrestore(&zatm_dev->lock, flags);
@ -571,21 +571,21 @@ static void close_rx(struct atm_vcc *vcc)
pos = vcc->vci >> 1;
shift = (1-(vcc->vci & 1)) << 4;
zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos);
zwait;
zwait();
zout(uPD98401_NOP,CMR);
zwait;
zwait();
zout(uPD98401_NOP,CMR);
spin_unlock_irqrestore(&zatm_dev->lock, flags);
}
spin_lock_irqsave(&zatm_dev->lock, flags);
zwait;
zwait();
zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
uPD98401_CHAN_ADDR_SHIFT),CMR);
zwait;
zwait();
udelay(10); /* why oh why ... ? */
zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
uPD98401_CHAN_ADDR_SHIFT),CMR);
zwait;
zwait();
if (!(zin(CMR) & uPD98401_CHAN_ADDR))
printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel "
"%d\n",vcc->dev->number,zatm_vcc->rx_chan);
@ -699,7 +699,7 @@ printk("NONONONOO!!!!\n");
skb_queue_tail(&zatm_vcc->tx_queue,skb);
DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
uPD98401_TXVC_QRP));
zwait;
zwait();
zout(uPD98401_TX_READY | (zatm_vcc->tx_chan <<
uPD98401_CHAN_ADDR_SHIFT),CMR);
spin_unlock_irqrestore(&zatm_dev->lock, flags);
@ -891,12 +891,12 @@ static void close_tx(struct atm_vcc *vcc)
}
spin_lock_irqsave(&zatm_dev->lock, flags);
#if 0
zwait;
zwait();
zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
#endif
zwait;
zwait();
zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
zwait;
zwait();
if (!(zin(CMR) & uPD98401_CHAN_ADDR))
printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel "
"%d\n",vcc->dev->number,chan);
@ -926,9 +926,9 @@ static int open_tx_first(struct atm_vcc *vcc)
zatm_vcc->tx_chan = 0;
if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
spin_lock_irqsave(&zatm_dev->lock, flags);
zwait;
zwait();
zout(uPD98401_OPEN_CHAN,CMR);
zwait;
zwait();
DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
spin_unlock_irqrestore(&zatm_dev->lock, flags);
@ -1557,7 +1557,7 @@ static void zatm_phy_put(struct atm_dev *dev,unsigned char value,
struct zatm_dev *zatm_dev;
zatm_dev = ZATM_DEV(dev);
zwait;
zwait();
zout(value,CER);
zout(uPD98401_IND_ACC | uPD98401_IA_B0 |
(uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
@ -1569,10 +1569,10 @@ static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr)
struct zatm_dev *zatm_dev;
zatm_dev = ZATM_DEV(dev);
zwait;
zwait();
zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW |
(uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
zwait;
zwait();
return zin(CER) & 0xff;
}
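
An aside on the zwait change above (illustration, not part of the patch): the old object-like macro made "zwait;" expand to a while loop whose entire body is the bare trailing semicolon, which clang reports via -Wempty-body (this is the "empty body Clang warnings" entry in the changelog); the function-like "zwait()" keeps the same busy-wait on the CMR register but writes the empty body out explicitly:

	/* Old call site, after preprocessing -- clang: "while loop has empty body" */
	while (zin(CMR) & uPD98401_BUSY);
	/* New call site, after preprocessing -- empty body is explicit, no warning */
	do {} while (zin(CMR) & uPD98401_BUSY);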


@ -228,7 +228,6 @@ static bool pages_correctly_probed(unsigned long start_pfn)
/*
* MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
* OK to have direct references to sparsemem variables in here.
* Must already be protected by mem_hotplug_begin().
*/
static int
memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
@ -294,7 +293,6 @@ static int memory_subsys_online(struct device *dev)
if (mem->online_type < 0)
mem->online_type = MMOP_ONLINE_KEEP;
/* Already under protection of mem_hotplug_begin() */
ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
/* clear online_type */
@ -341,19 +339,11 @@ store_mem_state(struct device *dev,
goto err;
}
/*
* Memory hotplug needs to hold mem_hotplug_begin() for probe to find
* the correct memory block to online before doing device_online(dev),
* which will take dev->mutex. Take the lock early to prevent an
* inversion, memory_subsys_online() callbacks will be implemented by
* assuming it's already protected.
*/
mem_hotplug_begin();
switch (online_type) {
case MMOP_ONLINE_KERNEL:
case MMOP_ONLINE_MOVABLE:
case MMOP_ONLINE_KEEP:
/* mem->online_type is protected by device_hotplug_lock */
mem->online_type = online_type;
ret = device_online(&mem->dev);
break;
@ -364,7 +354,6 @@ store_mem_state(struct device *dev,
ret = -EINVAL; /* should never happen */
}
mem_hotplug_done();
err:
unlock_device_hotplug();
@ -519,15 +508,20 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
return -EINVAL;
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
nid = memory_add_physaddr_to_nid(phys_addr);
ret = add_memory(nid, phys_addr,
MIN_MEMORY_BLOCK_SIZE * sections_per_block);
ret = __add_memory(nid, phys_addr,
MIN_MEMORY_BLOCK_SIZE * sections_per_block);
if (ret)
goto out;
ret = count;
out:
unlock_device_hotplug();
return ret;
}


@ -467,6 +467,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
return -EAGAIN;
}
/* Default to shallowest state. */
if (!genpd->gov)
genpd->state_idx = 0;
if (genpd->power_off) {
int ret;
@ -1686,6 +1690,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
ret = genpd_set_default_power_state(genpd);
if (ret)
return ret;
} else if (!gov) {
pr_warn("%s : no governor for states\n", genpd->name);
}
device_initialize(&genpd->dev);


@ -1701,11 +1701,41 @@ static const struct block_device_operations floppy_fops = {
.check_events = amiga_check_events,
};
static struct gendisk *fd_alloc_disk(int drive)
{
struct gendisk *disk;
disk = alloc_disk(1);
if (!disk)
goto out;
disk->queue = blk_init_queue(do_fd_request, &amiflop_lock);
if (IS_ERR(disk->queue)) {
disk->queue = NULL;
goto out_put_disk;
}
unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL);
if (!unit[drive].trackbuf)
goto out_cleanup_queue;
return disk;
out_cleanup_queue:
blk_cleanup_queue(disk->queue);
disk->queue = NULL;
out_put_disk:
put_disk(disk);
out:
unit[drive].type->code = FD_NODRIVE;
return NULL;
}
static int __init fd_probe_drives(void)
{
int drive,drives,nomem;
printk(KERN_INFO "FD: probing units\nfound ");
pr_info("FD: probing units\nfound");
drives=0;
nomem=0;
for(drive=0;drive<FD_MAX_UNITS;drive++) {
@ -1713,27 +1743,17 @@ static int __init fd_probe_drives(void)
fd_probe(drive);
if (unit[drive].type->code == FD_NODRIVE)
continue;
disk = alloc_disk(1);
disk = fd_alloc_disk(drive);
if (!disk) {
unit[drive].type->code = FD_NODRIVE;
pr_cont(" no mem for fd%d", drive);
nomem = 1;
continue;
}
unit[drive].gendisk = disk;
disk->queue = blk_init_queue(do_fd_request, &amiflop_lock);
if (!disk->queue) {
unit[drive].type->code = FD_NODRIVE;
continue;
}
drives++;
if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) {
printk("no mem for ");
unit[drive].type = &drive_types[num_dr_types - 1]; /* FD_NODRIVE */
drives--;
nomem = 1;
}
printk("fd%d ",drive);
pr_cont(" fd%d",drive);
disk->major = FLOPPY_MAJOR;
disk->first_minor = drive;
disk->fops = &floppy_fops;
@ -1744,11 +1764,11 @@ static int __init fd_probe_drives(void)
}
if ((drives > 0) || (nomem == 0)) {
if (drives == 0)
printk("no drives");
printk("\n");
pr_cont(" no drives");
pr_cont("\n");
return drives;
}
printk("\n");
pr_cont("\n");
return -ENOMEM;
}
@ -1831,30 +1851,6 @@ static int __init amiga_floppy_probe(struct platform_device *pdev)
return ret;
}
#if 0 /* not safe to unload */
static int __exit amiga_floppy_remove(struct platform_device *pdev)
{
int i;
for( i = 0; i < FD_MAX_UNITS; i++) {
if (unit[i].type->code != FD_NODRIVE) {
struct request_queue *q = unit[i].gendisk->queue;
del_gendisk(unit[i].gendisk);
put_disk(unit[i].gendisk);
kfree(unit[i].trackbuf);
if (q)
blk_cleanup_queue(q);
}
}
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
free_irq(IRQ_AMIGA_CIAA_TB, NULL);
free_irq(IRQ_AMIGA_DSKBLK, NULL);
custom.dmacon = DMAF_DISK; /* disable DMA */
amiga_chip_free(raw_buf);
unregister_blkdev(FLOPPY_MAJOR, "fd");
}
#endif
static struct platform_driver amiga_floppy_driver = {
.driver = {
.name = "amiga-floppy",


@ -945,6 +945,7 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
if (sock->ops->shutdown == sock_no_shutdown) {
dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
*err = -EINVAL;
sockfd_put(sock);
return NULL;
}
@ -983,14 +984,15 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
sockfd_put(sock);
return -ENOMEM;
}
config->socks = socks;
nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
if (!nsock) {
sockfd_put(sock);
return -ENOMEM;
}
config->socks = socks;
nsock->fallback_index = -1;
nsock->dead = false;
mutex_init(&nsock->tx_lock);


@ -1416,7 +1416,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
case SKD_CHECK_STATUS_BUSY_IMMINENT:
skd_log_skreq(skdev, skreq, "retry(busy)");
blk_requeue_request(skdev->queue, req);
blk_mq_requeue_request(req, true);
dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
skdev->timer_countdown = SKD_TIMER_MINUTES(20);
@ -1426,7 +1426,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
case SKD_CHECK_STATUS_REQUEUE_REQUEST:
if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
skd_log_skreq(skdev, skreq, "retry");
blk_requeue_request(skdev->queue, req);
blk_mq_requeue_request(req, true);
break;
}
/* fall through */


@ -606,6 +606,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
if (*ptr == 0xc0) {
BT_ERR("Short BCSP packet");
kfree_skb(bcsp->rx_skb);
bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_START;
bcsp->rx_count = 0;
} else
@ -621,6 +622,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
BT_ERR("Error in BCSP hdr checksum");
kfree_skb(bcsp->rx_skb);
bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
bcsp->rx_count = 0;
continue;
@ -645,6 +647,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
bscp_get_crc(bcsp));
kfree_skb(bcsp->rx_skb);
bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
bcsp->rx_count = 0;
continue;


@ -411,10 +411,10 @@ static int cdrom_get_disc_info(struct cdrom_device_info *cdi,
* hack to have the capability flags defined const, while we can still
* change it here without gcc complaining at every line.
*/
#define ENSURE(call, bits) \
do { \
if (cdo->call == NULL) \
*change_capability &= ~(bits); \
#define ENSURE(cdo, call, bits) \
do { \
if (cdo->call == NULL) \
WARN_ON_ONCE((cdo)->capability & (bits)); \
} while (0)
/*
@ -590,7 +590,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
{
static char banner_printed;
const struct cdrom_device_ops *cdo = cdi->ops;
int *change_capability = (int *)&cdo->capability; /* hack */
cd_dbg(CD_OPEN, "entering register_cdrom\n");
@ -602,16 +601,16 @@ int register_cdrom(struct cdrom_device_info *cdi)
cdrom_sysctl_register();
}
ENSURE(drive_status, CDC_DRIVE_STATUS);
ENSURE(cdo, drive_status, CDC_DRIVE_STATUS);
if (cdo->check_events == NULL && cdo->media_changed == NULL)
*change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC);
ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
ENSURE(lock_door, CDC_LOCK);
ENSURE(select_speed, CDC_SELECT_SPEED);
ENSURE(get_last_session, CDC_MULTI_SESSION);
ENSURE(get_mcn, CDC_MCN);
ENSURE(reset, CDC_RESET);
ENSURE(generic_packet, CDC_GENERIC_PACKET);
WARN_ON_ONCE(cdo->capability & (CDC_MEDIA_CHANGED | CDC_SELECT_DISC));
ENSURE(cdo, tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
ENSURE(cdo, lock_door, CDC_LOCK);
ENSURE(cdo, select_speed, CDC_SELECT_SPEED);
ENSURE(cdo, get_last_session, CDC_MULTI_SESSION);
ENSURE(cdo, get_mcn, CDC_MCN);
ENSURE(cdo, reset, CDC_RESET);
ENSURE(cdo, generic_packet, CDC_GENERIC_PACKET);
cdi->mc_flags = 0;
cdi->options = CDO_USE_FFLAGS;


@ -1349,24 +1349,24 @@ static void set_console_size(struct port *port, u16 rows, u16 cols)
port->cons.ws.ws_col = cols;
}
static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
{
struct port_buffer *buf;
unsigned int nr_added_bufs;
int nr_added_bufs;
int ret;
nr_added_bufs = 0;
do {
buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
if (!buf)
break;
return -ENOMEM;
spin_lock_irq(lock);
ret = add_inbuf(vq, buf);
if (ret < 0) {
spin_unlock_irq(lock);
free_buf(buf, true);
break;
return ret;
}
nr_added_bufs++;
spin_unlock_irq(lock);
@ -1386,7 +1386,6 @@ static int add_port(struct ports_device *portdev, u32 id)
char debugfs_name[16];
struct port *port;
dev_t devt;
unsigned int nr_added_bufs;
int err;
port = kmalloc(sizeof(*port), GFP_KERNEL);
@ -1445,11 +1444,13 @@ static int add_port(struct ports_device *portdev, u32 id)
spin_lock_init(&port->outvq_lock);
init_waitqueue_head(&port->waitqueue);
/* Fill the in_vq with buffers so the host can send us data. */
nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
if (!nr_added_bufs) {
/* We can safely ignore ENOSPC because it means
* the queue already has buffers. Buffers are removed
* only by virtcons_remove(), not by unplug_port()
*/
err = fill_queue(port->in_vq, &port->inbuf_lock);
if (err < 0 && err != -ENOSPC) {
dev_err(port->dev, "Error allocating inbufs\n");
err = -ENOMEM;
goto free_device;
}
@ -2083,14 +2084,11 @@ static int virtcons_probe(struct virtio_device *vdev)
INIT_WORK(&portdev->control_work, &control_work_handler);
if (multiport) {
unsigned int nr_added_bufs;
spin_lock_init(&portdev->c_ivq_lock);
spin_lock_init(&portdev->c_ovq_lock);
nr_added_bufs = fill_queue(portdev->c_ivq,
&portdev->c_ivq_lock);
if (!nr_added_bufs) {
err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
if (err < 0) {
dev_err(&vdev->dev,
"Error allocating buffers for control queue\n");
/*
@ -2101,7 +2099,7 @@ static int virtcons_probe(struct virtio_device *vdev)
VIRTIO_CONSOLE_DEVICE_READY, 0);
/* Device was functional: we need full cleanup. */
virtcons_remove(vdev);
return -ENOMEM;
return err;
}
} else {
/*


@ -509,7 +509,7 @@ static void __init of_sama5d2_clk_audio_pll_pad_setup(struct device_node *np)
static void __init of_sama5d2_clk_audio_pll_pmc_setup(struct device_node *np)
{
struct clk_audio_pad *apmc_ck;
struct clk_audio_pmc *apmc_ck;
struct clk_init_data init = {};
apmc_ck = kzalloc(sizeof(*apmc_ck), GFP_KERNEL);


@ -227,8 +227,8 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
/* The gate clocks has mux parent. */
{MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH1, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH1, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
{MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock},
{MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock},


@ -158,7 +158,12 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
#define SUN50I_A64_PLL_MIPI_REG 0x040
static struct ccu_nkm pll_mipi_clk = {
.enable = BIT(31),
/*
* The bit 23 and 22 are called "LDO{1,2}_EN" on the SoC's
* user manual, and by experiments the PLL doesn't work without
* these bits toggled.
*/
.enable = BIT(31) | BIT(23) | BIT(22),
.lock = BIT(28),
.n = _SUNXI_CCU_MULT(8, 4),
.k = _SUNXI_CCU_MULT_MIN(4, 2, 2),


@ -578,7 +578,6 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
[tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true },
[tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true },
[tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true },
[tegra_clk_emc] = { .dt_id = TEGRA20_CLK_EMC, .present = true },
};
static unsigned long tegra20_clk_measure_input_freq(void)
@ -799,6 +798,31 @@ static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
TEGRA_INIT_DATA_NODIV("disp2", mux_pllpdc_clkm, CLK_SOURCE_DISP2, 30, 2, 26, 0, TEGRA20_CLK_DISP2),
};
static void __init tegra20_emc_clk_init(void)
{
struct clk *clk;
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
ARRAY_SIZE(mux_pllmcp_clkm),
CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
30, 2, 0, &emc_lock);
clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
&emc_lock);
clks[TEGRA20_CLK_MC] = clk;
/*
* Note that 'emc_mux' source and 'emc' rate shouldn't be changed at
* the same time due to a HW bug, this won't happen because we're
* defining 'emc_mux' and 'emc' as distinct clocks.
*/
clk = tegra_clk_register_divider("emc", "emc_mux",
clk_base + CLK_SOURCE_EMC, CLK_IS_CRITICAL,
TEGRA_DIVIDER_INT, 0, 8, 1, &emc_lock);
clks[TEGRA20_CLK_EMC] = clk;
}
static void __init tegra20_periph_clk_init(void)
{
struct tegra_periph_init_data *data;
@ -812,15 +836,7 @@ static void __init tegra20_periph_clk_init(void)
clks[TEGRA20_CLK_AC97] = clk;
/* emc */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
ARRAY_SIZE(mux_pllmcp_clkm),
CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
30, 2, 0, &emc_lock);
clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
&emc_lock);
clks[TEGRA20_CLK_MC] = clk;
tegra20_emc_clk_init();
/* dsi */
clk = tegra_clk_register_periph_gate("dsi", "pll_d", 0, clk_base, 0,


@ -2603,7 +2603,7 @@ static struct tegra210_domain_mbist_war tegra210_pg_mbist_war[] = {
[TEGRA_POWERGATE_MPE] = {
.handle_lvl2_ovr = tegra210_generic_mbist_war,
.lvl2_offset = LVL2_CLK_GATE_OVRE,
.lvl2_mask = BIT(2),
.lvl2_mask = BIT(29),
},
[TEGRA_POWERGATE_SOR] = {
.handle_lvl2_ovr = tegra210_generic_mbist_war,
@ -2654,14 +2654,14 @@ static struct tegra210_domain_mbist_war tegra210_pg_mbist_war[] = {
.num_clks = ARRAY_SIZE(nvdec_slcg_clkids),
.clk_init_data = nvdec_slcg_clkids,
.handle_lvl2_ovr = tegra210_generic_mbist_war,
.lvl2_offset = LVL2_CLK_GATE_OVRC,
.lvl2_offset = LVL2_CLK_GATE_OVRE,
.lvl2_mask = BIT(9) | BIT(31),
},
[TEGRA_POWERGATE_NVJPG] = {
.num_clks = ARRAY_SIZE(nvjpg_slcg_clkids),
.clk_init_data = nvjpg_slcg_clkids,
.handle_lvl2_ovr = tegra210_generic_mbist_war,
.lvl2_offset = LVL2_CLK_GATE_OVRC,
.lvl2_offset = LVL2_CLK_GATE_OVRE,
.lvl2_mask = BIT(9) | BIT(31),
},
[TEGRA_POWERGATE_AUD] = {


@ -917,6 +917,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
struct freq_attr *fattr = to_attr(attr);
ssize_t ret;
if (!fattr->show)
return -EIO;
down_read(&policy->rwsem);
ret = fattr->show(policy, buf);
up_read(&policy->rwsem);
@ -931,6 +934,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
if (!fattr->store)
return -EIO;
/*
* cpus_read_trylock() is used here to work around a circular lock
* dependency problem with respect to the cpufreq_register_driver().


@ -449,8 +449,7 @@ static inline void set_flow_mode(struct cc_hw_desc *pdesc,
* @pdesc: pointer HW descriptor struct
* @mode: Any one of the modes defined in [CC7x-DESC]
*/
static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
enum drv_cipher_mode mode)
static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode)
{
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
}
@ -461,8 +460,7 @@ static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
* @pdesc: pointer HW descriptor struct
* @mode: Any one of the modes defined in [CC7x-DESC]
*/
static inline void set_cipher_config0(struct cc_hw_desc *pdesc,
enum drv_crypto_direction mode)
static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode)
{
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
}


@ -257,7 +257,7 @@ static struct devfreq_governor *try_then_request_governor(const char *name)
/* Restore previous state before return */
mutex_lock(&devfreq_list_lock);
if (err)
return ERR_PTR(err);
return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL);
governor = find_devfreq_governor(name);
}


@ -1884,7 +1884,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
default:
dev_err(&l2c->pdev->dev, "Unsupported device: %04x\n",
l2c->pdev->device);
return IRQ_NONE;
goto err_free;
}
while (CIRC_CNT(l2c->ring_head, l2c->ring_tail,
@ -1906,7 +1906,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
l2c->ring_tail++;
}
return IRQ_HANDLED;
ret = IRQ_HANDLED;
err_free:
kfree(other);


@ -480,11 +480,10 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
if (count < sizeof(u32))
return -EINVAL;
param.type = *(u32 *)buf;
count -= sizeof(u32);
buf += sizeof(u32);
/* The remaining buffer is the data payload */
if (count > gsmi_dev.data_buf->length)
if ((count - sizeof(u32)) > gsmi_dev.data_buf->length)
return -EINVAL;
param.data_len = count - sizeof(u32);
@ -504,7 +503,7 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
return rc;
return (rc == 0) ? count : rc;
}


@ -163,13 +163,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
case 0:
val = MAX77620_CNFG_GPIO_DBNC_None;
break;
case 1000 ... 8000:
case 1 ... 8000:
val = MAX77620_CNFG_GPIO_DBNC_8ms;
break;
case 9000 ... 16000:
case 8001 ... 16000:
val = MAX77620_CNFG_GPIO_DBNC_16ms;
break;
case 17000 ... 32000:
case 16001 ... 32000:
val = MAX77620_CNFG_GPIO_DBNC_32ms;
break;
default:


@ -3472,18 +3472,31 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
{
struct amdgpu_device *adev = hwmgr->adev;
int i;
u32 tmp = 0;
if (!query)
return -EINVAL;
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
*query = tmp;
/*
* PPSMC_MSG_GetCurrPkgPwr is not supported on:
* - Hawaii
* - Bonaire
* - Fiji
* - Tonga
*/
if ((adev->asic_type != CHIP_HAWAII) &&
(adev->asic_type != CHIP_BONAIRE) &&
(adev->asic_type != CHIP_FIJI) &&
(adev->asic_type != CHIP_TONGA)) {
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
*query = tmp;
if (tmp != 0)
return 0;
if (tmp != 0)
return 0;
}
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,


@ -691,8 +691,28 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
i915_gem_gtt_finish_pages(obj, pages);
for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty)
if (obj->mm.dirty && trylock_page(page)) {
/*
* As this may not be anonymous memory (e.g. shmem)
* but exist on a real mapping, we have to lock
* the page in order to dirty it -- holding
* the page reference is not sufficient to
* prevent the inode from being truncated.
* Play safe and take the lock.
*
* However...!
*
* The mmu-notifier can be invalidated for a
* migrate_page, that is alreadying holding the lock
* on the page. Such a try_to_unmap() will result
* in us calling put_pages() and so recursively try
* to lock the page. We avoid that deadlock with
* a trylock_page() and in exchange we risk missing
* some page dirtying.
*/
set_page_dirty(page);
unlock_page(page);
}
mark_page_accessed(page);
put_page(page);


@ -827,8 +827,8 @@ create_event_attributes(struct drm_i915_private *i915)
const char *name;
const char *unit;
} events[] = {
__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
};


@ -98,6 +98,7 @@ struct uniphier_fi2c_priv {
unsigned int flags;
unsigned int busy_cnt;
unsigned int clk_cycle;
spinlock_t lock; /* IRQ synchronization */
};
static void uniphier_fi2c_fill_txfifo(struct uniphier_fi2c_priv *priv,
@ -142,9 +143,10 @@ static void uniphier_fi2c_set_irqs(struct uniphier_fi2c_priv *priv)
writel(priv->enabled_irqs, priv->membase + UNIPHIER_FI2C_IE);
}
static void uniphier_fi2c_clear_irqs(struct uniphier_fi2c_priv *priv)
static void uniphier_fi2c_clear_irqs(struct uniphier_fi2c_priv *priv,
u32 mask)
{
writel(-1, priv->membase + UNIPHIER_FI2C_IC);
writel(mask, priv->membase + UNIPHIER_FI2C_IC);
}
static void uniphier_fi2c_stop(struct uniphier_fi2c_priv *priv)
@ -162,7 +164,10 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id)
struct uniphier_fi2c_priv *priv = dev_id;
u32 irq_status;
spin_lock(&priv->lock);
irq_status = readl(priv->membase + UNIPHIER_FI2C_INT);
irq_status &= priv->enabled_irqs;
dev_dbg(&priv->adap.dev,
"interrupt: enabled_irqs=%04x, irq_status=%04x\n",
@ -207,7 +212,13 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id)
if (irq_status & (UNIPHIER_FI2C_INT_RF | UNIPHIER_FI2C_INT_RB)) {
uniphier_fi2c_drain_rxfifo(priv);
if (!priv->len)
/*
* If the number of bytes to read is multiple of the FIFO size
* (msg->len == 8, 16, 24, ...), the INT_RF bit is set a little
* earlier than INT_RB. We wait for INT_RB to confirm the
* completion of the current message.
*/
if (!priv->len && (irq_status & UNIPHIER_FI2C_INT_RB))
goto data_done;
if (unlikely(priv->flags & UNIPHIER_FI2C_MANUAL_NACK)) {
@ -230,6 +241,8 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id)
goto handled;
}
spin_unlock(&priv->lock);
return IRQ_NONE;
data_done:
@ -244,7 +257,14 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id)
}
handled:
uniphier_fi2c_clear_irqs(priv);
/*
* This controller makes a pause while any bit of the IRQ status is
* asserted. Clear the asserted bit to kick the controller just before
* exiting the handler.
*/
uniphier_fi2c_clear_irqs(priv, irq_status);
spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
@ -252,6 +272,8 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id)
static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr)
{
priv->enabled_irqs |= UNIPHIER_FI2C_INT_TE;
uniphier_fi2c_set_irqs(priv);
/* do not use TX byte counter */
writel(0, priv->membase + UNIPHIER_FI2C_TBC);
/* set slave address */
@ -284,6 +306,8 @@ static void uniphier_fi2c_rx_init(struct uniphier_fi2c_priv *priv, u16 addr)
priv->enabled_irqs |= UNIPHIER_FI2C_INT_RF;
}
uniphier_fi2c_set_irqs(priv);
/* set slave address with RD bit */
writel(UNIPHIER_FI2C_DTTX_CMD | UNIPHIER_FI2C_DTTX_RD | addr << 1,
priv->membase + UNIPHIER_FI2C_DTTX);
@ -307,14 +331,16 @@ static void uniphier_fi2c_recover(struct uniphier_fi2c_priv *priv)
}
static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap,
struct i2c_msg *msg, bool stop)
struct i2c_msg *msg, bool repeat,
bool stop)
{
struct uniphier_fi2c_priv *priv = i2c_get_adapdata(adap);
bool is_read = msg->flags & I2C_M_RD;
unsigned long time_left;
unsigned long time_left, flags;
dev_dbg(&adap->dev, "%s: addr=0x%02x, len=%d, stop=%d\n",
is_read ? "receive" : "transmit", msg->addr, msg->len, stop);
dev_dbg(&adap->dev, "%s: addr=0x%02x, len=%d, repeat=%d, stop=%d\n",
is_read ? "receive" : "transmit", msg->addr, msg->len,
repeat, stop);
priv->len = msg->len;
priv->buf = msg->buf;
@ -326,22 +352,36 @@ static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap,
priv->flags |= UNIPHIER_FI2C_STOP;
reinit_completion(&priv->comp);
uniphier_fi2c_clear_irqs(priv);
uniphier_fi2c_clear_irqs(priv, U32_MAX);
writel(UNIPHIER_FI2C_RST_TBRST | UNIPHIER_FI2C_RST_RBRST,
priv->membase + UNIPHIER_FI2C_RST); /* reset TX/RX FIFO */
spin_lock_irqsave(&priv->lock, flags);
if (is_read)
uniphier_fi2c_rx_init(priv, msg->addr);
else
uniphier_fi2c_tx_init(priv, msg->addr);
uniphier_fi2c_set_irqs(priv);
dev_dbg(&adap->dev, "start condition\n");
writel(UNIPHIER_FI2C_CR_MST | UNIPHIER_FI2C_CR_STA,
priv->membase + UNIPHIER_FI2C_CR);
/*
* For a repeated START condition, writing a slave address to the FIFO
* kicks the controller. So, the UNIPHIER_FI2C_CR register should be
* written only for a non-repeated START condition.
*/
if (!repeat)
writel(UNIPHIER_FI2C_CR_MST | UNIPHIER_FI2C_CR_STA,
priv->membase + UNIPHIER_FI2C_CR);
spin_unlock_irqrestore(&priv->lock, flags);
time_left = wait_for_completion_timeout(&priv->comp, adap->timeout);
spin_lock_irqsave(&priv->lock, flags);
priv->enabled_irqs = 0;
uniphier_fi2c_set_irqs(priv);
spin_unlock_irqrestore(&priv->lock, flags);
if (!time_left) {
dev_err(&adap->dev, "transaction timeout.\n");
uniphier_fi2c_recover(priv);
@ -394,6 +434,7 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num)
{
struct i2c_msg *msg, *emsg = msgs + num;
bool repeat = false;
int ret;
ret = uniphier_fi2c_check_bus_busy(adap);
@ -404,9 +445,11 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
/* Emit STOP if it is the last message or I2C_M_STOP is set. */
bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
ret = uniphier_fi2c_master_xfer_one(adap, msg, repeat, stop);
if (ret)
return ret;
repeat = !stop;
}
return num;
@ -546,6 +589,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
priv->clk_cycle = clk_rate / bus_speed;
init_completion(&priv->comp);
spin_lock_init(&priv->lock);
priv->adap.owner = THIS_MODULE;
priv->adap.algo = &uniphier_fi2c_algo;
priv->adap.dev.parent = dev;


@ -120,6 +120,8 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_HAVE_L2_REF 3
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
#define BNXT_RE_FLAG_QOS_WORK_REG 5
#define BNXT_RE_FLAG_RESOURCES_ALLOCATED 7
#define BNXT_RE_FLAG_RESOURCES_INITIALIZED 8
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
unsigned int version, major, minor;


@ -864,10 +864,8 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
int i;
if (rdev->nq[0].hwq.max_elements) {
for (i = 1; i < rdev->num_msix; i++)
bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
}
for (i = 1; i < rdev->num_msix; i++)
bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
if (rdev->qplib_res.rcfw)
bnxt_qplib_cleanup_res(&rdev->qplib_res);
@ -876,6 +874,7 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
int rc = 0, i;
int num_vec_enabled = 0;
bnxt_qplib_init_res(&rdev->qplib_res);
@ -891,9 +890,13 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
"Failed to enable NQ with rc = 0x%x", rc);
goto fail;
}
num_vec_enabled++;
}
return 0;
fail:
for (i = num_vec_enabled; i >= 0; i--)
bnxt_qplib_disable_nq(&rdev->nq[i]);
return rc;
}
@ -925,6 +928,7 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
int rc = 0, i;
int num_vec_created = 0;
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
@ -951,7 +955,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
if (rc) {
dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
i, rc);
goto dealloc_dpi;
goto free_nq;
}
rc = bnxt_re_net_ring_alloc
(rdev, rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr,
@ -964,14 +968,17 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
dev_err(rdev_to_dev(rdev),
"Failed to allocate NQ fw id with rc = 0x%x",
rc);
bnxt_qplib_free_nq(&rdev->nq[i]);
goto free_nq;
}
num_vec_created++;
}
return 0;
free_nq:
for (i = 0; i < rdev->num_msix - 1; i++)
for (i = num_vec_created; i >= 0; i--) {
bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
bnxt_qplib_free_nq(&rdev->nq[i]);
dealloc_dpi:
}
bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
&rdev->qplib_res.dpi_tbl,
&rdev->dpi_privileged);
@ -989,12 +996,17 @@ static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
struct ib_event ib_event;
ib_event.device = ibdev;
if (qp)
if (qp) {
ib_event.element.qp = qp;
else
ib_event.event = event;
if (qp->event_handler)
qp->event_handler(&ib_event, qp->qp_context);
} else {
ib_event.element.port_num = port_num;
ib_event.event = event;
ib_dispatch_event(&ib_event);
ib_event.event = event;
ib_dispatch_event(&ib_event);
}
}
#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN 0x02
@ -1201,8 +1213,11 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
cancel_delayed_work(&rdev->worker);
bnxt_re_cleanup_res(rdev);
bnxt_re_free_res(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
&rdev->flags))
bnxt_re_cleanup_res(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
bnxt_re_free_res(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
@ -1332,12 +1347,15 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
pr_err("Failed to allocate resources: %#x\n", rc);
goto fail;
}
set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
rc = bnxt_re_init_res(rdev);
if (rc) {
pr_err("Failed to initialize resources: %#x\n", rc);
goto fail;
}
set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);
if (!rdev->is_virtfn) {
rc = bnxt_re_setup_qos(rdev);
if (rc)


@ -614,13 +614,8 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
bnxt_qplib_rcfw_stop_irq(rcfw, true);
if (rcfw->cmdq_bar_reg_iomem)
iounmap(rcfw->cmdq_bar_reg_iomem);
rcfw->cmdq_bar_reg_iomem = NULL;
if (rcfw->creq_bar_reg_iomem)
iounmap(rcfw->creq_bar_reg_iomem);
rcfw->creq_bar_reg_iomem = NULL;
iounmap(rcfw->cmdq_bar_reg_iomem);
iounmap(rcfw->creq_bar_reg_iomem);
indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
if (indx != rcfw->bmap_size)
@ -629,6 +624,8 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
kfree(rcfw->cmdq_bitmap);
rcfw->bmap_size = 0;
rcfw->cmdq_bar_reg_iomem = NULL;
rcfw->creq_bar_reg_iomem = NULL;
rcfw->aeq_handler = NULL;
rcfw->vector = 0;
}
@ -714,6 +711,8 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
dev_err(&rcfw->pdev->dev,
"QPLIB: CREQ BAR region %d mapping failed",
rcfw->creq_bar_reg);
iounmap(rcfw->cmdq_bar_reg_iomem);
rcfw->cmdq_bar_reg_iomem = NULL;
return -ENOMEM;
}
rcfw->creq_qp_event_processed = 0;


@ -1180,8 +1180,7 @@ static int
ctrl_teimanager(struct manager *mgr, void *arg)
{
/* currently we only have one option */
int *val = (int *)arg;
int ret = 0;
unsigned int *val = (unsigned int *)arg;
switch (val[0]) {
case IMCLEAR_L2:
@ -1197,9 +1196,9 @@ ctrl_teimanager(struct manager *mgr, void *arg)
test_and_clear_bit(OPTION_L1_HOLD, &mgr->options);
break;
default:
ret = -EINVAL;
return -EINVAL;
}
return ret;
return 0;
}
/* This function does create a L2 for fixed TEI in NT Mode */


@ -22,14 +22,6 @@
#define VERSION "1.0"
#define DEBUG
#ifdef DEBUG
#define DBG(args...) printk(args)
#else
#define DBG(args...) do { } while(0)
#endif
/* If the cache is older than 800ms we'll refetch it */
#define MAX_AGE msecs_to_jiffies(800)
@ -106,13 +98,10 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
buf[i+2] = data[3];
buf[i+3] = data[2];
}
#ifdef DEBUG
DBG(KERN_DEBUG "sat %d partition %x:", sat_id, id);
for (i = 0; i < len; ++i)
DBG(" %x", buf[i]);
DBG("\n");
#endif
printk(KERN_DEBUG "sat %d partition %x:", sat_id, id);
print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,
16, 1, buf, len, false);
if (size)
*size = len;
return (struct smu_sdbp_header *) buf;
@ -132,13 +121,13 @@ static int wf_sat_read_cache(struct wf_sat *sat)
if (err < 0)
return err;
sat->last_read = jiffies;
#ifdef LOTSA_DEBUG
{
int i;
DBG(KERN_DEBUG "wf_sat_get: data is");
for (i = 0; i < 16; ++i)
DBG(" %.2x", sat->cache[i]);
DBG("\n");
printk(KERN_DEBUG "wf_sat_get: data is");
print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,
16, 1, sat->cache, 16, false);
}
#endif
return 0;


@ -2475,7 +2475,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
}
/* Enable bitmap creation for RAID levels != 0 */
mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096);
mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
if (!test_and_clear_bit(FirstUse, &rdev->flags)) {


@ -229,7 +229,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
out_free_pages:
while (--j >= 0)
resync_free_pages(&rps[j * 2]);
resync_free_pages(&rps[j]);
j = 0;
out_free_bio:


@ -1612,7 +1612,8 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
OV13858_NUM_OF_LINK_FREQS - 1,
0,
link_freq_menu_items);
ov13858->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
if (ov13858->link_freq)
ov13858->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]);
pixel_rate_min = link_freq_to_pixel_rate(link_freq_menu_items[1]);
@ -1635,7 +1636,8 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
ov13858->hblank = v4l2_ctrl_new_std(
ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_HBLANK,
hblank, hblank, 1, hblank);
ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
if (ov13858->hblank)
ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
exposure_max = mode->vts_def - 8;
ov13858->exposure = v4l2_ctrl_new_std(


@ -765,7 +765,11 @@ static int vivid_thread_vid_cap(void *data)
if (kthread_should_stop())
break;
mutex_lock(&dev->mutex);
if (!mutex_trylock(&dev->mutex)) {
schedule_timeout_uninterruptible(1);
continue;
}
cur_jiffies = jiffies;
if (dev->cap_seq_resync) {
dev->jiffies_vid_cap = cur_jiffies;
@ -918,8 +922,6 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
/* shutdown control thread */
vivid_grab_controls(dev, false);
mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_vid_cap);
dev->kthread_vid_cap = NULL;
mutex_lock(&dev->mutex);
}


@ -135,7 +135,11 @@ static int vivid_thread_vid_out(void *data)
if (kthread_should_stop())
break;
mutex_lock(&dev->mutex);
if (!mutex_trylock(&dev->mutex)) {
schedule_timeout_uninterruptible(1);
continue;
}
cur_jiffies = jiffies;
if (dev->out_seq_resync) {
dev->jiffies_vid_out = cur_jiffies;
@ -289,8 +293,6 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
/* shutdown control thread */
vivid_grab_controls(dev, false);
mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_vid_out);
dev->kthread_vid_out = NULL;
mutex_lock(&dev->mutex);
}


@ -137,7 +137,11 @@ static int vivid_thread_sdr_cap(void *data)
if (kthread_should_stop())
break;
mutex_lock(&dev->mutex);
if (!mutex_trylock(&dev->mutex)) {
schedule_timeout_uninterruptible(1);
continue;
}
cur_jiffies = jiffies;
if (dev->sdr_cap_seq_resync) {
dev->jiffies_sdr_cap = cur_jiffies;
@ -297,10 +301,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
}
/* shutdown control thread */
mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_sdr_cap);
dev->kthread_sdr_cap = NULL;
mutex_lock(&dev->mutex);
}
const struct vb2_ops vivid_sdr_cap_qops = {


@ -222,9 +222,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
if (vb2_is_streaming(&dev->vb_vid_out_q))
dev->can_loop_video = vivid_vid_can_loop(dev);
if (dev->kthread_vid_cap)
return 0;
dev->vid_cap_seq_count = 0;
dprintk(dev, 1, "%s\n", __func__);
for (i = 0; i < VIDEO_MAX_FRAME; i++)


@ -146,9 +146,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
if (vb2_is_streaming(&dev->vb_vid_cap_q))
dev->can_loop_video = vivid_vid_can_loop(dev);
if (dev->kthread_vid_out)
return 0;
dev->vid_out_seq_count = 0;
dprintk(dev, 1, "%s\n", __func__);
if (dev->start_streaming_error) {


@ -1607,8 +1607,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
spin_unlock_irqrestore(&ictx->kc_lock, flags);
/* send touchscreen events through input subsystem if touchpad data */
if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 &&
buf[7] == 0x86) {
if (ictx->touch && len == 8 && buf[7] == 0x86) {
imon_touch_event(ictx, buf);
return;


@ -537,6 +537,9 @@ static int flexcop_usb_probe(struct usb_interface *intf,
struct flexcop_device *fc = NULL;
int ret;
if (intf->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) {
err("out of memory\n");
return -ENOMEM;


@ -457,7 +457,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d)
{
u8 ircode[4];
cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4);
if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0)
return 0;
if (ircode[2] || ircode[3])
rc_keydown(d->rc_dev, RC_PROTO_NEC,


@ -327,6 +327,10 @@ static int usbvision_v4l2_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS;
if (usbvision->remove_pending) {
err_code = -ENODEV;
goto unlock;
}
if (usbvision->user) {
err_code = -EBUSY;
} else {
@ -390,6 +394,7 @@ static int usbvision_v4l2_open(struct file *file)
static int usbvision_v4l2_close(struct file *file)
{
struct usb_usbvision *usbvision = video_drvdata(file);
int r;
PDEBUG(DBG_IO, "close");
@ -404,9 +409,10 @@ static int usbvision_v4l2_close(struct file *file)
usbvision_scratch_free(usbvision);
usbvision->user--;
r = usbvision->remove_pending;
mutex_unlock(&usbvision->v4l2_lock);
if (usbvision->remove_pending) {
if (r) {
printk(KERN_INFO "%s: Final disconnect\n", __func__);
usbvision_release(usbvision);
return 0;
@ -1090,6 +1096,11 @@ static int usbvision_radio_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS;
if (usbvision->remove_pending) {
err_code = -ENODEV;
goto out;
}
err_code = v4l2_fh_open(file);
if (err_code)
goto out;
@ -1122,6 +1133,7 @@ static int usbvision_radio_open(struct file *file)
static int usbvision_radio_close(struct file *file)
{
struct usb_usbvision *usbvision = video_drvdata(file);
int r;
PDEBUG(DBG_IO, "");
@ -1134,9 +1146,10 @@ static int usbvision_radio_close(struct file *file)
usbvision_audio_off(usbvision);
usbvision->radio = 0;
usbvision->user--;
r = usbvision->remove_pending;
mutex_unlock(&usbvision->v4l2_lock);
if (usbvision->remove_pending) {
if (r) {
printk(KERN_INFO "%s: Final disconnect\n", __func__);
v4l2_fh_release(file);
usbvision_release(usbvision);
@ -1562,6 +1575,7 @@ static int usbvision_probe(struct usb_interface *intf,
static void usbvision_disconnect(struct usb_interface *intf)
{
struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));
int u;
PDEBUG(DBG_PROBE, "");
@ -1578,13 +1592,14 @@ static void usbvision_disconnect(struct usb_interface *intf)
v4l2_device_disconnect(&usbvision->v4l2_dev);
usbvision_i2c_unregister(usbvision);
usbvision->remove_pending = 1; /* Now all ISO data will be ignored */
u = usbvision->user;
usb_put_dev(usbvision->dev);
usbvision->dev = NULL; /* USB device is no more */
mutex_unlock(&usbvision->v4l2_lock);
if (usbvision->user) {
if (u) {
printk(KERN_INFO "%s: In use, disconnect pending\n",
__func__);
wake_up_interruptible(&usbvision->wait_frame);


@ -2124,6 +2124,20 @@ static int uvc_probe(struct usb_interface *intf,
sizeof(dev->name) - len);
}
/* Initialize the media device. */
#ifdef CONFIG_MEDIA_CONTROLLER
dev->mdev.dev = &intf->dev;
strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
if (udev->serial)
strscpy(dev->mdev.serial, udev->serial,
sizeof(dev->mdev.serial));
usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
media_device_init(&dev->mdev);
dev->vdev.mdev = &dev->mdev;
#endif
/* Parse the Video Class control descriptor. */
if (uvc_parse_control(dev) < 0) {
uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC "
@ -2144,19 +2158,7 @@ static int uvc_probe(struct usb_interface *intf,
"linux-uvc-devel mailing list.\n");
}
/* Initialize the media device and register the V4L2 device. */
#ifdef CONFIG_MEDIA_CONTROLLER
dev->mdev.dev = &intf->dev;
strlcpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
if (udev->serial)
strlcpy(dev->mdev.serial, udev->serial,
sizeof(dev->mdev.serial));
strcpy(dev->mdev.bus_info, udev->devpath);
dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
media_device_init(&dev->mdev);
dev->vdev.mdev = &dev->mdev;
#endif
/* Register the V4L2 device. */
if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
goto error;


@ -52,8 +52,10 @@ int arizona_clk32k_enable(struct arizona *arizona)
if (ret != 0)
goto err_ref;
ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]);
if (ret != 0)
goto err_pm;
if (ret != 0) {
pm_runtime_put_sync(arizona->dev);
goto err_ref;
}
break;
case ARIZONA_32KZ_MCLK2:
ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK2]);
@ -67,8 +69,6 @@ int arizona_clk32k_enable(struct arizona *arizona)
ARIZONA_CLK_32K_ENA);
}
err_pm:
pm_runtime_put_sync(arizona->dev);
err_ref:
if (ret != 0)
arizona->clk32k_ref--;


@ -31,8 +31,8 @@
/* Interrupt Status Registers */
#define BXTWC_IRQLVL1 0x4E02
#define BXTWC_PWRBTNIRQ 0x4E03
#define BXTWC_PWRBTNIRQ 0x4E03
#define BXTWC_THRM0IRQ 0x4E04
#define BXTWC_THRM1IRQ 0x4E05
#define BXTWC_THRM2IRQ 0x4E06
@ -47,10 +47,9 @@
/* Interrupt MASK Registers */
#define BXTWC_MIRQLVL1 0x4E0E
#define BXTWC_MPWRTNIRQ 0x4E0F
#define BXTWC_MIRQLVL1_MCHGR BIT(5)
#define BXTWC_MPWRBTNIRQ 0x4E0F
#define BXTWC_MTHRM0IRQ 0x4E12
#define BXTWC_MTHRM1IRQ 0x4E13
#define BXTWC_MTHRM2IRQ 0x4E14
@ -66,9 +65,7 @@
/* Whiskey Cove PMIC share same ACPI ID between different platforms */
#define BROXTON_PMIC_WC_HRV 4
/* Manage in two IRQ chips since mask registers are not consecutive */
enum bxtwc_irqs {
/* Level 1 */
BXTWC_PWRBTN_LVL1_IRQ = 0,
BXTWC_TMU_LVL1_IRQ,
BXTWC_THRM_LVL1_IRQ,
@ -77,9 +74,11 @@ enum bxtwc_irqs {
BXTWC_CHGR_LVL1_IRQ,
BXTWC_GPIO_LVL1_IRQ,
BXTWC_CRIT_LVL1_IRQ,
};
/* Level 2 */
BXTWC_PWRBTN_IRQ,
enum bxtwc_irqs_pwrbtn {
BXTWC_PWRBTN_IRQ = 0,
BXTWC_UIBTN_IRQ,
};
enum bxtwc_irqs_bcu {
@ -113,7 +112,10 @@ static const struct regmap_irq bxtwc_regmap_irqs[] = {
REGMAP_IRQ_REG(BXTWC_CHGR_LVL1_IRQ, 0, BIT(5)),
REGMAP_IRQ_REG(BXTWC_GPIO_LVL1_IRQ, 0, BIT(6)),
REGMAP_IRQ_REG(BXTWC_CRIT_LVL1_IRQ, 0, BIT(7)),
REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 1, 0x03),
};
static const struct regmap_irq bxtwc_regmap_irqs_pwrbtn[] = {
REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 0, 0x01),
};
static const struct regmap_irq bxtwc_regmap_irqs_bcu[] = {
@ -125,7 +127,7 @@ static const struct regmap_irq bxtwc_regmap_irqs_adc[] = {
};
static const struct regmap_irq bxtwc_regmap_irqs_chgr[] = {
REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, BIT(5)),
REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, 0x20),
REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, 0x1f),
REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, 0x1f),
};
@ -144,7 +146,16 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
.mask_base = BXTWC_MIRQLVL1,
.irqs = bxtwc_regmap_irqs,
.num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs),
.num_regs = 2,
.num_regs = 1,
};
static struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = {
.name = "bxtwc_irq_chip_pwrbtn",
.status_base = BXTWC_PWRBTNIRQ,
.mask_base = BXTWC_MPWRBTNIRQ,
.irqs = bxtwc_regmap_irqs_pwrbtn,
.num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs_pwrbtn),
.num_regs = 1,
};
static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
@ -472,6 +483,16 @@ static int bxtwc_probe(struct platform_device *pdev)
return ret;
}
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
BXTWC_PWRBTN_LVL1_IRQ,
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_pwrbtn,
&pmic->irq_chip_data_pwrbtn);
if (ret) {
dev_err(&pdev->dev, "Failed to add PWRBTN IRQ chip\n");
return ret;
}
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
BXTWC_TMU_LVL1_IRQ,
IRQF_ONESHOT,


@ -153,12 +153,6 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
pd->ono = irq_of_parse_and_map(dev->of_node, 1);
/*
* ToDo: the 'wakeup' member in the platform data is more of a linux
* specfic information. Hence, there is no binding for that yet and
* not parsed here.
*/
return pd;
}
@ -246,7 +240,7 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
*/
/* MAX8997 has a power button input. */
device_init_wakeup(max8997->dev, pdata->wakeup);
device_init_wakeup(max8997->dev, true);
return ret;


@ -278,7 +278,8 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
if (ret)
goto out;
adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 |
MC13XXX_ADC0_CHRGRAWDIV;
adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;
/*


@ -272,7 +272,7 @@ static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
dma_fail:
if (!x100)
dma_pool_free(ep->remote_dev->signal_pool, status,
status->src_dma_addr);
src - offsetof(struct scif_status, val));
alloc_fail:
return err;
}


@ -390,7 +390,6 @@ struct msdc_host {
struct clk *src_clk_cg; /* msdc source clock control gate */
u32 mclk; /* mmc subsystem clock frequency */
u32 src_clk_freq; /* source clock frequency */
u32 sclk; /* SD/MS bus clock frequency */
unsigned char timing;
bool vqmmc_enabled;
u32 latch_ck;
@ -635,10 +634,10 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
host->timeout_ns = ns;
host->timeout_clks = clks;
if (host->sclk == 0) {
if (host->mmc->actual_clock == 0) {
timeout = 0;
} else {
clk_ns = 1000000000UL / host->sclk;
clk_ns = 1000000000UL / host->mmc->actual_clock;
timeout = (ns + clk_ns - 1) / clk_ns + clks;
/* in 1048576 sclk cycle unit */
timeout = (timeout + (0x1 << 20) - 1) >> 20;
@ -683,6 +682,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
if (!hz) {
dev_dbg(host->dev, "set mclk to 0\n");
host->mclk = 0;
host->mmc->actual_clock = 0;
sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
return;
}
@ -761,7 +761,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
host->sclk = sclk;
host->mmc->actual_clock = sclk;
host->mclk = hz;
host->timing = timing;
/* need because clk changed. */
@ -772,7 +772,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
* mmc_select_hs400() will drop to 50Mhz and High speed mode,
* tune result of hs200/200Mhz is not suitable for 50Mhz
*/
if (host->sclk <= 52000000) {
if (host->mmc->actual_clock <= 52000000) {
writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
writel(host->def_tune_para.pad_tune, host->base + tune_reg);
} else {
@ -787,7 +787,8 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs400_cmd_int_delay);
dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing);
dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock,
timing);
}
static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
@ -1055,6 +1056,7 @@ static void msdc_start_command(struct msdc_host *host,
WARN_ON(host->cmd);
host->cmd = cmd;
mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
if (!msdc_cmd_is_ready(host, mrq, cmd))
return;
@ -1066,7 +1068,6 @@ static void msdc_start_command(struct msdc_host *host,
cmd->error = 0;
rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);
mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
writel(cmd->arg, host->base + SDC_ARG);


@ -1099,12 +1099,16 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
return ret;
}
bcm_sf2_gphy_enable_set(priv->dev->ds, true);
ret = bcm_sf2_mdio_register(ds);
if (ret) {
pr_err("failed to register MDIO bus\n");
return ret;
}
bcm_sf2_gphy_enable_set(priv->dev->ds, false);
ret = bcm_sf2_cfp_rst(priv);
if (ret) {
pr_err("failed to reset CFP\n");


@ -3028,7 +3028,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6390_port_set_speed,
.port_set_speed = mv88e6341_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@ -3649,7 +3649,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed = mv88e6390_port_set_speed,
.port_set_speed = mv88e6341_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,


@ -228,8 +228,11 @@ static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port,
ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000;
break;
case 2500:
ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 |
MV88E6390_PORT_MAC_CTL_ALTSPEED;
if (alt_bit)
ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 |
MV88E6390_PORT_MAC_CTL_ALTSPEED;
else
ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000;
break;
case 10000:
/* all bits set, fall through... */
@ -291,6 +294,24 @@ int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
}
/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6341) */
int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
{
if (speed == SPEED_MAX)
speed = port < 5 ? 1000 : 2500;
if (speed > 2500)
return -EOPNOTSUPP;
if (speed == 200 && port != 0)
return -EOPNOTSUPP;
if (speed == 2500 && port < 5)
return -EOPNOTSUPP;
return mv88e6xxx_port_set_speed(chip, port, speed, !port, true);
}
/* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */
int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
{


@ -280,6 +280,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup);
int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);

Some files were not shown because too many files have changed in this diff.