Import Pro1-X kernel source code

Authored by TheKit on 2022-03-20 00:13:44 +01:00; committed by TheKit.
parent 96060693a5
commit 1d86f7ae7b
559 changed files with 90104 additions and 7564 deletions

View file

@ -319,7 +319,7 @@ include scripts/subarch.include
# Alternatively CROSS_COMPILE can be set in the environment.
# Default value for CROSS_COMPILE is not to prefix executables
# Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
ARCH := arm64
ARCH ?= $(SUBARCH)
# Architecture as present in compile.h
UTS_MACHINE := $(ARCH)
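
With ARCH no longer pinned to arm64 here, the architecture and toolchain prefix come from the command line or the environment again. A minimal build sketch, assuming an aarch64 GNU toolchain on PATH; the defconfig name and output directory are placeholders, not taken from this commit:

$ make O=out ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- pro1x_defconfig   # defconfig name is hypothetical
$ make O=out ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- -j"$(nproc)"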
@ -366,7 +366,7 @@ HOSTCC = gcc
HOSTCXX = g++
endif
KBUILD_HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \
-fomit-frame-pointer -std=gnu89 -pipe $(HOST_LFS_CFLAGS) \
-fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS) \
$(HOSTCFLAGS)
KBUILD_HOSTCXXFLAGS := -O2 $(HOST_LFS_CFLAGS) $(HOSTCXXFLAGS)
KBUILD_HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS)
@ -385,7 +385,7 @@ READELF = llvm-readelf
OBJSIZE = llvm-size
STRIP = llvm-strip
else
CC = $(CROSS_COMPILE)gcc
REAL_CC = $(CROSS_COMPILE)gcc
LD = $(CROSS_COMPILE)ld
AR = $(CROSS_COMPILE)ar
NM = $(CROSS_COMPILE)nm
@ -409,7 +409,7 @@ CHECK = sparse
# Use the wrapper for the compiler. This wrapper scans for new
# warnings and causes the build to stop upon encountering them
# CC = $(PYTHON2) $(srctree)/scripts/gcc-wrapper.py $(REAL_CC)
CC = $(PYTHON) $(srctree)/scripts/gcc-wrapper.py $(REAL_CC)
CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF)
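
With CC pointed at the wrapper, every compile goes through scripts/gcc-wrapper.py, which scans the compiler output for new warnings and stops the build when it sees them, while REAL_CC still names the actual cross-compiler. Because CC is set with a plain '=' in the Makefile, a command-line assignment overrides it and skips the wrapper; a sketch, with the toolchain prefix assumed:

$ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC=aarch64-linux-gnu-gcc   # bypasses the gcc-wrapper.py warning scan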
@ -439,7 +439,7 @@ LINUXINCLUDE := \
$(USERINCLUDE)
KBUILD_AFLAGS := -D__ASSEMBLY__
KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs -pipe \
KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -fno-common -fshort-wchar \
-Werror-implicit-function-declaration \
-Wno-format-security \
@ -688,18 +688,11 @@ KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias)
KBUILD_CFLAGS += $(call cc-disable-warning, packed-not-aligned)
KBUILD_CFLAGS += $(call cc-disable-warning, psabi)
KBUILD_CFLAGS += $(call cc-disable-warning, restrict)
KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow)
KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds)
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os
else
KBUILD_CFLAGS += -O3
KBUILD_CFLAGS += -O2
endif
# Tell gcc to never replace conditional load with a non-conditional one
@ -961,6 +954,9 @@ KBUILD_CFLAGS += $(call cc-option,-fmerge-constants)
# Make sure -fstack-check isn't enabled (like gentoo apparently did)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,)
# conserve stack if available
KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)
# disallow errors like 'EXPORT_GPL(foo);' with missing header
KBUILD_CFLAGS += $(call cc-option,-Werror=implicit-int)
@ -1349,7 +1345,7 @@ headers_install: __headers
$(error Headers not exportable for the $(SRCARCH) architecture))
$(Q)$(MAKE) $(hdr-inst)=include/uapi dst=include
$(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi $(hdr-dst)
$(Q)$(MAKE) $(hdr-inst)=techpack/audio/include/uapi dst=techpack/audio/include
$(Q)$(MAKE) $(hdr-inst)=techpack
PHONY += headers_check_all
headers_check_all: headers_install_all
@ -1359,7 +1355,7 @@ PHONY += headers_check
headers_check: headers_install
$(Q)$(MAKE) $(hdr-inst)=include/uapi dst=include HDRCHECK=1
$(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi $(hdr-dst) HDRCHECK=1
$(Q)$(MAKE) $(hdr-inst)=techpack/audio/include/uapi dst=techpack/audio/include HDRCHECK=1
$(Q)$(MAKE) $(hdr-inst)=techpack HDRCHECK=1
# ---------------------------------------------------------------------------
# Kernel selftest
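
Both targets now hand the techpack headers to the generic $(hdr-inst)=techpack rule instead of spelling out the audio include path. The exported headers can be installed and sanity-checked the usual way; a sketch, with the install path as an assumption:

$ make ARCH=arm64 INSTALL_HDR_PATH=out/usr headers_install
$ make ARCH=arm64 headers_check          # re-runs the install rules with HDRCHECK=1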

View file

@ -563,7 +563,6 @@ config CFI_CLANG_SHADOW
config ARCH_SUPPORTS_SHADOW_CALL_STACK
bool
default y
help
An architecture should select this if it supports Clang's Shadow
Call Stack, has asm/scs.h, and implements runtime support for shadow
@ -944,16 +943,6 @@ config STRICT_MODULE_RWX
config ARCH_HAS_PHYS_TO_DMA
bool
config ARCH_HAS_REFCOUNT_FULL
bool
select ARCH_HAS_REFCOUNT
help
An architecture selects this when the optimized refcount_t
implementation it provides covers all the cases that
CONFIG_REFCOUNT_FULL covers as well, in which case it makes no
sense to even offer CONFIG_REFCOUNT_FULL as a user selectable
option.
config ARCH_HAS_REFCOUNT
bool
help
@ -967,7 +956,7 @@ config ARCH_HAS_REFCOUNT
against bugs in reference counts.
config REFCOUNT_FULL
bool "Perform full reference count validation at the expense of speed" if !ARCH_HAS_REFCOUNT_FULL
bool "Perform full reference count validation at the expense of speed"
help
Enabling this switches the refcounting infrastructure from a fast
unchecked atomic_t implementation to a fully state checked

View file

@ -0,0 +1,655 @@
CONFIG_LOCALVERSION="-perf"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_SCHED_WALT=y
CONFIG_TASKSTATS=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_PSI=y
CONFIG_RCU_EXPERT=y
CONFIG_RCU_FAST_NO_HZ=y
CONFIG_RCU_NOCB_CPU=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_BPF=y
CONFIG_SCHED_CORE_CTL=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SCHED_TUNE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_FHANDLE is not set
# CONFIG_BASE_FULL is not set
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_QM215=y
CONFIG_ARCH_MSM8917=y
# CONFIG_VDSO is not set
CONFIG_SMP=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
CONFIG_ARM_PSCI=y
CONFIG_HIGHMEM=y
CONFIG_SECCOMP=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
CONFIG_CPU_FREQ_MSM=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_KERNEL_MODE_NEON=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
CONFIG_ENERGY_MODEL=y
CONFIG_MSM_TZ_LOG=y
CONFIG_ARM_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM_NEON=y
CONFIG_CRYPTO_SHA2_ARM_CE=y
CONFIG_CRYPTO_AES_ARM_BS=y
CONFIG_CRYPTO_AES_ARM_CE=y
CONFIG_CRYPTO_GHASH_ARM_CE=y
CONFIG_ARCH_MMAP_RND_BITS=16
CONFIG_PANIC_ON_REFCOUNT_ERROR=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SIG=y
CONFIG_MODULE_SIG_FORCE=y
CONFIG_MODULE_SIG_SHA512=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_BLK_DEV_ZONED=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_IOSCHED_BFQ=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_CLEANCACHE=y
CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
CONFIG_XFRM_INTERFACE=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_NET_IPGRE_DEMUX=y
CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
CONFIG_INET_UDP_DIAG=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
CONFIG_INET6_AH=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=y
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_AMANDA=y
CONFIG_NF_CONNTRACK_FTP=y
CONFIG_NF_CONNTRACK_H323=y
CONFIG_NF_CONNTRACK_IRC=y
CONFIG_NF_CONNTRACK_NETBIOS_NS=y
CONFIG_NF_CONNTRACK_PPTP=y
CONFIG_NF_CONNTRACK_SANE=y
CONFIG_NF_CONNTRACK_TFTP=y
CONFIG_NF_CT_NETLINK=y
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
CONFIG_NETFILTER_XT_TARGET_TEE=y
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
CONFIG_NETFILTER_XT_MATCH_DSCP=y
CONFIG_NETFILTER_XT_MATCH_ESP=y
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
CONFIG_NETFILTER_XT_MATCH_HELPER=y
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
CONFIG_NETFILTER_XT_MATCH_MAC=y
CONFIG_NETFILTER_XT_MATCH_MARK=y
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
CONFIG_NETFILTER_XT_MATCH_OWNER=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
CONFIG_NETFILTER_XT_MATCH_STRING=y
CONFIG_NETFILTER_XT_MATCH_TIME=y
CONFIG_NETFILTER_XT_MATCH_U32=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_MATCH_AH=y
CONFIG_IP_NF_MATCH_ECN=y
CONFIG_IP_NF_MATCH_RPFILTER=y
CONFIG_IP_NF_MATCH_TTL=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_NAT=y
CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_IP_NF_TARGET_NETMAP=y
CONFIG_IP_NF_TARGET_REDIRECT=y
CONFIG_IP_NF_MANGLE=y
CONFIG_IP_NF_RAW=y
CONFIG_IP_NF_SECURITY=y
CONFIG_IP_NF_ARPTABLES=y
CONFIG_IP_NF_ARPFILTER=y
CONFIG_IP_NF_ARP_MANGLE=y
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MATCH_RPFILTER=y
CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IP6_NF_MANGLE=y
CONFIG_IP6_NF_RAW=y
CONFIG_BRIDGE_NF_EBTABLES=y
CONFIG_BRIDGE_EBT_BROUTE=y
CONFIG_L2TP=y
CONFIG_L2TP_DEBUGFS=y
CONFIG_L2TP_V3=y
CONFIG_L2TP_IP=y
CONFIG_L2TP_ETH=y
CONFIG_BRIDGE=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
CONFIG_NET_SCH_PRIO=y
CONFIG_NET_SCH_INGRESS=y
CONFIG_NET_CLS_FW=y
CONFIG_NET_CLS_U32=y
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=y
CONFIG_NET_CLS_BPF=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_CMP=y
CONFIG_NET_EMATCH_NBYTE=y
CONFIG_NET_EMATCH_U32=y
CONFIG_NET_EMATCH_META=y
CONFIG_NET_EMATCH_TEXT=y
CONFIG_NET_CLS_ACT=y
CONFIG_DNS_RESOLVER=y
CONFIG_QRTR=y
CONFIG_QRTR_SMD=y
CONFIG_BT=y
# CONFIG_BT_BREDR is not set
# CONFIG_BT_LE is not set
CONFIG_MSM_BT_POWER=y
CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
CONFIG_CFG80211_INTERNAL_REGDB=y
# CONFIG_CFG80211_CRDA_SUPPORT is not set
CONFIG_RFKILL=y
CONFIG_NFC_NQ=y
CONFIG_FW_LOADER_USER_HELPER=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
# CONFIG_FW_CACHE is not set
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_HDCP_QSEECOM=y
CONFIG_QSEECOM=y
CONFIG_UID_SYS_STATS=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_CRYPTO=y
CONFIG_SCSI_UFS_CRYPTO_QTI=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
CONFIG_DM_ANDROID_VERITY=y
CONFIG_DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED=y
CONFIG_DM_BOW=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_TUN=y
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HISILICON is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=y
CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=y
CONFIG_PPTP=y
CONFIG_PPPOL2TP=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
CONFIG_USB_RTL8152=y
CONFIG_USB_USBNET=y
# CONFIG_WLAN_VENDOR_ADMTEK is not set
# CONFIG_WLAN_VENDOR_ATH is not set
# CONFIG_WLAN_VENDOR_ATMEL is not set
# CONFIG_WLAN_VENDOR_BROADCOM is not set
# CONFIG_WLAN_VENDOR_CISCO is not set
# CONFIG_WLAN_VENDOR_INTEL is not set
# CONFIG_WLAN_VENDOR_INTERSIL is not set
# CONFIG_WLAN_VENDOR_MARVELL is not set
# CONFIG_WLAN_VENDOR_MEDIATEK is not set
# CONFIG_WLAN_VENDOR_RALINK is not set
# CONFIG_WLAN_VENDOR_REALTEK is not set
# CONFIG_WLAN_VENDOR_RSI is not set
# CONFIG_WLAN_VENDOR_ST is not set
# CONFIG_WLAN_VENDOR_TI is not set
# CONFIG_WLAN_VENDOR_ZYDAS is not set
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CLD_LL_CORE=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_UINPUT=y
# CONFIG_SERIO_SERPORT is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVMEM is not set
CONFIG_SERIAL_MSM_HS=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_SMD_PKT=y
CONFIG_DIAG_CHAR=y
CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MSM_V2=y
CONFIG_SPI=y
CONFIG_SPI_QUP=y
CONFIG_SPI_SPIDEV=y
CONFIG_SPMI=y
CONFIG_PINCTRL_MSM8937=y
CONFIG_PINCTRL_MSM8917=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_QCOM=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_QPNP_SMB5=y
CONFIG_SMB1351_USB_CHARGER=y
CONFIG_SMB1355_SLAVE_CHARGER=y
CONFIG_QPNP_QG=y
CONFIG_THERMAL=y
CONFIG_THERMAL_WRITABLE_TRIPS=y
CONFIG_THERMAL_GOV_USER_SPACE=y
CONFIG_THERMAL_GOV_LOW_LIMITS=y
CONFIG_CPU_THERMAL=y
CONFIG_DEVFREQ_THERMAL=y
CONFIG_THERMAL_TSENS=y
CONFIG_QTI_VIRTUAL_SENSOR=y
CONFIG_QTI_BCL_PMIC5=y
CONFIG_QTI_BCL_SOC_DRIVER=y
CONFIG_QTI_QMI_COOLING_DEVICE=y
CONFIG_REGULATOR_COOLING_DEVICE=y
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_PROXY_CONSUMER=y
CONFIG_REGULATOR_QPNP_LABIBB=y
CONFIG_REGULATOR_QPNP_LCDB=y
CONFIG_REGULATOR_MEM_ACC=y
CONFIG_REGULATOR_CPR=y
CONFIG_REGULATOR_RPM_SMD=y
CONFIG_REGULATOR_SPM=y
CONFIG_REGULATOR_STUB=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_MEDIA_USB_SUPPORT=y
CONFIG_USB_VIDEO_CLASS=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_MSM_VIDC_3X_GOVERNORS=y
CONFIG_MSM_VIDC_3X_V4L2=y
CONFIG_MSM_CAMERA=y
CONFIG_MSMB_CAMERA=y
CONFIG_MSM_CAMERA_SENSOR=y
CONFIG_MSM_CPP=y
CONFIG_MSM_CCI=y
CONFIG_MSM_CSI20_HEADER=y
CONFIG_MSM_CSI22_HEADER=y
CONFIG_MSM_CSI30_HEADER=y
CONFIG_MSM_CSI31_HEADER=y
CONFIG_MSM_CSIPHY=y
CONFIG_MSM_CSID=y
CONFIG_MSM_EEPROM=y
CONFIG_MSM_ISPIF_V2=y
CONFIG_IMX134=y
CONFIG_IMX132=y
CONFIG_OV9724=y
CONFIG_OV5648=y
CONFIG_GC0339=y
CONFIG_OV8825=y
CONFIG_OV8865=y
CONFIG_s5k4e1=y
CONFIG_OV12830=y
CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
CONFIG_MSMB_JPEG=y
CONFIG_MSM_FD=y
CONFIG_FB=y
CONFIG_FB_MSM=y
CONFIG_FB_MSM_MDSS=y
CONFIG_FB_MSM_MDSS_WRITEBACK=y
CONFIG_FB_MSM_MDSS_SPI_PANEL=y
CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_DYNAMIC_MINORS=y
CONFIG_SND_USB_AUDIO=y
CONFIG_SND_SOC=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
CONFIG_HID_ELECOM=y
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MULTITOUCH=y
CONFIG_HID_SONY=y
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_EHCI_MSM=y
CONFIG_USB_ACM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_STORAGE_DATAFAB=y
CONFIG_USB_STORAGE_FREECOM=y
CONFIG_USB_STORAGE_ISD200=y
CONFIG_USB_STORAGE_USBAT=y
CONFIG_USB_STORAGE_SDDR09=y
CONFIG_USB_STORAGE_SDDR55=y
CONFIG_USB_STORAGE_JUMPSHOT=y
CONFIG_USB_STORAGE_ALAUDA=y
CONFIG_USB_STORAGE_ONETOUCH=y
CONFIG_USB_STORAGE_KARMA=y
CONFIG_USB_STORAGE_CYPRESS_ATACB=y
CONFIG_USB_SERIAL=y
CONFIG_USB_EHSET_TEST_FIXTURE=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG_FILES=y
CONFIG_USB_GADGET_DEBUG_FS=y
CONFIG_USB_GADGET_VBUS_DRAW=500
CONFIG_USB_CI13XXX_MSM=y
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_UEVENT=y
CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_NCM=y
CONFIG_USB_CONFIGFS_RNDIS=y
CONFIG_USB_CONFIGFS_RMNET_BAM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_CCID=y
CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_TYPEC=y
CONFIG_MMC=y
# CONFIG_PWRSEQ_EMMC is not set
# CONFIG_PWRSEQ_SIMPLE is not set
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
CONFIG_MMC_IPC_LOGGING=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_CQHCI_CRYPTO=y
CONFIG_MMC_CQHCI_CRYPTO_QTI=y
CONFIG_LEDS_QTI_TRI_LED=y
CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_DMADEVICES=y
CONFIG_QCOM_SPS_DMA=y
CONFIG_UIO=y
CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ION=y
CONFIG_ION_POOL_AUTO_REFILL=y
CONFIG_QPNP_REVID=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_IPA=y
CONFIG_RMNET_IPA=y
CONFIG_RNDIS_IPA=y
CONFIG_MDSS_PLL=y
CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_SDM_GCC_429W=y
CONFIG_SDM_DEBUGCC_429W=y
CONFIG_CLOCK_CPU_SDM=y
CONFIG_HWSPINLOCK=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
CONFIG_QCOM_LAZY_MAPPING=y
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_QCOM_GLINK_RPM=y
CONFIG_RPMSG_QCOM_GLINK_SMEM=y
CONFIG_RPMSG_QCOM_SMD=y
CONFIG_MSM_RPM_SMD=y
CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_QCOM_QMI_HELPERS=y
CONFIG_QCOM_SMEM=y
CONFIG_QCOM_SMD_RPM=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
CONFIG_QCOM_EARLY_RANDOM=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
CONFIG_QCOM_SMP2P=y
CONFIG_QCOM_SMSM=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_QCOM_SECURE_BUFFER=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_QCOM_DCC_V2=y
CONFIG_MSM_CORE_HANG_DETECT=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QCOM_BUS_SCALING=y
CONFIG_QCOM_GLINK=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_PM=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QTEE_SHM_BRIDGE=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_MSM_PERFORMANCE=y
CONFIG_QTI_CRYPTO_COMMON=y
CONFIG_QTI_CRYPTO_TZ=y
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
CONFIG_DEVFREQ_GOV_MEMLAT=y
CONFIG_DEVFREQ_SIMPLE_DEV=y
CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_IIO=y
CONFIG_PWM=y
CONFIG_PWM_QTI_LPG=y
CONFIG_QCOM_SHOW_RESUME_IRQ=y
CONFIG_QCOM_MPM=y
CONFIG_RAS=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ANDROID_BINDERFS=y
CONFIG_QCOM_QFPROM=y
CONFIG_SLIMBUS_MSM_NGD=y
CONFIG_SENSORS_SSC=y
CONFIG_QCOM_KGSL=y
CONFIG_LEGACY_ENERGY_MODEL_DT=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
CONFIG_FS_VERITY=y
CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
CONFIG_OVERLAY_FS=y
CONFIG_INCREMENTAL_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_SDCARD_FS=y
CONFIG_SQUASHFS=y
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_LZ4=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_LSM_MMAP_MIN_ADDR=4096
CONFIG_HARDENED_USERCOPY=y
CONFIG_HARDENED_USERCOPY_PAGESPAN=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_STATIC_USERMODEHELPER=y
CONFIG_STATIC_USERMODEHELPER_PATH=""
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCE=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_FRAME_WARN=2048
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_SCHED_STACK_END_CHECK=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_FAULT_INJECTION=y
CONFIG_FAIL_PAGE_ALLOC=y
CONFIG_IPC_LOGGING=y
# CONFIG_FTRACE is not set
CONFIG_LKDTM=m
CONFIG_BUG_ON_DATA_CORRUPTION=y

View file

@ -0,0 +1,711 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_SCHED_WALT=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_PSI=y
CONFIG_RCU_EXPERT=y
CONFIG_RCU_FAST_NO_HZ=y
CONFIG_RCU_NOCB_CPU=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_DEBUG_BLK_CGROUP=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_BPF=y
CONFIG_CGROUP_DEBUG=y
CONFIG_SCHED_CORE_CTL=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SCHED_TUNE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_FHANDLE is not set
# CONFIG_BASE_FULL is not set
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_QM215=y
CONFIG_ARCH_MSM8917=y
# CONFIG_VDSO is not set
CONFIG_SMP=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
CONFIG_ARM_PSCI=y
CONFIG_HIGHMEM=y
CONFIG_SECCOMP=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
CONFIG_CPU_FREQ_MSM=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_KERNEL_MODE_NEON=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
CONFIG_PM_DEBUG=y
CONFIG_ENERGY_MODEL=y
CONFIG_MSM_TZ_LOG=y
CONFIG_ARM_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM_NEON=y
CONFIG_CRYPTO_SHA2_ARM_CE=y
CONFIG_CRYPTO_AES_ARM_BS=y
CONFIG_CRYPTO_AES_ARM_CE=y
CONFIG_CRYPTO_GHASH_ARM_CE=y
CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_ARCH_MMAP_RND_BITS=16
CONFIG_PANIC_ON_REFCOUNT_ERROR=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SIG=y
CONFIG_MODULE_SIG_FORCE=y
CONFIG_MODULE_SIG_SHA512=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_BLK_DEV_ZONED=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_IOSCHED_BFQ=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_CLEANCACHE=y
CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
CONFIG_XFRM_INTERFACE=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_NET_IPGRE_DEMUX=y
CONFIG_NET_IPVTI=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
CONFIG_INET_UDP_DIAG=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
CONFIG_INET6_AH=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=y
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_AMANDA=y
CONFIG_NF_CONNTRACK_FTP=y
CONFIG_NF_CONNTRACK_H323=y
CONFIG_NF_CONNTRACK_IRC=y
CONFIG_NF_CONNTRACK_NETBIOS_NS=y
CONFIG_NF_CONNTRACK_PPTP=y
CONFIG_NF_CONNTRACK_SANE=y
CONFIG_NF_CONNTRACK_TFTP=y
CONFIG_NF_CT_NETLINK=y
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
CONFIG_NETFILTER_XT_TARGET_TEE=y
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
CONFIG_NETFILTER_XT_MATCH_DSCP=y
CONFIG_NETFILTER_XT_MATCH_ESP=y
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
CONFIG_NETFILTER_XT_MATCH_HELPER=y
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
CONFIG_NETFILTER_XT_MATCH_MAC=y
CONFIG_NETFILTER_XT_MATCH_MARK=y
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
CONFIG_NETFILTER_XT_MATCH_OWNER=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
CONFIG_NETFILTER_XT_MATCH_STRING=y
CONFIG_NETFILTER_XT_MATCH_TIME=y
CONFIG_NETFILTER_XT_MATCH_U32=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_MATCH_AH=y
CONFIG_IP_NF_MATCH_ECN=y
CONFIG_IP_NF_MATCH_RPFILTER=y
CONFIG_IP_NF_MATCH_TTL=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_NAT=y
CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_IP_NF_TARGET_NETMAP=y
CONFIG_IP_NF_TARGET_REDIRECT=y
CONFIG_IP_NF_MANGLE=y
CONFIG_IP_NF_RAW=y
CONFIG_IP_NF_SECURITY=y
CONFIG_IP_NF_ARPTABLES=y
CONFIG_IP_NF_ARPFILTER=y
CONFIG_IP_NF_ARP_MANGLE=y
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MATCH_RPFILTER=y
CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IP6_NF_MANGLE=y
CONFIG_IP6_NF_RAW=y
CONFIG_BRIDGE_NF_EBTABLES=y
CONFIG_BRIDGE_EBT_BROUTE=y
CONFIG_L2TP=y
CONFIG_L2TP_DEBUGFS=y
CONFIG_L2TP_V3=y
CONFIG_L2TP_IP=y
CONFIG_L2TP_ETH=y
CONFIG_BRIDGE=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
CONFIG_NET_SCH_PRIO=y
CONFIG_NET_SCH_INGRESS=y
CONFIG_NET_CLS_FW=y
CONFIG_NET_CLS_U32=y
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=y
CONFIG_NET_CLS_BPF=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_CMP=y
CONFIG_NET_EMATCH_NBYTE=y
CONFIG_NET_EMATCH_U32=y
CONFIG_NET_EMATCH_META=y
CONFIG_NET_EMATCH_TEXT=y
CONFIG_NET_CLS_ACT=y
CONFIG_DNS_RESOLVER=y
CONFIG_QRTR=y
CONFIG_QRTR_SMD=y
CONFIG_BT=y
# CONFIG_BT_BREDR is not set
# CONFIG_BT_LE is not set
CONFIG_MSM_BT_POWER=y
CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
CONFIG_CFG80211_INTERNAL_REGDB=y
# CONFIG_CFG80211_CRDA_SUPPORT is not set
CONFIG_RFKILL=y
CONFIG_NFC_NQ=y
CONFIG_FW_LOADER_USER_HELPER=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
# CONFIG_FW_CACHE is not set
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_HDCP_QSEECOM=y
CONFIG_QSEECOM=y
CONFIG_UID_SYS_STATS=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
CONFIG_SCSI_UFS_CRYPTO=y
CONFIG_SCSI_UFS_CRYPTO_QTI=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
CONFIG_DM_ANDROID_VERITY=y
CONFIG_DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED=y
CONFIG_DM_BOW=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_TUN=y
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HISILICON is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=y
CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=y
CONFIG_PPTP=y
CONFIG_PPPOL2TP=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
CONFIG_USB_RTL8152=y
CONFIG_USB_USBNET=y
# CONFIG_WLAN_VENDOR_ADMTEK is not set
# CONFIG_WLAN_VENDOR_ATH is not set
# CONFIG_WLAN_VENDOR_ATMEL is not set
# CONFIG_WLAN_VENDOR_BROADCOM is not set
# CONFIG_WLAN_VENDOR_CISCO is not set
# CONFIG_WLAN_VENDOR_INTEL is not set
# CONFIG_WLAN_VENDOR_INTERSIL is not set
# CONFIG_WLAN_VENDOR_MARVELL is not set
# CONFIG_WLAN_VENDOR_MEDIATEK is not set
# CONFIG_WLAN_VENDOR_RALINK is not set
# CONFIG_WLAN_VENDOR_REALTEK is not set
# CONFIG_WLAN_VENDOR_RSI is not set
# CONFIG_WLAN_VENDOR_ST is not set
# CONFIG_WLAN_VENDOR_TI is not set
# CONFIG_WLAN_VENDOR_ZYDAS is not set
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CLD_LL_CORE=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_UINPUT=y
# CONFIG_SERIO_SERPORT is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVMEM is not set
CONFIG_SERIAL_MSM=y
CONFIG_SERIAL_MSM_CONSOLE=y
CONFIG_SERIAL_MSM_HS=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_SMD_PKT=y
CONFIG_DIAG_CHAR=y
CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MSM_V2=y
CONFIG_SPI=y
CONFIG_SPI_QUP=y
CONFIG_SPI_SPIDEV=y
CONFIG_SPMI=y
CONFIG_PINCTRL_MSM8937=y
CONFIG_PINCTRL_MSM8917=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_QCOM=y
CONFIG_QPNP_SMB5=y
CONFIG_SMB1351_USB_CHARGER=y
CONFIG_SMB1355_SLAVE_CHARGER=y
CONFIG_QPNP_QG=y
CONFIG_THERMAL=y
CONFIG_THERMAL_WRITABLE_TRIPS=y
CONFIG_THERMAL_GOV_USER_SPACE=y
CONFIG_THERMAL_GOV_LOW_LIMITS=y
CONFIG_CPU_THERMAL=y
CONFIG_DEVFREQ_THERMAL=y
CONFIG_THERMAL_TSENS=y
CONFIG_QTI_VIRTUAL_SENSOR=y
CONFIG_QTI_BCL_PMIC5=y
CONFIG_QTI_BCL_SOC_DRIVER=y
CONFIG_QTI_QMI_COOLING_DEVICE=y
CONFIG_REGULATOR_COOLING_DEVICE=y
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_PROXY_CONSUMER=y
CONFIG_REGULATOR_QPNP_LABIBB=y
CONFIG_REGULATOR_QPNP_LCDB=y
CONFIG_REGULATOR_MEM_ACC=y
CONFIG_REGULATOR_CPR=y
CONFIG_REGULATOR_RPM_SMD=y
CONFIG_REGULATOR_SPM=y
CONFIG_REGULATOR_STUB=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_MSM_VIDC_3X_GOVERNORS=y
CONFIG_MSM_VIDC_3X_V4L2=y
CONFIG_MSM_CAMERA=y
CONFIG_MSM_CAMERA_DEBUG=y
CONFIG_MSMB_CAMERA=y
CONFIG_MSMB_CAMERA_DEBUG=y
CONFIG_MSM_CAMERA_SENSOR=y
CONFIG_MSM_CPP=y
CONFIG_MSM_CCI=y
CONFIG_MSM_CSI20_HEADER=y
CONFIG_MSM_CSI22_HEADER=y
CONFIG_MSM_CSI30_HEADER=y
CONFIG_MSM_CSI31_HEADER=y
CONFIG_MSM_CSIPHY=y
CONFIG_MSM_CSID=y
CONFIG_MSM_EEPROM=y
CONFIG_MSM_ISPIF_V2=y
CONFIG_IMX134=y
CONFIG_IMX132=y
CONFIG_OV9724=y
CONFIG_OV5648=y
CONFIG_GC0339=y
CONFIG_OV8825=y
CONFIG_OV8865=y
CONFIG_s5k4e1=y
CONFIG_OV12830=y
CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
CONFIG_MSMB_JPEG=y
CONFIG_MSM_FD=y
CONFIG_FB=y
CONFIG_FB_VIRTUAL=y
CONFIG_FB_MSM=y
CONFIG_FB_MSM_MDSS=y
CONFIG_FB_MSM_MDSS_WRITEBACK=y
CONFIG_FB_MSM_MDSS_SPI_PANEL=y
CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_DYNAMIC_MINORS=y
CONFIG_SND_USB_AUDIO=y
CONFIG_SND_SOC=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
CONFIG_HID_ELECOM=y
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MULTITOUCH=y
CONFIG_HID_SONY=y
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_EHCI_MSM=y
CONFIG_USB_ACM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_STORAGE_DATAFAB=y
CONFIG_USB_STORAGE_FREECOM=y
CONFIG_USB_STORAGE_ISD200=y
CONFIG_USB_STORAGE_USBAT=y
CONFIG_USB_STORAGE_SDDR09=y
CONFIG_USB_STORAGE_SDDR55=y
CONFIG_USB_STORAGE_JUMPSHOT=y
CONFIG_USB_STORAGE_ALAUDA=y
CONFIG_USB_STORAGE_ONETOUCH=y
CONFIG_USB_STORAGE_KARMA=y
CONFIG_USB_STORAGE_CYPRESS_ATACB=y
CONFIG_USB_SERIAL=y
CONFIG_USB_EHSET_TEST_FIXTURE=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG_FILES=y
CONFIG_USB_GADGET_DEBUG_FS=y
CONFIG_USB_GADGET_VBUS_DRAW=500
CONFIG_USB_CI13XXX_MSM=y
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_UEVENT=y
CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_NCM=y
CONFIG_USB_CONFIGFS_RNDIS=y
CONFIG_USB_CONFIGFS_RMNET_BAM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_CCID=y
CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_TYPEC=y
CONFIG_MMC=y
# CONFIG_PWRSEQ_EMMC is not set
# CONFIG_PWRSEQ_SIMPLE is not set
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
CONFIG_MMC_IPC_LOGGING=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_CQHCI_CRYPTO=y
CONFIG_MMC_CQHCI_CRYPTO_QTI=y
CONFIG_LEDS_QTI_TRI_LED=y
CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_DMADEVICES=y
CONFIG_QCOM_SPS_DMA=y
CONFIG_UIO=y
CONFIG_UIO_MSM_SHAREDMEM=y
CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ION=y
CONFIG_ION_POOL_AUTO_REFILL=y
CONFIG_MSM_EXT_DISPLAY=y
CONFIG_QPNP_REVID=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_IPA=y
CONFIG_RMNET_IPA=y
CONFIG_RNDIS_IPA=y
CONFIG_MDSS_PLL=y
CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_SDM_GCC_429W=y
CONFIG_SDM_DEBUGCC_429W=y
CONFIG_CLOCK_CPU_SDM=y
CONFIG_HWSPINLOCK=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
CONFIG_QCOM_LAZY_MAPPING=y
CONFIG_IOMMU_DEBUG=y
CONFIG_IOMMU_TESTS=y
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_QCOM_GLINK_RPM=y
CONFIG_RPMSG_QCOM_GLINK_SMEM=y
CONFIG_RPMSG_QCOM_SMD=y
CONFIG_MSM_RPM_SMD=y
CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_QCOM_QMI_HELPERS=y
CONFIG_QCOM_SMEM=y
CONFIG_QCOM_SMD_RPM=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
CONFIG_QCOM_EARLY_RANDOM=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
CONFIG_QCOM_SMP2P=y
CONFIG_QCOM_SMSM=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
CONFIG_QCOM_SECURE_BUFFER=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_QCOM_DCC_V2=y
CONFIG_MSM_CORE_HANG_DETECT=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QCOM_BUS_SCALING=y
CONFIG_QCOM_GLINK=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_PM=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QTEE_SHM_BRIDGE=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_MSM_PERFORMANCE=y
CONFIG_QTI_CRYPTO_COMMON=y
CONFIG_QTI_CRYPTO_TZ=y
CONFIG_WCNSS_CORE=y
CONFIG_WCNSS_CORE_PRONTO=y
CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
CONFIG_DEVFREQ_GOV_MEMLAT=y
CONFIG_DEVFREQ_SIMPLE_DEV=y
CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_IIO=y
CONFIG_PWM=y
CONFIG_PWM_QTI_LPG=y
CONFIG_QCOM_SHOW_RESUME_IRQ=y
CONFIG_QCOM_MPM=y
CONFIG_RAS=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ANDROID_BINDERFS=y
CONFIG_QCOM_QFPROM=y
CONFIG_STM=y
CONFIG_SLIMBUS_MSM_NGD=y
CONFIG_SENSORS_SSC=y
CONFIG_QCOM_KGSL=y
CONFIG_LEGACY_ENERGY_MODEL_DT=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_CHECK_FS=y
CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
CONFIG_FS_VERITY=y
CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
CONFIG_OVERLAY_FS=y
CONFIG_INCREMENTAL_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_SDCARD_FS=y
CONFIG_SQUASHFS=y
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_LZ4=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_LSM_MMAP_MIN_ADDR=4096
CONFIG_HARDENED_USERCOPY=y
CONFIG_HARDENED_USERCOPY_PAGESPAN=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_STATIC_USERMODEHELPER=y
CONFIG_STATIC_USERMODEHELPER_PATH=""
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCE=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS=y
CONFIG_DEBUG_MODULE_LOAD_INFO=y
CONFIG_DEBUG_INFO=y
CONFIG_FRAME_WARN=2048
CONFIG_PAGE_OWNER=y
CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_PAGEALLOC=y
CONFIG_SLUB_DEBUG_PANIC_ON=y
CONFIG_DEBUG_PANIC_ON_OOM=y
CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
CONFIG_PAGE_POISONING=y
CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
CONFIG_DEBUG_OBJECTS=y
CONFIG_DEBUG_OBJECTS_FREE=y
CONFIG_DEBUG_OBJECTS_TIMERS=y
CONFIG_DEBUG_OBJECTS_WORK=y
CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
CONFIG_DEBUG_KMEMLEAK=y
CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_PANIC_ON_SCHED_BUG=y
CONFIG_PANIC_ON_RT_THROTTLING=y
CONFIG_SCHED_STACK_END_CHECK=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_LOCK_TORTURE_TEST=m
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_FAULT_INJECTION=y
CONFIG_FAIL_PAGE_ALLOC=y
CONFIG_FAULT_INJECTION_DEBUG_FS=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_IPC_LOGGING=y
CONFIG_QCOM_RTB=y
CONFIG_QCOM_RTB_SEPARATE_CPUS=y
CONFIG_PREEMPTIRQ_EVENTS=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_PREEMPT_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_LKDTM=y
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_MEMTEST=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_PANIC_ON_DATA_CORRUPTION=y
CONFIG_DEBUG_USER=y
CONFIG_FORCE_PAGES=y
CONFIG_PID_IN_CONTEXTIDR=y

View file

@ -130,6 +130,42 @@ config ARCH_MSM8917
select COMMON_CLK
select COMMON_CLK_QCOM
config ARCH_MSM8937
bool "Enable support for MSM8937"
select CPU_V7
select HAVE_ARM_ARCH_TIMER
select PINCTRL
select QCOM_SCM if SMP
select PM_DEVFREQ
select CLKDEV_LOOKUP
select HAVE_CLK
select HAVE_CLK_PREPARE
select COMMON_CLK_QCOM
config ARCH_SDM439
bool "Enable support for SDM439"
select CPU_V7
select HAVE_ARM_ARCH_TIMER
select PINCTRL
select QCOM_SCM if SMP
select PM_DEVFREQ
select CLKDEV_LOOKUP
select HAVE_CLK
select HAVE_CLK_PREPARE
select COMMON_CLK_QCOM
config ARCH_SDM429
bool "Enable support for SDM429"
select CPU_V7
select HAVE_ARM_ARCH_TIMER
select PINCTRL
select QCOM_SCM if SMP
select PM_DEVFREQ
select CLKDEV_LOOKUP
select HAVE_CLK
select HAVE_CLK_PREPARE
select COMMON_CLK_QCOM
config ARCH_SCUBA
bool "Enable Support for Qualcomm Technologies, Inc. SCUBA"
select COMMON_CLK_QCOM
@ -153,5 +189,27 @@ config ARCH_SCUBA
This enables support for the SCUBA chipset. If you do not
wish to build a kernel that runs on this chipset, say 'N' here.
config ARCH_MSM8953
bool "Enable support for MSM8953"
select CPU_V7
select HAVE_ARM_ARCH_TIMER
select PINCTRL
select QCOM_SCM if SMP
select PM_DEVFREQ
select COMMON_CLK
select COMMON_CLK_QCOM
select QCOM_GDSC
config ARCH_SDM450
bool "Enable support for SDM450"
select CPU_V7
select HAVE_ARM_ARCH_TIMER
select PINCTRL
select QCOM_SCM if SMP
select PM_DEVFREQ
select COMMON_CLK
select COMMON_CLK_QCOM
select QCOM_GDSC
endmenu
endif

View file

@ -4,3 +4,9 @@ obj-$(CONFIG_ARCH_BENGAL) += board-bengal.o
obj-$(CONFIG_ARCH_SCUBA) += board-scuba.o
obj-$(CONFIG_ARCH_SDM660) += board-660.o
obj-$(CONFIG_ARCH_MSM8917) += board-msm8917.o
obj-$(CONFIG_ARCH_QM215) += board-qm215.o
obj-$(CONFIG_ARCH_MSM8953) += board-msm8953.o
obj-$(CONFIG_ARCH_SDM450) += board-sdm450.o
obj-$(CONFIG_ARCH_MSM8937) += board-msm8937.o
obj-$(CONFIG_ARCH_SDM429) += board-sdm429.o
obj-$(CONFIG_ARCH_SDM439) += board-sdm439.o

View file

@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
* Copyright (C) 2020 XiaoMi, Inc.
*/
#include <linux/kernel.h>
@ -12,6 +11,7 @@
static const char *trinket_dt_match[] __initconst = {
"qcom,bengal",
"qcom,bengal-iot",
"qcom,bengalp-iot",
NULL
};

View file

@ -0,0 +1,25 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018, 2021, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include "board-dt.h"
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
static const char *msm8937_dt_match[] __initconst = {
"qcom,msm8937",
NULL
};
static void __init msm8937_init(void)
{
board_dt_populate(NULL);
}
DT_MACHINE_START(MSM8937_DT,
"Qualcomm Technologies, Inc. MSM8937 (Flattened Device Tree)")
.init_machine = msm8937_init,
.dt_compat = msm8937_dt_match,
MACHINE_END

View file

@ -0,0 +1,25 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017,2021, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include "board-dt.h"
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
static const char *msm8953_dt_match[] __initconst = {
"qcom,msm8953",
NULL
};
static void __init msm8953_init(void)
{
board_dt_populate(NULL);
}
DT_MACHINE_START(MSM8953_DT,
"Qualcomm Technologies, Inc. MSM8953 (Flattened Device Tree)")
.init_machine = msm8953_init,
.dt_compat = msm8953_dt_match,
MACHINE_END

View file

@ -0,0 +1,25 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018, 2021, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include "board-dt.h"
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
static const char *qm215_dt_match[] __initconst = {
"qcom,qm215",
NULL
};
static void __init qm215_init(void)
{
board_dt_populate(NULL);
}
DT_MACHINE_START(QM215_DT,
"Qualcomm Technologies, Inc. QM215")
.init_machine = qm215_init,
.dt_compat = qm215_dt_match,
MACHINE_END

View file

@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
* Copyright (C) 2020 XiaoMi, Inc.
*/
#include <linux/kernel.h>

View file

@ -0,0 +1,25 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018, 2021, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include "board-dt.h"
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
static const char *sdm429_dt_match[] __initconst = {
"qcom,sdm429",
NULL
};
static void __init sdm429_init(void)
{
board_dt_populate(NULL);
}
DT_MACHINE_START(SDM429_DT,
"Qualcomm Technologies, Inc. SDM429 (Flattened Device Tree)")
.init_machine = sdm429_init,
.dt_compat = sdm429_dt_match,
MACHINE_END

View file

@ -0,0 +1,25 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018, 2021, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include "board-dt.h"
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
static const char *sdm439_dt_match[] __initconst = {
"qcom,sdm439",
NULL
};
static void __init sdm439_init(void)
{
board_dt_populate(NULL);
}
DT_MACHINE_START(SDM439_DT,
"Qualcomm Technologies, Inc. SDM439 (Flattened Device Tree)")
.init_machine = sdm439_init,
.dt_compat = sdm439_dt_match,
MACHINE_END

View file

@ -0,0 +1,25 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017,2021, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include "board-dt.h"
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
static const char *sdm450_dt_match[] __initconst = {
"qcom,sdm450",
NULL
};
static void __init sdm450_init(void)
{
board_dt_populate(NULL);
}
DT_MACHINE_START(SDM450_DT,
"Qualcomm Technologies, Inc. SDM450 (Flattened Device Tree)")
.init_machine = sdm450_init,
.dt_compat = sdm450_dt_match,
MACHINE_END

View file

@ -20,7 +20,6 @@ config ARM64
select ARCH_HAS_KCOV
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_REFCOUNT_FULL
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX
@ -505,8 +504,6 @@ config ARM64_ERRATUM_1024718
config ARM64_ERRATUM_1188873
bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
default y
select ARM_ARCH_TIMER_OOL_WORKAROUND
depends on ARM_ARCH_TIMER
help
This option adds workarounds for ARM Cortex-A76 erratum 1188873
@ -1579,13 +1576,6 @@ config KRYO_PMU_WORKAROUND
Enable this flag for affected SoCs.
config BOOT_INFO
bool "Boot information from bootloader"
default y
help
On embedded Linux devices, we try to pass more information from the
bootloader to the kernel, e.g. the power-up reason.
config BUILD_ARM64_DT_OVERLAY
bool "enable DT overlay compilation support"
depends on OF

View file

@ -248,6 +248,26 @@ config ARCH_SDM439
This enables support for the sdm439 chipset. If you do not
wish to build a kernel that runs on this chipset, say 'N' here.
config ARCH_MSM8953
bool "Enable Support for Qualcomm Technologies Inc. MSM8953"
depends on ARCH_QCOM
select COMMON_CLK_QCOM
select QCOM_GDSC
select CPU_FREQ_QCOM
help
This enables support for the MSM8953 chipset. If you do not
wish to build a kernel that runs on this chipset, say 'N' here.
config ARCH_SDM450
bool "Enable Support for Qualcomm Technologies Inc. SDM450"
depends on ARCH_QCOM
select COMMON_CLK_QCOM
select QCOM_GDSC
select CPU_FREQ_QCOM
help
This enables support for the sdm450 chipset. If you do not
wish to build a kernel that runs on this chipset, say 'N' here.
config ARCH_ROCKCHIP
bool "Rockchip Platforms"
select ARCH_HAS_RESET_CONTROLLER

View file

@ -148,10 +148,6 @@ endif
KBUILD_DTBS := dtbs
KBUILD_DTBO_IMG := dtbo.img
KBUILD_DTB_IMG := dtb.img
ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
export DTC_FLAGS := -@
endif
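
The -@ passed through DTC_FLAGS when CONFIG_BUILD_ARM64_DT_OVERLAY=y makes dtc keep a __symbols__ node in the generated blobs, which is what lets device-tree overlays resolve labels when they are applied. A standalone sketch of the same flag; the file names are placeholders:

$ dtc -@ -I dts -O dtb -o base.dtb base.dts           # -@ retains symbols for overlay resolution
$ dtc -@ -I dts -O dtb -o overlay.dtbo overlay.dts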
@ -184,17 +180,6 @@ Image-dtb: vmlinux scripts dtbs
Image.gz-dtb: vmlinux scripts dtbs Image.gz
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
$(KBUILD_DTBO_IMG): dtbs
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
all: $(KBUILD_DTBO_IMG)
$(KBUILD_DTB_IMG): dtbs
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
all: $(KBUILD_DTB_IMG)
endif
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@

View file

@ -18,7 +18,7 @@ include $(srctree)/arch/arm64/boot/dts/Makefile
OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo dtbo.img dtb.img
targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo
DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES))
ifneq ($(DTB_NAMES),)
@ -28,8 +28,6 @@ DTB_LIST := $(dtb-y)
endif
DTB_OBJS := $(shell find $(obj)/dts/ -name \*.dtb)
DTBO_OBJS := $(shell find $(obj)/dts/ -name \*.dtbo)
# Add RTIC DTB to the DTB list if RTIC MPGen is enabled
# Note, we keep this for compatibility with
# BUILD_ARM64_APPENDED_DTB_IMAGE targets.
@ -70,12 +68,6 @@ $(obj)/Image.lzo: $(obj)/Image FORCE
$(obj)/Image.gz-dtb: $(obj)/Image.gz $(DTB_OBJS) FORCE
$(call if_changed,cat)
$(obj)/dtbo.img: $(DTBO_OBJS) FORCE
$(call if_changed,mkdtimg)
$(obj)/dtb.img: $(DTB_OBJS) FORCE
$(call if_changed,cat)
install:
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/Image System.map "$(INSTALL_PATH)"

View file

@ -16,6 +16,7 @@ subdir-y += lg
subdir-y += marvell
subdir-y += mediatek
subdir-y += nvidia
subdir-y += qcom
subdir-y += realtek
subdir-y += renesas
subdir-y += rockchip

View file

@ -1,15 +1,13 @@
CONFIG_LOCALVERSION="-CartelProject"
CONFIG_LOCALVERSION="-perf"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
CONFIG_BOEFFLA_WL_BLOCKER=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_SCHED_WALT=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_PSI=y
@ -19,7 +17,7 @@ CONFIG_RCU_FAST_NO_HZ=y
CONFIG_RCU_NOCB_CPU=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=21
CONFIG_IKHEADERS=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
@ -37,11 +35,13 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
# CONFIG_FHANDLE is not set
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
@ -50,9 +50,9 @@ CONFIG_PROFILING=y
CONFIG_HOTPLUG_SIZE_BITS=29
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_BENGAL=y
CONFIG_ARCH_SCUBA=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
CONFIG_HZ_100=y
CONFIG_SECCOMP=y
# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
CONFIG_HARDEN_BRANCH_PREDICTOR=y
@ -63,9 +63,6 @@ CONFIG_SETEND_EMULATION=y
CONFIG_ARM64_SW_TTBR0_PAN=y
# CONFIG_ARM64_VHE is not set
CONFIG_RANDOMIZE_BASE=y
CONFIG_CMDLINE="ramoops_memreserve=4M mitigations=off"
CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y
CONFIG_BUILD_ARM64_DT_OVERLAY=y
CONFIG_COMPAT=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
@ -74,7 +71,6 @@ CONFIG_ENERGY_MODEL=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
@ -83,6 +79,7 @@ CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_BOOST=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
CONFIG_ARM_QCOM_CPUFREQ_HW=y
CONFIG_MSM_TZ_LOG=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
@ -90,8 +87,7 @@ CONFIG_CRYPTO_GHASH_ARM64_CE=y
CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
# CONFIG_STACKPROTECTOR is not set
# CONFIG_VMAP_STACK is not set
CONFIG_PANIC_ON_REFCOUNT_ERROR=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
@ -99,7 +95,6 @@ CONFIG_MODULE_SIG=y
CONFIG_MODULE_SIG_FORCE=y
CONFIG_MODULE_SIG_SHA512=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_BLK_WBT=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_PARTITION_ADVANCED=y
@ -237,7 +232,6 @@ CONFIG_L2TP_V3=y
CONFIG_L2TP_IP=y
CONFIG_L2TP_ETH=y
CONFIG_BRIDGE=y
# CONFIG_BRIDGE_IGMP_SNOOPING is not set
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
CONFIG_NET_SCH_PRIO=y
@ -275,6 +269,7 @@ CONFIG_FW_LOADER_USER_HELPER=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
# CONFIG_FW_CACHE is not set
CONFIG_REGMAP_WCD_IRQ=y
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_ZRAM_DEDUP=y
@ -284,11 +279,7 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_QSEECOM=y
CONFIG_UID_SYS_STATS=y
CONFIG_SIMTRAY_STATUS=y
CONFIG_DPDT_STATUS=y
CONFIG_FPR_FPC=y
CONFIG_MI_FS=y
CONFIG_HQ_SYSFS_SUPPORT=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
@ -346,10 +337,10 @@ CONFIG_INPUT_JOYSTICK=y
CONFIG_JOYSTICK_XPAD=y
CONFIG_JOYSTICK_XPAD_LEDS=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_NT36672A=y
CONFIG_VITURALSAR=y
CONFIG_TOUCHSCREEN_FT8719=y
CONFIG_TOUCHSCREEN_XIAOMI_TOUCHFEATURE=y
# CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_REFLASH is not set
# CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_RECOVERY is not set
# CONFIG_TOUCHSCREEN_FTS is not set
# CONFIG_TOUCHSCREEN_NT36XXX is not set
CONFIG_INPUT_MISC=y
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_UINPUT=y
@ -363,7 +354,7 @@ CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_DIAG_CHAR=y
CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=y
CONFIG_MSM_RDBG=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QCOM_GENI=y
CONFIG_SPI=y
@ -372,6 +363,7 @@ CONFIG_SPI_SPIDEV=y
CONFIG_SPMI=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_PINCTRL_BENGAL=y
CONFIG_PINCTRL_SCUBA=y
CONFIG_GPIO_SYSFS=y
CONFIG_POWER_RESET_QCOM=y
CONFIG_POWER_RESET_XGENE=y
@ -410,11 +402,6 @@ CONFIG_REGULATOR_QPNP_LCDB=y
CONFIG_REGULATOR_RPM_SMD=y
CONFIG_REGULATOR_STUB=y
CONFIG_REGULATOR_PM8008=y
CONFIG_RC_CORE=y
CONFIG_LIRC=y
CONFIG_RC_DECODERS=y
CONFIG_IR_SPI=y
CONFIG_RC_DEVICES=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
@ -422,13 +409,12 @@ CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_VIDEO_FIXED_MINOR_RANGES=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_DVB_MPQ=y
CONFIG_DVB_MPQ_DEMUX=y
CONFIG_DVB_MPQ=m
CONFIG_DVB_MPQ_DEMUX=m
CONFIG_DVB_MPQ_SW=y
CONFIG_VIDEO_V4L2_VIDEOBUF2_CORE=y
CONFIG_DRM=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
@ -495,7 +481,7 @@ CONFIG_LEDS_QTI_FLASH=y
CONFIG_LEDS_PWM=y
CONFIG_LEDS_QTI_TRI_LED=y
CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
# CONFIG_LEDS_QPNP_VIBRATOR_LDO is not set
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PM8XXX=y
@ -507,7 +493,6 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ION=y
CONFIG_ION_POOL_AUTO_REFILL=y
CONFIG_QCA_CLD_WLAN=y
CONFIG_QPNP_REVID=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
@ -515,14 +500,17 @@ CONFIG_IPA3=y
CONFIG_IPA_WDI_UNIFIED_API=y
CONFIG_RMNET_IPA3=y
CONFIG_RNDIS_IPA=y
CONFIG_IPA_UT=y
CONFIG_USB_BAM=y
CONFIG_QCOM_GENI_SE=y
CONFIG_MACH_XIAOMI_LIME=y
CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_SPMI_PMIC_CLKDIV=y
CONFIG_SM_GPUCC_BENGAL=y
CONFIG_SM_DISPCC_BENGAL=y
CONFIG_SM_DEBUGCC_BENGAL=y
CONFIG_QM_DISPCC_SCUBA=y
CONFIG_QM_GPUCC_SCUBA=y
CONFIG_QM_DEBUGCC_SCUBA=y
CONFIG_HWSPINLOCK=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_MAILBOX=y
@ -530,6 +518,8 @@ CONFIG_QCOM_APCS_IPC=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_ARM_SMMU=y
CONFIG_QCOM_LAZY_MAPPING=y
CONFIG_IOMMU_DEBUG=y
CONFIG_IOMMU_TESTS=y
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_QCOM_GLINK_RPM=y
CONFIG_RPMSG_QCOM_GLINK_SMEM=y
@ -571,16 +561,15 @@ CONFIG_QCOM_GLINK_PKT=y
CONFIG_QCOM_SMP2P_SLEEPSTATE=y
CONFIG_MSM_CDSP_LOADER=y
CONFIG_QCOM_SMCINVOKE=y
CONFIG_SERIAL_NUM=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_PM=y
CONFIG_QTI_L2_REUSE=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QTEE_SHM_BRIDGE=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
# CONFIG_MSM_PERFORMANCE is not set
CONFIG_MSM_PERFORMANCE=y
CONFIG_QCOM_CDSP_RM=y
# CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION is not set
CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
CONFIG_QCOM_CX_IPEAK=y
CONFIG_QTI_CRYPTO_COMMON=y
CONFIG_QTI_CRYPTO_TZ=y
@ -610,9 +599,7 @@ CONFIG_ANDROID_BINDERFS=y
# CONFIG_NVMEM_SYSFS is not set
CONFIG_QCOM_QFPROM=y
CONFIG_NVMEM_SPMI_SDAM=y
CONFIG_STM=y
CONFIG_SLIMBUS_MSM_NGD=y
CONFIG_SWITCH=y
CONFIG_QCOM_KGSL=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@ -631,19 +618,10 @@ CONFIG_OVERLAY_FS=y
CONFIG_INCREMENTAL_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_EXFAT_FS=y
CONFIG_EXFAT_DEFAULT_CODEPAGE=437
CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8"
CONFIG_NTFS_FS=y
CONFIG_NTFS_RW=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_EFIVAR_FS=y
CONFIG_SDCARD_FS=y
CONFIG_PSTORE=y
CONFIG_PSTORE_CONSOLE=y
CONFIG_PSTORE_PMSG=y
CONFIG_PSTORE_RAM=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
@ -658,37 +636,39 @@ CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCE=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_STACK_HASH_ORDER_SHIFT=12
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_PAGE_OWNER=y
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_STACKTRACE=y
# CONFIG_RUNTIME_TESTING_MENU is not set
# CONFIG_ARM64_ERRATUM_826319 is not set
# CONFIG_ARM64_ERRATUM_827319 is not set
# CONFIG_ARM64_ERRATUM_824069 is not set
# CONFIG_ARM64_ERRATUM_819472 is not set
# CONFIG_ARM64_ERRATUM_832075 is not set
# CONFIG_ARM64_ERRATUM_845719 is not set
# CONFIG_ARM64_ERRATUM_843419 is not set
# CONFIG_ARM64_ERRATUM_1024718 is not set
# CONFIG_ARM64_ERRATUM_1463225 is not set
# CONFIG_ARM64_ERRATUM_1542418 is not set
# CONFIG_CAVIUM_ERRATUM_22375 is not set
# CONFIG_CAVIUM_ERRATUM_23154 is not set
# CONFIG_CAVIUM_ERRATUM_27456 is not set
# CONFIG_CAVIUM_ERRATUM_30115 is not set
# CONFIG_QCOM_FALKOR_ERRATUM_1003 is not set
# CONFIG_QCOM_FALKOR_ERRATUM_1009 is not set
# CONFIG_QCOM_QDF2400_ERRATUM_0065 is not set
# CONFIG_SOCIONEXT_SYNQUACER_PREITS is not set
# CONFIG_HISILICON_ERRATUM_161600802 is not set
# CONFIG_QCOM_FALKOR_ERRATUM_E1041 is not set
# CONFIG_FSL_ERRATUM_A008585 is not set
# CONFIG_HISILICON_ERRATUM_161010101 is not set
# CONFIG_ARM64_ERRATUM_858921 is not set
CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_DEBUG_LIST=y
CONFIG_IPC_LOGGING=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
CONFIG_CORESIGHT_STM=y
CONFIG_CORESIGHT_CTI=y
CONFIG_CORESIGHT_CTI_SAVE_DISABLE=y
CONFIG_CORESIGHT_TPDA=y
CONFIG_CORESIGHT_TPDM=y
CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_TGU=y
CONFIG_TOUCHSCREEN_GOODIX_GT1X=y
CONFIG_KEYBOARD_AW9523B=y
CONFIG_GPIO_PCA953X=y
CONFIG_LEDS_GPIO=y
# CONFIG_TOUCHSCREEN_ST is not set
CONFIG_AW862XX_HAPTIC=y
CONFIG_MICROARRAY_FINGERPRINT=y

View file

@ -54,7 +54,6 @@ CONFIG_ARCH_BENGAL=y
CONFIG_ARCH_SCUBA=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
CONFIG_HZ_100=y
CONFIG_SECCOMP=y
# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
CONFIG_HARDEN_BRANCH_PREDICTOR=y
@ -349,10 +348,10 @@ CONFIG_INPUT_JOYSTICK=y
CONFIG_JOYSTICK_XPAD=y
CONFIG_JOYSTICK_XPAD_LEDS=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_REFLASH=y
CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_RECOVERY=y
CONFIG_TOUCHSCREEN_FTS=y
CONFIG_TOUCHSCREEN_NT36XXX=y
# CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_REFLASH is not set
# CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_RECOVERY is not set
# CONFIG_TOUCHSCREEN_FTS is not set
# CONFIG_TOUCHSCREEN_NT36XXX is not set
CONFIG_INPUT_MISC=y
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_UINPUT=y
@ -497,7 +496,7 @@ CONFIG_LEDS_QTI_FLASH=y
CONFIG_LEDS_PWM=y
CONFIG_LEDS_QTI_TRI_LED=y
CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
# CONFIG_LEDS_QPNP_VIBRATOR_LDO is not set
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_EDAC=y
CONFIG_EDAC_CORTEX_ARM64=y
@ -740,3 +739,11 @@ CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_TGU=y
CONFIG_TOUCHSCREEN_GOODIX_GT1X=y
CONFIG_KEYBOARD_AW9523B=y
CONFIG_GPIO_PCA953X=y
CONFIG_LEDS_GPIO=y
# CONFIG_TOUCHSCREEN_ST is not set
CONFIG_AW862XX_HAPTIC=y
CONFIG_MICROARRAY_FINGERPRINT=y
CONFIG_AW862xx_HAPTIC=y

View file

@ -306,6 +306,7 @@ CONFIG_SCSI_UFS_CRYPTO_QTI=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_SNAPSHOT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
@ -377,6 +378,7 @@ CONFIG_SPI_QCOM_GENI=y
CONFIG_SPI_SPIDEV=y
CONFIG_SPMI=y
CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
CONFIG_PINCTRL_SX150X=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_PINCTRL_KONA=y
CONFIG_GPIO_SYSFS=y
@ -448,6 +450,7 @@ CONFIG_SND_DYNAMIC_MINORS=y
CONFIG_SND_USB_AUDIO=y
CONFIG_SND_USB_AUDIO_QMI=y
CONFIG_SND_SOC=y
CONFIG_HIDRAW=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
CONFIG_HID_ELECOM=y
@ -530,8 +533,6 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ION=y
CONFIG_ION_POOL_AUTO_REFILL=y
CONFIG_QCA_CLD_WLAN=y
CONFIG_QCA_CLD_WLAN_PROFILE="qca6390"
CONFIG_QPNP_REVID=y
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
@ -645,6 +646,8 @@ CONFIG_DEVFREQ_GOV_STATICMAP=y
CONFIG_EXTCON_USB_GPIO=y
CONFIG_IIO=y
CONFIG_QCOM_SPMI_ADC5=y
CONFIG_CH101_I2C=y
CONFIG_ADS7052_TDK_THERMISTOR=y
CONFIG_PWM=y
CONFIG_PWM_QTI_LPG=y
CONFIG_QCOM_PDC=y

View file

@ -321,6 +321,7 @@ CONFIG_SCSI_UFS_CRYPTO_QTI=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_SNAPSHOT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
@ -395,6 +396,7 @@ CONFIG_SPI_QCOM_GENI=y
CONFIG_SPI_SPIDEV=y
CONFIG_SPMI=y
CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
CONFIG_PINCTRL_SX150X=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_PINCTRL_KONA=y
CONFIG_GPIO_SYSFS=y
@ -467,6 +469,7 @@ CONFIG_SND_DYNAMIC_MINORS=y
CONFIG_SND_USB_AUDIO=y
CONFIG_SND_USB_AUDIO_QMI=y
CONFIG_SND_SOC=y
CONFIG_HIDRAW=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
CONFIG_HID_ELECOM=y
@ -675,6 +678,8 @@ CONFIG_DEVFREQ_GOV_STATICMAP=y
CONFIG_EXTCON_USB_GPIO=y
CONFIG_IIO=y
CONFIG_QCOM_SPMI_ADC5=y
CONFIG_CH101_I2C=y
CONFIG_ADS7052_TDK_THERMISTOR=y
CONFIG_PWM=y
CONFIG_PWM_QTI_LPG=y
CONFIG_QCOM_PDC=y

View file

@ -21,37 +21,13 @@
#define __ASM_ATOMIC_H
#include <linux/compiler.h>
#include <linux/stringify.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/brk-imm.h>
#include <asm/lse.h>
#ifdef __KERNEL__
/*
* To avoid having to allocate registers that pass the counter address and
* address of the call site to the overflow handler, encode the register and
* call site offset in a dummy cbz instruction that we can decode later.
*/
#define REFCOUNT_CHECK_TAIL \
" .subsection 1\n" \
"33: brk " __stringify(REFCOUNT_BRK_IMM) "\n" \
" cbz %[counter], 22b\n" /* never reached */ \
" .previous\n"
#define REFCOUNT_POST_CHECK_NEG \
"22: b.mi 33f\n" \
REFCOUNT_CHECK_TAIL
#define REFCOUNT_POST_CHECK_NEG_OR_ZERO \
" b.eq 33f\n" \
REFCOUNT_POST_CHECK_NEG
#define REFCOUNT_PRE_CHECK_ZERO(reg) "ccmp " #reg ", wzr, #8, pl\n"
#define REFCOUNT_PRE_CHECK_NONE(reg)
#define __ARM64_IN_ATOMIC_IMPL
#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)

View file

@ -327,54 +327,4 @@ __CMPXCHG_DBL(_mb, dmb ish, l, "memory")
#undef __CMPXCHG_DBL
#define REFCOUNT_OP(op, asm_op, pre, post, l) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(__refcount_##op(int i, atomic_t *r)) \
{ \
unsigned int tmp; \
int result; \
\
asm volatile("// refcount_" #op "\n" \
" prfm pstl1strm, %[cval]\n" \
"1: ldxr %w1, %[cval]\n" \
" " #asm_op " %w[val], %w1, %w[i]\n" \
REFCOUNT_PRE_CHECK_ ## pre (%w1) \
" st" #l "xr %w1, %w[val], %[cval]\n" \
" cbnz %w1, 1b\n" \
REFCOUNT_POST_CHECK_ ## post \
: [val] "=&r"(result), "=&r"(tmp), [cval] "+Q"(r->counter) \
: [counter] "r"(&r->counter), [i] "Ir" (i) \
: "cc"); \
\
return result; \
} \
__LL_SC_EXPORT(__refcount_##op);
REFCOUNT_OP(add_lt, adds, ZERO, NEG_OR_ZERO, );
REFCOUNT_OP(sub_lt, subs, NONE, NEG, l);
REFCOUNT_OP(sub_le, subs, NONE, NEG_OR_ZERO, l);
__LL_SC_INLINE int
__LL_SC_PREFIX(__refcount_add_not_zero(int i, atomic_t *r))
{
unsigned int tmp;
int result;
asm volatile("// refcount_add_not_zero\n"
" prfm pstl1strm, %[cval]\n"
"1: ldxr %w[val], %[cval]\n"
" cbz %w[val], 2f\n"
" adds %w[val], %w[val], %w[i]\n"
" stxr %w1, %w[val], %[cval]\n"
" cbnz %w1, 1b\n"
REFCOUNT_POST_CHECK_NEG
"2:"
: [val] "=&r" (result), "=&r" (tmp), [cval] "+Q" (r->counter)
: [counter] "r"(&r->counter), [i] "Ir" (i)
: "cc");
return result;
}
__LL_SC_EXPORT(__refcount_add_not_zero);
#endif /* __ASM_ATOMIC_LL_SC_H */

View file

@ -32,12 +32,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(op) \
__nops(1), \
/* LSE atomics */ \
" prfm pstl1strm, %[v]\n" \
asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \
" " #asm_op " %w[i], %[v]\n") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
@ -59,10 +54,8 @@ static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(fetch_##op##name) \
__nops(1), \
__LL_SC_ATOMIC(fetch_##op##name), \
/* LSE atomics */ \
" prfm pstl1strm, %[v]\n" \
" " #asm_op #mb " %w[i], %w[i], %[v]") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
@ -94,9 +87,8 @@ static inline int atomic_add_return##name(int i, atomic_t *v) \
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(add_return##name) \
__nops(2), \
__nops(1), \
/* LSE atomics */ \
" prfm pstl1strm, %[v]\n" \
" ldadd" #mb " %w[i], w30, %[v]\n" \
" add %w[i], %w[i], w30") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
@ -121,9 +113,8 @@ static inline void atomic_and(int i, atomic_t *v)
asm volatile(ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC(and)
__nops(2),
__nops(1),
/* LSE atomics */
" prfm pstl1strm, %[v]\n"
" mvn %w[i], %w[i]\n"
" stclr %w[i], %[v]")
: [i] "+&r" (w0), [v] "+Q" (v->counter)
@ -140,9 +131,8 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(fetch_and##name) \
__nops(2), \
__nops(1), \
/* LSE atomics */ \
" prfm pstl1strm, %[v]\n" \
" mvn %w[i], %w[i]\n" \
" ldclr" #mb " %w[i], %w[i], %[v]") \
: [i] "+&r" (w0), [v] "+Q" (v->counter) \
@ -167,9 +157,8 @@ static inline void atomic_sub(int i, atomic_t *v)
asm volatile(ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC(sub)
__nops(2),
__nops(1),
/* LSE atomics */
" prfm pstl1strm, %[v]\n"
" neg %w[i], %w[i]\n"
" stadd %w[i], %[v]")
: [i] "+&r" (w0), [v] "+Q" (v->counter)
@ -186,9 +175,8 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(sub_return##name) \
__nops(3), \
__nops(2), \
/* LSE atomics */ \
" prfm pstl1strm, %[v]\n" \
" neg %w[i], %w[i]\n" \
" ldadd" #mb " %w[i], w30, %[v]\n" \
" add %w[i], %w[i], w30") \
@ -215,9 +203,8 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(fetch_sub##name) \
__nops(2), \
__nops(1), \
/* LSE atomics */ \
" prfm pstl1strm, %[v]\n" \
" neg %w[i], %w[i]\n" \
" ldadd" #mb " %w[i], %w[i], %[v]") \
: [i] "+&r" (w0), [v] "+Q" (v->counter) \
@ -242,12 +229,7 @@ static inline void atomic64_##op(long i, atomic64_t *v) \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(op) \
__nops(1), \
/* LSE atomics */ \
" prfm pstl1strm, %[v]\n" \
asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
" " #asm_op " %[i], %[v]\n") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
@ -269,10 +251,8 @@ static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(fetch_##op##name) \
__nops(1), \
__LL_SC_ATOMIC64(fetch_##op##name), \
/* LSE atomics */ \
" prfm pstl1strm, %[v]\n" \
" " #asm_op #mb " %[i], %[i], %[v]") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
@ -304,9 +284,8 @@ static inline long atomic64_add_return##name(long i, atomic64_t *v) \
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(add_return##name) \
__nops(2), \
__nops(1), \
/* LSE atomics */ \
" prfm pstl1strm, %[v]\n" \
" ldadd" #mb " %[i], x30, %[v]\n" \
" add %[i], %[i], x30") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
@ -331,9 +310,8 @@ static inline void atomic64_and(long i, atomic64_t *v)
asm volatile(ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC64(and)
__nops(2),
__nops(1),
/* LSE atomics */
" prfm pstl1strm, %[v]\n"
" mvn %[i], %[i]\n"
" stclr %[i], %[v]")
: [i] "+&r" (x0), [v] "+Q" (v->counter)
@ -350,9 +328,8 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(fetch_and##name) \
__nops(2), \
__nops(1), \
/* LSE atomics */ \
" prfm pstl1strm, %[v]\n" \
" mvn %[i], %[i]\n" \
" ldclr" #mb " %[i], %[i], %[v]") \
: [i] "+&r" (x0), [v] "+Q" (v->counter) \
@ -377,9 +354,8 @@ static inline void atomic64_sub(long i, atomic64_t *v)
asm volatile(ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC64(sub)
__nops(2),
__nops(1),
/* LSE atomics */
" prfm pstl1strm, %[v]\n"
" neg %[i], %[i]\n"
" stadd %[i], %[v]")
: [i] "+&r" (x0), [v] "+Q" (v->counter)
@ -396,9 +372,8 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(sub_return##name) \
__nops(3), \
__nops(2), \
/* LSE atomics */ \
" prfm pstl1strm, %[v]\n" \
" neg %[i], %[i]\n" \
" ldadd" #mb " %[i], x30, %[v]\n" \
" add %[i], %[i], x30") \
@ -425,9 +400,8 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(fetch_sub##name) \
__nops(2), \
__nops(1), \
/* LSE atomics */ \
" prfm pstl1strm, %[v]\n" \
" neg %[i], %[i]\n" \
" ldadd" #mb " %[i], %[i], %[v]") \
: [i] "+&r" (x0), [v] "+Q" (v->counter) \
@ -451,9 +425,8 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
asm volatile(ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC64(dec_if_positive)
__nops(7),
__nops(6),
/* LSE atomics */
" prfm pstl1strm, %[v]\n"
"1: ldr x30, %[v]\n"
" subs %[ret], x30, #1\n"
" b.lt 2f\n"
@ -485,9 +458,8 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_CMPXCHG(name) \
__nops(3), \
__nops(2), \
/* LSE atomics */ \
" prfm pstl1strm, %[v]\n" \
" mov " #w "30, %" #w "[old]\n" \
" cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \
" mov %" #w "[ret], " #w "30") \
@ -538,9 +510,8 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_CMPXCHG_DBL(name) \
__nops(4), \
__nops(3), \
/* LSE atomics */ \
" prfm pstl1strm, %[v]\n" \
" casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
" eor %[old1], %[old1], %[oldval1]\n" \
" eor %[old2], %[old2], %[oldval2]\n" \
@ -560,88 +531,4 @@ __CMPXCHG_DBL(_mb, al, "memory")
#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL
#define REFCOUNT_ADD_OP(op, pre, post) \
static inline int __refcount_##op(int i, atomic_t *r) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = r; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_CALL(__refcount_##op) \
" cmp %w0, wzr\n" \
__nops(2), \
/* LSE atomics */ \
" prfm pstl1strm, %[cval]\n" \
" ldadd %w[i], w30, %[cval]\n" \
" adds %w[i], %w[i], w30\n" \
REFCOUNT_PRE_CHECK_ ## pre (w30)) \
REFCOUNT_POST_CHECK_ ## post \
: [i] "+r" (w0), [cval] "+Q" (r->counter) \
: [counter] "r"(&r->counter), "r" (x1) \
: __LL_SC_CLOBBERS, "cc"); \
\
return w0; \
}
REFCOUNT_ADD_OP(add_lt, ZERO, NEG_OR_ZERO);
#define REFCOUNT_SUB_OP(op, post) \
static inline int __refcount_##op(int i, atomic_t *r) \
{ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = r; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_CALL(__refcount_##op) \
" cmp %w0, wzr\n" \
__nops(2), \
/* LSE atomics */ \
" prfm pstl1strm, %[cval]\n" \
" neg %w[i], %w[i]\n" \
" ldaddl %w[i], w30, %[cval]\n" \
" adds %w[i], %w[i], w30\n") \
REFCOUNT_POST_CHECK_ ## post \
: [i] "+r" (w0), [cval] "+Q" (r->counter) \
: [counter] "r" (&r->counter), "r" (x1) \
: __LL_SC_CLOBBERS, "cc"); \
\
return w0; \
}
REFCOUNT_SUB_OP(sub_lt, NEG);
REFCOUNT_SUB_OP(sub_le, NEG_OR_ZERO);
static inline int __refcount_add_not_zero(int i, atomic_t *r)
{
register int result asm ("w0");
register atomic_t *x1 asm ("x1") = r;
asm volatile(ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
" mov %w0, %w[i]\n"
__LL_SC_CALL(__refcount_add_not_zero)
" cmp %w0, wzr\n"
__nops(7),
/* LSE atomics */
" prfm pstl1strm, %[cval]\n"
" ldr %w0, %[cval]\n"
"1: cmp %w0, wzr\n"
" b.eq 2f\n"
" add w30, %w0, %w[i]\n"
" cas %w0, w30, %[cval]\n"
" sub w30, w30, %w[i]\n"
" cmp %w0, w30\n"
" b.ne 1b\n"
" adds %w0, w30, %w[i]\n"
"2:\n")
REFCOUNT_POST_CHECK_NEG
: "=&r" (result), [cval] "+Q" (r->counter)
: [counter] "r" (&r->counter), [i] "Ir" (i), "r" (x1)
: __LL_SC_CLOBBERS, "cc");
return result;
}
#endif /* __ASM_ATOMIC_LSE_H */

View file

@ -19,11 +19,9 @@
* 0x9xx: tag-based KASAN trap (allowed values 0x900 - 0x9ff)
*/
#define FAULT_BRK_IMM 0x100
#define REFCOUNT_BRK_IMM 0x101
#define KGDB_DYN_DBG_BRK_IMM 0x400
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
#define BUG_BRK_IMM 0x800
#define KASAN_BRK_IMM 0x900
#define KASAN_BRK_MASK 0x0ff
#endif

View file

@ -95,24 +95,18 @@ struct step_hook {
int (*fn)(struct pt_regs *regs, unsigned int esr);
};
void register_user_step_hook(struct step_hook *hook);
void unregister_user_step_hook(struct step_hook *hook);
void register_kernel_step_hook(struct step_hook *hook);
void unregister_kernel_step_hook(struct step_hook *hook);
void register_step_hook(struct step_hook *hook);
void unregister_step_hook(struct step_hook *hook);
struct break_hook {
struct list_head node;
u32 esr_val;
u32 esr_mask;
int (*fn)(struct pt_regs *regs, unsigned int esr);
u16 imm;
u16 mask; /* These bits are ignored when comparing with imm */
};
void register_user_break_hook(struct break_hook *hook);
void unregister_user_break_hook(struct break_hook *hook);
void register_kernel_break_hook(struct break_hook *hook);
void unregister_kernel_break_hook(struct break_hook *hook);
void register_break_hook(struct break_hook *hook);
void unregister_break_hook(struct break_hook *hook);
u8 debug_monitors_arch(void);
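For orientation, a minimal, hedged sketch of hooking a BRK instruction through the unified break_hook interface shown above (register_break_hook() with esr_val/esr_mask), assuming that is the form kept in this tree; the immediate value 0x7ff and the handler are illustrative only and not taken from this source.
#include <linux/printk.h>
#include <linux/ptrace.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
/* Illustrative handler: report the fault location and step over the BRK. */
static int demo_brk_handler(struct pt_regs *regs, unsigned int esr)
{
	pr_info("demo brk hit at pc=0x%llx\n", regs->pc);
	regs->pc += 4;		/* skip the 4-byte BRK instruction */
	return DBG_HOOK_HANDLED;
}
static struct break_hook demo_brk_hook = {
	.esr_val  = (u32)ESR_ELx_VAL_BRK64(0x7ff),	/* hypothetical immediate */
	.esr_mask = 0xffffffff,
	.fn       = demo_brk_handler,
};
/* Registered from built-in init code, e.g. after debug_traps_init():
 *	register_break_hook(&demo_brk_hook);
 */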

View file

@ -44,7 +44,6 @@ arm64-obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
arm64-obj-$(CONFIG_KGDB) += kgdb.o
arm64-obj-$(CONFIG_BOOT_INFO) += bootinfo.o
arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o \
efi-rt-wrapper.o
arm64-obj-$(CONFIG_PCI) += pci.o

View file

@ -167,46 +167,25 @@ NOKPROBE_SYMBOL(clear_user_regs_spsr_ss);
#define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs)
#define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs)
static DEFINE_SPINLOCK(debug_hook_lock);
static LIST_HEAD(user_step_hook);
static LIST_HEAD(kernel_step_hook);
/* EL1 Single Step Handler hooks */
static LIST_HEAD(step_hook);
static DEFINE_SPINLOCK(step_hook_lock);
static void register_debug_hook(struct list_head *node, struct list_head *list)
void register_step_hook(struct step_hook *hook)
{
spin_lock(&debug_hook_lock);
list_add_rcu(node, list);
spin_unlock(&debug_hook_lock);
spin_lock(&step_hook_lock);
list_add_rcu(&hook->node, &step_hook);
spin_unlock(&step_hook_lock);
}
static void unregister_debug_hook(struct list_head *node)
void unregister_step_hook(struct step_hook *hook)
{
spin_lock(&debug_hook_lock);
list_del_rcu(node);
spin_unlock(&debug_hook_lock);
spin_lock(&step_hook_lock);
list_del_rcu(&hook->node);
spin_unlock(&step_hook_lock);
synchronize_rcu();
}
void register_user_step_hook(struct step_hook *hook)
{
register_debug_hook(&hook->node, &user_step_hook);
}
void unregister_user_step_hook(struct step_hook *hook)
{
unregister_debug_hook(&hook->node);
}
void register_kernel_step_hook(struct step_hook *hook)
{
register_debug_hook(&hook->node, &kernel_step_hook);
}
void unregister_kernel_step_hook(struct step_hook *hook)
{
unregister_debug_hook(&hook->node);
}
/*
* Call registered single step handlers
* There is no Syndrome info to check for determining the handler.
@ -216,14 +195,11 @@ void unregister_kernel_step_hook(struct step_hook *hook)
static int call_step_hook(struct pt_regs *regs, unsigned int esr)
{
struct step_hook *hook;
struct list_head *list;
int retval = DBG_HOOK_ERROR;
list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;
rcu_read_lock();
list_for_each_entry_rcu(hook, list, node) {
list_for_each_entry_rcu(hook, &step_hook, node) {
retval = hook->fn(regs, esr);
if (retval == DBG_HOOK_HANDLED)
break;
@ -302,44 +278,33 @@ NOKPROBE_SYMBOL(single_step_handler);
* hit within a breakpoint handler, especially in kprobes.
* Use reader/writer locks instead of plain spinlock.
*/
static LIST_HEAD(user_break_hook);
static LIST_HEAD(kernel_break_hook);
static LIST_HEAD(break_hook);
static DEFINE_SPINLOCK(break_hook_lock);
void register_user_break_hook(struct break_hook *hook)
void register_break_hook(struct break_hook *hook)
{
register_debug_hook(&hook->node, &user_break_hook);
spin_lock(&break_hook_lock);
list_add_rcu(&hook->node, &break_hook);
spin_unlock(&break_hook_lock);
}
void unregister_user_break_hook(struct break_hook *hook)
void unregister_break_hook(struct break_hook *hook)
{
unregister_debug_hook(&hook->node);
}
void register_kernel_break_hook(struct break_hook *hook)
{
register_debug_hook(&hook->node, &kernel_break_hook);
}
void unregister_kernel_break_hook(struct break_hook *hook)
{
unregister_debug_hook(&hook->node);
spin_lock(&break_hook_lock);
list_del_rcu(&hook->node);
spin_unlock(&break_hook_lock);
synchronize_rcu();
}
static int call_break_hook(struct pt_regs *regs, unsigned int esr)
{
struct break_hook *hook;
struct list_head *list;
int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;
rcu_read_lock();
list_for_each_entry_rcu(hook, list, node) {
unsigned int comment = esr & BRK64_ESR_MASK;
if ((comment & ~hook->mask) == hook->imm)
list_for_each_entry_rcu(hook, &break_hook, node)
if ((esr & hook->esr_mask) == hook->esr_val)
fn = hook->fn;
}
rcu_read_unlock();
return fn ? fn(regs, esr) : DBG_HOOK_ERROR;

View file

@ -275,13 +275,15 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
NOKPROBE_SYMBOL(kgdb_step_brk_fn);
static struct break_hook kgdb_brkpt_hook = {
.fn = kgdb_brk_fn,
.imm = KGDB_DYN_DBG_BRK_IMM,
.esr_mask = 0xffffffff,
.esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_DYN_DBG_BRK_IMM),
.fn = kgdb_brk_fn
};
static struct break_hook kgdb_compiled_brkpt_hook = {
.fn = kgdb_compiled_brk_fn,
.imm = KGDB_COMPILED_DBG_BRK_IMM,
.esr_mask = 0xffffffff,
.esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_COMPILED_DBG_BRK_IMM),
.fn = kgdb_compiled_brk_fn
};
static struct step_hook kgdb_step_hook = {
@ -342,9 +344,9 @@ int kgdb_arch_init(void)
if (ret != 0)
return ret;
register_kernel_break_hook(&kgdb_brkpt_hook);
register_kernel_break_hook(&kgdb_compiled_brkpt_hook);
register_kernel_step_hook(&kgdb_step_hook);
register_break_hook(&kgdb_brkpt_hook);
register_break_hook(&kgdb_compiled_brkpt_hook);
register_step_hook(&kgdb_step_hook);
return 0;
}
@ -355,9 +357,9 @@ int kgdb_arch_init(void)
*/
void kgdb_arch_exit(void)
{
unregister_kernel_break_hook(&kgdb_brkpt_hook);
unregister_kernel_break_hook(&kgdb_compiled_brkpt_hook);
unregister_kernel_step_hook(&kgdb_step_hook);
unregister_break_hook(&kgdb_brkpt_hook);
unregister_break_hook(&kgdb_compiled_brkpt_hook);
unregister_step_hook(&kgdb_step_hook);
unregister_die_notifier(&kgdb_notifier);
}

View file

@ -195,7 +195,8 @@ static int uprobe_single_step_handler(struct pt_regs *regs,
/* uprobe breakpoint handler hook */
static struct break_hook uprobes_break_hook = {
.imm = BRK64_ESR_UPROBES,
.esr_mask = BRK64_ESR_MASK,
.esr_val = BRK64_ESR_UPROBES,
.fn = uprobe_breakpoint_handler,
};
@ -206,8 +207,8 @@ static struct step_hook uprobes_step_hook = {
static int __init arch_init_uprobes(void)
{
register_user_break_hook(&uprobes_break_hook);
register_user_step_hook(&uprobes_step_hook);
register_break_hook(&uprobes_break_hook);
register_step_hook(&uprobes_step_hook);
return 0;
}

View file

@ -975,8 +975,9 @@ static int bug_handler(struct pt_regs *regs, unsigned int esr)
}
static struct break_hook bug_break_hook = {
.esr_val = 0xf2000000 | BUG_BRK_IMM,
.esr_mask = 0xffffffff,
.fn = bug_handler,
.imm = BUG_BRK_IMM,
};
#ifdef CONFIG_KASAN_SW_TAGS
@ -1025,9 +1026,9 @@ static int kasan_handler(struct pt_regs *regs, unsigned int esr)
#define KASAN_ESR_MASK 0xffffff00
static struct break_hook kasan_break_hook = {
.esr_val = KASAN_ESR_VAL,
.esr_mask = KASAN_ESR_MASK,
.fn = kasan_handler,
.imm = KASAN_BRK_IMM,
.mask = KASAN_BRK_MASK,
};
#endif
@ -1045,48 +1046,11 @@ int __init early_brk64(unsigned long addr, unsigned int esr,
return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}
static int refcount_overflow_handler(struct pt_regs *regs, unsigned int esr)
{
u32 dummy_cbz = le32_to_cpup((__le32 *)(regs->pc + 4));
bool zero = regs->pstate & PSR_Z_BIT;
u32 rt;
/*
* Find the register that holds the counter address from the
* dummy 'cbz' instruction that follows the 'brk' instruction
* that sent us here.
*/
rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, dummy_cbz);
/* First unconditionally saturate the refcount. */
*(int *)regs->regs[rt] = INT_MIN / 2;
/*
* This function has been called because either a negative refcount
* value was seen by any of the refcount functions, or a zero
* refcount value was seen by refcount_{add,dec}().
*/
/* point pc to the branch instruction that detected the overflow */
regs->pc += 4 + aarch64_get_branch_offset(dummy_cbz);
refcount_error_report(regs, zero ? "hit zero" : "overflow");
/* advance pc and proceed */
regs->pc += 4;
return DBG_HOOK_HANDLED;
}
static struct break_hook refcount_break_hook = {
.fn = refcount_overflow_handler,
.imm = REFCOUNT_BRK_IMM,
};
/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
register_kernel_break_hook(&bug_break_hook);
register_break_hook(&bug_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
register_kernel_break_hook(&kasan_break_hook);
register_break_hook(&kasan_break_hook);
#endif
register_kernel_break_hook(&refcount_break_hook);
}

View file

@ -76,6 +76,10 @@ jiffies = jiffies_64;
#define TRAMP_TEXT
#endif
#define RTIC_BSS \
. = ALIGN(PAGE_SIZE); \
KEEP(*(.bss.rtic)); \
. = ALIGN(PAGE_SIZE); \
/*
* The size of the PE/COFF section that covers the kernel image, which
* runs from stext to _edata, must be a round multiple of the PE/COFF
@ -256,6 +260,10 @@ SECTIONS
STABS_DEBUG
HEAD_SYMBOLS
.bss : { /* bss segment */
RTIC_BSS
}
}
/*

View file

@ -1,15 +1,3 @@
#include <asm/atomic.h>
#define __ARM64_IN_ATOMIC_IMPL
/*
* Disarm the refcount checks in the out-of-line LL/SC routines. These are
* redundant, given that the LSE callers already perform the same checks.
* We do have to make sure that we exit with a zero value if the pre-check
* detected a zero value.
*/
#undef REFCOUNT_POST_CHECK_NEG
#undef REFCOUNT_POST_CHECK_NEG_OR_ZERO
#define REFCOUNT_POST_CHECK_NEG
#define REFCOUNT_POST_CHECK_NEG_OR_ZERO "csel %w[val], wzr, %w[val], eq\n"
#include <asm/atomic_ll_sc.h>

View file

@ -51,7 +51,6 @@ C_h .req x12
D_l .req x13
D_h .req x14
prfm pldl1strm, [src, #(1*L1_CACHE_BYTES)]
mov dst, dstin
cmp count, #16
/*When memory length is less than 16, the accessed are not aligned.*/
@ -182,7 +181,6 @@ D_h .req x14
ldp1 C_l, C_h, src, #16
stp1 D_l, D_h, dst, #16
ldp1 D_l, D_h, src, #16
prfm pldl1strm, [src, #(4*L1_CACHE_BYTES)]
subs count, count, #64
b.ge 1b
stp1 A_l, A_h, dst, #16

View file

@ -1,131 +1,258 @@
/*
* Copyright (c) 2017 ARM Ltd
* All rights reserved.
* Copyright (C) 2013 ARM Ltd.
* Copyright (C) 2013 Linaro.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
* This code is based on glibc cortex strings work originally authored by Linaro
* and re-licensed under GPLv2 for the Linux kernel. The original code can
* be found @
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
* files/head:/src/aarch64/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* Assumptions:
*
* ARMv8-a, AArch64, unaligned accesses.
*/
/* includes here */
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
* compare memory areas (when the two areas have different alignment
* offsets, the unaligned accesses are handled by the hardware)
*
* Parameters:
* x0 - const memory area 1 pointer
* x1 - const memory area 2 pointer
* x2 - the maximal compare byte length
* Returns:
* x0 - a compare result, which may be less than, equal to, or greater than ZERO
*/
/* Parameters and result. */
#define src1 x0
#define src2 x1
#define limit x2
#define result w0
src1 .req x0
src2 .req x1
limit .req x2
result .req x0
/* Internal variables. */
#define data1 x3
#define data1w w3
#define data2 x4
#define data2w w4
#define tmp1 x5
data1 .req x3
data1w .req w3
data2 .req x4
data2w .req w4
has_nul .req x5
diff .req x6
endloop .req x7
tmp1 .req x8
tmp2 .req x9
tmp3 .req x10
pos .req x11
limit_wd .req x12
mask .req x13
/* Small inputs of less than 8 bytes are handled separately. This allows the
main code to be sped up using unaligned loads since there are now at least
8 bytes to be compared. If the first 8 bytes are equal, align src1.
This ensures each iteration does at most one unaligned access even if both
src1 and src2 are unaligned, and mutually aligned inputs behave as if
aligned. After the main loop, process the last 8 bytes using unaligned
accesses. */
.p2align 6
WEAK(memcmp)
subs limit, limit, 8
b.lo .Lless8
cbz limit, .Lret0
eor tmp1, src1, src2
tst tmp1, #7
b.ne .Lmisaligned8
ands tmp1, src1, #7
b.ne .Lmutual_align
sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */
/*
* The input source addresses are at alignment boundary.
* Directly compare eight bytes each time.
*/
.Lloop_aligned:
ldr data1, [src1], #8
ldr data2, [src2], #8
.Lstart_realigned:
subs limit_wd, limit_wd, #1
eor diff, data1, data2 /* Non-zero if differences found. */
csinv endloop, diff, xzr, cs /* Last Dword or differences. */
cbz endloop, .Lloop_aligned
/* Limit >= 8, so check first 8 bytes using unaligned loads. */
ldr data1, [src1], 8
ldr data2, [src2], 8
and tmp1, src1, 7
add limit, limit, tmp1
cmp data1, data2
bne .Lreturn
/* Not reached the limit, must have found a diff. */
tbz limit_wd, #63, .Lnot_limit
/* Align src1 and adjust src2 with bytes not yet done. */
sub src1, src1, tmp1
sub src2, src2, tmp1
/* Limit % 8 == 0 => the diff is in the last 8 bytes. */
ands limit, limit, #7
b.eq .Lnot_limit
/*
* Fewer than 8 bytes remain. Extract the valid data from the last
* eight bytes of the intended memory range.
*/
lsl limit, limit, #3 /* bytes-> bits. */
mov mask, #~0
CPU_BE( lsr mask, mask, limit )
CPU_LE( lsl mask, mask, limit )
bic data1, data1, mask
bic data2, data2, mask
subs limit, limit, 8
b.ls .Llast_bytes
orr diff, diff, mask
b .Lnot_limit
/* Loop performing 8 bytes per iteration using aligned src1.
Limit is pre-decremented by 8 and must be larger than zero.
Exit if <= 8 bytes left to do or if the data is not equal. */
.p2align 4
.Lloop8:
ldr data1, [src1], 8
ldr data2, [src2], 8
subs limit, limit, 8
ccmp data1, data2, 0, hi /* NZCV = 0b0000. */
b.eq .Lloop8
.Lmutual_align:
/*
* Sources are mutually aligned, but are not currently at an
* alignment boundary. Round down the addresses and then mask off
* the bytes that precede the start point.
*/
bic src1, src1, #7
bic src2, src2, #7
ldr data1, [src1], #8
ldr data2, [src2], #8
/*
* We cannot add the alignment offset (tmp1) to limit here, since the
* addition could overflow the limit.
*/
sub limit_wd, limit, #1/*limit != 0, so no underflow.*/
and tmp3, limit_wd, #7
lsr limit_wd, limit_wd, #3
add tmp3, tmp3, tmp1
add limit_wd, limit_wd, tmp3, lsr #3
add limit, limit, tmp1/* Adjust the limit for the extra. */
cmp data1, data2
bne .Lreturn
lsl tmp1, tmp1, #3/* Bytes beyond alignment -> bits.*/
neg tmp1, tmp1/* Bits to alignment -64. */
mov tmp2, #~0
/*mask off the non-intended bytes before the start address.*/
CPU_BE( lsl tmp2, tmp2, tmp1 )/*Big-endian.Early bytes are at MSB*/
/* Little-endian. Early bytes are at LSB. */
CPU_LE( lsr tmp2, tmp2, tmp1 )
/* Compare last 1-8 bytes using unaligned access. */
.Llast_bytes:
ldr data1, [src1, limit]
ldr data2, [src2, limit]
orr data1, data1, tmp2
orr data2, data2, tmp2
b .Lstart_realigned
/* Compare data bytes and set return value to 0, -1 or 1. */
.Lreturn:
#ifndef __AARCH64EB__
rev data1, data1
rev data2, data2
#endif
cmp data1, data2
.Lret_eq:
cset result, ne
cneg result, result, lo
ret
/*src1 and src2 have different alignment offset.*/
.Lmisaligned8:
cmp limit, #8
b.lo .Ltiny8proc /*limit < 8: compare byte by byte*/
.p2align 4
/* Compare up to 8 bytes. Limit is [-8..-1]. */
.Lless8:
adds limit, limit, 4
b.lo .Lless4
ldr data1w, [src1], 4
ldr data2w, [src2], 4
and tmp1, src1, #7
neg tmp1, tmp1
add tmp1, tmp1, #8/*valid length in the first 8 bytes of src1*/
and tmp2, src2, #7
neg tmp2, tmp2
add tmp2, tmp2, #8/*valid length in the first 8 bytes of src2*/
subs tmp3, tmp1, tmp2
csel pos, tmp1, tmp2, hi /*Choose the maximum.*/
sub limit, limit, pos
/*compare the proceeding bytes in the first 8 byte segment.*/
.Ltinycmp:
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
subs pos, pos, #1
ccmp data1w, data2w, #0, ne /* NZCV = 0b0000. */
b.eq .Ltinycmp
cbnz pos, 1f /*diff occurred before the last byte.*/
cmp data1w, data2w
b.ne .Lreturn
sub limit, limit, 4
.Lless4:
adds limit, limit, 4
beq .Lret_eq
.Lbyte_loop:
ldrb data1w, [src1], 1
ldrb data2w, [src2], 1
subs limit, limit, 1
ccmp data1w, data2w, 0, ne /* NZCV = 0b0000. */
b.eq .Lbyte_loop
sub result, data1w, data2w
b.eq .Lstart_align
1:
sub result, data1, data2
ret
.Lstart_align:
lsr limit_wd, limit, #3
cbz limit_wd, .Lremain8
ands xzr, src1, #7
b.eq .Lrecal_offset
/*process more leading bytes to make src1 aligned...*/
add src1, src1, tmp3 /*backwards src1 to alignment boundary*/
add src2, src2, tmp3
sub limit, limit, tmp3
lsr limit_wd, limit, #3
cbz limit_wd, .Lremain8
/*load 8 bytes from aligned SRC1..*/
ldr data1, [src1], #8
ldr data2, [src2], #8
subs limit_wd, limit_wd, #1
eor diff, data1, data2 /*Non-zero if differences found.*/
csinv endloop, diff, xzr, ne
cbnz endloop, .Lunequal_proc
/*How far is the current SRC2 from the alignment boundary...*/
and tmp3, tmp3, #7
.Lrecal_offset:/*src1 is aligned now..*/
neg pos, tmp3
.Lloopcmp_proc:
/*
* Divide the eight bytes into two parts. First, back src2 up to an
* alignment boundary, load eight bytes and compare from the SRC2
* alignment boundary. If all 8 bytes are equal, then start the second
* part's comparison. Otherwise finish the comparison here.
* This special handling guarantees that all accesses stay within the
* thread/task address space, avoiding out-of-range accesses.
*/
ldr data1, [src1,pos]
ldr data2, [src2,pos]
eor diff, data1, data2 /* Non-zero if differences found. */
cbnz diff, .Lnot_limit
/* The second part of the comparison. */
ldr data1, [src1], #8
ldr data2, [src2], #8
eor diff, data1, data2 /* Non-zero if differences found. */
subs limit_wd, limit_wd, #1
csinv endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
cbz endloop, .Lloopcmp_proc
.Lunequal_proc:
cbz diff, .Lremain8
/* There is difference occurred in the latest comparison. */
.Lnot_limit:
/*
* For little endian, reverse the data so that the least significant
* (first-in-memory) bits become the most significant; the following
* CLZ then counts how many leading bits are equal.
*/
CPU_LE( rev diff, diff )
CPU_LE( rev data1, data1 )
CPU_LE( rev data2, data2 )
/*
* The MS-non-zero bit of DIFF marks either the first bit
* that is different, or the end of the significant data.
* Shifting left now will bring the critical information into the
* top bits.
*/
clz pos, diff
lsl data1, data1, pos
lsl data2, data2, pos
/*
* We need to zero-extend (char is unsigned) the value and then
* perform a signed subtraction.
*/
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
.Lremain8:
/* Limit % 8 == 0 => all data are equal. */
ands limit, limit, #7
b.eq .Lret0
.Ltiny8proc:
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
subs limit, limit, #1
ccmp data1w, data2w, #0, ne /* NZCV = 0b0000. */
b.eq .Ltiny8proc
sub result, data1, data2
ret
.Lret0:
mov result, #0
ret
ENDPIPROC(memcmp)
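As a reading aid only, the byte-wise semantics the optimised routine above implements can be expressed by the trivial C sketch below; it is not the kernel's generic implementation, just an equivalent reference.
#include <linux/types.h>
/* Reference semantics: compare n bytes, return <0, 0 or >0. */
static int memcmp_ref(const void *s1, const void *s2, size_t n)
{
	const unsigned char *p1 = s1, *p2 = s2;
	for (; n; n--, p1++, p2++) {
		if (*p1 != *p2)
			return *p1 - *p2;	/* bytes compared as unsigned values */
	}
	return 0;
}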

View file

@ -60,7 +60,6 @@ D_h .req x14
.weak memmove
ENTRY(__memmove)
ENTRY(memmove)
prfm pldl1strm, [src, #L1_CACHE_BYTES]
cmp dstin, src
b.lo __memcpy
add tmp1, src, count
@ -187,7 +186,6 @@ ENTRY(memmove)
ldp C_l, C_h, [src, #-48]
stp D_l, D_h, [dst, #-64]!
ldp D_l, D_h, [src, #-64]!
prfm pldl1strm, [src, #(4*L1_CACHE_BYTES)]
subs count, count, #64
b.ge 1b
stp A_l, A_h, [dst, #-16]

View file

@ -508,10 +508,9 @@ void __init arm64_memblock_init(void)
* Save the bootloader-imposed memory limit before we overwrite
* memblock.
*/
if (memory_limit == PHYS_ADDR_MAX)
bootloader_memory_limit = memblock_max_addr(memory_limit);
if (bootloader_memory_limit > memblock_end_of_DRAM())
bootloader_memory_limit = memblock_end_of_DRAM();
else
bootloader_memory_limit = memblock_max_addr(memory_limit);
update_memory_limit();

View file

@ -11,8 +11,6 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
genhd.o partition-generic.o ioprio.o \
badblocks.o partitions/ blk-rq-qos.o
CFLAGS_blk-mq.o := $(call cc-disable-warning, align-mismatch)
obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_ioctl.o
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
@ -41,4 +39,4 @@ obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += keyslot-manager.o bio-crypt-ctx.o \
blk-crypto.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o
obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o

View file

@ -3,7 +3,6 @@
* main.c - Multi purpose firmware loading support
*
* Copyright (c) 2003 Manuel Estrada Sainz
* Copyright (C) 2020 XiaoMi, Inc.
*
* Please see Documentation/firmware_class/ for more information.
*
@ -284,12 +283,12 @@ static void free_fw_priv(struct fw_priv *fw_priv)
static char fw_path_para[256];
static const char * const fw_path[] = {
fw_path_para,
"/system/vendor/firmware",
"/system/etc/firmware",
"/lib/firmware/updates/" UTS_RELEASE,
"/lib/firmware/updates",
"/lib/firmware/" UTS_RELEASE,
"/lib/firmware",
"/system/vendor/firmware",
"/vendor/firmware"
"/lib/firmware"
};
/*

View file

@ -31,6 +31,7 @@
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>
#include <linux/wakeup_reason.h>
@ -797,6 +798,7 @@ void dpm_noirq_end(void)
{
resume_device_irqs();
device_wakeup_disarm_wake_irqs();
cpuidle_resume();
}
/**
@ -1418,6 +1420,7 @@ static int device_suspend_noirq(struct device *dev)
void dpm_noirq_begin(void)
{
cpuidle_pause();
device_wakeup_arm_wake_irqs();
suspend_device_irqs();
}

View file

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
/*
@ -388,6 +388,7 @@ static int bt_configure_gpios(int on)
if (rc) {
BT_PWR_ERR("%s:bt_enable_bt_reset_gpios_safely failed",
__func__);
return rc;
}
msleep(50);

View file

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
#ifndef BTFM_SLIM_SLAVE_H
@ -97,6 +97,9 @@ enum {
QCA_COMANCHE_SOC_ID_0101 = 0x40070101,
QCA_COMANCHE_SOC_ID_0110 = 0x40070110,
QCA_COMANCHE_SOC_ID_0120 = 0x40070120,
QCA_COMANCHE_SOC_ID_0130 = 0x40070130,
QCA_COMANCHE_SOC_ID_5120 = 0x40075120,
QCA_COMANCHE_SOC_ID_5130 = 0x40075130,
};
enum {

View file

@ -1511,6 +1511,15 @@ int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client)
}
if (mhi_cntrl->rddm_supported) {
/* check EP is in proper state */
if (mhi_cntrl->link_status(mhi_cntrl, mhi_cntrl->priv_data)) {
MHI_ERR("Unable to access EP Config space\n");
write_unlock_irq(&mhi_cntrl->pm_lock);
tasklet_enable(&mhi_cntrl->mhi_event->task);
return -ETIMEDOUT;
}
if (mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM &&
!mhi_cntrl->power_down) {
mhi_cntrl->ee = MHI_EE_RDDM;

View file

@ -538,6 +538,27 @@ config DEVPORT
source "drivers/s390/char/Kconfig"
config MSM_SMD_PKT
bool "Enable device interface for some SMD packet ports"
default n
depends on RPMSG_QCOM_SMD
help
smd_pkt driver provides the interface for userspace clients
to communicate over smd via device nodes. This enables the
userspace clients to read and write to some smd packet channels
for MSM chipsets.
config TILE_SROM
tristate "Character-device access via hypervisor to the Tilera SPI ROM"
depends on TILE
default y
help
This device provides character-level read-write access
to the SROM, typically via the "0", "1", and "2" devices
in /dev/srom/. The Tilera hypervisor makes the flash
device appear much like a simple EEPROM, and knows
how to partition a single ROM for multiple purposes.
source "drivers/char/xillybus/Kconfig"
source "drivers/char/diag/Kconfig"
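To illustrate the MSM_SMD_PKT help text above, a hedged userspace sketch of reading one packet from an smd_pkt device node; the node name /dev/smd_data1 is an assumption for illustration and depends on how the channels are actually exposed.
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
int main(void)
{
	char buf[512];
	ssize_t n;
	int fd = open("/dev/smd_data1", O_RDWR);	/* hypothetical node name */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));	/* each read returns one SMD packet */
	if (n > 0)
		printf("received %zd bytes\n", n);
	close(fd);
	return 0;
}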

View file

@ -14,6 +14,8 @@ obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
obj-$(CONFIG_IBM_BSR) += bsr.o
obj-$(CONFIG_SGI_MBCS) += mbcs.o
obj-$(CONFIG_BFIN_OTP) += bfin-otp.o
obj-$(CONFIG_MSM_SMD_PKT) += msm_smd_pkt.o
obj-$(CONFIG_PRINTER) += lp.o

View file

@ -8,7 +8,6 @@
#include <linux/completion.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/cdev.h>
@ -23,7 +22,6 @@
#include <soc/qcom/service-notifier.h>
#include <soc/qcom/service-locator.h>
#include <linux/scatterlist.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/of.h>
@ -1698,7 +1696,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
uintptr_t args;
size_t rlen = 0, copylen = 0, metalen = 0, lrpralen = 0;
int i, oix;
int err = 0;
int err = 0, j = 0;
int mflags = 0;
uint64_t *fdlist;
uint32_t *crclist;
@ -1743,6 +1741,8 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
FASTRPC_ATTR_NOVA, 0, 0, dmaflags,
&ctx->maps[i]);
if (err) {
for (j = bufs; j < i; j++)
fastrpc_mmap_free(ctx->maps[j], 0);
mutex_unlock(&ctx->fl->map_mutex);
goto bail;
}
@ -2780,19 +2780,6 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
return err;
}
static int fastrpc_kstat(const char *filename, struct kstat *stat)
{
int result;
mm_segment_t fs_old;
fs_old = get_fs();
set_fs(KERNEL_DS);
result = vfs_stat((const char __user *)filename, stat);
set_fs(fs_old);
return result;
}
static int fastrpc_send_cpuinfo_to_dsp(struct fastrpc_file *fl)
{
int err = 0;
@ -2835,33 +2822,29 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_file *fl,
int err = 0, dsp_support = 0;
struct fastrpc_ioctl_invoke_crc ioctl;
remote_arg_t ra[2];
struct kstat sb;
struct fastrpc_apps *me = &gfa;
// Querying device about DSP support
switch (domain) {
case ADSP_DOMAIN_ID:
if (!fastrpc_kstat("/dev/subsys_adsp", &sb))
case SDSP_DOMAIN_ID:
case CDSP_DOMAIN_ID:
if (me->channel[domain].issubsystemup)
dsp_support = 1;
break;
case MDSP_DOMAIN_ID:
//Modem not supported for fastRPC
break;
case SDSP_DOMAIN_ID:
if (!fastrpc_kstat("/dev/subsys_slpi", &sb))
dsp_support = 1;
break;
case CDSP_DOMAIN_ID:
if (!fastrpc_kstat("/dev/subsys_cdsp", &sb))
dsp_support = 1;
break;
default:
dsp_support = 0;
break;
}
dsp_attr_buf[0] = dsp_support;
if (dsp_support == 0)
if (dsp_support == 0) {
err = -ENOTCONN;
goto bail;
}
err = fastrpc_channel_open(fl);
if (err)
@ -3315,8 +3298,13 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
mutex_unlock(&fl->map_mutex);
if (err)
goto bail;
VERIFY(err, map != NULL);
if (err) {
err = -EINVAL;
goto bail;
}
VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr,
map->phys, map->size, map->flags));
map->phys, map->size, map->flags));
if (err)
goto bail;
mutex_lock(&fl->map_mutex);
@ -4737,6 +4725,8 @@ static const struct of_device_id fastrpc_match_table[] = {
{ .compatible = "qcom,msm-fastrpc-adsp", },
{ .compatible = "qcom,msm-fastrpc-compute", },
{ .compatible = "qcom,msm-fastrpc-compute-cb", },
{ .compatible = "qcom,msm-fastrpc-legacy-compute", },
{ .compatible = "qcom,msm-fastrpc-legacy-compute-cb", },
{ .compatible = "qcom,msm-adsprpc-mem-region", },
{}
};
@ -4834,6 +4824,85 @@ static int fastrpc_cb_probe(struct device *dev)
return err;
}
static int fastrpc_cb_legacy_probe(struct device *dev)
{
struct fastrpc_channel_ctx *chan;
struct fastrpc_session_ctx *first_sess = NULL, *sess = NULL;
struct fastrpc_apps *me = &gfa;
const char *name;
unsigned int *sids = NULL, sids_size = 0;
int err = 0, ret = 0, i;
uint32_t dma_addr_pool[2] = {0, 0};
VERIFY(err, NULL != (name = of_get_property(dev->of_node,
"label", NULL)));
if (err)
goto bail;
for (i = 0; i < NUM_CHANNELS; i++) {
if (!gcinfo[i].name)
continue;
if (!strcmp(name, gcinfo[i].name))
break;
}
VERIFY(err, i < NUM_CHANNELS);
if (err)
goto bail;
chan = &gcinfo[i];
VERIFY(err, chan->sesscount < NUM_SESSIONS);
if (err)
goto bail;
first_sess = &chan->session[chan->sesscount];
VERIFY(err, NULL != of_get_property(dev->of_node,
"sids", &sids_size));
if (err)
goto bail;
VERIFY(err, NULL != (sids = kzalloc(sids_size, GFP_KERNEL)));
if (err)
goto bail;
ret = of_property_read_u32_array(dev->of_node, "sids", sids,
sids_size/sizeof(unsigned int));
if (ret)
goto bail;
if (err)
goto bail;
for (i = 0; i < sids_size/sizeof(unsigned int); i++) {
VERIFY(err, chan->sesscount < NUM_SESSIONS);
if (err)
goto bail;
sess = &chan->session[chan->sesscount];
sess->smmu.cb = sids[i];
sess->smmu.dev = dev;
sess->smmu.dev_name = dev_name(dev);
sess->smmu.enabled = 1;
sess->used = 0;
sess->smmu.coherent = false;
sess->smmu.secure = false;
chan->sesscount++;
if (!sess->smmu.dev->dma_parms)
sess->smmu.dev->dma_parms = devm_kzalloc(sess->smmu.dev,
sizeof(*sess->smmu.dev->dma_parms), GFP_KERNEL);
dma_set_max_seg_size(sess->smmu.dev, DMA_BIT_MASK(32));
dma_set_seg_boundary(sess->smmu.dev,
(unsigned long)DMA_BIT_MASK(64));
}
of_property_read_u32_array(dev->of_node, "qcom,iommu-dma-addr-pool",
dma_addr_pool, 2);
me->max_size_limit = (dma_addr_pool[1] == 0 ? 0x78000000 :
dma_addr_pool[1]);
bail:
kfree(sids);
return err;
}
static void init_secure_vmid_list(struct device *dev, char *prop_name,
struct secure_vm *destvm)
{
@ -4964,6 +5033,9 @@ static int fastrpc_probe(struct platform_device *pdev)
if (of_device_is_compatible(dev->of_node,
"qcom,msm-fastrpc-compute-cb"))
return fastrpc_cb_probe(dev);
if (of_device_is_compatible(dev->of_node,
"qcom,msm-fastrpc-legacy-compute-cb"))
return fastrpc_cb_legacy_probe(dev);
if (of_device_is_compatible(dev->of_node,
"qcom,msm-adsprpc-mem-region")) {
@ -5112,7 +5184,7 @@ static struct platform_driver fastrpc_driver = {
static const struct rpmsg_device_id fastrpc_rpmsg_match[] = {
{ FASTRPC_GLINK_GUID },
{ },
{ FASTRPC_SMD_GUID },
};
static const struct of_device_id fastrpc_rpmsg_of_match[] = {

View file

@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2008-2020, The Linux Foundation. All rights reserved.
/* Copyright (c) 2008-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/slab.h>
@ -3120,8 +3120,13 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
return -EINVAL;
}
err = copy_to_user(buf, mask_info->update_buf_client,
if ((count - (sizeof(int))) >=
mask_info->update_buf_client_len) {
err = copy_to_user(buf, mask_info->update_buf_client,
mask_info->update_buf_client_len);
} else {
err = -EINVAL;
}
if (err) {
pr_err("diag: In %s Unable to send msg masks to user space clients, err: %d\n",
__func__, err);
@ -3147,8 +3152,13 @@ int diag_copy_to_user_log_mask(char __user *buf, size_t count,
return -EINVAL;
}
err = copy_to_user(buf, mask_info->update_buf_client,
if ((count - (sizeof(int))) >=
mask_info->update_buf_client_len) {
err = copy_to_user(buf, mask_info->update_buf_client,
mask_info->update_buf_client_len);
} else {
err = -EINVAL;
}
if (err) {
pr_err("diag: In %s Unable to send msg masks to user space clients, err: %d\n",
__func__, err);

View file

@ -312,7 +312,9 @@ static void usb_read_work_fn(struct work_struct *work)
atomic_set(&ch->read_pending, 1);
req->buf = ch->read_buf;
req->length = USB_MAX_OUT_BUF;
spin_unlock_irqrestore(&ch->lock, flags);
err = usb_diag_read(ch->hdl, req);
spin_lock_irqsave(&ch->lock, flags);
if (err) {
pr_debug("diag: In %s, error in reading from USB %s, err: %d\n",
__func__, ch->name, err);

View file

@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2019, 2021, The Linux Foundation. All rights reserved.
*/
#include <linux/slab.h>
#include <linux/err.h>
@ -1345,11 +1345,26 @@ int diagfwd_channel_open(struct diagfwd_info *fwd_info)
int diagfwd_channel_close(struct diagfwd_info *fwd_info)
{
struct diag_rpmsg_info *rpmsg_info = NULL;
struct diag_socket_info *socket_info = NULL;
if (!fwd_info)
return -EIO;
mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
fwd_info->ch_open = 0;
rpmsg_info = diag_get_rpmsg_info_ptr(fwd_info->type,
fwd_info->peripheral);
socket_info = diag_get_socket_info_ptr(fwd_info->type,
fwd_info->peripheral);
if (rpmsg_info && socket_info && rpmsg_info->probed
&& socket_info->reset_flag) {
mutex_unlock(
&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
return 0;
}
if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close)
fwd_info->c_ops->close(fwd_info);
@ -1440,6 +1455,7 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int buf_num)
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
fwd_info->peripheral, fwd_info->type, buf_num);
rpmsg_mark_buffers_free(peripheral, type, buf_num);
}
} else if (buf_num == 2 && fwd_info->buf_2) {
/*
@ -1466,6 +1482,7 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int buf_num)
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
fwd_info->peripheral, fwd_info->type, buf_num);
rpmsg_mark_buffers_free(peripheral, type, buf_num);
}
} else if (buf_num >= 3 && (buf_num % 2)) {
/*
@ -1501,6 +1518,7 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int buf_num)
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
fwd_info->peripheral, fwd_info->type, buf_num);
rpmsg_mark_buffers_free(peripheral, type, 1);
}
} else if (buf_num >= 4 && !(buf_num % 2)) {
/*
@ -1536,7 +1554,8 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int buf_num)
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
fwd_info->peripheral, fwd_info->type, buf_num);
}
rpmsg_mark_buffers_free(peripheral, type, 2);
}
} else
pr_err("diag: In %s, invalid buf_num %d\n", __func__, buf_num);

View file

@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2017-2019, 2021, The Linux Foundation. All rights reserved.
*/
#include <linux/slab.h>
@ -24,11 +24,22 @@
#define PERI_RPMSG rpmsg_info->peripheral
struct diag_rpmsg_read_work {
struct diag_rpmsg_info *rpmsg_info;
const void *ptr_read_done;
const void *ptr_rx_done;
size_t ptr_read_size;
struct work_struct work;
struct list_head rx_list_head;
spinlock_t rx_lock;
};
static struct diag_rpmsg_read_work *read_work_struct;
/**
** struct rx_buff_list - holds rx rpmsg data until it is consumed by the
** diagfwd_channel_read_done worker; one item per rx packet
**/
struct rx_buff_list {
struct list_head list;
void *rpmsg_rx_buf;
int rx_buf_size;
struct diag_rpmsg_info *rpmsg_info;
};
struct diag_rpmsg_info rpmsg_data[NUM_PERIPHERALS] = {
@ -36,7 +47,7 @@ struct diag_rpmsg_info rpmsg_data[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_MODEM,
.type = TYPE_DATA,
.edge = "mpss",
.name = "DIAG_DATA",
.name = "DIAG",
.buf1 = NULL,
.buf2 = NULL,
.hdl = NULL
@ -45,7 +56,7 @@ struct diag_rpmsg_info rpmsg_data[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_LPASS,
.type = TYPE_DATA,
.edge = "lpass",
.name = "DIAG_DATA",
.name = "DIAG",
.buf1 = NULL,
.buf2 = NULL,
.hdl = NULL
@ -54,7 +65,7 @@ struct diag_rpmsg_info rpmsg_data[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_WCNSS,
.type = TYPE_DATA,
.edge = "wcnss",
.name = "DIAG_DATA",
.name = "APPS_RIVA_DATA",
.buf1 = NULL,
.buf2 = NULL,
.hdl = NULL
@ -102,7 +113,7 @@ struct diag_rpmsg_info rpmsg_cntl[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_MODEM,
.type = TYPE_CNTL,
.edge = "mpss",
.name = "DIAG_CTRL",
.name = "DIAG_CNTL",
.buf1 = NULL,
.buf2 = NULL,
.hdl = NULL
@ -111,7 +122,7 @@ struct diag_rpmsg_info rpmsg_cntl[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_LPASS,
.type = TYPE_CNTL,
.edge = "lpass",
.name = "DIAG_CTRL",
.name = "DIAG_CNTL",
.buf1 = NULL,
.buf2 = NULL,
.hdl = NULL
@ -120,7 +131,7 @@ struct diag_rpmsg_info rpmsg_cntl[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_WCNSS,
.type = TYPE_CNTL,
.edge = "wcnss",
.name = "DIAG_CTRL",
.name = "APPS_RIVA_CTRL",
.buf1 = NULL,
.buf2 = NULL,
.hdl = NULL
@ -168,7 +179,7 @@ struct diag_rpmsg_info rpmsg_dci[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_MODEM,
.type = TYPE_DCI,
.edge = "mpss",
.name = "DIAG_DCI_DATA",
.name = "DIAG_2",
.buf1 = NULL,
.buf2 = NULL,
.hdl = NULL
@ -300,7 +311,7 @@ struct diag_rpmsg_info rpmsg_dci_cmd[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_MODEM,
.type = TYPE_DCI_CMD,
.edge = "mpss",
.name = "DIAG_DCI_CMD",
.name = "DIAG_2_CMD",
.buf1 = NULL,
.buf2 = NULL,
.hdl = NULL
@ -463,7 +474,7 @@ static int diag_rpmsg_read(void *ctxt, unsigned char *buf, int buf_len)
rpmsg_info->buf2 = buf;
}
mutex_unlock(&driver->diagfwd_channel_mutex[rpmsg_info->peripheral]);
queue_work(rpmsg_info->wq, &read_work_struct->work);
return ret_val;
}
@ -488,14 +499,13 @@ static void diag_rpmsg_read_work_fn(struct work_struct *work)
return;
}
mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
diagfwd_channel_read(rpmsg_info->fwd_ctxt);
}
static int diag_rpmsg_write(void *ctxt, unsigned char *buf, int len)
{
struct diag_rpmsg_info *rpmsg_info = NULL;
int err = 0;
struct diag_rpmsg_info *rpmsg_info = NULL;
struct rpmsg_device *rpdev = NULL;
if (!ctxt || !buf)
@ -518,6 +528,7 @@ static int diag_rpmsg_write(void *ctxt, unsigned char *buf, int len)
}
rpdev = (struct rpmsg_device *)rpmsg_info->hdl;
err = rpmsg_send(rpdev->ept, buf, len);
if (!err) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s wrote to rpmsg, len: %d\n",
@ -599,85 +610,178 @@ static int diag_rpmsg_notify_cb(struct rpmsg_device *rpdev, void *data, int len,
void *priv, u32 src)
{
struct diag_rpmsg_info *rpmsg_info = NULL;
struct diagfwd_info *fwd_info = NULL;
struct diag_rpmsg_read_work *read_work = NULL;
void *buf = NULL;
struct rx_buff_list *rx_item;
unsigned long flags;
if (!rpdev || !data)
return -EINVAL;
rpmsg_info = dev_get_drvdata(&rpdev->dev);
if (!rpmsg_info || !rpmsg_info->fwd_ctxt) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "diag: Invalid rpmsg info\n");
return 0;
return -EINVAL;
}
if (!rpmsg_info->buf1 && !rpmsg_info->buf2) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"dropping data for %s len %d\n",
rpmsg_info->name, len);
return 0;
rx_item = kzalloc(sizeof(*rx_item), GFP_ATOMIC);
if (!rx_item)
return -ENOMEM;
rx_item->rpmsg_rx_buf = kmemdup(data, len, GFP_ATOMIC);
if (!rx_item->rpmsg_rx_buf) {
kfree(rx_item);
return -ENOMEM;
}
fwd_info = rpmsg_info->fwd_ctxt;
rx_item->rx_buf_size = len;
rx_item->rpmsg_info = rpmsg_info;
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"diag: received data of length: %d for p:%d, t:%d\n",
len, rpmsg_info->peripheral, rpmsg_info->type);
spin_lock_irqsave(&read_work_struct->rx_lock, flags);
list_add(&rx_item->list, &read_work_struct->rx_list_head);
spin_unlock_irqrestore(&read_work_struct->rx_lock, flags);
if (rpmsg_info->buf1 && !fwd_info->buffer_status[BUF_1_INDEX] &&
atomic_read(&fwd_info->buf_1->in_busy)) {
buf = rpmsg_info->buf1;
fwd_info->buffer_status[BUF_1_INDEX] = 1;
} else if (rpmsg_info->buf2 && !fwd_info->buffer_status[BUF_2_INDEX] &&
atomic_read(&fwd_info->buf_2->in_busy) &&
(fwd_info->type == TYPE_DATA)) {
buf = rpmsg_info->buf2;
fwd_info->buffer_status[BUF_2_INDEX] = 1;
} else {
buf = NULL;
}
if (!buf)
return 0;
memcpy(buf, data, len);
read_work = kmalloc(sizeof(*read_work), GFP_ATOMIC);
if (!read_work) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"diag: Could not allocate read_work\n");
return 0;
}
read_work->rpmsg_info = rpmsg_info;
read_work->ptr_read_done = buf;
read_work->ptr_read_size = len;
INIT_WORK(&read_work->work, diag_rpmsg_notify_rx_work_fn);
queue_work(rpmsg_info->wq, &read_work->work);
queue_work(rpmsg_info->wq, &read_work_struct->work);
return 0;
}
static void diag_rpmsg_notify_rx_work_fn(struct work_struct *work)
{
struct diag_rpmsg_read_work *read_work = container_of(work,
struct diag_rpmsg_read_work, work);
struct diag_rpmsg_info *rpmsg_info = read_work->rpmsg_info;
struct diag_rpmsg_info *rpmsg_info;
struct rx_buff_list *rx_item;
struct diagfwd_info *fwd_info;
void *buf = NULL;
unsigned long flags;
int err_flag = 0;
if (!rpmsg_info || !rpmsg_info->hdl) {
kfree(read_work);
read_work = NULL;
spin_lock_irqsave(&read_work_struct->rx_lock, flags);
if (!list_empty(&read_work_struct->rx_list_head)) {
/* detach last entry */
rx_item = list_last_entry(&read_work_struct->rx_list_head,
struct rx_buff_list, list);
if (!rx_item) {
err_flag = 1;
goto err_handling;
}
rpmsg_info = rx_item->rpmsg_info;
if (!rpmsg_info) {
err_flag = 1;
goto err_handling;
}
fwd_info = rpmsg_info->fwd_ctxt;
if (!fwd_info) {
err_flag = 1;
goto err_handling;
}
if (!rpmsg_info->buf1 && !rpmsg_info->buf2) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"retry data send for %s len %d\n",
rpmsg_info->name, rx_item->rx_buf_size);
err_flag = 1;
goto err_handling;
}
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"diag: received data of length: %d, p: %d, t: %d\n",
rx_item->rx_buf_size, rpmsg_info->peripheral,
rpmsg_info->type);
if (rpmsg_info->buf1 && !fwd_info->buffer_status[BUF_1_INDEX] &&
atomic_read(&(fwd_info->buf_1->in_busy))) {
buf = rpmsg_info->buf1;
fwd_info->buffer_status[BUF_1_INDEX] = 1;
} else if (rpmsg_info->buf2 &&
!fwd_info->buffer_status[BUF_2_INDEX] &&
atomic_read(&fwd_info->buf_2->in_busy) &&
(fwd_info->type == TYPE_DATA)) {
buf = rpmsg_info->buf2;
fwd_info->buffer_status[BUF_2_INDEX] = 1;
} else {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"Both the rpmsg buffers are busy\n");
buf = NULL;
}
if (!buf) {
err_flag = 1;
goto err_handling;
}
err_handling:
if (!err_flag) {
memcpy(buf, rx_item->rpmsg_rx_buf,
rx_item->rx_buf_size);
list_del(&rx_item->list);
spin_unlock_irqrestore(&read_work_struct->rx_lock,
flags);
} else {
spin_unlock_irqrestore(&read_work_struct->rx_lock,
flags);
goto end;
}
mutex_lock(&driver->diagfwd_channel_mutex[PERI_RPMSG]);
diagfwd_channel_read_done(rpmsg_info->fwd_ctxt,
(unsigned char *)(buf), rx_item->rx_buf_size);
mutex_unlock(&driver->diagfwd_channel_mutex[PERI_RPMSG]);
kfree(rx_item->rpmsg_rx_buf);
kfree(rx_item);
} else {
spin_unlock_irqrestore(&read_work_struct->rx_lock, flags);
}
end:
return;
}
struct diag_rpmsg_info *diag_get_rpmsg_info_ptr(int type, int peripheral)
{
if (type == TYPE_CMD)
return &rpmsg_cmd[peripheral];
else if (type == TYPE_CNTL)
return &rpmsg_cntl[peripheral];
else if (type == TYPE_DATA)
return &rpmsg_data[peripheral];
else if (type == TYPE_DCI_CMD)
return &rpmsg_dci_cmd[peripheral];
else if (type == TYPE_DCI)
return &rpmsg_dci[peripheral];
else
return NULL;
}
void rpmsg_mark_buffers_free(uint8_t peripheral, uint8_t type, int buf_num)
{
struct diag_rpmsg_info *rpmsg_info;
switch (peripheral) {
case PERIPHERAL_WDSP:
break;
case PERIPHERAL_WCNSS:
break;
case PERIPHERAL_MODEM:
break;
case PERIPHERAL_LPASS:
break;
default:
return;
}
mutex_lock(&driver->diagfwd_channel_mutex[rpmsg_info->peripheral]);
diagfwd_channel_read_done(rpmsg_info->fwd_ctxt,
(unsigned char *)(read_work->ptr_read_done),
read_work->ptr_read_size);
rpmsg_info = diag_get_rpmsg_info_ptr(type, peripheral);
if (!rpmsg_info)
return;
if (read_work->ptr_read_done == rpmsg_info->buf1)
if (buf_num == 1) {
rpmsg_info->buf1 = NULL;
else if (read_work->ptr_read_done == rpmsg_info->buf2)
DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "marked buf1 NULL");
} else if (buf_num == 2) {
rpmsg_info->buf2 = NULL;
kfree(read_work);
read_work = NULL;
mutex_unlock(&driver->diagfwd_channel_mutex[rpmsg_info->peripheral]);
DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "marked buf2 NULL");
}
}
static void rpmsg_late_init(struct diag_rpmsg_info *rpmsg_info)
@ -745,6 +849,7 @@ static void __diag_rpmsg_init(struct diag_rpmsg_info *rpmsg_info)
mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
rpmsg_info->hdl = NULL;
rpmsg_info->fwd_ctxt = NULL;
rpmsg_info->probed = 0;
atomic_set(&rpmsg_info->opened, 0);
atomic_set(&rpmsg_info->diag_state, 0);
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@ -771,8 +876,19 @@ int diag_rpmsg_init(void)
struct diag_rpmsg_info *rpmsg_info = NULL;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
if (peripheral != PERIPHERAL_WDSP)
switch (peripheral) {
case PERIPHERAL_WDSP:
break;
case PERIPHERAL_WCNSS:
break;
case PERIPHERAL_MODEM:
break;
case PERIPHERAL_LPASS:
break;
default:
continue;
}
rpmsg_info = &rpmsg_cntl[peripheral];
__diag_rpmsg_init(rpmsg_info);
diagfwd_cntl_register(TRANSPORT_RPMSG, rpmsg_info->peripheral,
@ -788,6 +904,18 @@ int diag_rpmsg_init(void)
__diag_rpmsg_init(&rpmsg_dci[peripheral]);
__diag_rpmsg_init(&rpmsg_dci_cmd[peripheral]);
}
read_work_struct = kmalloc(sizeof(*read_work_struct), GFP_ATOMIC);
if (!read_work_struct) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"diag: Could not allocate read_work\n");
return -ENOMEM;
}
kmemleak_not_leak(read_work_struct);
INIT_WORK(&read_work_struct->work, diag_rpmsg_notify_rx_work_fn);
INIT_LIST_HEAD(&read_work_struct->rx_list_head);
spin_lock_init(&read_work_struct->rx_lock);
return 0;
}
@ -815,8 +943,19 @@ void diag_rpmsg_early_exit(void)
int peripheral = 0;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
if (peripheral != PERIPHERAL_WDSP)
switch (peripheral) {
case PERIPHERAL_WDSP:
break;
case PERIPHERAL_WCNSS:
break;
case PERIPHERAL_MODEM:
break;
case PERIPHERAL_LPASS:
break;
default:
continue;
}
mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
__diag_rpmsg_exit(&rpmsg_cntl[peripheral]);
mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
@ -837,72 +976,124 @@ void diag_rpmsg_exit(void)
}
}
static struct diag_rpmsg_info *diag_get_rpmsg_ptr(char *name)
static struct diag_rpmsg_info *diag_get_rpmsg_ptr(char *name, int pid)
{
if (!name)
return NULL;
if (!strcmp(name, "DIAG_CMD"))
return &rpmsg_cmd[PERIPHERAL_WDSP];
else if (!strcmp(name, "DIAG_CTRL"))
return &rpmsg_cntl[PERIPHERAL_WDSP];
else if (!strcmp(name, "DIAG_DATA"))
return &rpmsg_data[PERIPHERAL_WDSP];
else if (!strcmp(name, "DIAG_DCI_CMD"))
return &rpmsg_dci_cmd[PERIPHERAL_WDSP];
else if (!strcmp(name, "DIAG_DCI_DATA"))
return &rpmsg_dci[PERIPHERAL_WDSP];
else
return NULL;
if (pid == PERIPHERAL_WDSP) {
if (!strcmp(name, "DIAG_CMD"))
return &rpmsg_cmd[PERIPHERAL_WDSP];
else if (!strcmp(name, "DIAG_CTRL"))
return &rpmsg_cntl[PERIPHERAL_WDSP];
else if (!strcmp(name, "DIAG_DATA"))
return &rpmsg_data[PERIPHERAL_WDSP];
else if (!strcmp(name, "DIAG_DCI_CMD"))
return &rpmsg_dci_cmd[PERIPHERAL_WDSP];
else if (!strcmp(name, "DIAG_DCI_DATA"))
return &rpmsg_dci[PERIPHERAL_WDSP];
else
return NULL;
} else if (pid == PERIPHERAL_WCNSS) {
if (!strcmp(name, "APPS_RIVA_DATA"))
return &rpmsg_data[PERIPHERAL_WCNSS];
else if (!strcmp(name, "APPS_RIVA_CTRL"))
return &rpmsg_cntl[PERIPHERAL_WCNSS];
else
return NULL;
} else if (pid == PERIPHERAL_MODEM) {
if (!strcmp(name, "DIAG_CMD"))
return &rpmsg_cmd[PERIPHERAL_MODEM];
else if (!strcmp(name, "DIAG_CNTL"))
return &rpmsg_cntl[PERIPHERAL_MODEM];
else if (!strcmp(name, "DIAG"))
return &rpmsg_data[PERIPHERAL_MODEM];
else if (!strcmp(name, "DIAG_2_CMD"))
return &rpmsg_dci_cmd[PERIPHERAL_MODEM];
else if (!strcmp(name, "DIAG_2"))
return &rpmsg_dci[PERIPHERAL_MODEM];
else
return NULL;
} else if (pid == PERIPHERAL_LPASS) {
if (!strcmp(name, "DIAG"))
return &rpmsg_data[PERIPHERAL_LPASS];
else if (!strcmp(name, "DIAG_CNTL"))
return &rpmsg_cntl[PERIPHERAL_LPASS];
else
return NULL;
}
return NULL;
}
static int diag_rpmsg_probe(struct rpmsg_device *rpdev)
{
struct diag_rpmsg_info *rpmsg_info = NULL;
int peripheral = -1;
if (!rpdev)
return 0;
if (strcmp(rpdev->dev.parent->of_node->name, "wdsp"))
return 0;
rpmsg_info = diag_get_rpmsg_ptr(rpdev->id.name);
if (!strcmp(rpdev->dev.parent->of_node->name, "wdsp"))
peripheral = PERIPHERAL_WDSP;
else if (!strcmp(rpdev->dev.parent->of_node->name, "wcnss"))
peripheral = PERIPHERAL_WCNSS;
else if (!strcmp(rpdev->dev.parent->of_node->name, "modem"))
peripheral = PERIPHERAL_MODEM;
else if (!strcmp(rpdev->dev.parent->of_node->name, "adsp"))
peripheral = PERIPHERAL_LPASS;
rpmsg_info = diag_get_rpmsg_ptr(rpdev->id.name, peripheral);
if (rpmsg_info) {
mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
rpmsg_info->hdl = rpdev;
atomic_set(&rpmsg_info->opened, 1);
mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
rpmsg_info->probed = 1;
dev_set_drvdata(&rpdev->dev, rpmsg_info);
diagfwd_channel_read(rpmsg_info->fwd_ctxt);
queue_work(rpmsg_info->wq, &rpmsg_info->open_work);
}
return 0;
}
static void diag_rpmsg_remove(struct rpmsg_device *rpdev)
{
struct diag_rpmsg_info *rpmsg_info = NULL;
int peripheral = -1;
if (!rpdev)
return;
rpmsg_info = diag_get_rpmsg_ptr(rpdev->id.name);
if (!strcmp(rpdev->dev.parent->of_node->name, "wdsp"))
peripheral = PERIPHERAL_WDSP;
else if (!strcmp(rpdev->dev.parent->of_node->name, "wcnss"))
peripheral = PERIPHERAL_WCNSS;
else if (!strcmp(rpdev->dev.parent->of_node->name, "modem"))
peripheral = PERIPHERAL_MODEM;
else if (!strcmp(rpdev->dev.parent->of_node->name, "adsp"))
peripheral = PERIPHERAL_LPASS;
rpmsg_info = diag_get_rpmsg_ptr(rpdev->id.name, peripheral);
if (rpmsg_info) {
mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
atomic_set(&rpmsg_info->opened, 0);
mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
rpmsg_info->probed = 0;
queue_work(rpmsg_info->wq, &rpmsg_info->close_work);
}
}
static struct rpmsg_device_id rpmsg_diag_table[] = {
{ .name = "DIAG_CMD" },
{ .name = "DIAG_CTRL" },
{ .name = "DIAG_DATA" },
{ .name = "DIAG_DCI_CMD" },
{ .name = "DIAG_DCI_DATA" },
{ .name = "APPS_RIVA_DATA" },
{ .name = "APPS_RIVA_CTRL" },
{ .name = "DIAG" },
{ .name = "DIAG_CNTL" },
{ .name = "DIAG_2" },
{ .name = "DIAG_2_CMD" },
{ .name = "DIAG_CMD" },
{ .name = "DIAG_CTRL" },
{ .name = "DIAG_DATA" },
{ .name = "DIAG_DCI_CMD" },
{ .name = "DIAG_DCI_DATA" },
{ },
};
MODULE_DEVICE_TABLE(rpmsg, rpmsg_diag_table);
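The reworked callback and work function above implement a deferred-receive pattern: the rpmsg callback copies the payload with kmemdup() under GFP_ATOMIC, links it onto a spinlock-protected list, and queues a work item; the work function later drains the list in process context. A minimal sketch of that pattern follows, with hypothetical names (rx_item, rx_queue) that are not part of the diag driver.

	/*
	 * Illustrative sketch of the deferred-RX pattern used above.
	 * Names and layout are hypothetical, not driver code.
	 */
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/string.h>
	#include <linux/workqueue.h>

	struct rx_item {
		struct list_head list;
		void *buf;
		int len;
	};

	struct rx_queue {
		struct list_head head;
		spinlock_t lock;
		struct work_struct work;
	};

	/* Called from the rpmsg callback (atomic context): copy and queue. */
	static int rx_queue_push(struct rx_queue *q, const void *data, int len)
	{
		struct rx_item *item;
		unsigned long flags;

		item = kzalloc(sizeof(*item), GFP_ATOMIC);
		if (!item)
			return -ENOMEM;
		item->buf = kmemdup(data, len, GFP_ATOMIC);
		if (!item->buf) {
			kfree(item);
			return -ENOMEM;
		}
		item->len = len;

		spin_lock_irqsave(&q->lock, flags);
		list_add(&item->list, &q->head);	/* newest at the front */
		spin_unlock_irqrestore(&q->lock, flags);

		schedule_work(&q->work);
		return 0;
	}

	/* Work function (process context): drain oldest-first, outside the lock. */
	static void rx_queue_drain(struct work_struct *work)
	{
		struct rx_queue *q = container_of(work, struct rx_queue, work);
		struct rx_item *item;
		unsigned long flags;

		spin_lock_irqsave(&q->lock, flags);
		while (!list_empty(&q->head)) {
			item = list_last_entry(&q->head, struct rx_item, list);
			list_del(&item->list);
			spin_unlock_irqrestore(&q->lock, flags);

			/* consume item->buf / item->len here */

			kfree(item->buf);
			kfree(item);
			spin_lock_irqsave(&q->lock, flags);
		}
		spin_unlock_irqrestore(&q->lock, flags);
	}

	static void rx_queue_init(struct rx_queue *q)
	{
		INIT_LIST_HEAD(&q->head);
		spin_lock_init(&q->lock);
		INIT_WORK(&q->work, rx_queue_drain);
	}

The driver's variant differs in that it leaves an item on the list when neither peripheral buffer is free and simply requeues the work, which is why its rx_buff_list entries also record the owning diag_rpmsg_info.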

View file

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2017-2018, 2021, The Linux Foundation. All rights reserved.
*/
#ifndef DIAGFWD_RPMSG_H
@ -12,6 +12,7 @@ struct diag_rpmsg_info {
uint8_t peripheral;
uint8_t type;
uint8_t inited;
uint8_t probed;
atomic_t opened;
atomic_t diag_state;
uint32_t fifo_size;
@ -43,5 +44,7 @@ int diag_rpmsg_init(void);
void diag_rpmsg_early_exit(void);
void diag_rpmsg_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt);
int diag_rpmsg_check_state(void *ctxt);
void rpmsg_mark_buffers_free(uint8_t peripheral, uint8_t type, int buf_num);
struct diag_rpmsg_info *diag_get_rpmsg_info_ptr(int type, int peripheral);
#endif

View file

@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/slab.h>
@ -244,6 +244,22 @@ struct diag_socket_info socket_dci_cmd[NUM_PERIPHERALS] = {
}
};
struct diag_socket_info *diag_get_socket_info_ptr(int type, int peripheral)
{
if (type == TYPE_CMD)
return &socket_cmd[peripheral];
else if (type == TYPE_CNTL)
return &socket_cntl[peripheral];
else if (type == TYPE_DATA)
return &socket_data[peripheral];
else if (type == TYPE_DCI_CMD)
return &socket_dci_cmd[peripheral];
else if (type == TYPE_DCI)
return &socket_dci[peripheral];
else
return NULL;
}
struct restart_notifier_block {
unsigned int processor;
char *name;
@ -611,7 +627,9 @@ static void socket_read_work_fn(struct work_struct *work)
err = sock_error(info->hdl->sk);
mutex_unlock(&info->socket_info_mutex);
if (unlikely(err == -ENETRESET)) {
info->reset_flag = 1;
socket_close_channel(info);
info->reset_flag = 0;
if (info->port_type == PORT_TYPE_SERVER)
socket_init_work_fn(&info->init_work);
diag_ws_release();
@ -831,7 +849,9 @@ static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len)
mutex_lock(channel_mutex);
diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
mutex_unlock(channel_mutex);
info->reset_flag = 1;
socket_close_channel(info);
info->reset_flag = 0;
if (info->port_type == PORT_TYPE_SERVER)
socket_init_work_fn(&info->init_work);
return read_len;
@ -980,6 +1000,7 @@ static void __diag_socket_init(struct diag_socket_info *info)
info->hdl = NULL;
info->fwd_ctxt = NULL;
info->data_ready = 0;
info->reset_flag = 0;
atomic_set(&info->flow_cnt, 0);
spin_lock_init(&info->lock);
strlcpy(wq_name, info->name, sizeof(wq_name));

View file

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#ifndef DIAGFWD_SOCKET_H
@ -33,6 +33,7 @@ struct diag_socket_info {
uint8_t type;
uint8_t port_type;
uint8_t inited;
uint8_t reset_flag;
atomic_t opened;
atomic_t diag_state;
uint32_t pkt_len;
@ -68,4 +69,5 @@ int diag_socket_check_state(void *ctxt);
int diag_socket_init(void);
void diag_socket_exit(void);
int diag_socket_init_peripheral(uint8_t peripheral);
struct diag_socket_info *diag_get_socket_info_ptr(int type, int peripheral);
#endif

View file

@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2008-2014, 2016-2019 The Linux Foundation. All rights reserved.
/* Copyright (c) 2008-2014, 2016-2019, 2021 The Linux Foundation. All rights reserved.
*/
#include <linux/init.h>
@ -143,6 +143,9 @@ void diagmem_setsize(int pool_idx, int itemsize, int poolsize)
}
diag_mempools[pool_idx].itemsize = itemsize;
if (diag_mempools[pool_idx].pool)
diag_mempools[pool_idx].pool->pool_data =
(void *)(uintptr_t)itemsize;
diag_mempools[pool_idx].poolsize = poolsize;
pr_debug("diag: Mempool %s sizes: itemsize %d poolsize %d\n",
diag_mempools[pool_idx].name, diag_mempools[pool_idx].itemsize,
@ -168,7 +171,8 @@ void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type)
mempool->name);
break;
}
if (size == 0 || size > mempool->itemsize) {
if (size == 0 || size > mempool->itemsize ||
size > (int)mempool->pool->pool_data) {
pr_err_ratelimited("diag: cannot alloc from mempool %s, invalid size: %d\n",
mempool->name, size);
break;

drivers/char/msm_smd_pkt.c Normal file
View file

@ -0,0 +1,977 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/platform_device.h>
#include <linux/ipc_logging.h>
#include <linux/refcount.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/rpmsg.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/idr.h>
#include <linux/of.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/termios.h>
#include <linux/msm_smd_pkt.h>
#define MODULE_NAME "msm_smdpkt"
#define DEVICE_NAME "smdpkt"
#define SMD_PKT_IPC_LOG_PAGE_CNT 2
/**
* struct smd_pkt_dev - driver context, relates rpdev to cdev
* @dev: smd pkt device
* @cdev: cdev for the smd pkt device
* @drv: rpmsg driver for registering to rpmsg bus
* @lock: synchronization of @rpdev and @open_tout modifications
* @ch_open: wait object for opening the smd channel
* @refcount: count how many userspace clients have handles
* @rpdev: underlying rpmsg device
* @queue_lock: synchronization of @queue operations
* @queue: incoming message queue
* @readq: wait object for incoming queue
* @sig_change: flag to indicate serial signal change
* @notify_state_update: notify channel state
* @fragmented_read: set from dt node for partial read
* @dev_name: /dev/@dev_name for smd_pkt device
* @ch_name: smd channel to match to
* @edge: smd edge to match to
* @open_tout: timeout for open syscall, configurable in sysfs
* @rskb: current skb being read
* @rdata: data pointer in current skb
* @rdata_len: remaining data to be read from skb
*/
struct smd_pkt_dev {
struct device dev;
struct cdev cdev;
struct rpmsg_driver drv;
struct mutex lock;
struct completion ch_open;
refcount_t refcount;
struct rpmsg_device *rpdev;
spinlock_t queue_lock;
struct sk_buff_head queue;
wait_queue_head_t readq;
int sig_change;
bool notify_state_update;
bool fragmented_read;
const char *dev_name;
const char *ch_name;
const char *edge;
int open_tout;
struct sk_buff *rskb;
unsigned char *rdata;
size_t rdata_len;
};
#define dev_to_smd_pkt_devp(_dev) container_of(_dev, struct smd_pkt_dev, dev)
#define cdev_to_smd_pkt_devp(_cdev) container_of(_cdev, \
struct smd_pkt_dev, cdev)
#define drv_to_rpdrv(_drv) container_of(_drv, struct rpmsg_driver, drv)
#define rpdrv_to_smd_pkt_devp(_rdrv) container_of(_rdrv, \
struct smd_pkt_dev, drv)
static void *smd_pkt_ilctxt;
static int smd_pkt_debug_mask;
module_param_named(debug_mask, smd_pkt_debug_mask, int, 0664);
enum {
SMD_PKT_INFO = 1U << 0,
};
#define SMD_PKT_INFO(x, ...) \
do { \
if (smd_pkt_debug_mask & SMD_PKT_INFO) { \
ipc_log_string(smd_pkt_ilctxt, \
"[%s]: "x, __func__, ##__VA_ARGS__); \
} \
} while (0)
#define SMD_PKT_ERR(x, ...) \
do { \
pr_err_ratelimited("[%s]: "x, __func__, ##__VA_ARGS__); \
ipc_log_string(smd_pkt_ilctxt, "[%s]: "x, __func__, ##__VA_ARGS__); \
} while (0)
#define SMD_PKT_IOCTL_QUEUE_RX_INTENT \
_IOW(SMD_PKT_IOCTL_MAGIC, 0, unsigned int)
static dev_t smd_pkt_major;
static struct class *smd_pkt_class;
static int num_smd_pkt_devs;
static DEFINE_IDA(smd_pkt_minor_ida);
static ssize_t open_timeout_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t n)
{
struct smd_pkt_dev *smd_pkt_devp = dev_to_smd_pkt_devp(dev);
long tmp;
mutex_lock(&smd_pkt_devp->lock);
if (kstrtol(buf, 0, &tmp)) {
mutex_unlock(&smd_pkt_devp->lock);
SMD_PKT_ERR("unable to convert:%s to an int for /dev/%s\n",
buf, smd_pkt_devp->dev_name);
return -EINVAL;
}
smd_pkt_devp->open_tout = tmp;
mutex_unlock(&smd_pkt_devp->lock);
return n;
}
static ssize_t open_timeout_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct smd_pkt_dev *smd_pkt_devp = dev_to_smd_pkt_devp(dev);
ssize_t ret;
mutex_lock(&smd_pkt_devp->lock);
ret = scnprintf(buf, PAGE_SIZE, "%d\n", smd_pkt_devp->open_tout);
mutex_unlock(&smd_pkt_devp->lock);
return ret;
}
static DEVICE_ATTR(open_timeout, 0664, open_timeout_show, open_timeout_store);
static int smd_pkt_rpdev_probe(struct rpmsg_device *rpdev)
{
struct device_driver *drv = rpdev->dev.driver;
struct rpmsg_driver *rpdrv = drv_to_rpdrv(drv);
struct smd_pkt_dev *smd_pkt_devp = rpdrv_to_smd_pkt_devp(rpdrv);
mutex_lock(&smd_pkt_devp->lock);
smd_pkt_devp->rpdev = rpdev;
smd_pkt_devp->notify_state_update = true;
mutex_unlock(&smd_pkt_devp->lock);
dev_set_drvdata(&rpdev->dev, smd_pkt_devp);
complete_all(&smd_pkt_devp->ch_open);
return 0;
}
static int smd_pkt_rpdev_cb(struct rpmsg_device *rpdev, void *buf, int len,
void *priv, u32 addr)
{
struct smd_pkt_dev *smd_pkt_devp = dev_get_drvdata(&rpdev->dev);
unsigned long flags;
struct sk_buff *skb;
if (!smd_pkt_devp)
return -EINVAL;
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
skb_put_data(skb, buf, len);
spin_lock_irqsave(&smd_pkt_devp->queue_lock, flags);
skb_queue_tail(&smd_pkt_devp->queue, skb);
spin_unlock_irqrestore(&smd_pkt_devp->queue_lock, flags);
/* wake up any blocking processes, waiting for new data */
wake_up_interruptible(&smd_pkt_devp->readq);
return 0;
}
static int smd_pkt_rpdev_sigs(struct rpmsg_device *rpdev, u32 old, u32 new)
{
struct device_driver *drv = rpdev->dev.driver;
struct rpmsg_driver *rpdrv = drv_to_rpdrv(drv);
struct smd_pkt_dev *smd_pkt_devp = rpdrv_to_smd_pkt_devp(rpdrv);
unsigned long flags;
spin_lock_irqsave(&smd_pkt_devp->queue_lock, flags);
smd_pkt_devp->sig_change = true;
spin_unlock_irqrestore(&smd_pkt_devp->queue_lock, flags);
/* wake up any blocking processes, waiting for new data */
wake_up_interruptible(&smd_pkt_devp->readq);
return 0;
}
/**
* smd_pkt_tiocmset() - set the signals for smd_pkt device
* smd_pkt_devp: Pointer to the smd_pkt device structure.
* cmd: IOCTL command.
* arg: Arguments to the ioctl call.
*
* This function is used to set the signals on the smd pkt device
* when a userspace client makes an ioctl() system call with TIOCMBIS,
* TIOCMBIC or TIOCMSET.
*/
static int smd_pkt_tiocmset(struct smd_pkt_dev *smd_pkt_devp, unsigned int cmd,
unsigned long arg)
{
u32 lsigs, rsigs, val;
int ret;
ret = get_user(val, (u32 *)arg);
if (ret)
return ret;
ret = rpmsg_get_sigs(smd_pkt_devp->rpdev->ept, &lsigs, &rsigs);
if (ret < 0) {
SMD_PKT_ERR("Get signals failed[%d]\n", ret);
return ret;
}
switch (cmd) {
case TIOCMBIS:
lsigs |= val;
break;
case TIOCMBIC:
lsigs &= ~val;
break;
case TIOCMSET:
lsigs = val;
break;
}
ret = rpmsg_set_sigs(smd_pkt_devp->rpdev->ept, lsigs);
SMD_PKT_INFO("sigs[0x%x] ret[%d]\n", lsigs, ret);
return ret;
}
/**
* smd_pkt_ioctl() - ioctl() syscall for the smd_pkt device
* file: Pointer to the file structure.
* cmd: IOCTL command.
* arg: Arguments to the ioctl call.
*
* This function implements ioctl() for the smd pkt device when a
* userspace client makes an ioctl() system call. All input arguments are
* validated by the virtual file system before calling this function.
*/
static long smd_pkt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct smd_pkt_dev *smd_pkt_devp;
unsigned long flags;
u32 lsigs, rsigs, resetsigs;
int ret;
smd_pkt_devp = file->private_data;
if (!smd_pkt_devp || refcount_read(&smd_pkt_devp->refcount) == 1) {
SMD_PKT_ERR("invalid device handle\n");
return -EINVAL;
}
if (mutex_lock_interruptible(&smd_pkt_devp->lock))
return -ERESTARTSYS;
if (!completion_done(&smd_pkt_devp->ch_open)) {
SMD_PKT_ERR("%s channel in reset\n", smd_pkt_devp->ch_name);
if ((cmd == TIOCMGET) && (smd_pkt_devp->notify_state_update)) {
resetsigs = TIOCM_OUT1 | TIOCM_OUT2;
smd_pkt_devp->notify_state_update = false;
mutex_unlock(&smd_pkt_devp->lock);
SMD_PKT_ERR("%s: reset notified resetsigs=%d\n",
smd_pkt_devp->ch_name, resetsigs);
ret = put_user(resetsigs, (uint32_t __user *)arg);
return ret;
}
mutex_unlock(&smd_pkt_devp->lock);
return -ENETRESET;
}
switch (cmd) {
case TIOCMGET:
resetsigs = 0;
spin_lock_irqsave(&smd_pkt_devp->queue_lock, flags);
smd_pkt_devp->sig_change = false;
if (smd_pkt_devp->notify_state_update) {
resetsigs = TIOCM_OUT2;
smd_pkt_devp->notify_state_update = false;
SMD_PKT_ERR("%s: reset notified resetsigs=%d\n",
smd_pkt_devp->ch_name, resetsigs);
}
spin_unlock_irqrestore(&smd_pkt_devp->queue_lock, flags);
ret = rpmsg_get_sigs(smd_pkt_devp->rpdev->ept, &lsigs, &rsigs);
if (!ret)
ret = put_user(rsigs | resetsigs,
(uint32_t __user *)arg);
break;
case TIOCMSET:
case TIOCMBIS:
case TIOCMBIC:
ret = smd_pkt_tiocmset(smd_pkt_devp, cmd, arg);
break;
case SMD_PKT_IOCTL_QUEUE_RX_INTENT:
/* Return success to not break userspace client logic */
ret = 0;
break;
default:
SMD_PKT_ERR("unrecognized ioctl command 0x%x\n", cmd);
ret = -ENOIOCTLCMD;
break;
}
mutex_unlock(&smd_pkt_devp->lock);
return ret;
}
/**
* smd_pkt_read() - read() syscall for the smd_pkt device
* file: Pointer to the file structure.
* buf: Pointer to the userspace buffer.
* count: Number of bytes to read from the file.
* ppos: Pointer to the position into the file.
*
* This function is used to read data from the smd pkt device when a
* userspace client makes a read() system call. All input arguments are
* validated by the virtual file system before calling this function.
*/
ssize_t smd_pkt_read(struct file *file,
char __user *buf,
size_t count,
loff_t *ppos)
{
struct smd_pkt_dev *smd_pkt_devp = file->private_data;
unsigned long flags;
int use;
if (!smd_pkt_devp ||
refcount_read(&smd_pkt_devp->refcount) == 1) {
SMD_PKT_ERR("invalid device handle\n");
return -EINVAL;
}
if (!completion_done(&smd_pkt_devp->ch_open)) {
SMD_PKT_ERR("%s channel in reset\n", smd_pkt_devp->ch_name);
return -ENETRESET;
}
SMD_PKT_INFO(
"begin for %s by %s:%d ref_cnt[%d], remaining[%d], count[%d]\n",
smd_pkt_devp->ch_name, current->comm,
task_pid_nr(current),
refcount_read(&smd_pkt_devp->refcount),
smd_pkt_devp->rdata_len, count);
spin_lock_irqsave(&smd_pkt_devp->queue_lock, flags);
/* Wait for data in the queue */
if (skb_queue_empty(&smd_pkt_devp->queue) && !smd_pkt_devp->rskb) {
spin_unlock_irqrestore(&smd_pkt_devp->queue_lock, flags);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
/* Wait until we get data or the endpoint goes away */
if (wait_event_interruptible(smd_pkt_devp->readq,
!skb_queue_empty(&smd_pkt_devp->queue) ||
!completion_done(&smd_pkt_devp->ch_open)))
return -ERESTARTSYS;
spin_lock_irqsave(&smd_pkt_devp->queue_lock, flags);
}
if (!smd_pkt_devp->rskb) {
smd_pkt_devp->rskb = skb_dequeue(&smd_pkt_devp->queue);
if (!smd_pkt_devp->rskb) {
spin_unlock_irqrestore(&smd_pkt_devp->queue_lock,
flags);
return -EFAULT;
}
smd_pkt_devp->rdata = smd_pkt_devp->rskb->data;
smd_pkt_devp->rdata_len = smd_pkt_devp->rskb->len;
}
spin_unlock_irqrestore(&smd_pkt_devp->queue_lock, flags);
use = min_t(size_t, count, smd_pkt_devp->rdata_len);
if (copy_to_user(buf, smd_pkt_devp->rdata, use))
use = -EFAULT;
if (!smd_pkt_devp->fragmented_read && smd_pkt_devp->rdata_len == use) {
struct sk_buff *skb = smd_pkt_devp->rskb;
spin_lock_irqsave(&smd_pkt_devp->queue_lock, flags);
smd_pkt_devp->rskb = NULL;
smd_pkt_devp->rdata = NULL;
smd_pkt_devp->rdata_len = 0;
spin_unlock_irqrestore(&smd_pkt_devp->queue_lock, flags);
kfree_skb(skb);
} else {
struct sk_buff *skb = NULL;
spin_lock_irqsave(&smd_pkt_devp->queue_lock, flags);
smd_pkt_devp->rdata += use;
smd_pkt_devp->rdata_len -= use;
if (smd_pkt_devp->rdata_len == 0) {
skb = smd_pkt_devp->rskb;
smd_pkt_devp->rskb = NULL;
smd_pkt_devp->rdata = NULL;
smd_pkt_devp->rdata_len = 0;
}
spin_unlock_irqrestore(&smd_pkt_devp->queue_lock, flags);
if (skb)
kfree_skb(skb);
}
SMD_PKT_INFO("end for %s by %s:%d ret[%d], remaining[%d]\n",
smd_pkt_devp->ch_name, current->comm,
task_pid_nr(current), use, smd_pkt_devp->rdata_len);
return use;
}
/**
* smd_pkt_write() - write() syscall for the smd_pkt device
* file: Pointer to the file structure.
* buf: Pointer to the userspace buffer.
* count: Number of bytes to write to the file.
* ppos: Pointer to the position into the file.
*
* This function is used to write data to the smd pkt device when a
* userspace client makes a write() system call. All input arguments are
* validated by the virtual file system before calling this function.
*/
ssize_t smd_pkt_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *ppos)
{
struct smd_pkt_dev *smd_pkt_devp = file->private_data;
void *kbuf;
int ret;
if (!smd_pkt_devp || refcount_read(&smd_pkt_devp->refcount) == 1) {
SMD_PKT_ERR("invalid device handle\n");
return -EINVAL;
}
SMD_PKT_INFO("begin to %s buffer_size %zu\n",
smd_pkt_devp->ch_name, count);
kbuf = memdup_user(buf, count);
if (IS_ERR(kbuf))
return PTR_ERR(kbuf);
if (mutex_lock_interruptible(&smd_pkt_devp->lock)) {
ret = -ERESTARTSYS;
goto free_kbuf;
}
if (!completion_done(&smd_pkt_devp->ch_open) ||
!smd_pkt_devp->rpdev) {
SMD_PKT_ERR("%s channel in reset\n", smd_pkt_devp->ch_name);
ret = -ENETRESET;
goto unlock_ch;
}
if (file->f_flags & O_NONBLOCK)
ret = rpmsg_trysend(smd_pkt_devp->rpdev->ept, kbuf, count);
else
ret = rpmsg_send(smd_pkt_devp->rpdev->ept, kbuf, count);
unlock_ch:
mutex_unlock(&smd_pkt_devp->lock);
free_kbuf:
kfree(kbuf);
SMD_PKT_INFO("finish to %s ret %d\n", smd_pkt_devp->ch_name, ret);
return ret < 0 ? ret : count;
}
/**
* smd_pkt_poll() - poll() syscall for the smd_pkt device
* file: Pointer to the file structure.
* wait: pointer to Poll table.
*
* This function is used to poll on the smd pkt device when a
* userspace client makes a poll() system call. All input arguments are
* validated by the virtual file system before calling this function.
*/
static unsigned int smd_pkt_poll(struct file *file, poll_table *wait)
{
struct smd_pkt_dev *smd_pkt_devp = file->private_data;
unsigned int mask = 0;
unsigned long flags;
if (!smd_pkt_devp || refcount_read(&smd_pkt_devp->refcount) == 1) {
SMD_PKT_ERR("invalid device handle\n");
return POLLERR;
}
if (!completion_done(&smd_pkt_devp->ch_open)) {
SMD_PKT_ERR("%s channel in reset\n", smd_pkt_devp->ch_name);
return POLLHUP;
}
poll_wait(file, &smd_pkt_devp->readq, wait);
mutex_lock(&smd_pkt_devp->lock);
if (!completion_done(&smd_pkt_devp->ch_open) ||
!smd_pkt_devp->rpdev) {
SMD_PKT_ERR("%s channel reset after wait\n",
smd_pkt_devp->ch_name);
mutex_unlock(&smd_pkt_devp->lock);
return POLLHUP;
}
spin_lock_irqsave(&smd_pkt_devp->queue_lock, flags);
if (!skb_queue_empty(&smd_pkt_devp->queue) || smd_pkt_devp->rskb)
mask |= POLLIN | POLLRDNORM;
if (smd_pkt_devp->sig_change)
mask |= POLLPRI;
spin_unlock_irqrestore(&smd_pkt_devp->queue_lock, flags);
mask |= rpmsg_poll(smd_pkt_devp->rpdev->ept, file, wait);
mutex_unlock(&smd_pkt_devp->lock);
return mask;
}
static void smd_pkt_rpdev_remove(struct rpmsg_device *rpdev)
{
struct device_driver *drv = rpdev->dev.driver;
struct rpmsg_driver *rpdrv = drv_to_rpdrv(drv);
struct smd_pkt_dev *smd_pkt_devp = rpdrv_to_smd_pkt_devp(rpdrv);
mutex_lock(&smd_pkt_devp->lock);
smd_pkt_devp->rpdev = NULL;
smd_pkt_devp->notify_state_update = true;
mutex_unlock(&smd_pkt_devp->lock);
dev_set_drvdata(&rpdev->dev, NULL);
/* wake up any blocked readers */
reinit_completion(&smd_pkt_devp->ch_open);
wake_up_interruptible(&smd_pkt_devp->readq);
}
/**
* smd_pkt_open() - open() syscall for the smd_pkt device
* inode: Pointer to the inode structure.
* file: Pointer to the file structure.
*
* This function is used to open the smd pkt device when a
* userspace client makes an open() system call. All input arguments are
* validated by the virtual file system before calling this function.
*/
int smd_pkt_open(struct inode *inode, struct file *file)
{
struct smd_pkt_dev *smd_pkt_devp = cdev_to_smd_pkt_devp(inode->i_cdev);
int tout = msecs_to_jiffies(smd_pkt_devp->open_tout * 1000);
struct device *dev = &smd_pkt_devp->dev;
int ret;
refcount_inc(&smd_pkt_devp->refcount);
get_device(dev);
SMD_PKT_INFO("begin for %s by %s:%d ref_cnt[%d]\n",
smd_pkt_devp->ch_name, current->comm,
task_pid_nr(current),
refcount_read(&smd_pkt_devp->refcount));
ret = wait_for_completion_interruptible_timeout(&smd_pkt_devp->ch_open,
tout);
if (ret <= 0) {
refcount_dec(&smd_pkt_devp->refcount);
put_device(dev);
SMD_PKT_INFO("timeout for %s by %s:%d\n", smd_pkt_devp->ch_name,
current->comm, task_pid_nr(current));
return -ETIMEDOUT;
}
file->private_data = smd_pkt_devp;
SMD_PKT_INFO("end for %s by %s:%d ref_cnt[%d]\n",
smd_pkt_devp->ch_name, current->comm,
task_pid_nr(current),
refcount_read(&smd_pkt_devp->refcount));
return 0;
}
/**
* smd_pkt_release() - release operation on smd_pkt device
* inode: Pointer to the inode structure.
* file: Pointer to the file structure.
*
* This function is used to release the smd pkt device when a
* userspace client makes a close() system call. All input arguments are
* validated by the virtual file system before calling this function.
*/
int smd_pkt_release(struct inode *inode, struct file *file)
{
struct smd_pkt_dev *smd_pkt_devp = cdev_to_smd_pkt_devp(inode->i_cdev);
struct device *dev = &smd_pkt_devp->dev;
struct sk_buff *skb;
unsigned long flags;
SMD_PKT_INFO("for %s by %s:%d ref_cnt[%d]\n",
smd_pkt_devp->ch_name, current->comm,
task_pid_nr(current),
refcount_read(&smd_pkt_devp->refcount));
refcount_dec(&smd_pkt_devp->refcount);
if (refcount_read(&smd_pkt_devp->refcount) == 1) {
spin_lock_irqsave(&smd_pkt_devp->queue_lock, flags);
/* Discard all SKBs */
if (smd_pkt_devp->rskb) {
kfree_skb(smd_pkt_devp->rskb);
smd_pkt_devp->rskb = NULL;
smd_pkt_devp->rdata = NULL;
smd_pkt_devp->rdata_len = 0;
}
while (!skb_queue_empty(&smd_pkt_devp->queue)) {
skb = skb_dequeue(&smd_pkt_devp->queue);
kfree_skb(skb);
}
wake_up_interruptible(&smd_pkt_devp->readq);
smd_pkt_devp->sig_change = false;
spin_unlock_irqrestore(&smd_pkt_devp->queue_lock, flags);
}
put_device(dev);
return 0;
}
static const struct file_operations smd_pkt_fops = {
.owner = THIS_MODULE,
.open = smd_pkt_open,
.release = smd_pkt_release,
.read = smd_pkt_read,
.write = smd_pkt_write,
.poll = smd_pkt_poll,
.unlocked_ioctl = smd_pkt_ioctl,
.compat_ioctl = smd_pkt_ioctl,
};
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct smd_pkt_dev *smd_pkt_devp = dev_to_smd_pkt_devp(dev);
return scnprintf(buf, RPMSG_NAME_SIZE, "%s\n", smd_pkt_devp->ch_name);
}
static DEVICE_ATTR_RO(name);
static struct attribute *smd_pkt_device_attrs[] = {
&dev_attr_name.attr,
NULL
};
ATTRIBUTE_GROUPS(smd_pkt_device);
/**
* smd_pkt_parse_devicetree() - parse device tree binding for a subnode
*
* np: pointer to a device tree node
* smd_pkt_devp: pointer to SMD PACKET device
*
* Return: 0 on success, standard Linux error codes on error.
*/
static int smd_pkt_parse_devicetree(struct device_node *np,
struct smd_pkt_dev *smd_pkt_devp)
{
char *key;
int ret;
key = "qcom,smdpkt-edge";
ret = of_property_read_string(np, key, &smd_pkt_devp->edge);
if (ret < 0)
goto error;
key = "qcom,smdpkt-ch-name";
ret = of_property_read_string(np, key, &smd_pkt_devp->ch_name);
if (ret < 0)
goto error;
key = "qcom,smdpkt-dev-name";
ret = of_property_read_string(np, key, &smd_pkt_devp->dev_name);
if (ret < 0)
goto error;
key = "qcom,smdpkt-fragmented-read";
smd_pkt_devp->fragmented_read = of_property_read_bool(np, key);
SMD_PKT_INFO("Parsed %s:%s /dev/%s\n", smd_pkt_devp->edge,
smd_pkt_devp->ch_name,
smd_pkt_devp->dev_name,
smd_pkt_devp->fragmented_read);
return 0;
error:
SMD_PKT_ERR("missing key: %s\n", key);
return ret;
}
static void smd_pkt_release_device(struct device *dev)
{
struct smd_pkt_dev *smd_pkt_devp = dev_to_smd_pkt_devp(dev);
ida_simple_remove(&smd_pkt_minor_ida, MINOR(smd_pkt_devp->dev.devt));
cdev_del(&smd_pkt_devp->cdev);
}
static int smd_pkt_init_rpmsg(struct smd_pkt_dev *smd_pkt_devp)
{
struct rpmsg_driver *rpdrv = &smd_pkt_devp->drv;
struct device *dev = &smd_pkt_devp->dev;
struct rpmsg_device_id *match;
char *drv_name;
/* zalloc array of two to NULL terminate the match list */
match = devm_kzalloc(dev, 2 * sizeof(*match), GFP_KERNEL);
if (!match)
return -ENOMEM;
snprintf(match->name, RPMSG_NAME_SIZE, "%s", smd_pkt_devp->ch_name);
drv_name = devm_kasprintf(dev, GFP_KERNEL,
"%s_%s", "msm_smd_pkt", smd_pkt_devp->dev_name);
if (!drv_name)
return -ENOMEM;
rpdrv->probe = smd_pkt_rpdev_probe;
rpdrv->remove = smd_pkt_rpdev_remove;
rpdrv->callback = smd_pkt_rpdev_cb;
rpdrv->signals = smd_pkt_rpdev_sigs;
rpdrv->id_table = match;
rpdrv->drv.name = drv_name;
register_rpmsg_driver(rpdrv);
return 0;
}
/**
* smd_pkt_create_device() - create smd packet device and add cdev
* parent: pointer to the parent device of this smd packet device
* np: pointer to device node this smd packet device represents
*
* return: 0 for success, Standard Linux errors
*/
static int smd_pkt_create_device(struct device *parent,
struct device_node *np)
{
struct smd_pkt_dev *smd_pkt_devp;
struct device *dev;
int ret;
smd_pkt_devp = devm_kzalloc(parent, sizeof(*smd_pkt_devp), GFP_KERNEL);
if (!smd_pkt_devp)
return -ENOMEM;
ret = smd_pkt_parse_devicetree(np, smd_pkt_devp);
if (ret < 0) {
SMD_PKT_ERR("failed to parse dt ret:%d\n", ret);
goto free_smd_pkt_devp;
}
dev = &smd_pkt_devp->dev;
mutex_init(&smd_pkt_devp->lock);
refcount_set(&smd_pkt_devp->refcount, 1);
init_completion(&smd_pkt_devp->ch_open);
/* Default timeout for open() is 120 sec */
smd_pkt_devp->open_tout = 120;
smd_pkt_devp->sig_change = false;
spin_lock_init(&smd_pkt_devp->queue_lock);
smd_pkt_devp->rskb = NULL;
smd_pkt_devp->rdata = NULL;
smd_pkt_devp->rdata_len = 0;
skb_queue_head_init(&smd_pkt_devp->queue);
init_waitqueue_head(&smd_pkt_devp->readq);
device_initialize(dev);
dev->class = smd_pkt_class;
dev->parent = parent;
dev->groups = smd_pkt_device_groups;
dev_set_drvdata(dev, smd_pkt_devp);
cdev_init(&smd_pkt_devp->cdev, &smd_pkt_fops);
smd_pkt_devp->cdev.owner = THIS_MODULE;
ret = ida_simple_get(&smd_pkt_minor_ida, 0, num_smd_pkt_devs,
GFP_KERNEL);
if (ret < 0)
goto free_dev;
dev->devt = MKDEV(MAJOR(smd_pkt_major), ret);
dev_set_name(dev, smd_pkt_devp->dev_name, ret);
ret = cdev_add(&smd_pkt_devp->cdev, dev->devt, 1);
if (ret) {
SMD_PKT_ERR("cdev_add failed for %s ret:%d\n",
smd_pkt_devp->dev_name, ret);
goto free_minor_ida;
}
dev->release = smd_pkt_release_device;
ret = device_add(dev);
if (ret) {
SMD_PKT_ERR("device_create failed for %s ret:%d\n",
smd_pkt_devp->dev_name, ret);
goto free_minor_ida;
}
if (device_create_file(dev, &dev_attr_open_timeout))
SMD_PKT_ERR("device_create_file failed for %s\n",
smd_pkt_devp->dev_name);
if (smd_pkt_init_rpmsg(smd_pkt_devp))
goto free_minor_ida;
return 0;
free_minor_ida:
ida_simple_remove(&smd_pkt_minor_ida, MINOR(dev->devt));
free_dev:
put_device(dev);
free_smd_pkt_devp:
return ret;
}
/**
* smd_pkt_deinit() - De-initialize this module
*
* This function frees all the memory and unregisters the char device region.
*/
static void smd_pkt_deinit(void)
{
class_destroy(smd_pkt_class);
unregister_chrdev_region(MAJOR(smd_pkt_major), num_smd_pkt_devs);
}
/**
* msm_smd_pkt_probe() - Probe a SMD packet device
*
* pdev: Pointer to platform device.
*
* return: 0 on success, standard Linux error codes on error.
*
* This function is called when the underlying device tree driver registers
* a platform device, mapped to a SMD packet device.
*/
static int msm_smd_pkt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *cn;
int ret;
num_smd_pkt_devs = of_get_child_count(dev->of_node);
ret = alloc_chrdev_region(&smd_pkt_major, 0, num_smd_pkt_devs,
"smdpkt");
if (ret < 0) {
SMD_PKT_ERR("alloc_chrdev_region failed ret:%d\n", ret);
return ret;
}
smd_pkt_class = class_create(THIS_MODULE, "smdpkt");
if (IS_ERR(smd_pkt_class)) {
SMD_PKT_ERR("class_create failed ret:%ld\n",
PTR_ERR(smd_pkt_class));
ret = PTR_ERR(smd_pkt_class);
goto error_deinit;
}
for_each_child_of_node(dev->of_node, cn)
smd_pkt_create_device(dev, cn);
SMD_PKT_INFO("smd Packet Port Driver Initialized\n");
return 0;
error_deinit:
smd_pkt_deinit();
return ret;
}
static const struct of_device_id msm_smd_pkt_match_table[] = {
{ .compatible = "qcom,smdpkt" },
{},
};
static struct platform_driver msm_smd_pkt_driver = {
.probe = msm_smd_pkt_probe,
.driver = {
.name = MODULE_NAME,
.of_match_table = msm_smd_pkt_match_table,
},
};
/**
* smd_pkt_init() - Initialization function for this module
*
* returns: 0 on success, standard Linux error code otherwise.
*/
static int __init smd_pkt_init(void)
{
int rc;
rc = platform_driver_register(&msm_smd_pkt_driver);
if (rc) {
SMD_PKT_ERR("msm_smd_pkt driver register failed %d\n", rc);
return rc;
}
smd_pkt_ilctxt = ipc_log_context_create(SMD_PKT_IPC_LOG_PAGE_CNT,
"smd_pkt", 0);
return 0;
}
module_init(smd_pkt_init);
/**
* smd_pkt_exit() - Exit function for this module
*
* This function is used to cleanup the module during the exit.
*/
static void __exit smd_pkt_exit(void)
{
platform_driver_unregister(&msm_smd_pkt_driver);
smd_pkt_deinit();
}
module_exit(smd_pkt_exit);
MODULE_DESCRIPTION("MSM Shared Memory Packet Port");
MODULE_LICENSE("GPL v2");
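Since the kerneldoc above describes the char-device contract only from the kernel side (open() blocking until the channel probes, packet-oriented read/write, TIOCM* ioctls that also report remote resets via TIOCM_OUT1/OUT2), a short userspace sketch may help. It assumes a hypothetical device node derived from the qcom,smdpkt-dev-name property; nothing below ships with this commit.

	/* Userspace sketch; "/dev/smdpkt_example" is a placeholder node name. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <termios.h>   /* TIOCMGET, TIOCM_OUT1/OUT2 */
	#include <unistd.h>

	int main(void)
	{
		const char msg[] = "hello";
		char rx[128];
		unsigned int sigs = 0;
		ssize_t n;
		int fd;

		/* Blocks until the rpmsg channel is up or open_timeout expires. */
		fd = open("/dev/smdpkt_example", O_RDWR);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* TIOCM_OUT1/OUT2 set in the result indicate a remote reset. */
		if (ioctl(fd, TIOCMGET, &sigs) == 0)
			printf("signals: 0x%x\n", sigs);

		/* One write() is one packet on the channel. */
		n = write(fd, msg, sizeof(msg));
		if (n < 0)
			perror("write");

		/* One read() returns up to one packet; may block until the
		 * remote side actually sends something.
		 */
		n = read(fd, rx, sizeof(rx));
		if (n > 0)
			printf("got %zd bytes\n", n);

		close(fd);
		return 0;
	}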

View file

@ -1828,7 +1828,7 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
return urandom_read_nowarn(file, buf, nbytes, ppos);
}
static ssize_t __maybe_unused
static ssize_t
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
int ret;
@ -1958,7 +1958,7 @@ static int random_fasync(int fd, struct file *filp, int on)
}
const struct file_operations random_fops = {
.read = urandom_read,
.read = random_read,
.write = random_write,
.poll = random_poll,
.unlocked_ioctl = random_ioctl,
@ -2296,4 +2296,4 @@ void add_bootloader_randomness(const void *buf, unsigned int size)
else
add_device_randomness(buf, size);
}
EXPORT_SYMBOL_GPL(add_bootloader_randomness);
EXPORT_SYMBOL_GPL(add_bootloader_randomness);

View file

@ -584,3 +584,27 @@ config CLOCK_CPU_OSM_660
frequency and voltage requests for multiple clusters via the
existence of multiple OSM domains.
Say Y if you want to support OSM clocks.
config SDM_GCC_429W
tristate "SDM429w Global Clock Controller"
depends on COMMON_CLK_QCOM
help
Support for the global clock controller on SDM429w/QM215 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
I2C, USB, UFS, SDCC, Display, Camera, Video etc.
config SDM_DEBUGCC_429W
tristate "SDM429W Debug Clock Controller"
depends on SDM_GCC_429W
help
Support for the debug clock controller on Qualcomm Technologies, Inc
SDM429W/QM215 devices.
Say Y if you want to support the clock measurement functionality.
config CLOCK_CPU_SDM
bool "CPU SDM Clock Controller"
depends on COMMON_CLK_QCOM
help
Support for the CPU clock controller on SDM-based devices (e.g. QM215/SDM429).
Say Y if you want to support CPU clock scaling using
CPUfreq drivers for dynamic power management.

View file

@ -20,6 +20,7 @@ clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
obj-$(CONFIG_CLOCK_CPU_OSM_660) += clk-cpu-osm-660.o
obj-$(CONFIG_CLOCK_CPU_SDM) += clk-cpu-sdm.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
@ -55,9 +56,11 @@ obj-$(CONFIG_QM_GCC_SCUBA) += gcc-scuba.o
obj-$(CONFIG_QM_GPUCC_SCUBA) += gpucc-scuba.o
obj-$(CONFIG_QM_DEBUGCC_SCUBA) += debugcc-scuba.o
obj-$(CONFIG_SDM_CAMCC_LAGOON) += camcc-lagoon.o
obj-$(CONFIG_SDM_DEBUGCC_429W) += debugcc-sdm429w.o
obj-$(CONFIG_SDM_DEBUGCC_LAGOON) += debugcc-lagoon.o
obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
obj-$(CONFIG_SDM_DISPCC_LAGOON) += dispcc-lagoon.o
obj-$(CONFIG_SDM_GCC_429W) += gcc-sdm429w.o
obj-$(CONFIG_SDM_GCC_660) += gcc-sdm660.o
obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
obj-$(CONFIG_SDM_GCC_LAGOON) += gcc-lagoon.o

View file

@ -17,7 +17,15 @@
#include "clk-regmap.h"
#include "clk-regmap-mux-div.h"
static const u32 gpll0_a53cc_map[] = { 4, 5 };
enum apcs_mux_clk_parent {
P_GPLL0,
P_APCS_CPU_PLL,
};
static const struct parent_map gpll0_a53cc_map[] = {
{ P_GPLL0, 4 },
{ P_APCS_CPU_PLL, 5 },
};
static const char * const gpll0_a53cc[] = {
"gpll0_vote",

File diff suppressed because it is too large

View file

@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016, 2019-2020 The Linux Foundation. All rights reserved. */
/* Copyright (c) 2016, 2019-2021 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
#include <linux/export.h>
@ -15,6 +15,7 @@
#include "clk-regmap.h"
#include "clk-debug.h"
#include "common.h"
#include "gdsc-debug.h"
static struct clk_hw *measure;
@ -413,16 +414,22 @@ EXPORT_SYMBOL(map_debug_bases);
/**
* qcom_clk_dump - dump the HW specific registers associated with this clock
* and regulator
* @clk: clock source
* @regulator: regulator
* @calltrace: indicates whether calltrace is required
*
* This function attempts to print all the registers associated with the
* clock and it's parents.
* clock, its parents and the regulator.
*/
void qcom_clk_dump(struct clk *clk, bool calltrace)
void qcom_clk_dump(struct clk *clk, struct regulator *regulator,
bool calltrace)
{
struct clk_hw *hw;
if (!IS_ERR_OR_NULL(regulator))
gdsc_debug_print_regs(regulator);
if (IS_ERR_OR_NULL(clk))
return;
@ -437,22 +444,27 @@ EXPORT_SYMBOL(qcom_clk_dump);
/**
* qcom_clk_bulk_dump - dump the HW specific registers associated with clocks
* @clks: the clk_bulk_data table of consumer
* and regulator
* @num_clks: the number of clk_bulk_data
* @clks: the clk_bulk_data table of consumer
* @regulator: regulator source
* @calltrace: indicates whether calltrace is required
*
* This function attempts to print all the registers associated with the
* clock and it's parents for all the clocks in the list.
* clocks in the list and the regulator.
*/
void qcom_clk_bulk_dump(int num_clks, struct clk_bulk_data *clks,
bool calltrace)
struct regulator *regulator, bool calltrace)
{
int i;
if (!IS_ERR_OR_NULL(regulator))
gdsc_debug_print_regs(regulator);
if (IS_ERR_OR_NULL(clks))
return;
for (i = 0; i < num_clks; i++)
qcom_clk_dump(clks[i].clk, calltrace);
qcom_clk_dump(clks[i].clk, NULL, calltrace);
}
EXPORT_SYMBOL(qcom_clk_bulk_dump);
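Callers now pass the regulator handle alongside the clocks. The fragment below is a hedged sketch of the updated calling convention; the gdsc handle and the surrounding driver are invented, and it assumes the prototypes are visible to the caller.

	#include <linux/clk.h>
	#include <linux/regulator/consumer.h>

	/* Hypothetical error path in a consumer driver. */
	static void example_dump_on_timeout(struct clk_bulk_data *clks, int num_clks,
					    struct regulator *gdsc)
	{
		/* Dump every clock in the list plus the GDSC registers, with a call trace. */
		qcom_clk_bulk_dump(num_clks, clks, gdsc, true);

		/* Dump a single clock, without regulator state or call trace. */
		qcom_clk_dump(clks[0].clk, NULL, false);
	}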

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
* Copyright (c) 2013, 2021, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@ -342,3 +342,64 @@ const struct clk_ops clk_pll_sr2_ops = {
.determine_rate = clk_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pll_sr2_ops);
static int
clk_pll_hf_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate)
{
struct clk_pll *pll = to_clk_pll(hw);
bool enabled;
u32 mode, l_val;
u32 enable_mask = PLL_OUTCTRL | PLL_BYPASSNL | PLL_RESET_N;
regmap_read(pll->clkr.regmap, pll->mode_reg, &mode);
enabled = (mode & enable_mask) == enable_mask;
if (enabled)
clk_pll_disable(hw);
l_val = rate / prate;
regmap_update_bits(pll->clkr.regmap, pll->l_reg, 0x3ff, l_val);
regmap_update_bits(pll->clkr.regmap, pll->m_reg, 0x7ffff, 0);
regmap_update_bits(pll->clkr.regmap, pll->n_reg, 0x7ffff, 1);
if (enabled)
clk_pll_sr2_enable(hw);
return 0;
}
static void clk_pll_hf_list_registers(struct seq_file *f, struct clk_hw *hw)
{
struct clk_pll *pll = to_clk_pll(hw);
int size, i, val;
static struct clk_register_data data[] = {
{"PLL_MODE", 0x0},
{"PLL_L_VAL", 0x4},
{"PLL_M_VAL", 0x8},
{"PLL_N_VAL", 0xC},
{"PLL_USER_CTL", 0x10},
{"PLL_CONFIG_CTL", 0x14},
{"PLL_STATUS_CTL", 0x1C},
};
size = ARRAY_SIZE(data);
for (i = 0; i < size; i++) {
regmap_read(pll->clkr.regmap, pll->mode_reg + data[i].offset,
&val);
clock_debug_output(f, false,
"%20s: 0x%.8x\n", data[i].name, val);
}
}
const struct clk_ops clk_pll_hf_ops = {
.enable = clk_pll_sr2_enable,
.disable = clk_pll_disable,
.set_rate = clk_pll_hf_set_rate,
.recalc_rate = clk_pll_recalc_rate,
.determine_rate = clk_pll_determine_rate,
.list_registers = clk_pll_hf_list_registers,
};
EXPORT_SYMBOL(clk_pll_hf_ops);
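A provider would attach these ops through a struct clk_pll descriptor; set_rate programs L = rate/prate with M forced to 0 and N to 1, so the parent is expected to be the fixed XO. The descriptor below is a hedged sketch only: the register offsets, status bit and parent name are placeholders, not values taken from this commit.

	/* Hypothetical HF PLL wired to clk_pll_hf_ops (all values are placeholders). */
	static struct clk_pll example_hf_pll = {
		.mode_reg = 0x0,
		.l_reg = 0x4,
		.m_reg = 0x8,
		.n_reg = 0xc,
		.status_reg = 0x1c,
		.status_bit = 17,
		.clkr.hw.init = &(struct clk_init_data){
			.name = "example_hf_pll",
			.parent_names = (const char *[]){ "bi_tcxo" },
			.num_parents = 1,
			.ops = &clk_pll_hf_ops,
		},
	};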

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
* Copyright (c) 2013, 2021, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@ -63,6 +63,7 @@ struct clk_pll {
extern const struct clk_ops clk_pll_ops;
extern const struct clk_ops clk_pll_vote_ops;
extern const struct clk_ops clk_pll_sr2_ops;
extern const struct clk_ops clk_pll_hf_ops;
#define to_clk_pll(_hw) container_of(to_clk_regmap(_hw), struct clk_pll, clkr)

View file

@ -56,20 +56,26 @@ int mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div)
}
EXPORT_SYMBOL_GPL(mux_div_set_src_div);
static void mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src,
u32 *div)
int mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src,
u32 *div)
{
int ret = 0;
u32 val, d, s;
const char *name = clk_hw_get_name(&md->clkr.hw);
regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset, &val);
ret = regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset, &val);
if (ret)
return ret;
if (val & CMD_RCGR_DIRTY_CFG) {
pr_err("%s: RCG configuration is pending\n", name);
return;
return -EBUSY;
}
regmap_read(md->clkr.regmap, CFG_RCGR + md->reg_offset, &val);
ret = regmap_read(md->clkr.regmap, CFG_RCGR + md->reg_offset, &val);
if (ret)
return ret;
s = (val >> md->src_shift);
s &= BIT(md->src_width) - 1;
*src = s;
@ -77,6 +83,8 @@ static void mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src,
d = (val >> md->hid_shift);
d &= BIT(md->hid_width) - 1;
*div = d;
return ret;
}
static inline bool is_better_rate(unsigned long req, unsigned long best,
@ -142,7 +150,7 @@ static int __mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
if (is_better_rate(rate, best_rate, actual_rate)) {
best_rate = actual_rate;
best_src = md->parent_map[i];
best_src = md->parent_map[i].cfg;
best_div = div - 1;
}
@ -169,7 +177,7 @@ static u8 mux_div_get_parent(struct clk_hw *hw)
mux_div_get_src_div(md, &src, &div);
for (i = 0; i < clk_hw_get_num_parents(hw); i++)
if (src == md->parent_map[i])
if (src == md->parent_map[i].cfg)
return i;
pr_err("%s: Can't find parent with src %d\n", name, src);
@ -180,7 +188,7 @@ static int mux_div_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
return mux_div_set_src_div(md, md->parent_map[index], md->div);
return mux_div_set_src_div(md, md->parent_map[index].cfg, md->div);
}
static int mux_div_set_rate(struct clk_hw *hw,
@ -197,7 +205,7 @@ static int mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
return __mux_div_set_rate_and_parent(hw, rate, prate,
md->parent_map[index]);
md->parent_map[index].cfg);
}
static unsigned long mux_div_recalc_rate(struct clk_hw *hw, unsigned long prate)
@ -209,7 +217,7 @@ static unsigned long mux_div_recalc_rate(struct clk_hw *hw, unsigned long prate)
mux_div_get_src_div(md, &src, &div);
for (i = 0; i < num_parents; i++)
if (src == md->parent_map[i]) {
if (src == md->parent_map[i].cfg) {
struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);
unsigned long parent_rate = clk_hw_get_rate(p);
@ -220,7 +228,23 @@ static unsigned long mux_div_recalc_rate(struct clk_hw *hw, unsigned long prate)
return 0;
}
static int mux_div_enable(struct clk_hw *hw)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
return mux_div_set_src_div(md, md->src, md->div);
}
static void mux_div_disable(struct clk_hw *hw)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
mux_div_set_src_div(md, md->safe_src, md->safe_div);
}
const struct clk_ops clk_regmap_mux_div_ops = {
.enable = mux_div_enable,
.disable = mux_div_disable,
.get_parent = mux_div_get_parent,
.set_parent = mux_div_set_parent,
.set_rate = mux_div_set_rate,

View file

@ -8,6 +8,7 @@
#define __QCOM_CLK_REGMAP_MUX_DIV_H__
#include <linux/clk-provider.h>
#include "common.h"
#include "clk-regmap.h"
/**
@ -19,7 +20,19 @@
* @src_shift: lowest bit of source select field
* @div: the divider raw configuration value
* @src: the mux index which will be used if the clock is enabled
* @parent_map: map from parent_names index to src_sel field
* @safe_src: the safe source mux value we switch to, while the main PLL is
* reconfigured
* @safe_div: the safe divider value that we set, while the main PLL is
* reconfigured
* @safe_freq: When switching rates from A to B, the mux div clock will
* instead switch from A -> safe_freq -> B. This allows the
* mux_div clock to change rates while enabled, even if this
* behavior is not supported by the parent clocks.
* If changing the rate of parent A also causes the rate of
* parent B to change, then safe_freq must be defined.
* safe_freq is expected to have a source clock which is always
* on and runs at only one rate.
* @parent_map: pointer to parent_map struct
* @clkr: handle between common and hardware-specific interfaces
* @pclk: the input PLL clock
* @clk_nb: clock notifier for rate changes of the input PLL
@ -32,7 +45,10 @@ struct clk_regmap_mux_div {
u32 src_shift;
u32 div;
u32 src;
const u32 *parent_map;
u32 safe_src;
u32 safe_div;
unsigned long safe_freq;
const struct parent_map *parent_map;
struct clk_regmap clkr;
struct clk *pclk;
struct notifier_block clk_nb;
@ -40,5 +56,6 @@ struct clk_regmap_mux_div {
extern const struct clk_ops clk_regmap_mux_div_ops;
extern int mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div);
int mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src, u32 *div);
#endif
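Putting the new fields together, a CPU clock built on this interface supplies parent_map entries (mux selector values), the safe source/divider pair to park on while the PLL retunes, and optionally safe_freq. The initializer below is a hedged sketch reusing the gpll0_a53cc names from the clk-cpu-sdm change above; the raw divider value and rate are invented.

	/* Hypothetical CPU mux-div clock; values below are illustrative only. */
	static struct clk_regmap_mux_div example_cpu_clk = {
		.reg_offset = 0x0,
		.hid_width  = 5,
		.hid_shift  = 0,
		.src_width  = 3,
		.src_shift  = 8,
		.safe_src   = 4,		/* GPLL0 selector, per gpll0_a53cc_map */
		.safe_div   = 1,		/* raw divider value to park on (placeholder) */
		.safe_freq  = 400000000,	/* rate used while the CPU PLL is reprogrammed */
		.parent_map = gpll0_a53cc_map,
		.clkr.hw.init = &(struct clk_init_data){
			.name = "example_cpu_clk",
			.parent_names = gpll0_a53cc,
			.num_parents = ARRAY_SIZE(gpll0_a53cc),
			.ops = &clk_regmap_mux_div_ops,
			.flags = CLK_SET_RATE_PARENT,
		},
	};

With this in place, the new mux_div_enable()/mux_div_disable() ops switch the hardware between (src, div) and (safe_src, safe_div) automatically.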

View file

@ -1080,6 +1080,108 @@ static const struct rpm_smd_clk_desc rpm_clk_sdm660 = {
.num_clks = ARRAY_SIZE(sdm660_clks),
};
/* sdm429w SMD clocks */
DEFINE_CLK_SMD_RPM_BRANCH(sdm429w, bi_tcxo, bi_tcxo_ao,
QCOM_SMD_RPM_MISC_CLK, 0, 19200000);
DEFINE_CLK_SMD_RPM(sdm429w, pnoc_clk, pnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
DEFINE_CLK_SMD_RPM(sdm429w, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
DEFINE_CLK_SMD_RPM(sdm429w, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
DEFINE_CLK_SMD_RPM(sdm429w, sysmmnoc_clk, sysmmnoc_a_clk, QCOM_SMD_RPM_BUS_CLK,
2);
DEFINE_CLK_SMD_RPM_QDSS(sdm429w, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK,
1);
DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm429w, bb_clk1, bb_clk1_a, 1);
DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm429w, bb_clk2, bb_clk2_a, 2);
DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm429w, rf_clk2, rf_clk2_a, 5);
DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm429w, div_clk2, div_clk2_a, 0xc);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm429w, bb_clk1_pin, bb_clk1_a_pin, 1);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm429w, bb_clk2_pin, bb_clk2_a_pin, 2);
/* Voter clocks */
static DEFINE_CLK_VOTER(pnoc_msmbus_clk, pnoc_clk, LONG_MAX);
static DEFINE_CLK_VOTER(pnoc_msmbus_a_clk, pnoc_a_clk, LONG_MAX);
static DEFINE_CLK_VOTER(pnoc_keepalive_a_clk, pnoc_a_clk, LONG_MAX);
static DEFINE_CLK_VOTER(sysmmnoc_msmbus_clk, sysmmnoc_clk, LONG_MAX);
static DEFINE_CLK_VOTER(sysmmnoc_msmbus_a_clk, sysmmnoc_a_clk, LONG_MAX);
static DEFINE_CLK_VOTER(pnoc_usb_clk, pnoc_clk, LONG_MAX);
static DEFINE_CLK_VOTER(snoc_usb_clk, snoc_clk, LONG_MAX);
static DEFINE_CLK_VOTER(bimc_usb_clk, bimc_clk, LONG_MAX);
static DEFINE_CLK_VOTER(pnoc_usb_a_clk, pnoc_a_clk, LONG_MAX);
static DEFINE_CLK_VOTER(snoc_usb_a_clk, snoc_a_clk, LONG_MAX);
static DEFINE_CLK_VOTER(bimc_usb_a_clk, bimc_a_clk, LONG_MAX);
static DEFINE_CLK_VOTER(snoc_wcnss_a_clk, snoc_a_clk, LONG_MAX);
static DEFINE_CLK_VOTER(bimc_wcnss_a_clk, bimc_a_clk, LONG_MAX);
/* Branch Voter clocks */
static DEFINE_CLK_BRANCH_VOTER(bi_tcxo_lpm_clk, bi_tcxo);
static struct clk_hw *qm215_clks[] = {
[RPM_SMD_XO_CLK_SRC] = &sdm429w_bi_tcxo.hw,
[RPM_SMD_XO_A_CLK_SRC] = &sdm429w_bi_tcxo_ao.hw,
[RPM_SMD_QDSS_CLK] = &sdm429w_qdss_clk.hw,
[RPM_SMD_QDSS_A_CLK] = &sdm429w_qdss_a_clk.hw,
[RPM_SMD_PNOC_CLK] = &sdm429w_pnoc_clk.hw,
[RPM_SMD_PNOC_A_CLK] = &sdm429w_pnoc_a_clk.hw,
[RPM_SMD_SNOC_CLK] = &sdm429w_snoc_clk.hw,
[RPM_SMD_SNOC_A_CLK] = &sdm429w_snoc_a_clk.hw,
[RPM_SMD_BIMC_CLK] = &sdm429w_bimc_clk.hw,
[RPM_SMD_BIMC_A_CLK] = &sdm429w_bimc_a_clk.hw,
[RPM_SMD_BIMC_GPU_CLK] = &scuba_bimc_gpu_clk.hw,
[RPM_SMD_BIMC_GPU_A_CLK] = &scuba_bimc_gpu_a_clk.hw,
[RPM_SMD_SYSMMNOC_CLK] = &sdm429w_sysmmnoc_clk.hw,
[RPM_SMD_SYSMMNOC_A_CLK] = &sdm429w_sysmmnoc_a_clk.hw,
[RPM_SMD_BB_CLK1] = &sdm429w_bb_clk1.hw,
[RPM_SMD_BB_CLK1_A] = &sdm429w_bb_clk1_a.hw,
[RPM_SMD_BB_CLK2] = &sdm429w_bb_clk2.hw,
[RPM_SMD_BB_CLK2_A] = &sdm429w_bb_clk2_a.hw,
[RPM_SMD_BB_CLK1_PIN] = &sdm429w_bb_clk1_pin.hw,
[RPM_SMD_BB_CLK1_A_PIN] = &sdm429w_bb_clk1_a_pin.hw,
[RPM_SMD_BB_CLK2_PIN] = &sdm429w_bb_clk2_pin.hw,
[RPM_SMD_BB_CLK2_A_PIN] = &sdm429w_bb_clk2_a_pin.hw,
[RPM_SMD_RF_CLK2] = &sdm429w_rf_clk2.hw,
[RPM_SMD_RF_CLK2_A] = &sdm429w_rf_clk2_a.hw,
[RPM_SMD_DIV_CLK2] = &sdm429w_div_clk2.hw,
[RPM_SMD_DIV_A_CLK2] = &sdm429w_div_clk2_a.hw,
[PNOC_MSMBUS_CLK] = &pnoc_msmbus_clk.hw,
[PNOC_MSMBUS_A_CLK] = &pnoc_msmbus_a_clk.hw,
[PNOC_KEEPALIVE_A_CLK] = &pnoc_keepalive_a_clk.hw,
[SNOC_MSMBUS_CLK] = &snoc_msmbus_clk.hw,
[SNOC_MSMBUS_A_CLK] = &snoc_msmbus_a_clk.hw,
[BIMC_MSMBUS_CLK] = &bimc_msmbus_clk.hw,
[BIMC_MSMBUS_A_CLK] = &bimc_msmbus_a_clk.hw,
[PNOC_USB_CLK] = &pnoc_usb_clk.hw,
[PNOC_USB_A_CLK] = &pnoc_usb_a_clk.hw,
[SNOC_USB_CLK] = &snoc_usb_clk.hw,
[SNOC_USB_A_CLK] = &snoc_usb_a_clk.hw,
[BIMC_USB_CLK] = &bimc_usb_clk.hw,
[BIMC_USB_A_CLK] = &bimc_usb_a_clk.hw,
[SNOC_WCNSS_A_CLK] = &snoc_wcnss_a_clk.hw,
[BIMC_WCNSS_A_CLK] = &bimc_wcnss_a_clk.hw,
[SYSMMNOC_MSMBUS_CLK] = &sysmmnoc_msmbus_clk.hw,
[SYSMMNOC_MSMBUS_A_CLK] = &sysmmnoc_msmbus_a_clk.hw,
[CXO_SMD_OTG_CLK] = &bi_tcxo_otg_clk.hw,
[CXO_SMD_LPM_CLK] = &bi_tcxo_lpm_clk.hw,
[CXO_SMD_PIL_PRONTO_CLK] = &bi_tcxo_pil_pronto_clk.hw,
[CXO_SMD_PIL_MSS_CLK] = &bi_tcxo_pil_mss_clk.hw,
[CXO_SMD_WLAN_CLK] = &bi_tcxo_wlan_clk.hw,
[CXO_SMD_PIL_LPASS_CLK] = &bi_tcxo_pil_lpass_clk.hw,
};
static const struct rpm_smd_clk_desc rpm_clk_qm215 = {
.clks = qm215_clks,
.num_rpm_clks = RPM_SMD_SYSMMNOC_A_CLK,
.num_clks = ARRAY_SIZE(qm215_clks),
};
static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
{ .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
@@ -1087,6 +1189,7 @@ static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-bengal", .data = &rpm_clk_bengal},
{ .compatible = "qcom,rpmcc-scuba", .data = &rpm_clk_scuba},
{ .compatible = "qcom,rpmcc-sdm660", .data = &rpm_clk_sdm660 },
{ .compatible = "qcom,rpmcc-qm215", .data = &rpm_clk_qm215 },
{ }
};
MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
@@ -1097,7 +1200,7 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
struct clk *clk;
struct rpm_cc *rcc;
struct clk_onecell_data *data;
int ret, is_bengal, is_scuba, is_sdm660;
int ret, is_bengal, is_scuba, is_sdm660, is_qm215;
size_t num_clks, i;
struct clk_hw **hw_clks;
const struct rpm_smd_clk_desc *desc;
@@ -1115,12 +1218,22 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
is_sdm660 = of_device_is_compatible(pdev->dev.of_node,
"qcom,rpmcc-sdm660");
is_qm215 = of_device_is_compatible(pdev->dev.of_node,
"qcom,rpmcc-qm215");
if (is_sdm660) {
ret = clk_vote_bimc(&sdm660_bimc_clk.hw, INT_MAX);
if (ret < 0)
return ret;
}
if (is_qm215) {
ret = clk_vote_bimc(&sdm429w_bimc_clk.hw, INT_MAX);
if (ret < 0)
return ret;
}
desc = of_device_get_match_data(&pdev->dev);
if (!desc)
return -EINVAL;
@@ -1204,6 +1317,15 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
/* Hold an active set vote for the cnoc_periph resource */
clk_set_rate(cnoc_periph_keepalive_a_clk.hw.clk, 19200000);
clk_prepare_enable(cnoc_periph_keepalive_a_clk.hw.clk);
} else if (is_qm215) {
clk_prepare_enable(sdm429w_bi_tcxo_ao.hw.clk);
/*
* Hold an active set vote for the pnoc_periph PCNOC AHB
* resource. Sleep set vote is 0
*/
clk_set_rate(pnoc_keepalive_a_clk.hw.clk, 19200000);
clk_prepare_enable(pnoc_keepalive_a_clk.hw.clk);
}
dev_info(&pdev->dev, "Registered RPM clocks\n");

View file

@@ -0,0 +1,385 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "clk: %s: " fmt, __func__
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "clk-debug.h"
#include "common.h"
static struct measure_clk_data debug_mux_priv = {
.ctl_reg = 0x74004,
.status_reg = 0x74008,
.xo_div4_cbcr = 0x30034,
};
static const char *const gcc_debug_mux_parent_names[] = {
"gcc_ahb_clk",
"gcc_apss_ahb_clk",
"gcc_apss_axi_clk",
"gcc_bimc_gfx_clk",
"gcc_bimc_gpu_clk",
"gcc_blsp1_ahb_clk",
"gcc_blsp1_qup2_i2c_apps_clk",
"gcc_blsp1_qup2_spi_apps_clk",
"gcc_blsp1_qup3_i2c_apps_clk",
"gcc_blsp1_qup3_spi_apps_clk",
"gcc_blsp1_qup4_i2c_apps_clk",
"gcc_blsp1_qup4_spi_apps_clk",
"gcc_blsp1_sleep_clk",
"gcc_blsp1_uart1_apps_clk",
"gcc_blsp1_uart1_sim_clk",
"gcc_blsp1_uart2_apps_clk",
"gcc_blsp1_uart2_sim_clk",
"gcc_blsp2_ahb_clk",
"gcc_blsp2_qup1_i2c_apps_clk",
"gcc_blsp2_qup1_spi_apps_clk",
"gcc_blsp2_qup2_i2c_apps_clk",
"gcc_blsp2_qup2_spi_apps_clk",
"gcc_blsp2_qup3_i2c_apps_clk",
"gcc_blsp2_qup3_spi_apps_clk",
"gcc_blsp2_sleep_clk",
"gcc_blsp2_uart1_apps_clk",
"gcc_blsp2_uart1_sim_clk",
"gcc_blsp2_uart2_apps_clk",
"gcc_blsp2_uart2_sim_clk",
"gcc_boot_rom_ahb_clk",
"gcc_camss_ahb_clk",
"gcc_camss_cci_ahb_clk",
"gcc_camss_cci_clk",
"gcc_camss_cpp_ahb_clk",
"gcc_camss_cpp_axi_clk",
"gcc_camss_cpp_clk",
"gcc_camss_csi0_ahb_clk",
"gcc_camss_csi0_clk",
"gcc_camss_csi0phy_clk",
"gcc_camss_csi0phytimer_clk",
"gcc_camss_csi0pix_clk",
"gcc_camss_csi0rdi_clk",
"gcc_camss_csi1_ahb_clk",
"gcc_camss_csi1_clk",
"gcc_camss_csi1phy_clk",
"gcc_camss_csi1phytimer_clk",
"gcc_camss_csi1pix_clk",
"gcc_camss_csi1rdi_clk",
"gcc_camss_csi2_ahb_clk",
"gcc_camss_csi2_clk",
"gcc_camss_csi2phy_clk",
"gcc_camss_csi2pix_clk",
"gcc_camss_csi2rdi_clk",
"gcc_camss_csi_vfe0_clk",
"gcc_camss_csi_vfe1_clk",
"gcc_camss_gp0_clk",
"gcc_camss_gp1_clk",
"gcc_camss_ispif_ahb_clk",
"gcc_camss_jpeg0_clk",
"gcc_camss_jpeg_ahb_clk",
"gcc_camss_jpeg_axi_clk",
"gcc_camss_mclk0_clk",
"gcc_camss_mclk1_clk",
"gcc_camss_mclk2_clk",
"gcc_camss_micro_ahb_clk",
"gcc_camss_top_ahb_clk",
"gcc_camss_vfe0_clk",
"gcc_camss_vfe1_ahb_clk",
"gcc_camss_vfe1_axi_clk",
"gcc_camss_vfe1_clk",
"gcc_camss_vfe_ahb_clk",
"gcc_camss_vfe_axi_clk",
"gcc_crypto_ahb_clk",
"gcc_crypto_axi_clk",
"gcc_crypto_clk",
"gcc_gp1_clk",
"gcc_gp2_clk",
"gcc_gp3_clk",
"gcc_im_sleep_clk",
"gcc_lpass_mport_axi_clk",
"gcc_lpass_q6_axi_clk",
"gcc_lpass_sway_clk",
"gcc_mdss_ahb_clk",
"gcc_mdss_axi_clk",
"gcc_mdss_byte0_clk",
"gcc_mdss_esc0_clk",
"gcc_mdss_mdp_clk",
"gcc_mdss_pclk0_clk",
"gcc_mdss_vsync_clk",
"gcc_mpm_ahb_clk",
"gcc_msg_ram_ahb_clk",
"gcc_oxili_ahb_clk",
"gcc_oxili_aon_clk",
"gcc_oxili_gfx3d_clk",
"gcc_pcnoc_mpu_cfg_ahb_clk",
"gcc_pdm2_clk",
"gcc_pdm_ahb_clk",
"gcc_pdm_xo4_clk",
"gcc_prng_ahb_clk",
"gcc_q6_mpu_cfg_ahb_clk",
"gcc_rpm_cfg_xpu_clk",
"gcc_sdcc1_ahb_clk",
"gcc_sdcc1_apps_clk",
"gcc_sdcc1_ice_core_clk",
"gcc_sdcc2_ahb_clk",
"gcc_sdcc2_apps_clk",
"gcc_sec_ctrl_acc_clk",
"gcc_sec_ctrl_ahb_clk",
"gcc_sec_ctrl_boot_rom_patch_clk",
"gcc_sec_ctrl_clk",
"gcc_sec_ctrl_sense_clk",
"gcc_tcsr_ahb_clk",
"gcc_tlmm_ahb_clk",
"gcc_tlmm_clk",
"gcc_usb2a_phy_sleep_clk",
"gcc_usb_hs_ahb_clk",
"gcc_usb_hs_inactivity_timers_clk",
"gcc_usb_hs_phy_cfg_ahb_clk",
"gcc_usb_hs_system_clk",
"gcc_venus0_ahb_clk",
"gcc_venus0_axi_clk",
"gcc_venus0_core0_vcodec0_clk",
"gcc_venus0_vcodec0_clk",
"gcc_xo_clk",
"gcc_xo_div4_clk",
"gcc_gfx_tbu_clk",
"gcc_gfx_tcu_clk",
"gcc_gtcu_ahb_clk",
"gcc_bimc_clk",
"gcc_smmu_cfg_clk",
};
static int gcc_debug_mux_sels[] = {
0x148, /* gcc_ahb_clk */
0x168, /* gcc_apss_ahb_clk */
0x169, /* gcc_apss_axi_clk */
0x2D, /* gcc_bimc_gfx_clk */
0x157, /* gcc_bimc_gpu_clk */
0x88, /* gcc_blsp1_ahb_clk */
0x90, /* gcc_blsp1_qup2_i2c_apps_clk */
0x8E, /* gcc_blsp1_qup2_spi_apps_clk */
0x94, /* gcc_blsp1_qup3_i2c_apps_clk */
0x93, /* gcc_blsp1_qup3_spi_apps_clk */
0x96, /* gcc_blsp1_qup4_i2c_apps_clk */
0x95, /* gcc_blsp1_qup4_spi_apps_clk */
0x89, /* gcc_blsp1_sleep_clk */
0x8C, /* gcc_blsp1_uart1_apps_clk */
0x8D, /* gcc_blsp1_uart1_sim_clk */
0x91, /* gcc_blsp1_uart2_apps_clk */
0x92, /* gcc_blsp1_uart2_sim_clk */
0x98, /* gcc_blsp2_ahb_clk */
0x9B, /* gcc_blsp2_qup1_i2c_apps_clk */
0x9A, /* gcc_blsp2_qup1_spi_apps_clk */
0xA0, /* gcc_blsp2_qup2_i2c_apps_clk */
0x9E, /* gcc_blsp2_qup2_spi_apps_clk */
0xA4, /* gcc_blsp2_qup3_i2c_apps_clk */
0xA3, /* gcc_blsp2_qup3_spi_apps_clk */
0x99, /* gcc_blsp2_sleep_clk */
0x9C, /* gcc_blsp2_uart1_apps_clk */
0x9D, /* gcc_blsp2_uart1_sim_clk */
0x9A, /* gcc_blsp2_uart2_apps_clk */
0xA2, /* gcc_blsp2_uart2_sim_clk */
0xF8, /* gcc_boot_rom_ahb_clk */
0xA8, /* gcc_camss_ahb_clk */
0xB0, /* gcc_camss_cci_ahb_clk */
0xAF, /* gcc_camss_cci_clk */
0xBA, /* gcc_camss_cpp_ahb_clk */
0x1A3, /* gcc_camss_cpp_axi_clk */
0xB9, /* gcc_camss_cpp_clk */
0xC1, /* gcc_camss_csi0_ahb_clk */
0xC0, /* gcc_camss_csi0_clk */
0xC2, /* gcc_camss_csi0phy_clk */
0xB1, /* gcc_camss_csi0phytimer_clk */
0xC4, /* gcc_camss_csi0pix_clk */
0xC3, /* gcc_camss_csi0rdi_clk */
0xC6, /* gcc_camss_csi1_ahb_clk */
0xC5, /* gcc_camss_csi1_clk */
0xC7, /* gcc_camss_csi1phy_clk */
0xB2, /* gcc_camss_csi1phytimer_clk */
0xE1, /* gcc_camss_csi1pix_clk */
0xE0, /* gcc_camss_csi1rdi_clk */
0xE4, /* gcc_camss_csi2_ahb_clk */
0xE3, /* gcc_camss_csi2_clk */
0xE5, /* gcc_camss_csi2phy_clk */
0xE7, /* gcc_camss_csi2pix_clk */
0xE6, /* gcc_camss_csi2rdi_clk */
0xBF, /* gcc_camss_csi_vfe0_clk */
0x1A0, /* gcc_camss_csi_vfe1_clk */
0xAB, /* gcc_camss_gp0_clk */
0xAC, /* gcc_camss_gp1_clk */
0xE2, /* gcc_camss_ispif_ahb_clk */
0xB3, /* gcc_camss_jpeg0_clk */
0xB4, /* gcc_camss_jpeg_ahb_clk */
0xB5, /* gcc_camss_jpeg_axi_clk */
0xAD, /* gcc_camss_mclk0_clk */
0xAE, /* gcc_camss_mclk1_clk */
0x1BD, /* gcc_camss_mclk2_clk */
0xAA, /* gcc_camss_micro_ahb_clk */
0xA9, /* gcc_camss_top_ahb_clk */
0xB8, /* gcc_camss_vfe0_clk */
0x1A2, /* gcc_camss_vfe1_ahb_clk */
0x1A4, /* gcc_camss_vfe1_axi_clk */
0x1A1, /* gcc_camss_vfe1_clk */
0xBB, /* gcc_camss_vfe_ahb_clk */
0xBC, /* gcc_camss_vfe_axi_clk */
0x13A, /* gcc_crypto_ahb_clk */
0x139, /* gcc_crypto_axi_clk */
0x138, /* gcc_crypto_clk */
0x10, /* gcc_gp1_clk */
0x11, /* gcc_gp2_clk */
0x12, /* gcc_gp3_clk */
0x14B, /* gcc_im_sleep_clk */
0x162, /* gcc_lpass_mport_axi_clk */
0x160, /* gcc_lpass_q6_axi_clk */
0x163, /* gcc_lpass_sway_clk */
0x1F6, /* gcc_mdss_ahb_clk */
0x1F7, /* gcc_mdss_axi_clk */
0x1FC, /* gcc_mdss_byte0_clk */
0x1FD, /* gcc_mdss_esc0_clk */
0x1F9, /* gcc_mdss_mdp_clk */
0x1F8, /* gcc_mdss_pclk0_clk */
0x1FB, /* gcc_mdss_vsync_clk */
0x110, /* gcc_mpm_ahb_clk */
0x100, /* gcc_msg_ram_ahb_clk */
0x1EB, /* gcc_oxili_ahb_clk */
0xEE, /* gcc_oxili_aon_clk */
0x1EA, /* gcc_oxili_gfx3d_clk */
0xC9, /* gcc_pcnoc_mpu_cfg_ahb_clk */
0xD2, /* gcc_pdm2_clk */
0xD0, /* gcc_pdm_ahb_clk */
0xD1, /* gcc_pdm_xo4_clk */
0xD8, /* gcc_prng_ahb_clk */
0xC8, /* gcc_q6_mpu_cfg_ahb_clk */
0x38, /* gcc_rpm_cfg_xpu_clk */
0x69, /* gcc_sdcc1_ahb_clk */
0x68, /* gcc_sdcc1_apps_clk */
0x6A, /* gcc_sdcc1_ice_core_clk */
0x71, /* gcc_sdcc2_ahb_clk */
0x70, /* gcc_sdcc2_apps_clk */
0x120, /* gcc_sec_ctrl_acc_clk */
0x121, /* gcc_sec_ctrl_ahb_clk */
0x124, /* gcc_sec_ctrl_boot_rom_patch_clk */
0x122, /* gcc_sec_ctrl_clk */
0x123, /* gcc_sec_ctrl_sense_clk */
0xE8, /* gcc_tcsr_ahb_clk */
0x108, /* gcc_tlmm_ahb_clk */
0x109, /* gcc_tlmm_clk */
0x63, /* gcc_usb2a_phy_sleep_clk */
0x61, /* gcc_usb_hs_ahb_clk */
0x62, /* gcc_usb_hs_inactivity_timers_clk */
0x64, /* gcc_usb_hs_phy_cfg_ahb_clk */
0x60, /* gcc_usb_hs_system_clk */
0x1F3, /* gcc_venus0_ahb_clk */
0x1F2, /* gcc_venus0_axi_clk */
0x1B8, /* gcc_venus0_core0_vcodec0_clk */
0x1F1, /* gcc_venus0_vcodec0_clk */
0x149, /* gcc_xo_clk */
0x14A, /* gcc_xo_div4_clk */
0x52, /* gcc_gfx_tbu_clk */
0x53, /* gcc_gfx_tcu_clk */
0x58, /* gcc_gtcu_ahb_clk */
0x15A, /* gcc_bimc_clk */
0x5B, /* gcc_smmu_cfg_clk */
};
static struct clk_debug_mux gcc_debug_mux = {
.priv = &debug_mux_priv,
.en_mask = BIT(16),
.debug_offset = 0x74000,
.post_div_offset = 0x74000,
.cbcr_offset = 0x74000,
.src_sel_mask = 0x1FF,
.src_sel_shift = 0,
.post_div_mask = 0xF000,
.post_div_shift = 12,
.post_div_val = 1,
.mux_sels = gcc_debug_mux_sels,
.hw.init = &(struct clk_init_data){
.name = "gcc_debug_mux",
.ops = &clk_debug_mux_ops,
.parent_names = gcc_debug_mux_parent_names,
.num_parents = ARRAY_SIZE(gcc_debug_mux_parent_names),
.flags = CLK_IS_MEASURE,
},
};
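/*
 * Sketch of how these fields are expected to be used by the common
 * clk_debug_mux_ops (implemented in clk-debug.c, outside this hunk):
 * the index from gcc_debug_mux_sels is written through src_sel_mask at
 * debug_offset, the branch at cbcr_offset is enabled together with
 * en_mask, and the counter behind ctl_reg/status_reg runs against the
 * xo_div4 reference (nominally 19.2 MHz / 4) to measure the selected
 * clock, with post_div_val scaling the result back up.
 */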
static struct mux_regmap_names mux_list[] = {
{ .mux = &gcc_debug_mux, .regmap_name = "qcom,gcc" },
};
static const struct of_device_id clk_debug_match_table[] = {
{ .compatible = "qcom,sdm429w-debugcc" },
{ .compatible = "qcom,qm215-debugcc" },
{ }
};
static int clk_debug_sdm429w_probe(struct platform_device *pdev)
{
struct clk *clk;
int ret, i;
BUILD_BUG_ON(ARRAY_SIZE(gcc_debug_mux_parent_names) !=
ARRAY_SIZE(gcc_debug_mux_sels));
clk = devm_clk_get(&pdev->dev, "xo_clk_src");
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
dev_err(&pdev->dev, "Unable to get xo clock\n");
return PTR_ERR(clk);
}
debug_mux_priv.cxo = clk;
for (i = 0; i < ARRAY_SIZE(mux_list); i++) {
ret = map_debug_bases(pdev, mux_list[i].regmap_name,
mux_list[i].mux);
if (ret == -EBADR)
continue;
else if (ret)
return ret;
clk = devm_clk_register(&pdev->dev, &mux_list[i].mux->hw);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Unable to register %s, err:(%d)\n",
clk_hw_get_name(&mux_list[i].mux->hw),
PTR_ERR(clk));
return PTR_ERR(clk);
}
}
ret = clk_debug_measure_register(&gcc_debug_mux.hw);
if (ret) {
dev_err(&pdev->dev, "Could not register Measure clocks\n");
return ret;
}
dev_info(&pdev->dev, "Registered debug measure clocks\n");
return ret;
}
static struct platform_driver clk_debug_driver = {
.probe = clk_debug_sdm429w_probe,
.driver = {
.name = "sdm429w-debugcc",
.of_match_table = clk_debug_match_table,
},
};
static int __init clk_debug_sdm429w_init(void)
{
return platform_driver_register(&clk_debug_driver);
}
fs_initcall(clk_debug_sdm429w_init);
MODULE_DESCRIPTION("QTI DEBUG CC SDM429W Driver");
MODULE_LICENSE("GPL v2");

File diff suppressed because it is too large

View file

@@ -0,0 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*/
#ifndef __QCOM_GDSC_DEBUG_H__
#define __QCOM_GDSC_DEBUG_H__
void gdsc_debug_print_regs(struct regulator *regulator);
#endif /* __QCOM_GDSC_DEBUG_H__ */

View file

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
@@ -22,6 +22,8 @@
#include <linux/clk/qcom.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
#include "../../regulator/internal.h"
#include "gdsc-debug.h"
/* GDSCR */
#define PWR_ON_MASK BIT(31)
@@ -615,13 +617,38 @@ static struct regulator_ops gdsc_ops = {
.get_mode = gdsc_get_mode,
};
static const struct regmap_config gdsc_regmap_config = {
static struct regmap_config gdsc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x8,
.fast_io = true,
};
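/*
 * gdsc_regmap_config is no longer const because gdsc_get_resources()
 * below clears max_register when the "qcom,no-config-gdscr" DT property
 * is present; gdsc_debug_print_regs() then uses that same field to
 * decide whether to dump only GDSCR or the full GDSCR/CFG/CFG2 set.
 */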
void gdsc_debug_print_regs(struct regulator *regulator)
{
struct gdsc *sc = rdev_get_drvdata(regulator->rdev);
uint32_t regvals[3] = {0};
int ret;
if (!sc) {
pr_err("Failed to get GDSC Handle\n");
return;
}
ret = regmap_bulk_read(sc->regmap, REG_OFFSET, regvals,
gdsc_regmap_config.max_register ? 3 : 1);
if (ret) {
pr_err("Failed to read %s registers\n", sc->rdesc.name);
return;
}
pr_info("Dumping %s Registers:\n", sc->rdesc.name);
pr_info("GDSCR: 0x%.8x CFG: 0x%.8x CFG2: 0x%.8x\n",
regvals[0], regvals[1], regvals[2]);
}
EXPORT_SYMBOL(gdsc_debug_print_regs);
static int gdsc_parse_dt_data(struct gdsc *sc, struct device *dev,
struct regulator_init_data **init_data)
{
@@ -732,6 +759,9 @@ static int gdsc_get_resources(struct gdsc *sc, struct platform_device *pdev)
if (sc->gdscr == NULL)
return -ENOMEM;
if (of_property_read_bool(dev->of_node, "qcom,no-config-gdscr"))
gdsc_regmap_config.max_register = 0;
sc->regmap = devm_regmap_init_mmio(dev, sc->gdscr, &gdsc_regmap_config);
if (!sc->regmap) {
dev_err(dev, "Couldn't get regmap\n");

View file

@@ -4,3 +4,7 @@ obj-$(CONFIG_MDSS_PLL) += mdss-pll.o
obj-$(CONFIG_MDSS_PLL) += mdss-dsi-pll-14nm.o
obj-$(CONFIG_MDSS_PLL) += mdss-dsi-pll-14nm-util.o
obj-$(CONFIG_MDSS_PLL) += mdss-dp-pll-14nm.o
obj-$(CONFIG_MDSS_PLL) += mdss-dsi-pll-28lpm.o
obj-$(CONFIG_MDSS_PLL) += mdss-dsi-pll-28nm-util.o
obj-$(CONFIG_MDSS_PLL) += mdss-dsi-pll-12nm.o
obj-$(CONFIG_MDSS_PLL) += mdss-dsi-pll-12nm-util.o

View file

@@ -0,0 +1,979 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/delay.h>
#include "mdss-pll.h"
#include "mdss-dsi-pll.h"
#include "mdss-dsi-pll-12nm.h"
#define DSI_PLL_POLL_MAX_READS 15
#define DSI_PLL_POLL_TIMEOUT_US 1000
int pixel_div_set_div(void *context, unsigned int reg,
unsigned int div)
{
struct mdss_pll_resources *pll = context;
struct dsi_pll_db *pdb;
pdb = (struct dsi_pll_db *)pll->priv;
/* Programming during vco_prepare. Keep this value */
pdb->param.pixel_divhf = (div - 1);
pr_debug("ndx=%d div=%d divhf=%d\n",
pll->index, div, pdb->param.pixel_divhf);
return 0;
}
int pixel_div_get_div(void *context, unsigned int reg,
unsigned int *div)
{
int rc;
struct mdss_pll_resources *pll = context;
if (is_gdsc_disabled(pll))
return 0;
rc = mdss_pll_resource_enable(pll, true);
if (rc) {
pr_err("Failed to enable mdss dsi pll resources\n");
return rc;
}
*div = (MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SSC9) & 0x7F);
pr_debug("pixel_div = %d\n", (*div+1));
mdss_pll_resource_enable(pll, false);
return 0;
}
int set_post_div_mux_sel(void *context, unsigned int reg,
unsigned int sel)
{
struct mdss_pll_resources *pll = context;
struct dsi_pll_db *pdb;
pdb = (struct dsi_pll_db *)pll->priv;
/* Programming during vco_prepare. Keep this value */
pdb->param.post_div_mux = sel;
pr_debug("ndx=%d post_div_mux_sel=%d p_div=%d\n",
pll->index, sel, (u32) BIT(sel));
return 0;
}
int get_post_div_mux_sel(void *context, unsigned int reg,
unsigned int *sel)
{
u32 vco_cntrl = 0, cpbias_cntrl = 0;
int rc;
struct mdss_pll_resources *pll = context;
if (is_gdsc_disabled(pll))
return 0;
rc = mdss_pll_resource_enable(pll, true);
if (rc) {
pr_err("Failed to enable mdss dsi pll resources\n");
return rc;
}
vco_cntrl = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_PLL_VCO_CTRL);
vco_cntrl &= 0x30;
cpbias_cntrl = MDSS_PLL_REG_R(pll->pll_base,
DSIPHY_PLL_CHAR_PUMP_BIAS_CTRL);
cpbias_cntrl = ((cpbias_cntrl >> 6) & 0x1);
if (cpbias_cntrl == 0) {
if (vco_cntrl == 0x00)
*sel = 0;
else if (vco_cntrl == 0x10)
*sel = 2;
else if (vco_cntrl == 0x20)
*sel = 3;
else if (vco_cntrl == 0x30)
*sel = 4;
} else if (cpbias_cntrl == 1) {
if (vco_cntrl == 0x30)
*sel = 2;
else if (vco_cntrl == 0x00)
*sel = 5;
}
mdss_pll_resource_enable(pll, false);
return 0;
}
int set_gp_mux_sel(void *context, unsigned int reg,
unsigned int sel)
{
struct mdss_pll_resources *pll = context;
struct dsi_pll_db *pdb;
pdb = (struct dsi_pll_db *)pll->priv;
/* Programming during vco_prepare. Keep this value */
pdb->param.gp_div_mux = sel;
pr_debug("ndx=%d gp_div_mux_sel=%d gp_cntrl=%d\n",
pll->index, sel, (u32) BIT(sel));
return 0;
}
int get_gp_mux_sel(void *context, unsigned int reg,
unsigned int *sel)
{
int rc;
struct mdss_pll_resources *pll = context;
u32 reg_val;
if (is_gdsc_disabled(pll))
return 0;
rc = mdss_pll_resource_enable(pll, true);
if (rc) {
pr_err("Failed to enable mdss dsi pll resources\n");
return rc;
}
reg_val = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_PLL_CTRL);
*sel = (reg_val >> 5) & 0x7;
pr_debug("gp_cntrl = %d\n", *sel);
mdss_pll_resource_enable(pll, false);
return 0;
}
static bool pll_is_pll_locked_12nm(struct mdss_pll_resources *pll,
bool is_handoff)
{
u32 status;
bool pll_locked;
/* poll for PLL ready status */
if (readl_poll_timeout_atomic((pll->pll_base +
DSIPHY_STAT0),
status,
((status & BIT(1)) > 0),
DSI_PLL_POLL_MAX_READS,
DSI_PLL_POLL_TIMEOUT_US)) {
if (!is_handoff)
pr_err("DSI PLL ndx=%d status=%x failed to Lock\n",
pll->index, status);
pll_locked = false;
} else {
pll_locked = true;
}
return pll_locked;
}
int dsi_pll_enable_seq_12nm(struct mdss_pll_resources *pll)
{
int rc = 0;
struct dsi_pll_db *pdb;
void __iomem *pll_base;
if (!pll) {
pr_err("Invalid PLL resources\n");
return -EINVAL;
}
pdb = (struct dsi_pll_db *)pll->priv;
if (!pdb) {
pr_err("No priv found\n");
return -EINVAL;
}
pll_base = pll->pll_base;
MDSS_PLL_REG_W(pll_base, DSIPHY_SYS_CTRL, 0x49);
wmb(); /* make sure register committed before enabling branch clocks */
udelay(5); /* h/w recommended delay */
MDSS_PLL_REG_W(pll_base, DSIPHY_SYS_CTRL, 0xc9);
wmb(); /* make sure register committed before enabling branch clocks */
udelay(50); /* h/w recommended delay */
if (!pll_is_pll_locked_12nm(pll, false)) {
pr_err("DSI PLL ndx=%d lock failed!\n",
pll->index);
rc = -EINVAL;
goto init_lock_err;
}
pr_debug("DSI PLL ndx:%d Locked!\n", pll->index);
init_lock_err:
return rc;
}
static int dsi_pll_enable(struct clk_hw *hw)
{
int i, rc = 0;
struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
/* Try all enable sequences until one succeeds */
for (i = 0; i < vco->pll_en_seq_cnt; i++) {
rc = vco->pll_enable_seqs[i](pll);
pr_debug("DSI PLL %s after sequence #%d\n",
rc ? "unlocked" : "locked", i + 1);
if (!rc)
break;
}
if (rc)
pr_err("ndx=%d DSI PLL failed to lock\n", pll->index);
else
pll->pll_on = true;
return rc;
}
static int dsi_pll_relock(struct mdss_pll_resources *pll)
{
void __iomem *pll_base = pll->pll_base;
u32 data = 0;
int rc = 0;
data = MDSS_PLL_REG_R(pll_base, DSIPHY_PLL_POWERUP_CTRL);
data &= ~BIT(1); /* remove ONPLL_OVR_EN bit */
data |= 0x1; /* set ONPLL_OVN to 0x1 */
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_POWERUP_CTRL, data);
ndelay(500); /* h/w recommended delay */
MDSS_PLL_REG_W(pll_base, DSIPHY_SYS_CTRL, 0x49);
wmb(); /* make sure register committed before enabling branch clocks */
udelay(5); /* h/w recommended delay */
MDSS_PLL_REG_W(pll_base, DSIPHY_SYS_CTRL, 0xc9);
wmb(); /* make sure register committed before enabling branch clocks */
udelay(50); /* h/w recommended delay */
if (!pll_is_pll_locked_12nm(pll, false)) {
pr_err("DSI PLL ndx=%d lock failed!\n",
pll->index);
rc = -EINVAL;
goto relock_err;
}
ndelay(50); /* h/w recommended delay */
data = MDSS_PLL_REG_R(pll_base, DSIPHY_PLL_CTRL);
data |= 0x01; /* set CLK_SEL bits to 0x1 */
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_CTRL, data);
ndelay(500); /* h/w recommended delay */
wmb(); /* make sure register committed before enabling branch clocks */
pll->pll_on = true;
relock_err:
return rc;
}
static void dsi_pll_disable(struct clk_hw *hw)
{
struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
void __iomem *pll_base = pll->pll_base;
u32 data = 0;
if (!pll->pll_on &&
mdss_pll_resource_enable(pll, true)) {
pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
return;
}
data = MDSS_PLL_REG_R(pll_base, DSIPHY_SSC0);
data &= ~BIT(6); /* disable GP_CLK_EN */
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC0, data);
ndelay(500); /* h/w recommended delay */
data = MDSS_PLL_REG_R(pll_base, DSIPHY_PLL_CTRL);
data &= ~0x03; /* remove CLK_SEL bits */
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_CTRL, data);
ndelay(500); /* h/w recommended delay */
data = MDSS_PLL_REG_R(pll_base, DSIPHY_PLL_POWERUP_CTRL);
data &= ~0x1; /* remove ONPLL_OVR bit */
data |= BIT(1); /* set ONPLL_OVR_EN to 0x1 */
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_POWERUP_CTRL, data);
ndelay(500); /* h/w recommended delay */
wmb(); /* make sure register committed before disabling branch clocks */
pll->handoff_resources = false;
mdss_pll_resource_enable(pll, false);
pll->pll_on = false;
pr_debug("DSI PLL ndx=%d Disabled\n", pll->index);
}
static u32 __mdss_dsi_get_hsfreqrange(u64 target_freq)
{
u64 bitclk_rate_mhz = div_u64((target_freq * 2), 1000000);
if (bitclk_rate_mhz >= 80 && bitclk_rate_mhz < 90)
return 0x00;
else if (bitclk_rate_mhz >= 90 && bitclk_rate_mhz < 100)
return 0x10;
else if (bitclk_rate_mhz >= 100 && bitclk_rate_mhz < 110)
return 0x20;
else if (bitclk_rate_mhz >= 110 && bitclk_rate_mhz < 120)
return 0x30;
else if (bitclk_rate_mhz >= 120 && bitclk_rate_mhz < 130)
return 0x01;
else if (bitclk_rate_mhz >= 130 && bitclk_rate_mhz < 140)
return 0x11;
else if (bitclk_rate_mhz >= 140 && bitclk_rate_mhz < 150)
return 0x21;
else if (bitclk_rate_mhz >= 150 && bitclk_rate_mhz < 160)
return 0x31;
else if (bitclk_rate_mhz >= 160 && bitclk_rate_mhz < 170)
return 0x02;
else if (bitclk_rate_mhz >= 170 && bitclk_rate_mhz < 180)
return 0x12;
else if (bitclk_rate_mhz >= 180 && bitclk_rate_mhz < 190)
return 0x22;
else if (bitclk_rate_mhz >= 190 && bitclk_rate_mhz < 205)
return 0x32;
else if (bitclk_rate_mhz >= 205 && bitclk_rate_mhz < 220)
return 0x03;
else if (bitclk_rate_mhz >= 220 && bitclk_rate_mhz < 235)
return 0x13;
else if (bitclk_rate_mhz >= 235 && bitclk_rate_mhz < 250)
return 0x23;
else if (bitclk_rate_mhz >= 250 && bitclk_rate_mhz < 275)
return 0x33;
else if (bitclk_rate_mhz >= 275 && bitclk_rate_mhz < 300)
return 0x04;
else if (bitclk_rate_mhz >= 300 && bitclk_rate_mhz < 325)
return 0x14;
else if (bitclk_rate_mhz >= 325 && bitclk_rate_mhz < 350)
return 0x25;
else if (bitclk_rate_mhz >= 350 && bitclk_rate_mhz < 400)
return 0x35;
else if (bitclk_rate_mhz >= 400 && bitclk_rate_mhz < 450)
return 0x05;
else if (bitclk_rate_mhz >= 450 && bitclk_rate_mhz < 500)
return 0x16;
else if (bitclk_rate_mhz >= 500 && bitclk_rate_mhz < 550)
return 0x26;
else if (bitclk_rate_mhz >= 550 && bitclk_rate_mhz < 600)
return 0x37;
else if (bitclk_rate_mhz >= 600 && bitclk_rate_mhz < 650)
return 0x07;
else if (bitclk_rate_mhz >= 650 && bitclk_rate_mhz < 700)
return 0x18;
else if (bitclk_rate_mhz >= 700 && bitclk_rate_mhz < 750)
return 0x28;
else if (bitclk_rate_mhz >= 750 && bitclk_rate_mhz < 800)
return 0x39;
else if (bitclk_rate_mhz >= 800 && bitclk_rate_mhz < 850)
return 0x09;
else if (bitclk_rate_mhz >= 850 && bitclk_rate_mhz < 900)
return 0x19;
else if (bitclk_rate_mhz >= 900 && bitclk_rate_mhz < 950)
return 0x29;
else if (bitclk_rate_mhz >= 950 && bitclk_rate_mhz < 1000)
return 0x3a;
else if (bitclk_rate_mhz >= 1000 && bitclk_rate_mhz < 1050)
return 0x0a;
else if (bitclk_rate_mhz >= 1050 && bitclk_rate_mhz < 1100)
return 0x1a;
else if (bitclk_rate_mhz >= 1100 && bitclk_rate_mhz < 1150)
return 0x2a;
else if (bitclk_rate_mhz >= 1150 && bitclk_rate_mhz < 1200)
return 0x3b;
else if (bitclk_rate_mhz >= 1200 && bitclk_rate_mhz < 1250)
return 0x0b;
else if (bitclk_rate_mhz >= 1250 && bitclk_rate_mhz < 1300)
return 0x1b;
else if (bitclk_rate_mhz >= 1300 && bitclk_rate_mhz < 1350)
return 0x2b;
else if (bitclk_rate_mhz >= 1350 && bitclk_rate_mhz < 1400)
return 0x3c;
else if (bitclk_rate_mhz >= 1400 && bitclk_rate_mhz < 1450)
return 0x0c;
else if (bitclk_rate_mhz >= 1450 && bitclk_rate_mhz < 1500)
return 0x1c;
else if (bitclk_rate_mhz >= 1500 && bitclk_rate_mhz < 1550)
return 0x2c;
else if (bitclk_rate_mhz >= 1550 && bitclk_rate_mhz < 1600)
return 0x3d;
else if (bitclk_rate_mhz >= 1600 && bitclk_rate_mhz < 1650)
return 0x0d;
else if (bitclk_rate_mhz >= 1650 && bitclk_rate_mhz < 1700)
return 0x1d;
else if (bitclk_rate_mhz >= 1700 && bitclk_rate_mhz < 1750)
return 0x2e;
else if (bitclk_rate_mhz >= 1750 && bitclk_rate_mhz < 1800)
return 0x3e;
else if (bitclk_rate_mhz >= 1800 && bitclk_rate_mhz < 1850)
return 0x0e;
else if (bitclk_rate_mhz >= 1850 && bitclk_rate_mhz < 1900)
return 0x1e;
else if (bitclk_rate_mhz >= 1900 && bitclk_rate_mhz < 1950)
return 0x2f;
else if (bitclk_rate_mhz >= 1950 && bitclk_rate_mhz < 2000)
return 0x3f;
else if (bitclk_rate_mhz >= 2000 && bitclk_rate_mhz < 2050)
return 0x0f;
else if (bitclk_rate_mhz >= 2050 && bitclk_rate_mhz < 2100)
return 0x40;
else if (bitclk_rate_mhz >= 2100 && bitclk_rate_mhz < 2150)
return 0x41;
else if (bitclk_rate_mhz >= 2150 && bitclk_rate_mhz < 2200)
return 0x42;
else if (bitclk_rate_mhz >= 2200 && bitclk_rate_mhz <= 2249)
return 0x43;
else if (bitclk_rate_mhz > 2249 && bitclk_rate_mhz < 2300)
return 0x44;
else if (bitclk_rate_mhz >= 2300 && bitclk_rate_mhz < 2350)
return 0x45;
else if (bitclk_rate_mhz >= 2350 && bitclk_rate_mhz < 2400)
return 0x46;
else if (bitclk_rate_mhz >= 2400 && bitclk_rate_mhz < 2450)
return 0x47;
else if (bitclk_rate_mhz >= 2450 && bitclk_rate_mhz < 2500)
return 0x48;
else
return 0x49;
}
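/*
 * Worked example, derived only from the ranges above: a 1 GHz target
 * clock corresponds to a 2000 Mbps bit clock (target_freq * 2), which
 * falls in the [2000, 2050) bucket and returns 0x0f. The value is
 * presumably the D-PHY hsfreqrange code later written to
 * DSIPHY_HS_FREQ_RAN_SEL in pll_db_commit_12nm().
 */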
static void __mdss_dsi_get_pll_vco_cntrl(u64 target_freq, u32 post_div_mux,
u32 *vco_cntrl, u32 *cpbias_cntrl)
{
u64 target_freq_mhz = div_u64(target_freq, 1000000);
u32 p_div = BIT(post_div_mux);
if (p_div == 1) {
*vco_cntrl = 0x00;
*cpbias_cntrl = 0;
} else if (p_div == 2) {
*vco_cntrl = 0x30;
*cpbias_cntrl = 1;
} else if (p_div == 4) {
*vco_cntrl = 0x10;
*cpbias_cntrl = 0;
} else if (p_div == 8) {
*vco_cntrl = 0x20;
*cpbias_cntrl = 0;
} else if (p_div == 16) {
*vco_cntrl = 0x30;
*cpbias_cntrl = 0;
} else {
*vco_cntrl = 0x00;
*cpbias_cntrl = 1;
}
if (target_freq_mhz <= 1250 && target_freq_mhz >= 1092)
*vco_cntrl = *vco_cntrl | 2;
else if (target_freq_mhz < 1092 && target_freq_mhz >= 950)
*vco_cntrl = *vco_cntrl | 3;
else if (target_freq_mhz < 950 && target_freq_mhz >= 712)
*vco_cntrl = *vco_cntrl | 1;
else if (target_freq_mhz < 712 && target_freq_mhz >= 546)
*vco_cntrl = *vco_cntrl | 2;
else if (target_freq_mhz < 546 && target_freq_mhz >= 475)
*vco_cntrl = *vco_cntrl | 3;
else if (target_freq_mhz < 475 && target_freq_mhz >= 356)
*vco_cntrl = *vco_cntrl | 1;
else if (target_freq_mhz < 356 && target_freq_mhz >= 273)
*vco_cntrl = *vco_cntrl | 2;
else if (target_freq_mhz < 273 && target_freq_mhz >= 237)
*vco_cntrl = *vco_cntrl | 3;
else if (target_freq_mhz < 237 && target_freq_mhz >= 178)
*vco_cntrl = *vco_cntrl | 1;
else if (target_freq_mhz < 178 && target_freq_mhz >= 136)
*vco_cntrl = *vco_cntrl | 2;
else if (target_freq_mhz < 136 && target_freq_mhz >= 118)
*vco_cntrl = *vco_cntrl | 3;
else if (target_freq_mhz < 118 && target_freq_mhz >= 89)
*vco_cntrl = *vco_cntrl | 1;
else if (target_freq_mhz < 89 && target_freq_mhz >= 68)
*vco_cntrl = *vco_cntrl | 2;
else if (target_freq_mhz < 68 && target_freq_mhz >= 57)
*vco_cntrl = *vco_cntrl | 3;
else if (target_freq_mhz < 57 && target_freq_mhz >= 44)
*vco_cntrl = *vco_cntrl | 1;
else
*vco_cntrl = *vco_cntrl | 2;
}
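/*
 * Example, following only the branches above: for a 750 MHz target with
 * post_div_mux = 1 (p_div = 2), the base value is 0x30 with
 * cpbias_cntrl = 1, and 750 MHz lands in the [712, 950) band, so
 * *vco_cntrl becomes 0x30 | 1 = 0x31.
 */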
static u32 __mdss_dsi_get_osc_freq_target(u64 target_freq)
{
u64 target_freq_mhz = div_u64(target_freq, 1000000);
if (target_freq_mhz <= 1000)
return 1315;
else if (target_freq_mhz > 1000 && target_freq_mhz <= 1500)
return 1839;
else
return 0;
}
static u64 __mdss_dsi_pll_get_m_div(u64 vco_rate)
{
return div_u64((vco_rate * 4), 19200000);
}
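/*
 * i.e. m_div = vco_rate * 4 / 19.2 MHz, the inverse of the
 * vco = ref * m_div / 4 relation used by pll_vco_get_rate_12nm(). For
 * example, a 1497.6 MHz VCO gives 1497600000 * 4 / 19200000 = 312.
 */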
static u32 __mdss_dsi_get_fsm_ovr_ctrl(u64 target_freq)
{
u64 bitclk_rate_mhz = div_u64((target_freq * 2), 1000000);
if (bitclk_rate_mhz > 1500 && bitclk_rate_mhz <= 2500)
return 0;
else
return BIT(6);
}
static void mdss_dsi_pll_12nm_calc_reg(struct mdss_pll_resources *pll,
struct dsi_pll_db *pdb)
{
struct dsi_pll_param *param = &pdb->param;
u64 target_freq = 0;
target_freq = div_u64(pll->vco_current_rate,
BIT(pdb->param.post_div_mux));
param->hsfreqrange = __mdss_dsi_get_hsfreqrange(target_freq);
__mdss_dsi_get_pll_vco_cntrl(target_freq, param->post_div_mux,
&param->vco_cntrl, &param->cpbias_cntrl);
param->osc_freq_target = __mdss_dsi_get_osc_freq_target(target_freq);
param->m_div = (u32) __mdss_dsi_pll_get_m_div(pll->vco_current_rate);
param->fsm_ovr_ctrl = __mdss_dsi_get_fsm_ovr_ctrl(target_freq);
param->prop_cntrl = 0x05;
param->int_cntrl = 0x00;
param->gmp_cntrl = 0x1;
}
static u32 __mdss_dsi_get_multi_intX100(u64 vco_rate, u32 *rem)
{
u32 reminder = 0;
u64 temp = 0;
const u32 ref_clk_rate = 19200000, quarterX100 = 25;
temp = div_u64_rem(vco_rate, ref_clk_rate, &reminder);
temp *= 100;
/*
* Multiplication integer needs to be floored in steps of 0.25
* Hence multi_intX100 needs to be rounded off in steps of 25
*/
if (reminder < (ref_clk_rate / 4)) {
*rem = reminder;
return temp;
} else if ((reminder >= (ref_clk_rate / 4)) &&
reminder < (ref_clk_rate / 2)) {
*rem = (reminder - (ref_clk_rate / 4));
return (temp + quarterX100);
} else if ((reminder >= (ref_clk_rate / 2)) &&
(reminder < ((3 * ref_clk_rate) / 4))) {
*rem = (reminder - (ref_clk_rate / 2));
return (temp + (quarterX100 * 2));
}
*rem = (reminder - ((3 * ref_clk_rate) / 4));
return (temp + (quarterX100 * 3));
}
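/*
 * Worked example of the quarter-step rounding above, with the 19.2 MHz
 * reference and vco_rate = 1,500,000,000: div_u64_rem() yields a
 * quotient of 78 and a remainder of 2,400,000. The remainder is below
 * ref_clk_rate / 4 (4,800,000), so the function returns 78 * 100 = 7800
 * (i.e. 78.00) and sets *rem = 2,400,000.
 */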
static u32 __calc_gcd(u32 num1, u32 num2)
{
if (num2 != 0)
return __calc_gcd(num2, (num1 % num2));
else
return num1;
}
static void mdss_dsi_pll_12nm_calc_ssc(struct mdss_pll_resources *pll,
struct dsi_pll_db *pdb)
{
struct dsi_pll_param *param = &pdb->param;
u64 multi_intX100 = 0, temp = 0;
u32 temp_rem1 = 0, temp_rem2 = 0;
const u64 power_2_17 = 131072, power_2_10 = 1024;
const u32 ref_clk_rate = 19200000;
multi_intX100 = __mdss_dsi_get_multi_intX100(pll->vco_current_rate,
&temp_rem1);
/* Calculation for mpll_ssc_peak_i */
temp = (multi_intX100 * pll->ssc_ppm * power_2_17);
temp = div_u64(temp, 100); /* 100 div for multi_intX100 */
param->mpll_ssc_peak_i =
(u32) div_u64(temp, 1000000); /*10^6 for SSC PPM */
/* Calculation for mpll_stepsize_i */
param->mpll_stepsize_i = (u32) div_u64((param->mpll_ssc_peak_i *
pll->ssc_freq * power_2_10), ref_clk_rate);
/* Calculation for mpll_mint_i */
param->mpll_mint_i = (u32) (div_u64((multi_intX100 * 4), 100) - 32);
/* Calculation for mpll_frac_den */
param->mpll_frac_den = (u32) div_u64(ref_clk_rate,
__calc_gcd((u32)pll->vco_current_rate, ref_clk_rate));
/* Calculation for mpll_frac_quot_i */
temp = (temp_rem1 * power_2_17);
param->mpll_frac_quot_i =
(u32) div_u64_rem(temp, ref_clk_rate, &temp_rem2);
/* Calculation for mpll_frac_rem */
param->mpll_frac_rem = (u32) div_u64(((u64)temp_rem2 *
param->mpll_frac_den), ref_clk_rate);
pr_debug("mpll_ssc_peak_i=%d mpll_stepsize_i=%d mpll_mint_i=%d\n",
param->mpll_ssc_peak_i, param->mpll_stepsize_i,
param->mpll_mint_i);
pr_debug("mpll_frac_den=%d mpll_frac_quot_i=%d mpll_frac_rem=%d\n",
param->mpll_frac_den, param->mpll_frac_quot_i,
param->mpll_frac_rem);
}
static void pll_db_commit_12nm_ssc(struct mdss_pll_resources *pll,
struct dsi_pll_db *pdb)
{
void __iomem *pll_base = pll->pll_base;
struct dsi_pll_param *param = &pdb->param;
char data = 0;
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC0, 0x27);
data = (param->mpll_mint_i & 0xff);
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC7, data);
data = ((param->mpll_mint_i & 0xff00) >> 8);
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC8, data);
data = (param->mpll_ssc_peak_i & 0xff);
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC1, data);
data = ((param->mpll_ssc_peak_i & 0xff00) >> 8);
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC2, data);
data = ((param->mpll_ssc_peak_i & 0xf0000) >> 16);
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC3, data);
data = (param->mpll_stepsize_i & 0xff);
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC4, data);
data = ((param->mpll_stepsize_i & 0xff00) >> 8);
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC5, data);
data = ((param->mpll_stepsize_i & 0x1f0000) >> 16);
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC6, data);
data = (param->mpll_frac_quot_i & 0xff);
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC10, data);
data = ((param->mpll_frac_quot_i & 0xff00) >> 8);
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC11, data);
data = (param->mpll_frac_rem & 0xff);
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC12, data);
data = ((param->mpll_frac_rem & 0xff00) >> 8);
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC13, data);
data = (param->mpll_frac_den & 0xff);
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC14, data);
data = ((param->mpll_frac_den & 0xff00) >> 8);
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC15, data);
}
static void pll_db_commit_12nm(struct mdss_pll_resources *pll,
struct dsi_pll_db *pdb)
{
void __iomem *pll_base = pll->pll_base;
struct dsi_pll_param *param = &pdb->param;
char data = 0;
MDSS_PLL_REG_W(pll_base, DSIPHY_CTRL0, 0x01);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_CTRL, 0x05);
MDSS_PLL_REG_W(pll_base, DSIPHY_SLEWRATE_DDL_LOOP_CTRL, 0x01);
data = ((param->hsfreqrange & 0x7f) | BIT(7));
MDSS_PLL_REG_W(pll_base, DSIPHY_HS_FREQ_RAN_SEL, data);
data = ((param->vco_cntrl & 0x3f) | BIT(6));
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_CTRL, data);
data = (param->osc_freq_target & 0x7f);
MDSS_PLL_REG_W(pll_base, DSIPHY_SLEWRATE_DDL_CYC_FRQ_ADJ_0, data);
data = ((param->osc_freq_target & 0xf80) >> 7);
MDSS_PLL_REG_W(pll_base, DSIPHY_SLEWRATE_DDL_CYC_FRQ_ADJ_1, data);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_INPUT_LOOP_DIV_RAT_CTRL, 0x30);
data = (param->m_div & 0x3f);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_LOOP_DIV_RATIO_0, data);
data = ((param->m_div & 0xfc0) >> 6);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_LOOP_DIV_RATIO_1, data);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_INPUT_DIV_PLL_OVR, 0x60);
data = (param->prop_cntrl & 0x3f);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PROP_CHRG_PUMP_CTRL, data);
data = (param->int_cntrl & 0x3f);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_INTEG_CHRG_PUMP_CTRL, data);
data = ((param->gmp_cntrl & 0x3) << 4);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_GMP_CTRL_DIG_TST, data);
data = ((param->cpbias_cntrl & 0x1) << 6) | BIT(4);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_CHAR_PUMP_BIAS_CTRL, data);
data = ((param->gp_div_mux & 0x7) << 5) | 0x5;
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_CTRL, data);
data = (param->pixel_divhf & 0x7f);
MDSS_PLL_REG_W(pll_base, DSIPHY_SSC9, data);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_ANA_PROG_CTRL, 0x03);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_ANA_TST_LOCK_ST_OVR_CTRL, 0x50);
MDSS_PLL_REG_W(pll_base,
DSIPHY_SLEWRATE_FSM_OVR_CTRL, param->fsm_ovr_ctrl);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PHA_ERR_CTRL_0, 0x01);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PHA_ERR_CTRL_1, 0x00);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_LOCK_FILTER, 0xff);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_UNLOCK_FILTER, 0x03);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PRO_DLY_RELOCK, 0x0c);
MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_LOCK_DET_MODE_SEL, 0x02);
if (pll->ssc_en)
pll_db_commit_12nm_ssc(pll, pdb);
pr_debug("pll:%d\n", pll->index);
wmb(); /* make sure register committed before preparing the clocks */
}
int pll_vco_set_rate_12nm(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
int rc = 0;
struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
struct dsi_pll_db *pdb;
pdb = (struct dsi_pll_db *)pll->priv;
if (!pdb) {
pr_err("pll pdb not found\n");
rc = -EINVAL;
goto error;
}
pr_debug("%s: ndx=%d rate=%lu\n", __func__, pll->index, rate);
pll->vco_current_rate = rate;
pll->vco_ref_clk_rate = vco->ref_clk_rate;
error:
return rc;
}
static unsigned long pll_vco_get_rate_12nm(struct clk_hw *hw)
{
u64 vco_rate = 0;
u32 m_div_5_0 = 0, m_div_11_6 = 0, m_div = 0;
struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
u64 ref_clk = vco->ref_clk_rate;
int rc;
struct mdss_pll_resources *pll = vco->priv;
if (is_gdsc_disabled(pll))
return 0;
rc = mdss_pll_resource_enable(pll, true);
if (rc) {
pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
return rc;
}
m_div_5_0 = MDSS_PLL_REG_R(pll->pll_base,
DSIPHY_PLL_LOOP_DIV_RATIO_0);
m_div_5_0 &= 0x3f;
pr_debug("m_div_5_0 = 0x%x\n", m_div_5_0);
m_div_11_6 = MDSS_PLL_REG_R(pll->pll_base,
DSIPHY_PLL_LOOP_DIV_RATIO_1);
m_div_11_6 &= 0x3f;
pr_debug("m_div_11_6 = 0x%x\n", m_div_11_6);
m_div = ((m_div_11_6 << 6) | (m_div_5_0));
vco_rate = div_u64((ref_clk * m_div), 4);
pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
mdss_pll_resource_enable(pll, false);
return (unsigned long)vco_rate;
}
long pll_vco_round_rate_12nm(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
unsigned long rrate = rate;
struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
if (rate < vco->min_rate)
rrate = vco->min_rate;
if (rate > vco->max_rate)
rrate = vco->max_rate;
*parent_rate = rrate;
return rrate;
}
unsigned long vco_12nm_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
unsigned long rate = 0;
int rc;
if (!pll || is_gdsc_disabled(pll)) {
pr_err("gdsc disabled\n");
return 0;
}
rc = mdss_pll_resource_enable(pll, true);
if (rc) {
pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
return 0;
}
if (pll_is_pll_locked_12nm(pll, true)) {
pll->handoff_resources = true;
pll->pll_on = true;
rate = pll_vco_get_rate_12nm(hw);
} else {
mdss_pll_resource_enable(pll, false);
}
return rate;
}
int pll_vco_prepare_12nm(struct clk_hw *hw)
{
int rc = 0;
struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
struct dsi_pll_db *pdb;
u32 data = 0;
if (!pll) {
pr_err("Dsi pll resources are not available\n");
return -EINVAL;
}
pdb = (struct dsi_pll_db *)pll->priv;
if (!pdb) {
pr_err("No prov found\n");
return -EINVAL;
}
rc = mdss_pll_resource_enable(pll, true);
if (rc) {
pr_err("ndx=%d Failed to enable mdss dsi pll resources\n",
pll->index);
return rc;
}
if ((pll->vco_cached_rate != 0)
&& (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
pll->vco_cached_rate);
if (rc) {
pr_err("index=%d vco_set_rate failed. rc=%d\n",
pll->index, rc);
goto error;
}
}
/*
* For cases where DSI PHY is already enabled like:
* 1.) LP-11 during static screen
* 2.) ULPS during static screen
* 3.) Boot up with cont splash enabled where PHY is programmed in LK
* Execute the Re-lock sequence to enable the DSI PLL.
*/
data = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SYS_CTRL);
if (data & BIT(7)) {
rc = dsi_pll_relock(pll);
if (rc)
goto error;
else
goto end;
}
mdss_dsi_pll_12nm_calc_reg(pll, pdb);
if (pll->ssc_en)
mdss_dsi_pll_12nm_calc_ssc(pll, pdb);
/* commit DSI vco */
pll_db_commit_12nm(pll, pdb);
rc = dsi_pll_enable(hw);
error:
if (rc) {
mdss_pll_resource_enable(pll, false);
pr_err("ndx=%d failed to enable dsi pll\n", pll->index);
}
end:
return rc;
}
void pll_vco_unprepare_12nm(struct clk_hw *hw)
{
struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
if (!pll) {
pr_err("Dsi pll resources are not available\n");
return;
}
pll->vco_cached_rate = clk_hw_get_rate(hw);
dsi_pll_disable(hw);
}
int pll_vco_enable_12nm(struct clk_hw *hw)
{
struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
struct mdss_pll_resources *pll = vco->priv;
u32 data = 0;
if (!pll) {
pr_err("Dsi pll resources are not available\n");
return -EINVAL;
}
if (!pll->pll_on) {
pr_err("DSI PLL not enabled, return\n");
return -EINVAL;
}
data = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SSC0);
data |= BIT(6); /* enable GP_CLK_EN */
MDSS_PLL_REG_W(pll->pll_base, DSIPHY_SSC0, data);
wmb(); /* make sure register committed before enabling branch clocks */
return 0;
}

View file

@@ -0,0 +1,709 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/delay.h>
#include "mdss-dsi-pll.h"
#include "mdss-pll.h"
#include <dt-bindings/clock/mdss-12nm-pll-clk.h>
#include "mdss-dsi-pll-12nm.h"
#define VCO_DELAY_USEC 1
static struct regmap_config dsi_pll_12nm_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x800,
};
static const struct clk_ops clk_ops_vco_12nm = {
.recalc_rate = vco_12nm_recalc_rate,
.set_rate = pll_vco_set_rate_12nm,
.round_rate = pll_vco_round_rate_12nm,
.prepare = pll_vco_prepare_12nm,
.unprepare = pll_vco_unprepare_12nm,
};
static struct regmap_bus pclk_div_regmap_bus = {
.reg_write = pixel_div_set_div,
.reg_read = pixel_div_get_div,
};
static struct regmap_bus post_div_mux_regmap_bus = {
.reg_write = set_post_div_mux_sel,
.reg_read = get_post_div_mux_sel,
};
static struct regmap_bus gp_div_mux_regmap_bus = {
.reg_write = set_gp_mux_sel,
.reg_read = get_gp_mux_sel,
};
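/*
 * These regmap_bus wrappers tie the generic clk_regmap_mux/clk_regmap_div
 * ops used below to the 12nm-specific helpers (pixel_div_set/get_div,
 * set/get_post_div_mux_sel, set/get_gp_mux_sel) from
 * mdss-dsi-pll-12nm-util.c, so "register" accesses on the mux and
 * divider clocks go through PLL-aware code rather than raw MMIO.
 */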
/*
* Clock tree model for generating DSI byte clock and pclk for 12nm DSI PLL
*
*
* +---------------+
* +----------| vco_clk |----------+
* | +---------------+ |
* | |
* | |
* | |
* +---------+---------+----+----+---------+---------+ |
* | | | | | | |
* | | | | | | |
* | | | | | | |
* +---v---+ +---v---+ +---v---+ +---v---+ +---v---+ +---v---+ |
* | DIV(1)| | DIV(2)| | DIV(4)| | DIV(8)| |DIV(16)| |DIV(32)| |
* +---+---+ +---+---+ +---+---+ +---+---+ +---+---+ +---+---+ |
* | | | | | | |
* | | +---+ +---+ | | |
* | +-----------+ | | +-----------+ | |
* +-------------------+ | | | | +-------------------+ |
* | | | | | | |
* +--v-v-v-v-v-v---+ |
* \ post_div_mux / |
* \ / |
* +-----+----+ +---------------------+
* | |
* +------------------------+ |
* | |
* +----v----+ +---------+---------+----+----+---------+---------+
* | DIV-4 | | | | | | |
* +----+----+ | | | | | |
* | +---v---+ +---v---+ +---v---+ +---v---+ +---v---+ +---v---+
* | | DIV(1)| | DIV(2)| | DIV(4)| | DIV(8)| |DIV(16)| |DIV(32)|
* | +---+---+ +---+---+ +---+---+ +---+---+ +---+---+ +---+---+
* | | | | | | |
* v | | +---+ +---+ | |
* byte_clk_src | +-----------+ | | +-----------+ |
* +-------------------+ | | | | +-------------------+
* | | | | | |
* +--v-v-v-v-v-v---+
* \ gp_cntrl_mux /
* \ /
* +-----+----+
* |
* |
* +-------v-------+
* | (DIV + 1) |
* | DIV = 0...127 |
* +-------+-------+
* |
* |
* v
* dsi_pclk input to Clock Controller MND
*/
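/*
 * Example walk of the tree above (rates are illustrative only): with a
 * 1497.6 MHz VCO and post_div_mux selecting DIV(2),
 * byte_clk_src = 1497.6 MHz / 2 / 4 = 187.2 MHz; with gp_cntrl_mux
 * selecting DIV(4) and the final pclk divider programmed to divide by 3
 * (pixel_divhf = 2), dsi_pclk = 1497.6 MHz / 4 / 3 = 124.8 MHz.
 */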
static struct dsi_pll_db pll_db[DSI_PLL_MAX];
static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
.ref_clk_rate = 19200000UL,
.min_rate = 1000000000UL,
.max_rate = 2000000000UL,
.pll_en_seq_cnt = 1,
.pll_enable_seqs[0] = dsi_pll_enable_seq_12nm,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_vco_clk",
.parent_names = (const char *[]){"bi_tcxo"},
.num_parents = 1,
.ops = &clk_ops_vco_12nm,
.flags = CLK_GET_RATE_NOCACHE,
},
};
static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
.ref_clk_rate = 19200000UL,
.min_rate = 1000000000UL,
.max_rate = 2000000000UL,
.pll_en_seq_cnt = 1,
.pll_enable_seqs[0] = dsi_pll_enable_seq_12nm,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_vco_clk",
.parent_names = (const char *[]){"bi_tcxo"},
.num_parents = 1,
.ops = &clk_ops_vco_12nm,
.flags = CLK_GET_RATE_NOCACHE,
},
};
static struct clk_fixed_factor dsi0pll_post_div1 = {
.div = 1,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_post_div1",
.parent_names = (const char *[]){"dsi0pll_vco_clk"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi0pll_post_div2 = {
.div = 2,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_post_div2",
.parent_names = (const char *[]){"dsi0pll_vco_clk"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi0pll_post_div4 = {
.div = 4,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_post_div4",
.parent_names = (const char *[]){"dsi0pll_vco_clk"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi0pll_post_div8 = {
.div = 8,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_post_div8",
.parent_names = (const char *[]){"dsi0pll_vco_clk"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi0pll_post_div16 = {
.div = 16,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_post_div16",
.parent_names = (const char *[]){"dsi0pll_vco_clk"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi0pll_post_div32 = {
.div = 32,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_post_div32",
.parent_names = (const char *[]){"dsi0pll_vco_clk"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_regmap_mux dsi0pll_post_div_mux = {
.reg = DSIPHY_PLL_VCO_CTRL,
.shift = 4,
.width = 2,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_post_div_mux",
.parent_names = (const char *[]){"dsi0pll_post_div1",
"dsi0pll_post_div2",
"dsi0pll_post_div4",
"dsi0pll_post_div8",
"dsi0pll_post_div16",
"dsi0pll_post_div32"},
.num_parents = 6,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_mux_closest_ops,
},
},
};
static struct clk_fixed_factor dsi1pll_post_div1 = {
.div = 1,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_post_div1",
.parent_names = (const char *[]){"dsi1pll_vco_clk"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_post_div2 = {
.div = 2,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_post_div2",
.parent_names = (const char *[]){"dsi1pll_vco_clk"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_post_div4 = {
.div = 4,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_post_div4",
.parent_names = (const char *[]){"dsi1pll_vco_clk"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_post_div8 = {
.div = 8,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_post_div8",
.parent_names = (const char *[]){"dsi1pll_vco_clk"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_post_div16 = {
.div = 16,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_post_div16",
.parent_names = (const char *[]){"dsi1pll_vco_clk"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_post_div32 = {
.div = 32,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_post_div32",
.parent_names = (const char *[]){"dsi1pll_vco_clk"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_regmap_mux dsi1pll_post_div_mux = {
.reg = DSIPHY_PLL_VCO_CTRL,
.shift = 4,
.width = 2,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_post_div_mux",
.parent_names = (const char *[]){"dsi1pll_post_div1",
"dsi1pll_post_div2",
"dsi1pll_post_div4",
"dsi1pll_post_div8",
"dsi1pll_post_div16",
"dsi1pll_post_div32"},
.num_parents = 6,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_mux_closest_ops,
},
},
};
static struct clk_fixed_factor dsi0pll_gp_div1 = {
.div = 1,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_gp_div1",
.parent_names = (const char *[]){"dsi0pll_vco_clk"},
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi0pll_gp_div2 = {
.div = 2,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_gp_div2",
.parent_names = (const char *[]){"dsi0pll_vco_clk"},
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi0pll_gp_div4 = {
.div = 4,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_gp_div4",
.parent_names = (const char *[]){"dsi0pll_vco_clk"},
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi0pll_gp_div8 = {
.div = 8,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_gp_div8",
.parent_names = (const char *[]){"dsi0pll_vco_clk"},
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi0pll_gp_div16 = {
.div = 16,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_gp_div16",
.parent_names = (const char *[]){"dsi0pll_vco_clk"},
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi0pll_gp_div32 = {
.div = 32,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_gp_div32",
.parent_names = (const char *[]){"dsi0pll_vco_clk"},
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_regmap_mux dsi0pll_gp_div_mux = {
.reg = DSIPHY_PLL_CTRL,
.shift = 5,
.width = 3,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_gp_div_mux",
.parent_names = (const char *[]){"dsi0pll_gp_div1",
"dsi0pll_gp_div2",
"dsi0pll_gp_div4",
"dsi0pll_gp_div8",
"dsi0pll_gp_div16",
"dsi0pll_gp_div32"},
.num_parents = 6,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_mux_closest_ops,
},
},
};
static struct clk_fixed_factor dsi1pll_gp_div1 = {
.div = 1,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_gp_div1",
.parent_names = (const char *[]){"dsi1pll_vco_clk"},
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_gp_div2 = {
.div = 2,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_gp_div2",
.parent_names = (const char *[]){"dsi1pll_vco_clk"},
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_gp_div4 = {
.div = 4,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_gp_div4",
.parent_names = (const char *[]){"dsi1pll_vco_clk"},
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_gp_div8 = {
.div = 8,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_gp_div8",
.parent_names = (const char *[]){"dsi1pll_vco_clk"},
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_gp_div16 = {
.div = 16,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_gp_div16",
.parent_names = (const char *[]){"dsi1pll_vco_clk"},
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_gp_div32 = {
.div = 32,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_gp_div32",
.parent_names = (const char *[]){"dsi1pll_vco_clk"},
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_regmap_mux dsi1pll_gp_div_mux = {
.reg = DSIPHY_PLL_CTRL,
.shift = 5,
.width = 3,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_gp_div_mux",
.parent_names = (const char *[]){"dsi1pll_gp_div1",
"dsi1pll_gp_div2",
"dsi1pll_gp_div4",
"dsi1pll_gp_div8",
"dsi1pll_gp_div16",
"dsi1pll_gp_div32"},
.num_parents = 6,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_mux_closest_ops,
},
},
};
static struct clk_regmap_div dsi0pll_pclk_src = {
.reg = DSIPHY_SSC9,
.shift = 0,
.width = 6,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_pclk_src",
.parent_names = (const char *[]){
"dsi0pll_gp_div_mux"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_div_ops,
},
},
};
static struct clk_regmap_div dsi1pll_pclk_src = {
.reg = DSIPHY_SSC9,
.shift = 0,
.width = 6,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_pclk_src",
.parent_names = (const char *[]){
"dsi1pll_gp_div_mux"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_div_ops,
},
},
};
static struct clk_fixed_factor dsi0pll_byte_clk_src = {
.div = 4,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_byte_clk_src",
.parent_names = (const char *[]){"dsi0pll_post_div_mux"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_byte_clk_src = {
.div = 4,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_byte_clk_src",
.parent_names = (const char *[]){"dsi1pll_post_div_mux"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_hw *mdss_dsi_pllcc_12nm[] = {
[VCO_CLK_0] = &dsi0pll_vco_clk.hw,
[POST_DIV1_0_CLK] = &dsi0pll_post_div1.hw,
[POST_DIV2_0_CLK] = &dsi0pll_post_div2.hw,
[POST_DIV4_0_CLK] = &dsi0pll_post_div4.hw,
[POST_DIV8_0_CLK] = &dsi0pll_post_div8.hw,
[POST_DIV16_0_CLK] = &dsi0pll_post_div16.hw,
[POST_DIV32_0_CLK] = &dsi0pll_post_div32.hw,
[POST_DIV_MUX_0_CLK] = &dsi0pll_post_div_mux.clkr.hw,
[GP_DIV1_0_CLK] = &dsi0pll_gp_div1.hw,
[GP_DIV2_0_CLK] = &dsi0pll_gp_div2.hw,
[GP_DIV4_0_CLK] = &dsi0pll_gp_div4.hw,
[GP_DIV8_0_CLK] = &dsi0pll_gp_div8.hw,
[GP_DIV16_0_CLK] = &dsi0pll_gp_div16.hw,
[GP_DIV32_0_CLK] = &dsi0pll_gp_div32.hw,
[GP_DIV_MUX_0_CLK] = &dsi0pll_gp_div_mux.clkr.hw,
[PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
[BYTE_CLK_SRC_0_CLK] = &dsi0pll_byte_clk_src.hw,
[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
[POST_DIV1_1_CLK] = &dsi1pll_post_div1.hw,
[POST_DIV2_1_CLK] = &dsi1pll_post_div2.hw,
[POST_DIV4_1_CLK] = &dsi1pll_post_div4.hw,
[POST_DIV8_1_CLK] = &dsi1pll_post_div8.hw,
[POST_DIV16_1_CLK] = &dsi1pll_post_div16.hw,
[POST_DIV32_1_CLK] = &dsi1pll_post_div32.hw,
[POST_DIV_MUX_1_CLK] = &dsi1pll_post_div_mux.clkr.hw,
[GP_DIV1_1_CLK] = &dsi1pll_gp_div1.hw,
[GP_DIV2_1_CLK] = &dsi1pll_gp_div2.hw,
[GP_DIV4_1_CLK] = &dsi1pll_gp_div4.hw,
[GP_DIV8_1_CLK] = &dsi1pll_gp_div8.hw,
[GP_DIV16_1_CLK] = &dsi1pll_gp_div16.hw,
[GP_DIV32_1_CLK] = &dsi1pll_gp_div32.hw,
[GP_DIV_MUX_1_CLK] = &dsi1pll_gp_div_mux.clkr.hw,
[PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
[BYTE_CLK_SRC_1_CLK] = &dsi1pll_byte_clk_src.hw,
};
int dsi_pll_clock_register_12nm(struct platform_device *pdev,
struct mdss_pll_resources *pll_res)
{
int rc = 0, ndx, i;
struct clk *clk;
struct clk_onecell_data *clk_data;
int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_12nm);
struct regmap *rmap;
struct dsi_pll_db *pdb;
if (!pdev || !pdev->dev.of_node ||
!pll_res || !pll_res->pll_base || !pll_res->phy_base) {
pr_err("Invalid params\n");
return -EINVAL;
}
ndx = pll_res->index;
if (ndx >= DSI_PLL_MAX) {
pr_err("pll index(%d) NOT supported\n", ndx);
return -EINVAL;
}
pdb = &pll_db[ndx];
pll_res->priv = pdb;
pll_res->vco_delay = VCO_DELAY_USEC;
pdb->pll = pll_res;
ndx++;
ndx %= DSI_PLL_MAX;
pdb->next = &pll_db[ndx];
clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
sizeof(struct clk *)), GFP_KERNEL);
if (!clk_data->clks)
return -ENOMEM;
clk_data->clk_num = num_clks;
/* Establish client data */
if (ndx == 0) {
rmap = devm_regmap_init(&pdev->dev, &post_div_mux_regmap_bus,
pll_res, &dsi_pll_12nm_config);
dsi0pll_post_div_mux.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &gp_div_mux_regmap_bus,
pll_res, &dsi_pll_12nm_config);
dsi0pll_gp_div_mux.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &pclk_div_regmap_bus,
pll_res, &dsi_pll_12nm_config);
dsi0pll_pclk_src.clkr.regmap = rmap;
dsi0pll_vco_clk.priv = pll_res;
for (i = VCO_CLK_0; i <= BYTE_CLK_SRC_0_CLK; i++) {
clk = devm_clk_register(&pdev->dev,
mdss_dsi_pllcc_12nm[i]);
if (IS_ERR(clk)) {
pr_err("clk registration failed for DSI clock:%d\n",
pll_res->index);
rc = -EINVAL;
goto clk_register_fail;
}
clk_data->clks[i] = clk;
}
rc = of_clk_add_provider(pdev->dev.of_node,
of_clk_src_onecell_get, clk_data);
} else {
rmap = devm_regmap_init(&pdev->dev, &post_div_mux_regmap_bus,
pll_res, &dsi_pll_12nm_config);
dsi1pll_post_div_mux.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &gp_div_mux_regmap_bus,
pll_res, &dsi_pll_12nm_config);
dsi1pll_gp_div_mux.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &pclk_div_regmap_bus,
pll_res, &dsi_pll_12nm_config);
dsi1pll_pclk_src.clkr.regmap = rmap;
dsi1pll_vco_clk.priv = pll_res;
for (i = VCO_CLK_1; i <= BYTE_CLK_SRC_1_CLK; i++) {
clk = devm_clk_register(&pdev->dev,
mdss_dsi_pllcc_12nm[i]);
if (IS_ERR(clk)) {
pr_err("clk registration failed for DSI clock:%d\n",
pll_res->index);
rc = -EINVAL;
goto clk_register_fail;
}
clk_data->clks[i] = clk;
}
rc = of_clk_add_provider(pdev->dev.of_node,
of_clk_src_onecell_get, clk_data);
}
if (!rc) {
pr_info("Registered DSI PLL ndx=%d,clocks successfully\n", ndx);
return rc;
}
clk_register_fail:
return rc;
}

View file

@@ -0,0 +1,113 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */
#ifndef MDSS_DSI_PLL_12NM_H
#define MDSS_DSI_PLL_12NM_H
#define DSIPHY_PLL_POWERUP_CTRL 0x034
#define DSIPHY_PLL_PROP_CHRG_PUMP_CTRL 0x038
#define DSIPHY_PLL_INTEG_CHRG_PUMP_CTRL 0x03c
#define DSIPHY_PLL_ANA_TST_LOCK_ST_OVR_CTRL 0x044
#define DSIPHY_PLL_VCO_CTRL 0x048
#define DSIPHY_PLL_GMP_CTRL_DIG_TST 0x04c
#define DSIPHY_PLL_PHA_ERR_CTRL_0 0x050
#define DSIPHY_PLL_LOCK_FILTER 0x054
#define DSIPHY_PLL_UNLOCK_FILTER 0x058
#define DSIPHY_PLL_INPUT_DIV_PLL_OVR 0x05c
#define DSIPHY_PLL_LOOP_DIV_RATIO_0 0x060
#define DSIPHY_PLL_INPUT_LOOP_DIV_RAT_CTRL 0x064
#define DSIPHY_PLL_PRO_DLY_RELOCK 0x06c
#define DSIPHY_PLL_CHAR_PUMP_BIAS_CTRL 0x070
#define DSIPHY_PLL_LOCK_DET_MODE_SEL 0x074
#define DSIPHY_PLL_ANA_PROG_CTRL 0x07c
#define DSIPHY_HS_FREQ_RAN_SEL 0x110
#define DSIPHY_SLEWRATE_FSM_OVR_CTRL 0x280
#define DSIPHY_SLEWRATE_DDL_LOOP_CTRL 0x28c
#define DSIPHY_SLEWRATE_DDL_CYC_FRQ_ADJ_0 0x290
#define DSIPHY_PLL_PHA_ERR_CTRL_1 0x2e4
#define DSIPHY_PLL_LOOP_DIV_RATIO_1 0x2e8
#define DSIPHY_SLEWRATE_DDL_CYC_FRQ_ADJ_1 0x328
#define DSIPHY_SSC0 0x394
#define DSIPHY_SSC7 0x3b0
#define DSIPHY_SSC8 0x3b4
#define DSIPHY_SSC1 0x398
#define DSIPHY_SSC2 0x39c
#define DSIPHY_SSC3 0x3a0
#define DSIPHY_SSC4 0x3a4
#define DSIPHY_SSC5 0x3a8
#define DSIPHY_SSC6 0x3ac
#define DSIPHY_SSC10 0x360
#define DSIPHY_SSC11 0x364
#define DSIPHY_SSC12 0x368
#define DSIPHY_SSC13 0x36c
#define DSIPHY_SSC14 0x370
#define DSIPHY_SSC15 0x374
#define DSIPHY_SSC9 0x3b8
#define DSIPHY_STAT0 0x3e0
#define DSIPHY_CTRL0 0x3e8
#define DSIPHY_SYS_CTRL 0x3f0
#define DSIPHY_PLL_CTRL 0x3f8
struct dsi_pll_param {
u32 hsfreqrange;
u32 vco_cntrl;
u32 osc_freq_target;
u32 m_div;
u32 prop_cntrl;
u32 int_cntrl;
u32 gmp_cntrl;
u32 cpbias_cntrl;
/* mux and dividers */
u32 gp_div_mux;
u32 post_div_mux;
u32 pixel_divhf;
u32 fsm_ovr_ctrl;
/* ssc_params */
u32 mpll_ssc_peak_i;
u32 mpll_stepsize_i;
u32 mpll_mint_i;
u32 mpll_frac_den;
u32 mpll_frac_quot_i;
u32 mpll_frac_rem;
};
enum {
DSI_PLL_0,
DSI_PLL_1,
DSI_PLL_MAX
};
struct dsi_pll_db {
struct dsi_pll_db *next;
struct mdss_pll_resources *pll;
struct dsi_pll_param param;
};
int pll_vco_set_rate_12nm(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
long pll_vco_round_rate_12nm(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate);
unsigned long vco_12nm_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate);
int pll_vco_prepare_12nm(struct clk_hw *hw);
void pll_vco_unprepare_12nm(struct clk_hw *hw);
int pll_vco_enable_12nm(struct clk_hw *hw);
int pixel_div_set_div(void *context, unsigned int reg,
unsigned int div);
int pixel_div_get_div(void *context, unsigned int reg,
unsigned int *div);
int set_post_div_mux_sel(void *context, unsigned int reg,
unsigned int sel);
int get_post_div_mux_sel(void *context, unsigned int reg,
unsigned int *sel);
int set_gp_mux_sel(void *context, unsigned int reg,
unsigned int sel);
int get_gp_mux_sel(void *context, unsigned int reg,
unsigned int *sel);
int dsi_pll_enable_seq_12nm(struct mdss_pll_resources *pll);
#endif /* MDSS_DSI_PLL_12NM_H */

View file

@@ -0,0 +1,557 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2012-2018, 2021, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <dt-bindings/clock/mdss-28nm-pll-clk.h>
#include "mdss-pll.h"
#include "mdss-dsi-pll.h"
#include "mdss-dsi-pll-28nm.h"
#define VCO_DELAY_USEC 1000
enum {
DSI_PLL_0,
DSI_PLL_1,
DSI_PLL_MAX
};
static struct lpfr_cfg lpfr_lut_struct[] = {
{479500000, 8},
{480000000, 11},
{575500000, 8},
{576000000, 12},
{610500000, 8},
{659500000, 9},
{671500000, 10},
{672000000, 14},
{708500000, 10},
{750000000, 11},
};
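/*
 * Loop-filter resistance lookup: each entry pairs an upper VCO rate bound
 * with the resistance code written to PLL_LPFR_CFG; the rate calculation
 * picks the first entry whose vco_rate is at or above the requested rate.
 */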
static void dsi_pll_sw_reset(struct mdss_pll_resources *rsc)
{
/*
* DSI PLL software reset. Add HW recommended delays after toggling
* the software reset bit off and back on.
*/
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x01);
ndelay(500);
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x00);
}
static void dsi_pll_toggle_lock_detect(
struct mdss_pll_resources *rsc)
{
/* DSI PLL toggle lock detect setting */
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x04);
ndelay(500);
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x05);
udelay(512);
}
static int dsi_pll_check_lock_status(
struct mdss_pll_resources *rsc)
{
int rc = 0;
rc = dsi_pll_lock_status(rsc);
if (rc)
pr_debug("PLL Locked\n");
else
pr_err("PLL failed to lock\n");
return rc;
}
static int dsi_pll_enable_seq_gf2(struct mdss_pll_resources *rsc)
{
int pll_locked = 0;
dsi_pll_sw_reset(rsc);
/*
* GF PART 2 PLL power up sequence.
* Add necessary delays recommended by hardware.
*/
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1, 0x04);
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
udelay(3);
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
udelay(500);
dsi_pll_toggle_lock_detect(rsc);
pll_locked = dsi_pll_check_lock_status(rsc);
return pll_locked ? 0 : -EINVAL;
}
static int dsi_pll_enable_seq_gf1(struct mdss_pll_resources *rsc)
{
int pll_locked = 0;
dsi_pll_sw_reset(rsc);
/*
* GF PART 1 PLL power up sequence.
* Add necessary delays recommended by hardware.
*/
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1, 0x14);
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
udelay(3);
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
udelay(500);
dsi_pll_toggle_lock_detect(rsc);
pll_locked = dsi_pll_check_lock_status(rsc);
return pll_locked ? 0 : -EINVAL;
}
static int dsi_pll_enable_seq_tsmc(struct mdss_pll_resources *rsc)
{
int pll_locked = 0;
dsi_pll_sw_reset(rsc);
/*
* TSMC PLL power up sequence.
* Add necessary delays recommended by hardware.
*/
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1, 0x34);
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
udelay(500);
dsi_pll_toggle_lock_detect(rsc);
pll_locked = dsi_pll_check_lock_status(rsc);
return pll_locked ? 0 : -EINVAL;
}
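/*
 * The three foundry-specific sequences above are identical apart from the
 * CAL_CFG1 value (0x34 TSMC, 0x14 GF part 1, 0x04 GF part 2) and the
 * extra 3 us settle before the final GLB_CFG write in the GF variants;
 * the enable path retries each entry of pll_enable_seqs[] until one of
 * them reports lock.
 */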
static struct regmap_config dsi_pll_28lpm_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0xF4,
};
static struct regmap_bus analog_postdiv_regmap_bus = {
.reg_write = analog_postdiv_reg_write,
.reg_read = analog_postdiv_reg_read,
};
static struct regmap_bus byteclk_src_mux_regmap_bus = {
.reg_write = byteclk_mux_write_sel,
.reg_read = byteclk_mux_read_sel,
};
static struct regmap_bus pclk_src_regmap_bus = {
.reg_write = pixel_clk_set_div,
.reg_read = pixel_clk_get_div,
};
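/*
 * These regmap_bus callbacks back the clk_regmap mux/div objects below;
 * instead of plain MMIO regmap access, every read/write goes through
 * helpers that vote the PLL resources on (mdss_pll_resource_enable)
 * around each register access.
 */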
static const struct clk_ops clk_ops_vco_28lpm = {
.recalc_rate = vco_28nm_recalc_rate,
.set_rate = vco_28nm_set_rate,
.round_rate = vco_28nm_round_rate,
.prepare = vco_28nm_prepare,
.unprepare = vco_28nm_unprepare,
};
static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
.ref_clk_rate = 19200000UL,
.min_rate = 350000000UL,
.max_rate = 750000000UL,
.pll_en_seq_cnt = 9,
.pll_enable_seqs[0] = dsi_pll_enable_seq_tsmc,
.pll_enable_seqs[1] = dsi_pll_enable_seq_tsmc,
.pll_enable_seqs[2] = dsi_pll_enable_seq_tsmc,
.pll_enable_seqs[3] = dsi_pll_enable_seq_gf1,
.pll_enable_seqs[4] = dsi_pll_enable_seq_gf1,
.pll_enable_seqs[5] = dsi_pll_enable_seq_gf1,
.pll_enable_seqs[6] = dsi_pll_enable_seq_gf2,
.pll_enable_seqs[7] = dsi_pll_enable_seq_gf2,
.pll_enable_seqs[8] = dsi_pll_enable_seq_gf2,
.lpfr_lut_size = 10,
.lpfr_lut = lpfr_lut_struct,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_vco_clk",
.parent_names = (const char *[]){"bi_tcxo"},
.num_parents = 1,
.ops = &clk_ops_vco_28lpm,
.flags = CLK_GET_RATE_NOCACHE,
},
};
static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
.ref_clk_rate = 19200000UL,
.min_rate = 350000000UL,
.max_rate = 750000000UL,
.pll_en_seq_cnt = 9,
.pll_enable_seqs[0] = dsi_pll_enable_seq_tsmc,
.pll_enable_seqs[1] = dsi_pll_enable_seq_tsmc,
.pll_enable_seqs[2] = dsi_pll_enable_seq_tsmc,
.pll_enable_seqs[3] = dsi_pll_enable_seq_gf1,
.pll_enable_seqs[4] = dsi_pll_enable_seq_gf1,
.pll_enable_seqs[5] = dsi_pll_enable_seq_gf1,
.pll_enable_seqs[6] = dsi_pll_enable_seq_gf2,
.pll_enable_seqs[7] = dsi_pll_enable_seq_gf2,
.pll_enable_seqs[8] = dsi_pll_enable_seq_gf2,
.lpfr_lut_size = 10,
.lpfr_lut = lpfr_lut_struct,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_vco_clk",
.parent_names = (const char *[]){"bi_tcxo"},
.num_parents = 1,
.ops = &clk_ops_vco_28lpm,
.flags = CLK_GET_RATE_NOCACHE,
},
};
static struct clk_regmap_div dsi0pll_analog_postdiv = {
.reg = DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG,
.shift = 0,
.width = 4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_analog_postdiv",
.parent_names = (const char *[]){"dsi0pll_vco_clk"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_div_ops,
},
},
};
static struct clk_regmap_div dsi1pll_analog_postdiv = {
.reg = DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG,
.shift = 0,
.width = 4,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_analog_postdiv",
.parent_names = (const char *[]){"dsi1pll_vco_clk"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_div_ops,
},
},
};
static struct clk_fixed_factor dsi0pll_indirect_path_src = {
.div = 2,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_indirect_path_src",
.parent_names = (const char *[]){"dsi0pll_analog_postdiv"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_indirect_path_src = {
.div = 2,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_indirect_path_src",
.parent_names = (const char *[]){"dsi1pll_analog_postdiv"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_regmap_mux dsi0pll_byteclk_src_mux = {
.reg = DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG,
.shift = 1,
.width = 1,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0pll_byteclk_src_mux",
.parent_names = (const char *[]){
"dsi0pll_vco_clk",
"dsi0pll_indirect_path_src"},
.num_parents = 2,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_mux_closest_ops,
},
},
};
static struct clk_regmap_mux dsi1pll_byteclk_src_mux = {
.reg = DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG,
.shift = 1,
.width = 1,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1pll_byteclk_src_mux",
.parent_names = (const char *[]){
"dsi1pll_vco_clk",
"dsi1pll_indirect_path_src"},
.num_parents = 2,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_regmap_mux_closest_ops,
},
},
};
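/*
 * Bit 1 of PLL_VREG_CFG (handled by byteclk_mux_write_sel/_read_sel)
 * selects between the VCO output directly and the indirect path, i.e.
 * the analog postdiv followed by the fixed /2 above.
 */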
static struct clk_fixed_factor dsi0pll_byteclk_src = {
.div = 4,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi0_phy_pll_out_byteclk",
.parent_names = (const char *[]){
"dsi0pll_byteclk_src_mux"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_fixed_factor dsi1pll_byteclk_src = {
.div = 4,
.mult = 1,
.hw.init = &(struct clk_init_data){
.name = "dsi1_phy_pll_out_byteclk",
.parent_names = (const char *[]){
"dsi1pll_byteclk_src_mux"},
.num_parents = 1,
.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
.ops = &clk_fixed_factor_ops,
},
};
static struct clk_regmap_div dsi0pll_pclk_src = {
.reg = DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG,
.shift = 0,
.width = 8,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0_phy_pll_out_dsiclk",
.parent_names = (const char *[]){"dsi0pll_vco_clk"},
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_regmap_div_ops,
},
},
};
static struct clk_regmap_div dsi1pll_pclk_src = {
.reg = DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG,
.shift = 0,
.width = 8,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1_phy_pll_out_dsiclk",
.parent_names = (const char *[]){"dsi1pll_vco_clk"},
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_regmap_div_ops,
},
},
};
static struct clk_hw *mdss_dsi_pllcc_28lpm[] = {
[VCO_CLK_0] = &dsi0pll_vco_clk.hw,
[ANALOG_POSTDIV_0_CLK] = &dsi0pll_analog_postdiv.clkr.hw,
[INDIRECT_PATH_SRC_0_CLK] = &dsi0pll_indirect_path_src.hw,
[BYTECLK_SRC_MUX_0_CLK] = &dsi0pll_byteclk_src_mux.clkr.hw,
[BYTECLK_SRC_0_CLK] = &dsi0pll_byteclk_src.hw,
[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
[ANALOG_POSTDIV_1_CLK] = &dsi1pll_analog_postdiv.clkr.hw,
[INDIRECT_PATH_SRC_1_CLK] = &dsi1pll_indirect_path_src.hw,
[BYTECLK_SRC_MUX_1_CLK] = &dsi1pll_byteclk_src_mux.clkr.hw,
[BYTECLK_SRC_1_CLK] = &dsi1pll_byteclk_src.hw,
[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
};
int dsi_pll_clock_register_28lpm(struct platform_device *pdev,
struct mdss_pll_resources *pll_res)
{
int rc = 0, ndx, i;
struct clk *clk;
struct clk_onecell_data *clk_data;
int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_28lpm);
struct regmap *rmap;
int const ssc_freq_min = 30000; /* min. recommended freq. value */
int const ssc_freq_max = 33000; /* max. recommended freq. value */
int const ssc_ppm_max = 5000; /* max. recommended ppm */
if (!pdev || !pdev->dev.of_node ||
!pll_res || !pll_res->pll_base) {
pr_err("Invalid params\n");
return -EINVAL;
}
ndx = pll_res->index;
if (ndx >= DSI_PLL_MAX) {
pr_err("pll index(%d) NOT supported\n", ndx);
return -EINVAL;
}
pll_res->vco_delay = VCO_DELAY_USEC;
if (pll_res->ssc_en) {
if (!pll_res->ssc_freq || (pll_res->ssc_freq < ssc_freq_min) ||
(pll_res->ssc_freq > ssc_freq_max)) {
pll_res->ssc_freq = ssc_freq_min;
pr_debug("SSC frequency out of recommended range. Set to default=%d\n",
pll_res->ssc_freq);
}
if (!pll_res->ssc_ppm || (pll_res->ssc_ppm > ssc_ppm_max)) {
pll_res->ssc_ppm = ssc_ppm_max;
pr_debug("SSC PPM out of recommended range. Set to default=%d\n",
pll_res->ssc_ppm);
}
}
clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
sizeof(struct clk *)), GFP_KERNEL);
if (!clk_data->clks)
return -ENOMEM;
clk_data->clk_num = num_clks;
/* Establish client data */
if (ndx == 0) {
rmap = devm_regmap_init(&pdev->dev, &byteclk_src_mux_regmap_bus,
pll_res, &dsi_pll_28lpm_config);
if (IS_ERR(rmap)) {
pr_err("regmap init failed for DSI clock:%d\n",
pll_res->index);
return -EINVAL;
}
dsi0pll_byteclk_src_mux.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &analog_postdiv_regmap_bus,
pll_res, &dsi_pll_28lpm_config);
if (IS_ERR(rmap)) {
pr_err("regmap init failed for DSI clock:%d\n",
pll_res->index);
return -EINVAL;
}
dsi0pll_analog_postdiv.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
pll_res, &dsi_pll_28lpm_config);
if (IS_ERR(rmap)) {
pr_err("regmap init failed for DSI clock:%d\n",
pll_res->index);
return -EINVAL;
}
dsi0pll_pclk_src.clkr.regmap = rmap;
dsi0pll_vco_clk.priv = pll_res;
for (i = VCO_CLK_0; i <= PCLK_SRC_0_CLK; i++) {
clk = devm_clk_register(&pdev->dev,
mdss_dsi_pllcc_28lpm[i]);
if (IS_ERR(clk)) {
pr_err("clk registration failed for DSI clock:%d\n",
pll_res->index);
rc = -EINVAL;
goto clk_register_fail;
}
clk_data->clks[i] = clk;
}
rc = of_clk_add_provider(pdev->dev.of_node,
of_clk_src_onecell_get, clk_data);
} else {
rmap = devm_regmap_init(&pdev->dev, &byteclk_src_mux_regmap_bus,
pll_res, &dsi_pll_28lpm_config);
if (IS_ERR(rmap)) {
pr_err("regmap init failed for DSI clock:%d\n",
pll_res->index);
return -EINVAL;
}
dsi1pll_byteclk_src_mux.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &analog_postdiv_regmap_bus,
pll_res, &dsi_pll_28lpm_config);
if (IS_ERR(rmap)) {
pr_err("regmap init failed for DSI clock:%d\n",
pll_res->index);
return -EINVAL;
}
dsi1pll_analog_postdiv.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
pll_res, &dsi_pll_28lpm_config);
if (IS_ERR(rmap)) {
pr_err("regmap init failed for DSI clock:%d\n",
pll_res->index);
return -EINVAL;
}
dsi1pll_pclk_src.clkr.regmap = rmap;
dsi1pll_vco_clk.priv = pll_res;
for (i = VCO_CLK_1; i <= PCLK_SRC_1_CLK; i++) {
clk = devm_clk_register(&pdev->dev,
mdss_dsi_pllcc_28lpm[i]);
if (IS_ERR(clk)) {
pr_err("clk registration failed for DSI clock:%d\n",
pll_res->index);
rc = -EINVAL;
goto clk_register_fail;
}
clk_data->clks[i] = clk;
}
rc = of_clk_add_provider(pdev->dev.of_node,
of_clk_src_onecell_get, clk_data);
}
if (!rc) {
pr_info("Registered DSI PLL ndx=%d,clocks successfully\n", ndx);
return rc;
}
clk_register_fail:
return rc;
}

View file

@@ -0,0 +1,665 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2012-2018, 2021, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/delay.h>
#include "mdss-pll.h"
#include "mdss-dsi-pll.h"
#include "mdss-dsi-pll-28nm.h"
#define DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG (0x0)
#define DSI_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG (0x0008)
#define DSI_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG (0x000C)
#define DSI_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG (0x0014)
#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG (0x0024)
#define DSI_PHY_PLL_UNIPHY_PLL_LPFR_CFG (0x002C)
#define DSI_PHY_PLL_UNIPHY_PLL_LPFC1_CFG (0x0030)
#define DSI_PHY_PLL_UNIPHY_PLL_LPFC2_CFG (0x0034)
#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0 (0x0038)
#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1 (0x003C)
#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2 (0x0040)
#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3 (0x0044)
#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG4 (0x0048)
#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG0 (0x004C)
#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG1 (0x0050)
#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG2 (0x0054)
#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG3 (0x0058)
#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG0 (0x006C)
#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG2 (0x0074)
#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG3 (0x0078)
#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG4 (0x007C)
#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG5 (0x0080)
#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG6 (0x0084)
#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG7 (0x0088)
#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG8 (0x008C)
#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG9 (0x0090)
#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG10 (0x0094)
#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG11 (0x0098)
#define DSI_PHY_PLL_UNIPHY_PLL_EFUSE_CFG (0x009C)
#define DSI_PHY_PLL_UNIPHY_PLL_STATUS (0x00C0)
#define DSI_PLL_POLL_DELAY_US 50
#define DSI_PLL_POLL_TIMEOUT_US 500
int analog_postdiv_reg_read(void *context, unsigned int reg,
unsigned int *div)
{
int rc = 0;
struct mdss_pll_resources *rsc = context;
rc = mdss_pll_resource_enable(rsc, true);
if (rc) {
pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
return rc;
}
*div = MDSS_PLL_REG_R(rsc->pll_base, reg);
pr_debug("analog_postdiv div = %d\n", *div);
(void)mdss_pll_resource_enable(rsc, false);
return rc;
}
int analog_postdiv_reg_write(void *context, unsigned int reg,
unsigned int div)
{
int rc = 0;
struct mdss_pll_resources *rsc = context;
rc = mdss_pll_resource_enable(rsc, true);
if (rc) {
pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
return rc;
}
pr_debug("analog_postdiv div = %d\n", div);
MDSS_PLL_REG_W(rsc->pll_base, reg, div);
(void)mdss_pll_resource_enable(rsc, false);
return rc;
}
int byteclk_mux_read_sel(void *context, unsigned int reg,
unsigned int *val)
{
int rc = 0;
struct mdss_pll_resources *rsc = context;
rc = mdss_pll_resource_enable(rsc, true);
if (rc) {
pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
return rc;
}
*val = (MDSS_PLL_REG_R(rsc->pll_base, reg) & BIT(1));
pr_debug("byteclk mux mode = %s\n", *val ? "indirect" : "direct");
(void)mdss_pll_resource_enable(rsc, false);
return rc;
}
int byteclk_mux_write_sel(void *context, unsigned int reg,
unsigned int val)
{
int rc = 0;
u32 reg_val = 0;
struct mdss_pll_resources *rsc = context;
rc = mdss_pll_resource_enable(rsc, true);
if (rc) {
pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
return rc;
}
pr_debug("byteclk mux set to %s mode\n", val ? "indirect" : "direct");
reg_val = MDSS_PLL_REG_R(rsc->pll_base, reg);
reg_val &= ~0x02;
reg_val |= val;
MDSS_PLL_REG_W(rsc->pll_base, reg, reg_val);
(void)mdss_pll_resource_enable(rsc, false);
return rc;
}
int pixel_clk_get_div(void *context, unsigned int reg,
unsigned int *div)
{
int rc = 0;
struct mdss_pll_resources *rsc = context;
rc = mdss_pll_resource_enable(rsc, true);
if (rc) {
pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
return rc;
}
*div = MDSS_PLL_REG_R(rsc->pll_base, reg);
pr_debug("pclk_src div = %d\n", *div);
(void)mdss_pll_resource_enable(rsc, false);
return rc;
}
int pixel_clk_set_div(void *context, unsigned int reg,
unsigned int div)
{
int rc = 0;
struct mdss_pll_resources *rsc = context;
rc = mdss_pll_resource_enable(rsc, true);
if (rc) {
pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
return rc;
}
pr_debug("pclk_src div = %d\n", div);
MDSS_PLL_REG_W(rsc->pll_base, reg, div);
(void)mdss_pll_resource_enable(rsc, false);
return rc;
}
int dsi_pll_lock_status(struct mdss_pll_resources *rsc)
{
u32 status;
int pll_locked;
/* poll for PLL ready status */
if (readl_poll_timeout_atomic((rsc->pll_base +
DSI_PHY_PLL_UNIPHY_PLL_STATUS),
status,
((status & BIT(0)) == 1),
DSI_PLL_POLL_DELAY_US,
DSI_PLL_POLL_TIMEOUT_US)) {
pr_debug("DSI PLL status=%x failed to Lock\n", status);
pll_locked = 0;
} else {
pll_locked = 1;
}
return pll_locked;
}
static int pll_28nm_vco_rate_calc(struct dsi_pll_vco_clk *vco,
struct mdss_dsi_vco_calc *vco_calc, unsigned long vco_clk_rate)
{
s32 rem;
s64 frac_n_mode, ref_doubler_en_b;
s64 ref_clk_to_pll, div_fb, frac_n_value;
int i;
/* Configure the Loop filter resistance */
for (i = 0; i < vco->lpfr_lut_size; i++)
if (vco_clk_rate <= vco->lpfr_lut[i].vco_rate)
break;
if (i == vco->lpfr_lut_size) {
pr_err("unable to get loop filter resistance. vco=%ld\n",
vco_clk_rate);
return -EINVAL;
}
vco_calc->lpfr_lut_res = vco->lpfr_lut[i].r;
div_s64_rem(vco_clk_rate, vco->ref_clk_rate, &rem);
if (rem) {
vco_calc->refclk_cfg = 0x1;
frac_n_mode = 1;
ref_doubler_en_b = 0;
} else {
vco_calc->refclk_cfg = 0x0;
frac_n_mode = 0;
ref_doubler_en_b = 1;
}
pr_debug("refclk_cfg = %lld\n", vco_calc->refclk_cfg);
ref_clk_to_pll = ((vco->ref_clk_rate * 2 * (vco_calc->refclk_cfg))
+ (ref_doubler_en_b * vco->ref_clk_rate));
div_fb = div_s64_rem(vco_clk_rate, ref_clk_to_pll, &rem);
frac_n_value = div_s64(((s64)rem * (1 << 16)), ref_clk_to_pll);
vco_calc->gen_vco_clk = vco_clk_rate;
pr_debug("ref_clk_to_pll = %lld\n", ref_clk_to_pll);
pr_debug("div_fb = %lld\n", div_fb);
pr_debug("frac_n_value = %lld\n", frac_n_value);
pr_debug("Generated VCO Clock: %lld\n", vco_calc->gen_vco_clk);
rem = 0;
if (frac_n_mode) {
vco_calc->sdm_cfg0 = 0;
vco_calc->sdm_cfg1 = (div_fb & 0x3f) - 1;
vco_calc->sdm_cfg3 = div_s64_rem(frac_n_value, 256, &rem);
vco_calc->sdm_cfg2 = rem;
} else {
vco_calc->sdm_cfg0 = (0x1 << 5);
vco_calc->sdm_cfg0 |= (div_fb & 0x3f) - 1;
vco_calc->sdm_cfg1 = 0;
vco_calc->sdm_cfg2 = 0;
vco_calc->sdm_cfg3 = 0;
}
pr_debug("sdm_cfg0=%lld\n", vco_calc->sdm_cfg0);
pr_debug("sdm_cfg1=%lld\n", vco_calc->sdm_cfg1);
pr_debug("sdm_cfg2=%lld\n", vco_calc->sdm_cfg2);
pr_debug("sdm_cfg3=%lld\n", vco_calc->sdm_cfg3);
vco_calc->cal_cfg11 = div_s64_rem(vco_calc->gen_vco_clk,
256 * 1000000, &rem);
vco_calc->cal_cfg10 = rem / 1000000;
pr_debug("cal_cfg10=%lld, cal_cfg11=%lld\n",
vco_calc->cal_cfg10, vco_calc->cal_cfg11);
return 0;
}
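/*
 * Worked example for the calculation above (illustrative numbers only):
 * a 750 MHz request is not an integer multiple of the 19.2 MHz
 * reference, so frac-N mode is selected and the doubled reference
 * (refclk_cfg = 0x1, ref_clk_to_pll = 38.4 MHz) feeds the PLL.
 * div_fb = 19 with a 20.4 MHz remainder, frac_n_value = 20.4 MHz *
 * 65536 / 38.4 MHz = 34816, hence sdm_cfg1 = 18, sdm_cfg3 = 136,
 * sdm_cfg2 = 0; cal_cfg11 = 2 and cal_cfg10 = 238
 * (750 = 2 * 256 + 238, in MHz).
 */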
static void pll_28nm_ssc_param_calc(struct dsi_pll_vco_clk *vco,
struct mdss_dsi_vco_calc *vco_calc)
{
struct mdss_pll_resources *rsc = vco->priv;
s64 ppm_freq, incr, spread_freq, div_rf, frac_n_value;
s32 rem;
if (!rsc->ssc_en) {
pr_debug("DSI PLL SSC not enabled\n");
return;
}
vco_calc->ssc.kdiv = DIV_ROUND_CLOSEST(vco->ref_clk_rate,
1000000) - 1;
vco_calc->ssc.triang_steps = DIV_ROUND_CLOSEST(vco->ref_clk_rate,
rsc->ssc_freq * (vco_calc->ssc.kdiv + 1));
ppm_freq = div_s64(vco_calc->gen_vco_clk * rsc->ssc_ppm,
1000000);
incr = div64_s64(ppm_freq * 65536, vco->ref_clk_rate * 2 *
vco_calc->ssc.triang_steps);
vco_calc->ssc.triang_inc_7_0 = incr & 0xff;
vco_calc->ssc.triang_inc_9_8 = (incr >> 8) & 0x3;
if (!rsc->ssc_center)
spread_freq = vco_calc->gen_vco_clk - ppm_freq;
else
spread_freq = vco_calc->gen_vco_clk - (ppm_freq / 2);
div_rf = div_s64(spread_freq, 2 * vco->ref_clk_rate);
vco_calc->ssc.dc_offset = (div_rf - 1);
div_s64_rem(spread_freq, 2 * vco->ref_clk_rate, &rem);
frac_n_value = div_s64((s64)rem * 65536, 2 * vco->ref_clk_rate);
vco_calc->ssc.freq_seed_7_0 = frac_n_value & 0xff;
vco_calc->ssc.freq_seed_15_8 = (frac_n_value >> 8) & 0xff;
}
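/*
 * SSC down-spreads the VCO rate (by the full ppm offset, or half of it
 * when ssc_center is set), which helps with EMI. kdiv is the reference
 * clock in MHz minus one (19.2 MHz -> 18), triang_steps follows from the
 * requested modulation frequency, and the spread rate is re-encoded into
 * the SDM_CFG1..3 fields (dc offset and frequency seed) plus the
 * SSC_CFG0..3 registers.
 */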
static void pll_28nm_vco_config(struct dsi_pll_vco_clk *vco,
struct mdss_dsi_vco_calc *vco_calc)
{
struct mdss_pll_resources *rsc = vco->priv;
void __iomem *pll_base = rsc->pll_base;
u32 vco_delay_us = rsc->vco_delay;
bool ssc_en = rsc->ssc_en;
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LPFR_CFG,
vco_calc->lpfr_lut_res);
/* Loop filter capacitance values : c1 and c2 */
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LPFC1_CFG, 0x70);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LPFC2_CFG, 0x15);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG, 0x02);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG3, 0x2b);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG4, 0x66);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x0d);
if (!ssc_en) {
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1,
(u32)(vco_calc->sdm_cfg1 & 0xff));
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2,
(u32)(vco_calc->sdm_cfg2 & 0xff));
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3,
(u32)(vco_calc->sdm_cfg3 & 0xff));
} else {
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1,
(u32)vco_calc->ssc.dc_offset);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2,
(u32)vco_calc->ssc.freq_seed_7_0);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3,
(u32)vco_calc->ssc.freq_seed_15_8);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG0,
(u32)vco_calc->ssc.kdiv);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG1,
(u32)vco_calc->ssc.triang_inc_7_0);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG2,
(u32)vco_calc->ssc.triang_inc_9_8);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG3,
(u32)vco_calc->ssc.triang_steps);
}
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG4, 0x00);
/* Add hardware recommended delay for correct PLL configuration */
if (vco_delay_us)
udelay(vco_delay_us);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG,
(u32)vco_calc->refclk_cfg);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG, 0x00);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG, 0x71);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0,
(u32)vco_calc->sdm_cfg0);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG0, 0x12);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG6, 0x30);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG7, 0x00);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG8, 0x60);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG9, 0x00);
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG10,
(u32)(vco_calc->cal_cfg10 & 0xff));
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG11,
(u32)(vco_calc->cal_cfg11 & 0xff));
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_EFUSE_CFG, 0x20);
MDSS_PLL_REG_W(pll_base,
DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG, 0x3); /* Fixed div-4 */
}
static int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate)
{
struct mdss_dsi_vco_calc vco_calc = {0};
int rc = 0;
rc = pll_28nm_vco_rate_calc(vco, &vco_calc, rate);
if (rc) {
pr_err("vco rate calculation failed\n");
return rc;
}
pll_28nm_ssc_param_calc(vco, &vco_calc);
pll_28nm_vco_config(vco, &vco_calc);
return 0;
}
static unsigned long vco_get_rate(struct dsi_pll_vco_clk *vco)
{
struct mdss_pll_resources *rsc = vco->priv;
int rc;
u32 sdm0, doubler, sdm_byp_div;
u64 vco_rate;
u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
u64 ref_clk = vco->ref_clk_rate;
rc = mdss_pll_resource_enable(rsc, true);
if (rc) {
pr_err("Failed to enable mdss dsi pll resources\n");
return rc;
}
/* Check to see if the ref clk doubler is enabled */
doubler = MDSS_PLL_REG_R(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG) & BIT(0);
ref_clk += (doubler * vco->ref_clk_rate);
/* see if it is integer mode or sdm mode */
sdm0 = MDSS_PLL_REG_R(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0);
if (sdm0 & BIT(6)) {
/* integer mode */
sdm_byp_div = (MDSS_PLL_REG_R(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0) & 0x3f) + 1;
vco_rate = ref_clk * sdm_byp_div;
} else {
/* sdm mode */
sdm_dc_off = MDSS_PLL_REG_R(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1) & 0xFF;
pr_debug("sdm_dc_off = %d\n", sdm_dc_off);
sdm2 = MDSS_PLL_REG_R(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2) & 0xFF;
sdm3 = MDSS_PLL_REG_R(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3) & 0xFF;
sdm_freq_seed = (sdm3 << 8) | sdm2;
pr_debug("sdm_freq_seed = %d\n", sdm_freq_seed);
vco_rate = (ref_clk * (sdm_dc_off + 1)) +
mult_frac(ref_clk, sdm_freq_seed, BIT(16));
pr_debug("vco rate = %lld\n", vco_rate);
}
pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
mdss_pll_resource_enable(rsc, false);
return (unsigned long)vco_rate;
}
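/*
 * This is the inverse of the frac-N programming above: e.g. with the
 * doubler bit set (ref_clk = 38.4 MHz), sdm_dc_off = 18 and
 * sdm_freq_seed = 34816 read back as 38.4 MHz * 19 +
 * 38.4 MHz * 34816 / 65536 = 750 MHz, matching the worked set_rate
 * example.
 */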
static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
{
int i, rc;
struct mdss_pll_resources *rsc = vco->priv;
rc = mdss_pll_resource_enable(rsc, true);
if (rc) {
pr_err("failed to enable dsi pll(%d) resources\n",
rsc->index);
return rc;
}
/* Try all enable sequences until one succeeds */
for (i = 0; i < vco->pll_en_seq_cnt; i++) {
rc = vco->pll_enable_seqs[i](rsc);
pr_debug("DSI PLL %s after sequence #%d\n",
rc ? "unlocked" : "locked", i + 1);
if (!rc)
break;
}
if (rc) {
mdss_pll_resource_enable(rsc, false);
pr_err("DSI PLL failed to lock\n");
}
rsc->pll_on = true;
return rc;
}
static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
{
struct mdss_pll_resources *rsc = vco->priv;
if (!rsc->pll_on &&
mdss_pll_resource_enable(rsc, true)) {
pr_err("failed to enable dsi pll(%d) resources\n",
rsc->index);
return;
}
rsc->handoff_resources = false;
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x00);
mdss_pll_resource_enable(rsc, false);
rsc->pll_on = false;
pr_debug("DSI PLL Disabled\n");
}
int vco_28nm_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
struct mdss_pll_resources *rsc = vco->priv;
int rc;
if (!rsc) {
pr_err("pll resource not found\n");
return -EINVAL;
}
if (rsc->pll_on)
return 0;
pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
rc = mdss_pll_resource_enable(rsc, true);
if (rc) {
pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
rsc->index, rc);
return rc;
}
/*
* DSI PLL software reset. Add HW recommended delays after toggling
* the software reset bit off and back on.
*/
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x01);
udelay(1000);
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x00);
udelay(1000);
rc = vco_set_rate(vco, rate);
rsc->vco_current_rate = rate;
mdss_pll_resource_enable(rsc, false);
return 0;
}
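/*
 * The requested rate is cached in vco_current_rate, so
 * vco_28nm_recalc_rate() below can report it without reading the
 * hardware while the PLL is under software control.
 */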
long vco_28nm_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
unsigned long rrate = rate;
struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
if (rate < vco->min_rate)
rrate = vco->min_rate;
if (rate > vco->max_rate)
rrate = vco->max_rate;
*parent_rate = rrate;
return rrate;
}
unsigned long vco_28nm_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
struct mdss_pll_resources *rsc = vco->priv;
int rc;
u64 vco_rate = 0;
if (!rsc) {
pr_err("dsi pll resources not available\n");
return 0;
}
if (rsc->vco_current_rate)
return (unsigned long)rsc->vco_current_rate;
if (is_gdsc_disabled(rsc))
return 0;
rc = mdss_pll_resource_enable(rsc, true);
if (rc) {
pr_err("failed to enable dsi pll(%d) resources\n",
rsc->index);
return 0;
}
if (dsi_pll_lock_status(rsc)) {
rsc->handoff_resources = true;
rsc->cont_splash_enabled = true;
rsc->pll_on = true;
vco_rate = vco_get_rate(vco);
} else {
mdss_pll_resource_enable(rsc, false);
}
return (unsigned long)vco_rate;
}
int vco_28nm_prepare(struct clk_hw *hw)
{
int rc = 0;
struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
struct mdss_pll_resources *rsc = vco->priv;
if (!rsc) {
pr_err("dsi pll resources not available\n");
return -EINVAL;
}
if ((rsc->vco_cached_rate != 0)
&& (rsc->vco_cached_rate == clk_hw_get_rate(hw))) {
rc = hw->init->ops->set_rate(hw, rsc->vco_cached_rate,
rsc->vco_cached_rate);
if (rc) {
pr_err("pll(%d ) set_rate failed. rc=%d\n",
rsc->index, rc);
goto error;
}
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG,
rsc->cached_postdiv1);
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG,
rsc->cached_postdiv3);
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG,
rsc->cached_vreg_cfg);
} else if (!rsc->handoff_resources && rsc->cont_splash_enabled) {
MDSS_PLL_REG_W(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG,
rsc->cached_vreg_cfg);
}
rc = dsi_pll_enable(vco);
error:
return rc;
}
void vco_28nm_unprepare(struct clk_hw *hw)
{
struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
struct mdss_pll_resources *rsc = vco->priv;
if (!rsc) {
pr_err("dsi pll resources not available\n");
return;
}
rsc->cached_postdiv1 = MDSS_PLL_REG_R(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG);
rsc->cached_postdiv3 = MDSS_PLL_REG_R(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG);
rsc->cached_vreg_cfg = MDSS_PLL_REG_R(rsc->pll_base,
DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG);
rsc->vco_cached_rate = clk_hw_get_rate(hw);
dsi_pll_disable(vco);
}
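/*
 * unprepare caches POSTDIV1/POSTDIV3/VREG_CFG and the current rate;
 * vco_28nm_prepare() restores them when the cached rate still matches,
 * so divider and mux settings survive a PLL power cycle.
 */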

View file

@@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2012-2018, 2021, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __MDSS_DSI_PLL_28NM_H
#define __MDSS_DSI_PLL_28NM_H
#define DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG (0x0020)
#define DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2 (0x0064)
#define DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG (0x0068)
#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1 (0x0070)
#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG (0x0004)
#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG (0x0028)
#define DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG (0x0010)
struct ssc_params {
s32 kdiv;
s64 triang_inc_7_0;
s64 triang_inc_9_8;
s64 triang_steps;
s64 dc_offset;
s64 freq_seed_7_0;
s64 freq_seed_15_8;
};
struct mdss_dsi_vco_calc {
s64 sdm_cfg0;
s64 sdm_cfg1;
s64 sdm_cfg2;
s64 sdm_cfg3;
s64 cal_cfg10;
s64 cal_cfg11;
s64 refclk_cfg;
s64 gen_vco_clk;
u32 lpfr_lut_res;
struct ssc_params ssc;
};
unsigned long vco_28nm_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate);
int vco_28nm_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
long vco_28nm_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate);
int vco_28nm_prepare(struct clk_hw *hw);
void vco_28nm_unprepare(struct clk_hw *hw);
int analog_postdiv_reg_write(void *context,
unsigned int reg, unsigned int div);
int analog_postdiv_reg_read(void *context,
unsigned int reg, unsigned int *div);
int byteclk_mux_write_sel(void *context,
unsigned int reg, unsigned int val);
int byteclk_mux_read_sel(void *context,
unsigned int reg, unsigned int *val);
int pixel_clk_set_div(void *context,
unsigned int reg, unsigned int div);
int pixel_clk_get_div(void *context,
unsigned int reg, unsigned int *div);
int dsi_pll_lock_status(struct mdss_pll_resources *rsc);
#endif /* __MDSS_DSI_PLL_28NM_H */

View file

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2012-2018, 2020 The Linux Foundation. All rights reserved. */
/* Copyright (c) 2012-2018, 2020-2021, The Linux Foundation. All rights reserved. */
#ifndef __MDSS_DSI_PLL_H
#define __MDSS_DSI_PLL_H
@@ -39,4 +39,8 @@ static inline struct dsi_pll_vco_clk *to_vco_clk_hw(struct clk_hw *hw)
int dsi_pll_clock_register_14nm(struct platform_device *pdev,
struct mdss_pll_resources *pll_res);
int dsi_pll_clock_register_28lpm(struct platform_device *pdev,
struct mdss_pll_resources *pll_res);
int dsi_pll_clock_register_12nm(struct platform_device *pdev,
struct mdss_pll_resources *pll_res);
#endif

View file

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved. */
#define pr_fmt(fmt) "%s: " fmt, __func__
@@ -128,7 +128,9 @@ static int mdss_pll_resource_parse(struct platform_device *pdev,
pll_res->pll_interface_type = MDSS_DP_PLL_14NM;
pll_res->target_id = MDSS_PLL_TARGET_SDM660;
pll_res->revision = 2;
} else
} else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_28lpm"))
pll_res->pll_interface_type = MDSS_DSI_PLL_28LPM;
else
goto err;
return rc;
@@ -156,6 +158,12 @@ static int mdss_pll_clock_register(struct platform_device *pdev,
case MDSS_DP_PLL_14NM:
rc = dp_pll_clock_register_14nm(pdev, pll_res);
break;
case MDSS_DSI_PLL_28LPM:
rc = dsi_pll_clock_register_28lpm(pdev, pll_res);
break;
case MDSS_DSI_PLL_12NM:
rc = dsi_pll_clock_register_12nm(pdev, pll_res);
break;
case MDSS_UNKNOWN_PLL:
default:
rc = -EINVAL;
@@ -386,6 +394,8 @@ static const struct of_device_id mdss_pll_dt_match[] = {
{.compatible = "qcom,mdss_dp_pll_14nm"},
{.compatible = "qcom,mdss_dsi_pll_sdm660"},
{.compatible = "qcom,mdss_dp_pll_sdm660"},
{.compatible = "qcom,mdss_dsi_pll_12nm"},
{.compatible = "qcom,mdss_dsi_pll_28lpm"},
{}
};

View file

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved. */
#ifndef __MDSS_PLL_H
#define __MDSS_PLL_H
@@ -146,6 +146,7 @@ struct mdss_pll_resources {
* feature is disabled.
*/
bool handoff_resources;
bool cont_splash_enabled;
/*
* caching the pll trim codes in the case of dynamic refresh
@@ -218,7 +219,8 @@ static inline bool is_gdsc_disabled(struct mdss_pll_resources *pll_res)
WARN(1, "gdsc_base register is not defined\n");
return true;
}
if (pll_res->target_id == MDSS_PLL_TARGET_SDM660)
if ((pll_res->target_id == MDSS_PLL_TARGET_SDM660) ||
(pll_res->pll_interface_type == MDSS_DSI_PLL_28LPM))
ret = ((readl_relaxed(pll_res->gdsc_base + 0x4) & BIT(31)) &&
(!(readl_relaxed(pll_res->gdsc_base) & BIT(0)))) ? false : true;
else

View file

@@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#ifndef __DRIVERS_CLK_QCOM_VDD_LEVEL_CPU_H
#define __DRIVERS_CLK_QCOM_VDD_LEVEL_CPU_H
#include <linux/regulator/rpm-smd-regulator.h>
#include <linux/regulator/consumer.h>
enum vdd_hf_pll_levels {
VDD_HF_PLL_OFF,
VDD_HF_PLL_SVS,
VDD_HF_PLL_NOM,
VDD_HF_PLL_TUR,
VDD_HF_PLL_NUM,
};
static int vdd_hf_levels[] = {
0, RPM_REGULATOR_LEVEL_NONE, /* VDD_HF_PLL_OFF */
1800000, RPM_REGULATOR_LEVEL_SVS, /* VDD_HF_PLL_SVS */
1800000, RPM_REGULATOR_LEVEL_NOM, /* VDD_HF_PLL_NOM */
1800000, RPM_REGULATOR_LEVEL_TURBO, /* VDD_HF_PLL_TUR */
};
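/*
 * Two entries per vdd_hf_pll_levels step: a supply voltage in microvolts
 * (1.8 V here) followed by the RPM corner vote, in the OFF/SVS/NOM/TUR
 * order of the enum above.
 */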
#endif

View file

@@ -0,0 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#ifndef __DRIVERS_CLK_QCOM_VDD_LEVEL_SDM429W_H
#define __DRIVERS_CLK_QCOM_VDD_LEVEL_SDM429W_H
#include <linux/regulator/rpm-smd-regulator.h>
#include <linux/regulator/consumer.h>
enum vdd_levels {
VDD_NONE,
VDD_LOW,
VDD_LOW_L1,
VDD_NOMINAL,
VDD_NOMINAL_L1,
VDD_HIGH,
VDD_NUM
};
static int vdd_corner[] = {
RPM_REGULATOR_LEVEL_NONE, /* VDD_NONE */
RPM_REGULATOR_LEVEL_SVS, /* VDD_LOW */
RPM_REGULATOR_LEVEL_SVS_PLUS, /* VDD_LOW_L1 */
RPM_REGULATOR_LEVEL_NOM, /* VDD_NOMINAL */
RPM_REGULATOR_LEVEL_NOM_PLUS, /* VDD_NOMINAL_L1 */
RPM_REGULATOR_LEVEL_TURBO, /* VDD_HIGH */
};
#endif

View file

@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2015,2017,2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2020 XiaoMi, Inc.
*/
#define pr_fmt(fmt) "cpu-boost: " fmt
@@ -41,16 +40,13 @@ struct cpu_sync {
int cpu;
unsigned int input_boost_min;
unsigned int input_boost_freq;
unsigned int powerkey_input_boost_freq;
};
static DEFINE_PER_CPU(struct cpu_sync, sync_info);
static struct workqueue_struct *cpu_boost_wq;
static struct work_struct input_boost_work;
static struct work_struct powerkey_input_boost_work;
static bool input_boost_enabled;
static unsigned int input_boost_ms = 40;
@@ -58,22 +54,11 @@ show_one(input_boost_ms);
store_one(input_boost_ms);
cpu_boost_attr_rw(input_boost_ms);
static unsigned int powerkey_input_boost_ms = 400;
show_one(powerkey_input_boost_ms);
store_one(powerkey_input_boost_ms);
cpu_boost_attr_rw(powerkey_input_boost_ms);
static unsigned int sched_boost_on_input;
show_one(sched_boost_on_input);
store_one(sched_boost_on_input);
cpu_boost_attr_rw(sched_boost_on_input);
static bool sched_boost_on_powerkey_input = true;
show_one(sched_boost_on_powerkey_input);
store_one(sched_boost_on_powerkey_input);
cpu_boost_attr_rw(sched_boost_on_powerkey_input);
static bool sched_boost_active;
static struct delayed_work input_boost_rem;
@@ -145,72 +130,6 @@ static ssize_t show_input_boost_freq(struct kobject *kobj,
}
cpu_boost_attr_rw(input_boost_freq);
static ssize_t store_powerkey_input_boost_freq(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int i, ntokens = 0;
unsigned int val, cpu;
const char *cp = buf;
bool enabled = false;
while ((cp = strpbrk(cp + 1, " :")))
ntokens++;
/* single number: apply to all CPUs */
if (!ntokens) {
if (sscanf(buf, "%u\n", &val) != 1)
return -EINVAL;
for_each_possible_cpu(i)
per_cpu(sync_info, i).powerkey_input_boost_freq = val;
goto check_enable;
}
/* CPU:value pair */
if (!(ntokens % 2))
return -EINVAL;
cp = buf;
for (i = 0; i < ntokens; i += 2) {
if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
return -EINVAL;
if (cpu >= num_possible_cpus())
return -EINVAL;
per_cpu(sync_info, cpu).powerkey_input_boost_freq = val;
cp = strnchr(cp, PAGE_SIZE - (cp - buf), ' ');
cp++;
}
check_enable:
for_each_possible_cpu(i) {
if (per_cpu(sync_info, i).powerkey_input_boost_freq) {
enabled = true;
break;
}
}
input_boost_enabled = enabled;
return count;
}
static ssize_t show_powerkey_input_boost_freq(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
int cnt = 0, cpu;
struct cpu_sync *s;
unsigned int boost_freq = 0;
for_each_possible_cpu(cpu) {
s = &per_cpu(sync_info, cpu);
boost_freq = s->powerkey_input_boost_freq;
cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
"%d:%u ", cpu, boost_freq);
}
cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
return cnt;
}
cpu_boost_attr_rw(powerkey_input_boost_freq);
/*
* The CPUFREQ_ADJUST notifier is used to override the current policy min to
@@ -318,40 +237,6 @@ static void do_input_boost(struct work_struct *work)
msecs_to_jiffies(input_boost_ms));
}
static void do_powerkey_input_boost(struct work_struct *work)
{
unsigned int i, ret;
struct cpu_sync *i_sync_info;
cancel_delayed_work_sync(&input_boost_rem);
if (sched_boost_active) {
sched_set_boost(0);
sched_boost_active = false;
}
/* Set the powerkey_input_boost_min for all CPUs in the system */
pr_debug("Setting powerkey input boost min for all CPUs\n");
for_each_possible_cpu(i) {
i_sync_info = &per_cpu(sync_info, i);
i_sync_info->input_boost_min = i_sync_info->powerkey_input_boost_freq;
}
/* Update policies for all online CPUs */
update_policy_online();
/* Enable scheduler boost to migrate tasks to big cluster */
if (sched_boost_on_powerkey_input) {
ret = sched_set_boost(1);
if (ret)
pr_err("cpu-boost: HMP boost enable failed\n");
else
sched_boost_active = true;
}
queue_delayed_work(cpu_boost_wq, &input_boost_rem,
msecs_to_jiffies(powerkey_input_boost_ms));
}
static void cpuboost_input_event(struct input_handle *handle,
unsigned int type, unsigned int code, int value)
{
@@ -367,11 +252,7 @@ static void cpuboost_input_event(struct input_handle *handle,
if (work_pending(&input_boost_work))
return;
if (type == EV_KEY && code == KEY_POWER) {
queue_work(cpu_boost_wq, &powerkey_input_boost_work);
} else {
queue_work(cpu_boost_wq, &input_boost_work);
}
queue_work(cpu_boost_wq, &input_boost_work);
last_input_time = ktime_to_us(ktime_get());
}
@@ -457,7 +338,6 @@ static int cpu_boost_init(void)
return -EFAULT;
INIT_WORK(&input_boost_work, do_input_boost);
INIT_WORK(&powerkey_input_boost_work, do_powerkey_input_boost);
INIT_DELAYED_WORK(&input_boost_rem, do_input_boost_rem);
for_each_possible_cpu(cpu) {
@@ -475,27 +355,15 @@ static int cpu_boost_init(void)
if (ret)
pr_err("Failed to create input_boost_ms node: %d\n", ret);
ret = sysfs_create_file(cpu_boost_kobj, &powerkey_input_boost_ms_attr.attr);
if (ret)
pr_err("Failed to create powerkey_input_boost_ms node: %d\n", ret);
ret = sysfs_create_file(cpu_boost_kobj, &input_boost_freq_attr.attr);
if (ret)
pr_err("Failed to create input_boost_freq node: %d\n", ret);
ret = sysfs_create_file(cpu_boost_kobj, &powerkey_input_boost_freq_attr.attr);
if (ret)
pr_err("Failed to create powerkey_input_boost_freq node: %d\n", ret);
ret = sysfs_create_file(cpu_boost_kobj,
&sched_boost_on_input_attr.attr);
if (ret)
pr_err("Failed to create sched_boost_on_input node: %d\n", ret);
ret = sysfs_create_file(cpu_boost_kobj,
&sched_boost_on_powerkey_input_attr.attr);
if (ret)
pr_err("Failed to create sched_boost_on_powerkey_input node: %d\n", ret);
ret = input_register_handler(&cpuboost_input_handler);
return 0;
}

View file

@@ -2270,9 +2270,6 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_INCOMPATIBLE, new_policy);
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_THERMAL, new_policy);
/*
* verify the cpu speed can be set within this limit, which might be
* different to the first one

View file

@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
* Copyright (C) 2020 XiaoMi, Inc.
*/
#include <linux/cpufreq.h>

View file

@@ -250,17 +250,17 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
if (!cpuidle_state_is_coupled(drv, index))
local_irq_enable();
diff = ktime_us_delta(time_end, time_start);
if (diff > INT_MAX)
diff = INT_MAX;
dev->last_residency = (int) diff;
if (entered_state >= 0) {
/*
* Update cpuidle counters
* This can be moved to within driver enter routine,
/* Update cpuidle counters */
/* This can be moved to within driver enter routine
* but that results in multiple copies of same code.
*/
diff = ktime_us_delta(time_end, time_start);
if (diff > INT_MAX)
diff = INT_MAX;
dev->last_residency = (int)diff;
dev->states_usage[entered_state].time += dev->last_residency;
dev->states_usage[entered_state].usage++;
} else {

View file

@@ -34,6 +34,7 @@
#include <soc/qcom/event_timer.h>
#include <soc/qcom/lpm_levels.h>
#include <soc/qcom/lpm-stats.h>
#include <soc/qcom/minidump.h>
#include <asm/arch_timer.h>
#include <asm/suspend.h>
#include <asm/cpuidle.h>
@@ -47,6 +48,30 @@
#define PSCI_POWER_STATE(reset) (reset << 30)
#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
enum {
MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
};
enum debug_event {
CPU_ENTER,
CPU_EXIT,
CLUSTER_ENTER,
CLUSTER_EXIT,
CPU_HP_STARTING,
CPU_HP_DYING,
};
struct lpm_debug {
u64 time;
enum debug_event evt;
int cpu;
uint32_t arg1;
uint32_t arg2;
uint32_t arg3;
uint32_t arg4;
};
static struct system_pm_ops *sys_pm_ops;
@@ -83,6 +108,9 @@ static bool suspend_in_progress;
static struct hrtimer lpm_hrtimer;
static DEFINE_PER_CPU(struct hrtimer, histtimer);
static DEFINE_PER_CPU(struct hrtimer, biastimer);
static struct lpm_debug *lpm_debug;
static phys_addr_t lpm_debug_phys;
static const int num_dbg_elements = 0x100;
static void cluster_unprepare(struct lpm_cluster *cluster,
const struct cpumask *cpu, int child_idx, bool from_idle,
@@ -254,10 +282,38 @@ int lpm_get_latency(struct latency_level *level, uint32_t *latency)
}
EXPORT_SYMBOL(lpm_get_latency);
static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
uint32_t arg2, uint32_t arg3, uint32_t arg4)
{
struct lpm_debug *dbg;
int idx;
static DEFINE_SPINLOCK(debug_lock);
static int pc_event_index;
if (!lpm_debug)
return;
spin_lock(&debug_lock);
idx = pc_event_index++;
dbg = &lpm_debug[idx & (num_dbg_elements - 1)];
dbg->evt = event;
dbg->time = arch_counter_get_cntvct();
dbg->cpu = raw_smp_processor_id();
dbg->arg1 = arg1;
dbg->arg2 = arg2;
dbg->arg3 = arg3;
dbg->arg4 = arg4;
spin_unlock(&debug_lock);
}
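/*
 * lpm_debug above is a power-of-two ring (num_dbg_elements = 0x100), so
 * masking the ever-increasing pc_event_index with (num_dbg_elements - 1)
 * wraps the write slot without a modulo.
 */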
static int lpm_dying_cpu(unsigned int cpu)
{
struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
update_debug_pc_event(CPU_HP_DYING, cpu,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], false);
cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
return 0;
}
@@ -266,6 +322,9 @@ static int lpm_starting_cpu(unsigned int cpu)
{
struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
update_debug_pc_event(CPU_HP_STARTING, cpu,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], false);
cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false,
0, true);
return 0;
@@ -1048,6 +1107,9 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
return -EPERM;
if (idx != cluster->default_level) {
update_debug_pc_event(CLUSTER_ENTER, idx,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], from_idle);
trace_cluster_enter(cluster->cluster_name, idx,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], from_idle);
@@ -1201,6 +1263,9 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
if (sys_pm_ops && sys_pm_ops->exit)
sys_pm_ops->exit(success);
update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], from_idle);
trace_cluster_exit(cluster->cluster_name, cluster->last_level,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], from_idle);
@@ -1312,11 +1377,15 @@ static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
state_id += power_state + affinity_level + cpu->levels[idx].psci_id;
update_debug_pc_event(CPU_ENTER, state_id,
0xdeaffeed, 0xdeaffeed, from_idle);
stop_critical_timings();
success = !arm_cpuidle_suspend(state_id);
start_critical_timings();
update_debug_pc_event(CPU_EXIT, state_id,
success, 0xdeaffeed, from_idle);
if (from_idle && cpu->levels[idx].use_bc_timer)
tick_broadcast_exit();
@@ -1704,9 +1773,11 @@ static const struct platform_s2idle_ops lpm_s2idle_ops = {
static int lpm_probe(struct platform_device *pdev)
{
int ret;
int size;
unsigned int cpu;
struct hrtimer *cpu_histtimer;
struct kobject *module_kobj = NULL;
struct md_region md_entry;
get_online_cpus();
lpm_root_node = lpm_of_parse_cluster(pdev);
@@ -1738,6 +1809,10 @@ static int lpm_probe(struct platform_device *pdev)
cluster_timer_init(lpm_root_node);
size = num_dbg_elements * sizeof(struct lpm_debug);
lpm_debug = dma_alloc_coherent(&pdev->dev, size,
&lpm_debug_phys, GFP_KERNEL);
register_cluster_lpm_stats(lpm_root_node, NULL);
ret = cluster_cpuidle_register(lpm_root_node);
@@ -1768,6 +1843,15 @@ static int lpm_probe(struct platform_device *pdev)
set_update_ipi_history_callback(update_ipi_history);
/* Add lpm_debug to Minidump */
strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
md_entry.virt_addr = (uintptr_t)lpm_debug;
md_entry.phys_addr = lpm_debug_phys;
md_entry.size = size;
md_entry.id = MINIDUMP_DEFAULT_ID;
if (msm_minidump_add_region(&md_entry) < 0)
pr_info("Failed to add lpm_debug in Minidump\n");
return 0;
failed:
free_cluster_node(lpm_root_node);

View file

@@ -58,7 +58,6 @@
#define ICE_CRYPTO_CXT_FDE 1
#define ICE_CRYPTO_CXT_FBE 2
#define ICE_INSTANCE_TYPE_LENGTH 12
static int ice_fde_flag;
@@ -584,33 +583,37 @@ static int register_ice_device(struct ice_device *ice_dev)
unsigned int baseminor = 0;
unsigned int count = 1;
struct device *class_dev;
char ice_type[ICE_INSTANCE_TYPE_LENGTH];
if (!strcmp(ice_dev->ice_instance_type, "sdcc"))
strlcpy(ice_type, QCOM_SDCC_ICE_DEV, sizeof(ice_type));
else if (!strcmp(ice_dev->ice_instance_type, "ufscard"))
strlcpy(ice_type, QCOM_UFS_CARD_ICE_DEV, sizeof(ice_type));
else
strlcpy(ice_type, QCOM_UFS_ICE_DEV, sizeof(ice_type));
int is_sdcc_ice = !strcmp(ice_dev->ice_instance_type, "sdcc");
int is_ufscard_ice = !strcmp(ice_dev->ice_instance_type, "ufscard");
rc = alloc_chrdev_region(&ice_dev->device_no, baseminor, count,
ice_type);
is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
if (rc < 0) {
pr_err("alloc_chrdev_region failed %d for %s\n", rc,
ice_type);
is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
return rc;
}
ice_dev->driver_class = class_create(THIS_MODULE, ice_type);
ice_dev->driver_class = class_create(THIS_MODULE,
is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
if (IS_ERR(ice_dev->driver_class)) {
rc = -ENOMEM;
pr_err("class_create failed %d for %s\n", rc, ice_type);
pr_err("class_create failed %d for %s\n", rc,
is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
goto exit_unreg_chrdev_region;
}
class_dev = device_create(ice_dev->driver_class, NULL,
ice_dev->device_no, NULL, ice_type);
ice_dev->device_no, NULL,
is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
if (!class_dev) {
pr_err("class_device_create failed %d for %s\n", rc, ice_type);
pr_err("class_device_create failed %d for %s\n", rc,
is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
rc = -ENOMEM;
goto exit_destroy_class;
}
@@ -620,7 +623,9 @@ static int register_ice_device(struct ice_device *ice_dev)
rc = cdev_add(&ice_dev->cdev, MKDEV(MAJOR(ice_dev->device_no), 0), 1);
if (rc < 0) {
pr_err("cdev_add failed %d for %s\n", rc, ice_type);
pr_err("cdev_add failed %d for %s\n", rc,
is_sdcc_ice ? QCOM_SDCC_ICE_DEV : is_ufscard_ice ?
QCOM_UFS_CARD_ICE_DEV : QCOM_UFS_ICE_DEV);
goto exit_destroy_device;
}
return 0;
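/*
 * Illustrative sketch, not part of this commit: the nested ternary above is
 * repeated at every call site in register_ice_device(). Assuming the same
 * QCOM_*_ICE_DEV macros and ice_instance_type strings used in this file, the
 * name selection could be done once by a helper along these lines
 * (ice_get_dev_name is a hypothetical name):
 */
static const char *ice_get_dev_name(struct ice_device *ice_dev)
{
        if (!strcmp(ice_dev->ice_instance_type, "sdcc"))
                return QCOM_SDCC_ICE_DEV;
        if (!strcmp(ice_dev->ice_instance_type, "ufscard"))
                return QCOM_UFS_CARD_ICE_DEV;
        return QCOM_UFS_ICE_DEV;
}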

View file

@ -2,7 +2,7 @@
/*
* QTI Crypto Engine driver.
*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__
@ -922,6 +922,11 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
break;
case CIPHER_ALG_3DES:
if (creq->mode != QCE_MODE_ECB) {
if (ivsize > MAX_IV_LENGTH) {
pr_err("%s: error: Invalid length parameter\n",
__func__);
return -EINVAL;
}
_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
pce = cmdlistinfo->encr_cntr_iv;
pce->data = enciv32[0];
@ -970,6 +975,11 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
}
}
if (creq->mode != QCE_MODE_ECB) {
if (ivsize > MAX_IV_LENGTH) {
pr_err("%s: error: Invalid length parameter\n",
__func__);
return -EINVAL;
}
if (creq->mode == QCE_MODE_XTS)
_byte_stream_swap_to_net_words(enciv32,
creq->iv, ivsize);

View file

@ -1400,9 +1400,7 @@ static int __init devfreq_init(void)
return PTR_ERR(devfreq_class);
}
devfreq_wq = alloc_workqueue("devfreq_wq",
WQ_HIGHPRI | WQ_UNBOUND | WQ_FREEZABLE |
WQ_MEM_RECLAIM, 0);
devfreq_wq = create_freezable_workqueue("devfreq_wq");
if (!devfreq_wq) {
class_destroy(devfreq_class);
pr_err("%s: couldn't create workqueue\n", __FILE__);

View file

@ -124,6 +124,36 @@ struct dma_fence *sync_file_get_fence(int fd)
}
EXPORT_SYMBOL(sync_file_get_fence);
/**
* sync_file_get_name - get the name of the sync_file
* @sync_file: sync_file to get the name from
* @buf: destination buffer to copy sync_file name into
* @len: available size of destination buffer.
*
* Each sync_file may have a name assigned either by the user (when merging
* sync_files together) or created from the fence it contains. In the latter
* case construction of the name is deferred until use, and so requires
* sync_file_get_name().
*
* Returns: a string representing the name.
*/
char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
{
if (sync_file->user_name[0]) {
strlcpy(buf, sync_file->user_name, len);
} else {
struct dma_fence *fence = sync_file->fence;
snprintf(buf, len, "%s-%s%llu-%d",
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
fence->context,
fence->seqno);
}
return buf;
}
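/*
 * Illustrative caller, not part of this commit: a user of the helper above
 * supplies its own buffer, typically sized like the UAPI sync_file_info name
 * field. For a sync_file that was never explicitly named, the result has the
 * form "<driver>-<timeline><context>-<seqno>", built from the fence ops.
 * (example_print_sync_file_name is a hypothetical helper.)
 */
static void example_print_sync_file_name(struct sync_file *sync_file)
{
        char name[32];

        sync_file_get_name(sync_file, name, sizeof(name));
        pr_debug("sync_file name: %s\n", name);
}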
static int sync_file_set_fence(struct sync_file *sync_file,
struct dma_fence **fences, int num_fences)
{
@ -186,7 +216,7 @@ static void add_fence(struct dma_fence **fences,
* @a and @b. @a and @b remain valid, independent sync_file. Returns the
* new merged sync_file or NULL in case of error.
*/
static struct sync_file *sync_file_merge(struct sync_file *a,
static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
struct sync_file *b)
{
struct sync_file *sync_file;
@ -261,6 +291,7 @@ static struct sync_file *sync_file_merge(struct sync_file *a,
goto err;
}
strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
return sync_file;
err:
@ -304,14 +335,11 @@ static long sync_file_ioctl_merge(struct sync_file *sync_file,
int err;
struct sync_file *fence2, *fence3;
struct sync_merge_data data;
size_t len;
if (fd < 0)
return fd;
arg += offsetof(typeof(data), fd2);
len = sizeof(data) - offsetof(typeof(data), fd2);
if (copy_from_user(&data.fd2, (void __user *)arg, len)) {
if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
err = -EFAULT;
goto err_put_fd;
}
@ -327,14 +355,15 @@ static long sync_file_ioctl_merge(struct sync_file *sync_file,
goto err_put_fd;
}
fence3 = sync_file_merge(sync_file, fence2);
data.name[sizeof(data.name) - 1] = '\0';
fence3 = sync_file_merge(data.name, sync_file, fence2);
if (!fence3) {
err = -ENOMEM;
goto err_put_fence2;
}
data.fence = fd;
if (copy_to_user((void __user *)arg, &data.fd2, len)) {
if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
err = -EFAULT;
goto err_put_fence3;
}
@ -357,6 +386,11 @@ static long sync_file_ioctl_merge(struct sync_file *sync_file,
static int sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
sizeof(info->obj_name));
strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
sizeof(info->driver_name));
info->status = dma_fence_get_status(fence);
while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
@ -373,13 +407,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
unsigned long arg)
{
struct sync_file_info info;
struct sync_fence_info *fence_info = NULL;
struct dma_fence **fences;
size_t len, offset;
int num_fences, i;
__u32 size;
int num_fences, ret, i;
arg += offsetof(typeof(info), status);
len = sizeof(info) - offsetof(typeof(info), status);
if (copy_from_user(&info.status, (void __user *)arg, len))
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
return -EFAULT;
if (info.flags || info.pad)
@ -403,31 +436,35 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
if (info.num_fences < num_fences)
return -EINVAL;
offset = offsetof(struct sync_fence_info, status);
for (i = 0; i < num_fences; i++) {
struct {
__s32 status;
__u32 flags;
__u64 timestamp_ns;
} fence_info;
struct sync_fence_info *finfo = (void *)&fence_info - offset;
int status = sync_fill_fence_info(fences[i], finfo);
u64 dest;
size = num_fences * sizeof(*fence_info);
fence_info = kzalloc(size, GFP_KERNEL);
if (!fence_info)
return -ENOMEM;
/* Don't leak kernel memory to userspace via finfo->flags */
finfo->flags = 0;
for (i = 0; i < num_fences; i++) {
int status = sync_fill_fence_info(fences[i], &fence_info[i]);
info.status = info.status <= 0 ? info.status : status;
dest = info.sync_fence_info + i * sizeof(*finfo) + offset;
if (copy_to_user(u64_to_user_ptr(dest), &fence_info,
sizeof(fence_info)))
return -EFAULT;
}
if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
size)) {
ret = -EFAULT;
goto out;
}
no_fences:
sync_file_get_name(sync_file, info.name, sizeof(info.name));
info.num_fences = num_fences;
if (copy_to_user((void __user *)arg, &info.status, len))
return -EFAULT;
return 0;
if (copy_to_user((void __user *)arg, &info, sizeof(info)))
ret = -EFAULT;
else
ret = 0;
out:
kfree(fence_info);
return ret;
}
static long sync_file_ioctl(struct file *file, unsigned int cmd,

View file

@ -739,6 +739,7 @@ static int device_pca953x_init(struct pca953x_chip *chip, u32 invert)
memset(val, 0, NBANK(chip));
ret = pca953x_write_regs(chip, PCA953X_INVERT, val);
pr_err("device_pca953x_init %d\n",ret);
out:
return ret;
}
@ -771,7 +772,10 @@ static int device_pca957x_init(struct pca953x_chip *chip, u32 invert)
memset(val, 0x02, NBANK(chip));
ret = pca953x_write_regs(chip, PCA957X_BKEN, val);
if (ret)
{
goto out;
}
pr_err("device_pca957x_init %d\n",ret);
return 0;
out:
@ -788,7 +792,8 @@ static int pca953x_probe(struct i2c_client *client,
int irq_base = 0;
int ret;
u32 invert = 0;
struct regulator *reg;
//struct regulator *reg;
dev_err(&client->dev,"pca953x_probe\n");
chip = devm_kzalloc(&client->dev,
sizeof(struct pca953x_chip), GFP_KERNEL);
@ -822,7 +827,7 @@ static int pca953x_probe(struct i2c_client *client,
chip->client = client;
reg = devm_regulator_get(&client->dev, "vcc");
/*reg = devm_regulator_get(&client->dev, "vcc");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
if (ret != -EPROBE_DEFER)
@ -834,7 +839,7 @@ static int pca953x_probe(struct i2c_client *client,
dev_err(&client->dev, "reg en err: %d\n", ret);
return ret;
}
chip->regulator = reg;
chip->regulator = reg;*/
if (i2c_id) {
chip->driver_data = i2c_id->driver_data;
@ -919,7 +924,7 @@ static int pca953x_probe(struct i2c_client *client,
return 0;
err_exit:
regulator_disable(chip->regulator);
//regulator_disable(chip->regulator);
return ret;
}
@ -939,7 +944,7 @@ static int pca953x_remove(struct i2c_client *client)
ret = 0;
}
regulator_disable(chip->regulator);
//regulator_disable(chip->regulator);
return ret;
}

View file

@ -3,6 +3,7 @@
* Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
@ -27,7 +28,7 @@
/* Include the master list of GPU cores that are supported */
#include "adreno-gpulist.h"
static void adreno_pwr_on_work(struct work_struct *work);
static void adreno_input_work(struct work_struct *work);
static unsigned int counter_delta(struct kgsl_device *device,
unsigned int reg, unsigned int *counter);
@ -56,6 +57,8 @@ static struct adreno_device device_3d0 = {
.ft_policy = KGSL_FT_DEFAULT_POLICY,
.ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY,
.long_ib_detect = 1,
.input_work = __WORK_INITIALIZER(device_3d0.input_work,
adreno_input_work),
.pwrctrl_flag = BIT(ADRENO_THROTTLING_CTRL) | BIT(ADRENO_HWCG_CTRL),
.profile.enabled = false,
.active_list = LIST_HEAD_INIT(device_3d0.active_list),
@ -67,8 +70,6 @@ static struct adreno_device device_3d0 = {
.skipsaverestore = 1,
.usesgmem = 1,
},
.pwr_on_work = __WORK_INITIALIZER(device_3d0.pwr_on_work,
adreno_pwr_on_work),
};
/* Ptr to array for the current set of fault detect registers */
@ -90,6 +91,9 @@ static unsigned int adreno_ft_regs_default[] = {
/* Nice level for the higher priority GPU start thread */
int adreno_wake_nice = -7;
/* Number of milliseconds to stay active after a wake on touch */
unsigned int adreno_wake_timeout = 100;
void adreno_reglist_write(struct adreno_device *adreno_dev,
const struct adreno_reglist *list, u32 count)
{
@ -353,17 +357,152 @@ void adreno_gmu_send_nmi(struct adreno_device *adreno_dev)
wmb();
}
static void adreno_pwr_on_work(struct work_struct *work)
/*
* A workqueue callback responsible for actually turning on the GPU after a
* touch event. kgsl_pwrctrl_change_state(ACTIVE) is used without any
* active_count protection to avoid the need to maintain state. Either
* somebody will start using the GPU or the idle timer will fire and put the
* GPU back into slumber.
*/
static void adreno_input_work(struct work_struct *work)
{
struct adreno_device *adreno_dev =
container_of(work, typeof(*adreno_dev), pwr_on_work);
struct adreno_device *adreno_dev = container_of(work,
struct adreno_device, input_work);
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
mutex_lock(&device->mutex);
device->flags |= KGSL_FLAG_WAKE_ON_TOUCH;
/*
* Don't schedule adreno_start in a high-priority workqueue; we are
* already in a workqueue, which should be sufficient
*/
kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
/*
* When waking up from a touch event we want to stay active long enough
* for the user to send a draw command. The default idle timer timeout
* is shorter than we want, so go ahead and push the idle timer out
* further for this special case.
*/
mod_timer(&device->idle_timer,
jiffies + msecs_to_jiffies(adreno_wake_timeout));
mutex_unlock(&device->mutex);
}
/*
* Process input events and schedule work if needed. At this point we are only
* interested in grokking EV_ABS touchscreen events
*/
static void adreno_input_event(struct input_handle *handle, unsigned int type,
unsigned int code, int value)
{
struct kgsl_device *device = handle->handler->private;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
/* Only consider EV_ABS (touch) events */
if (type != EV_ABS)
return;
/*
* Don't do anything if nothing has been rendered since the last time
* we were here
*/
if (device->flags & KGSL_FLAG_WAKE_ON_TOUCH)
return;
/*
* If the device is in nap, kick the idle timer to make sure that we
* don't go into slumber before the first render. If the device is
* already in slumber schedule the wake.
*/
if (device->state == KGSL_STATE_NAP) {
/*
* Set the wake on touch bit to keep from coming back here and
* keeping the device in nap without rendering
*/
device->flags |= KGSL_FLAG_WAKE_ON_TOUCH;
mod_timer(&device->idle_timer,
jiffies + device->pwrctrl.interval_timeout);
} else if (device->state == KGSL_STATE_SLUMBER) {
schedule_work(&adreno_dev->input_work);
}
}
#ifdef CONFIG_INPUT
static int adreno_input_connect(struct input_handler *handler,
struct input_dev *dev, const struct input_device_id *id)
{
struct input_handle *handle;
int ret;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (handle == NULL)
return -ENOMEM;
handle->dev = dev;
handle->handler = handler;
handle->name = handler->name;
ret = input_register_handle(handle);
if (ret) {
kfree(handle);
return ret;
}
ret = input_open_device(handle);
if (ret) {
input_unregister_handle(handle);
kfree(handle);
}
return ret;
}
static void adreno_input_disconnect(struct input_handle *handle)
{
input_close_device(handle);
input_unregister_handle(handle);
kfree(handle);
}
#else
static int adreno_input_connect(struct input_handler *handler,
struct input_dev *dev, const struct input_device_id *id)
{
return 0;
}
static void adreno_input_disconnect(struct input_handle *handle) {}
#endif
/*
* We are only interested in EV_ABS events so only register handlers for those
* input devices that have EV_ABS events
*/
static const struct input_device_id adreno_input_ids[] = {
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
.evbit = { BIT_MASK(EV_ABS) },
/* assumption: MT_.._X & MT_.._Y are in the same long */
.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
BIT_MASK(ABS_MT_POSITION_X) |
BIT_MASK(ABS_MT_POSITION_Y) },
},
{ },
};
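/*
 * The "same long" assumption above, worked out (illustration, not part of
 * this commit): ABS_MT_POSITION_X is 0x35 (53) and ABS_MT_POSITION_Y is
 * 0x36 (54), so BIT_WORD(53) == BIT_WORD(54) for both 32-bit and 64-bit
 * longs; both masks land in the same absbit word and can be OR'ed into one
 * initializer.
 */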
static struct input_handler adreno_input_handler = {
.event = adreno_input_event,
.connect = adreno_input_connect,
.disconnect = adreno_input_disconnect,
.name = "kgsl",
.id_table = adreno_input_ids,
};
/*
* _soft_reset() - Soft reset GPU
* @adreno_dev: Pointer to adreno device
@ -887,6 +1026,7 @@ static void adreno_of_get_initial_pwrlevel(struct adreno_device *adreno_dev,
init_level = 1;
pwr->active_pwrlevel = init_level;
pwr->default_pwrlevel = init_level;
}
static void adreno_of_get_limits(struct adreno_device *adreno_dev,
@ -1070,13 +1210,16 @@ static int adreno_of_get_power(struct adreno_device *adreno_dev,
device->pwrctrl.pm_qos_wakeup_latency = 101;
if (of_property_read_u32(node, "qcom,idle-timeout", &timeout))
timeout = 58;
timeout = 80;
device->pwrctrl.interval_timeout = msecs_to_jiffies(timeout);
device->pwrctrl.bus_control = of_property_read_bool(node,
"qcom,bus-control");
device->pwrctrl.input_disable = of_property_read_bool(node,
"qcom,disable-wake-on-touch");
return 0;
}
@ -1407,6 +1550,21 @@ static int adreno_probe(struct platform_device *pdev)
dev_warn(device->dev,
"Failed to get gpuhtw LLC slice descriptor %ld\n",
PTR_ERR(adreno_dev->gpuhtw_llc_slice));
#ifdef CONFIG_INPUT
if (!device->pwrctrl.input_disable) {
adreno_input_handler.private = device;
/*
* It isn't fatal if we cannot register the input handler. Sad,
* perhaps, but not fatal
*/
if (input_register_handler(&adreno_input_handler)) {
adreno_input_handler.private = NULL;
dev_err(device->dev,
"Unable to register the input handler\n");
}
}
#endif
out:
if (status) {
adreno_ringbuffer_close(adreno_dev);
@ -1462,6 +1620,10 @@ static int adreno_remove(struct platform_device *pdev)
/* The memory is fading */
_adreno_free_memories(adreno_dev);
#ifdef CONFIG_INPUT
if (adreno_input_handler.private)
input_unregister_handler(&adreno_input_handler);
#endif
adreno_sysfs_close(adreno_dev);
adreno_coresight_remove(adreno_dev);
@ -3871,6 +4033,19 @@ static bool adreno_is_hwcg_on(struct kgsl_device *device)
return test_bit(ADRENO_HWCG_CTRL, &adreno_dev->pwrctrl_flag);
}
u32 adreno_get_ucode_version(const u32 *data)
{
u32 version;
version = data[1];
if ((version & 0xf) != 0xa)
return version;
version &= ~0xfff;
return version | ((data[3] & 0xfff000) >> 12);
}
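/*
 * Worked example with made-up values, for illustration only: if
 * data[1] = 0x0201600a, the low nibble is 0xa, so the low 12 bits are
 * replaced from data[3]; with data[3] = 0x00123000 the result is
 * (0x0201600a & ~0xfff) | ((0x00123000 & 0xfff000) >> 12) = 0x02016123.
 */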
static const struct kgsl_functable adreno_functable = {
/* Mandatory functions */
.regread = adreno_regread,

View file

@ -279,8 +279,8 @@ enum adreno_preempt_states {
/**
* struct adreno_preemption
* @state: The current state of preemption
* @counters: Memory descriptor for the memory where the GPU writes the
* preemption counters on switch
* @scratch: Memory descriptor for the memory where the GPU writes the
* current ctxt record address and preemption counters on switch
* @timer: A timer to make sure preemption doesn't stall
* @work: A work struct for the preemption worker (for 5XX)
* @preempt_level: The level of preemption (for 6XX)
@ -290,7 +290,7 @@ enum adreno_preempt_states {
*/
struct adreno_preemption {
atomic_t state;
struct kgsl_memdesc counters;
struct kgsl_memdesc scratch;
struct timer_list timer;
struct work_struct work;
unsigned int preempt_level;
@ -428,7 +428,7 @@ enum gpu_coresight_sources {
* @dispatcher: Container for adreno GPU dispatcher
* @pwron_fixup: Command buffer to run a post-power collapse shader workaround
* @pwron_fixup_dwords: Number of dwords in the command buffer
* @pwr_on_work: Work struct for turning on the GPU
* @input_work: Work struct for turning on the GPU after a touch event
* @busy_data: Struct holding GPU VBIF busy stats
* @ram_cycles_lo: Number of DDR clock cycles for the monitor session (Only
* DDR channel 0 read cycles in case of GBIF)
@ -508,7 +508,7 @@ struct adreno_device {
struct adreno_dispatcher dispatcher;
struct kgsl_memdesc pwron_fixup;
unsigned int pwron_fixup_dwords;
struct work_struct pwr_on_work;
struct work_struct input_work;
struct adreno_busy_data busy_data;
unsigned int ram_cycles_lo;
unsigned int ram_cycles_lo_ch1_read;
@ -896,6 +896,7 @@ struct adreno_gpudev {
struct adreno_irq *irq;
int num_prio_levels;
int cp_rb_cntl;
unsigned int vbif_xin_halt_ctrl0_mask;
unsigned int gbif_client_halt_mask;
unsigned int gbif_arb_halt_mask;
@ -1042,6 +1043,7 @@ extern struct adreno_gpudev adreno_a5xx_gpudev;
extern struct adreno_gpudev adreno_a6xx_gpudev;
extern int adreno_wake_nice;
extern unsigned int adreno_wake_timeout;
int adreno_start(struct kgsl_device *device, int priority);
int adreno_soft_reset(struct kgsl_device *device);
@ -1123,6 +1125,7 @@ void adreno_rscc_regread(struct adreno_device *adreno_dev,
unsigned int offsetwords, unsigned int *value);
void adreno_isense_regread(struct adreno_device *adreno_dev,
unsigned int offsetwords, unsigned int *value);
u32 adreno_get_ucode_version(const u32 *data);
#define ADRENO_TARGET(_name, _id) \

View file

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2012-2019,2021, The Linux Foundation. All rights reserved.
*/
#include <linux/clk/qcom.h>
@ -1094,8 +1094,14 @@ struct {
{ A3XX_CP_PROTECT_REG_0 + 13, 0x0cc0, 0 },
/* VBIF */
{ A3XX_CP_PROTECT_REG_0 + 14, 0x3000, 6 },
/* SMMU */
{ A3XX_CP_PROTECT_REG_0 + 15, 0xa000, 12 },
/*
* SMMU
* For A3xx, the base offset of the SMMU region is 0xa000 and its length
* is 0x1000 bytes. The offset must be given in dwords and the length
* field must be ilog2(length in dwords).
* 0xa000 >> 2 = 0x2800, ilog2(0x1000 >> 2) = 10.
*/
{ A3XX_CP_PROTECT_REG_0 + 15, 0x2800, 10 },
/* There are no remaining protected mode registers for a3xx */
};
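/*
 * Illustrative helpers, not part of this commit, spelling out the encoding
 * the comment above describes (the helper names are hypothetical):
 */
static inline u32 a3xx_protect_base_dwords(u32 byte_base)
{
        return byte_base >> 2;          /* 0xa000 -> 0x2800 */
}

static inline u32 a3xx_protect_len_field(u32 byte_len)
{
        return ilog2(byte_len >> 2);    /* 0x1000 -> ilog2(0x400) = 10 */
}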

View file

@ -1724,12 +1724,15 @@ static int a5xx_post_start(struct adreno_device *adreno_dev)
*cmds++ = 0xF;
}
if (adreno_is_preemption_enabled(adreno_dev))
if (adreno_is_preemption_enabled(adreno_dev)) {
cmds += _preemption_init(adreno_dev, rb, cmds, NULL);
rb->_wptr = rb->_wptr - (42 - (cmds - start));
ret = adreno_ringbuffer_submit_spin_nosync(rb, NULL, 2000);
} else {
rb->_wptr = rb->_wptr - (42 - (cmds - start));
ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
}
rb->_wptr = rb->_wptr - (42 - (cmds - start));
ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
if (ret)
adreno_spin_idle_debug(adreno_dev,
"hw initialization failed to idle\n");
@ -2038,7 +2041,7 @@ static int _load_firmware(struct kgsl_device *device, const char *fwfile,
memcpy(firmware->memdesc.hostptr, &fw->data[4], fw->size - 4);
firmware->size = (fw->size - 4) / sizeof(uint32_t);
firmware->version = *(unsigned int *)&fw->data[4];
firmware->version = adreno_get_ucode_version((u32 *)fw->data);
done:
release_firmware(fw);

View file

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2015-2017,2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2015-2017,2019-2020 The Linux Foundation. All rights reserved.
*/
#ifndef _ADRENO_A5XX_H_
@ -134,7 +134,7 @@ void a5xx_crashdump_init(struct adreno_device *adreno_dev);
void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on);
#define A5XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \
#define A5XX_CP_RB_CNTL_DEFAULT ((1 << 27) | ((ilog2(4) << 8) & 0x1F00) | \
(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F))
/* GPMU interrupt multiplexor */
#define FW_INTR_INFO (0)

View file

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2017,2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2014-2017,2019-2020 The Linux Foundation. All rights reserved.
*/
#include "adreno.h"
@ -570,7 +570,7 @@ static void _preemption_close(struct adreno_device *adreno_dev)
unsigned int i;
del_timer(&preempt->timer);
kgsl_free_global(device, &preempt->counters);
kgsl_free_global(device, &preempt->scratch);
a5xx_preemption_iommu_close(adreno_dev);
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
@ -604,14 +604,14 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev)
timer_setup(&preempt->timer, _a5xx_preemption_timer, 0);
/* Allocate mem for storing preemption counters */
ret = kgsl_allocate_global(device, &preempt->counters,
ret = kgsl_allocate_global(device, &preempt->scratch,
adreno_dev->num_ringbuffers *
A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0,
"preemption_counters");
if (ret)
goto err;
addr = preempt->counters.gpuaddr;
addr = preempt->scratch.gpuaddr;
/* Allocate mem for storing preemption switch record */
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {

Some files were not shown because too many files have changed in this diff.