diff --git a/Android.bp b/Android.bp index 1fd921a0e88c..3d5382bfbd47 100644 --- a/Android.bp +++ b/Android.bp @@ -9,9 +9,9 @@ cc_binary_host { genrule { name: "gen-headers_install.sh", srcs: ["scripts/headers_install.sh"], - tools: ["unifdef"], out: ["headers_install.sh"], - cmd: "sed 's+scripts/unifdef+$(location unifdef)+g' $(in) > $(out)", + // (Ie3b8c36b7d60bd950c28bac566e04f43de78cf98,b/178500203) + cmd: "sed 's+scripts/unifdef+$$LOC_UNIFDEF+g' $(in) > $(out)", } cc_prebuilt_binary { diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 4fa822cb561e..52c6080bc5cf 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -483,16 +483,21 @@ ccw_timeout_log [S390] See Documentation/s390/CommonIO for details. - cgroup_disable= [KNL] Disable a particular controller - Format: {name of the controller(s) to disable} + cgroup_disable= [KNL] Disable a particular controller or optional feature + Format: {name of the controller(s) or feature(s) to disable} The effects of cgroup_disable=foo are: - foo isn't auto-mounted if you mount all cgroups in a single hierarchy - foo isn't visible as an individually mountable subsystem + - if foo is an optional feature then the feature is + disabled and corresponding cgroup files are not + created {Currently only "memory" controller deal with this and cut the overhead, others just disable the usage. So only cgroup_disable=memory is actually worthy} + Specifying "pressure" disables per-cgroup pressure + stall information accounting feature cgroup_no_v1= [KNL] Disable cgroup controllers and named hierarchies in v1 Format: { { controller | "all" | "named" } diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index ba12088a96d3..2345f8aad355 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -720,8 +720,7 @@ allowed to execute. perf_event_paranoid: Controls use of the performance events system by unprivileged -users (without CAP_SYS_ADMIN). The default value is 3 if -CONFIG_SECURITY_PERF_EVENTS_RESTRICT is set, or 2 otherwise. +users (without CAP_SYS_ADMIN). The default value is 2. -1: Allow use of (almost) all events by all users Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK @@ -729,7 +728,6 @@ CONFIG_SECURITY_PERF_EVENTS_RESTRICT is set, or 2 otherwise. Disallow raw tracepoint access by users without CAP_SYS_ADMIN >=1: Disallow CPU event access by users without CAP_SYS_ADMIN >=2: Disallow kernel profiling by users without CAP_SYS_ADMIN ->=3: Disallow all event access by users without CAP_SYS_ADMIN ============================================================== diff --git a/MAINTAINERS b/MAINTAINERS index b8a90bd17dc9..31dfd1262fcd 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6611,6 +6611,12 @@ F: drivers/hid/ F: include/linux/hid* F: include/uapi/linux/hid* +HID PLAYSTATION DRIVER +M: Roderick Colenbrander +L: linux-input@vger.kernel.org +S: Supported +F: drivers/hid/hid-playstation.c + HID SENSOR HUB DRIVERS M: Jiri Kosina M: Jonathan Cameron diff --git a/Makefile b/Makefile index e6ffc28de7c4..22986dda02e8 100644 --- a/Makefile +++ b/Makefile @@ -581,8 +581,12 @@ KBUILD_MODULES := KBUILD_BUILTIN := 1 # If we have only "make modules", don't compile built-in objects. 
+# When we're building modules with modversions, we need to consider +# the built-in objects during the descend as well, in order to +# make sure the checksums are up to date before we record them. + ifeq ($(MAKECMDGOALS),modules) - KBUILD_BUILTIN := + KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) endif # If we have "make modules", compile modules @@ -1385,13 +1389,6 @@ ifdef CONFIG_MODULES all: modules -# When we're building modules with modversions, we need to consider -# the built-in objects during the descend as well, in order to -# make sure the checksums are up to date before we record them. -ifdef CONFIG_MODVERSIONS - KBUILD_BUILTIN := 1 -endif - # Build modules # # A module can be listed more than once in obj-m resulting in diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 793c8f003b4c..fcb7a55911a6 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -2128,6 +2128,17 @@ config AUTO_ZRELADDR config EFI_STUB bool +config ARM_DECOMPRESSOR_LIMIT + hex "Limit the decompressor memory area" + default 0x3200000 + help + Allows overriding of the memory size that the decompressor maps with + read, write and execute permissions to avoid speculative prefetch. + + By default ARM_DECOMPRESSOR_LIMIT maps the first 1GB of memory + with read, write and execute permissions and the rest of the memory + as strongly ordered. + config EFI bool "UEFI runtime support" depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index e205bbbe2794..e987a002a032 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -703,7 +703,7 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size mov r0, r3 mov r9, r0, lsr #18 mov r9, r9, lsl #18 @ start of RAM - add r10, r9, #0x10000000 @ a reasonable RAM size + add r10, r9, #CONFIG_ARM_DECOMPRESSOR_LIMIT mov r1, #0x12 @ XN|U + section mapping orr r1, r1, #3 << 10 @ AP=11 add r2, r3, #16384 diff --git a/arch/arm/configs/vendor/bengal-perf_defconfig b/arch/arm/configs/vendor/bengal-perf_defconfig index ae4945b63512..d758da471451 100644 --- a/arch/arm/configs/vendor/bengal-perf_defconfig +++ b/arch/arm/configs/vendor/bengal-perf_defconfig @@ -51,6 +51,8 @@ CONFIG_NR_CPUS=8 CONFIG_ARM_PSCI=y CONFIG_HIGHMEM=y CONFIG_SECCOMP=y +CONFIG_CMDLINE="cgroup_disable=pressure" +CONFIG_CMDLINE_EXTEND=y CONFIG_CPU_FREQ_TIMES=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y @@ -585,7 +587,6 @@ CONFIG_SDCARD_FS=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_LSM_MMAP_MIN_ADDR=4096 CONFIG_HARDENED_USERCOPY=y diff --git a/arch/arm/configs/vendor/bengal_defconfig b/arch/arm/configs/vendor/bengal_defconfig index 84c5be4362c3..8076a3de036a 100644 --- a/arch/arm/configs/vendor/bengal_defconfig +++ b/arch/arm/configs/vendor/bengal_defconfig @@ -54,6 +54,8 @@ CONFIG_NR_CPUS=8 CONFIG_ARM_PSCI=y CONFIG_HIGHMEM=y CONFIG_SECCOMP=y +CONFIG_CMDLINE="cgroup_disable=pressure" +CONFIG_CMDLINE_EXTEND=y CONFIG_EFI=y CONFIG_CPU_FREQ_TIMES=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y @@ -638,7 +640,6 @@ CONFIG_SDCARD_FS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_LSM_MMAP_MIN_ADDR=4096 CONFIG_HARDENED_USERCOPY=y diff --git a/arch/arm/configs/vendor/msm8937-perf_defconfig b/arch/arm/configs/vendor/msm8937-perf_defconfig new file mode 100644 index 000000000000..68c85e7327e0 --- /dev/null
+++ b/arch/arm/configs/vendor/msm8937-perf_defconfig @@ -0,0 +1,687 @@ +CONFIG_LOCALVERSION="-perf" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_AUDIT=y +# CONFIG_AUDITSYSCALL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_SCHED_WALT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_RCU_EXPERT=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y +CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_BPF=y +CONFIG_SCHED_CORE_CTL=y +CONFIG_NAMESPACES=y +# CONFIG_PID_NS is not set +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_TUNE=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +# CONFIG_FHANDLE is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_BPF_SYSCALL=y +CONFIG_EMBEDDED=y +# CONFIG_SLUB_DEBUG is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_QM215=y +CONFIG_ARCH_MSM8917=y +CONFIG_ARCH_MSM8937=y +CONFIG_ARCH_SDM439=y +CONFIG_ARCH_SDM429=y +# CONFIG_VDSO is not set +CONFIG_SMP=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=8 +CONFIG_ARM_PSCI=y +CONFIG_HIGHMEM=y +CONFIG_SECCOMP=y +CONFIG_CMDLINE="cgroup_disable=pressure" +CONFIG_CMDLINE_EXTEND=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TIMES=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_BOOST=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPU_FREQ_MSM=y +CONFIG_CPU_IDLE=y +CONFIG_VFP=y +CONFIG_NEON=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_ENERGY_MODEL=y +CONFIG_MSM_TZ_LOG=y +CONFIG_ARM_CRYPTO=y +CONFIG_CRYPTO_SHA1_ARM_NEON=y +CONFIG_CRYPTO_SHA2_ARM_CE=y +CONFIG_CRYPTO_AES_ARM_BS=y +CONFIG_CRYPTO_AES_ARM_CE=y +CONFIG_CRYPTO_GHASH_ARM_CE=y +CONFIG_ARCH_MMAP_RND_BITS=16 +CONFIG_PANIC_ON_REFCOUNT_ERROR=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_SHA512=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y +CONFIG_PARTITION_ADVANCED=y +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_IOSCHED_BFQ=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_CLEANCACHE=y +CONFIG_CMA=y +CONFIG_CMA_DEBUGFS=y +CONFIG_ZSMALLOC=y +CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPGRE_DEMUX=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_UDP_DIAG=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y 
+CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_IP_SCTP=y +CONFIG_L2TP=y +CONFIG_L2TP_DEBUGFS=y +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=y +CONFIG_L2TP_ETH=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_CLS_BPF=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_DNS_RESOLVER=y +CONFIG_QRTR=y +CONFIG_QRTR_SMD=y +CONFIG_BPF_JIT=y +CONFIG_BT=y +# CONFIG_BT_BREDR is not set +# CONFIG_BT_LE is not set +CONFIG_MSM_BT_POWER=y +CONFIG_BTFM_SLIM_WCN3990=y +CONFIG_CFG80211=y +CONFIG_CFG80211_INTERNAL_REGDB=y +# CONFIG_CFG80211_CRDA_SUPPORT is not set +CONFIG_RFKILL=y +CONFIG_NFC_NQ=y +CONFIG_FW_LOADER_USER_HELPER=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +# CONFIG_FW_CACHE is not set 
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y +CONFIG_DMA_CMA=y +CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=16 +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_HDCP_QSEECOM=y +CONFIG_QSEECOM=y +CONFIG_UID_SYS_STATS=y +CONFIG_FPR_FPC=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_CRYPTO=y +CONFIG_SCSI_UFS_CRYPTO_QTI=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y +CONFIG_DM_SNAPSHOT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_ANDROID_VERITY=y +CONFIG_DM_BOW=y +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +CONFIG_TUN=y +CONFIG_VETH=y +# CONFIG_NET_VENDOR_AMAZON is not set +CONFIG_MSM_RMNET_BAM=y +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_HISILICON is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +CONFIG_RMNET=y +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=y +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=y +CONFIG_PPTP=y +CONFIG_PPPOL2TP=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_USB_RTL8152=y +CONFIG_USB_USBNET=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +# CONFIG_WLAN_VENDOR_ATH is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +# CONFIG_WLAN_VENDOR_BROADCOM is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +# CONFIG_WLAN_VENDOR_INTEL is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +# CONFIG_WLAN_VENDOR_MARVELL is not set +# CONFIG_WLAN_VENDOR_MEDIATEK is not set +# CONFIG_WLAN_VENDOR_RALINK is not set +# CONFIG_WLAN_VENDOR_REALTEK is not set +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +CONFIG_WCNSS_MEM_PRE_ALLOC=y +CONFIG_CLD_LL_CORE=y +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_GPIO=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_HBTP_INPUT=y +CONFIG_INPUT_QPNP_POWER_ON=y +CONFIG_INPUT_UINPUT=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM_LEGACY=y +CONFIG_MSM_SMD_PKT=y +CONFIG_DIAG_CHAR=y +CONFIG_MSM_ADSPRPC=y +CONFIG_MSM_RDBG=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MSM_V2=y +CONFIG_SPI=y +CONFIG_SPI_QUP=y +CONFIG_SPI_SPIDEV=y +CONFIG_SPMI=y +CONFIG_PINCTRL_MSM8937=y +CONFIG_PINCTRL_MSM8917=y +CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_QPNP_SMB5=y +CONFIG_QPNP_VM_BMS=y +CONFIG_QPNP_LINEAR_CHARGER=y +CONFIG_SMB1351_USB_CHARGER=y +CONFIG_SMB1360_CHARGER_FG=y +CONFIG_SMB1355_SLAVE_CHARGER=y +CONFIG_QPNP_QG=y +CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_QCOM_SPMI_TEMP_ALARM=y +CONFIG_THERMAL_QPNP_ADC_TM=y +CONFIG_THERMAL_TSENS=y +CONFIG_QTI_ADC_TM=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_BCL_PMIC5=y +CONFIG_QTI_BCL_SOC_DRIVER=y +CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y +CONFIG_MFD_I2C_PMIC=y +CONFIG_MFD_SPMI_PMIC=y +CONFIG_REGULATOR=y 
+CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_PROXY_CONSUMER=y +CONFIG_REGULATOR_QPNP_LABIBB=y +CONFIG_REGULATOR_QPNP_LCDB=y +CONFIG_REGULATOR_MEM_ACC=y +CONFIG_REGULATOR_CPR=y +CONFIG_REGULATOR_RPM_SMD=y +CONFIG_REGULATOR_SPM=y +CONFIG_REGULATOR_STUB=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_MSM_VIDC_3X_GOVERNORS=y +CONFIG_MSM_VIDC_3X_V4L2=y +CONFIG_MSM_CAMERA=y +CONFIG_MSMB_CAMERA=y +CONFIG_MSM_CAMERA_SENSOR=y +CONFIG_MSM_CPP=y +CONFIG_MSM_CCI=y +CONFIG_MSM_CSI20_HEADER=y +CONFIG_MSM_CSI22_HEADER=y +CONFIG_MSM_CSI30_HEADER=y +CONFIG_MSM_CSI31_HEADER=y +CONFIG_MSM_CSIPHY=y +CONFIG_MSM_CSID=y +CONFIG_MSM_EEPROM=y +CONFIG_MSM_ISPIF_V2=y +CONFIG_IMX134=y +CONFIG_IMX132=y +CONFIG_OV9724=y +CONFIG_OV5648=y +CONFIG_GC0339=y +CONFIG_OV8825=y +CONFIG_OV8865=y +CONFIG_s5k4e1=y +CONFIG_OV12830=y +CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y +CONFIG_MSMB_JPEG=y +CONFIG_MSM_FD=y +CONFIG_RADIO_IRIS=y +CONFIG_RADIO_IRIS_TRANSPORT=y +CONFIG_FB=y +CONFIG_FB_MSM=y +CONFIG_FB_MSM_MDSS=y +CONFIG_FB_MSM_MDSS_WRITEBACK=y +CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y +CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_BACKLIGHT_CLASS_DEVICE is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_USB_AUDIO=y +CONFIG_SND_SOC=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_SONY=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_EHCI_MSM=y +CONFIG_USB_ACM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_STORAGE_DATAFAB=y +CONFIG_USB_STORAGE_FREECOM=y +CONFIG_USB_STORAGE_ISD200=y +CONFIG_USB_STORAGE_USBAT=y +CONFIG_USB_STORAGE_SDDR09=y +CONFIG_USB_STORAGE_SDDR55=y +CONFIG_USB_STORAGE_JUMPSHOT=y +CONFIG_USB_STORAGE_ALAUDA=y +CONFIG_USB_STORAGE_ONETOUCH=y +CONFIG_USB_STORAGE_KARMA=y +CONFIG_USB_STORAGE_CYPRESS_ATACB=y +CONFIG_USB_SERIAL=y +CONFIG_USB_EHSET_TEST_FIXTURE=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_DEBUG_FILES=y +CONFIG_USB_GADGET_DEBUG_FS=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_CI13XXX_MSM=y +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_SERIAL=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_RNDIS=y +CONFIG_USB_CONFIGFS_RMNET_BAM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_USB_CONFIGFS_F_HID=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_CDEV=y +CONFIG_USB_CONFIGFS_F_CCID=y +CONFIG_USB_CONFIGFS_F_QDSS=y +CONFIG_USB_CONFIGFS_F_MTP=y +CONFIG_USB_CONFIGFS_F_PTP=y +CONFIG_TYPEC=y +CONFIG_MMC=y +# CONFIG_PWRSEQ_EMMC is not set +# CONFIG_PWRSEQ_SIMPLE is not set +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_IPC_LOGGING=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_CQHCI_CRYPTO=y +CONFIG_MMC_CQHCI_CRYPTO_QTI=y +CONFIG_LEDS_QTI_TRI_LED=y +CONFIG_LEDS_QPNP_FLASH_V2=y +CONFIG_LEDS_QPNP_VIBRATOR_LDO=y +CONFIG_LEDS_QPNP_VIBRATOR=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_EDAC=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PM8XXX=y +CONFIG_DMADEVICES=y +CONFIG_QCOM_SPS_DMA=y +CONFIG_UIO=y +CONFIG_UIO_MSM_SHAREDMEM=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ION=y +CONFIG_ION_POOL_AUTO_REFILL=y 
+CONFIG_QPNP_REVID=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_IPA=y +CONFIG_RMNET_IPA=y +CONFIG_RNDIS_IPA=y +CONFIG_USB_BAM=y +CONFIG_MDSS_PLL=y +CONFIG_QCOM_CLK_SMD_RPM=y +CONFIG_SDM_GCC_429W=y +CONFIG_SDM_DEBUGCC_429W=y +CONFIG_CLOCK_CPU_SDM=y +CONFIG_SDM_DEBUGCC_439=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +CONFIG_MAILBOX=y +CONFIG_ARM_SMMU=y +CONFIG_QCOM_LAZY_MAPPING=y +CONFIG_RPMSG_CHAR=y +CONFIG_RPMSG_QCOM_GLINK_RPM=y +CONFIG_RPMSG_QCOM_GLINK_SMEM=y +CONFIG_RPMSG_QCOM_SMD=y +CONFIG_MSM_RPM_SMD=y +CONFIG_QCOM_CPUSS_DUMP=y +CONFIG_QCOM_RUN_QUEUE_STATS=y +CONFIG_QCOM_QMI_HELPERS=y +CONFIG_QCOM_SMEM=y +CONFIG_QCOM_SMD_RPM=y +CONFIG_MSM_SPM=y +CONFIG_MSM_L2_SPM=y +CONFIG_QCOM_EARLY_RANDOM=y +CONFIG_QCOM_MEMORY_DUMP_V2=y +CONFIG_QCOM_SMP2P=y +CONFIG_QCOM_SMSM=y +CONFIG_MSM_PIL_MSS_QDSP6V5=y +CONFIG_QCOM_SECURE_BUFFER=y +CONFIG_MSM_TZ_SMMU=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_QMI_COMM=y +CONFIG_MSM_PIL_SSR_GENERIC=y +CONFIG_MSM_BOOT_STATS=y +CONFIG_MSM_CORE_HANG_DETECT=y +CONFIG_QCOM_WATCHDOG_V2=y +CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y +CONFIG_QCOM_BUS_SCALING=y +CONFIG_QCOM_GLINK=y +CONFIG_MSM_EVENT_TIMER=y +CONFIG_MSM_PM=y +CONFIG_QCOM_DCC=y +CONFIG_QTI_RPM_STATS_LOG=y +CONFIG_QTEE_SHM_BRIDGE=y +CONFIG_MEM_SHARE_QMI_SERVICE=y +CONFIG_MSM_PERFORMANCE=y +CONFIG_QTI_CRYPTO_COMMON=y +CONFIG_QTI_CRYPTO_TZ=y +CONFIG_MSM_BAM_DMUX=y +CONFIG_WCNSS_CORE=y +CONFIG_WCNSS_CORE_PRONTO=y +CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y +CONFIG_DEVFREQ_GOV_PASSIVE=y +CONFIG_QCOM_BIMC_BWMON=y +CONFIG_ARM_MEMLAT_MON=y +CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y +CONFIG_DEVFREQ_GOV_MEMLAT=y +CONFIG_DEVFREQ_SIMPLE_DEV=y +CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_IIO=y +CONFIG_QCOM_SPMI_ADC5=y +CONFIG_QCOM_SPMI_VADC=y +CONFIG_PWM=y +CONFIG_PWM_QTI_LPG=y +CONFIG_QCOM_SHOW_RESUME_IRQ=y +CONFIG_QCOM_MPM=y +CONFIG_RAS=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_BINDERFS=y +CONFIG_QCOM_QFPROM=y +CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_SENSORS_SSC=y +CONFIG_QCOM_KGSL=y +CONFIG_LEGACY_ENERGY_MODEL_DT=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_F2FS_FS=y +CONFIG_F2FS_FS_SECURITY=y +CONFIG_F2FS_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y +CONFIG_FS_VERITY=y +CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y +CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=y +CONFIG_INCREMENTAL_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_SDCARD_FS=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_SECURITY=y +CONFIG_LSM_MMAP_MIN_ADDR=4096 +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_PAGESPAN=y +CONFIG_FORTIFY_SOURCE=y +CONFIG_STATIC_USERMODEHELPER=y +CONFIG_STATIC_USERMODEHELPER_PATH="" +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SMACK=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_LZ4=y +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DEV_QCE=y +CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y +CONFIG_CRYPTO_DEV_QCRYPTO=y +CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +CONFIG_FRAME_WARN=2048 +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_WQ_WATCHDOG=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_FAULT_INJECTION=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_IPC_LOGGING=y +# CONFIG_FTRACE is not set +CONFIG_LKDTM=m 
+CONFIG_BUG_ON_DATA_CORRUPTION=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_CTI=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y +CONFIG_CORESIGHT_REMOTE_ETM=y +CONFIG_CORESIGHT_TGU=y diff --git a/arch/arm/configs/vendor/msm8937_32go-perf_defconfig b/arch/arm/configs/vendor/msm8937_32go-perf_defconfig index b5070161ecbc..d322972ec0d2 100644 --- a/arch/arm/configs/vendor/msm8937_32go-perf_defconfig +++ b/arch/arm/configs/vendor/msm8937_32go-perf_defconfig @@ -1,7 +1,6 @@ CONFIG_LOCALVERSION="-perf" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_AUDIT=y -# CONFIG_AUDITSYSCALL is not set CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT=y @@ -16,18 +15,17 @@ CONFIG_RCU_FAST_NO_HZ=y CONFIG_RCU_NOCB_CPU=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_BLK_CGROUP=y -CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_BPF=y CONFIG_SCHED_CORE_CTL=y CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_SCHED_AUTOGROUP=y CONFIG_SCHED_TUNE=y @@ -37,7 +35,6 @@ CONFIG_BLK_DEV_INITRD=y # CONFIG_RD_XZ is not set # CONFIG_RD_LZO is not set # CONFIG_RD_LZ4 is not set -CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_FHANDLE is not set # CONFIG_BASE_FULL is not set CONFIG_KALLSYMS_ALL=y @@ -49,6 +46,9 @@ CONFIG_PROFILING=y CONFIG_ARCH_QCOM=y CONFIG_ARCH_QM215=y CONFIG_ARCH_MSM8917=y +CONFIG_ARCH_MSM8937=y +CONFIG_ARCH_SDM439=y +CONFIG_ARCH_SDM429=y # CONFIG_VDSO is not set CONFIG_SMP=y CONFIG_SCHED_MC=y @@ -56,7 +56,8 @@ CONFIG_NR_CPUS=8 CONFIG_ARM_PSCI=y CONFIG_HIGHMEM=y CONFIG_SECCOMP=y -CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y +CONFIG_CMDLINE="cgroup_disable=pressure" +CONFIG_CMDLINE_EXTEND=y CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_TIMES=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y @@ -80,26 +81,22 @@ CONFIG_CRYPTO_SHA1_ARM_NEON=y CONFIG_CRYPTO_SHA2_ARM_CE=y CONFIG_CRYPTO_AES_ARM_BS=y CONFIG_CRYPTO_AES_ARM_CE=y -CONFIG_CRYPTO_GHASH_ARM_CE=y CONFIG_ARCH_MMAP_RND_BITS=16 CONFIG_PANIC_ON_REFCOUNT_ERROR=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y -# CONFIG_BLK_DEV_BSG is not set CONFIG_BLK_DEV_ZONED=y CONFIG_BLK_INLINE_ENCRYPTION=y CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_DEADLINE is not set -CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_CLEANCACHE=y CONFIG_CMA=y CONFIG_CMA_DEBUGFS=y CONFIG_ZSMALLOC=y @@ -239,9 +236,9 @@ CONFIG_NET_EMATCH_U32=y CONFIG_NET_EMATCH_META=y CONFIG_NET_EMATCH_TEXT=y CONFIG_NET_CLS_ACT=y -CONFIG_DNS_RESOLVER=y CONFIG_QRTR=y CONFIG_QRTR_SMD=y +CONFIG_BPF_JIT=y CONFIG_BT=y # CONFIG_BT_BREDR is not set # CONFIG_BT_LE is not set @@ -280,6 +277,8 @@ CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y +CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -289,11 +288,14 @@ CONFIG_DM_BOW=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y CONFIG_TUN=y +CONFIG_VETH=y # CONFIG_NET_VENDOR_AMAZON is not set +CONFIG_MSM_RMNET_BAM=y # CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_HISILICON is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_NETRONOME is not set 
+CONFIG_RMNET=y # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SYNOPSYS is not set CONFIG_PPP=y @@ -330,6 +332,7 @@ CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y CONFIG_INPUT_MISC=y CONFIG_INPUT_HBTP_INPUT=y CONFIG_INPUT_QPNP_POWER_ON=y @@ -358,9 +361,11 @@ CONFIG_GPIOLIB=y CONFIG_GPIO_SYSFS=y CONFIG_POWER_RESET=y CONFIG_POWER_RESET_QCOM=y -CONFIG_POWER_RESET_SYSCON=y CONFIG_QPNP_SMB5=y +CONFIG_QPNP_VM_BMS=y +CONFIG_QPNP_LINEAR_CHARGER=y CONFIG_SMB1351_USB_CHARGER=y +CONFIG_SMB1360_CHARGER_FG=y CONFIG_SMB1355_SLAVE_CHARGER=y CONFIG_QPNP_QG=y CONFIG_THERMAL=y @@ -369,7 +374,10 @@ CONFIG_THERMAL_GOV_USER_SPACE=y CONFIG_THERMAL_GOV_LOW_LIMITS=y CONFIG_CPU_THERMAL=y CONFIG_DEVFREQ_THERMAL=y +CONFIG_QCOM_SPMI_TEMP_ALARM=y +CONFIG_THERMAL_QPNP_ADC_TM=y CONFIG_THERMAL_TSENS=y +CONFIG_QTI_ADC_TM=y CONFIG_QTI_VIRTUAL_SENSOR=y CONFIG_QTI_BCL_PMIC5=y CONFIG_QTI_BCL_SOC_DRIVER=y @@ -421,6 +429,8 @@ CONFIG_OV12830=y CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y CONFIG_MSMB_JPEG=y CONFIG_MSM_FD=y +CONFIG_RADIO_IRIS=y +CONFIG_RADIO_IRIS_TRANSPORT=y CONFIG_FB=y CONFIG_FB_MSM=y CONFIG_FB_MSM_MDSS=y @@ -441,9 +451,9 @@ CONFIG_HID_ELECOM=y CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NINTENDO=y CONFIG_HID_SONY=y CONFIG_USB_HIDDEV=y -CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y @@ -494,7 +504,6 @@ CONFIG_MMC=y # CONFIG_PWRSEQ_SIMPLE is not set CONFIG_MMC_BLOCK_MINORS=32 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y -CONFIG_MMC_IPC_LOGGING=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y @@ -503,9 +512,11 @@ CONFIG_MMC_CQHCI_CRYPTO_QTI=y CONFIG_LEDS_QTI_TRI_LED=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QPNP_VIBRATOR_LDO=y +CONFIG_LEDS_QPNP_VIBRATOR=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_EDAC=y CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PM8XXX=y CONFIG_DMADEVICES=y CONFIG_QCOM_SPS_DMA=y CONFIG_UIO=y @@ -520,11 +531,13 @@ CONFIG_SPS_SUPPORT_NDP_BAM=y CONFIG_IPA=y CONFIG_RMNET_IPA=y CONFIG_RNDIS_IPA=y +CONFIG_USB_BAM=y CONFIG_MDSS_PLL=y CONFIG_QCOM_CLK_SMD_RPM=y CONFIG_SDM_GCC_429W=y CONFIG_SDM_DEBUGCC_429W=y CONFIG_CLOCK_CPU_SDM=y +CONFIG_SDM_DEBUGCC_439=y CONFIG_HWSPINLOCK=y CONFIG_HWSPINLOCK_QCOM=y CONFIG_MAILBOX=y @@ -535,7 +548,6 @@ CONFIG_RPMSG_QCOM_GLINK_RPM=y CONFIG_RPMSG_QCOM_GLINK_SMEM=y CONFIG_RPMSG_QCOM_SMD=y CONFIG_MSM_RPM_SMD=y -CONFIG_QCOM_CPUSS_DUMP=y CONFIG_QCOM_RUN_QUEUE_STATS=y CONFIG_QCOM_QMI_HELPERS=y CONFIG_QCOM_SMEM=y @@ -548,28 +560,29 @@ CONFIG_QCOM_SMP2P=y CONFIG_QCOM_SMSM=y CONFIG_MSM_PIL_MSS_QDSP6V5=y CONFIG_QCOM_SECURE_BUFFER=y +CONFIG_MSM_TZ_SMMU=y CONFIG_MSM_SUBSYSTEM_RESTART=y CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_QMI_COMM=y CONFIG_MSM_PIL_SSR_GENERIC=y CONFIG_MSM_BOOT_STATS=y -CONFIG_QCOM_DCC_V2=y -CONFIG_MSM_CORE_HANG_DETECT=y CONFIG_QCOM_WATCHDOG_V2=y CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y CONFIG_QCOM_BUS_SCALING=y CONFIG_QCOM_GLINK=y CONFIG_MSM_EVENT_TIMER=y CONFIG_MSM_PM=y +CONFIG_QCOM_DCC=y CONFIG_QTI_RPM_STATS_LOG=y CONFIG_QTEE_SHM_BRIDGE=y CONFIG_MEM_SHARE_QMI_SERVICE=y CONFIG_MSM_PERFORMANCE=y CONFIG_QTI_CRYPTO_COMMON=y CONFIG_QTI_CRYPTO_TZ=y +CONFIG_MSM_BAM_DMUX=y CONFIG_WCNSS_CORE=y CONFIG_WCNSS_CORE_PRONTO=y CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y -CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y @@ -577,6 +590,8 @@ CONFIG_DEVFREQ_GOV_MEMLAT=y CONFIG_DEVFREQ_SIMPLE_DEV=y CONFIG_QCOM_DEVFREQ_DEVBW=y CONFIG_IIO=y +CONFIG_QCOM_SPMI_ADC5=y 
+CONFIG_QCOM_SPMI_VADC=y CONFIG_PWM=y CONFIG_PWM_QTI_LPG=y CONFIG_QCOM_SHOW_RESUME_IRQ=y @@ -616,23 +631,18 @@ CONFIG_SQUASHFS_LZ4=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_LSM_MMAP_MIN_ADDR=4096 CONFIG_HARDENED_USERCOPY=y -CONFIG_HARDENED_USERCOPY_PAGESPAN=y -CONFIG_FORTIFY_SOURCE=y CONFIG_STATIC_USERMODEHELPER=y CONFIG_STATIC_USERMODEHELPER_PATH="" CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SMACK=y CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_MD4=y CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_LZ4=y CONFIG_CRYPTO_ANSI_CPRNG=y -CONFIG_CRYPTO_DEV_QCE=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y @@ -642,14 +652,19 @@ CONFIG_DEBUG_INFO=y CONFIG_FRAME_WARN=2048 CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y -CONFIG_WQ_WATCHDOG=y -CONFIG_PANIC_ON_OOPS=y CONFIG_PANIC_TIMEOUT=5 CONFIG_SCHED_STACK_END_CHECK=y # CONFIG_DEBUG_PREEMPT is not set -CONFIG_FAULT_INJECTION=y -CONFIG_FAIL_PAGE_ALLOC=y CONFIG_IPC_LOGGING=y # CONFIG_FTRACE is not set -CONFIG_LKDTM=m CONFIG_BUG_ON_DATA_CORRUPTION=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y +CONFIG_CORESIGHT_REMOTE_ETM=y +CONFIG_CORESIGHT_TGU=y diff --git a/arch/arm/configs/vendor/msm8937_32go_defconfig b/arch/arm/configs/vendor/msm8937_32go_defconfig index 433e6853d1be..2b026a14b8c9 100644 --- a/arch/arm/configs/vendor/msm8937_32go_defconfig +++ b/arch/arm/configs/vendor/msm8937_32go_defconfig @@ -1,6 +1,5 @@ # CONFIG_LOCALVERSION_AUTO is not set CONFIG_AUDIT=y -# CONFIG_AUDITSYSCALL is not set CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT=y @@ -16,12 +15,11 @@ CONFIG_RCU_FAST_NO_HZ=y CONFIG_RCU_NOCB_CPU=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_BLK_CGROUP=y -CONFIG_DEBUG_BLK_CGROUP=y -CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y CONFIG_CGROUP_CPUACCT=y @@ -29,7 +27,6 @@ CONFIG_CGROUP_BPF=y CONFIG_CGROUP_DEBUG=y CONFIG_SCHED_CORE_CTL=y CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_SCHED_AUTOGROUP=y CONFIG_SCHED_TUNE=y @@ -39,7 +36,6 @@ CONFIG_BLK_DEV_INITRD=y # CONFIG_RD_XZ is not set # CONFIG_RD_LZO is not set # CONFIG_RD_LZ4 is not set -CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_FHANDLE is not set # CONFIG_BASE_FULL is not set CONFIG_KALLSYMS_ALL=y @@ -50,6 +46,9 @@ CONFIG_PROFILING=y CONFIG_ARCH_QCOM=y CONFIG_ARCH_QM215=y CONFIG_ARCH_MSM8917=y +CONFIG_ARCH_MSM8937=y +CONFIG_ARCH_SDM439=y +CONFIG_ARCH_SDM429=y # CONFIG_VDSO is not set CONFIG_SMP=y CONFIG_SCHED_MC=y @@ -57,7 +56,8 @@ CONFIG_NR_CPUS=8 CONFIG_ARM_PSCI=y CONFIG_HIGHMEM=y CONFIG_SECCOMP=y -CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y +CONFIG_CMDLINE="cgroup_disable=pressure" +CONFIG_CMDLINE_EXTEND=y CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_TIMES=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y @@ -82,28 +82,24 @@ CONFIG_CRYPTO_SHA1_ARM_NEON=y CONFIG_CRYPTO_SHA2_ARM_CE=y CONFIG_CRYPTO_AES_ARM_BS=y CONFIG_CRYPTO_AES_ARM_CE=y -CONFIG_CRYPTO_GHASH_ARM_CE=y CONFIG_OPROFILE=m CONFIG_KPROBES=y CONFIG_ARCH_MMAP_RND_BITS=16 CONFIG_PANIC_ON_REFCOUNT_ERROR=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y -# CONFIG_BLK_DEV_BSG is not 
set CONFIG_BLK_DEV_ZONED=y CONFIG_BLK_INLINE_ENCRYPTION=y CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_DEADLINE is not set -CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_CLEANCACHE=y CONFIG_CMA=y CONFIG_CMA_DEBUGFS=y CONFIG_ZSMALLOC=y @@ -246,6 +242,7 @@ CONFIG_NET_CLS_ACT=y CONFIG_DNS_RESOLVER=y CONFIG_QRTR=y CONFIG_QRTR_SMD=y +CONFIG_BPF_JIT=y CONFIG_BT=y # CONFIG_BT_BREDR is not set # CONFIG_BT_LE is not set @@ -285,6 +282,8 @@ CONFIG_SCSI_UFS_CRYPTO_QTI=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y +CONFIG_DM_SNAPSHOT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y @@ -294,11 +293,14 @@ CONFIG_DM_BOW=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y CONFIG_TUN=y +CONFIG_VETH=y # CONFIG_NET_VENDOR_AMAZON is not set +CONFIG_MSM_RMNET_BAM=y # CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_HISILICON is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_NETRONOME is not set +CONFIG_RMNET=y # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SYNOPSYS is not set CONFIG_PPP=y @@ -335,6 +337,7 @@ CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y CONFIG_INPUT_MISC=y CONFIG_INPUT_HBTP_INPUT=y CONFIG_INPUT_QPNP_POWER_ON=y @@ -366,7 +369,10 @@ CONFIG_GPIO_SYSFS=y CONFIG_POWER_RESET=y CONFIG_POWER_RESET_QCOM=y CONFIG_QPNP_SMB5=y +CONFIG_QPNP_VM_BMS=y +CONFIG_QPNP_LINEAR_CHARGER=y CONFIG_SMB1351_USB_CHARGER=y +CONFIG_SMB1360_CHARGER_FG=y CONFIG_SMB1355_SLAVE_CHARGER=y CONFIG_QPNP_QG=y CONFIG_THERMAL=y @@ -375,7 +381,10 @@ CONFIG_THERMAL_GOV_USER_SPACE=y CONFIG_THERMAL_GOV_LOW_LIMITS=y CONFIG_CPU_THERMAL=y CONFIG_DEVFREQ_THERMAL=y +CONFIG_QCOM_SPMI_TEMP_ALARM=y +CONFIG_THERMAL_QPNP_ADC_TM=y CONFIG_THERMAL_TSENS=y +CONFIG_QTI_ADC_TM=y CONFIG_QTI_VIRTUAL_SENSOR=y CONFIG_QTI_BCL_PMIC5=y CONFIG_QTI_BCL_SOC_DRIVER=y @@ -427,6 +436,8 @@ CONFIG_OV12830=y CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y CONFIG_MSMB_JPEG=y CONFIG_MSM_FD=y +CONFIG_RADIO_IRIS=y +CONFIG_RADIO_IRIS_TRANSPORT=y CONFIG_FB=y CONFIG_FB_VIRTUAL=y CONFIG_FB_MSM=y @@ -448,9 +459,9 @@ CONFIG_HID_ELECOM=y CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NINTENDO=y CONFIG_HID_SONY=y CONFIG_USB_HIDDEV=y -CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y @@ -510,9 +521,11 @@ CONFIG_MMC_CQHCI_CRYPTO_QTI=y CONFIG_LEDS_QTI_TRI_LED=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QPNP_VIBRATOR_LDO=y +CONFIG_LEDS_QPNP_VIBRATOR=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_EDAC=y CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PM8XXX=y CONFIG_DMADEVICES=y CONFIG_QCOM_SPS_DMA=y CONFIG_UIO=y @@ -528,11 +541,13 @@ CONFIG_SPS_SUPPORT_NDP_BAM=y CONFIG_IPA=y CONFIG_RMNET_IPA=y CONFIG_RNDIS_IPA=y +CONFIG_USB_BAM=y CONFIG_MDSS_PLL=y CONFIG_QCOM_CLK_SMD_RPM=y CONFIG_SDM_GCC_429W=y CONFIG_SDM_DEBUGCC_429W=y CONFIG_CLOCK_CPU_SDM=y +CONFIG_SDM_DEBUGCC_439=y CONFIG_HWSPINLOCK=y CONFIG_HWSPINLOCK_QCOM=y CONFIG_MAILBOX=y @@ -554,15 +569,17 @@ CONFIG_MSM_SPM=y CONFIG_MSM_L2_SPM=y CONFIG_QCOM_EARLY_RANDOM=y CONFIG_QCOM_MEMORY_DUMP_V2=y +CONFIG_MSM_DEBUG_LAR_UNLOCK=y CONFIG_QCOM_SMP2P=y CONFIG_QCOM_SMSM=y CONFIG_MSM_PIL_MSS_QDSP6V5=y CONFIG_QCOM_SECURE_BUFFER=y +CONFIG_MSM_TZ_SMMU=y CONFIG_MSM_SUBSYSTEM_RESTART=y CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_QMI_COMM=y CONFIG_MSM_PIL_SSR_GENERIC=y CONFIG_MSM_BOOT_STATS=y -CONFIG_QCOM_DCC_V2=y CONFIG_MSM_CORE_HANG_DETECT=y 
CONFIG_QCOM_WATCHDOG_V2=y CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y @@ -570,16 +587,17 @@ CONFIG_QCOM_BUS_SCALING=y CONFIG_QCOM_GLINK=y CONFIG_MSM_EVENT_TIMER=y CONFIG_MSM_PM=y +CONFIG_QCOM_DCC=y CONFIG_QTI_RPM_STATS_LOG=y CONFIG_QTEE_SHM_BRIDGE=y CONFIG_MEM_SHARE_QMI_SERVICE=y CONFIG_MSM_PERFORMANCE=y CONFIG_QTI_CRYPTO_COMMON=y CONFIG_QTI_CRYPTO_TZ=y +CONFIG_MSM_BAM_DMUX=y CONFIG_WCNSS_CORE=y CONFIG_WCNSS_CORE_PRONTO=y CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y -CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y @@ -587,6 +605,8 @@ CONFIG_DEVFREQ_GOV_MEMLAT=y CONFIG_DEVFREQ_SIMPLE_DEV=y CONFIG_QCOM_DEVFREQ_DEVBW=y CONFIG_IIO=y +CONFIG_QCOM_SPMI_ADC5=y +CONFIG_QCOM_SPMI_VADC=y CONFIG_PWM=y CONFIG_PWM_QTI_LPG=y CONFIG_QCOM_SHOW_RESUME_IRQ=y @@ -596,7 +616,6 @@ CONFIG_ANDROID=y CONFIG_ANDROID_BINDER_IPC=y CONFIG_ANDROID_BINDERFS=y CONFIG_QCOM_QFPROM=y -CONFIG_STM=y CONFIG_SLIMBUS_MSM_NGD=y CONFIG_SENSORS_SSC=y CONFIG_QCOM_KGSL=y @@ -628,23 +647,18 @@ CONFIG_SQUASHFS_LZ4=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_LSM_MMAP_MIN_ADDR=4096 CONFIG_HARDENED_USERCOPY=y -CONFIG_HARDENED_USERCOPY_PAGESPAN=y -CONFIG_FORTIFY_SOURCE=y CONFIG_STATIC_USERMODEHELPER=y CONFIG_STATIC_USERMODEHELPER_PATH="" CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SMACK=y CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_MD4=y CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_LZ4=y CONFIG_CRYPTO_ANSI_CPRNG=y -CONFIG_CRYPTO_DEV_QCE=y CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y CONFIG_CRYPTO_DEV_QCRYPTO=y CONFIG_CRYPTO_DEV_QCEDEV=y @@ -677,7 +691,6 @@ CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_WQ_WATCHDOG=y -CONFIG_PANIC_ON_OOPS=y CONFIG_PANIC_TIMEOUT=5 CONFIG_PANIC_ON_SCHED_BUG=y CONFIG_PANIC_ON_RT_THROTTLING=y @@ -708,4 +721,15 @@ CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_PANIC_ON_DATA_CORRUPTION=y CONFIG_DEBUG_USER=y CONFIG_FORCE_PAGES=y -CONFIG_PID_IN_CONTEXTIDR=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_SOURCE_ETM4X=y +CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_CTI=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y +CONFIG_CORESIGHT_REMOTE_ETM=y +CONFIG_CORESIGHT_TGU=y diff --git a/arch/arm/configs/vendor/msm8937_defconfig b/arch/arm/configs/vendor/msm8937_defconfig new file mode 100644 index 000000000000..a3bc5274eaa4 --- /dev/null +++ b/arch/arm/configs/vendor/msm8937_defconfig @@ -0,0 +1,745 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_AUDIT=y +# CONFIG_AUDITSYSCALL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_SCHED_WALT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_RCU_EXPERT=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y +CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_BLK_CGROUP=y +CONFIG_DEBUG_BLK_CGROUP=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_DEBUG=y +CONFIG_SCHED_CORE_CTL=y +CONFIG_NAMESPACES=y +# CONFIG_PID_NS is not set +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_TUNE=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO 
is not set +# CONFIG_RD_LZ4 is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +# CONFIG_FHANDLE is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_BPF_SYSCALL=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_QM215=y +CONFIG_ARCH_MSM8917=y +CONFIG_ARCH_MSM8937=y +CONFIG_ARCH_SDM439=y +CONFIG_ARCH_SDM429=y +# CONFIG_VDSO is not set +CONFIG_SMP=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=8 +CONFIG_ARM_PSCI=y +CONFIG_HIGHMEM=y +CONFIG_SECCOMP=y +CONFIG_CMDLINE="cgroup_disable=pressure" +CONFIG_CMDLINE_EXTEND=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TIMES=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_BOOST=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPU_FREQ_MSM=y +CONFIG_CPU_IDLE=y +CONFIG_VFP=y +CONFIG_NEON=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_ENERGY_MODEL=y +CONFIG_MSM_TZ_LOG=y +CONFIG_ARM_CRYPTO=y +CONFIG_CRYPTO_SHA1_ARM_NEON=y +CONFIG_CRYPTO_SHA2_ARM_CE=y +CONFIG_CRYPTO_AES_ARM_BS=y +CONFIG_CRYPTO_AES_ARM_CE=y +CONFIG_CRYPTO_GHASH_ARM_CE=y +CONFIG_OPROFILE=m +CONFIG_KPROBES=y +CONFIG_ARCH_MMAP_RND_BITS=16 +CONFIG_PANIC_ON_REFCOUNT_ERROR=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_SHA512=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y +CONFIG_PARTITION_ADVANCED=y +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_IOSCHED_BFQ=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_CLEANCACHE=y +CONFIG_CMA=y +CONFIG_CMA_DEBUGFS=y +CONFIG_ZSMALLOC=y +CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPGRE_DEMUX=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_UDP_DIAG=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_BPF=y 
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_IP_SCTP=y +CONFIG_L2TP=y +CONFIG_L2TP_DEBUGFS=y +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=y +CONFIG_L2TP_ETH=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_CLS_BPF=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_DNS_RESOLVER=y +CONFIG_QRTR=y +CONFIG_QRTR_SMD=y +CONFIG_BPF_JIT=y +CONFIG_BT=y +# CONFIG_BT_BREDR is not set +# CONFIG_BT_LE is not set +CONFIG_MSM_BT_POWER=y +CONFIG_BTFM_SLIM_WCN3990=y +CONFIG_CFG80211=y +CONFIG_CFG80211_INTERNAL_REGDB=y +# CONFIG_CFG80211_CRDA_SUPPORT is not set +CONFIG_RFKILL=y +CONFIG_NFC_NQ=y +CONFIG_FW_LOADER_USER_HELPER=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +# CONFIG_FW_CACHE is not set +CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y +CONFIG_DMA_CMA=y +CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=16 +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_HDCP_QSEECOM=y +CONFIG_QSEECOM=y +CONFIG_UID_SYS_STATS=y +CONFIG_FPR_FPC=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFSHCD_CMD_LOGGING=y +CONFIG_SCSI_UFS_CRYPTO=y +CONFIG_SCSI_UFS_CRYPTO_QTI=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y +CONFIG_DM_SNAPSHOT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_ANDROID_VERITY=y +CONFIG_DM_BOW=y +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +CONFIG_TUN=y +CONFIG_VETH=y +# CONFIG_NET_VENDOR_AMAZON is not set +CONFIG_MSM_RMNET_BAM=y +# 
CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_HISILICON is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +CONFIG_RMNET=y +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=y +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=y +CONFIG_PPTP=y +CONFIG_PPPOL2TP=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_USB_RTL8152=y +CONFIG_USB_USBNET=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +# CONFIG_WLAN_VENDOR_ATH is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +# CONFIG_WLAN_VENDOR_BROADCOM is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +# CONFIG_WLAN_VENDOR_INTEL is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +# CONFIG_WLAN_VENDOR_MARVELL is not set +# CONFIG_WLAN_VENDOR_MEDIATEK is not set +# CONFIG_WLAN_VENDOR_RALINK is not set +# CONFIG_WLAN_VENDOR_REALTEK is not set +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +CONFIG_WCNSS_MEM_PRE_ALLOC=y +CONFIG_CLD_LL_CORE=y +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_GPIO=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_HBTP_INPUT=y +CONFIG_INPUT_QPNP_POWER_ON=y +CONFIG_INPUT_UINPUT=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +CONFIG_SERIAL_MSM_HS=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM_LEGACY=y +CONFIG_MSM_SMD_PKT=y +CONFIG_DIAG_CHAR=y +CONFIG_MSM_ADSPRPC=y +CONFIG_MSM_RDBG=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MSM_V2=y +CONFIG_SPI=y +CONFIG_SPI_QUP=y +CONFIG_SPI_SPIDEV=y +CONFIG_SPMI=y +CONFIG_PINCTRL_MSM8937=y +CONFIG_PINCTRL_MSM8917=y +CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_QPNP_SMB5=y +CONFIG_QPNP_VM_BMS=y +CONFIG_QPNP_LINEAR_CHARGER=y +CONFIG_SMB1351_USB_CHARGER=y +CONFIG_SMB1360_CHARGER_FG=y +CONFIG_SMB1355_SLAVE_CHARGER=y +CONFIG_QPNP_QG=y +CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_QCOM_SPMI_TEMP_ALARM=y +CONFIG_THERMAL_QPNP_ADC_TM=y +CONFIG_THERMAL_TSENS=y +CONFIG_QTI_ADC_TM=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_BCL_PMIC5=y +CONFIG_QTI_BCL_SOC_DRIVER=y +CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y +CONFIG_MFD_I2C_PMIC=y +CONFIG_MFD_SPMI_PMIC=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_PROXY_CONSUMER=y +CONFIG_REGULATOR_QPNP_LABIBB=y +CONFIG_REGULATOR_QPNP_LCDB=y +CONFIG_REGULATOR_MEM_ACC=y +CONFIG_REGULATOR_CPR=y +CONFIG_REGULATOR_RPM_SMD=y +CONFIG_REGULATOR_SPM=y +CONFIG_REGULATOR_STUB=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_MSM_VIDC_3X_GOVERNORS=y +CONFIG_MSM_VIDC_3X_V4L2=y +CONFIG_MSM_CAMERA=y +CONFIG_MSM_CAMERA_DEBUG=y +CONFIG_MSMB_CAMERA=y +CONFIG_MSMB_CAMERA_DEBUG=y +CONFIG_MSM_CAMERA_SENSOR=y +CONFIG_MSM_CPP=y +CONFIG_MSM_CCI=y +CONFIG_MSM_CSI20_HEADER=y +CONFIG_MSM_CSI22_HEADER=y +CONFIG_MSM_CSI30_HEADER=y +CONFIG_MSM_CSI31_HEADER=y +CONFIG_MSM_CSIPHY=y +CONFIG_MSM_CSID=y +CONFIG_MSM_EEPROM=y +CONFIG_MSM_ISPIF_V2=y +CONFIG_IMX134=y 
+CONFIG_IMX132=y +CONFIG_OV9724=y +CONFIG_OV5648=y +CONFIG_GC0339=y +CONFIG_OV8825=y +CONFIG_OV8865=y +CONFIG_s5k4e1=y +CONFIG_OV12830=y +CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y +CONFIG_MSMB_JPEG=y +CONFIG_MSM_FD=y +CONFIG_RADIO_IRIS=y +CONFIG_RADIO_IRIS_TRANSPORT=y +CONFIG_FB=y +CONFIG_FB_VIRTUAL=y +CONFIG_FB_MSM=y +CONFIG_FB_MSM_MDSS=y +CONFIG_FB_MSM_MDSS_WRITEBACK=y +CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y +CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_BACKLIGHT_CLASS_DEVICE is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_USB_AUDIO=y +CONFIG_SND_SOC=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_SONY=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_EHCI_MSM=y +CONFIG_USB_ACM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_STORAGE_DATAFAB=y +CONFIG_USB_STORAGE_FREECOM=y +CONFIG_USB_STORAGE_ISD200=y +CONFIG_USB_STORAGE_USBAT=y +CONFIG_USB_STORAGE_SDDR09=y +CONFIG_USB_STORAGE_SDDR55=y +CONFIG_USB_STORAGE_JUMPSHOT=y +CONFIG_USB_STORAGE_ALAUDA=y +CONFIG_USB_STORAGE_ONETOUCH=y +CONFIG_USB_STORAGE_KARMA=y +CONFIG_USB_STORAGE_CYPRESS_ATACB=y +CONFIG_USB_SERIAL=y +CONFIG_USB_EHSET_TEST_FIXTURE=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_DEBUG_FILES=y +CONFIG_USB_GADGET_DEBUG_FS=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_CI13XXX_MSM=y +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_SERIAL=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_RNDIS=y +CONFIG_USB_CONFIGFS_RMNET_BAM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_USB_CONFIGFS_F_HID=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_CDEV=y +CONFIG_USB_CONFIGFS_F_CCID=y +CONFIG_USB_CONFIGFS_F_QDSS=y +CONFIG_USB_CONFIGFS_F_MTP=y +CONFIG_USB_CONFIGFS_F_PTP=y +CONFIG_TYPEC=y +CONFIG_MMC=y +# CONFIG_PWRSEQ_EMMC is not set +# CONFIG_PWRSEQ_SIMPLE is not set +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_IPC_LOGGING=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_CQHCI_CRYPTO=y +CONFIG_MMC_CQHCI_CRYPTO_QTI=y +CONFIG_LEDS_QTI_TRI_LED=y +CONFIG_LEDS_QPNP_FLASH_V2=y +CONFIG_LEDS_QPNP_VIBRATOR_LDO=y +CONFIG_LEDS_QPNP_VIBRATOR=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_EDAC=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PM8XXX=y +CONFIG_DMADEVICES=y +CONFIG_QCOM_SPS_DMA=y +CONFIG_UIO=y +CONFIG_UIO_MSM_SHAREDMEM=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ION=y +CONFIG_ION_POOL_AUTO_REFILL=y +CONFIG_MSM_EXT_DISPLAY=y +CONFIG_QPNP_REVID=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_IPA=y +CONFIG_RMNET_IPA=y +CONFIG_RNDIS_IPA=y +CONFIG_USB_BAM=y +CONFIG_MDSS_PLL=y +CONFIG_QCOM_CLK_SMD_RPM=y +CONFIG_SDM_GCC_429W=y +CONFIG_SDM_DEBUGCC_429W=y +CONFIG_CLOCK_CPU_SDM=y +CONFIG_SDM_DEBUGCC_439=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +CONFIG_MAILBOX=y +CONFIG_ARM_SMMU=y +CONFIG_QCOM_LAZY_MAPPING=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_TESTS=y +CONFIG_RPMSG_CHAR=y +CONFIG_RPMSG_QCOM_GLINK_RPM=y +CONFIG_RPMSG_QCOM_GLINK_SMEM=y +CONFIG_RPMSG_QCOM_SMD=y +CONFIG_MSM_RPM_SMD=y +CONFIG_QCOM_CPUSS_DUMP=y +CONFIG_QCOM_RUN_QUEUE_STATS=y +CONFIG_QCOM_QMI_HELPERS=y +CONFIG_QCOM_SMEM=y +CONFIG_QCOM_SMD_RPM=y +CONFIG_MSM_SPM=y +CONFIG_MSM_L2_SPM=y +CONFIG_QCOM_EARLY_RANDOM=y 
+CONFIG_QCOM_MEMORY_DUMP_V2=y +CONFIG_MSM_DEBUG_LAR_UNLOCK=y +CONFIG_QCOM_SMP2P=y +CONFIG_QCOM_SMSM=y +CONFIG_MSM_PIL_MSS_QDSP6V5=y +CONFIG_QCOM_SECURE_BUFFER=y +CONFIG_MSM_TZ_SMMU=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_QMI_COMM=y +CONFIG_MSM_PIL_SSR_GENERIC=y +CONFIG_MSM_BOOT_STATS=y +CONFIG_MSM_CORE_HANG_DETECT=y +CONFIG_QCOM_WATCHDOG_V2=y +CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y +CONFIG_QCOM_BUS_SCALING=y +CONFIG_QCOM_GLINK=y +CONFIG_MSM_EVENT_TIMER=y +CONFIG_MSM_PM=y +CONFIG_QCOM_DCC=y +CONFIG_QTI_RPM_STATS_LOG=y +CONFIG_QTEE_SHM_BRIDGE=y +CONFIG_MEM_SHARE_QMI_SERVICE=y +CONFIG_MSM_PERFORMANCE=y +CONFIG_QTI_CRYPTO_COMMON=y +CONFIG_QTI_CRYPTO_TZ=y +CONFIG_MSM_BAM_DMUX=y +CONFIG_WCNSS_CORE=y +CONFIG_WCNSS_CORE_PRONTO=y +CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y +CONFIG_DEVFREQ_GOV_PASSIVE=y +CONFIG_QCOM_BIMC_BWMON=y +CONFIG_ARM_MEMLAT_MON=y +CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y +CONFIG_DEVFREQ_GOV_MEMLAT=y +CONFIG_DEVFREQ_SIMPLE_DEV=y +CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_IIO=y +CONFIG_QCOM_SPMI_ADC5=y +CONFIG_QCOM_SPMI_VADC=y +CONFIG_PWM=y +CONFIG_PWM_QTI_LPG=y +CONFIG_QCOM_SHOW_RESUME_IRQ=y +CONFIG_QCOM_MPM=y +CONFIG_RAS=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_BINDERFS=y +CONFIG_QCOM_QFPROM=y +CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_SENSORS_SSC=y +CONFIG_QCOM_KGSL=y +CONFIG_LEGACY_ENERGY_MODEL_DT=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_F2FS_FS=y +CONFIG_F2FS_FS_SECURITY=y +CONFIG_F2FS_CHECK_FS=y +CONFIG_F2FS_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y +CONFIG_FS_VERITY=y +CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y +CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=y +CONFIG_INCREMENTAL_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_SDCARD_FS=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_SECURITY=y +CONFIG_LSM_MMAP_MIN_ADDR=4096 +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_PAGESPAN=y +CONFIG_FORTIFY_SOURCE=y +CONFIG_STATIC_USERMODEHELPER=y +CONFIG_STATIC_USERMODEHELPER_PATH="" +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SMACK=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_LZ4=y +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DEV_QCE=y +CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y +CONFIG_CRYPTO_DEV_QCRYPTO=y +CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS=y +CONFIG_DEBUG_MODULE_LOAD_INFO=y +CONFIG_DEBUG_INFO=y +CONFIG_FRAME_WARN=2048 +CONFIG_PAGE_OWNER=y +CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_PAGEALLOC=y +CONFIG_SLUB_DEBUG_PANIC_ON=y +CONFIG_DEBUG_PANIC_ON_OOM=y +CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y +CONFIG_PAGE_POISONING=y +CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_WQ_WATCHDOG=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_PANIC_ON_SCHED_BUG=y +CONFIG_PANIC_ON_RT_THROTTLING=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_DEBUG_SPINLOCK=y 
+CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_LOCK_TORTURE_TEST=m +CONFIG_DEBUG_SG=y +CONFIG_DEBUG_NOTIFIERS=y +CONFIG_DEBUG_CREDENTIALS=y +CONFIG_FAULT_INJECTION=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_IPC_LOGGING=y +CONFIG_QCOM_RTB=y +CONFIG_QCOM_RTB_SEPARATE_CPUS=y +CONFIG_PREEMPTIRQ_EVENTS=y +CONFIG_IRQSOFF_TRACER=y +CONFIG_PREEMPT_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_LKDTM=y +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_MEMTEST=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +CONFIG_PANIC_ON_DATA_CORRUPTION=y +CONFIG_DEBUG_USER=y +CONFIG_FORCE_PAGES=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_SOURCE_ETM4X=y +CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_CTI=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y +CONFIG_CORESIGHT_REMOTE_ETM=y +CONFIG_CORESIGHT_TGU=y diff --git a/arch/arm/include/asm/etmv4x.h b/arch/arm/include/asm/etmv4x.h new file mode 100644 index 000000000000..1b46b82aba36 --- /dev/null +++ b/arch/arm/include/asm/etmv4x.h @@ -0,0 +1,388 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2016, 2018, 2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ASM_ETMV4X_H +#define __ASM_ETMV4X_H + +#include + + +/* 32 bit register read for AArch32 */ +#define trc_readl(reg) RSYSL_##reg() +#define trc_readq(reg) RSYSL_##reg() + +/* 32 bit register write for AArch32 */ +#define trc_write(val, reg) WSYS_##reg(val) + +#define MRC(op0, op1, crn, crm, op2) \ +({ \ +uint32_t val; \ +asm volatile("mrc p"#op0", "#op1", %0, "#crn", "#crm", "#op2 : "=r" (val)); \ +val; \ +}) + +#define MCR(val, op0, op1, crn, crm, op2) \ +({ \ +asm volatile("mcr p"#op0", "#op1", %0, "#crn", "#crm", "#op2 : : "r" (val));\ +}) + +/* Clock and Power Management Register */ +#define RSYSL_CPMR_EL1() MRC(15, 7, c15, c0, 5) +#define WSYS_CPMR_EL1(val) MCR(val, 15, 7, c15, c0, 5) + +/* + * ETMv4 Registers + * + * Read only + * ETMAUTHSTATUS, ETMDEVARCH, ETMDEVID, ETMIDRn[0-13], ETMOSLSR, ETMSTATR + * + * Write only + * ETMOSLAR + */ +/* 32 bit registers */ +#define RSYSL_ETMAUTHSTATUS() MRC(14, 1, c7, c14, 6) +#define RSYSL_ETMAUXCTLR() MRC(14, 1, c0, c6, 0) +#define RSYSL_ETMCCCTLR() MRC(14, 1, c0, c14, 0) +#define RSYSL_ETMCIDCCTLR0() MRC(14, 1, c3, c0, 2) +#define RSYSL_ETMCNTCTLR0() MRC(14, 1, c0, c4, 5) +#define RSYSL_ETMCNTCTLR1() MRC(14, 1, c0, c5, 5) +#define RSYSL_ETMCNTCTLR2() MRC(14, 1, c0, c6, 5) +#define RSYSL_ETMCNTCTLR3() MRC(14, 1, c0, c7, 5) +#define RSYSL_ETMCNTRLDVR0() MRC(14, 1, c0, c0, 5) +#define RSYSL_ETMCNTRLDVR1() MRC(14, 1, c0, c1, 5) +#define RSYSL_ETMCNTRLDVR2() MRC(14, 1, c0, c2, 5) +#define RSYSL_ETMCNTRLDVR3() MRC(14, 1, c0, c3, 5) +#define RSYSL_ETMCNTVR0() MRC(14, 1, c0, c8, 5) +#define RSYSL_ETMCNTVR1() MRC(14, 1, c0, c9, 5) +#define RSYSL_ETMCNTVR2() MRC(14, 1, c0, c10, 5) +#define RSYSL_ETMCNTVR3() MRC(14, 1, c0, c11, 5) +#define RSYSL_ETMCONFIGR() MRC(14, 1, c0, c4, 0) +#define RSYSL_ETMDEVARCH() 
MRC(14, 1, c7, c15, 6) +#define RSYSL_ETMDEVID() MRC(14, 1, c7, c2, 7) +#define RSYSL_ETMEVENTCTL0R() MRC(14, 1, c0, c8, 0) +#define RSYSL_ETMEVENTCTL1R() MRC(14, 1, c0, c9, 0) +#define RSYSL_ETMEXTINSELR() MRC(14, 1, c0, c8, 4) +#define RSYSL_ETMIDR0() MRC(14, 1, c0, c8, 7) +#define RSYSL_ETMIDR1() MRC(14, 1, c0, c9, 7) +#define RSYSL_ETMIDR10() MRC(14, 1, c0, c2, 6) +#define RSYSL_ETMIDR11() MRC(14, 1, c0, c3, 6) +#define RSYSL_ETMIDR12() MRC(14, 1, c0, c4, 6) +#define RSYSL_ETMIDR13() MRC(14, 1, c0, c5, 6) +#define RSYSL_ETMIDR2() MRC(14, 1, c0, c10, 7) +#define RSYSL_ETMIDR3() MRC(14, 1, c0, c11, 7) +#define RSYSL_ETMIDR4() MRC(14, 1, c0, c12, 7) +#define RSYSL_ETMIDR5() MRC(14, 1, c0, c13, 7) +#define RSYSL_ETMIDR6() MRC(14, 1, c0, c14, 7) +#define RSYSL_ETMIDR7() MRC(14, 1, c0, c15, 7) +#define RSYSL_ETMIDR8() MRC(14, 1, c0, c0, 6) +#define RSYSL_ETMIDR9() MRC(14, 1, c0, c1, 6) +#define RSYSL_ETMIMSPEC0() MRC(14, 1, c0, c0, 7) +#define RSYSL_ETMOSLSR() MRC(14, 1, c1, c1, 4) +#define RSYSL_ETMPRGCTLR() MRC(14, 1, c0, c1, 0) +#define RSYSL_ETMRSCTLR10() MRC(14, 1, c1, c10, 0) +#define RSYSL_ETMRSCTLR11() MRC(14, 1, c1, c11, 0) +#define RSYSL_ETMRSCTLR12() MRC(14, 1, c1, c12, 0) +#define RSYSL_ETMRSCTLR13() MRC(14, 1, c1, c13, 0) +#define RSYSL_ETMRSCTLR14() MRC(14, 1, c1, c14, 0) +#define RSYSL_ETMRSCTLR15() MRC(14, 1, c1, c15, 0) +#define RSYSL_ETMRSCTLR2() MRC(14, 1, c1, c2, 0) +#define RSYSL_ETMRSCTLR3() MRC(14, 1, c1, c3, 0) +#define RSYSL_ETMRSCTLR4() MRC(14, 1, c1, c4, 0) +#define RSYSL_ETMRSCTLR5() MRC(14, 1, c1, c5, 0) +#define RSYSL_ETMRSCTLR6() MRC(14, 1, c1, c6, 0) +#define RSYSL_ETMRSCTLR7() MRC(14, 1, c1, c7, 0) +#define RSYSL_ETMRSCTLR8() MRC(14, 1, c1, c8, 0) +#define RSYSL_ETMRSCTLR9() MRC(14, 1, c1, c9, 0) +#define RSYSL_ETMRSCTLR16() MRC(14, 1, c1, c0, 1) +#define RSYSL_ETMRSCTLR17() MRC(14, 1, c1, c1, 1) +#define RSYSL_ETMRSCTLR18() MRC(14, 1, c1, c2, 1) +#define RSYSL_ETMRSCTLR19() MRC(14, 1, c1, c3, 1) +#define RSYSL_ETMRSCTLR20() MRC(14, 1, c1, c4, 1) +#define RSYSL_ETMRSCTLR21() MRC(14, 1, c1, c5, 1) +#define RSYSL_ETMRSCTLR22() MRC(14, 1, c1, c6, 1) +#define RSYSL_ETMRSCTLR23() MRC(14, 1, c1, c7, 1) +#define RSYSL_ETMRSCTLR24() MRC(14, 1, c1, c8, 1) +#define RSYSL_ETMRSCTLR25() MRC(14, 1, c1, c9, 1) +#define RSYSL_ETMRSCTLR26() MRC(14, 1, c1, c10, 1) +#define RSYSL_ETMRSCTLR27() MRC(14, 1, c1, c11, 1) +#define RSYSL_ETMRSCTLR28() MRC(14, 1, c1, c12, 1) +#define RSYSL_ETMRSCTLR29() MRC(14, 1, c1, c13, 1) +#define RSYSL_ETMRSCTLR30() MRC(14, 1, c1, c14, 1) +#define RSYSL_ETMRSCTLR31() MRC(14, 1, c1, c15, 1) +#define RSYSL_ETMSEQEVR0() MRC(14, 1, c0, c0, 4) +#define RSYSL_ETMSEQEVR1() MRC(14, 1, c0, c1, 4) +#define RSYSL_ETMSEQEVR2() MRC(14, 1, c0, c2, 4) +#define RSYSL_ETMSEQRSTEVR() MRC(14, 1, c0, c6, 4) +#define RSYSL_ETMSEQSTR() MRC(14, 1, c0, c7, 4) +#define RSYSL_ETMSTALLCTLR() MRC(14, 1, c0, c11, 0) +#define RSYSL_ETMSTATR() MRC(14, 1, c0, c3, 0) +#define RSYSL_ETMSYNCPR() MRC(14, 1, c0, c13, 0) +#define RSYSL_ETMTRACEIDR() MRC(14, 1, c0, c0, 1) +#define RSYSL_ETMTSCTLR() MRC(14, 1, c0, c12, 0) +#define RSYSL_ETMVICTLR() MRC(14, 1, c0, c0, 2) +#define RSYSL_ETMVIIECTLR() MRC(14, 1, c0, c1, 2) +#define RSYSL_ETMVISSCTLR() MRC(14, 1, c0, c2, 2) +#define RSYSL_ETMSSCCR0() MRC(14, 1, c1, c0, 2) +#define RSYSL_ETMSSCCR1() MRC(14, 1, c1, c1, 2) +#define RSYSL_ETMSSCCR2() MRC(14, 1, c1, c2, 2) +#define RSYSL_ETMSSCCR3() MRC(14, 1, c1, c3, 2) +#define RSYSL_ETMSSCCR4() MRC(14, 1, c1, c4, 2) +#define RSYSL_ETMSSCCR5() MRC(14, 1, c1, c5, 2) +#define RSYSL_ETMSSCCR6() 
MRC(14, 1, c1, c6, 2) +#define RSYSL_ETMSSCCR7() MRC(14, 1, c1, c7, 2) +#define RSYSL_ETMSSCSR0() MRC(14, 1, c1, c8, 2) +#define RSYSL_ETMSSCSR1() MRC(14, 1, c1, c9, 2) +#define RSYSL_ETMSSCSR2() MRC(14, 1, c1, c10, 2) +#define RSYSL_ETMSSCSR3() MRC(14, 1, c1, c11, 2) +#define RSYSL_ETMSSCSR4() MRC(14, 1, c1, c12, 2) +#define RSYSL_ETMSSCSR5() MRC(14, 1, c1, c13, 2) +#define RSYSL_ETMSSCSR6() MRC(14, 1, c1, c14, 2) +#define RSYSL_ETMSSCSR7() MRC(14, 1, c1, c15, 2) +#define RSYSL_ETMSSPCICR0() MRC(14, 1, c1, c0, 3) +#define RSYSL_ETMSSPCICR1() MRC(14, 1, c1, c1, 3) +#define RSYSL_ETMSSPCICR2() MRC(14, 1, c1, c2, 3) +#define RSYSL_ETMSSPCICR3() MRC(14, 1, c1, c3, 3) +#define RSYSL_ETMSSPCICR4() MRC(14, 1, c1, c4, 3) +#define RSYSL_ETMSSPCICR5() MRC(14, 1, c1, c5, 3) +#define RSYSL_ETMSSPCICR6() MRC(14, 1, c1, c6, 3) +#define RSYSL_ETMSSPCICR7() MRC(14, 1, c1, c7, 3) + +/* + * 64 bit registers, ignore the upper 32bit + * A read from a 32-bit register location using a 64-bit access result + * in the upper 32bits being return as RES0. + */ +#define RSYSL_ETMACATR0() MRC(14, 1, c2, c0, 2) +#define RSYSL_ETMACATR1() MRC(14, 1, c2, c2, 2) +#define RSYSL_ETMACATR2() MRC(14, 1, c2, c4, 2) +#define RSYSL_ETMACATR3() MRC(14, 1, c2, c6, 2) +#define RSYSL_ETMACATR4() MRC(14, 1, c2, c8, 2) +#define RSYSL_ETMACATR5() MRC(14, 1, c2, c10, 2) +#define RSYSL_ETMACATR6() MRC(14, 1, c2, c12, 2) +#define RSYSL_ETMACATR7() MRC(14, 1, c2, c14, 2) +#define RSYSL_ETMACATR8() MRC(14, 1, c2, c0, 3) +#define RSYSL_ETMACATR9() MRC(14, 1, c2, c2, 3) +#define RSYSL_ETMACATR10() MRC(14, 1, c2, c4, 3) +#define RSYSL_ETMACATR11() MRC(14, 1, c2, c6, 3) +#define RSYSL_ETMACATR12() MRC(14, 1, c2, c8, 3) +#define RSYSL_ETMACATR13() MRC(14, 1, c2, c10, 3) +#define RSYSL_ETMACATR14() MRC(14, 1, c2, c12, 3) +#define RSYSL_ETMACATR15() MRC(14, 1, c2, c14, 3) +#define RSYSL_ETMCIDCVR0() MRC(14, 1, c3, c0, 0) +#define RSYSL_ETMCIDCVR1() MRC(14, 1, c3, c2, 0) +#define RSYSL_ETMCIDCVR2() MRC(14, 1, c3, c4, 0) +#define RSYSL_ETMCIDCVR3() MRC(14, 1, c3, c6, 0) +#define RSYSL_ETMCIDCVR4() MRC(14, 1, c3, c8, 0) +#define RSYSL_ETMCIDCVR5() MRC(14, 1, c3, c10, 0) +#define RSYSL_ETMCIDCVR6() MRC(14, 1, c3, c12, 0) +#define RSYSL_ETMCIDCVR7() MRC(14, 1, c3, c14, 0) +#define RSYSL_ETMACVR0() MRC(14, 1, c2, c0, 0) +#define RSYSL_ETMACVR1() MRC(14, 1, c2, c2, 0) +#define RSYSL_ETMACVR2() MRC(14, 1, c2, c4, 0) +#define RSYSL_ETMACVR3() MRC(14, 1, c2, c6, 0) +#define RSYSL_ETMACVR4() MRC(14, 1, c2, c8, 0) +#define RSYSL_ETMACVR5() MRC(14, 1, c2, c10, 0) +#define RSYSL_ETMACVR6() MRC(14, 1, c2, c12, 0) +#define RSYSL_ETMACVR7() MRC(14, 1, c2, c14, 0) +#define RSYSL_ETMACVR8() MRC(14, 1, c2, c0, 1) +#define RSYSL_ETMACVR9() MRC(14, 1, c2, c2, 1) +#define RSYSL_ETMACVR10() MRC(14, 1, c2, c4, 1) +#define RSYSL_ETMACVR11() MRC(14, 1, c2, c6, 1) +#define RSYSL_ETMACVR12() MRC(14, 1, c2, c8, 1) +#define RSYSL_ETMACVR13() MRC(14, 1, c2, c10, 1) +#define RSYSL_ETMACVR14() MRC(14, 1, c2, c12, 1) +#define RSYSL_ETMACVR15() MRC(14, 1, c2, c14, 1) +#define RSYSL_ETMVMIDCVR0() MRC(14, 1, c3, c0, 1) +#define RSYSL_ETMVMIDCVR1() MRC(14, 1, c3, c2, 1) +#define RSYSL_ETMVMIDCVR2() MRC(14, 1, c3, c4, 1) +#define RSYSL_ETMVMIDCVR3() MRC(14, 1, c3, c6, 1) +#define RSYSL_ETMVMIDCVR4() MRC(14, 1, c3, c8, 1) +#define RSYSL_ETMVMIDCVR5() MRC(14, 1, c3, c10, 1) +#define RSYSL_ETMVMIDCVR6() MRC(14, 1, c3, c12, 1) +#define RSYSL_ETMVMIDCVR7() MRC(14, 1, c3, c14, 1) +#define RSYSL_ETMDVCVR0() MRC(14, 1, c2, c0, 4) +#define RSYSL_ETMDVCVR1() MRC(14, 1, c2, c4, 4) +#define 
RSYSL_ETMDVCVR2() MRC(14, 1, c2, c8, 4) +#define RSYSL_ETMDVCVR3() MRC(14, 1, c2, c12, 4) +#define RSYSL_ETMDVCVR4() MRC(14, 1, c2, c0, 5) +#define RSYSL_ETMDVCVR5() MRC(14, 1, c2, c4, 5) +#define RSYSL_ETMDVCVR6() MRC(14, 1, c2, c8, 5) +#define RSYSL_ETMDVCVR7() MRC(14, 1, c2, c12, 5) +#define RSYSL_ETMDVCMR0() MRC(14, 1, c2, c0, 6) +#define RSYSL_ETMDVCMR1() MRC(14, 1, c2, c4, 6) +#define RSYSL_ETMDVCMR2() MRC(14, 1, c2, c8, 6) +#define RSYSL_ETMDVCMR3() MRC(14, 1, c2, c12, 6) +#define RSYSL_ETMDVCMR4() MRC(14, 1, c2, c0, 7) +#define RSYSL_ETMDVCMR5() MRC(14, 1, c2, c4, 7) +#define RSYSL_ETMDVCMR6() MRC(14, 1, c2, c8, 7) +#define RSYSL_ETMDVCMR7() MRC(14, 1, c2, c12, 7) + +/* + * 32 and 64 bit registers + * A write to a 32-bit register location using a 64-bit access result + * in the upper 32bit of access + */ +#define WSYS_ETMAUXCTLR(val) MCR(val, 14, 1, c0, c6, 0) +#define WSYS_ETMACATR0(val) MCR(val, 14, 1, c2, c0, 2) +#define WSYS_ETMACATR1(val) MCR(val, 14, 1, c2, c2, 2) +#define WSYS_ETMACATR2(val) MCR(val, 14, 1, c2, c4, 2) +#define WSYS_ETMACATR3(val) MCR(val, 14, 1, c2, c6, 2) +#define WSYS_ETMACATR4(val) MCR(val, 14, 1, c2, c8, 2) +#define WSYS_ETMACATR5(val) MCR(val, 14, 1, c2, c10, 2) +#define WSYS_ETMACATR6(val) MCR(val, 14, 1, c2, c12, 2) +#define WSYS_ETMACATR7(val) MCR(val, 14, 1, c2, c14, 2) +#define WSYS_ETMACATR8(val) MCR(val, 14, 1, c2, c0, 3) +#define WSYS_ETMACATR9(val) MCR(val, 14, 1, c2, c2, 3) +#define WSYS_ETMACATR10(val) MCR(val, 14, 1, c2, c4, 3) +#define WSYS_ETMACATR11(val) MCR(val, 14, 1, c2, c6, 3) +#define WSYS_ETMACATR12(val) MCR(val, 14, 1, c2, c8, 3) +#define WSYS_ETMACATR13(val) MCR(val, 14, 1, c2, c10, 3) +#define WSYS_ETMACATR14(val) MCR(val, 14, 1, c2, c12, 3) +#define WSYS_ETMACATR15(val) MCR(val, 14, 1, c2, c14, 3) +#define WSYS_ETMACVR0(val) MCR(val, 14, 1, c2, c0, 0) +#define WSYS_ETMACVR1(val) MCR(val, 14, 1, c2, c2, 0) +#define WSYS_ETMACVR2(val) MCR(val, 14, 1, c2, c4, 0) +#define WSYS_ETMACVR3(val) MCR(val, 14, 1, c2, c6, 0) +#define WSYS_ETMACVR4(val) MCR(val, 14, 1, c2, c8, 0) +#define WSYS_ETMACVR5(val) MCR(val, 14, 1, c2, c10, 0) +#define WSYS_ETMACVR6(val) MCR(val, 14, 1, c2, c12, 0) +#define WSYS_ETMACVR7(val) MCR(val, 14, 1, c2, c14, 0) +#define WSYS_ETMACVR8(val) MCR(val, 14, 1, c2, c0, 1) +#define WSYS_ETMACVR9(val) MCR(val, 14, 1, c2, c2, 1) +#define WSYS_ETMACVR10(val) MCR(val, 14, 1, c2, c4, 1) +#define WSYS_ETMACVR11(val) MCR(val, 14, 1, c2, c6, 1) +#define WSYS_ETMACVR12(val) MCR(val, 14, 1, c2, c8, 1) +#define WSYS_ETMACVR13(val) MCR(val, 14, 1, c2, c10, 1) +#define WSYS_ETMACVR14(val) MCR(val, 14, 1, c2, c12, 1) +#define WSYS_ETMACVR15(val) MCR(val, 14, 1, c2, c14, 1) +#define WSYS_ETMCCCTLR(val) MCR(val, 14, 1, c0, c14, 0) +#define WSYS_ETMCIDCCTLR0(val) MCR(val, 14, 1, c3, c0, 2) +#define WSYS_ETMCIDCVR0(val) MCR(val, 14, 1, c3, c0, 0) +#define WSYS_ETMCIDCVR1(val) MCR(val, 14, 1, c3, c2, 0) +#define WSYS_ETMCIDCVR2(val) MCR(val, 14, 1, c3, c4, 0) +#define WSYS_ETMCIDCVR3(val) MCR(val, 14, 1, c3, c6, 0) +#define WSYS_ETMCIDCVR4(val) MCR(val, 14, 1, c3, c8, 0) +#define WSYS_ETMCIDCVR5(val) MCR(val, 14, 1, c3, c10, 0) +#define WSYS_ETMCIDCVR6(val) MCR(val, 14, 1, c3, c12, 0) +#define WSYS_ETMCIDCVR7(val) MCR(val, 14, 1, c3, c14, 0) +#define WSYS_ETMCNTCTLR0(val) MCR(val, 14, 1, c0, c4, 5) +#define WSYS_ETMCNTCTLR1(val) MCR(val, 14, 1, c0, c5, 5) +#define WSYS_ETMCNTCTLR2(val) MCR(val, 14, 1, c0, c6, 5) +#define WSYS_ETMCNTCTLR3(val) MCR(val, 14, 1, c0, c7, 5) +#define WSYS_ETMCNTRLDVR0(val) MCR(val, 14, 1, c0, c0, 5) 
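The trc_readl()/trc_write() wrappers at the top of this header paste the register name onto the RSYSL_/WSYS_ prefixes, so each ETM register access compiles down to a single CP14 MRC or MCR instruction. A minimal usage sketch, with a hypothetical helper name (etm_set_counter0_reload is illustrative only and not part of this header):

/* Hypothetical helper: program ETM counter 0's reload value through the
 * accessors defined in this file. trc_readl(ETMCNTRLDVR0) expands to
 * RSYSL_ETMCNTRLDVR0(), i.e. "mrc p14, 1, %0, c0, c0, 5", and trc_write()
 * expands to WSYS_ETMCNTRLDVR0(val), the matching MCR. */
static inline void etm_set_counter0_reload(uint32_t reload)
{
	if (trc_readl(ETMCNTRLDVR0) != reload)
		trc_write(reload, ETMCNTRLDVR0);
}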
+#define WSYS_ETMCNTRLDVR1(val) MCR(val, 14, 1, c0, c1, 5) +#define WSYS_ETMCNTRLDVR2(val) MCR(val, 14, 1, c0, c2, 5) +#define WSYS_ETMCNTRLDVR3(val) MCR(val, 14, 1, c0, c3, 5) +#define WSYS_ETMCNTVR0(val) MCR(val, 14, 1, c0, c8, 5) +#define WSYS_ETMCNTVR1(val) MCR(val, 14, 1, c0, c9, 5) +#define WSYS_ETMCNTVR2(val) MCR(val, 14, 1, c0, c10, 5) +#define WSYS_ETMCNTVR3(val) MCR(val, 14, 1, c0, c11, 5) +#define WSYS_ETMCONFIGR(val) MCR(val, 14, 1, c0, c4, 0) +#define WSYS_ETMEVENTCTL0R(val) MCR(val, 14, 1, c0, c8, 0) +#define WSYS_ETMEVENTCTL1R(val) MCR(val, 14, 1, c0, c9, 0) +#define WSYS_ETMEXTINSELR(val) MCR(val, 14, 1, c0, c8, 4) +#define WSYS_ETMIMSPEC0(val) MCR(val, 14, 1, c0, c0, 7) +#define WSYS_ETMOSLAR(val) MCR(val, 14, 1, c1, c0, 4) +#define WSYS_ETMPRGCTLR(val) MCR(val, 14, 1, c0, c1, 0) +#define WSYS_ETMRSCTLR10(val) MCR(val, 14, 1, c1, c10, 0) +#define WSYS_ETMRSCTLR11(val) MCR(val, 14, 1, c1, c11, 0) +#define WSYS_ETMRSCTLR12(val) MCR(val, 14, 1, c1, c12, 0) +#define WSYS_ETMRSCTLR13(val) MCR(val, 14, 1, c1, c13, 0) +#define WSYS_ETMRSCTLR14(val) MCR(val, 14, 1, c1, c14, 0) +#define WSYS_ETMRSCTLR15(val) MCR(val, 14, 1, c1, c15, 0) +#define WSYS_ETMRSCTLR2(val) MCR(val, 14, 1, c1, c2, 0) +#define WSYS_ETMRSCTLR3(val) MCR(val, 14, 1, c1, c3, 0) +#define WSYS_ETMRSCTLR4(val) MCR(val, 14, 1, c1, c4, 0) +#define WSYS_ETMRSCTLR5(val) MCR(val, 14, 1, c1, c5, 0) +#define WSYS_ETMRSCTLR6(val) MCR(val, 14, 1, c1, c6, 0) +#define WSYS_ETMRSCTLR7(val) MCR(val, 14, 1, c1, c7, 0) +#define WSYS_ETMRSCTLR8(val) MCR(val, 14, 1, c1, c8, 0) +#define WSYS_ETMRSCTLR9(val) MCR(val, 14, 1, c1, c9, 0) +#define WSYS_ETMRSCTLR16(val) MCR(val, 14, 1, c1, c0, 1) +#define WSYS_ETMRSCTLR17(val) MCR(val, 14, 1, c1, c1, 1) +#define WSYS_ETMRSCTLR18(val) MCR(val, 14, 1, c1, c2, 1) +#define WSYS_ETMRSCTLR19(val) MCR(val, 14, 1, c1, c3, 1) +#define WSYS_ETMRSCTLR20(val) MCR(val, 14, 1, c1, c4, 1) +#define WSYS_ETMRSCTLR21(val) MCR(val, 14, 1, c1, c5, 1) +#define WSYS_ETMRSCTLR22(val) MCR(val, 14, 1, c1, c6, 1) +#define WSYS_ETMRSCTLR23(val) MCR(val, 14, 1, c1, c7, 1) +#define WSYS_ETMRSCTLR24(val) MCR(val, 14, 1, c1, c8, 1) +#define WSYS_ETMRSCTLR25(val) MCR(val, 14, 1, c1, c9, 1) +#define WSYS_ETMRSCTLR26(val) MCR(val, 14, 1, c1, c10, 1) +#define WSYS_ETMRSCTLR27(val) MCR(val, 14, 1, c1, c11, 1) +#define WSYS_ETMRSCTLR28(val) MCR(val, 14, 1, c1, c12, 1) +#define WSYS_ETMRSCTLR29(val) MCR(val, 14, 1, c1, c13, 1) +#define WSYS_ETMRSCTLR30(val) MCR(val, 14, 1, c1, c14, 1) +#define WSYS_ETMRSCTLR31(val) MCR(val, 14, 1, c1, c15, 1) +#define WSYS_ETMSEQEVR0(val) MCR(val, 14, 1, c0, c0, 4) +#define WSYS_ETMSEQEVR1(val) MCR(val, 14, 1, c0, c1, 4) +#define WSYS_ETMSEQEVR2(val) MCR(val, 14, 1, c0, c2, 4) +#define WSYS_ETMSEQRSTEVR(val) MCR(val, 14, 1, c0, c6, 4) +#define WSYS_ETMSEQSTR(val) MCR(val, 14, 1, c0, c7, 4) +#define WSYS_ETMSTALLCTLR(val) MCR(val, 14, 1, c0, c11, 0) +#define WSYS_ETMSYNCPR(val) MCR(val, 14, 1, c0, c13, 0) +#define WSYS_ETMTRACEIDR(val) MCR(val, 14, 1, c0, c0, 1) +#define WSYS_ETMTSCTLR(val) MCR(val, 14, 1, c0, c12, 0) +#define WSYS_ETMVICTLR(val) MCR(val, 14, 1, c0, c0, 2) +#define WSYS_ETMVIIECTLR(val) MCR(val, 14, 1, c0, c1, 2) +#define WSYS_ETMVISSCTLR(val) MCR(val, 14, 1, c0, c2, 2) +#define WSYS_ETMVMIDCVR0(val) MCR(val, 14, 1, c3, c0, 1) +#define WSYS_ETMVMIDCVR1(val) MCR(val, 14, 1, c3, c2, 1) +#define WSYS_ETMVMIDCVR2(val) MCR(val, 14, 1, c3, c4, 1) +#define WSYS_ETMVMIDCVR3(val) MCR(val, 14, 1, c3, c6, 1) +#define WSYS_ETMVMIDCVR4(val) MCR(val, 14, 1, c3, c8, 1) +#define 
WSYS_ETMVMIDCVR5(val) MCR(val, 14, 1, c3, c10, 1) +#define WSYS_ETMVMIDCVR6(val) MCR(val, 14, 1, c3, c12, 1) +#define WSYS_ETMVMIDCVR7(val) MCR(val, 14, 1, c3, c14, 1) +#define WSYS_ETMDVCVR0(val) MCR(val, 14, 1, c2, c0, 4) +#define WSYS_ETMDVCVR1(val) MCR(val, 14, 1, c2, c4, 4) +#define WSYS_ETMDVCVR2(val) MCR(val, 14, 1, c2, c8, 4) +#define WSYS_ETMDVCVR3(val) MCR(val, 14, 1, c2, c12, 4) +#define WSYS_ETMDVCVR4(val) MCR(val, 14, 1, c2, c0, 5) +#define WSYS_ETMDVCVR5(val) MCR(val, 14, 1, c2, c4, 5) +#define WSYS_ETMDVCVR6(val) MCR(val, 14, 1, c2, c8, 5) +#define WSYS_ETMDVCVR7(val) MCR(val, 14, 1, c2, c12, 5) +#define WSYS_ETMDVCMR0(val) MCR(val, 14, 1, c2, c0, 6) +#define WSYS_ETMDVCMR1(val) MCR(val, 14, 1, c2, c4, 6) +#define WSYS_ETMDVCMR2(val) MCR(val, 14, 1, c2, c8, 6) +#define WSYS_ETMDVCMR3(val) MCR(val, 14, 1, c2, c12, 6) +#define WSYS_ETMDVCMR4(val) MCR(val, 14, 1, c2, c0, 7) +#define WSYS_ETMDVCMR5(val) MCR(val, 14, 1, c2, c4, 7) +#define WSYS_ETMDVCMR6(val) MCR(val, 14, 1, c2, c8, 7) +#define WSYS_ETMDVCMR7(val) MCR(val, 14, 1, c2, c12, 7) +#define WSYS_ETMSSCCR0(val) MCR(val, 14, 1, c1, c0, 2) +#define WSYS_ETMSSCCR1(val) MCR(val, 14, 1, c1, c1, 2) +#define WSYS_ETMSSCCR2(val) MCR(val, 14, 1, c1, c2, 2) +#define WSYS_ETMSSCCR3(val) MCR(val, 14, 1, c1, c3, 2) +#define WSYS_ETMSSCCR4(val) MCR(val, 14, 1, c1, c4, 2) +#define WSYS_ETMSSCCR5(val) MCR(val, 14, 1, c1, c5, 2) +#define WSYS_ETMSSCCR6(val) MCR(val, 14, 1, c1, c6, 2) +#define WSYS_ETMSSCCR7(val) MCR(val, 14, 1, c1, c7, 2) +#define WSYS_ETMSSCSR0(val) MCR(val, 14, 1, c1, c8, 2) +#define WSYS_ETMSSCSR1(val) MCR(val, 14, 1, c1, c9, 2) +#define WSYS_ETMSSCSR2(val) MCR(val, 14, 1, c1, c10, 2) +#define WSYS_ETMSSCSR3(val) MCR(val, 14, 1, c1, c11, 2) +#define WSYS_ETMSSCSR4(val) MCR(val, 14, 1, c1, c12, 2) +#define WSYS_ETMSSCSR5(val) MCR(val, 14, 1, c1, c13, 2) +#define WSYS_ETMSSCSR6(val) MCR(val, 14, 1, c1, c14, 2) +#define WSYS_ETMSSCSR7(val) MCR(val, 14, 1, c1, c15, 2) +#define WSYS_ETMSSPCICR0(val) MCR(val, 14, 1, c1, c0, 3) +#define WSYS_ETMSSPCICR1(val) MCR(val, 14, 1, c1, c1, 3) +#define WSYS_ETMSSPCICR2(val) MCR(val, 14, 1, c1, c2, 3) +#define WSYS_ETMSSPCICR3(val) MCR(val, 14, 1, c1, c3, 3) +#define WSYS_ETMSSPCICR4(val) MCR(val, 14, 1, c1, c4, 3) +#define WSYS_ETMSSPCICR5(val) MCR(val, 14, 1, c1, c5, 3) +#define WSYS_ETMSSPCICR6(val) MCR(val, 14, 1, c1, c6, 3) +#define WSYS_ETMSSPCICR7(val) MCR(val, 14, 1, c1, c7, 3) + +#endif diff --git a/arch/arm/include/asm/hardware/debugv8.h b/arch/arm/include/asm/hardware/debugv8.h new file mode 100644 index 000000000000..6ba0026c0946 --- /dev/null +++ b/arch/arm/include/asm/hardware/debugv8.h @@ -0,0 +1,248 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2016, 2018, 2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __ASM_HARDWARE_DEBUGV8_H +#define __ASM_HARDWARE_DEBUGV8_H + +#include + +/* Accessors for CP14 registers */ +#define dbg_read(reg) RCP14_##reg() +#define dbg_write(val, reg) WCP14_##reg(val) + +/* MRC14 registers */ +#define MRC14(op1, crn, crm, op2) \ +({ \ +uint32_t val; \ +asm volatile("mrc p14, "#op1", %0, "#crn", "#crm", "#op2 : "=r" (val)); \ +val; \ +}) + +/* MCR14 registers */ +#define MCR14(val, op1, crn, crm, op2) \ +({ \ +asm volatile("mcr p14, "#op1", %0, "#crn", "#crm", "#op2 : : "r" (val));\ +}) + +/* + * Debug Registers + * + * Read only + * DBGDIDR, DBGDSCRint, DBGDTRRXint, DBGDRAR, DBGOSLSR, DBGOSSRR, DBGDSAR, + * DBGAUTHSTATUS, DBGDEVID2, DBGDEVID1, DBGDEVID + * + * Write only + * DBGDTRTXint, DBGOSLAR + */ +#define RCP14_DBGDIDR() MRC14(0, c0, c0, 0) +#define RCP14_DBGDSCRint() MRC14(0, c0, c1, 0) +#define RCP14_DBGDCCINT() MRC14(0, c0, c2, 0) +#define RCP14_DBGDTRRXint() MRC14(0, c0, c5, 0) +#define RCP14_DBGWFAR() MRC14(0, c0, c6, 0) +#define RCP14_DBGVCR() MRC14(0, c0, c7, 0) +#define RCP14_DBGDTRRXext() MRC14(0, c0, c0, 2) +#define RCP14_DBGDSCRext() MRC14(0, c0, c2, 2) +#define RCP14_DBGDTRTXext() MRC14(0, c0, c3, 2) +#define RCP14_DBGOSECCR() MRC14(0, c0, c6, 2) +#define RCP14_DBGBVR0() MRC14(0, c0, c0, 4) +#define RCP14_DBGBVR1() MRC14(0, c0, c1, 4) +#define RCP14_DBGBVR2() MRC14(0, c0, c2, 4) +#define RCP14_DBGBVR3() MRC14(0, c0, c3, 4) +#define RCP14_DBGBVR4() MRC14(0, c0, c4, 4) +#define RCP14_DBGBVR5() MRC14(0, c0, c5, 4) +#define RCP14_DBGBVR6() MRC14(0, c0, c6, 4) +#define RCP14_DBGBVR7() MRC14(0, c0, c7, 4) +#define RCP14_DBGBVR8() MRC14(0, c0, c8, 4) +#define RCP14_DBGBVR9() MRC14(0, c0, c9, 4) +#define RCP14_DBGBVR10() MRC14(0, c0, c10, 4) +#define RCP14_DBGBVR11() MRC14(0, c0, c11, 4) +#define RCP14_DBGBVR12() MRC14(0, c0, c12, 4) +#define RCP14_DBGBVR13() MRC14(0, c0, c13, 4) +#define RCP14_DBGBVR14() MRC14(0, c0, c14, 4) +#define RCP14_DBGBVR15() MRC14(0, c0, c15, 4) +#define RCP14_DBGBCR0() MRC14(0, c0, c0, 5) +#define RCP14_DBGBCR1() MRC14(0, c0, c1, 5) +#define RCP14_DBGBCR2() MRC14(0, c0, c2, 5) +#define RCP14_DBGBCR3() MRC14(0, c0, c3, 5) +#define RCP14_DBGBCR4() MRC14(0, c0, c4, 5) +#define RCP14_DBGBCR5() MRC14(0, c0, c5, 5) +#define RCP14_DBGBCR6() MRC14(0, c0, c6, 5) +#define RCP14_DBGBCR7() MRC14(0, c0, c7, 5) +#define RCP14_DBGBCR8() MRC14(0, c0, c8, 5) +#define RCP14_DBGBCR9() MRC14(0, c0, c9, 5) +#define RCP14_DBGBCR10() MRC14(0, c0, c10, 5) +#define RCP14_DBGBCR11() MRC14(0, c0, c11, 5) +#define RCP14_DBGBCR12() MRC14(0, c0, c12, 5) +#define RCP14_DBGBCR13() MRC14(0, c0, c13, 5) +#define RCP14_DBGBCR14() MRC14(0, c0, c14, 5) +#define RCP14_DBGBCR15() MRC14(0, c0, c15, 5) +#define RCP14_DBGWVR0() MRC14(0, c0, c0, 6) +#define RCP14_DBGWVR1() MRC14(0, c0, c1, 6) +#define RCP14_DBGWVR2() MRC14(0, c0, c2, 6) +#define RCP14_DBGWVR3() MRC14(0, c0, c3, 6) +#define RCP14_DBGWVR4() MRC14(0, c0, c4, 6) +#define RCP14_DBGWVR5() MRC14(0, c0, c5, 6) +#define RCP14_DBGWVR6() MRC14(0, c0, c6, 6) +#define RCP14_DBGWVR7() MRC14(0, c0, c7, 6) +#define RCP14_DBGWVR8() MRC14(0, c0, c8, 6) +#define RCP14_DBGWVR9() MRC14(0, c0, c9, 6) +#define RCP14_DBGWVR10() MRC14(0, c0, c10, 6) +#define RCP14_DBGWVR11() MRC14(0, c0, c11, 6) +#define RCP14_DBGWVR12() MRC14(0, c0, c12, 6) +#define RCP14_DBGWVR13() MRC14(0, c0, c13, 6) +#define RCP14_DBGWVR14() MRC14(0, c0, c14, 6) +#define RCP14_DBGWVR15() MRC14(0, c0, c15, 6) +#define RCP14_DBGWCR0() MRC14(0, c0, c0, 7) +#define RCP14_DBGWCR1() MRC14(0, c0, c1, 7) +#define RCP14_DBGWCR2() MRC14(0, c0, c2, 7) 
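In the same style as the ETM header above, the dbg_read()/dbg_write() wrappers concatenate the register name onto RCP14_/WCP14_, so each v8 debug-register access becomes one CP14 instruction. A small sketch, with a hypothetical helper name (dbgv8_read_dscr is illustrative only):

/* Hypothetical helper: dbg_read(DBGDSCRext) expands to RCP14_DBGDSCRext(),
 * i.e. "mrc p14, 0, %0, c0, c2, 2", returning the external-view DSCR. */
static inline uint32_t dbgv8_read_dscr(void)
{
	return dbg_read(DBGDSCRext);
}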
+#define RCP14_DBGWCR3() MRC14(0, c0, c3, 7) +#define RCP14_DBGWCR4() MRC14(0, c0, c4, 7) +#define RCP14_DBGWCR5() MRC14(0, c0, c5, 7) +#define RCP14_DBGWCR6() MRC14(0, c0, c6, 7) +#define RCP14_DBGWCR7() MRC14(0, c0, c7, 7) +#define RCP14_DBGWCR8() MRC14(0, c0, c8, 7) +#define RCP14_DBGWCR9() MRC14(0, c0, c9, 7) +#define RCP14_DBGWCR10() MRC14(0, c0, c10, 7) +#define RCP14_DBGWCR11() MRC14(0, c0, c11, 7) +#define RCP14_DBGWCR12() MRC14(0, c0, c12, 7) +#define RCP14_DBGWCR13() MRC14(0, c0, c13, 7) +#define RCP14_DBGWCR14() MRC14(0, c0, c14, 7) +#define RCP14_DBGWCR15() MRC14(0, c0, c15, 7) +#define RCP14_DBGDRAR() MRC14(0, c1, c0, 0) +#define RCP14_DBGBXVR0() MRC14(0, c1, c0, 1) +#define RCP14_DBGBXVR1() MRC14(0, c1, c1, 1) +#define RCP14_DBGBXVR2() MRC14(0, c1, c2, 1) +#define RCP14_DBGBXVR3() MRC14(0, c1, c3, 1) +#define RCP14_DBGBXVR4() MRC14(0, c1, c4, 1) +#define RCP14_DBGBXVR5() MRC14(0, c1, c5, 1) +#define RCP14_DBGBXVR6() MRC14(0, c1, c6, 1) +#define RCP14_DBGBXVR7() MRC14(0, c1, c7, 1) +#define RCP14_DBGBXVR8() MRC14(0, c1, c8, 1) +#define RCP14_DBGBXVR9() MRC14(0, c1, c9, 1) +#define RCP14_DBGBXVR10() MRC14(0, c1, c10, 1) +#define RCP14_DBGBXVR11() MRC14(0, c1, c11, 1) +#define RCP14_DBGBXVR12() MRC14(0, c1, c12, 1) +#define RCP14_DBGBXVR13() MRC14(0, c1, c13, 1) +#define RCP14_DBGBXVR14() MRC14(0, c1, c14, 1) +#define RCP14_DBGBXVR15() MRC14(0, c1, c15, 1) +#define RCP14_DBGOSLSR() MRC14(0, c1, c1, 4) +#define RCP14_DBGOSSRR() MRC14(0, c1, c2, 4) +#define RCP14_DBGOSDLR() MRC14(0, c1, c3, 4) +#define RCP14_DBGPRCR() MRC14(0, c1, c4, 4) +#define RCP14_DBGPRSR() MRC14(0, c1, c5, 4) +#define RCP14_DBGDSAR() MRC14(0, c2, c0, 0) +#define RCP14_DBGITCTRL() MRC14(0, c7, c0, 4) +#define RCP14_DBGCLAIMSET() MRC14(0, c7, c8, 6) +#define RCP14_DBGCLAIMCLR() MRC14(0, c7, c9, 6) +#define RCP14_DBGAUTHSTATUS() MRC14(0, c7, c14, 6) +#define RCP14_DBGDEVID2() MRC14(0, c7, c0, 7) +#define RCP14_DBGDEVID1() MRC14(0, c7, c1, 7) +#define RCP14_DBGDEVID() MRC14(0, c7, c2, 7) + +#define WCP14_DBGDCCINT(val) MCR14(val, 0, c0, c2, 0) +#define WCP14_DBGDTRTXint(val) MCR14(val, 0, c0, c5, 0) +#define WCP14_DBGWFAR(val) MCR14(val, 0, c0, c6, 0) +#define WCP14_DBGVCR(val) MCR14(val, 0, c0, c7, 0) +#define WCP14_DBGDTRRXext(val) MCR14(val, 0, c0, c0, 2) +#define WCP14_DBGDSCRext(val) MCR14(val, 0, c0, c2, 2) +#define WCP14_DBGDTRTXext(val) MCR14(val, 0, c0, c3, 2) +#define WCP14_DBGOSECCR(val) MCR14(val, 0, c0, c6, 2) +#define WCP14_DBGBVR0(val) MCR14(val, 0, c0, c0, 4) +#define WCP14_DBGBVR1(val) MCR14(val, 0, c0, c1, 4) +#define WCP14_DBGBVR2(val) MCR14(val, 0, c0, c2, 4) +#define WCP14_DBGBVR3(val) MCR14(val, 0, c0, c3, 4) +#define WCP14_DBGBVR4(val) MCR14(val, 0, c0, c4, 4) +#define WCP14_DBGBVR5(val) MCR14(val, 0, c0, c5, 4) +#define WCP14_DBGBVR6(val) MCR14(val, 0, c0, c6, 4) +#define WCP14_DBGBVR7(val) MCR14(val, 0, c0, c7, 4) +#define WCP14_DBGBVR8(val) MCR14(val, 0, c0, c8, 4) +#define WCP14_DBGBVR9(val) MCR14(val, 0, c0, c9, 4) +#define WCP14_DBGBVR10(val) MCR14(val, 0, c0, c10, 4) +#define WCP14_DBGBVR11(val) MCR14(val, 0, c0, c11, 4) +#define WCP14_DBGBVR12(val) MCR14(val, 0, c0, c12, 4) +#define WCP14_DBGBVR13(val) MCR14(val, 0, c0, c13, 4) +#define WCP14_DBGBVR14(val) MCR14(val, 0, c0, c14, 4) +#define WCP14_DBGBVR15(val) MCR14(val, 0, c0, c15, 4) +#define WCP14_DBGBCR0(val) MCR14(val, 0, c0, c0, 5) +#define WCP14_DBGBCR1(val) MCR14(val, 0, c0, c1, 5) +#define WCP14_DBGBCR2(val) MCR14(val, 0, c0, c2, 5) +#define WCP14_DBGBCR3(val) MCR14(val, 0, c0, c3, 5) +#define WCP14_DBGBCR4(val) MCR14(val, 
0, c0, c4, 5) +#define WCP14_DBGBCR5(val) MCR14(val, 0, c0, c5, 5) +#define WCP14_DBGBCR6(val) MCR14(val, 0, c0, c6, 5) +#define WCP14_DBGBCR7(val) MCR14(val, 0, c0, c7, 5) +#define WCP14_DBGBCR8(val) MCR14(val, 0, c0, c8, 5) +#define WCP14_DBGBCR9(val) MCR14(val, 0, c0, c9, 5) +#define WCP14_DBGBCR10(val) MCR14(val, 0, c0, c10, 5) +#define WCP14_DBGBCR11(val) MCR14(val, 0, c0, c11, 5) +#define WCP14_DBGBCR12(val) MCR14(val, 0, c0, c12, 5) +#define WCP14_DBGBCR13(val) MCR14(val, 0, c0, c13, 5) +#define WCP14_DBGBCR14(val) MCR14(val, 0, c0, c14, 5) +#define WCP14_DBGBCR15(val) MCR14(val, 0, c0, c15, 5) +#define WCP14_DBGWVR0(val) MCR14(val, 0, c0, c0, 6) +#define WCP14_DBGWVR1(val) MCR14(val, 0, c0, c1, 6) +#define WCP14_DBGWVR2(val) MCR14(val, 0, c0, c2, 6) +#define WCP14_DBGWVR3(val) MCR14(val, 0, c0, c3, 6) +#define WCP14_DBGWVR4(val) MCR14(val, 0, c0, c4, 6) +#define WCP14_DBGWVR5(val) MCR14(val, 0, c0, c5, 6) +#define WCP14_DBGWVR6(val) MCR14(val, 0, c0, c6, 6) +#define WCP14_DBGWVR7(val) MCR14(val, 0, c0, c7, 6) +#define WCP14_DBGWVR8(val) MCR14(val, 0, c0, c8, 6) +#define WCP14_DBGWVR9(val) MCR14(val, 0, c0, c9, 6) +#define WCP14_DBGWVR10(val) MCR14(val, 0, c0, c10, 6) +#define WCP14_DBGWVR11(val) MCR14(val, 0, c0, c11, 6) +#define WCP14_DBGWVR12(val) MCR14(val, 0, c0, c12, 6) +#define WCP14_DBGWVR13(val) MCR14(val, 0, c0, c13, 6) +#define WCP14_DBGWVR14(val) MCR14(val, 0, c0, c14, 6) +#define WCP14_DBGWVR15(val) MCR14(val, 0, c0, c15, 6) +#define WCP14_DBGWCR0(val) MCR14(val, 0, c0, c0, 7) +#define WCP14_DBGWCR1(val) MCR14(val, 0, c0, c1, 7) +#define WCP14_DBGWCR2(val) MCR14(val, 0, c0, c2, 7) +#define WCP14_DBGWCR3(val) MCR14(val, 0, c0, c3, 7) +#define WCP14_DBGWCR4(val) MCR14(val, 0, c0, c4, 7) +#define WCP14_DBGWCR5(val) MCR14(val, 0, c0, c5, 7) +#define WCP14_DBGWCR6(val) MCR14(val, 0, c0, c6, 7) +#define WCP14_DBGWCR7(val) MCR14(val, 0, c0, c7, 7) +#define WCP14_DBGWCR8(val) MCR14(val, 0, c0, c8, 7) +#define WCP14_DBGWCR9(val) MCR14(val, 0, c0, c9, 7) +#define WCP14_DBGWCR10(val) MCR14(val, 0, c0, c10, 7) +#define WCP14_DBGWCR11(val) MCR14(val, 0, c0, c11, 7) +#define WCP14_DBGWCR12(val) MCR14(val, 0, c0, c12, 7) +#define WCP14_DBGWCR13(val) MCR14(val, 0, c0, c13, 7) +#define WCP14_DBGWCR14(val) MCR14(val, 0, c0, c14, 7) +#define WCP14_DBGWCR15(val) MCR14(val, 0, c0, c15, 7) +#define WCP14_DBGBXVR0(val) MCR14(val, 0, c1, c0, 1) +#define WCP14_DBGBXVR1(val) MCR14(val, 0, c1, c1, 1) +#define WCP14_DBGBXVR2(val) MCR14(val, 0, c1, c2, 1) +#define WCP14_DBGBXVR3(val) MCR14(val, 0, c1, c3, 1) +#define WCP14_DBGBXVR4(val) MCR14(val, 0, c1, c4, 1) +#define WCP14_DBGBXVR5(val) MCR14(val, 0, c1, c5, 1) +#define WCP14_DBGBXVR6(val) MCR14(val, 0, c1, c6, 1) +#define WCP14_DBGBXVR7(val) MCR14(val, 0, c1, c7, 1) +#define WCP14_DBGBXVR8(val) MCR14(val, 0, c1, c8, 1) +#define WCP14_DBGBXVR9(val) MCR14(val, 0, c1, c9, 1) +#define WCP14_DBGBXVR10(val) MCR14(val, 0, c1, c10, 1) +#define WCP14_DBGBXVR11(val) MCR14(val, 0, c1, c11, 1) +#define WCP14_DBGBXVR12(val) MCR14(val, 0, c1, c12, 1) +#define WCP14_DBGBXVR13(val) MCR14(val, 0, c1, c13, 1) +#define WCP14_DBGBXVR14(val) MCR14(val, 0, c1, c14, 1) +#define WCP14_DBGBXVR15(val) MCR14(val, 0, c1, c15, 1) +#define WCP14_DBGOSLAR(val) MCR14(val, 0, c1, c0, 4) +#define WCP14_DBGOSSRR(val) MCR14(val, 0, c1, c2, 4) +#define WCP14_DBGOSDLR(val) MCR14(val, 0, c1, c3, 4) +#define WCP14_DBGPRCR(val) MCR14(val, 0, c1, c4, 4) +#define WCP14_DBGITCTRL(val) MCR14(val, 0, c7, c0, 4) +#define WCP14_DBGCLAIMSET(val) MCR14(val, 0, c7, c8, 6) +#define 
WCP14_DBGCLAIMCLR(val) MCR14(val, 0, c7, c9, 6) + +#endif diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h index ac54c06764e6..c67df04a2c9d 100644 --- a/arch/arm/include/asm/hw_breakpoint.h +++ b/arch/arm/include/asm/hw_breakpoint.h @@ -53,6 +53,7 @@ static inline void decode_ctrl_reg(u32 reg, #define ARM_DEBUG_ARCH_V7_MM 4 #define ARM_DEBUG_ARCH_V7_1 5 #define ARM_DEBUG_ARCH_V8 6 +#define ARM_DEBUG_ARCH_V8_8 8 /* Breakpoint */ #define ARM_BREAKPOINT_EXECUTE 0 diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index a148ceceb363..f2cfda4c0899 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -183,6 +183,14 @@ config ARCH_BENGAL This enables support for the BENGAL chipset. If you do not wish to build a kernel that runs on this chipset, say 'N' here. +config ARCH_KHAJE + bool "Enable Support for Qualcomm Technologies, Inc. KHAJE" + depends on ARCH_QCOM + select COMMON_CLK_QCOM + help + This enables support for the KHAJE chipset. If you do not + wish to build a kernel that runs on this chipset, say 'N' here. + config ARCH_SCUBA bool "Enable Support for Qualcomm Technologies, Inc. SCUBA" depends on ARCH_QCOM diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig index 6f6ce66e9ae2..7446c16b744d 100644 --- a/arch/arm64/configs/gki_defconfig +++ b/arch/arm64/configs/gki_defconfig @@ -484,7 +484,6 @@ CONFIG_NLS_MAC_ROMANIAN=y CONFIG_NLS_MAC_TURKISH=y CONFIG_NLS_UTF8=y CONFIG_UNICODE=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_SECURITYFS=y CONFIG_SECURITY_NETWORK=y diff --git a/arch/arm64/configs/vendor/bengal-perf_defconfig b/arch/arm64/configs/vendor/bengal-perf_defconfig index 17416d519bd6..6dd5672251ce 100644 --- a/arch/arm64/configs/vendor/bengal-perf_defconfig +++ b/arch/arm64/configs/vendor/bengal-perf_defconfig @@ -50,6 +50,7 @@ CONFIG_PROFILING=y CONFIG_HOTPLUG_SIZE_BITS=29 CONFIG_ARCH_QCOM=y CONFIG_ARCH_BENGAL=y +CONFIG_ARCH_KHAJE=y CONFIG_ARCH_SCUBA=y CONFIG_SCHED_MC=y CONFIG_NR_CPUS=8 @@ -63,6 +64,8 @@ CONFIG_SETEND_EMULATION=y CONFIG_ARM64_SW_TTBR0_PAN=y # CONFIG_ARM64_VHE is not set CONFIG_RANDOMIZE_BASE=y +CONFIG_CMDLINE="cgroup_disable=pressure" +CONFIG_CMDLINE_EXTEND=y CONFIG_COMPAT=y CONFIG_PM_WAKELOCKS=y CONFIG_PM_WAKELOCKS_LIMIT=0 @@ -363,6 +366,7 @@ CONFIG_SPI_SPIDEV=y CONFIG_SPMI=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y CONFIG_PINCTRL_BENGAL=y +CONFIG_PINCTRL_KHAJE=y CONFIG_PINCTRL_SCUBA=y CONFIG_GPIO_SYSFS=y CONFIG_POWER_RESET_QCOM=y @@ -372,8 +376,10 @@ CONFIG_QPNP_SMB5=y CONFIG_QPNP_SMBLITE=y CONFIG_SMB1355_SLAVE_CHARGER=y CONFIG_QPNP_QG=y +CONFIG_SMB1398_CHARGER=y CONFIG_THERMAL=y CONFIG_THERMAL_STATISTICS=y +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=10000 CONFIG_THERMAL_WRITABLE_TRIPS=y CONFIG_THERMAL_GOV_USER_SPACE=y CONFIG_THERMAL_GOV_LOW_LIMITS=y @@ -389,6 +395,7 @@ CONFIG_QTI_BCL_SOC_DRIVER=y CONFIG_QTI_QMI_COOLING_DEVICE=y CONFIG_QTI_THERMAL_LIMITS_DCVS=y CONFIG_REGULATOR_COOLING_DEVICE=y +CONFIG_QTI_RPM_SMD_COOLING_DEVICE=y CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y CONFIG_QTI_LMH_CPU_VDD_COOLING_DEVICE=y CONFIG_QTI_CX_IPEAK_COOLING_DEVICE=y @@ -399,6 +406,7 @@ CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_PROXY_CONSUMER=y CONFIG_REGULATOR_QCOM_SMD_RPM=y CONFIG_REGULATOR_QPNP_LCDB=y +CONFIG_REGULATOR_REFGEN=y CONFIG_REGULATOR_RPM_SMD=y CONFIG_REGULATOR_STUB=y CONFIG_REGULATOR_PM8008=y @@ -446,6 +454,7 @@ CONFIG_USB_LINK_LAYER_TEST=y CONFIG_NOP_USB_XCEIV=y CONFIG_USB_MSM_SSPHY_QMP=y CONFIG_MSM_QUSB_PHY=y +CONFIG_MSM_HSUSB_PHY=y 
CONFIG_USB_QCOM_EMU_PHY=y CONFIG_USB_GADGET=y CONFIG_USB_GADGET_VBUS_DRAW=900 @@ -466,6 +475,8 @@ CONFIG_USB_CONFIGFS_F_GSI=y CONFIG_USB_CONFIGFS_F_MTP=y CONFIG_USB_CONFIGFS_F_PTP=y CONFIG_TYPEC=y +CONFIG_USB_PD_POLICY=y +CONFIG_QPNP_USB_PDPHY=y CONFIG_MMC=y CONFIG_MMC_BLOCK_MINORS=32 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y @@ -477,6 +488,7 @@ CONFIG_MMC_CQHCI_CRYPTO_QTI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_CLASS_FLASH=y +CONFIG_LEDS_AW2016=y CONFIG_LEDS_QTI_FLASH=y CONFIG_LEDS_PWM=y CONFIG_LEDS_QTI_TRI_LED=y @@ -492,6 +504,7 @@ CONFIG_UIO_MSM_SHAREDMEM=y CONFIG_STAGING=y CONFIG_ASHMEM=y CONFIG_ION=y +CONFIG_ION_SYSTEM_HEAP=y CONFIG_ION_POOL_AUTO_REFILL=y CONFIG_QPNP_REVID=y CONFIG_SPS=y @@ -511,6 +524,9 @@ CONFIG_SM_DEBUGCC_BENGAL=y CONFIG_QM_DISPCC_SCUBA=y CONFIG_QM_GPUCC_SCUBA=y CONFIG_QM_DEBUGCC_SCUBA=y +CONFIG_SM_GPUCC_KHAJE=y +CONFIG_SM_DISPCC_KHAJE=y +CONFIG_SM_DEBUGCC_KHAJE=y CONFIG_HWSPINLOCK=y CONFIG_HWSPINLOCK_QCOM=y CONFIG_MAILBOX=y @@ -625,7 +641,6 @@ CONFIG_SDCARD_FS=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_HARDENED_USERCOPY_PAGESPAN=y diff --git a/arch/arm64/configs/vendor/bengal_defconfig b/arch/arm64/configs/vendor/bengal_defconfig index 3886cd719ae5..d1bb378c2ce8 100644 --- a/arch/arm64/configs/vendor/bengal_defconfig +++ b/arch/arm64/configs/vendor/bengal_defconfig @@ -51,6 +51,7 @@ CONFIG_PROFILING=y CONFIG_HOTPLUG_SIZE_BITS=29 CONFIG_ARCH_QCOM=y CONFIG_ARCH_BENGAL=y +CONFIG_ARCH_KHAJE=y CONFIG_ARCH_SCUBA=y CONFIG_SCHED_MC=y CONFIG_NR_CPUS=8 @@ -65,6 +66,8 @@ CONFIG_SETEND_EMULATION=y CONFIG_ARM64_SW_TTBR0_PAN=y # CONFIG_ARM64_VHE is not set CONFIG_RANDOMIZE_BASE=y +CONFIG_CMDLINE="cgroup_disable=pressure" +CONFIG_CMDLINE_EXTEND=y CONFIG_COMPAT=y CONFIG_PM_WAKELOCKS=y CONFIG_PM_WAKELOCKS_LIMIT=0 @@ -376,6 +379,7 @@ CONFIG_SPI_SPIDEV=y CONFIG_SPMI=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y CONFIG_PINCTRL_BENGAL=y +CONFIG_PINCTRL_KHAJE=y CONFIG_PINCTRL_SCUBA=y CONFIG_GPIO_SYSFS=y CONFIG_POWER_RESET_QCOM=y @@ -385,8 +389,10 @@ CONFIG_QPNP_SMB5=y CONFIG_QPNP_SMBLITE=y CONFIG_SMB1355_SLAVE_CHARGER=y CONFIG_QPNP_QG=y +CONFIG_SMB1398_CHARGER=y CONFIG_THERMAL=y CONFIG_THERMAL_STATISTICS=y +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=10000 CONFIG_THERMAL_WRITABLE_TRIPS=y CONFIG_THERMAL_GOV_USER_SPACE=y CONFIG_THERMAL_GOV_LOW_LIMITS=y @@ -402,6 +408,7 @@ CONFIG_QTI_BCL_SOC_DRIVER=y CONFIG_QTI_QMI_COOLING_DEVICE=y CONFIG_QTI_THERMAL_LIMITS_DCVS=y CONFIG_REGULATOR_COOLING_DEVICE=y +CONFIG_QTI_RPM_SMD_COOLING_DEVICE=y CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y CONFIG_QTI_LMH_CPU_VDD_COOLING_DEVICE=y CONFIG_QTI_CX_IPEAK_COOLING_DEVICE=y @@ -412,6 +419,7 @@ CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_PROXY_CONSUMER=y CONFIG_REGULATOR_QCOM_SMD_RPM=y CONFIG_REGULATOR_QPNP_LCDB=y +CONFIG_REGULATOR_REFGEN=y CONFIG_REGULATOR_RPM_SMD=y CONFIG_REGULATOR_STUB=y CONFIG_REGULATOR_PM8008=y @@ -460,6 +468,7 @@ CONFIG_USB_LINK_LAYER_TEST=y CONFIG_NOP_USB_XCEIV=y CONFIG_USB_MSM_SSPHY_QMP=y CONFIG_MSM_QUSB_PHY=y +CONFIG_MSM_HSUSB_PHY=y CONFIG_USB_QCOM_EMU_PHY=y CONFIG_USB_GADGET=y CONFIG_USB_GADGET_VBUS_DRAW=900 @@ -480,6 +489,8 @@ CONFIG_USB_CONFIGFS_F_GSI=y CONFIG_USB_CONFIGFS_F_MTP=y CONFIG_USB_CONFIGFS_F_PTP=y CONFIG_TYPEC=y +CONFIG_USB_PD_POLICY=y +CONFIG_QPNP_USB_PDPHY=y CONFIG_MMC=y CONFIG_MMC_BLOCK_MINORS=32 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y @@ -492,6 +503,7 @@ CONFIG_MMC_CQHCI_CRYPTO_QTI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y 
CONFIG_LEDS_CLASS_FLASH=y +CONFIG_LEDS_AW2016=y CONFIG_LEDS_QTI_FLASH=y CONFIG_LEDS_PWM=y CONFIG_LEDS_QTI_TRI_LED=y @@ -512,6 +524,7 @@ CONFIG_UIO_MSM_SHAREDMEM=y CONFIG_STAGING=y CONFIG_ASHMEM=y CONFIG_ION=y +CONFIG_ION_SYSTEM_HEAP=y CONFIG_ION_POOL_AUTO_REFILL=y CONFIG_QPNP_REVID=y CONFIG_SPS=y @@ -531,6 +544,9 @@ CONFIG_SM_DEBUGCC_BENGAL=y CONFIG_QM_DISPCC_SCUBA=y CONFIG_QM_GPUCC_SCUBA=y CONFIG_QM_DEBUGCC_SCUBA=y +CONFIG_SM_GPUCC_KHAJE=y +CONFIG_SM_DISPCC_KHAJE=y +CONFIG_SM_DEBUGCC_KHAJE=y CONFIG_HWSPINLOCK=y CONFIG_HWSPINLOCK_QCOM=y CONFIG_MAILBOX=y @@ -654,7 +670,6 @@ CONFIG_SDCARD_FS=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_HARDENED_USERCOPY_PAGESPAN=y diff --git a/arch/arm64/configs/vendor/kona-iot-perf_defconfig b/arch/arm64/configs/vendor/kona-iot-perf_defconfig index efcf65575423..19ea62435a8a 100644 --- a/arch/arm64/configs/vendor/kona-iot-perf_defconfig +++ b/arch/arm64/configs/vendor/kona-iot-perf_defconfig @@ -396,6 +396,7 @@ CONFIG_QTI_QMI_COOLING_DEVICE=y CONFIG_QTI_THERMAL_LIMITS_DCVS=y CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y CONFIG_QTI_LIMITS_ISENSE_CDSP=y +CONFIG_QTI_THERMAL_QFPROM=y CONFIG_MFD_I2C_PMIC=y CONFIG_MFD_SPMI_PMIC=y CONFIG_REGULATOR=y @@ -664,7 +665,6 @@ CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_SDCARD_FS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_FORTIFY_SOURCE=y diff --git a/arch/arm64/configs/vendor/kona-iot_defconfig b/arch/arm64/configs/vendor/kona-iot_defconfig index c55859f70ae2..0f3734206d11 100644 --- a/arch/arm64/configs/vendor/kona-iot_defconfig +++ b/arch/arm64/configs/vendor/kona-iot_defconfig @@ -412,6 +412,7 @@ CONFIG_QTI_QMI_COOLING_DEVICE=y CONFIG_QTI_THERMAL_LIMITS_DCVS=y CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE=y CONFIG_QTI_LIMITS_ISENSE_CDSP=y +CONFIG_QTI_THERMAL_QFPROM=y CONFIG_MFD_I2C_PMIC=y CONFIG_MFD_SPMI_PMIC=y CONFIG_REGULATOR=y @@ -698,7 +699,6 @@ CONFIG_SDCARD_FS=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_HARDENED_USERCOPY_PAGESPAN=y diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig index 056562f9b6e9..cd5540d0bac6 100644 --- a/arch/arm64/configs/vendor/kona-perf_defconfig +++ b/arch/arm64/configs/vendor/kona-perf_defconfig @@ -63,6 +63,8 @@ CONFIG_ARM64_SW_TTBR0_PAN=y CONFIG_ARM64_LSE_ATOMICS=y # CONFIG_ARM64_VHE is not set CONFIG_RANDOMIZE_BASE=y +CONFIG_CMDLINE="cgroup_disable=pressure" +CONFIG_CMDLINE_EXTEND=y # CONFIG_EFI is not set CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y CONFIG_KRYO_PMU_WORKAROUND=y @@ -316,6 +318,7 @@ CONFIG_BONDING=y CONFIG_DUMMY=y CONFIG_TUN=y CONFIG_VETH=y +CONFIG_AQFWD=y CONFIG_SKY2=y CONFIG_RMNET=y CONFIG_SMSC911X=y @@ -459,7 +462,10 @@ CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y CONFIG_HID_NINTENDO=y CONFIG_HID_PLANTRONICS=y +CONFIG_HID_PLAYSTATION=y +CONFIG_PLAYSTATION_FF=y CONFIG_HID_SONY=y +CONFIG_SONY_FF=y CONFIG_HID_QVR=y CONFIG_USB_HIDDEV=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y @@ -692,7 +698,6 @@ CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_SDCARD_FS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_FORTIFY_SOURCE=y diff --git a/arch/arm64/configs/vendor/kona_defconfig 
b/arch/arm64/configs/vendor/kona_defconfig index 36ef31e29ee9..ed1204b60381 100644 --- a/arch/arm64/configs/vendor/kona_defconfig +++ b/arch/arm64/configs/vendor/kona_defconfig @@ -64,6 +64,8 @@ CONFIG_ARM64_SW_TTBR0_PAN=y CONFIG_ARM64_LSE_ATOMICS=y # CONFIG_ARM64_VHE is not set CONFIG_RANDOMIZE_BASE=y +CONFIG_CMDLINE="cgroup_disable=pressure" +CONFIG_CMDLINE_EXTEND=y CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y CONFIG_KRYO_PMU_WORKAROUND=y CONFIG_COMPAT=y @@ -331,6 +333,7 @@ CONFIG_BONDING=y CONFIG_DUMMY=y CONFIG_TUN=y CONFIG_VETH=y +CONFIG_AQFWD=y CONFIG_RMNET=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y @@ -367,6 +370,7 @@ CONFIG_TABLET_USB_HANWANG=y CONFIG_TABLET_USB_KBTAB=y CONFIG_INPUT_TOUCHSCREEN=y CONFIG_TOUCHSCREEN_FTS=y +CONFIG_TOUCHSCREEN_NT36XXX=y CONFIG_INPUT_MISC=y CONFIG_INPUT_QPNP_POWER_ON=y CONFIG_INPUT_QTI_HAPTICS=y @@ -478,7 +482,10 @@ CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y CONFIG_HID_NINTENDO=y CONFIG_HID_PLANTRONICS=y +CONFIG_HID_PLAYSTATION=y +CONFIG_PLAYSTATION_FF=y CONFIG_HID_SONY=y +CONFIG_SONY_FF=y CONFIG_HID_QVR=y CONFIG_USB_HIDDEV=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y @@ -729,7 +736,6 @@ CONFIG_SDCARD_FS=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_HARDENED_USERCOPY_PAGESPAN=y diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig index 3e38e6cf1ce9..ebd9b748aabb 100644 --- a/arch/arm64/configs/vendor/lito-perf_defconfig +++ b/arch/arm64/configs/vendor/lito-perf_defconfig @@ -61,6 +61,8 @@ CONFIG_SETEND_EMULATION=y CONFIG_ARM64_SW_TTBR0_PAN=y # CONFIG_ARM64_VHE is not set CONFIG_RANDOMIZE_BASE=y +CONFIG_CMDLINE="cgroup_disable=pressure" +CONFIG_CMDLINE_EXTEND=y # CONFIG_EFI is not set CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y CONFIG_COMPAT=y @@ -257,6 +259,7 @@ CONFIG_NET_ACT_MIRRED=y CONFIG_NET_ACT_SKBEDIT=y CONFIG_DNS_RESOLVER=y CONFIG_QRTR=y +CONFIG_QRTR_WAKEUP_MS=500 CONFIG_QRTR_SMD=y CONFIG_QRTR_MHI=y CONFIG_BPF_JIT=y @@ -669,7 +672,6 @@ CONFIG_SDCARD_FS=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_HARDENED_USERCOPY_PAGESPAN=y diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig index ce005437d2c7..e45f42887e4c 100644 --- a/arch/arm64/configs/vendor/lito_defconfig +++ b/arch/arm64/configs/vendor/lito_defconfig @@ -62,6 +62,8 @@ CONFIG_SETEND_EMULATION=y CONFIG_ARM64_SW_TTBR0_PAN=y # CONFIG_ARM64_VHE is not set CONFIG_RANDOMIZE_BASE=y +CONFIG_CMDLINE="cgroup_disable=pressure" +CONFIG_CMDLINE_EXTEND=y CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y CONFIG_COMPAT=y CONFIG_PM_WAKELOCKS=y @@ -263,6 +265,7 @@ CONFIG_NET_ACT_MIRRED=y CONFIG_NET_ACT_SKBEDIT=y CONFIG_DNS_RESOLVER=y CONFIG_QRTR=y +CONFIG_QRTR_WAKEUP_MS=500 CONFIG_QRTR_SMD=y CONFIG_QRTR_MHI=y CONFIG_BPF_JIT=y @@ -691,7 +694,6 @@ CONFIG_SDCARD_FS=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_HARDENED_USERCOPY_PAGESPAN=y diff --git a/arch/arm64/configs/vendor/msm8937-perf_defconfig b/arch/arm64/configs/vendor/msm8937-perf_defconfig new file mode 100644 index 000000000000..46b5b4561041 --- /dev/null +++ b/arch/arm64/configs/vendor/msm8937-perf_defconfig @@ -0,0 +1,667 @@ +CONFIG_LOCALVERSION="-perf" +# 
CONFIG_LOCALVERSION_AUTO is not set +CONFIG_AUDIT=y +# CONFIG_AUDITSYSCALL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_SCHED_WALT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_RCU_EXPERT=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y +CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_BPF=y +CONFIG_SCHED_CORE_CTL=y +CONFIG_NAMESPACES=y +# CONFIG_PID_NS is not set +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_TUNE=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +# CONFIG_FHANDLE is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_QM215=y +CONFIG_ARCH_MSM8937=y +CONFIG_ARCH_SDM429=y +CONFIG_ARCH_SDM439=y +# CONFIG_ARM64_ERRATUM_1024718 is not set +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=8 +CONFIG_HZ_100=y +CONFIG_SECCOMP=y +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y +CONFIG_ARM64_SW_TTBR0_PAN=y +# CONFIG_ARM64_VHE is not set +CONFIG_RANDOMIZE_BASE=y +CONFIG_CMDLINE="cgroup_disable=pressure" +CONFIG_CMDLINE_EXTEND=y +CONFIG_COMPAT=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_ENERGY_MODEL=y +CONFIG_CPU_IDLE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TIMES=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_BOOST=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPU_FREQ_MSM=y +CONFIG_MSM_TZ_LOG=y +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA1_ARM64_CE=y +CONFIG_CRYPTO_SHA2_ARM64_CE=y +CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +CONFIG_PANIC_ON_REFCOUNT_ERROR=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_SHA512=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y +CONFIG_PARTITION_ADVANCED=y +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_CMA=y +CONFIG_ZSMALLOC=y +CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPGRE_DEMUX=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_UDP_DIAG=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y 
+CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_IP_SCTP=y +CONFIG_L2TP=y +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=y +CONFIG_L2TP_ETH=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_CLS_BPF=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_QRTR=y +CONFIG_QRTR_SMD=y +CONFIG_BPF_JIT=y +CONFIG_BT=y +# CONFIG_BT_BREDR is not set +# CONFIG_BT_LE is not set +CONFIG_MSM_BT_POWER=y +CONFIG_BTFM_SLIM_WCN3990=y +CONFIG_CFG80211=y +CONFIG_CFG80211_INTERNAL_REGDB=y +# CONFIG_CFG80211_CRDA_SUPPORT is not set +CONFIG_RFKILL=y +CONFIG_NFC_NQ=y +CONFIG_FW_LOADER_USER_HELPER=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +# CONFIG_FW_CACHE is not set +CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y +CONFIG_DMA_CMA=y 
+CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=16 +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_HDCP_QSEECOM=y +CONFIG_QSEECOM=y +CONFIG_UID_SYS_STATS=y +CONFIG_FPR_FPC=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_CRYPTO=y +CONFIG_SCSI_UFS_CRYPTO_QTI=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y +CONFIG_DM_SNAPSHOT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_ANDROID_VERITY=y +CONFIG_DM_BOW=y +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +CONFIG_TUN=y +CONFIG_VETH=y +# CONFIG_NET_VENDOR_AMAZON is not set +CONFIG_MSM_RMNET_BAM=y +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_HISILICON is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +CONFIG_RMNET=y +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=y +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=y +CONFIG_PPTP=y +CONFIG_PPPOL2TP=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_USB_RTL8152=y +CONFIG_USB_USBNET=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +# CONFIG_WLAN_VENDOR_ATH is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +# CONFIG_WLAN_VENDOR_BROADCOM is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +# CONFIG_WLAN_VENDOR_INTEL is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +# CONFIG_WLAN_VENDOR_MARVELL is not set +# CONFIG_WLAN_VENDOR_MEDIATEK is not set +# CONFIG_WLAN_VENDOR_RALINK is not set +# CONFIG_WLAN_VENDOR_REALTEK is not set +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +CONFIG_WCNSS_MEM_PRE_ALLOC=y +CONFIG_CLD_LL_CORE=y +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_GPIO=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_HBTP_INPUT=y +CONFIG_INPUT_QPNP_POWER_ON=y +CONFIG_INPUT_UINPUT=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM_LEGACY=y +CONFIG_MSM_SMD_PKT=y +CONFIG_DIAG_CHAR=y +CONFIG_MSM_ADSPRPC=y +CONFIG_MSM_RDBG=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MSM_V2=y +CONFIG_SPI=y +CONFIG_SPI_QUP=y +CONFIG_SPI_SPIDEV=y +CONFIG_SPMI=y +CONFIG_PINCTRL_MSM8937=y +CONFIG_PINCTRL_MSM8917=y +CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_GPIO_SYSFS=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_QPNP_SMB5=y +CONFIG_QPNP_VM_BMS=y +CONFIG_QPNP_LINEAR_CHARGER=y +CONFIG_SMB1351_USB_CHARGER=y +CONFIG_SMB1360_CHARGER_FG=y +CONFIG_SMB1355_SLAVE_CHARGER=y +CONFIG_QPNP_QG=y +CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_QCOM_SPMI_TEMP_ALARM=y +CONFIG_THERMAL_QPNP_ADC_TM=y +CONFIG_THERMAL_TSENS=y +CONFIG_QTI_ADC_TM=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_BCL_PMIC5=y +CONFIG_QTI_BCL_SOC_DRIVER=y +CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y +CONFIG_MFD_I2C_PMIC=y +CONFIG_MFD_SPMI_PMIC=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_QPNP_LABIBB=y +CONFIG_REGULATOR_QPNP_LCDB=y +CONFIG_REGULATOR_MEM_ACC=y 
+CONFIG_REGULATOR_CPR=y +CONFIG_REGULATOR_RPM_SMD=y +CONFIG_REGULATOR_SPM=y +CONFIG_REGULATOR_STUB=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_MSM_VIDC_3X_GOVERNORS=y +CONFIG_MSM_VIDC_3X_V4L2=y +CONFIG_MSM_CAMERA=y +CONFIG_MSMB_CAMERA=y +CONFIG_MSM_CAMERA_SENSOR=y +CONFIG_MSM_CPP=y +CONFIG_MSM_CCI=y +CONFIG_MSM_CSI20_HEADER=y +CONFIG_MSM_CSI22_HEADER=y +CONFIG_MSM_CSI30_HEADER=y +CONFIG_MSM_CSI31_HEADER=y +CONFIG_MSM_CSIPHY=y +CONFIG_MSM_CSID=y +CONFIG_MSM_EEPROM=y +CONFIG_MSM_ISPIF_V2=y +CONFIG_IMX134=y +CONFIG_IMX132=y +CONFIG_OV9724=y +CONFIG_OV5648=y +CONFIG_GC0339=y +CONFIG_OV8825=y +CONFIG_OV8865=y +CONFIG_s5k4e1=y +CONFIG_OV12830=y +CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y +CONFIG_MSMB_JPEG=y +CONFIG_MSM_FD=y +CONFIG_RADIO_IRIS=y +CONFIG_RADIO_IRIS_TRANSPORT=y +CONFIG_FB=y +CONFIG_FB_MSM=y +CONFIG_FB_MSM_MDSS=y +CONFIG_FB_MSM_MDSS_WRITEBACK=y +CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y +CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_BACKLIGHT_CLASS_DEVICE is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_USB_AUDIO=y +CONFIG_SND_SOC=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NINTENDO=y +CONFIG_HID_SONY=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_EHCI_MSM=y +CONFIG_USB_ACM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_STORAGE_DATAFAB=y +CONFIG_USB_STORAGE_FREECOM=y +CONFIG_USB_STORAGE_ISD200=y +CONFIG_USB_STORAGE_USBAT=y +CONFIG_USB_STORAGE_SDDR09=y +CONFIG_USB_STORAGE_SDDR55=y +CONFIG_USB_STORAGE_JUMPSHOT=y +CONFIG_USB_STORAGE_ALAUDA=y +CONFIG_USB_STORAGE_ONETOUCH=y +CONFIG_USB_STORAGE_KARMA=y +CONFIG_USB_STORAGE_CYPRESS_ATACB=y +CONFIG_USB_SERIAL=y +CONFIG_USB_EHSET_TEST_FIXTURE=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_DEBUG_FILES=y +CONFIG_USB_GADGET_DEBUG_FS=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_CI13XXX_MSM=y +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_SERIAL=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_QCRNDIS=y +CONFIG_USB_CONFIGFS_RNDIS=y +CONFIG_USB_CONFIGFS_RMNET_BAM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_USB_CONFIGFS_F_HID=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_CDEV=y +CONFIG_USB_CONFIGFS_F_CCID=y +CONFIG_USB_CONFIGFS_F_QDSS=y +CONFIG_USB_CONFIGFS_F_MTP=y +CONFIG_USB_CONFIGFS_F_PTP=y +CONFIG_TYPEC=y +CONFIG_MMC=y +# CONFIG_PWRSEQ_EMMC is not set +# CONFIG_PWRSEQ_SIMPLE is not set +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_CQHCI_CRYPTO=y +CONFIG_MMC_CQHCI_CRYPTO_QTI=y +CONFIG_LEDS_QTI_TRI_LED=y +CONFIG_LEDS_QPNP_FLASH_V2=y +CONFIG_LEDS_QPNP_VIBRATOR_LDO=y +CONFIG_LEDS_QPNP_VIBRATOR=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_EDAC=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PM8XXX=y +CONFIG_DMADEVICES=y +CONFIG_QCOM_SPS_DMA=y +CONFIG_UIO=y +CONFIG_UIO_MSM_SHAREDMEM=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ION=y +CONFIG_ION_POOL_AUTO_REFILL=y +CONFIG_MSM_EXT_DISPLAY=y +CONFIG_QPNP_REVID=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_IPA=y +CONFIG_RMNET_IPA=y +CONFIG_RNDIS_IPA=y +CONFIG_USB_BAM=y +CONFIG_MDSS_PLL=y 
+CONFIG_QCOM_CLK_SMD_RPM=y +CONFIG_SDM_GCC_429W=y +CONFIG_SDM_DEBUGCC_429W=y +CONFIG_CLOCK_CPU_SDM=y +CONFIG_SDM_DEBUGCC_439=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +CONFIG_MAILBOX=y +CONFIG_ARM_SMMU=y +CONFIG_QCOM_LAZY_MAPPING=y +CONFIG_RPMSG_CHAR=y +CONFIG_RPMSG_QCOM_GLINK_RPM=y +CONFIG_RPMSG_QCOM_GLINK_SMEM=y +CONFIG_RPMSG_QCOM_SMD=y +CONFIG_MSM_RPM_SMD=y +CONFIG_QCOM_RUN_QUEUE_STATS=y +CONFIG_QPNP_PBS=y +CONFIG_QCOM_QMI_HELPERS=y +CONFIG_QCOM_SMEM=y +CONFIG_QCOM_SMD_RPM=y +CONFIG_MSM_SPM=y +CONFIG_MSM_L2_SPM=y +CONFIG_QCOM_EARLY_RANDOM=y +CONFIG_QCOM_MEMORY_DUMP_V2=y +CONFIG_QCOM_SMP2P=y +CONFIG_QCOM_SMSM=y +CONFIG_MSM_PIL_MSS_QDSP6V5=y +CONFIG_QCOM_SECURE_BUFFER=y +CONFIG_MSM_TZ_SMMU=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_QMI_COMM=y +CONFIG_MSM_PIL_SSR_GENERIC=y +CONFIG_MSM_BOOT_STATS=y +CONFIG_QCOM_WATCHDOG_V2=y +CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y +CONFIG_QCOM_BUS_SCALING=y +CONFIG_QCOM_GLINK=y +CONFIG_MSM_EVENT_TIMER=y +CONFIG_MSM_PM=y +CONFIG_QCOM_DCC=y +CONFIG_QTI_RPM_STATS_LOG=y +CONFIG_QTEE_SHM_BRIDGE=y +CONFIG_MEM_SHARE_QMI_SERVICE=y +CONFIG_MSM_PERFORMANCE=y +CONFIG_QTI_CRYPTO_COMMON=y +CONFIG_QTI_CRYPTO_TZ=y +CONFIG_MSM_BAM_DMUX=y +CONFIG_WCNSS_CORE=y +CONFIG_WCNSS_CORE_PRONTO=y +CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y +CONFIG_QCOM_BIMC_BWMON=y +CONFIG_ARM_MEMLAT_MON=y +CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y +CONFIG_DEVFREQ_GOV_MEMLAT=y +CONFIG_DEVFREQ_SIMPLE_DEV=y +CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_IIO=y +CONFIG_QCOM_SPMI_ADC5=y +CONFIG_QCOM_SPMI_VADC=y +CONFIG_PWM=y +CONFIG_PWM_QTI_LPG=y +CONFIG_ARM_GIC_V3_ACL=y +CONFIG_QCOM_MPM=y +CONFIG_RAS=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_BINDERFS=y +CONFIG_QCOM_QFPROM=y +CONFIG_NVMEM_SPMI_SDAM=y +CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_SENSORS_SSC=y +CONFIG_QCOM_KGSL=y +CONFIG_LEGACY_ENERGY_MODEL_DT=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_F2FS_FS=y +CONFIG_F2FS_FS_SECURITY=y +CONFIG_F2FS_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y +CONFIG_FS_VERITY=y +CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y +CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=y +CONFIG_INCREMENTAL_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_SDCARD_FS=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_SECURITY=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_STATIC_USERMODEHELPER=y +CONFIG_STATIC_USERMODEHELPER_PATH="" +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SMACK=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y +CONFIG_CRYPTO_DEV_QCRYPTO=y +CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_IPC_LOGGING=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +CONFIG_DEBUG_ALIGN_RODATA=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y +CONFIG_CORESIGHT_REMOTE_ETM=y +CONFIG_CORESIGHT_TGU=y diff --git a/arch/arm64/configs/vendor/msm8937_defconfig b/arch/arm64/configs/vendor/msm8937_defconfig new file mode 100644 index 000000000000..dfa5df35c1a7 --- /dev/null +++ 
b/arch/arm64/configs/vendor/msm8937_defconfig @@ -0,0 +1,736 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_AUDIT=y +# CONFIG_AUDITSYSCALL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_SCHED_WALT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_RCU_EXPERT=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y +CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_DEBUG=y +CONFIG_SCHED_CORE_CTL=y +CONFIG_NAMESPACES=y +# CONFIG_PID_NS is not set +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_TUNE=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +# CONFIG_FHANDLE is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_QM215=y +CONFIG_ARCH_MSM8937=y +CONFIG_ARCH_SDM429=y +CONFIG_ARCH_SDM439=y +# CONFIG_ARM64_ERRATUM_1024718 is not set +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=8 +CONFIG_HZ_100=y +CONFIG_SECCOMP=y +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y +CONFIG_ARM64_SW_TTBR0_PAN=y +# CONFIG_ARM64_VHE is not set +CONFIG_RANDOMIZE_BASE=y +CONFIG_CMDLINE="cgroup_disable=pressure" +CONFIG_CMDLINE_EXTEND=y +CONFIG_COMPAT=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_ENERGY_MODEL=y +CONFIG_CPU_IDLE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TIMES=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_BOOST=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPU_FREQ_MSM=y +CONFIG_MSM_TZ_LOG=y +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA1_ARM64_CE=y +CONFIG_CRYPTO_SHA2_ARM64_CE=y +CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +CONFIG_PANIC_ON_REFCOUNT_ERROR=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_SHA512=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y +CONFIG_PARTITION_ADVANCED=y +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_CLEANCACHE=y +CONFIG_CMA=y +CONFIG_CMA_DEBUGFS=y +CONFIG_ZSMALLOC=y +CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPGRE_DEMUX=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_UDP_DIAG=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y 
+CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_IP_SCTP=y +CONFIG_L2TP=y +CONFIG_L2TP_DEBUGFS=y +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=y +CONFIG_L2TP_ETH=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_CLS_BPF=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_DNS_RESOLVER=y +CONFIG_QRTR=y +CONFIG_QRTR_SMD=y +CONFIG_BPF_JIT=y +CONFIG_BT=y +# CONFIG_BT_BREDR is not set +# CONFIG_BT_LE is not set +CONFIG_MSM_BT_POWER=y +CONFIG_BTFM_SLIM_WCN3990=y +CONFIG_CFG80211=y +CONFIG_CFG80211_INTERNAL_REGDB=y +# CONFIG_CFG80211_CRDA_SUPPORT 
is not set +CONFIG_RFKILL=y +CONFIG_NFC_NQ=y +CONFIG_FW_LOADER_USER_HELPER=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +# CONFIG_FW_CACHE is not set +CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y +CONFIG_DMA_CMA=y +CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=16 +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_HDCP_QSEECOM=y +CONFIG_QSEECOM=y +CONFIG_UID_SYS_STATS=y +CONFIG_FPR_FPC=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFSHCD_CMD_LOGGING=y +CONFIG_SCSI_UFS_CRYPTO=y +CONFIG_SCSI_UFS_CRYPTO_QTI=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_DEFAULT_KEY=y +CONFIG_DM_SNAPSHOT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_ANDROID_VERITY=y +CONFIG_DM_BOW=y +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +CONFIG_TUN=y +CONFIG_VETH=y +# CONFIG_NET_VENDOR_AMAZON is not set +CONFIG_MSM_RMNET_BAM=y +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_HISILICON is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +CONFIG_RMNET=y +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=y +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=y +CONFIG_PPTP=y +CONFIG_PPPOL2TP=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_USB_RTL8152=y +CONFIG_USB_USBNET=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +# CONFIG_WLAN_VENDOR_ATH is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +# CONFIG_WLAN_VENDOR_BROADCOM is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +# CONFIG_WLAN_VENDOR_INTEL is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +# CONFIG_WLAN_VENDOR_MARVELL is not set +# CONFIG_WLAN_VENDOR_MEDIATEK is not set +# CONFIG_WLAN_VENDOR_RALINK is not set +# CONFIG_WLAN_VENDOR_REALTEK is not set +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +CONFIG_WCNSS_MEM_PRE_ALLOC=y +CONFIG_CLD_LL_CORE=y +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_GPIO=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_HBTP_INPUT=y +CONFIG_INPUT_QPNP_POWER_ON=y +CONFIG_INPUT_UINPUT=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +CONFIG_SERIAL_MSM_HS=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM_LEGACY=y +CONFIG_MSM_SMD_PKT=y +CONFIG_DIAG_CHAR=y +CONFIG_MSM_ADSPRPC=y +CONFIG_MSM_RDBG=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MSM_V2=y +CONFIG_SPI=y +CONFIG_SPI_QUP=y +CONFIG_SPI_SPIDEV=y +CONFIG_SPMI=y +CONFIG_PINCTRL_MSM8937=y +CONFIG_PINCTRL_MSM8917=y +CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_GPIO_SYSFS=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_QPNP_SMB5=y +CONFIG_QPNP_VM_BMS=y +CONFIG_QPNP_LINEAR_CHARGER=y +CONFIG_SMB1351_USB_CHARGER=y +CONFIG_SMB1360_CHARGER_FG=y +CONFIG_SMB1355_SLAVE_CHARGER=y +CONFIG_QPNP_QG=y +CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_QCOM_SPMI_TEMP_ALARM=y +CONFIG_THERMAL_QPNP_ADC_TM=y +CONFIG_THERMAL_TSENS=y +CONFIG_QTI_ADC_TM=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_BCL_PMIC5=y 
+CONFIG_QTI_BCL_SOC_DRIVER=y +CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y +CONFIG_MFD_I2C_PMIC=y +CONFIG_MFD_SPMI_PMIC=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_QPNP_LABIBB=y +CONFIG_REGULATOR_QPNP_LCDB=y +CONFIG_REGULATOR_MEM_ACC=y +CONFIG_REGULATOR_CPR=y +CONFIG_REGULATOR_RPM_SMD=y +CONFIG_REGULATOR_SPM=y +CONFIG_REGULATOR_STUB=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_MSM_VIDC_3X_GOVERNORS=y +CONFIG_MSM_VIDC_3X_V4L2=y +CONFIG_MSM_CAMERA=y +CONFIG_MSM_CAMERA_DEBUG=y +CONFIG_MSMB_CAMERA=y +CONFIG_MSMB_CAMERA_DEBUG=y +CONFIG_MSM_CAMERA_SENSOR=y +CONFIG_MSM_CPP=y +CONFIG_MSM_CCI=y +CONFIG_MSM_CSI20_HEADER=y +CONFIG_MSM_CSI22_HEADER=y +CONFIG_MSM_CSI30_HEADER=y +CONFIG_MSM_CSI31_HEADER=y +CONFIG_MSM_CSIPHY=y +CONFIG_MSM_CSID=y +CONFIG_MSM_EEPROM=y +CONFIG_MSM_ISPIF_V2=y +CONFIG_IMX134=y +CONFIG_IMX132=y +CONFIG_OV9724=y +CONFIG_OV5648=y +CONFIG_GC0339=y +CONFIG_OV8825=y +CONFIG_OV8865=y +CONFIG_s5k4e1=y +CONFIG_OV12830=y +CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y +CONFIG_MSMB_JPEG=y +CONFIG_MSM_FD=y +CONFIG_RADIO_IRIS=y +CONFIG_RADIO_IRIS_TRANSPORT=y +CONFIG_FB=y +CONFIG_FB_VIRTUAL=y +CONFIG_FB_MSM=y +CONFIG_FB_MSM_MDSS=y +CONFIG_FB_MSM_MDSS_WRITEBACK=y +CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y +CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_BACKLIGHT_CLASS_DEVICE is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_USB_AUDIO=y +CONFIG_SND_SOC=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NINTENDO=y +CONFIG_HID_SONY=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_EHCI_MSM=y +CONFIG_USB_ACM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_STORAGE_DATAFAB=y +CONFIG_USB_STORAGE_FREECOM=y +CONFIG_USB_STORAGE_ISD200=y +CONFIG_USB_STORAGE_USBAT=y +CONFIG_USB_STORAGE_SDDR09=y +CONFIG_USB_STORAGE_SDDR55=y +CONFIG_USB_STORAGE_JUMPSHOT=y +CONFIG_USB_STORAGE_ALAUDA=y +CONFIG_USB_STORAGE_ONETOUCH=y +CONFIG_USB_STORAGE_KARMA=y +CONFIG_USB_STORAGE_CYPRESS_ATACB=y +CONFIG_USB_SERIAL=y +CONFIG_USB_EHSET_TEST_FIXTURE=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_DEBUG_FILES=y +CONFIG_USB_GADGET_DEBUG_FS=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_CI13XXX_MSM=y +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_SERIAL=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_QCRNDIS=y +CONFIG_USB_CONFIGFS_RNDIS=y +CONFIG_USB_CONFIGFS_RMNET_BAM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_USB_CONFIGFS_F_HID=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_CDEV=y +CONFIG_USB_CONFIGFS_F_CCID=y +CONFIG_USB_CONFIGFS_F_QDSS=y +CONFIG_USB_CONFIGFS_F_MTP=y +CONFIG_USB_CONFIGFS_F_PTP=y +CONFIG_TYPEC=y +CONFIG_MMC=y +# CONFIG_PWRSEQ_EMMC is not set +# CONFIG_PWRSEQ_SIMPLE is not set +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_IPC_LOGGING=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_CQHCI_CRYPTO=y +CONFIG_MMC_CQHCI_CRYPTO_QTI=y +CONFIG_LEDS_QTI_TRI_LED=y +CONFIG_LEDS_QPNP_FLASH_V2=y +CONFIG_LEDS_QPNP_VIBRATOR_LDO=y +CONFIG_LEDS_QPNP_VIBRATOR=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_EDAC=y 
+CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PM8XXX=y +CONFIG_DMADEVICES=y +CONFIG_QCOM_SPS_DMA=y +CONFIG_UIO=y +CONFIG_UIO_MSM_SHAREDMEM=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ION=y +CONFIG_ION_POOL_AUTO_REFILL=y +CONFIG_MSM_EXT_DISPLAY=y +CONFIG_QPNP_REVID=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_IPA=y +CONFIG_RMNET_IPA=y +CONFIG_RNDIS_IPA=y +CONFIG_USB_BAM=y +CONFIG_MDSS_PLL=y +CONFIG_QCOM_CLK_SMD_RPM=y +CONFIG_SDM_GCC_429W=y +CONFIG_SDM_DEBUGCC_429W=y +CONFIG_CLOCK_CPU_SDM=y +CONFIG_SDM_DEBUGCC_439=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +CONFIG_MAILBOX=y +CONFIG_ARM_SMMU=y +CONFIG_QCOM_LAZY_MAPPING=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_TESTS=y +CONFIG_RPMSG_CHAR=y +CONFIG_RPMSG_QCOM_GLINK_RPM=y +CONFIG_RPMSG_QCOM_GLINK_SMEM=y +CONFIG_RPMSG_QCOM_SMD=y +CONFIG_MSM_RPM_SMD=y +CONFIG_QCOM_CPUSS_DUMP=y +CONFIG_QCOM_RUN_QUEUE_STATS=y +CONFIG_QPNP_PBS=y +CONFIG_QCOM_QMI_HELPERS=y +CONFIG_QCOM_SMEM=y +CONFIG_QCOM_SMD_RPM=y +CONFIG_MSM_SPM=y +CONFIG_MSM_L2_SPM=y +CONFIG_QCOM_EARLY_RANDOM=y +CONFIG_QCOM_MEMORY_DUMP_V2=y +CONFIG_MSM_DEBUG_LAR_UNLOCK=y +CONFIG_QCOM_SMP2P=y +CONFIG_QCOM_SMSM=y +CONFIG_MSM_PIL_MSS_QDSP6V5=y +CONFIG_QCOM_SECURE_BUFFER=y +CONFIG_MSM_TZ_SMMU=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_QMI_COMM=y +CONFIG_MSM_PIL_SSR_GENERIC=y +CONFIG_MSM_BOOT_STATS=y +CONFIG_MSM_CORE_HANG_DETECT=y +CONFIG_QCOM_WATCHDOG_V2=y +CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y +CONFIG_QCOM_BUS_SCALING=y +CONFIG_QCOM_GLINK=y +CONFIG_MSM_EVENT_TIMER=y +CONFIG_MSM_PM=y +CONFIG_QCOM_DCC=y +CONFIG_QTI_RPM_STATS_LOG=y +CONFIG_QTEE_SHM_BRIDGE=y +CONFIG_MEM_SHARE_QMI_SERVICE=y +CONFIG_MSM_PERFORMANCE=y +CONFIG_QTI_CRYPTO_COMMON=y +CONFIG_QTI_CRYPTO_TZ=y +CONFIG_MSM_BAM_DMUX=y +CONFIG_WCNSS_CORE=y +CONFIG_WCNSS_CORE_PRONTO=y +CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y +CONFIG_QCOM_BIMC_BWMON=y +CONFIG_ARM_MEMLAT_MON=y +CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y +CONFIG_DEVFREQ_GOV_MEMLAT=y +CONFIG_DEVFREQ_SIMPLE_DEV=y +CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_IIO=y +CONFIG_QCOM_SPMI_ADC5=y +CONFIG_QCOM_SPMI_VADC=y +CONFIG_PWM=y +CONFIG_PWM_QTI_LPG=y +CONFIG_ARM_GIC_V3_ACL=y +CONFIG_QCOM_MPM=y +CONFIG_RAS=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_BINDERFS=y +CONFIG_QCOM_QFPROM=y +CONFIG_NVMEM_SPMI_SDAM=y +CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_SENSORS_SSC=y +CONFIG_QCOM_KGSL=y +CONFIG_LEGACY_ENERGY_MODEL_DT=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_F2FS_FS=y +CONFIG_F2FS_FS_SECURITY=y +CONFIG_F2FS_CHECK_FS=y +CONFIG_F2FS_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y +CONFIG_FS_VERITY=y +CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y +CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=y +CONFIG_INCREMENTAL_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_SDCARD_FS=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_SECURITY=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_STATIC_USERMODEHELPER=y +CONFIG_STATIC_USERMODEHELPER_PATH="" +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SMACK=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y +CONFIG_CRYPTO_DEV_QCRYPTO=y +CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS=y +CONFIG_DEBUG_MODULE_LOAD_INFO=y +CONFIG_DEBUG_INFO=y +CONFIG_PAGE_OWNER=y 
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_PAGEALLOC=y +CONFIG_SLUB_DEBUG_PANIC_ON=y +CONFIG_DEBUG_PANIC_ON_OOM=y +CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y +CONFIG_PAGE_POISONING=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_WQ_WATCHDOG=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_PANIC_ON_SCHED_BUG=y +CONFIG_PANIC_ON_RT_THROTTLING=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_LOCK_TORTURE_TEST=m +CONFIG_DEBUG_SG=y +CONFIG_DEBUG_NOTIFIERS=y +CONFIG_DEBUG_CREDENTIALS=y +CONFIG_FAULT_INJECTION=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_IPC_LOGGING=y +CONFIG_QCOM_RTB=y +CONFIG_QCOM_RTB_SEPARATE_CPUS=y +CONFIG_FUNCTION_TRACER=y +CONFIG_PREEMPTIRQ_EVENTS=y +CONFIG_IRQSOFF_TRACER=y +CONFIG_PREEMPT_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_LKDTM=y +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_MEMTEST=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +CONFIG_PANIC_ON_DATA_CORRUPTION=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_SOURCE_ETM4X=y +CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_CTI=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y +CONFIG_CORESIGHT_REMOTE_ETM=y +CONFIG_CORESIGHT_TGU=y diff --git a/arch/arm64/configs/vendor/sdm660-perf_defconfig b/arch/arm64/configs/vendor/sdm660-perf_defconfig index 78e77523b2da..57cd65f82a24 100644 --- a/arch/arm64/configs/vendor/sdm660-perf_defconfig +++ b/arch/arm64/configs/vendor/sdm660-perf_defconfig @@ -67,6 +67,8 @@ CONFIG_SETEND_EMULATION=y CONFIG_ARM64_SW_TTBR0_PAN=y # CONFIG_ARM64_VHE is not set CONFIG_RANDOMIZE_BASE=y +CONFIG_CMDLINE="cgroup_disable=pressure" +CONFIG_CMDLINE_EXTEND=y CONFIG_COMPAT=y CONFIG_PM_WAKELOCKS=y CONFIG_PM_WAKELOCKS_LIMIT=0 @@ -431,6 +433,26 @@ CONFIG_USB_VIDEO_CLASS=y CONFIG_V4L_PLATFORM_DRIVERS=y CONFIG_MSM_VIDC_3X_GOVERNORS=y CONFIG_MSM_VIDC_3X_V4L2=y +CONFIG_MSM_CAMERA=y +CONFIG_MSM_CAMERA_DEBUG=y +CONFIG_MSMB_CAMERA=y +CONFIG_MSMB_CAMERA_DEBUG=y +CONFIG_MSM_CAMERA_SENSOR=y +CONFIG_MSM_CPP=y +CONFIG_MSM_CCI=y +CONFIG_MSM_CSI20_HEADER=y +CONFIG_MSM_CSI22_HEADER=y +CONFIG_MSM_CSI30_HEADER=y +CONFIG_MSM_CSI31_HEADER=y +CONFIG_MSM_CSIPHY=y +CONFIG_MSM_CSID=y +CONFIG_MSM_EEPROM=y +CONFIG_MSM_ISPIF=y +CONFIG_MSM_DUAL_ISP_SYNC=y +CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y +CONFIG_MSMB_JPEG=y +CONFIG_MSM_FD=y +CONFIG_MSM_JPEGDMA=y CONFIG_DVB_MPQ=m CONFIG_DVB_MPQ_DEMUX=m CONFIG_SDE_ROTATOR=y @@ -513,8 +535,6 @@ CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y CONFIG_MMC_CQHCI_CRYPTO=y CONFIG_MMC_CQHCI_CRYPTO_QTI=y -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y CONFIG_LEDS_QTI_TRI_LED=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_RTC_CLASS=y @@ -670,7 +690,6 @@ CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_SDCARD_FS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_HARDENED_USERCOPY_PAGESPAN=y @@ -703,10 +722,13 @@ CONFIG_IPC_LOGGING=y CONFIG_DEBUG_ALIGN_RODATA=y CONFIG_CORESIGHT=y 
 CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
-CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_CTI_SAVE_DISABLE=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_TGU=y
diff --git a/arch/arm64/configs/vendor/sdm660_defconfig b/arch/arm64/configs/vendor/sdm660_defconfig
index 6058c62dcbce..bfd72463fbfa 100644
--- a/arch/arm64/configs/vendor/sdm660_defconfig
+++ b/arch/arm64/configs/vendor/sdm660_defconfig
@@ -67,6 +67,8 @@ CONFIG_SETEND_EMULATION=y
 CONFIG_ARM64_SW_TTBR0_PAN=y
 # CONFIG_ARM64_VHE is not set
 CONFIG_RANDOMIZE_BASE=y
+CONFIG_CMDLINE="cgroup_disable=pressure"
+CONFIG_CMDLINE_EXTEND=y
 CONFIG_COMPAT=y
 CONFIG_PM_WAKELOCKS=y
 CONFIG_PM_WAKELOCKS_LIMIT=0
@@ -711,7 +713,6 @@ CONFIG_SDCARD_FS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
-CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_HARDENED_USERCOPY_PAGESPAN=y
@@ -798,9 +799,12 @@ CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
 CONFIG_CORESIGHT=y
 CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
 CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
 CONFIG_CORESIGHT_STM=y
 CONFIG_CORESIGHT_CTI=y
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_TGU=y
diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig
index 0607d16fcba2..f5d416faf974 100644
--- a/arch/x86/configs/gki_defconfig
+++ b/arch/x86/configs/gki_defconfig
@@ -418,7 +418,6 @@ CONFIG_NLS_MAC_ROMANIAN=y
 CONFIG_NLS_MAC_TURKISH=y
 CONFIG_NLS_UTF8=y
 CONFIG_UNICODE=y
-CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_SECURITYFS=y
 CONFIG_SECURITY_NETWORK=y
diff --git a/block/blk-core.c b/block/blk-core.c
index a33775cd97be..d3a6da3012a4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1741,8 +1741,12 @@ EXPORT_SYMBOL_GPL(part_round_stats);
 #ifdef CONFIG_PM
 static void blk_pm_put_request(struct request *rq)
 {
-	if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
-		pm_runtime_mark_last_busy(rq->q->dev);
+	if (rq->q->dev && !(rq->rq_flags & RQF_PM) &&
+	    (rq->rq_flags & RQF_PM_ADDED)) {
+		rq->rq_flags &= ~RQF_PM_ADDED;
+		if (!--rq->q->nr_pending)
+			pm_runtime_mark_last_busy(rq->q->dev);
+	}
 }
 #else
 static inline void blk_pm_put_request(struct request *rq) {}
diff --git a/block/elevator.c b/block/elevator.c
index 6d94044681f7..335dc50f0081 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -560,15 +560,22 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
 #ifdef CONFIG_PM
 static void blk_pm_requeue_request(struct request *rq)
 {
-	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
+	if (rq->q->dev && !(rq->rq_flags & RQF_PM) &&
+	    (rq->rq_flags & (RQF_PM_ADDED | RQF_FLUSH_SEQ))) {
+		rq->rq_flags &= ~RQF_PM_ADDED;
 		rq->q->nr_pending--;
+	}
 }
 
 static void blk_pm_add_request(struct request_queue *q, struct request *rq)
 {
-	if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
-	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
-		pm_request_resume(q->dev);
+	if (q->dev && !(rq->rq_flags & RQF_PM)) {
+		rq->rq_flags |= RQF_PM_ADDED;
+		if (q->nr_pending++ == 0 &&
+		    (q->rpm_status == RPM_SUSPENDED ||
+		     q->rpm_status == RPM_SUSPENDING))
+			pm_request_resume(q->dev);
+	}
 }
 #else
 static inline void blk_pm_requeue_request(struct request *rq) {}
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c index f404f2a4f11b..f66ff5c3e34e 100644 --- a/drivers/bluetooth/bluetooth-power.c +++ b/drivers/bluetooth/bluetooth-power.c @@ -448,10 +448,6 @@ static void bt_free_gpios(void) { if (bt_power_pdata->bt_gpio_sys_rst > 0) gpio_free(bt_power_pdata->bt_gpio_sys_rst); - if (bt_power_pdata->wl_gpio_sys_rst > 0) - gpio_free(bt_power_pdata->wl_gpio_sys_rst); - if (bt_power_pdata->bt_gpio_sw_ctrl > 0) - gpio_free(bt_power_pdata->bt_gpio_sw_ctrl); if (bt_power_pdata->bt_gpio_debug > 0) gpio_free(bt_power_pdata->bt_gpio_debug); } @@ -1153,7 +1149,9 @@ static long bt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) soc_id = chipset_version; if (soc_id == QCA_HSP_SOC_ID_0100 || soc_id == QCA_HSP_SOC_ID_0110 || - soc_id == QCA_HSP_SOC_ID_0200) { + soc_id == QCA_HSP_SOC_ID_0200 || + soc_id == QCA_HSP_SOC_ID_0210 || + soc_id == QCA_HSP_SOC_ID_1211) { ret = bt_disable_asd(); } } else { diff --git a/drivers/bluetooth/btfm_slim.c b/drivers/bluetooth/btfm_slim.c index 52930ee285ed..8c94431de2ef 100644 --- a/drivers/bluetooth/btfm_slim.c +++ b/drivers/bluetooth/btfm_slim.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */ #include @@ -442,6 +442,8 @@ int btfm_slim_hw_init(struct btfmslim *btfmslim) if (chipset_ver == QCA_HSP_SOC_ID_0100 || chipset_ver == QCA_HSP_SOC_ID_0110 || + chipset_ver == QCA_HSP_SOC_ID_0210 || + chipset_ver == QCA_HSP_SOC_ID_1211 || chipset_ver == QCA_HSP_SOC_ID_0200) { BTFMSLIM_INFO("chipset is hastings prime, overwriting EA"); slim->e_addr[0] = 0x00; diff --git a/drivers/bluetooth/btfm_slim_codec.c b/drivers/bluetooth/btfm_slim_codec.c index 4f4cbe738e50..1b38246db8db 100644 --- a/drivers/bluetooth/btfm_slim_codec.c +++ b/drivers/bluetooth/btfm_slim_codec.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. */ #include @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -209,6 +210,11 @@ static int btfm_slim_dai_prepare(struct snd_pcm_substream *substream, /* save the enable channel status */ if (ret == 0) bt_soc_enable_status = 1; + + if (ret == -EISCONN) { + BTFMSLIM_ERR("channel opened without closing, return success"); + ret = 0; + } return ret; } diff --git a/drivers/bluetooth/btfm_slim_slave.h b/drivers/bluetooth/btfm_slim_slave.h index 26fc8abae8a3..e706fa916fc8 100644 --- a/drivers/bluetooth/btfm_slim_slave.h +++ b/drivers/bluetooth/btfm_slim_slave.h @@ -98,6 +98,7 @@ enum { QCA_COMANCHE_SOC_ID_0110 = 0x40070110, QCA_COMANCHE_SOC_ID_0120 = 0x40070120, QCA_COMANCHE_SOC_ID_0130 = 0x40070130, + QCA_COMANCHE_SOC_ID_4130 = 0x40074130, QCA_COMANCHE_SOC_ID_5120 = 0x40075120, QCA_COMANCHE_SOC_ID_5130 = 0x40075130, }; @@ -110,6 +111,8 @@ enum { QCA_HSP_SOC_ID_0100 = 0x400C0100, QCA_HSP_SOC_ID_0110 = 0x400C0110, QCA_HSP_SOC_ID_0200 = 0x400C0200, + QCA_HSP_SOC_ID_0210 = 0x400C0210, + QCA_HSP_SOC_ID_1211 = 0x400C1211, }; /* Function Prototype */ diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c index 8eba7f391711..a27b96345f2f 100644 --- a/drivers/bus/mhi/core/mhi_init.c +++ b/drivers/bus/mhi/core/mhi_init.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2018-2020, The Linux Foundation. 
All rights reserved. */
+/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. */
 
 #include
 #include
@@ -89,9 +89,12 @@ struct mhi_controller *find_mhi_controller_by_name(const char *name)
 
 const char *to_mhi_pm_state_str(enum MHI_PM_STATE state)
 {
-	int index = find_last_bit((unsigned long *)&state, 32);
+	int index;
 
-	if (index >= ARRAY_SIZE(mhi_pm_state_str))
+	if (state)
+		index = __fls(state);
+
+	if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
 		return "Invalid State";
 
 	return mhi_pm_state_str[index];
@@ -1048,7 +1051,16 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
 
 	vfree(buf_ring->base);
 	buf_ring->base = tre_ring->base = NULL;
+	tre_ring->ctxt_wp = NULL;
 	chan_ctxt->rbase = 0;
+	chan_ctxt->rlen = 0;
+	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
+	tre_ring->rp = tre_ring->wp = tre_ring->base;
+	buf_ring->rp = buf_ring->wp = buf_ring->base;
+
+	/* Update to all cores */
+	smp_wmb();
+
 }
 
 int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
index e82530f8f860..3098f3892fd4 100644
--- a/drivers/bus/mhi/core/mhi_main.c
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. */
 
 #include
 #include
@@ -558,18 +558,6 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
 		mhi_tre->dword[0] = MHI_RSCTRE_DATA_DWORD0(buf_ring->wp -
 							   buf_ring->base);
 		mhi_tre->dword[1] = MHI_RSCTRE_DATA_DWORD1;
-		/*
-		 * on RSC channel IPA HW has a minimum credit requirement before
-		 * switching to DB mode
-		 */
-		n_free_tre = mhi_get_no_free_descriptors(mhi_dev,
-							 DMA_FROM_DEVICE);
-		n_queued_tre = tre_ring->elements - n_free_tre;
-		read_lock_bh(&mhi_chan->lock);
-		if (mhi_chan->db_cfg.db_mode &&
-		    n_queued_tre < MHI_RSC_MIN_CREDITS)
-			ring_db = false;
-		read_unlock_bh(&mhi_chan->lock);
 	} else {
 		mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
 		mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
@@ -587,12 +575,25 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
 	if (mhi_chan->dir == DMA_TO_DEVICE)
 		atomic_inc(&mhi_cntrl->pending_pkts);
 
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && ring_db) {
-		read_lock_bh(&mhi_chan->lock);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_bh(&mhi_chan->lock);
+	read_lock_bh(&mhi_chan->lock);
+	if (mhi_chan->xfer_type == MHI_XFER_RSC_DMA) {
+		/*
+		 * on RSC channel IPA HW has a minimum credit requirement before
+		 * switching to DB mode
+		 */
+		n_free_tre = mhi_get_no_free_descriptors(mhi_dev,
+							 DMA_FROM_DEVICE);
+		n_queued_tre = tre_ring->elements - n_free_tre;
+		if (mhi_chan->db_cfg.db_mode &&
+		    n_queued_tre < MHI_RSC_MIN_CREDITS)
+			ring_db = false;
 	}
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && ring_db)
+		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+
+	read_unlock_bh(&mhi_chan->lock);
+
 	read_unlock_bh(&mhi_cntrl->pm_lock);
 
 	return 0;
diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c
index 0b4e3025e953..710d17cb14bb 100644
--- a/drivers/bus/mhi/core/mhi_pm.c
+++ b/drivers/bus/mhi/core/mhi_pm.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
*/ #include #include @@ -395,7 +395,8 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) read_lock_irq(&mhi_chan->lock); /* only ring DB if ring is not empty */ - if (tre_ring->base && tre_ring->wp != tre_ring->rp) + if (tre_ring->base && tre_ring->wp != tre_ring->rp && + mhi_chan->ch_state == MHI_CH_STATE_ENABLED) mhi_ring_chan_db(mhi_cntrl, mhi_chan); read_unlock_irq(&mhi_chan->lock); } @@ -1061,8 +1062,8 @@ void mhi_control_error(struct mhi_controller *mhi_cntrl) sfr_info->buf_addr); } - /* link is not down if device is in RDDM */ - transition_state = (mhi_cntrl->ee == MHI_EE_RDDM) ? + /* link is not down if device supports RDDM */ + transition_state = (mhi_cntrl->rddm_supported) ? MHI_PM_DEVICE_ERR_DETECT : MHI_PM_LD_ERR_FATAL_DETECT; write_lock_irq(&mhi_cntrl->pm_lock); diff --git a/drivers/bus/mhi/devices/mhi_netdev.c b/drivers/bus/mhi/devices/mhi_netdev.c index 724b38d72d2f..8fe1e7fb57d7 100644 --- a/drivers/bus/mhi/devices/mhi_netdev.c +++ b/drivers/bus/mhi/devices/mhi_netdev.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.*/ +/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.*/ #include #include @@ -780,6 +780,7 @@ static void mhi_netdev_push_skb(struct mhi_netdev *mhi_netdev, mhi_result->bytes_xferd, mhi_netdev->mru); skb->dev = mhi_netdev->ndev; skb->protocol = mhi_netdev_ip_type_trans(*(u8 *)mhi_buf->buf); + skb_set_mac_header(skb, 0); netif_receive_skb(skb); } @@ -815,6 +816,7 @@ static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev, /* we support chaining */ skb = alloc_skb(0, GFP_ATOMIC); if (likely(skb)) { + skb_set_mac_header(skb, 0); skb_add_rx_frag(skb, 0, mhi_buf->page, 0, mhi_result->bytes_xferd, mhi_netdev->mru); /* this is first on list */ diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index cc265cd8f882..046d39cdde5e 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -415,6 +415,8 @@ struct fastrpc_mmap { int uncached; int secure; uintptr_t attr; + bool is_filemap; + /* flag to indicate map used in process init */ }; enum fastrpc_perfkeys { @@ -833,9 +835,10 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va, spin_lock(&me->hlock); hlist_for_each_entry_safe(map, n, &me->maps, hn) { - if (map->raddr == va && + if (map->refs == 1 && map->raddr == va && map->raddr + map->len == va + len && - map->refs == 1) { + /* Remove map if not used in process initialization*/ + !map->is_filemap) { match = map; hlist_del_init(&map->hn); break; @@ -847,9 +850,10 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va, return 0; } hlist_for_each_entry_safe(map, n, &fl->maps, hn) { - if (map->raddr == va && + if (map->refs == 1 && map->raddr == va && map->raddr + map->len == va + len && - map->refs == 1) { + /* Remove map if not used in process initialization*/ + !map->is_filemap) { match = map; hlist_del_init(&map->hn); break; @@ -985,6 +989,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, map->fl = fl; map->fd = fd; map->attr = attr; + map->is_filemap = false; if (mflags == ADSP_MMAP_HEAP_ADDR || mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) { map->apps = me; @@ -2496,7 +2501,7 @@ static int fastrpc_get_spd_session(char *name, int *session, int *cid) static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl); static int fastrpc_channel_open(struct fastrpc_file *fl); -static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl); +static int fastrpc_mmap_remove_ssr(struct fastrpc_file 
*fl, int locked); static int fastrpc_init_process(struct fastrpc_file *fl, struct fastrpc_ioctl_init_attrs *uproc) { @@ -2509,7 +2514,9 @@ static int fastrpc_init_process(struct fastrpc_file *fl, struct fastrpc_buf *imem = NULL; unsigned long imem_dma_attr = 0; char *proc_name = NULL; - int unsigned_request = (uproc->attrs & FASTRPC_MODE_UNSIGNED_MODULE); + bool init_flags = init->flags == FASTRPC_INIT_CREATE ? true : false; + int proc_attrs = uproc->attrs & FASTRPC_MODE_UNSIGNED_MODULE; + int unsigned_request = proc_attrs && init_flags; int cid = fl->cid; struct fastrpc_channel_ctx *chan = &me->channel[cid]; @@ -2583,6 +2590,8 @@ static int fastrpc_init_process(struct fastrpc_file *fl, mutex_lock(&fl->map_mutex); VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0, init->file, init->filelen, mflags, &file)); + if (file) + file->is_filemap = true; mutex_unlock(&fl->map_mutex); if (err) goto bail; @@ -2664,7 +2673,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl, if (!init->filelen) goto bail; - proc_name = kzalloc(init->filelen, GFP_KERNEL); + proc_name = kzalloc(init->filelen + 1, GFP_KERNEL); VERIFY(err, !IS_ERR_OR_NULL(proc_name)); if (err) goto bail; @@ -2692,6 +2701,8 @@ static int fastrpc_init_process(struct fastrpc_file *fl, err = fastrpc_mmap_create(fl, -1, 0, init->mem, init->memlen, ADSP_MMAP_REMOTE_HEAP_ADDR, &mem); + if (mem) + mem->is_filemap = true; mutex_unlock(&fl->map_mutex); if (err) goto bail; @@ -3072,7 +3083,7 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags, } static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys, - size_t size, uint32_t flags) + size_t size, uint32_t flags, int locked) { int err = 0; struct fastrpc_apps *me = &gfa; @@ -3083,13 +3094,14 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys, if (flags == ADSP_MMAP_HEAP_ADDR) { struct fastrpc_ioctl_invoke_crc ioctl; remote_arg_t ra[2]; - int err = 0; + int err = 0, cid = 0; struct { uint8_t skey; } routargs; if (fl == NULL) goto bail; + cid = fl->cid; tgid = fl->tgid; ra[0].buf.pv = (void *)&tgid; ra[0].buf.len = sizeof(tgid); @@ -3104,8 +3116,16 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys, ioctl.attrs = NULL; ioctl.crc = NULL; + if (locked) { + mutex_unlock(&fl->map_mutex); + mutex_unlock(&me->channel[cid].smd_mutex); + } VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, 1, &ioctl))); + if (locked) { + mutex_lock(&me->channel[cid].smd_mutex); + mutex_lock(&fl->map_mutex); + } if (err) goto bail; } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { @@ -3139,7 +3159,8 @@ static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr, goto bail; if (flags == ADSP_MMAP_HEAP_ADDR || flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { - VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, phys, size, flags)); + VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, phys, size, + flags, 0)); if (err) goto bail; } @@ -3147,7 +3168,7 @@ static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr, return err; } -static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl) +static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked) { struct fastrpc_mmap *match = NULL, *map = NULL; struct hlist_node *n = NULL; @@ -3170,7 +3191,7 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl) if (match) { err = fastrpc_munmap_on_dsp_rh(fl, match->phys, - match->size, match->flags); + match->size, match->flags, locked); if (err) goto bail; if (me->ramdump_handle && 
me->enable_ramdump) { @@ -3217,7 +3238,7 @@ static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl) } if (me->channel[fl->cid].spd[session].pdrcount != me->channel[fl->cid].spd[session].prevpdrcount) { - err = fastrpc_mmap_remove_ssr(fl); + err = fastrpc_mmap_remove_ssr(fl, 0); if (err) pr_warn("adsprpc: %s: %s: failed to unmap remote heap (err %d)\n", __func__, current->comm, err); @@ -4005,12 +4026,12 @@ static int fastrpc_channel_open(struct fastrpc_file *fl) if (cid == ADSP_DOMAIN_ID && me->channel[cid].ssrcount != me->channel[cid].prevssrcount) { mutex_lock(&fl->map_mutex); - err = fastrpc_mmap_remove_ssr(fl); + err = fastrpc_mmap_remove_ssr(fl, 1); + mutex_unlock(&fl->map_mutex); if (err) pr_warn("adsprpc: %s: %s: failed to unmap remote heap for %s (err %d)\n", __func__, current->comm, me->channel[cid].subsys, err); - mutex_unlock(&fl->map_mutex); me->channel[cid].prevssrcount = me->channel[cid].ssrcount; } @@ -4088,11 +4109,14 @@ static int fastrpc_set_process_info(struct fastrpc_file *fl) { int err = 0, buf_size = 0; char strpid[PID_SIZE]; + char cur_comm[TASK_COMM_LEN]; + memcpy(cur_comm, current->comm, TASK_COMM_LEN); + cur_comm[TASK_COMM_LEN-1] = '\0'; fl->tgid = current->tgid; snprintf(strpid, PID_SIZE, "%d", current->pid); if (debugfs_root) { - buf_size = strlen(current->comm) + strlen("_") + buf_size = strlen(cur_comm) + strlen("_") + strlen(strpid) + 1; spin_lock(&fl->hlock); @@ -4107,13 +4131,13 @@ static int fastrpc_set_process_info(struct fastrpc_file *fl) err = -ENOMEM; return err; } - snprintf(fl->debug_buf, UL_SIZE, "%.10s%s%d", - current->comm, "_", current->pid); + snprintf(fl->debug_buf, buf_size, "%.10s%s%d", + cur_comm, "_", current->pid); fl->debugfs_file = debugfs_create_file(fl->debug_buf, 0644, debugfs_root, fl, &debugfs_fops); if (IS_ERR_OR_NULL(fl->debugfs_file)) { pr_warn("Error: %s: %s: failed to create debugfs file %s\n", - current->comm, __func__, fl->debug_buf); + cur_comm, __func__, fl->debug_buf); fl->debugfs_file = NULL; kfree(fl->debug_buf); fl->debug_buf_alloced_attempted = 0; @@ -4551,7 +4575,6 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num, break; default: err = -ENOTTY; - pr_info("bad ioctl: %d\n", ioctl_num); break; } bail: diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c index 4f38dba894f1..5294d337fd18 100644 --- a/drivers/char/diag/diag_dci.c +++ b/drivers/char/diag/diag_dci.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -1069,6 +1069,11 @@ void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source, return; } + if (token != entry->client_info.token) { + mutex_unlock(&driver->dci_mutex); + return; + } + mutex_lock(&entry->buffers[data_source].buf_mutex); rsp_buf = entry->buffers[data_source].buf_cmd; @@ -1740,7 +1745,16 @@ static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag, write_len += dci_header_size; *(int *)(buf + write_len) = tag; write_len += sizeof(int); - memcpy(buf + write_len, data, len); + if ((write_len + len) < DIAG_MDM_BUF_SIZE) { + memcpy(buf + write_len, data, len); + } else { + pr_err("diag: skip writing invalid length packet, token: %d, pkt_len: %d\n", + token, (write_len + len)); + spin_lock_irqsave(&driver->dci_mempool_lock, flags); + diagmem_free(driver, buf, dci_ops_tbl[token].mempool); + spin_unlock_irqrestore(&driver->dci_mempool_lock, flags); + return -EAGAIN; + } write_len += len; *(buf + write_len) = CONTROL_CHAR; /* End Terminator */ write_len += sizeof(uint8_t); diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c index 2ee3a46da4ef..4697f1912976 100644 --- a/drivers/char/diag/diag_masks.c +++ b/drivers/char/diag/diag_masks.c @@ -54,7 +54,8 @@ static const struct diag_ssid_range_t msg_mask_tbl[] = { { .ssid_first = MSG_SSID_22, .ssid_last = MSG_SSID_22_LAST }, { .ssid_first = MSG_SSID_23, .ssid_last = MSG_SSID_23_LAST }, { .ssid_first = MSG_SSID_24, .ssid_last = MSG_SSID_24_LAST }, - { .ssid_first = MSG_SSID_25, .ssid_last = MSG_SSID_25_LAST } + { .ssid_first = MSG_SSID_25, .ssid_last = MSG_SSID_25_LAST }, + { .ssid_first = MSG_SSID_26, .ssid_last = MSG_SSID_26_LAST } }; static int diag_save_user_msg_mask(struct diag_md_session_t *info); diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c index be43c84e3849..8f090854f887 100644 --- a/drivers/char/diag/diag_memorydevice.c +++ b/drivers/char/diag/diag_memorydevice.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -330,13 +330,19 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size, struct diag_md_info *ch = NULL; struct diag_buf_tbl_t *entry = NULL; uint8_t drain_again = 0; - int peripheral = 0; + int peripheral = 0, tmp_len = 0; struct diag_md_session_t *session_info = NULL; struct pid *pid_struct = NULL; struct task_struct *task_s = NULL; + unsigned char *tmp_buf = NULL; if (!info) return -EINVAL; + + tmp_buf = vzalloc(MAX_PERIPHERAL_HDLC_BUF_SZ); + if (!tmp_buf) + return -ENOMEM; + for (i = 0; i < NUM_DIAG_MD_DEV && !err; i++) { ch = &diag_md[i]; if (!ch->md_info_inited) @@ -348,6 +354,8 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size, spin_unlock_irqrestore(&ch->lock, flags); continue; } + tmp_len = entry->len; + memcpy(tmp_buf, entry->buf, entry->len); peripheral = diag_md_get_peripheral(entry->ctx); if (peripheral < 0) { spin_unlock_irqrestore(&ch->lock, flags); @@ -383,14 +391,6 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size, drain_again = 1; break; } - } else { - if ((ret + (2 * sizeof(int)) + entry->len) >= - buf_size) { - drain_again = 1; - break; - } - } - if (i > 0) { remote_token = diag_get_remote(i); task_s = get_pid_task(pid_struct, PIDTYPE_PID); if (task_s) { @@ -404,23 +404,20 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size, ret += sizeof(int); put_task_struct(task_s); } + } else { + if ((ret + (2 * sizeof(int)) + entry->len) >= + buf_size) { + drain_again = 1; + break; + } } task_s = get_pid_task(pid_struct, PIDTYPE_PID); if (task_s) { - spin_lock_irqsave(&ch->lock, flags); - entry = &ch->tbl[j]; - if (entry->len <= 0 || entry->buf == NULL) { - spin_unlock_irqrestore(&ch->lock, - flags); - continue; - } - spin_unlock_irqrestore(&ch->lock, - flags); /* Copy the length of data being passed */ - if (entry->len) { + if (tmp_len) { err = copy_to_user(buf + ret, - (void *)&(entry->len), + (void *)&(tmp_len), sizeof(int)); if (err) { put_task_struct(task_s); @@ -430,10 +427,10 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size, } /* Copy the actual data being passed */ - if (entry->buf) { + if (tmp_buf) { err = copy_to_user(buf + ret, - (void *)entry->buf, - entry->len); + (void *)tmp_buf, + tmp_len); if (err) { put_task_struct(task_s); goto drop_data; @@ -467,6 +464,8 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size, spin_unlock_irqrestore(&ch->lock, flags); put_pid(pid_struct); + memset(tmp_buf, 0, MAX_PERIPHERAL_HDLC_BUF_SZ); + tmp_len = 0; } } @@ -482,6 +481,8 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size, } put_pid(pid_struct); } + vfree(tmp_buf); + tmp_buf = NULL; diag_ws_on_copy_complete(DIAG_WS_MUX); if (drain_again) chk_logging_wakeup(); diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c index ea48c59b2c0b..100decaf2650 100644 --- a/drivers/char/diag/diagchar_core.c +++ b/drivers/char/diag/diagchar_core.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2008-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2008-2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -376,6 +376,8 @@ static int diagchar_open(struct inode *inode, struct file *file) if (driver->ref_count == 0) diag_mempool_init(); driver->ref_count++; + DIAG_LOG(DIAG_DEBUG_USERSPACE, + "diag: open successful for client pid: %d\n", current->tgid); mutex_unlock(&driver->diagchar_mutex); return 0; } @@ -3794,6 +3796,9 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count, if (driver->data_ready[index] & MSG_MASKS_TYPE) { /*Copy the type of data being passed*/ + DIAG_LOG(DIAG_DEBUG_MASKS, + "diag: msg masks update to client pid: %d\n", current->tgid); + data_type = driver->data_ready[index] & MSG_MASKS_TYPE; mutex_unlock(&driver->diagchar_mutex); mutex_lock(&driver->md_session_lock); @@ -3815,11 +3820,19 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count, mutex_lock(&driver->diagchar_mutex); driver->data_ready[index] ^= MSG_MASKS_TYPE; atomic_dec(&driver->data_ready_notif[index]); + + DIAG_LOG(DIAG_DEBUG_MASKS, + "diag: msg masks update complete for client pid: %d\n", + current->tgid); + goto exit; } if (driver->data_ready[index] & EVENT_MASKS_TYPE) { /*Copy the type of data being passed*/ + DIAG_LOG(DIAG_DEBUG_MASKS, + "diag: event masks update to client pid: %d\n", current->tgid); + data_type = driver->data_ready[index] & EVENT_MASKS_TYPE; mutex_unlock(&driver->diagchar_mutex); mutex_lock(&driver->md_session_lock); @@ -3852,11 +3865,19 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count, mutex_lock(&driver->diagchar_mutex); driver->data_ready[index] ^= EVENT_MASKS_TYPE; atomic_dec(&driver->data_ready_notif[index]); + + DIAG_LOG(DIAG_DEBUG_MASKS, + "diag: %s: event masks update complete for client pid: %d\n", + current->tgid); + goto exit; } if (driver->data_ready[index] & LOG_MASKS_TYPE) { /*Copy the type of data being passed*/ + DIAG_LOG(DIAG_DEBUG_MASKS, + "diag: log masks update to client pid: %d\n", current->tgid); + data_type = driver->data_ready[index] & LOG_MASKS_TYPE; mutex_unlock(&driver->diagchar_mutex); mutex_lock(&driver->md_session_lock); @@ -3878,6 +3899,11 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count, mutex_lock(&driver->diagchar_mutex); driver->data_ready[index] ^= LOG_MASKS_TYPE; atomic_dec(&driver->data_ready_notif[index]); + + DIAG_LOG(DIAG_DEBUG_MASKS, + "diag: log masks update complete for client pid: %d\n", + current->tgid); + goto exit; } diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c index 4bc7a57affb3..da5bfa2f7c54 100644 --- a/drivers/char/diag/diagfwd_peripheral.c +++ b/drivers/char/diag/diagfwd_peripheral.c @@ -178,7 +178,7 @@ static int diag_add_hdlc_encoding(unsigned char *dest_buf, int *dest_len, static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len) { - int i, ctx = 0; + int i, ctx = 0, flag_64k = 0; uint32_t max_size = 0; unsigned long flags; unsigned char *temp_buf = NULL; @@ -189,10 +189,11 @@ static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len) max_size = (2 * len) + 3; if (max_size > PERIPHERAL_BUF_SZ) { - if (max_size > MAX_PERIPHERAL_HDLC_BUF_SZ) { - pr_err("diag: In %s, max_size is going beyond limit %d\n", + if (max_size > MAX_PERIPHERAL_BUF_SZ) { + pr_err("diag: In %s, max_size (%d) is going beyond 32k\n", __func__, max_size); max_size = MAX_PERIPHERAL_HDLC_BUF_SZ; + flag_64k = 1; } mutex_lock(&driver->md_session_lock); @@ -229,11 +230,19 @@ static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t 
len) mutex_unlock(&driver->md_session_lock); return -ENOMEM; } - DIAG_LOG(DIAG_DEBUG_PERIPHERALS, - "Reallocated data buffer: %pK with size: %d\n", - temp_buf, max_size); buf->data = temp_buf; - buf->len = max_size; + + if (flag_64k) + buf->len = MAX_PERIPHERAL_HDLC_BUF_SZ; + else + buf->len = MAX_PERIPHERAL_BUF_SZ; + + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + "diag: Reallocated data buffer: %pK with size: %d, max_buf_len: %d, p: %d, t: %d, n: %d\n", + temp_buf, max_size, buf->len, + GET_BUF_PERIPHERAL(buf->ctxt), + GET_BUF_TYPE(buf->ctxt), + GET_BUF_NUM(buf->ctxt)); } mutex_unlock(&driver->md_session_lock); } diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h index 9d83cc8c023c..3e8957c3205c 100644 --- a/drivers/char/diag/diagfwd_peripheral.h +++ b/drivers/char/diag/diagfwd_peripheral.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2019, 2021 The Linux Foundation. All rights reserved. */ #ifndef DIAGFWD_PERIPHERAL_H @@ -7,7 +7,7 @@ #define PERIPHERAL_BUF_SZ 16384 #define MAX_PERIPHERAL_BUF_SZ 32768 -#define MAX_PERIPHERAL_HDLC_BUF_SZ 65539 +#define MAX_PERIPHERAL_HDLC_BUF_SZ 65536 #define TRANSPORT_UNKNOWN -1 #define TRANSPORT_SOCKET 0 diff --git a/drivers/char/diag/diagfwd_rpmsg.c b/drivers/char/diag/diagfwd_rpmsg.c index c194e831551d..e642e9266876 100644 --- a/drivers/char/diag/diagfwd_rpmsg.c +++ b/drivers/char/diag/diagfwd_rpmsg.c @@ -22,6 +22,7 @@ #include "diag_ipc_logging.h" #define PERI_RPMSG rpmsg_info->peripheral +#define RX_LIST_MAX_PKT_CNT 10 struct diag_rpmsg_read_work { struct work_struct work; @@ -717,8 +718,15 @@ static void diag_rpmsg_notify_rx_work_fn(struct work_struct *work) spin_unlock_irqrestore(&read_work_struct->rx_lock, flags); } else { + rpmsg_info->list_pkt_cnt += 1; + if (rpmsg_info->list_pkt_cnt > RX_LIST_MAX_PKT_CNT) + list_del(&rx_item->list); spin_unlock_irqrestore(&read_work_struct->rx_lock, flags); + if (rpmsg_info->list_pkt_cnt > RX_LIST_MAX_PKT_CNT) { + kfree(rx_item->rpmsg_rx_buf); + kfree(rx_item); + } goto end; } @@ -782,6 +790,9 @@ void rpmsg_mark_buffers_free(uint8_t peripheral, uint8_t type, int buf_num) rpmsg_info->buf2 = NULL; DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "marked buf2 NULL"); } + + if (rpmsg_info->list_pkt_cnt > 0) + rpmsg_info->list_pkt_cnt -= 1; } static void rpmsg_late_init(struct diag_rpmsg_info *rpmsg_info) @@ -850,6 +861,7 @@ static void __diag_rpmsg_init(struct diag_rpmsg_info *rpmsg_info) rpmsg_info->hdl = NULL; rpmsg_info->fwd_ctxt = NULL; rpmsg_info->probed = 0; + rpmsg_info->list_pkt_cnt = 0; atomic_set(&rpmsg_info->opened, 0); atomic_set(&rpmsg_info->diag_state, 0); DIAG_LOG(DIAG_DEBUG_PERIPHERALS, diff --git a/drivers/char/diag/diagfwd_rpmsg.h b/drivers/char/diag/diagfwd_rpmsg.h index 6a973bac5c07..4cc6955efeef 100644 --- a/drivers/char/diag/diagfwd_rpmsg.h +++ b/drivers/char/diag/diagfwd_rpmsg.h @@ -16,6 +16,7 @@ struct diag_rpmsg_info { atomic_t opened; atomic_t diag_state; uint32_t fifo_size; + uint32_t list_pkt_cnt; struct rpmsg_device *hdl; char edge[DIAG_RPMSG_NAME_SZ]; char name[DIAG_RPMSG_NAME_SZ]; diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c index 3cec3053070c..e2d99204f3ac 100644 --- a/drivers/char/diag/diagmem.c +++ b/drivers/char/diag/diagmem.c @@ -172,7 +172,7 @@ void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type) break; } if (size == 0 || size > mempool->itemsize || - size > (int)mempool->pool->pool_data) { + size > 
(size_t)mempool->pool->pool_data) { pr_err_ratelimited("diag: cannot alloc from mempool %s, invalid size: %d\n", mempool->name, size); break; diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 6c43d83c29ea..228cdec287ba 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -608,3 +608,44 @@ config CLOCK_CPU_SDM Support for the cpu clock controller on SDM based devices(e.g. QM215/SDM429). Say Y if you want to support CPU clock scaling using CPUfreq drivers for dynamic power management. + +config SM_GCC_KHAJE + tristate "KHAJE Global Clock Controller" + depends on COMMON_CLK_QCOM + help + Support for the global clock controller on KHAJE devices. + Say Y if you want to use peripheral devices such as UART, SPI, + I2C, USB, UFS, SDCC, PCIe, Camera, Video etc. + +config SM_GPUCC_KHAJE + tristate "KHAJE Graphics Clock Controller" + select SM_GCC_KHAJE + help + Support for the graphics clock controller on Qualcomm Technologies, Inc + KHAJE devices. + Say Y if you want to support graphics controller devices. + +config SM_DISPCC_KHAJE + tristate "KHAJE Display Clock Controller" + select SM_GCC_KHAJE + help + Support for the display clock controller on Qualcomm Technologies, Inc. + KHAJE devices. + Say Y if you want to support display devices and functionality such as + splash screen. + +config SM_DEBUGCC_KHAJE + tristate "KHAJE Debug Clock Controller" + select SM_GCC_KHAJE + help + Support for the debug clock controller on Qualcomm Technologies, Inc + KHAJE devices. + Say Y if you want to support the clock measurement functionality. + +config SDM_DEBUGCC_439 + tristate "SDM439 SDM429 Debug Clock Controller" + depends on SDM_GCC_429W + help + Support for the debug clock controller on Qualcomm Technologies, Inc + SDM429/SDM439 devices. + Say Y if you want to support the clock measurement functionality. 
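For reference, a minimal defconfig fragment that would enable the clock controller options introduced above (illustrative only, not part of this patch; the exact selection depends on the target board, and SDM_DEBUGCC_439 additionally depends on SDM_GCC_429W being enabled):

CONFIG_SM_GCC_KHAJE=y
CONFIG_SM_GPUCC_KHAJE=y
CONFIG_SM_DISPCC_KHAJE=y
CONFIG_SM_DEBUGCC_KHAJE=y
CONFIG_SDM_DEBUGCC_439=y
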
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index fa1f143e1d24..15abfd421a12 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -57,6 +57,7 @@ obj-$(CONFIG_QM_GPUCC_SCUBA) += gpucc-scuba.o obj-$(CONFIG_QM_DEBUGCC_SCUBA) += debugcc-scuba.o obj-$(CONFIG_SDM_CAMCC_LAGOON) += camcc-lagoon.o obj-$(CONFIG_SDM_DEBUGCC_429W) += debugcc-sdm429w.o +obj-$(CONFIG_SDM_DEBUGCC_439) += debugcc-sdm439.o obj-$(CONFIG_SDM_DEBUGCC_LAGOON) += debugcc-lagoon.o obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o obj-$(CONFIG_SDM_DISPCC_LAGOON) += dispcc-lagoon.o @@ -72,12 +73,16 @@ obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o obj-$(CONFIG_SDM_VIDEOCC_LAGOON) += videocc-lagoon.o obj-$(CONFIG_SM_CAMCC_LITO) += camcc-lito.o obj-$(CONFIG_SM_DEBUGCC_BENGAL) += debugcc-bengal.o +obj-$(CONFIG_SM_DEBUGCC_KHAJE) += debugcc-khaje.o obj-$(CONFIG_SM_DEBUGCC_LITO) += debugcc-lito.o obj-$(CONFIG_SM_DISPCC_BENGAL) += dispcc-bengal.o +obj-$(CONFIG_SM_DISPCC_KHAJE) += dispcc-khaje.o obj-$(CONFIG_SM_DISPCC_LITO) += dispcc-lito.o obj-$(CONFIG_SM_GCC_BENGAL) += gcc-bengal.o +obj-$(CONFIG_SM_GCC_KHAJE) += gcc-khaje.o obj-$(CONFIG_SM_GCC_LITO) += gcc-lito.o obj-$(CONFIG_SM_GPUCC_BENGAL) += gpucc-bengal.o +obj-$(CONFIG_SM_GPUCC_KHAJE) += gpucc-khaje.o obj-$(CONFIG_SM_GPUCC_LITO) += gpucc-lito.o obj-$(CONFIG_SM_NPUCC_LITO) += npucc-lito.o obj-$(CONFIG_SM_VIDEOCC_LITO) += videocc-lito.o diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index 07b6ccc2ae57..785140ccfdbd 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2015, 2018-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2015, 2018-2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -30,6 +30,7 @@ #define PLL_VOTE_FSM_RESET BIT(21) #define PLL_UPDATE BIT(22) #define PLL_UPDATE_BYPASS BIT(23) +#define PLL_FSM_LEGACY_MODE BIT(24) #define PLL_ALPHA_EN BIT(24) #define PLL_OFFLINE_ACK BIT(28) #define ALPHA_PLL_ACK_LATCH BIT(29) @@ -509,9 +510,22 @@ alpha_pll_calc_rate(u64 prate, u32 l, u32 a, u32 alpha_width) return (prate * l) + ((prate * a) >> ALPHA_SHIFT(alpha_width)); } +static void zonda_pll_adjust_l_val(unsigned long rate, unsigned long prate, + u32 *l) +{ + u64 remainder, quotient; + + quotient = rate; + remainder = do_div(quotient, prate); + *l = quotient; + + if ((remainder * 2) / prate) + *l = *l + 1; +} + static unsigned long alpha_pll_round_rate(unsigned long rate, unsigned long prate, u32 *l, u64 *a, - u32 alpha_width) + u32 alpha_width) { u64 remainder; u64 quotient; @@ -1265,10 +1279,10 @@ static int clk_zonda_pll_enable(struct clk_hw *hw) static void clk_zonda_pll_disable(struct clk_hw *hw) { struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); - u32 val, mask, off = pll->offset; + u32 val, mask; int ret; - ret = regmap_read(pll->clkr.regmap, off + PLL_MODE(pll), &val); + ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val); if (ret) return; @@ -1304,12 +1318,13 @@ static int clk_zonda_pll_set_rate(struct clk_hw *hw, unsigned long rate, { struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); unsigned long rrate; - u32 test_ctl_val; + u32 test_ctl_val, alpha_width = pll_alpha_width(pll); u32 l; u64 a; int ret; - rrate = alpha_pll_round_rate(rate, prate, &l, &a, ALPHA_BITWIDTH); + rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width); + /* * Due to a limited number of bits for fractional rate programming, the * rounded up rate could be marginally higher than the requested rate. @@ -1320,9 +1335,15 @@ static int clk_zonda_pll_set_rate(struct clk_hw *hw, unsigned long rate, return -EINVAL; } + if (a && (a & BIT(15))) + zonda_pll_adjust_l_val(rate, prate, &l); + regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a); regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l); + if (!clk_hw_is_enabled(hw)) + return 0; + /* Wait before polling for the frequency latch */ udelay(5); @@ -1344,16 +1365,33 @@ static int clk_zonda_pll_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } +static unsigned long alpha_pll_adjust_calc_rate(u64 prate, u32 l, u32 frac, + u32 alpha_width) +{ + uint64_t tmp; + + frac = 100 - DIV_ROUND_UP_ULL((frac * 100), BIT(alpha_width)); + + tmp = frac * prate; + do_div(tmp, 100); + + return (l * prate) - tmp; +} + static unsigned long clk_zonda_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); - u32 l, frac; + u32 l, frac, alpha_width = pll_alpha_width(pll); regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac); - return alpha_pll_calc_rate(parent_rate, l, frac, ALPHA_BITWIDTH); + if (frac & BIT(15)) + return alpha_pll_adjust_calc_rate(parent_rate, l, frac, + alpha_width); + else + return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width); } static void clk_zonda_pll_list_registers(struct seq_file *f, struct clk_hw *hw) @@ -2076,6 +2114,9 @@ void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, PLL_UPDATE_BYPASS, PLL_UPDATE_BYPASS); + if (pll->flags & SUPPORTS_FSM_LEGACY_MODE) + regmap_update_bits(regmap, PLL_MODE(pll), PLL_FSM_LEGACY_MODE, + PLL_FSM_LEGACY_MODE); /* Disable PLL output */ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0); @@ -2678,8 +2719,7 @@ static int 
clk_alpha_pll_calibrate(struct clk_hw *hw) * So slew pll to the previously set frequency. */ freq_hz = alpha_pll_round_rate(clk_hw_get_rate(hw), - clk_hw_get_rate(parent), &l, &a, alpha_width); - + clk_hw_get_rate(parent), &l, &a, alpha_width); pr_debug("pll %s: setting back to required rate %lu, freq_hz %ld\n", hw->init->name, clk_hw_get_rate(hw), freq_hz); diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h index 1e8588eb9f44..5fe56ab670f4 100644 --- a/drivers/clk/qcom/clk-alpha-pll.h +++ b/drivers/clk/qcom/clk-alpha-pll.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2015, 2018-2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2015, 2018-2021, The Linux Foundation. All rights reserved. */ #ifndef __QCOM_CLK_ALPHA_PLL_H__ #define __QCOM_CLK_ALPHA_PLL_H__ @@ -91,6 +91,7 @@ struct clk_alpha_pll { #define SUPPORTS_SLEW BIT(4) /* Associated with soft_vote for multiple PLL software instances */ #define SUPPORTS_FSM_VOTE BIT(5) +#define SUPPORTS_FSM_LEGACY_MODE BIT(6) u8 flags; struct clk_regmap clkr; diff --git a/drivers/clk/qcom/clk-cpu-sdm.c b/drivers/clk/qcom/clk-cpu-sdm.c index ebad9f0b0fe5..c09a1cc83f39 100644 --- a/drivers/clk/qcom/clk-cpu-sdm.c +++ b/drivers/clk/qcom/clk-cpu-sdm.c @@ -27,6 +27,8 @@ container_of(to_clk_regmap(_hw), struct clk_regmap_mux_div, clkr) static DEFINE_VDD_REGULATORS(vdd_hf_pll, VDD_HF_PLL_NUM, 2, vdd_hf_levels); +static DEFINE_VDD_REGULATORS(vdd_sr2_pll, VDD_HF_PLL_NUM, 2, vdd_hf_levels); +static DEFINE_VDD_REGS_INIT(vdd_cpu_c0, 1); static DEFINE_VDD_REGS_INIT(vdd_cpu_c1, 1); static DEFINE_VDD_REGS_INIT(vdd_cpu_cci, 1); @@ -36,29 +38,22 @@ enum apcs_mux_clk_parent { P_APCS_CPU_PLL, }; -struct pll_spm_ctrl { - u32 offset; - u32 force_event_offset; - u32 event_bit; - void __iomem *spm_base; -}; - -static struct pll_spm_ctrl apcs_pll_spm = { - .offset = 0x50, - .force_event_offset = 0x4, - .event_bit = 0x4, -}; - static const struct parent_map apcs_mux_clk_parent_map0[] = { { P_BI_TCXO_AO, 0 }, { P_GPLL0_AO_OUT_MAIN, 4 }, { P_APCS_CPU_PLL, 5 }, }; -static const char *const apcs_mux_clk_parent_name0[] = { +static const char *const apcs_mux_clk_c1_parent_name0[] = { "bi_tcxo_ao", "gpll0_ao_out_main", - "apcs_cpu_pll", + "apcs_cpu_pll1", +}; + +static const char *const apcs_mux_clk_c0_parent_name0[] = { + "bi_tcxo_ao", + "gpll0_ao_out_main", + "apcs_cpu_pll0", }; static const struct parent_map apcs_mux_clk_parent_map1[] = { @@ -111,6 +106,20 @@ static int cpucc_clk_set_rate(struct clk_hw *hw, unsigned long rate, return mux_div_set_src_div(cpuclk, cpuclk->src, cpuclk->div); } +static bool freq_from_gpll0(unsigned long req_rate, unsigned long gpll0_rate) +{ + unsigned long temp; + int div; + + for (div = 10; div <= 40; div += 5) { + temp = mult_frac(gpll0_rate, 10, div); + if (req_rate == temp) + return true; + } + + return false; +} + static int cpucc_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { @@ -137,7 +146,7 @@ static int cpucc_clk_determine_rate(struct clk_hw *hw, apcs_gpll0_rate = clk_hw_get_rate(apcs_gpll0_hw); apcs_gpll0_rrate = DIV_ROUND_UP(apcs_gpll0_rate, 1000000) * 1000000; - if (rate <= apcs_gpll0_rrate) { + if (freq_from_gpll0(rate, apcs_gpll0_rrate)) { req->best_parent_hw = apcs_gpll0_hw; req->best_parent_rate = apcs_gpll0_rrate; div = DIV_ROUND_CLOSEST(2 * apcs_gpll0_rrate, rate) - 1; @@ -227,48 +236,7 @@ static u8 cpucc_clk_get_parent(struct clk_hw *hw) return clk_regmap_mux_div_ops.get_parent(hw); } -static void spm_event(struct pll_spm_ctrl 
*apcs_pll_spm, bool enable) -{ - void __iomem *base = apcs_pll_spm->spm_base; - u32 offset, force_event_offset, bit, val; - - if (!apcs_pll_spm || !base) - return; - - offset = apcs_pll_spm->offset; - force_event_offset = apcs_pll_spm->force_event_offset; - bit = apcs_pll_spm->event_bit; - - if (enable) { - /* L2_SPM_FORCE_EVENT_EN */ - val = readl_relaxed(base + offset); - val |= BIT(bit); - writel_relaxed(val, (base + offset)); - /* Ensure that the write above goes through. */ - mb(); - - /* L2_SPM_FORCE_EVENT */ - val = readl_relaxed(base + offset + force_event_offset); - val |= BIT(bit); - writel_relaxed(val, (base + offset + force_event_offset)); - /* Ensure that the write above goes through. */ - mb(); - } else { - /* L2_SPM_FORCE_EVENT */ - val = readl_relaxed(base + offset + force_event_offset); - val &= ~BIT(bit); - writel_relaxed(val, (base + offset + force_event_offset)); - /* Ensure that the write above goes through. */ - mb(); - - /* L2_SPM_FORCE_EVENT_EN */ - val = readl_relaxed(base + offset); - val &= ~BIT(bit); - writel_relaxed(val, (base + offset)); - /* Ensure that the write above goes through. */ - mb(); - } -} +static void do_nothing(void *unused) { } /* * We use the notifier function for switching to a temporary safe configuration @@ -279,17 +247,31 @@ static int cpucc_notifier_cb(struct notifier_block *nb, unsigned long event, { struct clk_regmap_mux_div *cpuclk = container_of(nb, struct clk_regmap_mux_div, clk_nb); + bool hw_low_power_ctrl = cpuclk->clk_lpm.hw_low_power_ctrl; int ret = 0, safe_src = cpuclk->safe_src; switch (event) { case PRE_RATE_CHANGE: + if (hw_low_power_ctrl) { + memset(&cpuclk->clk_lpm.req, 0, + sizeof(cpuclk->clk_lpm.req)); + cpumask_copy(&cpuclk->clk_lpm.req.cpus_affine, + (const struct cpumask *)&cpuclk->clk_lpm.cpu_reg_mask); + cpuclk->clk_lpm.req.type = PM_QOS_REQ_AFFINE_CORES; + pm_qos_add_request(&cpuclk->clk_lpm.req, + PM_QOS_CPU_DMA_LATENCY, + cpuclk->clk_lpm.cpu_latency_no_l2_pc_us - 1); + smp_call_function_any(&cpuclk->clk_lpm.cpu_reg_mask, + do_nothing, NULL, 1); + } + /* set the mux to safe source gpll0_ao_out & div */ - ret = mux_div_set_src_div(cpuclk, safe_src, 1); - spm_event(&apcs_pll_spm, true); + mux_div_set_src_div(cpuclk, safe_src, 1); break; case POST_RATE_CHANGE: - if (cpuclk->src != safe_src) - spm_event(&apcs_pll_spm, false); + if (hw_low_power_ctrl) + pm_qos_remove_request(&cpuclk->clk_lpm.req); + break; case ABORT_RATE_CHANGE: pr_err("Error in configuring PLL - stay at safe src only\n"); @@ -324,7 +306,7 @@ static const struct pll_config apcs_cpu_pll_config = { .aux_output_mask = BIT(1), }; -static struct clk_pll apcs_cpu_pll = { +static struct clk_pll apcs_cpu_pll0 = { .mode_reg = 0x0, .l_reg = 0x4, .m_reg = 0x8, @@ -332,8 +314,68 @@ static struct clk_pll apcs_cpu_pll = { .config_reg = 0x10, .status_reg = 0x1c, .status_bit = 16, + .spm_ctrl = { + .offset = 0x50, + .event_bit = 0x4, + }, .clkr.hw.init = &(struct clk_init_data){ - .name = "apcs_cpu_pll", + .name = "apcs_cpu_pll0", + .parent_names = (const char *[]){ "bi_tcxo_ao" }, + .num_parents = 1, + .ops = &clk_pll_hf_ops, + .vdd_class = &vdd_sr2_pll, + .rate_max = (unsigned long[VDD_HF_PLL_NUM]) { + [VDD_HF_PLL_SVS] = 1000000000, + [VDD_HF_PLL_NOM] = 1900000000, + }, + .num_rate_max = VDD_HF_PLL_NUM, + }, +}; + +static struct clk_regmap_mux_div apcs_mux_c0_clk = { + .reg_offset = 0x0, + .hid_width = 5, + .hid_shift = 0, + .src_width = 3, + .src_shift = 8, + .safe_src = 4, + .safe_div = 1, + .parent_map = apcs_mux_clk_parent_map0, + .clk_nb.notifier_call = 
cpucc_notifier_cb, + .clk_lpm = { + /* CPU 4 - 7 */ + .cpu_reg_mask = { 0xf0 }, + .latency_lvl = { + .affinity_level = LPM_AFF_LVL_L2, + .reset_level = LPM_RESET_LVL_GDHS, + .level_name = "pwr", + }, + .cpu_latency_no_l2_pc_us = 300, + }, + .clkr.hw.init = &(struct clk_init_data) { + .name = "apcs_mux_c0_clk", + .parent_names = apcs_mux_clk_c0_parent_name0, + .num_parents = 3, + .vdd_class = &vdd_cpu_c0, + .flags = CLK_SET_RATE_PARENT, + .ops = &cpucc_clk_ops, + }, +}; + +static struct clk_pll apcs_cpu_pll1 = { + .mode_reg = 0x0, + .l_reg = 0x4, + .m_reg = 0x8, + .n_reg = 0xc, + .config_reg = 0x10, + .status_reg = 0x1c, + .status_bit = 16, + .spm_ctrl = { + .offset = 0x50, + .event_bit = 0x4, + }, + .clkr.hw.init = &(struct clk_init_data){ + .name = "apcs_cpu_pll1", .parent_names = (const char *[]){ "bi_tcxo_ao" }, .num_parents = 1, .ops = &clk_pll_hf_ops, @@ -356,9 +398,19 @@ static struct clk_regmap_mux_div apcs_mux_c1_clk = { .safe_div = 1, .parent_map = apcs_mux_clk_parent_map0, .clk_nb.notifier_call = cpucc_notifier_cb, + .clk_lpm = { + /* CPU 0 - 3*/ + .cpu_reg_mask = { 0xf }, + .latency_lvl = { + .affinity_level = LPM_AFF_LVL_L2, + .reset_level = LPM_RESET_LVL_GDHS, + .level_name = "perf", + }, + .cpu_latency_no_l2_pc_us = 300, + }, .clkr.hw.init = &(struct clk_init_data) { .name = "apcs_mux_c1_clk", - .parent_names = apcs_mux_clk_parent_name0, + .parent_names = apcs_mux_clk_c1_parent_name0, .num_parents = 3, .vdd_class = &vdd_cpu_c1, .flags = CLK_SET_RATE_PARENT, @@ -386,6 +438,7 @@ static struct clk_regmap_mux_div apcs_mux_cci_clk = { }; static const struct of_device_id match_table[] = { + { .compatible = "qcom,cpu-clock-sdm439" }, { .compatible = "qcom,cpu-clock-sdm429" }, { .compatible = "qcom,cpu-clock-qm215" }, {} @@ -400,15 +453,24 @@ static struct regmap_config cpu_regmap_config = { }; static struct clk_hw *cpu_clks_hws_qm215[] = { - [APCS_CPU_PLL] = &apcs_cpu_pll.clkr.hw, + [APCS_CPU_PLL1] = &apcs_cpu_pll1.clkr.hw, [APCS_MUX_C1_CLK] = &apcs_mux_c1_clk.clkr.hw, }; static struct clk_hw *cpu_clks_hws_sdm429[] = { - [APCS_CPU_PLL] = &apcs_cpu_pll.clkr.hw, + [APCS_CPU_PLL1] = &apcs_cpu_pll1.clkr.hw, [APCS_MUX_C1_CLK] = &apcs_mux_c1_clk.clkr.hw, [APCS_MUX_CCI_CLK] = &apcs_mux_cci_clk.clkr.hw, }; + +static struct clk_hw *cpu_clks_hws_sdm439[] = { + [APCS_CPU_PLL1] = &apcs_cpu_pll1.clkr.hw, + [APCS_MUX_C1_CLK] = &apcs_mux_c1_clk.clkr.hw, + [APCS_MUX_CCI_CLK] = &apcs_mux_cci_clk.clkr.hw, + [APCS_CPU_PLL0] = &apcs_cpu_pll0.clkr.hw, + [APCS_MUX_C0_CLK] = &apcs_mux_c0_clk.clkr.hw, +}; + static void cpucc_clk_get_speed_bin(struct platform_device *pdev, int *bin, int *version) { @@ -559,41 +621,74 @@ cpucc_clk_add_opp(struct clk_hw *hw, struct device *dev, unsigned long max_rate) return 0; } -static void cpucc_clk_print_opp_table(int cpu) +static void cpucc_clk_print_opp_table(int c0, int c1, bool is_sdm439) { struct dev_pm_opp *oppfmax, *oppfmin; - unsigned long apc_c1_fmax, apc_c1_fmin; - u32 max_index = apcs_mux_c1_clk.clkr.hw.init->num_rate_max; + unsigned long apc_c0_fmax, apc_c0_fmin, apc_c1_fmax, apc_c1_fmin; + u32 max_index; + max_index = apcs_mux_c1_clk.clkr.hw.init->num_rate_max; apc_c1_fmax = apcs_mux_c1_clk.clkr.hw.init->rate_max[max_index - 1]; apc_c1_fmin = apcs_mux_c1_clk.clkr.hw.init->rate_max[1]; - oppfmax = dev_pm_opp_find_freq_exact(get_cpu_device(cpu), - apc_c1_fmax, true); - oppfmin = dev_pm_opp_find_freq_exact(get_cpu_device(cpu), - apc_c1_fmin, true); - pr_info("Clock_cpu:(cpu %d) OPP voltage for %lu: %ld\n", cpu, + oppfmax = 
dev_pm_opp_find_freq_exact(get_cpu_device(c1), + apc_c1_fmax, true); + oppfmin = dev_pm_opp_find_freq_exact(get_cpu_device(c1), + apc_c1_fmin, true); + + pr_info("Clock_cpu:(cpu %d) OPP voltage for %lu: %ld\n", c1, apc_c1_fmin, dev_pm_opp_get_voltage(oppfmin)); - pr_info("Clock_cpu:(cpu %d) OPP voltage for %lu: %ld\n", cpu, + pr_info("Clock_cpu:(cpu %d) OPP voltage for %lu: %ld\n", c1, apc_c1_fmax, dev_pm_opp_get_voltage(oppfmax)); + if (is_sdm439) { + max_index = apcs_mux_c0_clk.clkr.hw.init->num_rate_max; + apc_c0_fmax = + apcs_mux_c0_clk.clkr.hw.init->rate_max[max_index - 1]; + apc_c0_fmin = apcs_mux_c0_clk.clkr.hw.init->rate_max[1]; + + oppfmax = dev_pm_opp_find_freq_exact(get_cpu_device(c0), + apc_c0_fmax, true); + oppfmin = dev_pm_opp_find_freq_exact(get_cpu_device(c0), + apc_c0_fmin, true); + + pr_info("Clock_cpu:(cpu %d) OPP voltage for %lu: %ld\n", c0, + apc_c0_fmin, dev_pm_opp_get_voltage(oppfmin)); + pr_info("Clock_cpu:(cpu %d) OPP voltage for %lu: %ld\n", c0, + apc_c0_fmax, dev_pm_opp_get_voltage(oppfmax)); + } } -static void cpucc_clk_populate_opp_table(struct platform_device *pdev) +static void cpucc_clk_populate_opp_table(struct platform_device *pdev, + bool is_sdm439) { - unsigned long apc_c1_fmax; - u32 max_index = apcs_mux_c1_clk.clkr.hw.init->num_rate_max; - int cpu, sdm_cpu = 0; + unsigned long apc_c1_fmax, apc_c0_fmax; + u32 max_index; + int cpu, sdm_cpu0 = 0, sdm_cpu1 = 0; + if (is_sdm439) { + max_index = apcs_mux_c0_clk.clkr.hw.init->num_rate_max; + apc_c0_fmax = + apcs_mux_c0_clk.clkr.hw.init->rate_max[max_index - 1]; + } + + max_index = apcs_mux_c1_clk.clkr.hw.init->num_rate_max; apc_c1_fmax = apcs_mux_c1_clk.clkr.hw.init->rate_max[max_index - 1]; for_each_possible_cpu(cpu) { - sdm_cpu = cpu; - WARN(cpucc_clk_add_opp(&apcs_mux_c1_clk.clkr.hw, - get_cpu_device(cpu), apc_c1_fmax), - "Failed to add OPP levels for apcs_mux_c1_clk\n"); + if (cpu/4 == 0) { + sdm_cpu1 = cpu; + WARN(cpucc_clk_add_opp(&apcs_mux_c1_clk.clkr.hw, + get_cpu_device(cpu), apc_c1_fmax), + "Failed to add OPP levels for apcs_mux_c1_clk\n"); + } else if (cpu/4 == 1 && is_sdm439) { + sdm_cpu0 = cpu; + WARN(cpucc_clk_add_opp(&apcs_mux_c0_clk.clkr.hw, + get_cpu_device(cpu), apc_c0_fmax), + "Failed to add OPP levels for apcs_mux_c0_clk\n"); + } } - cpucc_clk_print_opp_table(sdm_cpu); + cpucc_clk_print_opp_table(sdm_cpu0, sdm_cpu1, is_sdm439); } static int clock_sdm429_pm_event(struct notifier_block *this, @@ -620,6 +715,32 @@ static struct notifier_block clock_sdm429_pm_notifier = { .notifier_call = clock_sdm429_pm_event, }; +static int clock_sdm439_pm_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + switch (event) { + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + clk_unprepare(apcs_mux_c0_clk.clkr.hw.clk); + clk_unprepare(apcs_mux_c1_clk.clkr.hw.clk); + clk_unprepare(apcs_mux_cci_clk.clkr.hw.clk); + break; + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + clk_prepare(apcs_mux_c0_clk.clkr.hw.clk); + clk_prepare(apcs_mux_c1_clk.clkr.hw.clk); + clk_prepare(apcs_mux_cci_clk.clkr.hw.clk); + break; + default: + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block clock_sdm439_pm_notifier = { + .notifier_call = clock_sdm439_pm_event, +}; + static int clock_qm215_pm_event(struct notifier_block *this, unsigned long event, void *ptr) { @@ -642,6 +763,126 @@ static struct notifier_block clock_qm215_pm_notifier = { .notifier_call = clock_qm215_pm_event, }; +static int fixup_for_sdm439(struct platform_device *pdev, int speed_bin, + int version) +{ + struct 
resource *res; + void __iomem *base; + struct device *dev = &pdev->dev; + char prop_name[] = "qcom,speedX-bin-vX-XXX"; + int ret; + + /* Rail Regulator for apcs_pll0 */ + vdd_sr2_pll.regulator[0] = devm_regulator_get(&pdev->dev, + "vdd_sr2_pll"); + if (IS_ERR(vdd_sr2_pll.regulator[0])) { + if (!(PTR_ERR(vdd_sr2_pll.regulator[0]) == + -EPROBE_DEFER)) + dev_err(&pdev->dev, + "Unable to get sr2_pll regulator\n"); + return PTR_ERR(vdd_sr2_pll.regulator[0]); + } + + vdd_sr2_pll.regulator[1] = devm_regulator_get(&pdev->dev, + "vdd_sr2_dig_ao"); + if (IS_ERR(vdd_sr2_pll.regulator[1])) { + if (!(PTR_ERR(vdd_sr2_pll.regulator[1]) == + -EPROBE_DEFER)) + dev_err(&pdev->dev, + "Unable to get dig_ao regulator\n"); + return PTR_ERR(vdd_sr2_pll.regulator[1]); + } + + /* Rail Regulator for APCS C0 mux */ + vdd_cpu_c0.regulator[0] = devm_regulator_get(&pdev->dev, + "cpu-vdd"); + if (IS_ERR(vdd_cpu_c0.regulator[0])) { + if (!(PTR_ERR(vdd_cpu_c0.regulator[0]) == + -EPROBE_DEFER)) + dev_err(&pdev->dev, "Unable to get C0 cpu-vdd regulator\n"); + return PTR_ERR(vdd_cpu_c0.regulator[0]); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "apcs_pll0"); + if (res == NULL) { + dev_err(&pdev->dev, "Failed to get apcs_pll0 resources\n"); + return -EINVAL; + } + + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) { + dev_err(&pdev->dev, "Failed map apcs_cpu_pll0 register base\n"); + return PTR_ERR(base); + } + + cpu_regmap_config.name = "apcs_pll0"; + apcs_cpu_pll0.clkr.regmap = devm_regmap_init_mmio(dev, base, + &cpu_regmap_config); + if (IS_ERR(apcs_cpu_pll0.clkr.regmap)) { + dev_err(&pdev->dev, "Couldn't get regmap for apcs_cpu_pll0\n"); + return PTR_ERR(apcs_cpu_pll0.clkr.regmap); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "apcs-c0-rcg-base"); + if (res == NULL) { + dev_err(&pdev->dev, "Failed to get apcs-c0 resources\n"); + return -EINVAL; + } + + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) { + dev_err(&pdev->dev, "Failed map apcs-c0-rcg register base\n"); + return PTR_ERR(base); + } + + cpu_regmap_config.name = "apcs-c0-rcg-base"; + apcs_mux_c0_clk.clkr.regmap = devm_regmap_init_mmio(dev, base, + &cpu_regmap_config); + if (IS_ERR(apcs_mux_c0_clk.clkr.regmap)) { + dev_err(&pdev->dev, "Couldn't get regmap for apcs-c0-rcg\n"); + return PTR_ERR(apcs_mux_c0_clk.clkr.regmap); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "spm_c0_base"); + if (res == NULL) { + dev_err(&pdev->dev, "Failed to get spm-c0 resources\n"); + return -EINVAL; + } + + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) { + dev_err(&pdev->dev, "Failed to ioremap c0 spm registers\n"); + return -ENOMEM; + } + apcs_cpu_pll0.spm_ctrl.spm_base = base; + + snprintf(prop_name, ARRAY_SIZE(prop_name), + "qcom,speed%d-bin-v%d-%s", speed_bin, version, "c0"); + + ret = cpucc_clk_get_fmax_vdd_class(pdev, + (struct clk_init_data *)apcs_mux_c0_clk.clkr.hw.init, + prop_name); + if (ret) { + dev_err(&pdev->dev, "Didn't get c0 speed bin\n"); + + snprintf(prop_name, ARRAY_SIZE(prop_name), + "qcom,speed0-bin-v0-%s", "c0"); + ret = cpucc_clk_get_fmax_vdd_class(pdev, + (struct clk_init_data *) + apcs_mux_c0_clk.clkr.hw.init, + prop_name); + if (ret) { + dev_err(&pdev->dev, + "Unable to load safe voltage plan for c0\n"); + return ret; + } + } + return 0; +} + static int cpucc_driver_probe(struct platform_device *pdev) { struct resource *res; @@ -651,7 +892,10 @@ static int cpucc_driver_probe(struct platform_device *pdev) int i, ret, speed_bin, version, cpu; char prop_name[] = 
"qcom,speedX-bin-vX-XXX"; void __iomem *base; - bool is_sdm429, is_qm215; + bool is_sdm439, is_sdm429, is_qm215; + + is_sdm439 = of_device_is_compatible(pdev->dev.of_node, + "qcom,cpu-clock-sdm439"); is_sdm429 = of_device_is_compatible(pdev->dev.of_node, "qcom,cpu-clock-sdm429"); @@ -702,7 +946,7 @@ static int cpucc_driver_probe(struct platform_device *pdev) } /* Rail Regulator for APCS CCI mux */ - if (is_sdm429) { + if (is_sdm429 || is_sdm439) { vdd_cpu_cci.regulator[0] = devm_regulator_get(&pdev->dev, "cpu-vdd"); if (IS_ERR(vdd_cpu_cci.regulator[0])) { @@ -714,26 +958,40 @@ static int cpucc_driver_probe(struct platform_device *pdev) } } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs_pll"); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs_pll1"); if (res == NULL) { - dev_err(&pdev->dev, "Failed to get apcs_pll resources\n"); + dev_err(&pdev->dev, "Failed to get apcs_pll1 resources\n"); return -EINVAL; } base = devm_ioremap_resource(dev, res); if (IS_ERR(base)) { - dev_err(&pdev->dev, "Failed map apcs_cpu_pll register base\n"); + dev_err(&pdev->dev, "Failed map apcs_cpu_pll1 register base\n"); return PTR_ERR(base); } - cpu_regmap_config.name = "apcs_pll"; - apcs_cpu_pll.clkr.regmap = devm_regmap_init_mmio(dev, base, + cpu_regmap_config.name = "apcs_pll1"; + apcs_cpu_pll1.clkr.regmap = devm_regmap_init_mmio(dev, base, &cpu_regmap_config); - if (IS_ERR(apcs_cpu_pll.clkr.regmap)) { - dev_err(&pdev->dev, "Couldn't get regmap for apcs_cpu_pll\n"); - return PTR_ERR(apcs_cpu_pll.clkr.regmap); + if (IS_ERR(apcs_cpu_pll1.clkr.regmap)) { + dev_err(&pdev->dev, "Couldn't get regmap for apcs_cpu_pll1\n"); + return PTR_ERR(apcs_cpu_pll1.clkr.regmap); } + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "spm_c1_base"); + if (res == NULL) { + dev_err(&pdev->dev, "Failed to get spm-c1 resources\n"); + return -EINVAL; + } + + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) { + dev_err(&pdev->dev, "Failed to ioremap c1 spm registers\n"); + return -ENOMEM; + } + apcs_cpu_pll1.spm_ctrl.spm_base = base; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs-c1-rcg-base"); if (res == NULL) { @@ -755,7 +1013,7 @@ static int cpucc_driver_probe(struct platform_device *pdev) return PTR_ERR(apcs_mux_c1_clk.clkr.regmap); } - if (is_sdm429) { + if (is_sdm429 || is_sdm439) { res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs-cci-rcg-base"); if (res == NULL) { @@ -776,21 +1034,6 @@ static int cpucc_driver_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Couldn't get regmap for apcs-cci-rcg\n"); return PTR_ERR(apcs_mux_cci_clk.clkr.regmap); } - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "spm_c1_base"); - if (res == NULL) { - dev_err(&pdev->dev, "Failed to get spm-c1 resources\n"); - return -EINVAL; - } - - base = devm_ioremap_resource(dev, res); - if (IS_ERR(base)) { - dev_err(&pdev->dev, "Failed to ioremap c1 spm registers\n"); - return -ENOMEM; - } - - apcs_pll_spm.spm_base = base; } /* Get speed bin information */ @@ -804,17 +1047,21 @@ static int cpucc_driver_probe(struct platform_device *pdev) prop_name); if (ret) { dev_err(&pdev->dev, "Didn't get c1 speed bin\n"); + + snprintf(prop_name, ARRAY_SIZE(prop_name), + "qcom,speed0-bin-v0-%s", "c1"); ret = cpucc_clk_get_fmax_vdd_class(pdev, (struct clk_init_data *) apcs_mux_c1_clk.clkr.hw.init, prop_name); if (ret) { - dev_err(&pdev->dev, "Unable to get vdd class for c1\n"); + dev_err(&pdev->dev, + "Unable to load safe voltage plan for c1\n"); return ret; } } - if 
(is_sdm429) {
+	if (is_sdm429 || is_sdm439) {
 		snprintf(prop_name, ARRAY_SIZE(prop_name),
 			"qcom,speed%d-bin-v%d-%s", speed_bin, version, "cci");
@@ -823,17 +1070,29 @@ static int cpucc_driver_probe(struct platform_device *pdev)
 						prop_name);
 		if (ret) {
 			dev_err(&pdev->dev, "Didn't get cci speed bin\n");
+
+			snprintf(prop_name, ARRAY_SIZE(prop_name),
+				"qcom,speed0-bin-v0-%s", "cci");
 			ret = cpucc_clk_get_fmax_vdd_class(pdev,
 					(struct clk_init_data *)
 					apcs_mux_cci_clk.clkr.hw.init,
 					prop_name);
 			if (ret) {
-				dev_err(&pdev->dev, "Unable get vdd class for cci\n");
+				dev_err(&pdev->dev,
+					"Unable to load safe voltage plan for cci\n");
 				return ret;
 			}
 		}
 	}
 
+	if (is_sdm439) {
+		ret = fixup_for_sdm439(pdev, speed_bin, version);
+		if (ret) {
+			dev_err(&pdev->dev, "Unable to get sdm439 clocks\n");
+			return ret;
+		}
+	}
+
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -864,6 +1123,19 @@ static int cpucc_driver_probe(struct platform_device *pdev)
 			}
 			data->hws[i] = cpu_clks_hws_qm215[i];
 		}
+	} else if (is_sdm439) {
+		data->num = ARRAY_SIZE(cpu_clks_hws_sdm439);
+
+		for (i = 0; i < ARRAY_SIZE(cpu_clks_hws_sdm439); i++) {
+			ret = devm_clk_hw_register(dev,
+					cpu_clks_hws_sdm439[i]);
+			if (ret) {
+				dev_err(&pdev->dev,
+					"Failed to register clock\n");
+				return ret;
+			}
+			data->hws[i] = cpu_clks_hws_sdm439[i];
+		}
 	}
 
 	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, data);
@@ -873,6 +1145,23 @@ static int cpucc_driver_probe(struct platform_device *pdev)
 	}
 
 	/* For safe freq switching during rate change */
+	if (is_sdm439) {
+		apcs_mux_c0_clk.clk_lpm.hw_low_power_ctrl = true;
+		ret = clk_notifier_register(apcs_mux_c0_clk.clkr.hw.clk,
+				&apcs_mux_c0_clk.clk_nb);
+		if (ret) {
+			dev_err(dev, "failed to register clock notifier: %d\n",
+				ret);
+			return ret;
+		}
+		ret = clk_prepare_enable(apcs_cpu_pll0.clkr.hw.clk);
+		if (ret) {
+			dev_err(dev, "failed to enable PLL0 clock: %d\n", ret);
+			return ret;
+		}
+	}
+
+	apcs_mux_c1_clk.clk_lpm.hw_low_power_ctrl = true;
 	ret = clk_notifier_register(apcs_mux_c1_clk.clkr.hw.clk,
 			&apcs_mux_c1_clk.clk_nb);
 	if (ret) {
@@ -880,25 +1169,41 @@ static int cpucc_driver_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	ret = clk_prepare_enable(apcs_cpu_pll1.clkr.hw.clk);
+	if (ret) {
+		dev_err(dev, "failed to enable PLL1 clock: %d\n", ret);
+		return ret;
+	}
+
 	/*
 	 * To increase the enable count for the clocks so
 	 * that they dont get disabled during late init.
*/ get_online_cpus(); for_each_online_cpu(cpu) { - WARN(clk_prepare_enable(apcs_mux_c1_clk.clkr.hw.clk), - "Unable to turn on CPU clock\n"); - if (is_sdm429) + if (!(cpu/4)) { + WARN(clk_prepare_enable(apcs_mux_c1_clk.clkr.hw.clk), + "Unable to turn on CPU clock\n"); + } + + if (cpu/4 && is_sdm439) { + WARN(clk_prepare_enable(apcs_mux_c0_clk.clkr.hw.clk), + "Unable to turn on CPU clock\n"); + } + + if (is_sdm429 || is_sdm439) clk_prepare_enable(apcs_mux_cci_clk.clkr.hw.clk); } put_online_cpus(); - if (is_sdm429) + if (is_sdm439) + register_pm_notifier(&clock_sdm439_pm_notifier); + else if (is_sdm429) register_pm_notifier(&clock_sdm429_pm_notifier); else if (is_qm215) register_pm_notifier(&clock_qm215_pm_notifier); - cpucc_clk_populate_opp_table(pdev); + cpucc_clk_populate_opp_table(pdev, is_sdm439); dev_info(dev, "CPU clock Driver probed successfully\n"); return ret; @@ -925,20 +1230,42 @@ static void __exit cpu_clk_exit(void) module_exit(cpu_clk_exit); #define REG_OFFSET 0x4 -#define APCS_PLL 0x0b016000 +#define APCS_PLL0 0x0b116000 +#define APCS_PLL1 0x0b016000 +#define A53SS_MUX_C0 0x0b111050 #define A53SS_MUX_C1 0x0b011050 +static void config_enable_sr2_pll(void __iomem *base) +{ + /* Configure L/M/N values */ + writel_relaxed(0x34, base + apcs_cpu_pll0.l_reg); + writel_relaxed(0x0, base + apcs_cpu_pll0.m_reg); + writel_relaxed(0x1, base + apcs_cpu_pll0.n_reg); + + /* Configure USER_CTL value */ + writel_relaxed(0xf, base + apcs_cpu_pll0.config_reg); + + /* Enable the pll */ + writel_relaxed(0x2, base + apcs_cpu_pll0.mode_reg); + udelay(2); + writel_relaxed(0x6, base + apcs_cpu_pll0.mode_reg); + udelay(50); + writel_relaxed(0x7, base + apcs_cpu_pll0.mode_reg); + /* Ensure that the writes go through before enabling PLL */ + mb(); +} + static void config_enable_hf_pll(void __iomem *base) { /* Configure USER_CTL value */ - writel_relaxed(0xf, base + apcs_cpu_pll.config_reg); + writel_relaxed(0xf, base + apcs_cpu_pll1.config_reg); /* Enable the pll */ - writel_relaxed(0x2, base + apcs_cpu_pll.mode_reg); + writel_relaxed(0x2, base + apcs_cpu_pll1.mode_reg); udelay(2); - writel_relaxed(0x6, base + apcs_cpu_pll.mode_reg); + writel_relaxed(0x6, base + apcs_cpu_pll1.mode_reg); udelay(50); - writel_relaxed(0x7, base + apcs_cpu_pll.mode_reg); + writel_relaxed(0x7, base + apcs_cpu_pll1.mode_reg); /* Ensure that the writes go through before enabling PLL */ mb(); } @@ -948,9 +1275,16 @@ static int __init cpu_clock_init(void) struct device_node *dev; void __iomem *base; int count, regval = 0; + bool is_sdm439 = false; unsigned long enable_mask = GENMASK(2, 0); + dev = of_find_compatible_node(NULL, NULL, "qcom,cpu-clock-sdm439"); - dev = of_find_compatible_node(NULL, NULL, "qcom,cpu-clock-sdm429"); + if (dev) + is_sdm439 = true; + + if (!dev) + dev = of_find_compatible_node(NULL, NULL, + "qcom,cpu-clock-sdm429"); if (!dev) dev = of_find_compatible_node(NULL, NULL, @@ -960,7 +1294,19 @@ static int __init cpu_clock_init(void) return -ENOMEM; } - base = ioremap_nocache(APCS_PLL, SZ_64); + if (is_sdm439) { + base = ioremap_nocache(APCS_PLL0, SZ_64); + if (!base) + return -ENOMEM; + + regval = readl_relaxed(base); + if (!((regval & enable_mask) == enable_mask)) + config_enable_sr2_pll(base); + + iounmap(base); + } + + base = ioremap_nocache(APCS_PLL1, SZ_64); if (!base) return -ENOMEM; @@ -1000,10 +1346,54 @@ static int __init cpu_clock_init(void) udelay(1); } + iounmap(base); return 0; } early_initcall(cpu_clock_init); +static int __init clock_cpu_lpm_get_latency(void) +{ + int rc; + bool is_sdm439 = 
false; + struct device_node *ofnode = of_find_compatible_node(NULL, NULL, + "qcom,cpu-clock-sdm439"); + if (ofnode) + is_sdm439 = true; + + if (!ofnode) + ofnode = of_find_compatible_node(NULL, NULL, + "qcom,cpu-clock-sdm429"); + + if (!ofnode) + ofnode = of_find_compatible_node(NULL, NULL, + "qcom,cpu-clock-qm215"); + + if (!ofnode) { + pr_err("device node not initialized\n"); + return -ENOMEM; + } + + rc = lpm_get_latency(&apcs_mux_c1_clk.clk_lpm.latency_lvl, + &apcs_mux_c1_clk.clk_lpm.cpu_latency_no_l2_pc_us); + if (rc < 0) + pr_err("Failed to get the L2 PC value for perf\n"); + + if (is_sdm439) { + rc = lpm_get_latency(&apcs_mux_c0_clk.clk_lpm.latency_lvl, + &apcs_mux_c0_clk.clk_lpm.cpu_latency_no_l2_pc_us); + if (rc < 0) + pr_err("Failed to get the L2 PC value for pwr\n"); + pr_debug("Latency for pwr cluster : %d\n", + apcs_mux_c0_clk.clk_lpm.cpu_latency_no_l2_pc_us); + } + + pr_debug("Latency for perf cluster : %d\n", + apcs_mux_c1_clk.clk_lpm.cpu_latency_no_l2_pc_us); + + return rc; +} +late_initcall_sync(clock_cpu_lpm_get_latency); + MODULE_ALIAS("platform:cpu"); MODULE_DESCRIPTION("SDM CPU clock Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c index 1f4298eb7034..0ba162e4f578 100644 --- a/drivers/clk/qcom/clk-pll.c +++ b/drivers/clk/qcom/clk-pll.c @@ -29,6 +29,44 @@ #define PLL_BYPASSNL BIT(1) #define PLL_RESET_N BIT(2) +static void spm_event(void __iomem *base, u32 offset, u32 bit, bool enable) +{ + uint32_t val; + + if (!base) + return; + + if (enable) { + /* L2_SPM_FORCE_EVENT_EN */ + val = readl_relaxed(base + offset); + val |= BIT(bit); + writel_relaxed(val, (base + offset)); + /* Ensure that the write above goes through. */ + mb(); + + /* L2_SPM_FORCE_EVENT */ + val = readl_relaxed(base + offset + 0x4); + val |= BIT(bit); + writel_relaxed(val, (base + offset + 0x4)); + /* Ensure that the write above goes through. */ + mb(); + } else { + /* L2_SPM_FORCE_EVENT */ + val = readl_relaxed(base + offset + 0x4); + val &= ~BIT(bit); + writel_relaxed(val, (base + offset + 0x4)); + /* Ensure that the write above goes through. */ + mb(); + + /* L2_SPM_FORCE_EVENT_EN */ + val = readl_relaxed(base + offset); + val &= ~BIT(bit); + writel_relaxed(val, (base + offset)); + /* Ensure that the write above goes through. 
*/ + mb(); + } +} + static int clk_pll_enable(struct clk_hw *hw) { struct clk_pll *pll = to_clk_pll(hw); @@ -76,6 +114,9 @@ static void clk_pll_disable(struct clk_hw *hw) u32 mask; u32 val; + spm_event(pll->spm_ctrl.spm_base, pll->spm_ctrl.offset, + pll->spm_ctrl.event_bit, true); + regmap_read(pll->clkr.regmap, pll->mode_reg, &val); /* Skip if in FSM mode */ if (val & PLL_VOTE_FSM_ENA) @@ -138,7 +179,8 @@ clk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) f = find_freq(pll->freq_tbl, req->rate); if (!f) - req->rate = clk_pll_recalc_rate(hw, req->best_parent_rate); + req->rate = DIV_ROUND_UP_ULL(req->rate, req->best_parent_rate) + * req->best_parent_rate; else req->rate = f->freq; @@ -175,12 +217,38 @@ clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long p_rate) return 0; } +static void clk_pll_list_registers(struct seq_file *f, struct clk_hw *hw) +{ + struct clk_pll *pll = to_clk_pll(hw); + int size, i, val; + + static struct clk_register_data data[] = { + {"PLL_MODE", 0x0}, + {"PLL_L_VAL", 0x4}, + {"PLL_M_VAL", 0x8}, + {"PLL_N_VAL", 0xC}, + {"PLL_USER_CTL", 0x10}, + {"PLL_CONFIG_CTL", 0x14}, + {"PLL_STATUS_CTL", 0x1C}, + }; + + size = ARRAY_SIZE(data); + + for (i = 0; i < size; i++) { + regmap_read(pll->clkr.regmap, pll->mode_reg + data[i].offset, + &val); + clock_debug_output(f, false, + "%20s: 0x%.8x\n", data[i].name, val); + } +} + const struct clk_ops clk_pll_ops = { .enable = clk_pll_enable, .disable = clk_pll_disable, .recalc_rate = clk_pll_recalc_rate, .determine_rate = clk_pll_determine_rate, .set_rate = clk_pll_set_rate, + .list_registers = clk_pll_list_registers, }; EXPORT_SYMBOL_GPL(clk_pll_ops); @@ -192,7 +260,7 @@ static int wait_for_pll(struct clk_pll *pll) const char *name = clk_hw_get_name(&pll->clkr.hw); /* Wait for pll to enable. 
*/ - for (count = 200; count > 0; count--) { + for (count = 500; count > 0; count--) { ret = regmap_read(pll->clkr.regmap, pll->status_reg, &val); if (ret) return ret; @@ -201,7 +269,8 @@ static int wait_for_pll(struct clk_pll *pll) udelay(1); } - WARN(1, "%s didn't enable after voting for it!\n", name); + WARN_CLK(pll->clkr.hw.core, name, 1, + "didn't enable after voting for it!\n"); return -ETIMEDOUT; } @@ -274,6 +343,9 @@ static int clk_pll_sr2_enable(struct clk_hw *hw) int ret; u32 mode; + spm_event(pll->spm_ctrl.spm_base, pll->spm_ctrl.offset, + pll->spm_ctrl.event_bit, false); + ret = regmap_read(pll->clkr.regmap, pll->mode_reg, &mode); if (ret) return ret; @@ -296,6 +368,10 @@ static int clk_pll_sr2_enable(struct clk_hw *hw) if (ret) return ret; + /* Make sure De-assert active-low PLL reset request goes through */ + mb(); + udelay(50); + ret = wait_for_pll(pll); if (ret) return ret; diff --git a/drivers/clk/qcom/clk-pll.h b/drivers/clk/qcom/clk-pll.h index 31fe26003037..f142bf72774c 100644 --- a/drivers/clk/qcom/clk-pll.h +++ b/drivers/clk/qcom/clk-pll.h @@ -32,6 +32,12 @@ struct pll_freq_tbl { u32 ibits; }; +struct pll_spm_ctrl { + u32 offset; + u32 event_bit; + void __iomem *spm_base; +}; + /** * struct clk_pll - phase locked loop (PLL) * @l_reg: L register @@ -58,6 +64,7 @@ struct clk_pll { const struct pll_freq_tbl *freq_tbl; struct clk_regmap clkr; + struct pll_spm_ctrl spm_ctrl; }; extern const struct clk_ops clk_pll_ops; diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 15035f9e7346..1a1d1c827ddf 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2013, 2018-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2013, 2018-2021, The Linux Foundation. All rights reserved. 
 */
 #include 
@@ -1395,7 +1395,19 @@ static int clk_gfx3d_src_set_rate_and_parent(struct clk_hw *hw,
 	if (ret)
 		return ret;
 
-	return update_config(rcg, old_cfg);
+	if ((!clk_rcg2_is_force_enabled(hw) && (!clk_hw_is_prepared(hw)
+			|| !clk_hw_is_enabled(hw))))
+		clk_rcg2_set_force_enable(hw);
+
+	ret = update_config(rcg, old_cfg);
+	if (ret)
+		return ret;
+
+	if ((clk_rcg2_is_force_enabled(hw) && (!clk_hw_is_prepared(hw)
+			|| !clk_hw_is_enabled(hw))))
+		clk_rcg2_clear_force_enable(hw);
+
+	return ret;
 }
 
 static int clk_gfx3d_src_determine_rate(struct clk_hw *hw,
diff --git a/drivers/clk/qcom/clk-regmap-mux-div.c b/drivers/clk/qcom/clk-regmap-mux-div.c
index 0ba00428d42e..e420070c553b 100644
--- a/drivers/clk/qcom/clk-regmap-mux-div.c
+++ b/drivers/clk/qcom/clk-regmap-mux-div.c
@@ -51,7 +51,8 @@ int mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div)
 		udelay(1);
 	}
 
-	pr_err("%s: RCG did not update its configuration", name);
+	WARN_CLK(md->clkr.hw.core, name, 1,
+		"%s: rcg didn't update its configuration.", name);
 	return -EBUSY;
 }
 EXPORT_SYMBOL_GPL(mux_div_set_src_div);
diff --git a/drivers/clk/qcom/clk-regmap-mux-div.h b/drivers/clk/qcom/clk-regmap-mux-div.h
index 33dd3d06deb7..5f4107c0f038 100644
--- a/drivers/clk/qcom/clk-regmap-mux-div.h
+++ b/drivers/clk/qcom/clk-regmap-mux-div.h
@@ -8,9 +8,28 @@
 #define __QCOM_CLK_REGMAP_MUX_DIV_H__
 
 #include 
+#include 
+#include 
 #include "common.h"
 #include "clk-regmap.h"
 
+/**
+ * struct clk_regmap_mux_div_lpm - regmap_mux_div_lpm clock
+ * @cpu_reg_mask: logical cpu mask for node
+ * @hw_low_power_ctrl: hw low power control
+ * @req: pm_qos request
+ * @latency_lvl: lpm latency level
+ * @cpu_latency_no_l2_pc_us: cpu latency in us
+ */
+
+struct clk_regmap_mux_div_lpm {
+	cpumask_t cpu_reg_mask;
+	bool hw_low_power_ctrl;
+	struct pm_qos_request req;
+	struct latency_level latency_lvl;
+	s32 cpu_latency_no_l2_pc_us;
+};
+
 /**
  * struct mux_div_clk - combined mux/divider clock
  * @reg_offset: offset of the mux/divider register
@@ -52,6 +71,9 @@ struct clk_regmap_mux_div {
 	struct clk_regmap clkr;
 	struct clk *pclk;
 	struct notifier_block clk_nb;
+
+	/* LPM Latency related */
+	struct clk_regmap_mux_div_lpm clk_lpm;
 };
 
 extern const struct clk_ops clk_regmap_mux_div_ops;
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 78dbdb012f40..2fe527cf6b26 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2016, Linaro Limited
- * Copyright (c) 2014, 2016-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2021, The Linux Foundation. All rights reserved.
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -1190,6 +1190,7 @@ static const struct of_device_id rpm_smd_clk_match_table[] = { { .compatible = "qcom,rpmcc-scuba", .data = &rpm_clk_scuba}, { .compatible = "qcom,rpmcc-sdm660", .data = &rpm_clk_sdm660 }, { .compatible = "qcom,rpmcc-qm215", .data = &rpm_clk_qm215 }, + { .compatible = "qcom,rpmcc-sdm439", .data = &rpm_clk_qm215 }, { } }; MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table); @@ -1200,7 +1201,7 @@ static int rpm_smd_clk_probe(struct platform_device *pdev) struct clk *clk; struct rpm_cc *rcc; struct clk_onecell_data *data; - int ret, is_bengal, is_scuba, is_sdm660, is_qm215; + int ret, is_bengal, is_scuba, is_sdm660, is_qm215, is_sdm439; size_t num_clks, i; struct clk_hw **hw_clks; const struct rpm_smd_clk_desc *desc; @@ -1222,13 +1223,16 @@ static int rpm_smd_clk_probe(struct platform_device *pdev) is_qm215 = of_device_is_compatible(pdev->dev.of_node, "qcom,rpmcc-qm215"); + is_sdm439 = of_device_is_compatible(pdev->dev.of_node, + "qcom,rpmcc-sdm439"); + if (is_sdm660) { ret = clk_vote_bimc(&sdm660_bimc_clk.hw, INT_MAX); if (ret < 0) return ret; } - if (is_qm215) { + if (is_qm215 || is_sdm439) { ret = clk_vote_bimc(&sdm429w_bimc_clk.hw, INT_MAX); if (ret < 0) return ret; @@ -1251,6 +1255,11 @@ static int rpm_smd_clk_probe(struct platform_device *pdev) data->clks = clks; data->clk_num = num_clks; + if (is_sdm439) { + rpm_clk_qm215.clks[RPM_SMD_BIMC_GPU_CLK] = NULL; + rpm_clk_qm215.clks[RPM_SMD_BIMC_GPU_A_CLK] = NULL; + } + for (i = 0; i <= desc->num_rpm_clks; i++) { if (!hw_clks[i]) { clks[i] = ERR_PTR(-ENOENT); @@ -1317,7 +1326,7 @@ static int rpm_smd_clk_probe(struct platform_device *pdev) /* Hold an active set vote for the cnoc_periph resource */ clk_set_rate(cnoc_periph_keepalive_a_clk.hw.clk, 19200000); clk_prepare_enable(cnoc_periph_keepalive_a_clk.hw.clk); - } else if (is_qm215) { + } else if (is_qm215 || is_sdm439) { clk_prepare_enable(sdm429w_bi_tcxo_ao.hw.clk); /* diff --git a/drivers/clk/qcom/debugcc-khaje.c b/drivers/clk/qcom/debugcc-khaje.c new file mode 100644 index 000000000000..857af249d8ba --- /dev/null +++ b/drivers/clk/qcom/debugcc-khaje.c @@ -0,0 +1,626 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "clk: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "clk-debug.h" +#include "common.h" + +static struct measure_clk_data debug_mux_priv = { + .ctl_reg = 0x62038, + .status_reg = 0x6203C, + .xo_div4_cbcr = 0x28008, +}; + +static const char *const apss_cc_debug_mux_parent_names[] = { + "perfcl_clk", + "pwrcl_clk", +}; + +static int apss_cc_debug_mux_sels[] = { + 0x1, /* perfcl_clk */ + 0x0, /* pwrclk_clk */ +}; + +static int apss_cc_debug_mux_pre_divs[] = { + 0x8, /* perfcl_clk */ + 0x8, /* pwrcl_clk */ +}; + +static struct clk_debug_mux apss_cc_debug_mux = { + .priv = &debug_mux_priv, + .debug_offset = 0x0, + .post_div_offset = 0x0, + .cbcr_offset = U32_MAX, + .src_sel_mask = 0x3FF00, + .src_sel_shift = 8, + .post_div_mask = 0xF0000000, + .post_div_shift = 28, + .post_div_val = 1, + .mux_sels = apss_cc_debug_mux_sels, + .pre_div_vals = apss_cc_debug_mux_pre_divs, + .hw.init = &(struct clk_init_data){ + .name = "apss_cc_debug_mux", + .ops = &clk_debug_mux_ops, + .parent_names = apss_cc_debug_mux_parent_names, + .num_parents = ARRAY_SIZE(apss_cc_debug_mux_parent_names), + .flags = CLK_IS_MEASURE, + }, +}; + +static const char *const disp_cc_debug_mux_parent_names[] = { + "disp_cc_mdss_ahb_clk", + "disp_cc_mdss_byte0_clk", + "disp_cc_mdss_byte0_intf_clk", + "disp_cc_mdss_esc0_clk", + "disp_cc_mdss_mdp_clk", + "disp_cc_mdss_mdp_lut_clk", + "disp_cc_mdss_non_gdsc_ahb_clk", + "disp_cc_mdss_pclk0_clk", + "disp_cc_mdss_rot_clk", + "disp_cc_mdss_rscc_ahb_clk", + "disp_cc_mdss_rscc_vsync_clk", + "disp_cc_mdss_vsync_clk", + "measure_only_disp_cc_sleep_clk", + "measure_only_disp_cc_xo_clk", +}; + +static int disp_cc_debug_mux_sels[] = { + 0x14, /* disp_cc_mdss_ahb_clk */ + 0xC, /* disp_cc_mdss_byte0_clk */ + 0xD, /* disp_cc_mdss_byte0_intf_clk */ + 0xE, /* disp_cc_mdss_esc0_clk */ + 0x8, /* disp_cc_mdss_mdp_clk */ + 0xA, /* disp_cc_mdss_mdp_lut_clk */ + 0x15, /* disp_cc_mdss_non_gdsc_ahb_clk */ + 0x7, /* disp_cc_mdss_pclk0_clk */ + 0x9, /* disp_cc_mdss_rot_clk */ + 0x17, /* disp_cc_mdss_rscc_ahb_clk */ + 0x16, /* disp_cc_mdss_rscc_vsync_clk */ + 0xB, /* disp_cc_mdss_vsync_clk */ + 0x1D, /* measure_only_disp_cc_sleep_clk */ + 0x1E, /* measure_only_disp_cc_xo_clk */ +}; + +static struct clk_debug_mux disp_cc_debug_mux = { + .priv = &debug_mux_priv, + .debug_offset = 0x7000, + .post_div_offset = 0x3000, + .cbcr_offset = 0x3004, + .src_sel_mask = 0xFF, + .src_sel_shift = 0, + .post_div_mask = 0xF, + .post_div_shift = 0, + .post_div_val = 4, + .mux_sels = disp_cc_debug_mux_sels, + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_debug_mux", + .ops = &clk_debug_mux_ops, + .parent_names = disp_cc_debug_mux_parent_names, + .num_parents = ARRAY_SIZE(disp_cc_debug_mux_parent_names), + .flags = CLK_IS_MEASURE, + }, +}; + +static const char *const gcc_debug_mux_parent_names[] = { + "apss_cc_debug_mux", + "disp_cc_debug_mux", + "gcc_ahb2phy_csi_clk", + "gcc_ahb2phy_usb_clk", + "gcc_bimc_gpu_axi_clk", + "gcc_boot_rom_ahb_clk", + "gcc_cam_throttle_nrt_clk", + "gcc_cam_throttle_rt_clk", + "gcc_camera_ahb_clk", + "gcc_camss_axi_clk", + "gcc_camss_cci_0_clk", + "gcc_camss_cphy_0_clk", + "gcc_camss_cphy_1_clk", + "gcc_camss_cphy_2_clk", + "gcc_camss_csi0phytimer_clk", + "gcc_camss_csi1phytimer_clk", + "gcc_camss_csi2phytimer_clk", + "gcc_camss_mclk0_clk", + "gcc_camss_mclk1_clk", + "gcc_camss_mclk2_clk", + "gcc_camss_mclk3_clk", + "gcc_camss_nrt_axi_clk", + "gcc_camss_ope_ahb_clk", + "gcc_camss_ope_clk", + 
"gcc_camss_rt_axi_clk", + "gcc_camss_tfe_0_clk", + "gcc_camss_tfe_0_cphy_rx_clk", + "gcc_camss_tfe_0_csid_clk", + "gcc_camss_tfe_1_clk", + "gcc_camss_tfe_1_cphy_rx_clk", + "gcc_camss_tfe_1_csid_clk", + "gcc_camss_tfe_2_clk", + "gcc_camss_tfe_2_cphy_rx_clk", + "gcc_camss_tfe_2_csid_clk", + "gcc_camss_top_ahb_clk", + "gcc_cfg_noc_usb3_prim_axi_clk", + "gcc_disp_ahb_clk", + "gcc_disp_gpll0_div_clk_src", + "gcc_disp_hf_axi_clk", + "gcc_disp_sleep_clk", + "gcc_disp_throttle_core_clk", + "gcc_gp1_clk", + "gcc_gp2_clk", + "gcc_gp3_clk", + "gcc_gpu_gpll0_clk_src", + "gcc_gpu_gpll0_div_clk_src", + "gcc_gpu_memnoc_gfx_clk", + "gcc_gpu_snoc_dvm_gfx_clk", + "gcc_gpu_throttle_core_clk", + "gcc_pdm2_clk", + "gcc_pdm_ahb_clk", + "gcc_pdm_xo4_clk", + "gcc_prng_ahb_clk", + "gcc_qmip_camera_nrt_ahb_clk", + "gcc_qmip_camera_rt_ahb_clk", + "gcc_qmip_disp_ahb_clk", + "gcc_qmip_gpu_cfg_ahb_clk", + "gcc_qmip_video_vcodec_ahb_clk", + "gcc_qupv3_wrap0_core_2x_clk", + "gcc_qupv3_wrap0_core_clk", + "gcc_qupv3_wrap0_s0_clk", + "gcc_qupv3_wrap0_s1_clk", + "gcc_qupv3_wrap0_s2_clk", + "gcc_qupv3_wrap0_s3_clk", + "gcc_qupv3_wrap0_s4_clk", + "gcc_qupv3_wrap0_s5_clk", + "gcc_qupv3_wrap_0_m_ahb_clk", + "gcc_qupv3_wrap_0_s_ahb_clk", + "gcc_sdcc1_ahb_clk", + "gcc_sdcc1_apps_clk", + "gcc_sdcc1_ice_core_clk", + "gcc_sdcc2_ahb_clk", + "gcc_sdcc2_apps_clk", + "gcc_sys_noc_cpuss_ahb_clk", + "gcc_sys_noc_ufs_phy_axi_clk", + "gcc_sys_noc_usb3_prim_axi_clk", + "gcc_ufs_phy_ahb_clk", + "gcc_ufs_phy_axi_clk", + "gcc_ufs_phy_ice_core_clk", + "gcc_ufs_phy_phy_aux_clk", + "gcc_ufs_phy_rx_symbol_0_clk", + "gcc_ufs_phy_rx_symbol_1_clk", + "gcc_ufs_phy_tx_symbol_0_clk", + "gcc_ufs_phy_unipro_core_clk", + "gcc_usb30_prim_master_clk", + "gcc_usb30_prim_mock_utmi_clk", + "gcc_usb30_prim_sleep_clk", + "gcc_usb3_prim_phy_com_aux_clk", + "gcc_usb3_prim_phy_pipe_clk", + "gcc_vcodec0_axi_clk", + "gcc_venus_ahb_clk", + "gcc_venus_ctl_axi_clk", + "gcc_video_ahb_clk", + "gcc_video_axi0_clk", + "gcc_video_throttle_core_clk", + "gcc_video_vcodec0_sys_clk", + "gcc_video_venus_ctl_clk", + "gcc_video_xo_clk", + "gpu_cc_debug_mux", + "mc_cc_debug_mux", + "measure_only_cnoc_clk", + "measure_only_gcc_camera_xo_clk", + "measure_only_gcc_cpuss_gnoc_clk", + "measure_only_gcc_disp_xo_clk", + "measure_only_gcc_gpu_cfg_ahb_clk", + "measure_only_ipa_2x_clk", + "measure_only_snoc_clk", +}; + +static int gcc_debug_mux_sels[] = { + 0xB6, /* apss_cc_debug_mux */ + 0x45, /* disp_cc_debug_mux */ + 0x6A, /* gcc_ahb2phy_csi_clk */ + 0x6B, /* gcc_ahb2phy_usb_clk */ + 0x97, /* gcc_bimc_gpu_axi_clk */ + 0x7D, /* gcc_boot_rom_ahb_clk */ + 0x4F, /* gcc_cam_throttle_nrt_clk */ + 0x4E, /* gcc_cam_throttle_rt_clk */ + 0x3A, /* gcc_camera_ahb_clk */ + 0x141, /* gcc_camss_axi_clk */ + 0x13F, /* gcc_camss_cci_0_clk */ + 0x130, /* gcc_camss_cphy_0_clk */ + 0x131, /* gcc_camss_cphy_1_clk */ + 0x132, /* gcc_camss_cphy_2_clk */ + 0x122, /* gcc_camss_csi0phytimer_clk */ + 0x123, /* gcc_camss_csi1phytimer_clk */ + 0x124, /* gcc_camss_csi2phytimer_clk */ + 0x125, /* gcc_camss_mclk0_clk */ + 0x126, /* gcc_camss_mclk1_clk */ + 0x127, /* gcc_camss_mclk2_clk */ + 0x128, /* gcc_camss_mclk3_clk */ + 0x145, /* gcc_camss_nrt_axi_clk */ + 0x13E, /* gcc_camss_ope_ahb_clk */ + 0x13C, /* gcc_camss_ope_clk */ + 0x147, /* gcc_camss_rt_axi_clk */ + 0x129, /* gcc_camss_tfe_0_clk */ + 0x12D, /* gcc_camss_tfe_0_cphy_rx_clk */ + 0x133, /* gcc_camss_tfe_0_csid_clk */ + 0x12A, /* gcc_camss_tfe_1_clk */ + 0x12E, /* gcc_camss_tfe_1_cphy_rx_clk */ + 0x135, /* gcc_camss_tfe_1_csid_clk */ + 0x12B, /* 
gcc_camss_tfe_2_clk */ + 0x12F, /* gcc_camss_tfe_2_cphy_rx_clk */ + 0x137, /* gcc_camss_tfe_2_csid_clk */ + 0x140, /* gcc_camss_top_ahb_clk */ + 0x1E, /* gcc_cfg_noc_usb3_prim_axi_clk */ + 0x3B, /* gcc_disp_ahb_clk */ + 0x4A, /* gcc_disp_gpll0_div_clk_src */ + 0x40, /* gcc_disp_hf_axi_clk */ + 0x50, /* gcc_disp_sleep_clk */ + 0x4C, /* gcc_disp_throttle_core_clk */ + 0xC1, /* gcc_gp1_clk */ + 0xC2, /* gcc_gp2_clk */ + 0xC3, /* gcc_gp3_clk */ + 0xF1, /* gcc_gpu_gpll0_clk_src */ + 0xF2, /* gcc_gpu_gpll0_div_clk_src */ + 0xEE, /* gcc_gpu_memnoc_gfx_clk */ + 0xF0, /* gcc_gpu_snoc_dvm_gfx_clk */ + 0xF5, /* gcc_gpu_throttle_core_clk */ + 0x7A, /* gcc_pdm2_clk */ + 0x78, /* gcc_pdm_ahb_clk */ + 0x79, /* gcc_pdm_xo4_clk */ + 0x7B, /* gcc_prng_ahb_clk */ + 0x3D, /* gcc_qmip_camera_nrt_ahb_clk */ + 0x4B, /* gcc_qmip_camera_rt_ahb_clk */ + 0x3E, /* gcc_qmip_disp_ahb_clk */ + 0xF3, /* gcc_qmip_gpu_cfg_ahb_clk */ + 0x3C, /* gcc_qmip_video_vcodec_ahb_clk */ + 0x71, /* gcc_qupv3_wrap0_core_2x_clk */ + 0x70, /* gcc_qupv3_wrap0_core_clk */ + 0x72, /* gcc_qupv3_wrap0_s0_clk */ + 0x73, /* gcc_qupv3_wrap0_s1_clk */ + 0x74, /* gcc_qupv3_wrap0_s2_clk */ + 0x75, /* gcc_qupv3_wrap0_s3_clk */ + 0x76, /* gcc_qupv3_wrap0_s4_clk */ + 0x77, /* gcc_qupv3_wrap0_s5_clk */ + 0x6E, /* gcc_qupv3_wrap_0_m_ahb_clk */ + 0x6F, /* gcc_qupv3_wrap_0_s_ahb_clk */ + 0xF9, /* gcc_sdcc1_ahb_clk */ + 0xF8, /* gcc_sdcc1_apps_clk */ + 0xFA, /* gcc_sdcc1_ice_core_clk */ + 0x6D, /* gcc_sdcc2_ahb_clk */ + 0x6C, /* gcc_sdcc2_apps_clk */ + 0x9, /* gcc_sys_noc_cpuss_ahb_clk */ + 0x19, /* gcc_sys_noc_ufs_phy_axi_clk */ + 0x18, /* gcc_sys_noc_usb3_prim_axi_clk */ + 0x117, /* gcc_ufs_phy_ahb_clk */ + 0x116, /* gcc_ufs_phy_axi_clk */ + 0x11D, /* gcc_ufs_phy_ice_core_clk */ + 0x11E, /* gcc_ufs_phy_phy_aux_clk */ + 0x119, /* gcc_ufs_phy_rx_symbol_0_clk */ + 0x121, /* gcc_ufs_phy_rx_symbol_1_clk */ + 0x118, /* gcc_ufs_phy_tx_symbol_0_clk */ + 0x11C, /* gcc_ufs_phy_unipro_core_clk */ + 0x60, /* gcc_usb30_prim_master_clk */ + 0x62, /* gcc_usb30_prim_mock_utmi_clk */ + 0x61, /* gcc_usb30_prim_sleep_clk */ + 0x63, /* gcc_usb3_prim_phy_com_aux_clk */ + 0x64, /* gcc_usb3_prim_phy_pipe_clk */ + 0x14D, /* gcc_vcodec0_axi_clk */ + 0x14E, /* gcc_venus_ahb_clk */ + 0x14C, /* gcc_venus_ctl_axi_clk */ + 0x39, /* gcc_video_ahb_clk */ + 0x3F, /* gcc_video_axi0_clk */ + 0x4D, /* gcc_video_throttle_core_clk */ + 0x14A, /* gcc_video_vcodec0_sys_clk */ + 0x148, /* gcc_video_venus_ctl_clk */ + 0x41, /* gcc_video_xo_clk */ + 0xED, /* gpu_cc_debug_mux */ + 0xA5, /* mc_cc_debug_mux */ + 0x1C, /* measure_only_cnoc_clk */ + 0x42, /* measure_only_gcc_camera_xo_clk */ + 0xB1, /* measure_only_gcc_cpuss_gnoc_clk */ + 0x43, /* measure_only_gcc_disp_xo_clk */ + 0xEB, /* measure_only_gcc_gpu_cfg_ahb_clk */ + 0xCD, /* measure_only_ipa_2x_clk */ + 0x7, /* measure_only_snoc_clk */ +}; + +static struct clk_debug_mux gcc_debug_mux = { + .priv = &debug_mux_priv, + .debug_offset = 0x62000, + .post_div_offset = 0x30000, + .cbcr_offset = 0x30004, + .src_sel_mask = 0x3FF, + .src_sel_shift = 0, + .post_div_mask = 0xF, + .post_div_shift = 0, + .post_div_val = 1, + .mux_sels = gcc_debug_mux_sels, + .hw.init = &(struct clk_init_data){ + .name = "gcc_debug_mux", + .ops = &clk_debug_mux_ops, + .parent_names = gcc_debug_mux_parent_names, + .num_parents = ARRAY_SIZE(gcc_debug_mux_parent_names), + .flags = CLK_IS_MEASURE, + }, +}; + +static const char *const gpu_cc_debug_mux_parent_names[] = { + "gpu_cc_ahb_clk", + "gpu_cc_crc_ahb_clk", + "gpu_cc_cx_gfx3d_clk", + "gpu_cc_cx_gmu_clk", + 
"gpu_cc_cx_snoc_dvm_clk", + "gpu_cc_cxo_aon_clk", + "gpu_cc_cxo_clk", + "gpu_cc_gx_gfx3d_clk", + "gpu_cc_sleep_clk", + "measure_only_gcc_gpu_cfg_ahb_clk", + "measure_only_gpu_cc_gx_cxo_clk", +}; + +static int gpu_cc_debug_mux_sels[] = { + 0x10, /* gpu_cc_ahb_clk */ + 0x11, /* gpu_cc_crc_ahb_clk */ + 0x1A, /* gpu_cc_cx_gfx3d_clk */ + 0x18, /* gpu_cc_cx_gmu_clk */ + 0x15, /* gpu_cc_cx_snoc_dvm_clk */ + 0xA, /* gpu_cc_cxo_aon_clk */ + 0x19, /* gpu_cc_cxo_clk */ + 0xB, /* gpu_cc_gx_gfx3d_clk */ + 0x16, /* gpu_cc_sleep_clk */ + 0x1, /* measure_only_gcc_gpu_cfg_ahb_clk */ + 0xE, /* measure_only_gpu_cc_gx_cxo_clk */ +}; + +static struct clk_debug_mux gpu_cc_debug_mux = { + .priv = &debug_mux_priv, + .debug_offset = 0x1568, + .post_div_offset = 0x10FC, + .cbcr_offset = 0x1100, + .src_sel_mask = 0xFF, + .src_sel_shift = 0, + .post_div_mask = 0xF, + .post_div_shift = 0, + .post_div_val = 2, + .mux_sels = gpu_cc_debug_mux_sels, + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_debug_mux", + .ops = &clk_debug_mux_ops, + .parent_names = gpu_cc_debug_mux_parent_names, + .num_parents = ARRAY_SIZE(gpu_cc_debug_mux_parent_names), + .flags = CLK_IS_MEASURE, + }, +}; + +static const char *const mc_cc_debug_mux_parent_names[] = { + "measure_only_mccc_clk", +}; + +static struct clk_debug_mux mc_cc_debug_mux = { + .period_offset = 0x20, + .hw.init = &(struct clk_init_data){ + .name = "mc_cc_debug_mux", + .ops = &clk_debug_mux_ops, + .parent_names = mc_cc_debug_mux_parent_names, + .num_parents = ARRAY_SIZE(mc_cc_debug_mux_parent_names), + .flags = CLK_IS_MEASURE, + }, +}; + +static struct mux_regmap_names mux_list[] = { + { .mux = &apss_cc_debug_mux, .regmap_name = "qcom,cpucc" }, + { .mux = &disp_cc_debug_mux, .regmap_name = "qcom,dispcc" }, + { .mux = &gcc_debug_mux, .regmap_name = "qcom,gcc" }, + { .mux = &gpu_cc_debug_mux, .regmap_name = "qcom,gpucc" }, + { .mux = &mc_cc_debug_mux, .regmap_name = "qcom,mccc" }, +}; + +static struct clk_dummy measure_only_mccc_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_mccc_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_cnoc_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_cnoc_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_disp_cc_sleep_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_disp_cc_sleep_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_disp_cc_xo_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_disp_cc_xo_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_camera_xo_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_camera_xo_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_cpuss_gnoc_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_cpuss_gnoc_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_disp_xo_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_disp_xo_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gcc_gpu_cfg_ahb_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_gcc_gpu_cfg_ahb_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_gpu_cc_gx_cxo_clk = { + .rrate = 1000, + .hw.init = 
&(struct clk_init_data){ + .name = "measure_only_gpu_cc_gx_cxo_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_ipa_2x_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_ipa_2x_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy measure_only_snoc_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_snoc_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy perfcl_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "perfcl_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy pwrcl_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "pwrcl_clk", + .ops = &clk_dummy_ops, + }, +}; + +struct clk_hw *debugcc_khaje_hws[] = { + &measure_only_cnoc_clk.hw, + &measure_only_disp_cc_sleep_clk.hw, + &measure_only_disp_cc_xo_clk.hw, + &measure_only_gcc_camera_xo_clk.hw, + &measure_only_gcc_cpuss_gnoc_clk.hw, + &measure_only_gcc_disp_xo_clk.hw, + &measure_only_gcc_gpu_cfg_ahb_clk.hw, + &measure_only_gpu_cc_gx_cxo_clk.hw, + &measure_only_ipa_2x_clk.hw, + &measure_only_mccc_clk.hw, + &measure_only_snoc_clk.hw, + &perfcl_clk.hw, + &pwrcl_clk.hw, +}; + +static const struct of_device_id clk_debug_match_table[] = { + { .compatible = "qcom,khaje-debugcc" }, + { } +}; + +static int clk_debug_khaje_probe(struct platform_device *pdev) +{ + struct clk *clk; + int ret = 0, i; + + BUILD_BUG_ON(ARRAY_SIZE(apss_cc_debug_mux_parent_names) != + ARRAY_SIZE(apss_cc_debug_mux_sels)); + BUILD_BUG_ON(ARRAY_SIZE(disp_cc_debug_mux_parent_names) != + ARRAY_SIZE(disp_cc_debug_mux_sels)); + BUILD_BUG_ON(ARRAY_SIZE(gcc_debug_mux_parent_names) != + ARRAY_SIZE(gcc_debug_mux_sels)); + BUILD_BUG_ON(ARRAY_SIZE(gpu_cc_debug_mux_parent_names) != + ARRAY_SIZE(gpu_cc_debug_mux_sels)); + + clk = devm_clk_get(&pdev->dev, "xo_clk_src"); + if (IS_ERR(clk)) { + if (PTR_ERR(clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "Unable to get xo clock\n"); + return PTR_ERR(clk); + } + + debug_mux_priv.cxo = clk; + + for (i = 0; i < ARRAY_SIZE(mux_list); i++) { + ret = map_debug_bases(pdev, mux_list[i].regmap_name, + mux_list[i].mux); + if (ret == -EBADR) + continue; + else if (ret) + return ret; + + clk = devm_clk_register(&pdev->dev, &mux_list[i].mux->hw); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "Unable to register %s, err:(%d)\n", + mux_list[i].mux->hw.init->name, PTR_ERR(clk)); + return PTR_ERR(clk); + } + } + + for (i = 0; i < ARRAY_SIZE(debugcc_khaje_hws); i++) { + clk = devm_clk_register(&pdev->dev, debugcc_khaje_hws[i]); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "Unable to register %s, err:(%d)\n", + debugcc_khaje_hws[i]->init->name, PTR_ERR(clk)); + return PTR_ERR(clk); + } + } + + ret = clk_debug_measure_register(&gcc_debug_mux.hw); + if (ret) { + dev_err(&pdev->dev, "Could not register Measure clocks\n"); + return ret; + } + + dev_info(&pdev->dev, "Registered debug measure clocks\n"); + + return ret; +} + +static struct platform_driver clk_debug_driver = { + .probe = clk_debug_khaje_probe, + .driver = { + .name = "khaje-debugcc", + .of_match_table = clk_debug_match_table, + }, +}; + +int __init clk_debug_khaje_init(void) +{ + return platform_driver_register(&clk_debug_driver); +} +fs_initcall(clk_debug_khaje_init); + +MODULE_DESCRIPTION("QTI DEBUG CC KHAJE Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/debugcc-sdm429w.c b/drivers/clk/qcom/debugcc-sdm429w.c index d14919a3a168..11b881733bee 100644 --- a/drivers/clk/qcom/debugcc-sdm429w.c +++ 
b/drivers/clk/qcom/debugcc-sdm429w.c @@ -25,6 +25,39 @@ static struct measure_clk_data debug_mux_priv = { .xo_div4_cbcr = 0x30034, }; +static const char *const apss_cc_debug_mux_parent_names[] = { + "pwrcl_clk", +}; + +static int apss_cc_debug_mux_sels[] = { + 0x0, /* pwrcl_clk */ +}; + +static int apss_cc_debug_mux_pre_divs[] = { + 0x1, /* pwrcl_clk */ +}; + +static struct clk_debug_mux apss_cc_debug_mux = { + .priv = &debug_mux_priv, + .debug_offset = 0x0, + .post_div_offset = 0x0, + .cbcr_offset = U32_MAX, + .src_sel_mask = 0x3FF00, + .src_sel_shift = 8, + .post_div_mask = 0xF0000000, + .post_div_shift = 28, + .post_div_val = 1, + .mux_sels = apss_cc_debug_mux_sels, + .pre_div_vals = apss_cc_debug_mux_pre_divs, + .hw.init = &(struct clk_init_data){ + .name = "apss_cc_debug_mux", + .ops = &clk_debug_mux_ops, + .parent_names = apss_cc_debug_mux_parent_names, + .num_parents = ARRAY_SIZE(apss_cc_debug_mux_parent_names), + .flags = CLK_IS_MEASURE, + }, +}; + static const char *const gcc_debug_mux_parent_names[] = { "gcc_ahb_clk", "gcc_apss_ahb_clk", @@ -154,8 +187,9 @@ static const char *const gcc_debug_mux_parent_names[] = { "gcc_gfx_tbu_clk", "gcc_gfx_tcu_clk", "gcc_gtcu_ahb_clk", - "gcc_bimc_clk", + "bimc_clk", "gcc_smmu_cfg_clk", + "apss_cc_debug_mux", }; static int gcc_debug_mux_sels[] = { @@ -287,8 +321,9 @@ static int gcc_debug_mux_sels[] = { 0x52, /* gcc_gfx_tbu_clk */ 0x53, /* gcc_gfx_tcu_clk */ 0x58, /* gcc_gtcu_ahb_clk */ - 0x15A, /* gcc_bimc_clk */ + 0x15A, /* bimc_clk */ 0x5B, /* gcc_smmu_cfg_clk */ + 0x16A, /* apss_cc_debug_mux */ }; static struct clk_debug_mux gcc_debug_mux = { @@ -312,13 +347,27 @@ static struct clk_debug_mux gcc_debug_mux = { }, }; +static struct clk_dummy pwrcl_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "pwrcl_clk", + .ops = &clk_dummy_ops, + }, +}; + +struct clk_hw *debugcc_qm215_hws[] = { + &pwrcl_clk.hw, +}; + static struct mux_regmap_names mux_list[] = { { .mux = &gcc_debug_mux, .regmap_name = "qcom,gcc" }, + { .mux = &apss_cc_debug_mux, .regmap_name = "qcom,cpu" }, }; static const struct of_device_id clk_debug_match_table[] = { { .compatible = "qcom,sdm429w-debugcc" }, { .compatible = "qcom,qm215-debugcc" }, + { .compatible = "qcom,sdm429-debugcc" }, { } }; @@ -356,6 +405,15 @@ static int clk_debug_sdm429w_probe(struct platform_device *pdev) } } + for (i = 0; i < ARRAY_SIZE(debugcc_qm215_hws); i++) { + clk = devm_clk_register(&pdev->dev, debugcc_qm215_hws[i]); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "Unable to register %s, err:(%d)\n", + debugcc_qm215_hws[i]->init->name, PTR_ERR(clk)); + return PTR_ERR(clk); + } + } + ret = clk_debug_measure_register(&gcc_debug_mux.hw); if (ret) { dev_err(&pdev->dev, "Could not register Measure clocks\n"); diff --git a/drivers/clk/qcom/debugcc-sdm439.c b/drivers/clk/qcom/debugcc-sdm439.c new file mode 100644 index 000000000000..08afa248d49a --- /dev/null +++ b/drivers/clk/qcom/debugcc-sdm439.c @@ -0,0 +1,471 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "clk: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "clk-debug.h" +#include "common.h" + +static struct measure_clk_data debug_mux_priv = { + .ctl_reg = 0x74004, + .status_reg = 0x74008, + .xo_div4_cbcr = 0x30034, +}; + +static const char *const apss_cc_debug_mux_parent_names[] = { + "pwrcl_clk", + "perfcl_clk", +}; + +static int apss_cc_debug_mux_sels[] = { + 0x0, /* pwrcl_clk */ + 0x1, /* perfcl_clk */ +}; + +static int apss_cc_debug_mux_pre_divs[] = { + 0x1, /* pwrcl_clk */ + 0x1, /* perfcl_clk */ +}; + +static struct clk_debug_mux apss_cc_debug_mux = { + .priv = &debug_mux_priv, + .debug_offset = 0x0, + .post_div_offset = 0x0, + .cbcr_offset = U32_MAX, + .src_sel_mask = 0x3FF00, + .src_sel_shift = 8, + .post_div_mask = 0xF0000000, + .post_div_shift = 28, + .post_div_val = 2, + .mux_sels = apss_cc_debug_mux_sels, + .pre_div_vals = apss_cc_debug_mux_pre_divs, + .hw.init = &(struct clk_init_data){ + .name = "apss_cc_debug_mux", + .ops = &clk_debug_mux_ops, + .parent_names = apss_cc_debug_mux_parent_names, + .num_parents = ARRAY_SIZE(apss_cc_debug_mux_parent_names), + .flags = CLK_IS_MEASURE, + }, +}; + +static const char *const gcc_debug_mux_parent_names[] = { + "gcc_ahb_clk", + "gcc_apss_ahb_clk", + "gcc_apss_axi_clk", + "gcc_bimc_gfx_clk", + "gcc_bimc_gpu_clk", + "gcc_blsp1_ahb_clk", + "gcc_blsp1_qup2_i2c_apps_clk", + "gcc_blsp1_qup2_spi_apps_clk", + "gcc_blsp1_qup3_i2c_apps_clk", + "gcc_blsp1_qup3_spi_apps_clk", + "gcc_blsp1_qup4_i2c_apps_clk", + "gcc_blsp1_qup4_spi_apps_clk", + "gcc_blsp1_sleep_clk", + "gcc_blsp1_uart1_apps_clk", + "gcc_blsp1_uart1_sim_clk", + "gcc_blsp1_uart2_apps_clk", + "gcc_blsp1_uart2_sim_clk", + "gcc_blsp2_ahb_clk", + "gcc_blsp2_qup1_i2c_apps_clk", + "gcc_blsp2_qup1_spi_apps_clk", + "gcc_blsp2_qup2_i2c_apps_clk", + "gcc_blsp2_qup2_spi_apps_clk", + "gcc_blsp2_qup3_i2c_apps_clk", + "gcc_blsp2_qup3_spi_apps_clk", + "gcc_blsp2_sleep_clk", + "gcc_blsp2_uart1_apps_clk", + "gcc_blsp2_uart1_sim_clk", + "gcc_blsp2_uart2_apps_clk", + "gcc_blsp2_uart2_sim_clk", + "gcc_boot_rom_ahb_clk", + "gcc_camss_ahb_clk", + "gcc_camss_cci_ahb_clk", + "gcc_camss_cci_clk", + "gcc_camss_cpp_ahb_clk", + "gcc_camss_cpp_axi_clk", + "gcc_camss_cpp_clk", + "gcc_camss_csi0_ahb_clk", + "gcc_camss_csi0_clk", + "gcc_camss_csi0phy_clk", + "gcc_camss_csi0phytimer_clk", + "gcc_camss_csi0pix_clk", + "gcc_camss_csi0rdi_clk", + "gcc_camss_csi1_ahb_clk", + "gcc_camss_csi1_clk", + "gcc_camss_csi1phy_clk", + "gcc_camss_csi1phytimer_clk", + "gcc_camss_csi1pix_clk", + "gcc_camss_csi1rdi_clk", + "gcc_camss_csi2_ahb_clk", + "gcc_camss_csi2_clk", + "gcc_camss_csi2phy_clk", + "gcc_camss_csi2pix_clk", + "gcc_camss_csi2rdi_clk", + "gcc_camss_csi_vfe0_clk", + "gcc_camss_csi_vfe1_clk", + "gcc_camss_gp0_clk", + "gcc_camss_gp1_clk", + "gcc_camss_ispif_ahb_clk", + "gcc_camss_jpeg0_clk", + "gcc_camss_jpeg_ahb_clk", + "gcc_camss_jpeg_axi_clk", + "gcc_camss_mclk0_clk", + "gcc_camss_mclk1_clk", + "gcc_camss_mclk2_clk", + "gcc_camss_micro_ahb_clk", + "gcc_camss_top_ahb_clk", + "gcc_camss_vfe0_clk", + "gcc_camss_vfe1_ahb_clk", + "gcc_camss_vfe1_axi_clk", + "gcc_camss_vfe1_clk", + "gcc_camss_vfe_ahb_clk", + "gcc_camss_vfe_axi_clk", + "gcc_crypto_ahb_clk", + "gcc_crypto_axi_clk", + "gcc_crypto_clk", + "gcc_gp1_clk", + "gcc_gp2_clk", + "gcc_gp3_clk", + "gcc_im_sleep_clk", + "gcc_lpass_mport_axi_clk", + "gcc_lpass_q6_axi_clk", + "gcc_lpass_sway_clk", + "gcc_mdss_ahb_clk", + "gcc_mdss_axi_clk", + 
"gcc_mdss_byte0_clk", + "gcc_mdss_esc0_clk", + "gcc_mdss_mdp_clk", + "gcc_mdss_pclk0_clk", + "gcc_mdss_vsync_clk", + "gcc_mpm_ahb_clk", + "gcc_msg_ram_ahb_clk", + "gcc_oxili_ahb_clk", + "gcc_oxili_aon_clk", + "gcc_oxili_gfx3d_clk", + "gcc_pcnoc_mpu_cfg_ahb_clk", + "gcc_pdm2_clk", + "gcc_pdm_ahb_clk", + "gcc_pdm_xo4_clk", + "gcc_prng_ahb_clk", + "gcc_q6_mpu_cfg_ahb_clk", + "gcc_rpm_cfg_xpu_clk", + "gcc_sdcc1_ahb_clk", + "gcc_sdcc1_apps_clk", + "gcc_sdcc1_ice_core_clk", + "gcc_sdcc2_ahb_clk", + "gcc_sdcc2_apps_clk", + "gcc_sec_ctrl_acc_clk", + "gcc_sec_ctrl_ahb_clk", + "gcc_sec_ctrl_boot_rom_patch_clk", + "gcc_sec_ctrl_clk", + "gcc_sec_ctrl_sense_clk", + "gcc_tcsr_ahb_clk", + "gcc_tlmm_ahb_clk", + "gcc_tlmm_clk", + "gcc_usb2a_phy_sleep_clk", + "gcc_usb_hs_ahb_clk", + "gcc_usb_hs_inactivity_timers_clk", + "gcc_usb_hs_phy_cfg_ahb_clk", + "gcc_usb_hs_system_clk", + "gcc_venus0_ahb_clk", + "gcc_venus0_axi_clk", + "gcc_venus0_core0_vcodec0_clk", + "gcc_venus0_vcodec0_clk", + "gcc_xo_clk", + "gcc_xo_div4_clk", + "gcc_gfx_tbu_clk", + "gcc_gfx_tcu_clk", + "gcc_gtcu_ahb_clk", + "bimc_clk", + "gcc_smmu_cfg_clk", + "apss_cc_debug_mux", + "gcc_mdss_pclk1_clk", + "gcc_mdss_byte1_clk", + "gcc_mdss_esc1_clk", + "gcc_oxili_timer_clk", + "gcc_blsp1_qup1_spi_apps_clk", + "gcc_blsp1_qup1_i2c_apps_clk", + "gcc_blsp2_qup4_spi_apps_clk", + "gcc_blsp2_qup4_i2c_apps_clk", +}; + +static int gcc_debug_mux_sels[] = { + 0x148, /* gcc_ahb_clk */ + 0x168, /* gcc_apss_ahb_clk */ + 0x169, /* gcc_apss_axi_clk */ + 0x2D, /* gcc_bimc_gfx_clk */ + 0x157, /* gcc_bimc_gpu_clk */ + 0x88, /* gcc_blsp1_ahb_clk */ + 0x90, /* gcc_blsp1_qup2_i2c_apps_clk */ + 0x8E, /* gcc_blsp1_qup2_spi_apps_clk */ + 0x94, /* gcc_blsp1_qup3_i2c_apps_clk */ + 0x93, /* gcc_blsp1_qup3_spi_apps_clk */ + 0x96, /* gcc_blsp1_qup4_i2c_apps_clk */ + 0x95, /* gcc_blsp1_qup4_spi_apps_clk */ + 0x89, /* gcc_blsp1_sleep_clk */ + 0x8C, /* gcc_blsp1_uart1_apps_clk */ + 0x8D, /* gcc_blsp1_uart1_sim_clk */ + 0x91, /* gcc_blsp1_uart2_apps_clk */ + 0x92, /* gcc_blsp1_uart2_sim_clk */ + 0x98, /* gcc_blsp2_ahb_clk */ + 0x9B, /* gcc_blsp2_qup1_i2c_apps_clk */ + 0x9A, /* gcc_blsp2_qup1_spi_apps_clk */ + 0xA0, /* gcc_blsp2_qup2_i2c_apps_clk */ + 0x9E, /* gcc_blsp2_qup2_spi_apps_clk */ + 0xA4, /* gcc_blsp2_qup3_i2c_apps_clk */ + 0xA3, /* gcc_blsp2_qup3_spi_apps_clk */ + 0x99, /* gcc_blsp2_sleep_clk */ + 0x9C, /* gcc_blsp2_uart1_apps_clk */ + 0x9D, /* gcc_blsp2_uart1_sim_clk */ + 0x9A, /* gcc_blsp2_uart2_apps_clk */ + 0xA2, /* gcc_blsp2_uart2_sim_clk */ + 0xF8, /* gcc_boot_rom_ahb_clk */ + 0xA8, /* gcc_camss_ahb_clk */ + 0xB0, /* gcc_camss_cci_ahb_clk */ + 0xAF, /* gcc_camss_cci_clk */ + 0xBA, /* gcc_camss_cpp_ahb_clk */ + 0x1A3, /* gcc_camss_cpp_axi_clk */ + 0xB9, /* gcc_camss_cpp_clk */ + 0xC1, /* gcc_camss_csi0_ahb_clk */ + 0xC0, /* gcc_camss_csi0_clk */ + 0xC2, /* gcc_camss_csi0phy_clk */ + 0xB1, /* gcc_camss_csi0phytimer_clk */ + 0xC4, /* gcc_camss_csi0pix_clk */ + 0xC3, /* gcc_camss_csi0rdi_clk */ + 0xC6, /* gcc_camss_csi1_ahb_clk */ + 0xC5, /* gcc_camss_csi1_clk */ + 0xC7, /* gcc_camss_csi1phy_clk */ + 0xB2, /* gcc_camss_csi1phytimer_clk */ + 0xE1, /* gcc_camss_csi1pix_clk */ + 0xE0, /* gcc_camss_csi1rdi_clk */ + 0xE4, /* gcc_camss_csi2_ahb_clk */ + 0xE3, /* gcc_camss_csi2_clk */ + 0xE5, /* gcc_camss_csi2phy_clk */ + 0xE7, /* gcc_camss_csi2pix_clk */ + 0xE6, /* gcc_camss_csi2rdi_clk */ + 0xBF, /* gcc_camss_csi_vfe0_clk */ + 0x1A0, /* gcc_camss_csi_vfe1_clk */ + 0xAB, /* gcc_camss_gp0_clk */ + 0xAC, /* gcc_camss_gp1_clk */ + 0xE2, /* gcc_camss_ispif_ahb_clk */ + 0xB3, /* 
gcc_camss_jpeg0_clk */ + 0xB4, /* gcc_camss_jpeg_ahb_clk */ + 0xB5, /* gcc_camss_jpeg_axi_clk */ + 0xAD, /* gcc_camss_mclk0_clk */ + 0xAE, /* gcc_camss_mclk1_clk */ + 0x1BD, /* gcc_camss_mclk2_clk */ + 0xAA, /* gcc_camss_micro_ahb_clk */ + 0xA9, /* gcc_camss_top_ahb_clk */ + 0xB8, /* gcc_camss_vfe0_clk */ + 0x1A2, /* gcc_camss_vfe1_ahb_clk */ + 0x1A4, /* gcc_camss_vfe1_axi_clk */ + 0x1A1, /* gcc_camss_vfe1_clk */ + 0xBB, /* gcc_camss_vfe_ahb_clk */ + 0xBC, /* gcc_camss_vfe_axi_clk */ + 0x13A, /* gcc_crypto_ahb_clk */ + 0x139, /* gcc_crypto_axi_clk */ + 0x138, /* gcc_crypto_clk */ + 0x10, /* gcc_gp1_clk */ + 0x11, /* gcc_gp2_clk */ + 0x12, /* gcc_gp3_clk */ + 0x14B, /* gcc_im_sleep_clk */ + 0x162, /* gcc_lpass_mport_axi_clk */ + 0x160, /* gcc_lpass_q6_axi_clk */ + 0x163, /* gcc_lpass_sway_clk */ + 0x1F6, /* gcc_mdss_ahb_clk */ + 0x1F7, /* gcc_mdss_axi_clk */ + 0x1FC, /* gcc_mdss_byte0_clk */ + 0x1FD, /* gcc_mdss_esc0_clk */ + 0x1F9, /* gcc_mdss_mdp_clk */ + 0x1F8, /* gcc_mdss_pclk0_clk */ + 0x1FB, /* gcc_mdss_vsync_clk */ + 0x110, /* gcc_mpm_ahb_clk */ + 0x100, /* gcc_msg_ram_ahb_clk */ + 0x1EB, /* gcc_oxili_ahb_clk */ + 0xEE, /* gcc_oxili_aon_clk */ + 0x1EA, /* gcc_oxili_gfx3d_clk */ + 0xC9, /* gcc_pcnoc_mpu_cfg_ahb_clk */ + 0xD2, /* gcc_pdm2_clk */ + 0xD0, /* gcc_pdm_ahb_clk */ + 0xD1, /* gcc_pdm_xo4_clk */ + 0xD8, /* gcc_prng_ahb_clk */ + 0xC8, /* gcc_q6_mpu_cfg_ahb_clk */ + 0x38, /* gcc_rpm_cfg_xpu_clk */ + 0x69, /* gcc_sdcc1_ahb_clk */ + 0x68, /* gcc_sdcc1_apps_clk */ + 0x6A, /* gcc_sdcc1_ice_core_clk */ + 0x71, /* gcc_sdcc2_ahb_clk */ + 0x70, /* gcc_sdcc2_apps_clk */ + 0x120, /* gcc_sec_ctrl_acc_clk */ + 0x121, /* gcc_sec_ctrl_ahb_clk */ + 0x124, /* gcc_sec_ctrl_boot_rom_patch_clk */ + 0x122, /* gcc_sec_ctrl_clk */ + 0x123, /* gcc_sec_ctrl_sense_clk */ + 0xE8, /* gcc_tcsr_ahb_clk */ + 0x108, /* gcc_tlmm_ahb_clk */ + 0x109, /* gcc_tlmm_clk */ + 0x63, /* gcc_usb2a_phy_sleep_clk */ + 0x61, /* gcc_usb_hs_ahb_clk */ + 0x62, /* gcc_usb_hs_inactivity_timers_clk */ + 0x64, /* gcc_usb_hs_phy_cfg_ahb_clk */ + 0x60, /* gcc_usb_hs_system_clk */ + 0x1F3, /* gcc_venus0_ahb_clk */ + 0x1F2, /* gcc_venus0_axi_clk */ + 0x1B8, /* gcc_venus0_core0_vcodec0_clk */ + 0x1F1, /* gcc_venus0_vcodec0_clk */ + 0x149, /* gcc_xo_clk */ + 0x14A, /* gcc_xo_div4_clk */ + 0x52, /* gcc_gfx_tbu_clk */ + 0x53, /* gcc_gfx_tcu_clk */ + 0x58, /* gcc_gtcu_ahb_clk */ + 0x15A, /* bimc_clk */ + 0x5B, /* gcc_smmu_cfg_clk */ + 0x16A, /* apss_cc_debug_mux */ + 0x1e3, /* gcc_mdss_pclk1_clk */ + 0x1e4, /* gcc_mdss_byte1_clk */ + 0x1e5, /* gcc_mdss_esc1_clk */ + 0x1e9, /* gcc_oxili_timer_clk */ + 0x8a, /* gcc_blsp1_qup1_spi_apps_clk */ + 0x8b, /* gcc_blsp1_qup1_i2c_apps_clk */ + 0xa5, /* gcc_blsp2_qup4_spi_apps_clk */ + 0xa6, /* gcc_blsp2_qup4_i2c_apps_clk */ +}; + +static struct clk_debug_mux gcc_debug_mux = { + .priv = &debug_mux_priv, + .en_mask = BIT(16), + .debug_offset = 0x74000, + .post_div_offset = 0x74000, + .cbcr_offset = 0x74000, + .src_sel_mask = 0x1FF, + .src_sel_shift = 0, + .post_div_mask = 0xF000, + .post_div_shift = 12, + .post_div_val = 1, + .mux_sels = gcc_debug_mux_sels, + .hw.init = &(struct clk_init_data){ + .name = "gcc_debug_mux", + .ops = &clk_debug_mux_ops, + .parent_names = gcc_debug_mux_parent_names, + .num_parents = ARRAY_SIZE(gcc_debug_mux_parent_names), + .flags = CLK_IS_MEASURE, + }, +}; + +static struct clk_dummy pwrcl_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "pwrcl_clk", + .ops = &clk_dummy_ops, + }, +}; + +static struct clk_dummy perfcl_clk = { + .rrate = 1000, + 
.hw.init = &(struct clk_init_data){ + .name = "perfcl_clk", + .ops = &clk_dummy_ops, + }, +}; + +struct clk_hw *debugcc_sdm439_hws[] = { + &pwrcl_clk.hw, + &perfcl_clk.hw, +}; + +static struct mux_regmap_names mux_list[] = { + { .mux = &gcc_debug_mux, .regmap_name = "qcom,gcc" }, + { .mux = &apss_cc_debug_mux, .regmap_name = "qcom,cpu" }, +}; + +static const struct of_device_id clk_debug_match_table[] = { + { .compatible = "qcom,sdm439-debugcc" }, + { } +}; + +static int clk_debug_sdm429w_probe(struct platform_device *pdev) +{ + struct clk *clk; + int ret, i; + + BUILD_BUG_ON(ARRAY_SIZE(gcc_debug_mux_parent_names) != + ARRAY_SIZE(gcc_debug_mux_sels)); + + clk = devm_clk_get(&pdev->dev, "xo_clk_src"); + if (IS_ERR(clk)) { + if (PTR_ERR(clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "Unable to get xo clock\n"); + return PTR_ERR(clk); + } + + debug_mux_priv.cxo = clk; + + for (i = 0; i < ARRAY_SIZE(mux_list); i++) { + ret = map_debug_bases(pdev, mux_list[i].regmap_name, + mux_list[i].mux); + if (ret == -EBADR) + continue; + else if (ret) + return ret; + + clk = devm_clk_register(&pdev->dev, &mux_list[i].mux->hw); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "Unable to register %s, err:(%d)\n", + clk_hw_get_name(&mux_list[i].mux->hw), + PTR_ERR(clk)); + return PTR_ERR(clk); + } + } + + for (i = 0; i < ARRAY_SIZE(debugcc_sdm439_hws); i++) { + clk = devm_clk_register(&pdev->dev, + debugcc_sdm439_hws[i]); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "Unable to register %s, err:(%d)\n", + debugcc_sdm439_hws[i]->init->name, + PTR_ERR(clk)); + return PTR_ERR(clk); + } + } + + ret = clk_debug_measure_register(&gcc_debug_mux.hw); + if (ret) { + dev_err(&pdev->dev, "Could not register Measure clocks\n"); + return ret; + } + + dev_info(&pdev->dev, "Registered debug measure clocks\n"); + + return ret; +} + +static struct platform_driver clk_debug_driver = { + .probe = clk_debug_sdm429w_probe, + .driver = { + .name = "sdm439-debugcc", + .of_match_table = clk_debug_match_table, + }, +}; + +static int __init clk_debug_sdm429w_init(void) +{ + return platform_driver_register(&clk_debug_driver); +} +fs_initcall(clk_debug_sdm429w_init); + +MODULE_DESCRIPTION("QTI DEBUG CC SDM429W Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/dispcc-khaje.c b/drivers/clk/qcom/dispcc-khaje.c new file mode 100644 index 000000000000..95dcfc413f83 --- /dev/null +++ b/drivers/clk/qcom/dispcc-khaje.c @@ -0,0 +1,637 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap-divider.h" +#include "clk-regmap.h" +#include "common.h" +#include "vdd-level-bengal.h" + +static DEFINE_VDD_REGULATORS(vdd_cx, VDD_HIGH + 1, 1, vdd_corner); + + +enum { + P_BI_TCXO, + P_DISP_CC_PLL0_OUT_EVEN, + P_DISP_CC_PLL0_OUT_MAIN, + P_DSI0_PHY_PLL_OUT_BYTECLK, + P_DSI0_PHY_PLL_OUT_DSICLK, + P_GCC_DISP_GPLL0_DIV_CLK_SRC, +}; + +static const struct parent_map disp_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 }, +}; + +static const char * const disp_cc_parent_names_0[] = { + "bi_tcxo", + "dsi0_phy_pll_out_byteclk", +}; + +static const struct parent_map disp_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_DISP_CC_PLL0_OUT_MAIN, 1 }, + { P_GCC_DISP_GPLL0_DIV_CLK_SRC, 4 }, + { P_DISP_CC_PLL0_OUT_EVEN, 5 }, +}; + +static const char * const disp_cc_parent_names_1[] = { + "bi_tcxo", + "disp_cc_pll0", + "gcc_disp_gpll0_div_clk_src", + "disp_cc_pll0", +}; + +static const struct parent_map disp_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_GCC_DISP_GPLL0_DIV_CLK_SRC, 4 }, +}; + +static const char * const disp_cc_parent_names_2[] = { + "bi_tcxo", + "gcc_disp_gpll0_div_clk_src", +}; + +static const struct parent_map disp_cc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_DSI0_PHY_PLL_OUT_DSICLK, 1 }, +}; + +static const char * const disp_cc_parent_names_3[] = { + "bi_tcxo", + "dsi0_phy_pll_out_dsiclk", +}; + +static const struct parent_map disp_cc_parent_map_4[] = { + { P_BI_TCXO, 0 }, +}; + +static const char * const disp_cc_parent_names_4[] = { + "bi_tcxo", +}; + +static struct pll_vco lucid_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +/* 615MHz configuration */ +static const struct alpha_pll_config disp_cc_pll0_config = { + .l = 0x20, + .alpha = 0x800, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002261, + .config_ctl_hi1_val = 0x329A299C, + .user_ctl_val = 0x00000001, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x00000000, +}; + +static struct clk_alpha_pll disp_cc_pll0 = { + .offset = 0x0, + .vco_table = lucid_vco, + .num_vco = ARRAY_SIZE(lucid_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_pll0", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1500000000, + [VDD_NOMINAL] = 1750000000, + [VDD_HIGH] = 2000000000}, + }, + }, +}; + +static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = { + .reg = 0x10dc, + .shift = 0, + .width = 4, + .clkr.hw.init = &(struct clk_init_data) { + .name = "disp_cc_mdss_byte0_div_clk_src", + .parent_names = + (const char *[]){ "disp_cc_mdss_byte0_clk_src" }, + .num_parents = 1, + .ops = &clk_regmap_div_ops, + }, +}; + +static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(37500000, P_GCC_DISP_GPLL0_DIV_CLK_SRC, 8, 0, 0), + F(75000000, P_GCC_DISP_GPLL0_DIV_CLK_SRC, 4, 0, 0), + { } +}; + +static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = { + .cmd_rcgr = 0x115c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_2, + .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = 
"disp_cc_mdss_ahb_clk_src", + .parent_names = disp_cc_parent_names_2, + .num_parents = ARRAY_SIZE(disp_cc_parent_names_2), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 37500000, + [VDD_NOMINAL] = 75000000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = { + .cmd_rcgr = 0x10c4, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_0, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_byte0_clk_src", + .parent_names = disp_cc_parent_names_0, + .num_parents = ARRAY_SIZE(disp_cc_parent_names_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_byte2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 187500000, + [VDD_LOW] = 300000000, + [VDD_LOW_L1] = 358000000}, + }, +}; + +static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = { + .cmd_rcgr = 0x10e0, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_0, + .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_esc0_clk_src", + .parent_names = disp_cc_parent_names_0, + .num_parents = ARRAY_SIZE(disp_cc_parent_names_0), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(200000000, P_GCC_DISP_GPLL0_DIV_CLK_SRC, 1.5, 0, 0), + F(300000000, P_GCC_DISP_GPLL0_DIV_CLK_SRC, 1, 0, 0), + F(383000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0), + F(470000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0), + F(560000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = { + .cmd_rcgr = 0x107c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_1, + .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_mdp_clk_src", + .parent_names = disp_cc_parent_names_1, + .num_parents = ARRAY_SIZE(disp_cc_parent_names_1), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 200000000, + [VDD_LOW] = 300000000, + [VDD_LOW_L1] = 383000000, + [VDD_NOMINAL] = 470000000, + [VDD_HIGH] = 560000000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = { + .cmd_rcgr = 0x1064, + .mnd_width = 8, + .hid_width = 5, + .parent_map = disp_cc_parent_map_3, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_pclk0_clk_src", + .parent_names = disp_cc_parent_names_3, + .num_parents = ARRAY_SIZE(disp_cc_parent_names_3), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_pixel_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 300000000, + [VDD_LOW] = 525000000, + [VDD_LOW_L1] = 625000000}, + }, +}; + +static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(200000000, P_GCC_DISP_GPLL0_DIV_CLK_SRC, 1.5, 0, 0), + F(300000000, P_GCC_DISP_GPLL0_DIV_CLK_SRC, 1, 0, 0), + { } +}; + +static struct clk_rcg2 disp_cc_mdss_rot_clk_src = { + .cmd_rcgr = 0x1094, + .mnd_width = 
0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_1, + .freq_tbl = ftbl_disp_cc_mdss_rot_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_rot_clk_src", + .parent_names = disp_cc_parent_names_1, + .num_parents = ARRAY_SIZE(disp_cc_parent_names_1), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 200000000, + [VDD_LOW] = 300000000}, + }, +}; + +static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = { + .cmd_rcgr = 0x10ac, + .mnd_width = 0, + .hid_width = 5, + .parent_map = disp_cc_parent_map_4, + .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_vsync_clk_src", + .parent_names = disp_cc_parent_names_4, + .num_parents = ARRAY_SIZE(disp_cc_parent_names_4), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static struct clk_branch disp_cc_mdss_ahb_clk = { + .halt_reg = 0x104c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x104c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_ahb_clk", + .parent_names = (const char *[]){ + "disp_cc_mdss_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_byte0_clk = { + .halt_reg = 0x102c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x102c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_byte0_clk", + .parent_names = (const char *[]){ + "disp_cc_mdss_byte0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_byte0_intf_clk = { + .halt_reg = 0x1030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_byte0_intf_clk", + .parent_names = (const char *[]){ + "disp_cc_mdss_byte0_div_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_esc0_clk = { + .halt_reg = 0x1034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1034, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_esc0_clk", + .parent_names = (const char *[]){ + "disp_cc_mdss_esc0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_mdp_clk = { + .halt_reg = 0x1010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_mdp_clk", + .parent_names = (const char *[]){ + "disp_cc_mdss_mdp_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_mdp_lut_clk = { + .halt_reg = 0x1020, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x1020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_mdp_lut_clk", + .parent_names = (const char *[]){ + "disp_cc_mdss_mdp_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = { + .halt_reg = 0x2004, + 
.halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x2004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_non_gdsc_ahb_clk", + .parent_names = (const char *[]){ + "disp_cc_mdss_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_pclk0_clk = { + .halt_reg = 0x1168, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1168, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_pclk0_clk", + .parent_names = (const char *[]){ + "disp_cc_mdss_pclk0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_rot_clk = { + .halt_reg = 0x1018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_rot_clk", + .parent_names = (const char *[]){ + "disp_cc_mdss_rot_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_rscc_ahb_clk = { + .halt_reg = 0x200c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x200c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_rscc_ahb_clk", + .parent_names = (const char *[]){ + "disp_cc_mdss_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_rscc_vsync_clk = { + .halt_reg = 0x2008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_rscc_vsync_clk", + .parent_names = (const char *[]){ + "disp_cc_mdss_vsync_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch disp_cc_mdss_vsync_clk = { + .halt_reg = 0x1028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "disp_cc_mdss_vsync_clk", + .parent_names = (const char *[]){ + "disp_cc_mdss_vsync_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap *disp_cc_khaje_clocks[] = { + [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr, + [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr, + [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr, + [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr, + [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr, + [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr, + [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr, + [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr, + [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr, + [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr, + [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr, + [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr, + [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr, + [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr, + [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr, + [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr, + [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr, + [DISP_CC_MDSS_RSCC_VSYNC_CLK] = 
&disp_cc_mdss_rscc_vsync_clk.clkr, + [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr, + [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr, + [DISP_CC_PLL0] = &disp_cc_pll0.clkr, +}; + +static const struct regmap_config disp_cc_khaje_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x10000, + .fast_io = true, +}; + +static const struct qcom_cc_desc disp_cc_khaje_desc = { + .config = &disp_cc_khaje_regmap_config, + .clks = disp_cc_khaje_clocks, + .num_clks = ARRAY_SIZE(disp_cc_khaje_clocks), +}; + +static const struct of_device_id disp_cc_khaje_match_table[] = { + { .compatible = "qcom,khaje-dispcc" }, + { } +}; +MODULE_DEVICE_TABLE(of, disp_cc_khaje_match_table); + +static int disp_cc_khaje_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + struct clk *clk; + int ret; + + vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx"); + if (IS_ERR(vdd_cx.regulator[0])) { + if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER) + dev_err(&pdev->dev, + "Unable to get vdd_cx regulator\n"); + return PTR_ERR(vdd_cx.regulator[0]); + } + + clk = clk_get(&pdev->dev, "cfg_ahb_clk"); + if (IS_ERR(clk)) { + if (PTR_ERR(clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "Unable to get ahb clock handle\n"); + return PTR_ERR(clk); + } + clk_put(clk); + + regmap = qcom_cc_map(pdev, &disp_cc_khaje_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + /* + * Keep the clock always-ON + * DISP_CC_SLEEP_CLK, DISP_CC_XO_CLK + */ + regmap_update_bits(regmap, 0x5004, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x5008, BIT(0), BIT(0)); + + clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); + + ret = qcom_cc_really_probe(pdev, &disp_cc_khaje_desc, regmap); + if (ret) { + dev_err(&pdev->dev, "Failed to register DISP CC clocks\n"); + return ret; + } + + dev_info(&pdev->dev, "Registered DISP CC clocks\n"); + + return ret; +} + +static struct platform_driver disp_cc_khaje_driver = { + .probe = disp_cc_khaje_probe, + .driver = { + .name = "disp_cc-khaje", + .of_match_table = disp_cc_khaje_match_table, + }, +}; + +static int __init disp_cc_khaje_init(void) +{ + return platform_driver_register(&disp_cc_khaje_driver); +} +subsys_initcall(disp_cc_khaje_init); + +static void __exit disp_cc_khaje_exit(void) +{ + platform_driver_unregister(&disp_cc_khaje_driver); +} +module_exit(disp_cc_khaje_exit); + +MODULE_DESCRIPTION("QTI DISP_CC KHAJE Driver"); diff --git a/drivers/clk/qcom/gcc-khaje.c b/drivers/clk/qcom/gcc-khaje.c new file mode 100644 index 000000000000..cd0b8151d384 --- /dev/null +++ b/drivers/clk/qcom/gcc-khaje.c @@ -0,0 +1,3720 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "clk-regmap-divider.h" +#include "clk-regmap-mux.h" +#include "common.h" +#include "reset.h" +#include "vdd-level-bengal.h" + +static DEFINE_VDD_REGULATORS(vdd_cx, VDD_HIGH + 1, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_mx, VDD_HIGH + 1, 1, vdd_corner); + +enum { + P_BI_TCXO, + P_GPLL0_OUT_EVEN, + P_GPLL0_OUT_MAIN, + P_GPLL10_OUT_MAIN, + P_GPLL11_OUT_EVEN, + P_GPLL11_OUT_MAIN, + P_GPLL11_OUT_ODD, + P_GPLL3_OUT_EVEN, + P_GPLL3_OUT_MAIN, + P_GPLL4_OUT_MAIN, + P_GPLL5_OUT_MAIN, + P_GPLL6_OUT_EVEN, + P_GPLL6_OUT_MAIN, + P_GPLL7_OUT_MAIN, + P_GPLL8_OUT_EVEN, + P_GPLL8_OUT_MAIN, + P_GPLL9_OUT_EARLY, + P_GPLL9_OUT_MAIN, + P_SLEEP_CLK, + P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, +}; + +static const struct parent_map gcc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL0_OUT_EVEN, 2 }, +}; + +static const char * const gcc_parent_names_0[] = { + "bi_tcxo", + "gpll0", + "gpll0_out_even", +}; + +static const struct parent_map gcc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL0_OUT_EVEN, 2 }, + { P_GPLL6_OUT_EVEN, 4 }, +}; + +static const char * const gcc_parent_names_1[] = { + "bi_tcxo", + "gpll0", + "gpll0_out_even", + "gpll6_out_even", +}; + +static const struct parent_map gcc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL0_OUT_EVEN, 2 }, + { P_SLEEP_CLK, 5 }, +}; + +static const char * const gcc_parent_names_2[] = { + "bi_tcxo", + "gpll0", + "gpll0_out_even", + "sleep_clk", +}; + +static const struct parent_map gcc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL9_OUT_EARLY, 2 }, + { P_GPLL10_OUT_MAIN, 3 }, + { P_GPLL9_OUT_MAIN, 5 }, + { P_GPLL3_OUT_EVEN, 6 }, +}; + +static const char * const gcc_parent_names_3[] = { + "bi_tcxo", + "gpll0", + "gpll9", + "gpll10", + "gpll9_out_main", + "gpll3_out_even", +}; + +static const struct parent_map gcc_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL0_OUT_EVEN, 2 }, + { P_GPLL4_OUT_MAIN, 5 }, + { P_GPLL3_OUT_EVEN, 6 }, +}; + +static const char * const gcc_parent_names_4[] = { + "bi_tcxo", + "gpll0", + "gpll0_out_even", + "gpll4", + "gpll3_out_even", +}; + +static const struct parent_map gcc_parent_map_5[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL8_OUT_MAIN, 2 }, + { P_GPLL10_OUT_MAIN, 3 }, + { P_GPLL8_OUT_EVEN, 4 }, + { P_GPLL9_OUT_MAIN, 5 }, + { P_GPLL3_OUT_EVEN, 6 }, +}; + +static const char * const gcc_parent_names_5[] = { + "bi_tcxo", + "gpll0", + "gpll8", + "gpll10", + "gpll8_out_even", + "gpll9_out_main", + "gpll3_out_even", +}; + +static const struct parent_map gcc_parent_map_6[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL8_OUT_MAIN, 2 }, + { P_GPLL5_OUT_MAIN, 3 }, + { P_GPLL6_OUT_EVEN, 4 }, + { P_GPLL9_OUT_MAIN, 5 }, + { P_GPLL3_OUT_MAIN, 6 }, +}; + +static const char * const gcc_parent_names_6[] = { + "bi_tcxo", + "gpll0", + "gpll8", + "gpll5", + "gpll6_out_even", + "gpll9_out_main", + "gpll3", +}; + +static const struct parent_map gcc_parent_map_7[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL0_OUT_EVEN, 2 }, + { P_GPLL10_OUT_MAIN, 3 }, + { P_GPLL4_OUT_MAIN, 5 }, + { P_GPLL3_OUT_MAIN, 6 }, +}; + +static const char * const gcc_parent_names_7[] = { + "bi_tcxo", + "gpll0", + "gpll0_out_even", + "gpll10", + "gpll4", + "gpll3", +}; + +static const struct parent_map 
gcc_parent_map_8[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL8_OUT_MAIN, 2 }, + { P_GPLL10_OUT_MAIN, 3 }, + { P_GPLL8_OUT_EVEN, 4 }, + { P_GPLL9_OUT_MAIN, 5 }, + { P_GPLL3_OUT_MAIN, 6 }, +}; + +static const char * const gcc_parent_names_8[] = { + "bi_tcxo", + "gpll0", + "gpll8", + "gpll10", + "gpll8_out_even", + "gpll9_out_main", + "gpll3", +}; + +static const struct parent_map gcc_parent_map_9[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL0_OUT_EVEN, 2 }, + { P_GPLL10_OUT_MAIN, 3 }, + { P_GPLL8_OUT_EVEN, 4 }, + { P_GPLL9_OUT_MAIN, 5 }, + { P_GPLL3_OUT_MAIN, 6 }, +}; + +static const char * const gcc_parent_names_9[] = { + "bi_tcxo", + "gpll0", + "gpll0_out_even", + "gpll10", + "gpll8_out_even", + "gpll9_out_main", + "gpll3", +}; + +static const struct parent_map gcc_parent_map_10[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL8_OUT_MAIN, 2 }, + { P_GPLL10_OUT_MAIN, 3 }, + { P_GPLL6_OUT_MAIN, 4 }, + { P_GPLL9_OUT_MAIN, 5 }, + { P_GPLL3_OUT_EVEN, 6 }, +}; + +static const char * const gcc_parent_names_10[] = { + "bi_tcxo", + "gpll0", + "gpll8", + "gpll10", + "gpll6", + "gpll9_out_main", + "gpll3_out_even", +}; + +static const struct parent_map gcc_parent_map_11[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL0_OUT_EVEN, 2 }, + { P_GPLL7_OUT_MAIN, 3 }, + { P_GPLL4_OUT_MAIN, 5 }, +}; + +static const char * const gcc_parent_names_11[] = { + "bi_tcxo", + "gpll0", + "gpll0_out_even", + "gpll7", + "gpll4", +}; + +static const struct parent_map gcc_parent_map_12[] = { + { P_BI_TCXO, 0 }, + { P_SLEEP_CLK, 5 }, +}; + +static const char * const gcc_parent_names_12[] = { + "bi_tcxo", + "sleep_clk", +}; + +static const struct parent_map gcc_parent_map_13[] = { + { P_BI_TCXO, 0 }, + { P_GPLL11_OUT_MAIN, 1 }, + { P_GPLL11_OUT_EVEN, 2 }, + { P_GPLL11_OUT_ODD, 3 }, +}; + +static const char * const gcc_parent_names_13[] = { + "bi_tcxo", + "gpll11", + "gpll11", + "gpll11", +}; + +static const struct parent_map gcc_parent_map_14[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL6_OUT_EVEN, 4 }, +}; + +static const char * const gcc_parent_names_14[] = { + "bi_tcxo", + "gpll0", + "gpll6_out_even", +}; + +static const struct parent_map gcc_parent_map_15[] = { + { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const char * const gcc_parent_names_15[] = { + "usb3_phy_wrapper_gcc_usb30_pipe_clk", + "bi_tcxo", +}; + +static struct pll_vco lucid_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +static struct pll_vco zonda_vco[] = { + { 595200000, 3600000000, 0 }, +}; + +static struct clk_alpha_pll gpll0 = { + .offset = 0x0, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .enable_reg = 0x79000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpll0", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1500000000, + [VDD_NOMINAL] = 1750000000, + [VDD_HIGH] = 2000000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_gpll0_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv gpll0_out_even = { + .offset = 0x0, + .post_div_shift = 8, + .post_div_table = post_div_table_gpll0_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_even), + .width = 4, + .regs = 
clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll0_out_even", + .parent_names = (const char *[]){ "gpll0" }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_lucid_ops, + }, +}; + +static struct clk_alpha_pll gpll1 = { + .offset = 0x1000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .enable_reg = 0x79000, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gpll1", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1500000000, + [VDD_NOMINAL] = 1750000000, + [VDD_HIGH] = 2000000000}, + }, + }, +}; + +/* 1152MHz configuration */ +static const struct alpha_pll_config gpll10_config = { + .l = 0x3C, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002261, + .config_ctl_hi1_val = 0x329A299C, + .user_ctl_val = 0x00000001, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x00000000, +}; + +static struct clk_alpha_pll gpll10 = { + .offset = 0xa000, + .vco_table = lucid_vco, + .num_vco = ARRAY_SIZE(lucid_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .flags = SUPPORTS_FSM_LEGACY_MODE, + .clkr = { + .enable_reg = 0x79000, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gpll10", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1500000000, + [VDD_NOMINAL] = 1750000000, + [VDD_HIGH] = 2000000000}, + }, + }, +}; + +/* 600MHz configuration */ +static const struct alpha_pll_config gpll11_config = { + .l = 0x1F, + .alpha = 0x4000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002261, + .config_ctl_hi1_val = 0x329A299C, + .user_ctl_val = 0x00000001, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x00000000, +}; + +static struct clk_alpha_pll gpll11 = { + .offset = 0xb000, + .vco_table = lucid_vco, + .num_vco = ARRAY_SIZE(lucid_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_LEGACY_MODE, + .clkr = { + .enable_reg = 0x79000, + .enable_mask = BIT(11), + .hw.init = &(struct clk_init_data){ + .name = "gpll11", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1500000000, + [VDD_NOMINAL] = 1750000000, + [VDD_HIGH] = 2000000000}, + }, + }, +}; + +static struct clk_alpha_pll gpll3 = { + .offset = 0x3000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .enable_reg = 0x79000, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gpll3", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1500000000, + [VDD_NOMINAL] = 1750000000, + [VDD_HIGH] = 2000000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_gpll3_out_even[] = { + { 0x1, 2 }, + { } +}; + 
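[Editor's note, not part of the patch: a minimal sketch of how the "600MHz"/"1152MHz"/"400MHz configuration" comments in this hunk follow from the .l/.alpha values, assuming the 19.2 MHz bi_tcxo reference and the 16-bit fractional alpha these Lucid PLL configs use, i.e. f_out = f_ref * (L + alpha / 2^16). The helper below is hypothetical and only reproduces that arithmetic; it is not a driver API.]

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper: Lucid PLL output rate from integer L and 16-bit alpha. */
    static uint64_t lucid_pll_rate(uint64_t fref, uint32_t l, uint32_t alpha)
    {
            /* f_out = f_ref * L + f_ref * alpha / 2^16 */
            return fref * l + ((fref * alpha) >> 16);
    }

    int main(void)
    {
            uint64_t xo = 19200000; /* bi_tcxo */

            /* gpll10 "1152MHz configuration": L=0x3C, alpha=0x0  -> 1152000000 Hz */
            printf("gpll10: %llu Hz\n",
                   (unsigned long long)lucid_pll_rate(xo, 0x3C, 0x0));
            /* gpll11 "600MHz configuration":  L=0x1F, alpha=0x4000 -> 600000000 Hz */
            printf("gpll11: %llu Hz\n",
                   (unsigned long long)lucid_pll_rate(xo, 0x1F, 0x4000));
            /* gpll8 "400MHz configuration":   L=0x14, alpha=0xD555 -> ~400000000 Hz */
            printf("gpll8:  %llu Hz\n",
                   (unsigned long long)lucid_pll_rate(xo, 0x14, 0xD555));
            return 0;
    }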
+static struct clk_alpha_pll_postdiv gpll3_out_even = { + .offset = 0x3000, + .post_div_shift = 8, + .post_div_table = post_div_table_gpll3_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_gpll3_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll3_out_even", + .parent_names = (const char *[]){ "gpll3" }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_lucid_ops, + }, +}; + +static struct clk_alpha_pll gpll4 = { + .offset = 0x4000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .enable_reg = 0x79000, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gpll4", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1500000000, + [VDD_NOMINAL] = 1750000000, + [VDD_HIGH] = 2000000000}, + }, + }, +}; + +static struct clk_alpha_pll gpll5 = { + .offset = 0x5000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .enable_reg = 0x79000, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gpll5", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1500000000, + [VDD_NOMINAL] = 1750000000, + [VDD_HIGH] = 2000000000}, + }, + }, +}; + +static struct clk_alpha_pll gpll6 = { + .offset = 0x6000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .enable_reg = 0x79000, + .enable_mask = BIT(6), + .hw.init = &(struct clk_init_data){ + .name = "gpll6", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1500000000, + [VDD_NOMINAL] = 1750000000, + [VDD_HIGH] = 2000000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_gpll6_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv gpll6_out_even = { + .offset = 0x6000, + .post_div_shift = 8, + .post_div_table = post_div_table_gpll6_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll6_out_even", + .parent_names = (const char *[]){ "gpll6" }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_lucid_ops, + }, +}; + +static struct clk_alpha_pll gpll7 = { + .offset = 0x7000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .enable_reg = 0x79000, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "gpll7", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1500000000, + [VDD_NOMINAL] = 1750000000, + [VDD_HIGH] = 2000000000}, + }, + }, +}; + +/* 400MHz configuration */ +static const struct alpha_pll_config gpll8_config = { + .l = 0x14, + .alpha = 0xD555, + .config_ctl_val = 0x20485699, + 
.config_ctl_hi_val = 0x00002261, + .config_ctl_hi1_val = 0x329A299C, + .user_ctl_val = 0x00000101, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x00000000, +}; + +static struct clk_alpha_pll gpll8 = { + .offset = 0x8000, + .vco_table = lucid_vco, + .num_vco = ARRAY_SIZE(lucid_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_LEGACY_MODE, + .clkr = { + .enable_reg = 0x79000, + .enable_mask = BIT(8), + .hw.init = &(struct clk_init_data){ + .name = "gpll8", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1500000000, + [VDD_NOMINAL] = 1750000000, + [VDD_HIGH] = 2000000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_gpll8_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv gpll8_out_even = { + .offset = 0x8000, + .post_div_shift = 8, + .post_div_table = post_div_table_gpll8_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll8_out_even", + .parent_names = (const char *[]){ "gpll8" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ops, + }, +}; + +/* 1440MHz configuration */ +static const struct alpha_pll_config gpll9_config = { + .l = 0x4B, + .alpha = 0x0, + .config_ctl_val = 0x08200800, + .config_ctl_hi_val = 0x05022011, + .config_ctl_hi1_val = 0x08000000, + .user_ctl_val = 0x00000301, +}; + +static struct clk_alpha_pll gpll9 = { + .offset = 0x9000, + .vco_table = zonda_vco, + .num_vco = ARRAY_SIZE(zonda_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA], + .clkr = { + .enable_reg = 0x79000, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "gpll9", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_zonda_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 1800000000, + [VDD_LOW] = 2400000000, + [VDD_NOMINAL] = 3000000000, + [VDD_HIGH] = 3600000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_gpll9_out_main[] = { + { 0x3, 4 }, + { } +}; + +static struct clk_alpha_pll_postdiv gpll9_out_main = { + .offset = 0x9000, + .post_div_shift = 8, + .post_div_table = post_div_table_gpll9_out_main, + .num_post_div = ARRAY_SIZE(post_div_table_gpll9_out_main), + .width = 2, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA], + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll9_out_main", + .parent_names = (const char *[]){ "gpll9" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_zonda_ops, + }, +}; + +static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = { + .reg = 0x1a04c, + .shift = 0, + .width = 4, + .clkr.hw.init = &(struct clk_init_data) { + .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src", + .parent_names = + (const char *[]){ "gcc_usb30_prim_mock_utmi_clk_src" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_axi_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0), + F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0), + F(300000000, 
P_GPLL0_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_camss_axi_clk_src = { + .cmd_rcgr = 0x5802c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_7, + .freq_tbl = ftbl_gcc_camss_axi_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_axi_clk_src", + .parent_names = gcc_parent_names_7, + .num_parents = ARRAY_SIZE(gcc_parent_names_7), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 150000000, + [VDD_LOW_L1] = 240000000, + [VDD_NOMINAL] = 300000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_cci_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_camss_cci_clk_src = { + .cmd_rcgr = 0x56000, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_9, + .freq_tbl = ftbl_gcc_camss_cci_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_cci_clk_src", + .parent_names = gcc_parent_names_9, + .num_parents = ARRAY_SIZE(gcc_parent_names_9), + .flags = CLK_OPS_PARENT_ENABLE, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 37500000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_csi0phytimer_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0), + F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_camss_csi0phytimer_clk_src = { + .cmd_rcgr = 0x59000, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi0phytimer_clk_src", + .parent_names = gcc_parent_names_4, + .num_parents = ARRAY_SIZE(gcc_parent_names_4), + .flags = CLK_OPS_PARENT_ENABLE, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 300000000}, + }, +}; + +static struct clk_rcg2 gcc_camss_csi1phytimer_clk_src = { + .cmd_rcgr = 0x5901c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi1phytimer_clk_src", + .parent_names = gcc_parent_names_4, + .num_parents = ARRAY_SIZE(gcc_parent_names_4), + .flags = CLK_OPS_PARENT_ENABLE, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 300000000}, + }, +}; + +static struct clk_rcg2 gcc_camss_csi2phytimer_clk_src = { + .cmd_rcgr = 0x59038, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi2phytimer_clk_src", + .parent_names = gcc_parent_names_4, + .num_parents = ARRAY_SIZE(gcc_parent_names_4), + .flags = CLK_OPS_PARENT_ENABLE, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 300000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_mclk0_clk_src[] = { + F(19200000, P_BI_TCXO, 
1, 0, 0), + F(24000000, P_GPLL9_OUT_MAIN, 1, 1, 15), + F(65454545, P_GPLL9_OUT_EARLY, 11, 1, 2), + { } +}; + +static struct clk_rcg2 gcc_camss_mclk0_clk_src = { + .cmd_rcgr = 0x51000, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_camss_mclk0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_mclk0_clk_src", + .parent_names = gcc_parent_names_3, + .num_parents = ARRAY_SIZE(gcc_parent_names_3), + .flags = CLK_OPS_PARENT_ENABLE, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 65454545}, + }, +}; + +static struct clk_rcg2 gcc_camss_mclk1_clk_src = { + .cmd_rcgr = 0x5101c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_camss_mclk0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_mclk1_clk_src", + .parent_names = gcc_parent_names_3, + .num_parents = ARRAY_SIZE(gcc_parent_names_3), + .flags = CLK_OPS_PARENT_ENABLE, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 65454545}, + }, +}; + +static struct clk_rcg2 gcc_camss_mclk2_clk_src = { + .cmd_rcgr = 0x51038, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_camss_mclk0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_mclk2_clk_src", + .parent_names = gcc_parent_names_3, + .num_parents = ARRAY_SIZE(gcc_parent_names_3), + .flags = CLK_OPS_PARENT_ENABLE, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 65454545}, + }, +}; + +static struct clk_rcg2 gcc_camss_mclk3_clk_src = { + .cmd_rcgr = 0x51054, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_camss_mclk0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_mclk3_clk_src", + .parent_names = gcc_parent_names_3, + .num_parents = ARRAY_SIZE(gcc_parent_names_3), + .flags = CLK_OPS_PARENT_ENABLE, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 65454545}, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_ope_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0), + F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_camss_ope_ahb_clk_src = { + .cmd_rcgr = 0x55024, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_8, + .freq_tbl = ftbl_gcc_camss_ope_ahb_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_ope_ahb_clk_src", + .parent_names = gcc_parent_names_8, + .num_parents = ARRAY_SIZE(gcc_parent_names_8), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 171428571, + [VDD_NOMINAL] = 240000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_ope_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(200000000, P_GPLL8_OUT_EVEN, 1, 0, 0), + F(266600000, P_GPLL8_OUT_EVEN, 1, 0, 0), + F(480000000, P_GPLL8_OUT_EVEN, 1, 0, 0), + F(580000000, P_GPLL8_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_camss_ope_clk_src = { + .cmd_rcgr = 
0x55004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_8, + .freq_tbl = ftbl_gcc_camss_ope_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_ope_clk_src", + .parent_names = gcc_parent_names_8, + .num_parents = ARRAY_SIZE(gcc_parent_names_8), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 200000000, + [VDD_LOW_L1] = 266600000, + [VDD_NOMINAL] = 480000000, + [VDD_HIGH] = 580000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_tfe_0_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(128000000, P_GPLL10_OUT_MAIN, 9, 0, 0), + F(135529412, P_GPLL10_OUT_MAIN, 8.5, 0, 0), + F(144000000, P_GPLL10_OUT_MAIN, 8, 0, 0), + F(153600000, P_GPLL10_OUT_MAIN, 7.5, 0, 0), + F(164571429, P_GPLL10_OUT_MAIN, 7, 0, 0), + F(177230769, P_GPLL10_OUT_MAIN, 6.5, 0, 0), + F(192000000, P_GPLL10_OUT_MAIN, 6, 0, 0), + F(209454545, P_GPLL10_OUT_MAIN, 5.5, 0, 0), + F(230400000, P_GPLL10_OUT_MAIN, 5, 0, 0), + F(256000000, P_GPLL10_OUT_MAIN, 4.5, 0, 0), + F(288000000, P_GPLL10_OUT_MAIN, 4, 0, 0), + F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0), + F(329142857, P_GPLL10_OUT_MAIN, 3.5, 0, 0), + F(384000000, P_GPLL10_OUT_MAIN, 3, 0, 0), + F(460800000, P_GPLL10_OUT_MAIN, 2.5, 0, 0), + F(576000000, P_GPLL10_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_camss_tfe_0_clk_src = { + .cmd_rcgr = 0x52004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_5, + .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_tfe_0_clk_src", + .parent_names = gcc_parent_names_5, + .num_parents = ARRAY_SIZE(gcc_parent_names_5), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 300000000, + [VDD_LOW_L1] = 460800000, + [VDD_NOMINAL] = 576000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_tfe_0_csid_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0), + F(266571429, P_GPLL5_OUT_MAIN, 3.5, 0, 0), + F(426400000, P_GPLL3_OUT_MAIN, 2.5, 0, 0), + F(466500000, P_GPLL5_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_camss_tfe_0_csid_clk_src = { + .cmd_rcgr = 0x52094, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_6, + .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_tfe_0_csid_clk_src", + .parent_names = gcc_parent_names_6, + .num_parents = ARRAY_SIZE(gcc_parent_names_6), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 266571429, + [VDD_LOW_L1] = 426400000, + [VDD_NOMINAL] = 466500000}, + }, +}; + +static struct clk_rcg2 gcc_camss_tfe_1_clk_src = { + .cmd_rcgr = 0x52024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_5, + .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_tfe_1_clk_src", + .parent_names = gcc_parent_names_5, + .num_parents = ARRAY_SIZE(gcc_parent_names_5), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 300000000, + 
[VDD_LOW_L1] = 460800000, + [VDD_NOMINAL] = 576000000}, + }, +}; + +static struct clk_rcg2 gcc_camss_tfe_1_csid_clk_src = { + .cmd_rcgr = 0x520b4, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_6, + .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_tfe_1_csid_clk_src", + .parent_names = gcc_parent_names_6, + .num_parents = ARRAY_SIZE(gcc_parent_names_6), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 266571429, + [VDD_LOW_L1] = 426400000, + [VDD_NOMINAL] = 466500000}, + }, +}; + +static struct clk_rcg2 gcc_camss_tfe_2_clk_src = { + .cmd_rcgr = 0x52044, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_5, + .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_tfe_2_clk_src", + .parent_names = gcc_parent_names_5, + .num_parents = ARRAY_SIZE(gcc_parent_names_5), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 300000000, + [VDD_LOW_L1] = 460800000, + [VDD_NOMINAL] = 576000000}, + }, +}; + +static struct clk_rcg2 gcc_camss_tfe_2_csid_clk_src = { + .cmd_rcgr = 0x520d4, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_6, + .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_tfe_2_csid_clk_src", + .parent_names = gcc_parent_names_6, + .num_parents = ARRAY_SIZE(gcc_parent_names_6), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 266571429, + [VDD_LOW_L1] = 426400000, + [VDD_NOMINAL] = 466500000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_tfe_cphy_rx_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(256000000, P_GPLL6_OUT_MAIN, 3, 0, 0), + F(384000000, P_GPLL6_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_camss_tfe_cphy_rx_clk_src = { + .cmd_rcgr = 0x52064, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_10, + .freq_tbl = ftbl_gcc_camss_tfe_cphy_rx_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_tfe_cphy_rx_clk_src", + .parent_names = gcc_parent_names_10, + .num_parents = ARRAY_SIZE(gcc_parent_names_10), + .flags = CLK_OPS_PARENT_ENABLE, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 256000000, + [VDD_LOW_L1] = 384000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_camss_top_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(40000000, P_GPLL0_OUT_EVEN, 7.5, 0, 0), + F(80000000, P_GPLL0_OUT_MAIN, 7.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_camss_top_ahb_clk_src = { + .cmd_rcgr = 0x58010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_7, + .freq_tbl = ftbl_gcc_camss_top_ahb_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_camss_top_ahb_clk_src", + .parent_names = gcc_parent_names_7, + .num_parents = ARRAY_SIZE(gcc_parent_names_7), + .flags = CLK_OPS_PARENT_ENABLE, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned 
long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 80000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = { + F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0), + F(200000000, P_GPLL0_OUT_EVEN, 1.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_gp1_clk_src = { + .cmd_rcgr = 0x4d004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp1_clk_src", + .parent_names = gcc_parent_names_2, + .num_parents = ARRAY_SIZE(gcc_parent_names_2), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 50000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 200000000}, + }, +}; + +static struct clk_rcg2 gcc_gp2_clk_src = { + .cmd_rcgr = 0x4e004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp2_clk_src", + .parent_names = gcc_parent_names_2, + .num_parents = ARRAY_SIZE(gcc_parent_names_2), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 50000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 200000000}, + }, +}; + +static struct clk_rcg2 gcc_gp3_clk_src = { + .cmd_rcgr = 0x4f004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp3_clk_src", + .parent_names = gcc_parent_names_2, + .num_parents = ARRAY_SIZE(gcc_parent_names_2), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 50000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 200000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(60000000, P_GPLL0_OUT_EVEN, 5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pdm2_clk_src = { + .cmd_rcgr = 0x20010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pdm2_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pdm2_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = ARRAY_SIZE(gcc_parent_names_0), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 60000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = { + F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625), + F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625), + F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75), + F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25), + F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75), + F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0), + F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15), + F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25), + F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0), + F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375), + F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75), + F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625), + F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0), + F(128000000, P_GPLL6_OUT_EVEN, 3, 0, 0), + { } +}; + +static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = { + .name = "gcc_qupv3_wrap0_s0_clk_src", + .parent_names = 
gcc_parent_names_1, + .num_parents = ARRAY_SIZE(gcc_parent_names_1), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = { + .cmd_rcgr = 0x1f148, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = { + .name = "gcc_qupv3_wrap0_s1_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = ARRAY_SIZE(gcc_parent_names_1), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { + .cmd_rcgr = 0x1f278, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = { + .name = "gcc_qupv3_wrap0_s2_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = ARRAY_SIZE(gcc_parent_names_1), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { + .cmd_rcgr = 0x1f3a8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = { + .name = "gcc_qupv3_wrap0_s3_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = ARRAY_SIZE(gcc_parent_names_1), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { + .cmd_rcgr = 0x1f4d8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = { + .name = "gcc_qupv3_wrap0_s4_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = ARRAY_SIZE(gcc_parent_names_1), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { + .cmd_rcgr = 0x1f608, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = { + .name = "gcc_qupv3_wrap0_s5_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = ARRAY_SIZE(gcc_parent_names_1), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, +}; + +static struct clk_rcg2 
gcc_qupv3_wrap0_s5_clk_src = { + .cmd_rcgr = 0x1f738, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init, +}; + +static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = { + F(144000, P_BI_TCXO, 16, 3, 25), + F(400000, P_BI_TCXO, 12, 1, 4), + F(20000000, P_GPLL0_OUT_EVEN, 5, 1, 3), + F(25000000, P_GPLL0_OUT_EVEN, 6, 1, 2), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0), + F(192000000, P_GPLL6_OUT_EVEN, 2, 0, 0), + F(384000000, P_GPLL6_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc1_apps_clk_src = { + .cmd_rcgr = 0x38028, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_apps_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = ARRAY_SIZE(gcc_parent_names_1), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .flags = CLK_OPS_PARENT_ENABLE, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 100000000, + [VDD_LOW_L1] = 384000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = { + F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0), + F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0), + F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = { + .cmd_rcgr = 0x38010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_ice_core_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = ARRAY_SIZE(gcc_parent_names_0), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 100000000, + [VDD_LOW] = 150000000, + [VDD_LOW_L1] = 300000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = { + F(400000, P_BI_TCXO, 12, 1, 4), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0), + F(202000000, P_GPLL7_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc2_apps_clk_src = { + .cmd_rcgr = 0x1e00c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_11, + .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_apps_clk_src", + .parent_names = gcc_parent_names_11, + .num_parents = ARRAY_SIZE(gcc_parent_names_11), + .ops = &clk_rcg2_ops, + .flags = CLK_OPS_PARENT_ENABLE, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 100000000, + [VDD_LOW_L1] = 202000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = { + F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = { + .cmd_rcgr = 0x45020, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_axi_clk_src", + 
.parent_names = gcc_parent_names_0, + .num_parents = ARRAY_SIZE(gcc_parent_names_0), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 50000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 200000000, + [VDD_HIGH] = 240000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = { + F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0), + F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0), + F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0), + F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = { + .cmd_rcgr = 0x45048, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_ice_core_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = ARRAY_SIZE(gcc_parent_names_0), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 150000000, + [VDD_NOMINAL] = 300000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = { + F(9600000, P_BI_TCXO, 2, 0, 0), + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = { + .cmd_rcgr = 0x4507c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_phy_aux_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = ARRAY_SIZE(gcc_parent_names_0), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = { + F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0), + F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0), + F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = { + .cmd_rcgr = 0x45060, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_unipro_core_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = ARRAY_SIZE(gcc_parent_names_0), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 37500000, + [VDD_LOW] = 75000000, + [VDD_NOMINAL] = 150000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = { + F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0), + F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb30_prim_master_clk_src = { + .cmd_rcgr = 0x1a01c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_master_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = ARRAY_SIZE(gcc_parent_names_0), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 66666667, + [VDD_LOW] = 133333333, + [VDD_NOMINAL] = 200000000, + [VDD_HIGH] 
= 240000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = { + .cmd_rcgr = 0x1a034, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_mock_utmi_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = ARRAY_SIZE(gcc_parent_names_0), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = { + .cmd_rcgr = 0x1a060, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_12, + .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_aux_clk_src", + .parent_names = gcc_parent_names_12, + .num_parents = ARRAY_SIZE(gcc_parent_names_12), + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_video_venus_clk_src[] = { + F(133333333, P_GPLL11_OUT_MAIN, 4.5, 0, 0), + F(240000000, P_GPLL11_OUT_MAIN, 2.5, 0, 0), + F(300000000, P_GPLL11_OUT_MAIN, 2, 0, 0), + F(384000000, P_GPLL11_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_video_venus_clk_src = { + .cmd_rcgr = 0x58060, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_13, + .freq_tbl = ftbl_gcc_video_venus_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_video_venus_clk_src", + .parent_names = gcc_parent_names_13, + .num_parents = ARRAY_SIZE(gcc_parent_names_13), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 133333333, + [VDD_LOW] = 240000000, + [VDD_LOW_L1] = 300000000, + [VDD_NOMINAL] = 384000000}, + }, +}; + +static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = { + .reg = 0x1a05c, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_15, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_pipe_clk_src", + .parent_names = gcc_parent_names_15, + .num_parents = ARRAY_SIZE(gcc_parent_names_15), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_branch gcc_ahb2phy_csi_clk = { + .halt_reg = 0x1d004, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x1d004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x1d004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ahb2phy_csi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ahb2phy_usb_clk = { + .halt_reg = 0x1d008, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x1d008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x1d008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ahb2phy_usb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_bimc_gpu_axi_clk = { + .halt_reg = 0x71154, + .halt_check = BRANCH_HALT_DELAY, + .hwcg_reg = 0x71154, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x71154, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_bimc_gpu_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_boot_rom_ahb_clk = { + .halt_reg = 0x23004, + 
.halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x23004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x79004, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gcc_boot_rom_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cam_throttle_nrt_clk = { + .halt_reg = 0x17070, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x17070, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x79004, + .enable_mask = BIT(27), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cam_throttle_nrt_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cam_throttle_rt_clk = { + .halt_reg = 0x1706c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x1706c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x79004, + .enable_mask = BIT(26), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cam_throttle_rt_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_ahb_clk = { + .halt_reg = 0x17008, + .halt_check = BRANCH_HALT_DELAY, + .hwcg_reg = 0x17008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x17008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camera_ahb_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_axi_clk = { + .halt_reg = 0x58044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x58044, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_axi_clk", + .parent_names = (const char *[]){ + "gcc_camss_axi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_cci_0_clk = { + .halt_reg = 0x56018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x56018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_cci_0_clk", + .parent_names = (const char *[]){ + "gcc_camss_cci_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_cphy_0_clk = { + .halt_reg = 0x52088, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x52088, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_cphy_0_clk", + .parent_names = (const char *[]){ + "gcc_camss_tfe_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_cphy_1_clk = { + .halt_reg = 0x5208c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5208c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_cphy_1_clk", + .parent_names = (const char *[]){ + "gcc_camss_tfe_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_cphy_2_clk = { + .halt_reg = 0x52090, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x52090, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_cphy_2_clk", + .parent_names = (const char *[]){ + "gcc_camss_tfe_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_csi0phytimer_clk = { + .halt_reg = 0x59018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x59018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi0phytimer_clk", + .parent_names = 
(const char *[]){ + "gcc_camss_csi0phytimer_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_csi1phytimer_clk = { + .halt_reg = 0x59034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x59034, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi1phytimer_clk", + .parent_names = (const char *[]){ + "gcc_camss_csi1phytimer_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_csi2phytimer_clk = { + .halt_reg = 0x59050, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x59050, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_csi2phytimer_clk", + .parent_names = (const char *[]){ + "gcc_camss_csi2phytimer_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_mclk0_clk = { + .halt_reg = 0x51018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x51018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_mclk0_clk", + .parent_names = (const char *[]){ + "gcc_camss_mclk0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_mclk1_clk = { + .halt_reg = 0x51034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x51034, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_mclk1_clk", + .parent_names = (const char *[]){ + "gcc_camss_mclk1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_mclk2_clk = { + .halt_reg = 0x51050, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x51050, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_mclk2_clk", + .parent_names = (const char *[]){ + "gcc_camss_mclk2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_mclk3_clk = { + .halt_reg = 0x5106c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5106c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_mclk3_clk", + .parent_names = (const char *[]){ + "gcc_camss_mclk3_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_nrt_axi_clk = { + .halt_reg = 0x58054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x58054, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_nrt_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_ope_ahb_clk = { + .halt_reg = 0x5503c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5503c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_ope_ahb_clk", + .parent_names = (const char *[]){ + "gcc_camss_ope_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_ope_clk = { + .halt_reg = 0x5501c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5501c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_ope_clk", + .parent_names = (const char *[]){ + 
"gcc_camss_ope_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_rt_axi_clk = { + .halt_reg = 0x5805c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5805c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_rt_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_tfe_0_clk = { + .halt_reg = 0x5201c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5201c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_tfe_0_clk", + .parent_names = (const char *[]){ + "gcc_camss_tfe_0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_tfe_0_cphy_rx_clk = { + .halt_reg = 0x5207c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5207c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_tfe_0_cphy_rx_clk", + .parent_names = (const char *[]){ + "gcc_camss_tfe_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_tfe_0_csid_clk = { + .halt_reg = 0x520ac, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x520ac, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_tfe_0_csid_clk", + .parent_names = (const char *[]){ + "gcc_camss_tfe_0_csid_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_tfe_1_clk = { + .halt_reg = 0x5203c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5203c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_tfe_1_clk", + .parent_names = (const char *[]){ + "gcc_camss_tfe_1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_tfe_1_cphy_rx_clk = { + .halt_reg = 0x52080, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x52080, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_tfe_1_cphy_rx_clk", + .parent_names = (const char *[]){ + "gcc_camss_tfe_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_tfe_1_csid_clk = { + .halt_reg = 0x520cc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x520cc, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_tfe_1_csid_clk", + .parent_names = (const char *[]){ + "gcc_camss_tfe_1_csid_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_tfe_2_clk = { + .halt_reg = 0x5205c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5205c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_tfe_2_clk", + .parent_names = (const char *[]){ + "gcc_camss_tfe_2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_tfe_2_cphy_rx_clk = { + .halt_reg = 0x52084, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x52084, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_tfe_2_cphy_rx_clk", + .parent_names = (const 
char *[]){ + "gcc_camss_tfe_cphy_rx_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_tfe_2_csid_clk = { + .halt_reg = 0x520ec, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x520ec, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_tfe_2_csid_clk", + .parent_names = (const char *[]){ + "gcc_camss_tfe_2_csid_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camss_top_ahb_clk = { + .halt_reg = 0x58028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x58028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camss_top_ahb_clk", + .parent_names = (const char *[]){ + "gcc_camss_top_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = { + .halt_reg = 0x1a084, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x1a084, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x1a084, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cfg_noc_usb3_prim_axi_clk", + .parent_names = (const char *[]){ + "gcc_usb30_prim_master_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_ahb_clk = { + .halt_reg = 0x1700c, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x1700c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x1700c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_ahb_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap_div gcc_disp_gpll0_clk_src = { + .reg = 0x17058, + .shift = 0, + .width = 4, + .clkr.hw.init = &(struct clk_init_data) { + .name = "gcc_disp_gpll0_clk_src", + .parent_names = + (const char *[]){ "gpll0" }, + .num_parents = 1, + .ops = &clk_regmap_div_ops, + }, +}; + +static struct clk_branch gcc_disp_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x79004, + .enable_mask = BIT(20), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_gpll0_div_clk_src", + .parent_names = (const char *[]){ + "gcc_disp_gpll0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_hf_axi_clk = { + .halt_reg = 0x17020, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x17020, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x17020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_hf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_sleep_clk = { + .halt_reg = 0x17074, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x17074, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x17074, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_throttle_core_clk = { + .halt_reg = 0x17064, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x17064, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7900c, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_throttle_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp1_clk = { + .halt_reg = 0x4d000, + .halt_check = BRANCH_HALT, + .clkr = { + 
.enable_reg = 0x4d000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp1_clk", + .parent_names = (const char *[]){ + "gcc_gp1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp2_clk = { + .halt_reg = 0x4e000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4e000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp2_clk", + .parent_names = (const char *[]){ + "gcc_gp2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp3_clk = { + .halt_reg = 0x4f000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4f000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp3_clk", + .parent_names = (const char *[]){ + "gcc_gp3_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x79004, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_gpll0_clk_src", + .parent_names = (const char *[]){ + "gpll0", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x79004, + .enable_mask = BIT(16), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_gpll0_div_clk_src", + .parent_names = (const char *[]){ + "gpll0_out_even", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_iref_clk = { + .halt_reg = 0x36100, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x36100, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_iref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_memnoc_gfx_clk = { + .halt_reg = 0x3600c, + .halt_check = BRANCH_VOTED, + .hwcg_reg = 0x3600c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x3600c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_memnoc_gfx_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = { + .halt_reg = 0x36018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x36018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_snoc_dvm_gfx_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_throttle_core_clk = { + .halt_reg = 0x36048, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x36048, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x79004, + .enable_mask = BIT(31), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_throttle_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm2_clk = { + .halt_reg = 0x2000c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2000c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm2_clk", + .parent_names = (const char *[]){ + "gcc_pdm2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_ahb_clk = { + .halt_reg = 0x20004, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x20004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 
0x20004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_xo4_clk = { + .halt_reg = 0x20008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x20008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm_xo4_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_prng_ahb_clk = { + .halt_reg = 0x21004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x21004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x79004, + .enable_mask = BIT(13), + .hw.init = &(struct clk_init_data){ + .name = "gcc_prng_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = { + .halt_reg = 0x17014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x17014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7900c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_camera_nrt_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_camera_rt_ahb_clk = { + .halt_reg = 0x17060, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x17060, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7900c, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_camera_rt_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_disp_ahb_clk = { + .halt_reg = 0x17018, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x17018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7900c, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_disp_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_gpu_cfg_ahb_clk = { + .halt_reg = 0x36040, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x36040, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7900c, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_gpu_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = { + .halt_reg = 0x17010, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x17010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x79004, + .enable_mask = BIT(25), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_video_vcodec_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = { + .halt_reg = 0x1f014, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x7900c, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_core_2x_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_core_clk = { + .halt_reg = 0x1f00c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x7900c, + .enable_mask = BIT(8), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s0_clk = { + .halt_reg = 0x1f144, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x7900c, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s0_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap0_s0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s1_clk = { + .halt_reg = 0x1f274, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + 
.enable_reg = 0x7900c, + .enable_mask = BIT(11), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s1_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap0_s1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s2_clk = { + .halt_reg = 0x1f3a4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x7900c, + .enable_mask = BIT(12), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s2_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap0_s2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s3_clk = { + .halt_reg = 0x1f4d4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x7900c, + .enable_mask = BIT(13), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s3_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap0_s3_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s4_clk = { + .halt_reg = 0x1f604, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x7900c, + .enable_mask = BIT(14), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s4_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap0_s4_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s5_clk = { + .halt_reg = 0x1f734, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x7900c, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s5_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap0_s5_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = { + .halt_reg = 0x1f004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x1f004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7900c, + .enable_mask = BIT(6), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_0_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = { + .halt_reg = 0x1f008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x1f008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7900c, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_0_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ahb_clk = { + .halt_reg = 0x38008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x38008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_apps_clk = { + .halt_reg = 0x38004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x38004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_apps_clk", + .parent_names = (const char *[]){ + "gcc_sdcc1_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT | CLK_ENABLE_HAND_OFF, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ice_core_clk = { + .halt_reg = 0x3800c, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x3800c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x3800c, + .enable_mask = BIT(0), + .hw.init = &(struct 
clk_init_data){ + .name = "gcc_sdcc1_ice_core_clk", + .parent_names = (const char *[]){ + "gcc_sdcc1_ice_core_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc2_ahb_clk = { + .halt_reg = 0x1e008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1e008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc2_apps_clk = { + .halt_reg = 0x1e004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1e004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_apps_clk", + .parent_names = (const char *[]){ + "gcc_sdcc2_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = { + .halt_reg = 0x2b06c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x2b06c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x79004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sys_noc_cpuss_ahb_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sys_noc_ufs_phy_axi_clk = { + .halt_reg = 0x45098, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x45098, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sys_noc_ufs_phy_axi_clk", + .parent_names = (const char *[]){ + "gcc_ufs_phy_axi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sys_noc_usb3_prim_axi_clk = { + .halt_reg = 0x1a080, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x1a080, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x1a080, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sys_noc_usb3_prim_axi_clk", + .parent_names = (const char *[]){ + "gcc_usb30_prim_master_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_clkref_clk = { + .halt_reg = 0x8c000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_ahb_clk = { + .halt_reg = 0x45014, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x45014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x45014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_axi_clk = { + .halt_reg = 0x45010, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x45010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x45010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_axi_clk", + .parent_names = (const char *[]){ + "gcc_ufs_phy_axi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_ice_core_clk = { + .halt_reg = 0x45044, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x45044, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x45044, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_ice_core_clk", + .parent_names = (const char *[]){ + "gcc_ufs_phy_ice_core_clk_src", + }, + .num_parents = 
1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_phy_aux_clk = { + .halt_reg = 0x45078, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x45078, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x45078, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_phy_aux_clk", + .parent_names = (const char *[]){ + "gcc_ufs_phy_phy_aux_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = { + .halt_reg = 0x4501c, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x4501c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_rx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = { + .halt_reg = 0x4509c, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x4509c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_rx_symbol_1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = { + .halt_reg = 0x45018, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x45018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_tx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_unipro_core_clk = { + .halt_reg = 0x45040, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x45040, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x45040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_unipro_core_clk", + .parent_names = (const char *[]){ + "gcc_ufs_phy_unipro_core_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_master_clk = { + .halt_reg = 0x1a010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1a010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_master_clk", + .parent_names = (const char *[]){ + "gcc_usb30_prim_master_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_mock_utmi_clk = { + .halt_reg = 0x1a018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1a018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_mock_utmi_clk", + .parent_names = (const char *[]){ + "gcc_usb30_prim_mock_utmi_postdiv_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_sleep_clk = { + .halt_reg = 0x1a014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1a014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_clkref_clk = { + .halt_reg = 0x9f000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9f000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = { + .halt_reg = 0x1a054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1a054, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = 
"gcc_usb3_prim_phy_com_aux_clk", + .parent_names = (const char *[]){ + "gcc_usb3_prim_phy_aux_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_pipe_clk = { + .halt_reg = 0x1a058, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x1a058, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x1a058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_pipe_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_vcodec0_axi_clk = { + .halt_reg = 0x6e008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6e008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_vcodec0_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_venus_ahb_clk = { + .halt_reg = 0x6e010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6e010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_venus_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_venus_ctl_axi_clk = { + .halt_reg = 0x6e004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6e004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_venus_ctl_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_ahb_clk = { + .halt_reg = 0x17004, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x17004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x17004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axi0_clk = { + .halt_reg = 0x1701c, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x1701c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x1701c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_axi0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_throttle_core_clk = { + .halt_reg = 0x17068, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x17068, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x79004, + .enable_mask = BIT(28), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_throttle_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_vcodec0_sys_clk = { + .halt_reg = 0x580a4, + .halt_check = BRANCH_HALT_DELAY, + .hwcg_reg = 0x580a4, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x580a4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_vcodec0_sys_clk", + .parent_names = (const char *[]){ + "gcc_video_venus_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_venus_ctl_clk = { + .halt_reg = 0x5808c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x5808c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_venus_ctl_clk", + .parent_names = (const char *[]){ + "gcc_video_venus_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_xo_clk = { + .halt_reg = 0x17024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x17024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_xo_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap *gcc_khaje_clocks[] = { + 
[GCC_AHB2PHY_CSI_CLK] = &gcc_ahb2phy_csi_clk.clkr, + [GCC_AHB2PHY_USB_CLK] = &gcc_ahb2phy_usb_clk.clkr, + [GCC_BIMC_GPU_AXI_CLK] = &gcc_bimc_gpu_axi_clk.clkr, + [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr, + [GCC_CAM_THROTTLE_NRT_CLK] = &gcc_cam_throttle_nrt_clk.clkr, + [GCC_CAM_THROTTLE_RT_CLK] = &gcc_cam_throttle_rt_clk.clkr, + [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr, + [GCC_CAMSS_AXI_CLK] = &gcc_camss_axi_clk.clkr, + [GCC_CAMSS_AXI_CLK_SRC] = &gcc_camss_axi_clk_src.clkr, + [GCC_CAMSS_CCI_0_CLK] = &gcc_camss_cci_0_clk.clkr, + [GCC_CAMSS_CCI_CLK_SRC] = &gcc_camss_cci_clk_src.clkr, + [GCC_CAMSS_CPHY_0_CLK] = &gcc_camss_cphy_0_clk.clkr, + [GCC_CAMSS_CPHY_1_CLK] = &gcc_camss_cphy_1_clk.clkr, + [GCC_CAMSS_CPHY_2_CLK] = &gcc_camss_cphy_2_clk.clkr, + [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr, + [GCC_CAMSS_CSI0PHYTIMER_CLK_SRC] = &gcc_camss_csi0phytimer_clk_src.clkr, + [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr, + [GCC_CAMSS_CSI1PHYTIMER_CLK_SRC] = &gcc_camss_csi1phytimer_clk_src.clkr, + [GCC_CAMSS_CSI2PHYTIMER_CLK] = &gcc_camss_csi2phytimer_clk.clkr, + [GCC_CAMSS_CSI2PHYTIMER_CLK_SRC] = &gcc_camss_csi2phytimer_clk_src.clkr, + [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr, + [GCC_CAMSS_MCLK0_CLK_SRC] = &gcc_camss_mclk0_clk_src.clkr, + [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr, + [GCC_CAMSS_MCLK1_CLK_SRC] = &gcc_camss_mclk1_clk_src.clkr, + [GCC_CAMSS_MCLK2_CLK] = &gcc_camss_mclk2_clk.clkr, + [GCC_CAMSS_MCLK2_CLK_SRC] = &gcc_camss_mclk2_clk_src.clkr, + [GCC_CAMSS_MCLK3_CLK] = &gcc_camss_mclk3_clk.clkr, + [GCC_CAMSS_MCLK3_CLK_SRC] = &gcc_camss_mclk3_clk_src.clkr, + [GCC_CAMSS_NRT_AXI_CLK] = &gcc_camss_nrt_axi_clk.clkr, + [GCC_CAMSS_OPE_AHB_CLK] = &gcc_camss_ope_ahb_clk.clkr, + [GCC_CAMSS_OPE_AHB_CLK_SRC] = &gcc_camss_ope_ahb_clk_src.clkr, + [GCC_CAMSS_OPE_CLK] = &gcc_camss_ope_clk.clkr, + [GCC_CAMSS_OPE_CLK_SRC] = &gcc_camss_ope_clk_src.clkr, + [GCC_CAMSS_RT_AXI_CLK] = &gcc_camss_rt_axi_clk.clkr, + [GCC_CAMSS_TFE_0_CLK] = &gcc_camss_tfe_0_clk.clkr, + [GCC_CAMSS_TFE_0_CLK_SRC] = &gcc_camss_tfe_0_clk_src.clkr, + [GCC_CAMSS_TFE_0_CPHY_RX_CLK] = &gcc_camss_tfe_0_cphy_rx_clk.clkr, + [GCC_CAMSS_TFE_0_CSID_CLK] = &gcc_camss_tfe_0_csid_clk.clkr, + [GCC_CAMSS_TFE_0_CSID_CLK_SRC] = &gcc_camss_tfe_0_csid_clk_src.clkr, + [GCC_CAMSS_TFE_1_CLK] = &gcc_camss_tfe_1_clk.clkr, + [GCC_CAMSS_TFE_1_CLK_SRC] = &gcc_camss_tfe_1_clk_src.clkr, + [GCC_CAMSS_TFE_1_CPHY_RX_CLK] = &gcc_camss_tfe_1_cphy_rx_clk.clkr, + [GCC_CAMSS_TFE_1_CSID_CLK] = &gcc_camss_tfe_1_csid_clk.clkr, + [GCC_CAMSS_TFE_1_CSID_CLK_SRC] = &gcc_camss_tfe_1_csid_clk_src.clkr, + [GCC_CAMSS_TFE_2_CLK] = &gcc_camss_tfe_2_clk.clkr, + [GCC_CAMSS_TFE_2_CLK_SRC] = &gcc_camss_tfe_2_clk_src.clkr, + [GCC_CAMSS_TFE_2_CPHY_RX_CLK] = &gcc_camss_tfe_2_cphy_rx_clk.clkr, + [GCC_CAMSS_TFE_2_CSID_CLK] = &gcc_camss_tfe_2_csid_clk.clkr, + [GCC_CAMSS_TFE_2_CSID_CLK_SRC] = &gcc_camss_tfe_2_csid_clk_src.clkr, + [GCC_CAMSS_TFE_CPHY_RX_CLK_SRC] = &gcc_camss_tfe_cphy_rx_clk_src.clkr, + [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr, + [GCC_CAMSS_TOP_AHB_CLK_SRC] = &gcc_camss_top_ahb_clk_src.clkr, + [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr, + [GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr, + [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr, + [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr, + [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr, + [GCC_DISP_SLEEP_CLK] = &gcc_disp_sleep_clk.clkr, + [GCC_DISP_THROTTLE_CORE_CLK] = &gcc_disp_throttle_core_clk.clkr, 
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, + [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr, + [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, + [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr, + [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, + [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr, + [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr, + [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr, + [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr, + [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr, + [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr, + [GCC_GPU_THROTTLE_CORE_CLK] = &gcc_gpu_throttle_core_clk.clkr, + [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr, + [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr, + [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr, + [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr, + [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr, + [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr, + [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr, + [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr, + [GCC_QMIP_GPU_CFG_AHB_CLK] = &gcc_qmip_gpu_cfg_ahb_clk.clkr, + [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr, + [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr, + [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr, + [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr, + [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr, + [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr, + [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr, + [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr, + [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr, + [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr, + [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr, + [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr, + [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr, + [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr, + [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr, + [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr, + [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr, + [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr, + [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr, + [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr, + [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr, + [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr, + [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr, + [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr, + [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr, + [GCC_SYS_NOC_UFS_PHY_AXI_CLK] = &gcc_sys_noc_ufs_phy_axi_clk.clkr, + [GCC_SYS_NOC_USB3_PRIM_AXI_CLK] = &gcc_sys_noc_usb3_prim_axi_clk.clkr, + [GCC_UFS_CLKREF_CLK] = &gcc_ufs_clkref_clk.clkr, + [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr, + [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr, + [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr, + [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr, + [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr, + [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr, + [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr, + [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr, + [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr, + [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr, + [GCC_UFS_PHY_UNIPRO_CORE_CLK] = 
&gcc_ufs_phy_unipro_core_clk.clkr, + [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = + &gcc_ufs_phy_unipro_core_clk_src.clkr, + [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr, + [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = + &gcc_usb30_prim_mock_utmi_clk_src.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = + &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr, + [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr, + [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr, + [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr, + [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr, + [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr, + [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr, + [GCC_VCODEC0_AXI_CLK] = &gcc_vcodec0_axi_clk.clkr, + [GCC_VENUS_AHB_CLK] = &gcc_venus_ahb_clk.clkr, + [GCC_VENUS_CTL_AXI_CLK] = &gcc_venus_ctl_axi_clk.clkr, + [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr, + [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr, + [GCC_VIDEO_THROTTLE_CORE_CLK] = &gcc_video_throttle_core_clk.clkr, + [GCC_VIDEO_VCODEC0_SYS_CLK] = &gcc_video_vcodec0_sys_clk.clkr, + [GCC_VIDEO_VENUS_CLK_SRC] = &gcc_video_venus_clk_src.clkr, + [GCC_VIDEO_VENUS_CTL_CLK] = &gcc_video_venus_ctl_clk.clkr, + [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr, + [GPLL0] = &gpll0.clkr, + [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr, + [GPLL1] = &gpll1.clkr, + [GPLL10] = &gpll10.clkr, + [GPLL11] = &gpll11.clkr, + [GPLL3] = &gpll3.clkr, + [GPLL3_OUT_EVEN] = &gpll3_out_even.clkr, + [GPLL4] = &gpll4.clkr, + [GPLL5] = &gpll5.clkr, + [GPLL6] = &gpll6.clkr, + [GPLL6_OUT_EVEN] = &gpll6_out_even.clkr, + [GPLL7] = &gpll7.clkr, + [GPLL8] = &gpll8.clkr, + [GPLL8_OUT_EVEN] = &gpll8_out_even.clkr, + [GPLL9] = &gpll9.clkr, + [GPLL9_OUT_MAIN] = &gpll9_out_main.clkr, +}; + +static const struct qcom_reset_map gcc_khaje_resets[] = { + [GCC_QUSB2PHY_PRIM_BCR] = { 0x1c000 }, + [GCC_QUSB2PHY_SEC_BCR] = { 0x1c004 }, + [GCC_SDCC1_BCR] = { 0x38000 }, + [GCC_SDCC2_BCR] = { 0x1e000 }, + [GCC_UFS_PHY_BCR] = { 0x45000 }, + [GCC_USB30_PRIM_BCR] = { 0x1a000 }, + [GCC_USB3PHY_PHY_PRIM_SP0_BCR] = { 0x1b008 }, + [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x1b020 }, + [GCC_USB3_PHY_PRIM_SP0_BCR] = { 0x1b000 }, + [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x1d000 }, + [GCC_VCODEC0_BCR] = { 0x58094 }, + [GCC_VENUS_BCR] = { 0x58078 }, + [GCC_VIDEO_INTERFACE_BCR] = { 0x6e000 }, +}; + + +static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = { + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src), +}; + +static const struct regmap_config gcc_khaje_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0xc7000, + .fast_io = true, +}; + +static const struct qcom_cc_desc gcc_khaje_desc = { + .config = &gcc_khaje_regmap_config, + .clks = gcc_khaje_clocks, + .num_clks = ARRAY_SIZE(gcc_khaje_clocks), + .resets = gcc_khaje_resets, + .num_resets = ARRAY_SIZE(gcc_khaje_resets), +}; + +static const struct of_device_id gcc_khaje_match_table[] = { + { .compatible = "qcom,khaje-gcc" }, + { } +}; +MODULE_DEVICE_TABLE(of, gcc_khaje_match_table); + +static int gcc_khaje_probe(struct platform_device *pdev) +{ + struct regmap 
*regmap; + int ret; + + regmap = qcom_cc_map(pdev, &gcc_khaje_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx"); + if (IS_ERR(vdd_cx.regulator[0])) { + if (!(PTR_ERR(vdd_cx.regulator[0]) == -EPROBE_DEFER)) + dev_err(&pdev->dev, + "Unable to get vdd_cx regulator\n"); + return PTR_ERR(vdd_cx.regulator[0]); + } + + vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx"); + if (IS_ERR(vdd_mx.regulator[0])) { + if (!(PTR_ERR(vdd_mx.regulator[0]) == -EPROBE_DEFER)) + dev_err(&pdev->dev, + "Unable to get vdd_mx regulator\n"); + return PTR_ERR(vdd_mx.regulator[0]); + } + + /* + * Keep the clocks always-ON + * GCC_CAMERA_XO_CLK, GCC_CPUSS_GNOC_CLK, + * GCC_DISP_XO_CLK, GCC_GPU_CFG_AHB_CLK + */ + regmap_update_bits(regmap, 0x17028, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x2b004, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x1702c, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x36004, BIT(0), BIT(0)); + + ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, + ARRAY_SIZE(gcc_dfs_clocks)); + if (ret) + return ret; + + clk_lucid_pll_configure(&gpll10, regmap, &gpll10_config); + clk_lucid_pll_configure(&gpll11, regmap, &gpll11_config); + clk_lucid_pll_configure(&gpll8, regmap, &gpll8_config); + clk_zonda_pll_configure(&gpll9, regmap, &gpll9_config); + + + ret = qcom_cc_really_probe(pdev, &gcc_khaje_desc, regmap); + if (ret) { + dev_err(&pdev->dev, "Failed to register GCC clocks\n"); + return ret; + } + + dev_info(&pdev->dev, "Registered GCC clocks\n"); + + return ret; +} + +static struct platform_driver gcc_khaje_driver = { + .probe = gcc_khaje_probe, + .driver = { + .name = "gcc-khaje", + .of_match_table = gcc_khaje_match_table, + }, +}; + +static int __init gcc_khaje_init(void) +{ + return platform_driver_register(&gcc_khaje_driver); +} +subsys_initcall(gcc_khaje_init); + +static void __exit gcc_khaje_exit(void) +{ + platform_driver_unregister(&gcc_khaje_driver); +} +module_exit(gcc_khaje_exit); + +MODULE_DESCRIPTION("QTI GCC KHAJE Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/gcc-sdm429w.c b/drivers/clk/qcom/gcc-sdm429w.c index 96b99c080a59..49d3f4c8a52f 100644 --- a/drivers/clk/qcom/gcc-sdm429w.c +++ b/drivers/clk/qcom/gcc-sdm429w.c @@ -42,6 +42,7 @@ enum { P_GPLL6_OUT_AUX, P_GPLL6_OUT_MAIN, P_SLEEP_CLK, + P_GPLL3_OUT_MAIN_DIV, }; static const struct parent_map gcc_parent_map_0[] = { @@ -240,7 +241,7 @@ static const char * const gcc_parent_names_12[] = { static const struct parent_map gcc_parent_map_14[] = { { P_BI_TCXO, 0 }, { P_GPLL0_OUT_MAIN, 1 }, - { P_GPLL3_OUT_MAIN, 2 }, + { P_GPLL3_OUT_MAIN_DIV, 2 }, { P_GPLL6_OUT_AUX, 3 }, { P_GPLL4_OUT_AUX, 4 }, { P_CORE_BI_PLL_TEST_SE, 7 }, @@ -249,7 +250,7 @@ static const struct parent_map gcc_parent_map_14[] = { static const struct parent_map gcc_parent_map_14_gfx3d[] = { { P_BI_TCXO, 0 }, { P_GPLL0_OUT_MAIN, 5 }, - { P_GPLL3_OUT_MAIN, 2 }, + { P_GPLL3_OUT_MAIN_DIV, 2 }, { P_GPLL6_OUT_AUX, 6 }, { P_GPLL4_OUT_AUX, 4 }, { P_CORE_BI_PLL_TEST_SE, 7 }, @@ -258,7 +259,7 @@ static const struct parent_map gcc_parent_map_14_gfx3d[] = { static const char * const gcc_parent_names_14[] = { "bi_tcxo", "gpll0_out_main", - "gpll3_out_main", + "gpll3_out_main_div", "gpll6_out_aux", "gpll4_out_aux", "core_bi_pll_test_se", @@ -466,6 +467,18 @@ static struct clk_alpha_pll gpll3_out_main = { }, }; +static struct clk_fixed_factor gpll3_out_main_div = { + .mult = 1, + .div = 2, + .hw.init = &(struct clk_init_data){ + .name = "gpll3_out_main_div", + .parent_names = 
(const char *[]){ "gpll3_out_main" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_fixed_factor_ops, + }, +}; + static struct clk_alpha_pll gpll4_out_main = { .offset = 0x24000, .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], @@ -501,17 +514,6 @@ static struct clk_pll gpll6 = { }, }; -static struct clk_regmap gpll6_out_main = { - .enable_reg = 0x45000, - .enable_mask = BIT(7), - .hw.init = &(struct clk_init_data){ - .name = "gpll6_out_main", - .parent_names = (const char *[]){ "gpll6" }, - .num_parents = 1, - .ops = &clk_pll_vote_ops, - }, -}; - static struct clk_regmap gpll6_out_aux = { .enable_reg = 0x45000, .enable_mask = BIT(7), @@ -523,6 +525,17 @@ static struct clk_regmap gpll6_out_aux = { }, }; +static struct clk_fixed_factor gpll6_out_main = { + .mult = 1, + .div = 1, + .hw.init = &(struct clk_init_data){ + .name = "gpll6_out_main", + .parent_names = (const char *[]){ "gpll6_out_aux" }, + .num_parents = 1, + .ops = &clk_fixed_factor_ops, + }, +}; + static const struct freq_tbl ftbl_apss_ahb_clk_src[] = { F(19200000, P_BI_TCXO, 1, 0, 0), F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0), @@ -1258,18 +1271,18 @@ static struct clk_rcg2 sdcc2_apps_clk_src = { }; static const struct freq_tbl ftbl_usb_hs_system_clk_src[] = { - F(57140000, P_GPLL0_OUT_MAIN, 14, 0, 0), + F(57142857, P_GPLL0_OUT_MAIN, 14, 0, 0), F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0), F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0), - F(177780000, P_GPLL0_OUT_MAIN, 4.5, 0, 0), + F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0), { } }; static const struct freq_tbl ftbl_usb_hs_system_clk_src_qm215[] = { F( 80000000, P_GPLL0_OUT_MAIN, 10, 0, 0), F( 100000000, P_GPLL0_OUT_MAIN, 8, 0, 0), - F( 133330000, P_GPLL0_OUT_MAIN, 6, 0, 0), - F( 177780000, P_GPLL0_OUT_MAIN, 4.5, 0, 0), + F( 133333333, P_GPLL0_OUT_MAIN, 6, 0, 0), + F( 177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0), { } }; @@ -1536,7 +1549,7 @@ static struct clk_rcg2 csi0_clk_src = { .rate_max = (unsigned long[VDD_NUM]) { [VDD_LOW] = 100000000, [VDD_LOW_L1] = 200000000, - [VDD_NOMINAL] = 266666667}, + [VDD_NOMINAL] = 266670000}, }, }; @@ -1592,7 +1605,7 @@ static struct clk_rcg2 csi1_clk_src = { .rate_max = (unsigned long[VDD_NUM]) { [VDD_LOW] = 100000000, [VDD_LOW_L1] = 200000000, - [VDD_NOMINAL] = 266666667}, + [VDD_NOMINAL] = 266670000}, }, }; @@ -1633,7 +1646,7 @@ static struct clk_rcg2 csi2_clk_src = { .rate_max = (unsigned long[VDD_NUM]) { [VDD_LOW] = 100000000, [VDD_LOW_L1] = 200000000, - [VDD_NOMINAL] = 266666667}, + [VDD_NOMINAL] = 266670000}, }, }; @@ -1706,13 +1719,13 @@ static const struct freq_tbl ftbl_gfx3d_clk_src[] = { F_SLEW(240000000, P_GPLL6_OUT_AUX, 4.5, 0, 0, FIXED_FREQ_SRC), F_SLEW(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0, FIXED_FREQ_SRC), F_SLEW(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0, FIXED_FREQ_SRC), - F_SLEW(355200000, P_GPLL3_OUT_MAIN, 1, 0, 0, 710400000), - F_SLEW(375000000, P_GPLL3_OUT_MAIN, 1, 0, 0, 750000000), + F_SLEW(355200000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 710400000), + F_SLEW(375000000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 750000000), F_SLEW(400000000, P_GPLL0_OUT_MAIN, 2, 0, 0, FIXED_FREQ_SRC), - F_SLEW(450000000, P_GPLL3_OUT_MAIN, 1, 0, 0, 900000000), - F_SLEW(510000000, P_GPLL3_OUT_MAIN, 1, 0, 0, 1020000000), - F_SLEW(560000000, P_GPLL3_OUT_MAIN, 1, 0, 0, 1120000000), - F_SLEW(650000000, P_GPLL3_OUT_MAIN, 1, 0, 0, 1300000000), + F_SLEW(450000000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 900000000), + F_SLEW(510000000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 1020000000), + F_SLEW(560000000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 1120000000), + F_SLEW(650000000, 
P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 1300000000), { } }; @@ -1729,6 +1742,12 @@ static struct freq_tbl ftbl_oxili_gfx3d_clk_src_qm215[] = { F_SLEW( 270000000, P_GPLL6_OUT_AUX, 4, 0, 0, FIXED_FREQ_SRC), F_SLEW( 320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0, FIXED_FREQ_SRC), F_SLEW( 400000000, P_GPLL0_OUT_MAIN, 2, 0, 0, FIXED_FREQ_SRC), + F_SLEW( 465000000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 930000000), + F_SLEW( 484800000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 969600000), + F_SLEW( 500000000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 1000000000), + F_SLEW( 523200000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 1046400000), + F_SLEW( 550000000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 1100000000), + F_SLEW( 598000000, P_GPLL3_OUT_MAIN_DIV, 1, 0, 0, 1196000000), { } }; @@ -1743,6 +1762,7 @@ static struct clk_rcg2 gfx3d_clk_src = { .name = "gfx3d_clk_src", .parent_names = gcc_parent_names_14, .num_parents = 6, + .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, }, }; @@ -3920,6 +3940,7 @@ static struct clk_dummy wcnss_m_clk = { struct clk_hw *gcc_sdm429w_hws[] = { [GPLL0_OUT_AUX] = &gpll0_out_aux.hw, + [GPLL6_OUT_MAIN] = &gpll6_out_main.hw, }; static struct clk_regmap *gcc_sdm429w_clocks[] = { @@ -4000,7 +4021,6 @@ static struct clk_regmap *gcc_sdm429w_clocks[] = { [GPLL0_SLEEP_CLK_SRC] = &gpll0_sleep_clk_src.clkr, [GPLL3_OUT_MAIN] = &gpll3_out_main.clkr, [GPLL4_OUT_MAIN] = &gpll4_out_main.clkr, - [GPLL6_OUT_MAIN] = &gpll6_out_main, [GPLL6] = &gpll6.clkr, [GPLL6_OUT_AUX] = &gpll6_out_aux, [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr, @@ -4246,8 +4266,8 @@ static void fixup_for_qm215(struct platform_device *pdev, sdcc1_apps_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 400000000; usb_hs_system_clk_src.clkr.hw.init->rate_max[VDD_LOW] = 800000000; - usb_hs_system_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 133333000; - usb_hs_system_clk_src.clkr.hw.init->rate_max[VDD_HIGH] = 177780000; + usb_hs_system_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 133333333; + usb_hs_system_clk_src.clkr.hw.init->rate_max[VDD_HIGH] = 177777778; usb_hs_system_clk_src.freq_tbl = ftbl_usb_hs_system_clk_src_qm215; /* @@ -4267,9 +4287,19 @@ static void fixup_for_qm215(struct platform_device *pdev, gcc_sdm429w_desc.clks[GCC_MDSS_ESC1_CLK] = NULL; } +static void fixup_for_sdm439_429(void) +{ + /* + * Below clocks are not available on SDM429/439, thus mark them NULL. 
+ */ + gcc_sdm429w_desc.clks[GCC_GFX_TCU_CLK] = NULL; + gcc_sdm429w_desc.clks[GCC_GFX_TBU_CLK] = NULL; + gcc_sdm429w_desc.clks[GCC_GTCU_AHB_CLK] = NULL; +} static const struct of_device_id gcc_sdm429w_match_table[] = { { .compatible = "qcom,gcc-sdm429w" }, { .compatible = "qcom,gcc-qm215" }, + { .compatible = "qcom,gcc-sdm439" }, { } }; MODULE_DEVICE_TABLE(of, gcc_sdm429w_match_table); @@ -4279,11 +4309,14 @@ static int gcc_sdm429w_probe(struct platform_device *pdev) struct regmap *regmap; struct clk *clk; int ret, speed_bin; - bool qm215; + bool qm215, is_sdm439; qm215 = of_device_is_compatible(pdev->dev.of_node, "qcom,gcc-qm215"); + is_sdm439 = of_device_is_compatible(pdev->dev.of_node, + "qcom,gcc-sdm439"); + clk = clk_get(&pdev->dev, "bi_tcxo"); if (IS_ERR(clk)) { if (PTR_ERR(clk) != -EPROBE_DEFER) @@ -4313,6 +4346,9 @@ static int gcc_sdm429w_probe(struct platform_device *pdev) 0xff0, 0xff0); } + if (is_sdm439) + fixup_for_sdm439_429(); + clk_alpha_pll_configure(&gpll3_out_main, regmap, &gpll3_config); clk = devm_clk_register(&pdev->dev, &wcnss_m_clk.hw); @@ -4321,6 +4357,12 @@ static int gcc_sdm429w_probe(struct platform_device *pdev) return PTR_ERR(clk); } + ret = devm_clk_hw_register(&pdev->dev, &gpll3_out_main_div.hw); + if (ret) { + dev_err(&pdev->dev, "Failed to register hardware clock\n"); + return ret; + } + ret = qcom_cc_really_probe(pdev, &gcc_sdm429w_desc, regmap); if (ret) { dev_err(&pdev->dev, "Failed to register GCC clocks\n"); @@ -4380,9 +4422,22 @@ static const struct qcom_cc_desc mdss_sdm429w_desc = { .num_hwclks = ARRAY_SIZE(mdss_sdm429w_hws), }; +static void fixup_for_qm215_gcc_mdss(void) +{ + /* + * Below clocks are not available on QM215, thus mark them NULL. + */ + + mdss_sdm429w_desc.clks[BYTE1_CLK_SRC] = NULL; + mdss_sdm429w_desc.clks[PCLK1_CLK_SRC] = NULL; + mdss_sdm429w_desc.clks[GCC_MDSS_BYTE1_CLK] = NULL; + mdss_sdm429w_desc.clks[GCC_MDSS_PCLK1_CLK] = NULL; +} + static const struct of_device_id mdss_sdm429w_match_table[] = { { .compatible = "qcom,gcc-mdss-sdm429w" }, - { .compatible = "qcom,gcc-mdss-8917" }, + { .compatible = "qcom,gcc-mdss-qm215" }, + { .compatible = "qcom,gcc-mdss-sdm439" }, {} }; MODULE_DEVICE_TABLE(of, mdss_sdm429w_match_table); @@ -4394,6 +4449,10 @@ static int mdss_sdm429w_probe(struct platform_device *pdev) struct resource *res; void __iomem *base; int ret; + bool is_qm215; + + is_qm215 = of_device_is_compatible(pdev->dev.of_node, + "qcom,gcc-mdss-qm215"); clk = clk_get(&pdev->dev, "pclk0_src"); if (IS_ERR(clk)) { @@ -4426,6 +4485,9 @@ static int mdss_sdm429w_probe(struct platform_device *pdev) if (IS_ERR(regmap)) return PTR_ERR(regmap); + if (is_qm215) + fixup_for_qm215_gcc_mdss(); + ret = qcom_cc_really_probe(pdev, &mdss_sdm429w_desc, regmap); if (ret) { dev_err(&pdev->dev, "Failed to register MDSS clocks\n"); diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c index 606934ddd970..ea79f1386629 100644 --- a/drivers/clk/qcom/gcc-sdm660.c +++ b/drivers/clk/qcom/gcc-sdm660.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2016-2017, 2019-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2017, 2019-2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -1901,6 +1901,7 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = { static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = { .halt_reg = 0x8a004, + .halt_check = BRANCH_VOTED, .clkr = { .enable_reg = 0x8a004, .enable_mask = BIT(0), diff --git a/drivers/clk/qcom/gpucc-khaje.c b/drivers/clk/qcom/gpucc-khaje.c new file mode 100644 index 000000000000..a8643848682b --- /dev/null +++ b/drivers/clk/qcom/gpucc-khaje.c @@ -0,0 +1,491 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "common.h" +#include "vdd-level-bengal.h" + +#define CX_GMU_CBCR_SLEEP_MASK 0xf +#define CX_GMU_CBCR_SLEEP_SHIFT 4 +#define CX_GMU_CBCR_WAKE_MASK 0xf +#define CX_GMU_CBCR_WAKE_SHIFT 8 + +static DEFINE_VDD_REGULATORS(vdd_cx, VDD_HIGH_L1 + 1, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_mx, VDD_HIGH_L1 + 1, 1, vdd_corner); + +enum { + P_BI_TCXO, + P_GPLL0_OUT_MAIN, + P_GPLL0_OUT_MAIN_DIV, + P_GPU_CC_PLL0_2X_DIV_CLK_SRC, + P_GPU_CC_PLL0_OUT_MAIN, + P_GPU_CC_PLL1_OUT_EVEN, + P_GPU_CC_PLL1_OUT_MAIN, + P_GPU_CC_PLL1_OUT_ODD, +}; + +static const struct parent_map gpu_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GPU_CC_PLL0_OUT_MAIN, 1 }, + { P_GPU_CC_PLL1_OUT_MAIN, 3 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_GPLL0_OUT_MAIN_DIV, 6 }, +}; + +static const char * const gpu_cc_parent_names_0[] = { + "bi_tcxo", + "gpu_cc_pll0_out_main", + "gpu_cc_pll1", + "gcc_gpu_gpll0_clk_src", + "gcc_gpu_gpll0_div_clk_src", +}; + +static const struct parent_map gpu_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_GPU_CC_PLL0_OUT_MAIN, 1 }, + { P_GPU_CC_PLL0_2X_DIV_CLK_SRC, 2 }, + { P_GPU_CC_PLL1_OUT_EVEN, 3 }, + { P_GPU_CC_PLL1_OUT_ODD, 4 }, + { P_GPLL0_OUT_MAIN, 5 }, +}; + +static const char * const gpu_cc_parent_names_1[] = { + "bi_tcxo", + "gpu_cc_pll0_out_main", + "gpu_cc_pll0", + "gpu_cc_pll1", + "gpu_cc_pll1", + "gcc_gpu_gpll0_clk_src", +}; + +static struct pll_vco lucid_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +static struct pll_vco zonda_vco[] = { + { 595200000, 3600000000, 0 }, +}; + +/* 640MHz configuration */ +static const struct alpha_pll_config gpu_cc_pll0_config = { + .l = 0x21, + .alpha = 0x5555, + .config_ctl_val = 0x08200800, + .config_ctl_hi_val = 0x05022001, + .config_ctl_hi1_val = 0x00000010, + .user_ctl_val = 0x01000101, +}; + +static struct clk_alpha_pll gpu_cc_pll0 = { + .offset = 0x0, + .vco_table = zonda_vco, + .num_vco = ARRAY_SIZE(zonda_vco), + .flags = SUPPORTS_DYNAMIC_UPDATE, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_pll0", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_zonda_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 1800000000, + [VDD_LOW] = 2400000000, + [VDD_NOMINAL] = 3000000000, + [VDD_HIGH] = 3600000000}, + }, + }, +}; + +static const struct clk_div_table post_div_table_gpu_cc_pll0_out_main[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv gpu_cc_pll0_out_main = { + .offset = 0x0, + .post_div_shift = 8, + .post_div_table = post_div_table_gpu_cc_pll0_out_main, + .num_post_div = ARRAY_SIZE(post_div_table_gpu_cc_pll0_out_main), + .width = 2, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA], + .clkr.hw.init = &(struct 
clk_init_data){ + .name = "gpu_cc_pll0_out_main", + .parent_names = (const char *[]){ "gpu_cc_pll0" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_ro_ops, + }, +}; + +/* 690MHz configuration */ +static const struct alpha_pll_config gpu_cc_pll1_config = { + .l = 0x23, + .alpha = 0xF000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002261, + .config_ctl_hi1_val = 0x329A299C, + .user_ctl_val = 0x00000001, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x00000000, +}; + +static struct clk_alpha_pll gpu_cc_pll1 = { + .offset = 0x100, + .vco_table = lucid_vco, + .num_vco = ARRAY_SIZE(lucid_vco), + .flags = SUPPORTS_DYNAMIC_UPDATE, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_pll1", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 615000000, + [VDD_LOW] = 1066000000, + [VDD_LOW_L1] = 1500000000, + [VDD_NOMINAL] = 1750000000, + [VDD_HIGH] = 2000000000}, + }, + }, +}; + +static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = { + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gpu_cc_gmu_clk_src = { + .cmd_rcgr = 0x1120, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_0, + .freq_tbl = ftbl_gpu_cc_gmu_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpu_cc_gmu_clk_src", + .parent_names = gpu_cc_parent_names_0, + .num_parents = ARRAY_SIZE(gpu_cc_parent_names_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 200000000}, + }, +}; + +static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = { + F(320000000, P_GPU_CC_PLL0_OUT_MAIN, 1, 0, 0), + F(465000000, P_GPU_CC_PLL0_OUT_MAIN, 1, 0, 0), + F(600000000, P_GPU_CC_PLL0_OUT_MAIN, 1, 0, 0), + F(785000000, P_GPU_CC_PLL0_OUT_MAIN, 1, 0, 0), + F(820000000, P_GPU_CC_PLL0_OUT_MAIN, 1, 0, 0), + F(980000000, P_GPU_CC_PLL0_OUT_MAIN, 1, 0, 0), + F(1025000000, P_GPU_CC_PLL0_OUT_MAIN, 1, 0, 0), + F(1100000000, P_GPU_CC_PLL0_OUT_MAIN, 1, 0, 0), + F(1114800000, P_GPU_CC_PLL0_OUT_MAIN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = { + .cmd_rcgr = 0x101c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_1, + .freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpu_cc_gx_gfx3d_clk_src", + .parent_names = gpu_cc_parent_names_1, + .num_parents = ARRAY_SIZE(gpu_cc_parent_names_1), + .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 320000097, + [VDD_LOW] = 465000000, + [VDD_LOW_L1] = 600000000, + [VDD_NOMINAL] = 785088000, + [VDD_HIGH] = 1025088000, + [VDD_HIGH_L1] = 1114800000}, + }, +}; + +static struct clk_branch gpu_cc_ahb_clk = { + .halt_reg = 0x1078, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x1078, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_ahb_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_crc_ahb_clk = { + .halt_reg = 0x107c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x107c, + .enable_mask = 
BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_crc_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_gfx3d_clk = { + .halt_reg = 0x10a4, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x10a4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cx_gfx3d_clk", + .parent_names = (const char *[]){ + "gpu_cc_gx_gfx3d_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_gmu_clk = { + .halt_reg = 0x1098, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1098, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cx_gmu_clk", + .parent_names = (const char *[]){ + "gpu_cc_gmu_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_snoc_dvm_clk = { + .halt_reg = 0x108c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x108c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cx_snoc_dvm_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cxo_aon_clk = { + .halt_reg = 0x1004, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x1004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cxo_aon_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cxo_clk = { + .halt_reg = 0x109c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x109c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cxo_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_gx_gfx3d_clk = { + .halt_reg = 0x1054, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x1054, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_gx_gfx3d_clk", + .parent_names = (const char *[]){ + "gpu_cc_gx_gfx3d_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_sleep_clk = { + .halt_reg = 0x1090, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x1090, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = { + .halt_reg = 0x5000, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x5000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_hlos1_vote_gpu_smmu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap *gpu_cc_khaje_clocks[] = { + [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr, + [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr, + [GPU_CC_CX_GFX3D_CLK] = &gpu_cc_cx_gfx3d_clk.clkr, + [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr, + [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr, + [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr, + [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr, + [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr, + [GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr, + [GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr, + [GPU_CC_PLL0] = &gpu_cc_pll0.clkr, + [GPU_CC_PLL0_OUT_MAIN] = &gpu_cc_pll0_out_main.clkr, + [GPU_CC_PLL1] = &gpu_cc_pll1.clkr, + [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr, + [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr, +}; + 
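(Aside, not part of the patch.) The "/* 640MHz configuration */" and "/* 690MHz configuration */" comments on gpu_cc_pll0_config and gpu_cc_pll1_config above can be sanity-checked from the programmed L/ALPHA values. The sketch below is only an illustration under two assumptions not stated in the patch: the board XO is the usual 19.2 MHz, and these Lucid/Zonda alpha PLLs use a 16-bit fractional ALPHA field, so f_out = f_ref * (L + alpha / 2^16).

	/* Standalone sanity check of the PLL config comments; not kernel code. */
	#include <stdio.h>
	#include <stdint.h>

	/* f_out = f_ref * (L + alpha / 2^16), assuming a 16-bit fractional ALPHA */
	static double alpha_pll_rate(double ref_hz, uint32_t l, uint32_t alpha)
	{
		return ref_hz * ((double)l + (double)alpha / 65536.0);
	}

	int main(void)
	{
		double xo = 19200000.0;	/* assumed 19.2 MHz crystal */

		/* gpu_cc_pll0_config: .l = 0x21, .alpha = 0x5555 */
		printf("gpu_cc_pll0: %.0f Hz\n", alpha_pll_rate(xo, 0x21, 0x5555));

		/* gpu_cc_pll1_config: .l = 0x23, .alpha = 0xF000 */
		printf("gpu_cc_pll1: %.0f Hz\n", alpha_pll_rate(xo, 0x23, 0xF000));

		return 0;
	}

Under those assumptions this prints roughly 639999902 Hz and exactly 690000000 Hz, consistent with the 640 MHz and 690 MHz comments in the configurations above.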
+static const struct regmap_config gpu_cc_khaje_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x7008, + .fast_io = true, +}; + +static const struct qcom_cc_desc gpu_cc_khaje_desc = { + .config = &gpu_cc_khaje_regmap_config, + .clks = gpu_cc_khaje_clocks, + .num_clks = ARRAY_SIZE(gpu_cc_khaje_clocks), +}; + +static const struct of_device_id gpu_cc_khaje_match_table[] = { + { .compatible = "qcom,khaje-gpucc" }, + { } +}; +MODULE_DEVICE_TABLE(of, gpu_cc_khaje_match_table); + +static int gpu_cc_khaje_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + unsigned int value, mask; + int ret; + + vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx"); + if (IS_ERR(vdd_cx.regulator[0])) { + if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER) + dev_err(&pdev->dev, "Unable to get vdd_cx regulator\n"); + return PTR_ERR(vdd_cx.regulator[0]); + } + + vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx"); + if (IS_ERR(vdd_mx.regulator[0])) { + if (PTR_ERR(vdd_mx.regulator[0]) != -EPROBE_DEFER) + dev_err(&pdev->dev, "Unable to get vdd_mx regulator\n"); + return PTR_ERR(vdd_mx.regulator[0]); + } + + regmap = qcom_cc_map(pdev, &gpu_cc_khaje_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + /* + * Keep the clock always-ON + * GPU_CC_GX_CXO_CLK + */ + regmap_update_bits(regmap, 0x1060, BIT(0), BIT(0)); + + clk_zonda_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config); + clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config); + + /* Recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */ + mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT; + mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT; + value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT; + regmap_update_bits(regmap, gpu_cc_cx_gmu_clk.clkr.enable_reg, + mask, value); + + ret = qcom_cc_really_probe(pdev, &gpu_cc_khaje_desc, regmap); + if (ret) { + dev_err(&pdev->dev, "Failed to register GPUCC clocks\n"); + return ret; + } + + dev_info(&pdev->dev, "Registered GPU CC clocks\n"); + + return ret; +} + +static struct platform_driver gpu_cc_khaje_driver = { + .probe = gpu_cc_khaje_probe, + .driver = { + .name = "gpu_cc-khaje", + .of_match_table = gpu_cc_khaje_match_table, + }, +}; + +static int __init gpu_cc_khaje_init(void) +{ + return platform_driver_register(&gpu_cc_khaje_driver); +} +subsys_initcall(gpu_cc_khaje_init); + +static void __exit gpu_cc_khaje_exit(void) +{ + platform_driver_unregister(&gpu_cc_khaje_driver); +} +module_exit(gpu_cc_khaje_exit); + +MODULE_DESCRIPTION("QTI GPU_CC KHAJE Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/gpucc-sdm660.c b/drivers/clk/qcom/gpucc-sdm660.c index ef5b819ab21e..987d2f4527fd 100644 --- a/drivers/clk/qcom/gpucc-sdm660.c +++ b/drivers/clk/qcom/gpucc-sdm660.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. 
*/ #include diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-12nm-util.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-12nm-util.c index e26698eb0d0e..6eab33a81da6 100644 --- a/drivers/clk/qcom/mdss/mdss-dsi-pll-12nm-util.c +++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-12nm-util.c @@ -15,17 +15,32 @@ #define DSI_PLL_POLL_MAX_READS 15 #define DSI_PLL_POLL_TIMEOUT_US 1000 +static void __mdss_dsi_get_pll_vco_cntrl(u64 target_freq, u32 post_div_mux, + u32 *vco_cntrl, u32 *cpbias_cntrl); + int pixel_div_set_div(void *context, unsigned int reg, unsigned int div) { struct mdss_pll_resources *pll = context; + void __iomem *pll_base = pll->pll_base; + int rc; + char data = 0; struct dsi_pll_db *pdb; pdb = (struct dsi_pll_db *)pll->priv; + rc = mdss_pll_resource_enable(pll, true); + if (rc) { + pr_err("Failed to enable mdss dsi pll resources\n"); + return rc; + } /* Programming during vco_prepare. Keep this value */ - pdb->param.pixel_divhf = (div - 1); + data = (div & 0x7f); + MDSS_PLL_REG_W(pll_base, DSIPHY_SSC9, data); + pdb->param.pixel_divhf = data; + pll->cached_postdiv3 = data; + mdss_pll_resource_enable(pll, false); pr_debug("ndx=%d div=%d divhf=%d\n", pll->index, div, pdb->param.pixel_divhf); @@ -37,6 +52,7 @@ int pixel_div_get_div(void *context, unsigned int reg, { int rc; struct mdss_pll_resources *pll = context; + u32 val = 0; if (is_gdsc_disabled(pll)) return 0; @@ -47,8 +63,9 @@ int pixel_div_get_div(void *context, unsigned int reg, return rc; } - *div = (MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SSC9) & 0x7F); - pr_debug("pixel_div = %d\n", (*div+1)); + val = (MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SSC9) & 0x7F); + *div = val; + pr_debug("pixel_div = %d\n", (*div)); mdss_pll_resource_enable(pll, false); @@ -59,13 +76,33 @@ int set_post_div_mux_sel(void *context, unsigned int reg, unsigned int sel) { struct mdss_pll_resources *pll = context; + void __iomem *pll_base = pll->pll_base; struct dsi_pll_db *pdb; + u64 target_freq = 0; + u32 vco_cntrl = 0, cpbias_cntrl = 0; + char data = 0; pdb = (struct dsi_pll_db *)pll->priv; /* Programming during vco_prepare. 
Keep this value */ pdb->param.post_div_mux = sel; + target_freq = div_u64(pll->vco_current_rate, + BIT(pdb->param.post_div_mux)); + __mdss_dsi_get_pll_vco_cntrl(target_freq, pdb->param.post_div_mux, + &vco_cntrl, &cpbias_cntrl); + + data = ((vco_cntrl & 0x3f) | BIT(6)); + MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_CTRL, data); + pr_debug("%s: vco_cntrl 0x%x\n", __func__, vco_cntrl); + pll->cached_cfg0 = data; + wmb(); /* make sure register committed before preparing the clocks */ + + data = ((cpbias_cntrl & 0x1) << 6) | BIT(4); + MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_CHAR_PUMP_BIAS_CTRL, data); + pr_debug("%s: cpbias_cntrl 0x%x\n", __func__, cpbias_cntrl); + + pll->cached_cfg1 = data; pr_debug("ndx=%d post_div_mux_sel=%d p_div=%d\n", pll->index, sel, (u32) BIT(sel)); @@ -90,6 +127,7 @@ int get_post_div_mux_sel(void *context, unsigned int reg, vco_cntrl = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_PLL_VCO_CTRL); vco_cntrl &= 0x30; + pr_debug("%s: vco_cntrl 0x%x\n", __func__, vco_cntrl); cpbias_cntrl = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_PLL_CHAR_PUMP_BIAS_CTRL); @@ -112,6 +150,7 @@ int get_post_div_mux_sel(void *context, unsigned int reg, } mdss_pll_resource_enable(pll, false); + pr_debug("%s: sel = %d\n", __func__, *sel); return 0; } @@ -120,16 +159,25 @@ int set_gp_mux_sel(void *context, unsigned int reg, unsigned int sel) { struct mdss_pll_resources *pll = context; - struct dsi_pll_db *pdb; + void __iomem *pll_base = pll->pll_base; + char data = 0; + int rc; - pdb = (struct dsi_pll_db *)pll->priv; + rc = mdss_pll_resource_enable(pll, true); + if (rc) { + pr_err("Failed to enable mdss dsi pll resources\n"); + return rc; + } /* Programming during vco_prepare. Keep this value */ - pdb->param.gp_div_mux = sel; + data = ((sel & 0x7) << 5) | 0x5; + MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_CTRL, data); + pll->cached_postdiv1 = data; pr_debug("ndx=%d gp_div_mux_sel=%d gp_cntrl=%d\n", pll->index, sel, (u32) BIT(sel)); + mdss_pll_resource_enable(pll, false); return 0; } @@ -175,7 +223,9 @@ static bool pll_is_pll_locked_12nm(struct mdss_pll_resources *pll, pr_err("DSI PLL ndx=%d status=%x failed to Lock\n", pll->index, status); pll_locked = false; + pr_debug("%s: not locked\n", __func__); } else { + pr_debug("%s: locked\n", __func__); pll_locked = true; } @@ -542,13 +592,13 @@ static void mdss_dsi_pll_12nm_calc_reg(struct mdss_pll_resources *pll, { struct dsi_pll_param *param = &pdb->param; u64 target_freq = 0; + u32 post_div_mux = 0; + get_post_div_mux_sel(pll, 0, &post_div_mux); target_freq = div_u64(pll->vco_current_rate, - BIT(pdb->param.post_div_mux)); + BIT(post_div_mux)); param->hsfreqrange = __mdss_dsi_get_hsfreqrange(target_freq); - __mdss_dsi_get_pll_vco_cntrl(target_freq, param->post_div_mux, - ¶m->vco_cntrl, ¶m->cpbias_cntrl); param->osc_freq_target = __mdss_dsi_get_osc_freq_target(target_freq); param->m_div = (u32) __mdss_dsi_pll_get_m_div(pll->vco_current_rate); param->fsm_ovr_ctrl = __mdss_dsi_get_fsm_ovr_ctrl(target_freq); @@ -707,9 +757,6 @@ static void pll_db_commit_12nm(struct mdss_pll_resources *pll, data = ((param->hsfreqrange & 0x7f) | BIT(7)); MDSS_PLL_REG_W(pll_base, DSIPHY_HS_FREQ_RAN_SEL, data); - data = ((param->vco_cntrl & 0x3f) | BIT(6)); - MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_CTRL, data); - data = (param->osc_freq_target & 0x7f); MDSS_PLL_REG_W(pll_base, DSIPHY_SLEWRATE_DDL_CYC_FRQ_ADJ_0, data); @@ -733,15 +780,6 @@ static void pll_db_commit_12nm(struct mdss_pll_resources *pll, data = ((param->gmp_cntrl & 0x3) << 4); MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_GMP_CTRL_DIG_TST, data); - 
data = ((param->cpbias_cntrl & 0x1) << 6) | BIT(4); - MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_CHAR_PUMP_BIAS_CTRL, data); - - data = ((param->gp_div_mux & 0x7) << 5) | 0x5; - MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_CTRL, data); - - data = (param->pixel_divhf & 0x7f); - MDSS_PLL_REG_W(pll_base, DSIPHY_SSC9, data); - MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_ANA_PROG_CTRL, 0x03); MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_ANA_TST_LOCK_ST_OVR_CTRL, 0x50); MDSS_PLL_REG_W(pll_base, @@ -779,6 +817,14 @@ int pll_vco_set_rate_12nm(struct clk_hw *hw, unsigned long rate, pll->vco_current_rate = rate; pll->vco_ref_clk_rate = vco->ref_clk_rate; + + mdss_dsi_pll_12nm_calc_reg(pll, pdb); + if (pll->ssc_en) + mdss_dsi_pll_12nm_calc_ssc(pll, pdb); + + /* commit DSI vco */ + pll_db_commit_12nm(pll, pdb); + error: return rc; } @@ -791,9 +837,13 @@ static unsigned long pll_vco_get_rate_12nm(struct clk_hw *hw) u64 ref_clk = vco->ref_clk_rate; int rc; struct mdss_pll_resources *pll = vco->priv; + u32 post_div_mux; + u32 cpbias_cntrl = 0; - if (is_gdsc_disabled(pll)) + if (is_gdsc_disabled(pll)) { + pr_err("%s:gdsc disabled\n", __func__); return 0; + } rc = mdss_pll_resource_enable(pll, true); if (rc) { @@ -811,6 +861,16 @@ static unsigned long pll_vco_get_rate_12nm(struct clk_hw *hw) m_div_11_6 &= 0x3f; pr_debug("m_div_11_6 = 0x%x\n", m_div_11_6); + post_div_mux = MDSS_PLL_REG_R(pll->pll_base, + DSIPHY_PLL_VCO_CTRL); + + pr_debug("post_div_mux = 0x%x\n", post_div_mux); + + cpbias_cntrl = MDSS_PLL_REG_R(pll->pll_base, + DSIPHY_PLL_CHAR_PUMP_BIAS_CTRL); + cpbias_cntrl = ((cpbias_cntrl >> 6) & 0x1); + pr_debug("cpbias_cntrl = 0x%x\n", cpbias_cntrl); + m_div = ((m_div_11_6 << 6) | (m_div_5_0)); vco_rate = div_u64((ref_clk * m_div), 4); @@ -845,12 +905,21 @@ unsigned long vco_12nm_recalc_rate(struct clk_hw *hw, struct mdss_pll_resources *pll = vco->priv; unsigned long rate = 0; int rc; + struct dsi_pll_db *pdb; + + pdb = (struct dsi_pll_db *)pll->priv; if (!pll && is_gdsc_disabled(pll)) { pr_err("gdsc disabled\n"); return 0; } + if (pll->vco_current_rate != 0) { + pr_debug("%s:returning vco rate = %lld\n", __func__, + pll->vco_current_rate); + return pll->vco_current_rate; + } + rc = mdss_pll_resource_enable(pll, true); if (rc) { pr_err("Failed to enable mdss dsi pll=%d\n", pll->index); @@ -861,6 +930,7 @@ unsigned long vco_12nm_recalc_rate(struct clk_hw *hw, pll->handoff_resources = true; pll->pll_on = true; rate = pll_vco_get_rate_12nm(hw); + pr_debug("%s: pll locked. rate %lu\n", __func__, rate); } else { mdss_pll_resource_enable(pll, false); } @@ -881,6 +951,13 @@ int pll_vco_prepare_12nm(struct clk_hw *hw) return -EINVAL; } + /* Skip vco recalculation for continuous splash use case */ + if (pll->handoff_resources) { + pr_debug("%s: Skip recalculation during cont splash\n", + __func__); + return rc; + } + pdb = (struct dsi_pll_db *)pll->priv; if (!pdb) { pr_err("No prov found\n"); @@ -905,6 +982,22 @@ int pll_vco_prepare_12nm(struct clk_hw *hw) } } + if (!pll->handoff_resources) { + pr_debug("%s ndx = %d cache PLL regs\n", __func__, pll->index); + MDSS_PLL_REG_W(pll->pll_base, + DSIPHY_PLL_VCO_CTRL, pll->cached_cfg0); + udelay(1); + MDSS_PLL_REG_W(pll->pll_base, + DSIPHY_PLL_CHAR_PUMP_BIAS_CTRL, pll->cached_cfg1); + udelay(1); + MDSS_PLL_REG_W(pll->pll_base, + DSIPHY_PLL_CTRL, pll->cached_postdiv1); + udelay(1); + MDSS_PLL_REG_W(pll->pll_base, + DSIPHY_SSC9, pll->cached_postdiv3); + udelay(5); /* h/w recommended delay */ + } + /* * For cases where DSI PHY is already enabled like: * 1.) 
LP-11 during static screen @@ -921,15 +1014,7 @@ int pll_vco_prepare_12nm(struct clk_hw *hw) goto end; } - mdss_dsi_pll_12nm_calc_reg(pll, pdb); - if (pll->ssc_en) - mdss_dsi_pll_12nm_calc_ssc(pll, pdb); - - /* commit DSI vco */ - pll_db_commit_12nm(pll, pdb); - rc = dsi_pll_enable(hw); - error: if (rc) { mdss_pll_resource_enable(pll, false); @@ -951,6 +1036,12 @@ void pll_vco_unprepare_12nm(struct clk_hw *hw) } pll->vco_cached_rate = clk_hw_get_rate(hw); + + pll->cached_cfg0 = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_PLL_VCO_CTRL); + pll->cached_cfg1 = MDSS_PLL_REG_R(pll->pll_base, + DSIPHY_PLL_CHAR_PUMP_BIAS_CTRL); + pll->cached_postdiv1 = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_PLL_CTRL); + pll->cached_postdiv3 = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_SSC9); dsi_pll_disable(hw); } diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-12nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-12nm.c index 6b63ed28d569..5b70d74ca3cf 100644 --- a/drivers/clk/qcom/mdss/mdss-dsi-pll-12nm.c +++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-12nm.c @@ -27,6 +27,7 @@ static const struct clk_ops clk_ops_vco_12nm = { .round_rate = pll_vco_round_rate_12nm, .prepare = pll_vco_prepare_12nm, .unprepare = pll_vco_unprepare_12nm, + .enable = pll_vco_enable_12nm, }; static struct regmap_bus pclk_div_regmap_bus = { @@ -206,8 +207,8 @@ static struct clk_fixed_factor dsi0pll_post_div32 = { static struct clk_regmap_mux dsi0pll_post_div_mux = { .reg = DSIPHY_PLL_VCO_CTRL, - .shift = 4, - .width = 2, + .shift = 0, + .width = 3, .clkr = { .hw.init = &(struct clk_init_data){ .name = "dsi0pll_post_div_mux", @@ -298,8 +299,8 @@ static struct clk_fixed_factor dsi1pll_post_div32 = { static struct clk_regmap_mux dsi1pll_post_div_mux = { .reg = DSIPHY_PLL_VCO_CTRL, - .shift = 4, - .width = 2, + .shift = 0, + .width = 3, .clkr = { .hw.init = &(struct clk_init_data){ .name = "dsi1pll_post_div_mux", @@ -390,7 +391,7 @@ static struct clk_fixed_factor dsi0pll_gp_div32 = { static struct clk_regmap_mux dsi0pll_gp_div_mux = { .reg = DSIPHY_PLL_CTRL, - .shift = 5, + .shift = 0, .width = 3, .clkr = { .hw.init = &(struct clk_init_data){ @@ -482,7 +483,7 @@ static struct clk_fixed_factor dsi1pll_gp_div32 = { static struct clk_regmap_mux dsi1pll_gp_div_mux = { .reg = DSIPHY_PLL_CTRL, - .shift = 5, + .shift = 0, .width = 3, .clkr = { .hw.init = &(struct clk_init_data){ @@ -503,10 +504,10 @@ static struct clk_regmap_mux dsi1pll_gp_div_mux = { static struct clk_regmap_div dsi0pll_pclk_src = { .reg = DSIPHY_SSC9, .shift = 0, - .width = 6, + .width = 3, .clkr = { .hw.init = &(struct clk_init_data){ - .name = "dsi0pll_pclk_src", + .name = "dsi0_phy_pll_out_dsiclk", .parent_names = (const char *[]){ "dsi0pll_gp_div_mux"}, .num_parents = 1, @@ -519,10 +520,10 @@ static struct clk_regmap_div dsi0pll_pclk_src = { static struct clk_regmap_div dsi1pll_pclk_src = { .reg = DSIPHY_SSC9, .shift = 0, - .width = 6, + .width = 3, .clkr = { .hw.init = &(struct clk_init_data){ - .name = "dsi1pll_pclk_src", + .name = "dsi1_phy_pll_out_dsiclk", .parent_names = (const char *[]){ "dsi1pll_gp_div_mux"}, .num_parents = 1, @@ -536,7 +537,7 @@ static struct clk_fixed_factor dsi0pll_byte_clk_src = { .div = 4, .mult = 1, .hw.init = &(struct clk_init_data){ - .name = "dsi0pll_byte_clk_src", + .name = "dsi0_phy_pll_out_byteclk", .parent_names = (const char *[]){"dsi0pll_post_div_mux"}, .num_parents = 1, .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT), @@ -548,7 +549,7 @@ static struct clk_fixed_factor dsi1pll_byte_clk_src = { .div = 4, .mult = 1, .hw.init = &(struct clk_init_data){ - .name = 
"dsi1pll_byte_clk_src", + .name = "dsi1_phy_pll_out_byteclk", .parent_names = (const char *[]){"dsi1pll_post_div_mux"}, .num_parents = 1, .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT), @@ -556,7 +557,6 @@ static struct clk_fixed_factor dsi1pll_byte_clk_src = { }, }; - static struct clk_hw *mdss_dsi_pllcc_12nm[] = { [VCO_CLK_0] = &dsi0pll_vco_clk.hw, [POST_DIV1_0_CLK] = &dsi0pll_post_div1.hw, @@ -598,14 +598,14 @@ int dsi_pll_clock_register_12nm(struct platform_device *pdev, struct mdss_pll_resources *pll_res) { int rc = 0, ndx, i; - struct clk *clk; + struct clk *clk = NULL; struct clk_onecell_data *clk_data; int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_12nm); struct regmap *rmap; struct dsi_pll_db *pdb; if (!pdev || !pdev->dev.of_node || - !pll_res || !pll_res->pll_base || !pll_res->phy_base) { + !pll_res || !pll_res->pll_base) { pr_err("Invalid params\n"); return -EINVAL; } @@ -637,7 +637,7 @@ int dsi_pll_clock_register_12nm(struct platform_device *pdev, clk_data->clk_num = num_clks; /* Establish client data */ - if (ndx == 0) { + if (pll_res->index == 0) { rmap = devm_regmap_init(&pdev->dev, &post_div_mux_regmap_bus, pll_res, &dsi_pll_12nm_config); dsi0pll_post_div_mux.clkr.regmap = rmap; @@ -700,8 +700,8 @@ int dsi_pll_clock_register_12nm(struct platform_device *pdev, of_clk_src_onecell_get, clk_data); } if (!rc) { - pr_info("Registered DSI PLL ndx=%d,clocks successfully\n", ndx); - + pr_info("Registered DSI PLL ndx=%d, clocks successfully\n", + pll_res->index); return rc; } clk_register_fail: diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-28lpm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-28lpm.c index 0da6b4a85c68..04db3bd66016 100644 --- a/drivers/clk/qcom/mdss/mdss-dsi-pll-28lpm.c +++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-28lpm.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include "mdss-pll.h" #include "mdss-dsi-pll.h" @@ -388,13 +388,13 @@ static struct clk_regmap_div dsi1pll_pclk_src = { }; static struct clk_hw *mdss_dsi_pllcc_28lpm[] = { - [VCO_CLK_0] = &dsi0pll_vco_clk.hw, + [VCOCLK_0] = &dsi0pll_vco_clk.hw, [ANALOG_POSTDIV_0_CLK] = &dsi0pll_analog_postdiv.clkr.hw, [INDIRECT_PATH_SRC_0_CLK] = &dsi0pll_indirect_path_src.hw, [BYTECLK_SRC_MUX_0_CLK] = &dsi0pll_byteclk_src_mux.clkr.hw, [BYTECLK_SRC_0_CLK] = &dsi0pll_byteclk_src.hw, [PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw, - [VCO_CLK_1] = &dsi1pll_vco_clk.hw, + [VCOCLK_1] = &dsi1pll_vco_clk.hw, [ANALOG_POSTDIV_1_CLK] = &dsi1pll_analog_postdiv.clkr.hw, [INDIRECT_PATH_SRC_1_CLK] = &dsi1pll_indirect_path_src.hw, [BYTECLK_SRC_MUX_1_CLK] = &dsi1pll_byteclk_src_mux.clkr.hw, @@ -486,7 +486,7 @@ int dsi_pll_clock_register_28lpm(struct platform_device *pdev, dsi0pll_pclk_src.clkr.regmap = rmap; dsi0pll_vco_clk.priv = pll_res; - for (i = VCO_CLK_0; i <= PCLK_SRC_0_CLK; i++) { + for (i = VCOCLK_0; i <= PCLK_SRC_0_CLK; i++) { clk = devm_clk_register(&pdev->dev, mdss_dsi_pllcc_28lpm[i]); if (IS_ERR(clk)) { @@ -531,7 +531,7 @@ int dsi_pll_clock_register_28lpm(struct platform_device *pdev, dsi1pll_pclk_src.clkr.regmap = rmap; dsi1pll_vco_clk.priv = pll_res; - for (i = VCO_CLK_1; i <= PCLK_SRC_1_CLK; i++) { + for (i = VCOCLK_1; i <= PCLK_SRC_1_CLK; i++) { clk = devm_clk_register(&pdev->dev, mdss_dsi_pllcc_28lpm[i]); if (IS_ERR(clk)) { diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c index ab9589f686ba..d01d9afaf265 100644 --- a/drivers/clk/qcom/mdss/mdss-pll.c +++ b/drivers/clk/qcom/mdss/mdss-pll.c @@ -128,7 +128,9 @@ static int mdss_pll_resource_parse(struct platform_device *pdev, 
pll_res->pll_interface_type = MDSS_DP_PLL_14NM; pll_res->target_id = MDSS_PLL_TARGET_SDM660; pll_res->revision = 2; - } else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_28lpm")) + } else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_12nm")) + pll_res->pll_interface_type = MDSS_DSI_PLL_12NM; + else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_28lpm")) pll_res->pll_interface_type = MDSS_DSI_PLL_28LPM; else goto err; diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h index 2a54194d50a6..5290c7cf90c8 100644 --- a/drivers/clk/qcom/mdss/mdss-pll.h +++ b/drivers/clk/qcom/mdss/mdss-pll.h @@ -220,12 +220,13 @@ static inline bool is_gdsc_disabled(struct mdss_pll_resources *pll_res) return true; } if ((pll_res->target_id == MDSS_PLL_TARGET_SDM660) || - (pll_res->pll_interface_type == MDSS_DSI_PLL_28LPM)) + (pll_res->pll_interface_type == MDSS_DSI_PLL_28LPM) || + (pll_res->pll_interface_type == MDSS_DSI_PLL_12NM)) ret = ((readl_relaxed(pll_res->gdsc_base + 0x4) & BIT(31)) && (!(readl_relaxed(pll_res->gdsc_base) & BIT(0)))) ? false : true; else ret = readl_relaxed(pll_res->gdsc_base) & BIT(31) ? - false : true; + false : true; return ret; } diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c index bd0fe9da5a58..5abdb88e6f8f 100644 --- a/drivers/clk/qcom/mmcc-sdm660.c +++ b/drivers/clk/qcom/mmcc-sdm660.c @@ -2798,7 +2798,7 @@ static struct clk_branch mmss_snoc_dvm_axi_clk = { static struct clk_branch mmss_video_ahb_clk = { .halt_reg = 0x1030, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_VOTED, .clkr = { .enable_reg = 0x1030, .enable_mask = BIT(0), diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c index f8d7b5e3b975..2db33e178615 100644 --- a/drivers/cpuidle/lpm-levels-of.c +++ b/drivers/cpuidle/lpm-levels-of.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2019,2021, The Linux Foundation. All rights reserved. */ #define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME @@ -638,6 +638,7 @@ struct lpm_cluster *parse_cluster(struct device_node *node, if (ret) return NULL; + INIT_LIST_HEAD(&c->list); INIT_LIST_HEAD(&c->child); INIT_LIST_HEAD(&c->cpu); c->parent = parent; diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c index 138671c2dbd2..d9b1998e3b52 100644 --- a/drivers/crypto/msm/ice.c +++ b/drivers/crypto/msm/ice.c @@ -2,7 +2,7 @@ /* * QTI Inline Crypto Engine (ICE) driver * - * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2020, 2021 The Linux Foundation. All rights reserved. */ #include @@ -59,6 +59,8 @@ #define ICE_CRYPTO_CXT_FDE 1 #define ICE_CRYPTO_CXT_FBE 2 +#define ICE_FDE_KEY_INDEX 31 + static int ice_fde_flag; struct ice_clk_info { @@ -103,6 +105,11 @@ static int qti_ice_setting_config(struct request *req, setting->encr_bypass = true; setting->decr_bypass = true; } + /* Qseecom now sets the FDE key to slot 31 by default, instead + * of slot 0, so use the same slot here during read/write + */ + if (cxt == ICE_CRYPTO_CXT_FDE) + setting->crypto_data.key_index = ICE_FDE_KEY_INDEX; } return 0; diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 6ddd1356fd3e..be0597f58d47 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -102,35 +102,12 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) dentry->d_name.name, ret > 0 ? 
name : ""); } -static const struct dentry_operations dma_buf_dentry_ops = { - .d_dname = dmabuffs_dname, -}; - -static struct vfsmount *dma_buf_mnt; - -static struct dentry *dma_buf_fs_mount(struct file_system_type *fs_type, - int flags, const char *name, void *data) -{ - return mount_pseudo(fs_type, "dmabuf:", NULL, &dma_buf_dentry_ops, - DMA_BUF_MAGIC); -} - -static struct file_system_type dma_buf_fs_type = { - .name = "dmabuf", - .mount = dma_buf_fs_mount, - .kill_sb = kill_anon_super, -}; - -static int dma_buf_release(struct inode *inode, struct file *file) +static void dma_buf_release(struct dentry *dentry) { struct dma_buf *dmabuf; - struct dentry *dentry = file->f_path.dentry; int dtor_ret = 0; - if (!is_dma_buf_file(file)) - return -EINVAL; - - dmabuf = file->private_data; + dmabuf = dentry->d_fsdata; spin_lock(&dentry->d_lock); dentry->d_fsdata = NULL; @@ -167,9 +144,28 @@ static int dma_buf_release(struct inode *inode, struct file *file) module_put(dmabuf->owner); dmabuf_dent_put(dmabuf); - return 0; } +static const struct dentry_operations dma_buf_dentry_ops = { + .d_dname = dmabuffs_dname, + .d_release = dma_buf_release, +}; + +static struct vfsmount *dma_buf_mnt; + +static struct dentry *dma_buf_fs_mount(struct file_system_type *fs_type, + int flags, const char *name, void *data) +{ + return mount_pseudo(fs_type, "dmabuf:", NULL, &dma_buf_dentry_ops, + DMA_BUF_MAGIC); +} + +static struct file_system_type dma_buf_fs_type = { + .name = "dmabuf", + .mount = dma_buf_fs_mount, + .kill_sb = kill_anon_super, +}; + static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) { struct dma_buf *dmabuf; @@ -488,7 +484,6 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file) } static const struct file_operations dma_buf_fops = { - .release = dma_buf_release, .mmap = dma_buf_mmap_internal, .llseek = dma_buf_llseek, .poll = dma_buf_poll, diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index 35ef9cd78a46..97f6d529bd0d 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -1324,8 +1324,10 @@ int extcon_dev_register(struct extcon_dev *edev) goto err_dev; } - for (index = 0; index < edev->max_supported; index++) + for (index = 0; index < edev->max_supported; index++) { RAW_INIT_NOTIFIER_HEAD(&edev->nh[index]); + BLOCKING_INIT_NOTIFIER_HEAD(&edev->bnh[index]); + } RAW_INIT_NOTIFIER_HEAD(&edev->nh_all); diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile index 8165e688dcd3..126bafb1ec18 100644 --- a/drivers/gpu/msm/Makefile +++ b/drivers/gpu/msm/Makefile @@ -17,7 +17,8 @@ msm_kgsl_core-y = \ kgsl_rgmu.o \ kgsl_hfi.o \ kgsl_pool.o \ - kgsl_reclaim.o + kgsl_reclaim.o \ + kgsl_timeline.o msm_kgsl_core-$(CONFIG_QCOM_KGSL_IOMMU) += kgsl_iommu.o msm_kgsl_core-$(CONFIG_DEBUG_FS) += kgsl_debugfs.o diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h index 7fd704bce7a2..aa824ff60958 100644 --- a/drivers/gpu/msm/adreno-gpulist.h +++ b/drivers/gpu/msm/adreno-gpulist.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2002,2007-2021, The Linux Foundation. All rights reserved. 
*/ #define ANY_ID (~0) @@ -272,9 +272,9 @@ static const struct adreno_reglist a50x_hwcg_regs[] = { {A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222} }; -static const struct adreno_a5xx_core adreno_gpu_core_a505 = { +static const struct adreno_a5xx_core adreno_gpu_core_a504 = { .base = { - DEFINE_ADRENO_REV(ADRENO_REV_A505, 5, 0, 5, ANY_ID), + DEFINE_ADRENO_REV(ADRENO_REV_A504, 5, 0, 4, ANY_ID), .features = ADRENO_PREEMPTION | ADRENO_64BIT, .gpudev = &adreno_a5xx_gpudev, .gmem_size = (SZ_128K + SZ_8K), @@ -289,6 +289,25 @@ static const struct adreno_a5xx_core adreno_gpu_core_a505 = { .vbif_count = ARRAY_SIZE(a530_vbif_regs), }; +static const struct adreno_a5xx_core adreno_gpu_core_a505 = { + .base = { + DEFINE_ADRENO_REV(ADRENO_REV_A505, 5, 0, 5, ANY_ID), + .features = ADRENO_PREEMPTION | ADRENO_64BIT | + ADRENO_CONTENT_PROTECTION | ADRENO_CPZ_RETENTION, + .gpudev = &adreno_a5xx_gpudev, + .gmem_size = (SZ_128K + SZ_8K), + .busy_mask = 0xfffffffe, + .bus_width = 16, + }, + .pm4fw_name = "a530_pm4.fw", + .pfpfw_name = "a530_pfp.fw", + .zap_name = "a506_zap", + .hwcg = a50x_hwcg_regs, + .hwcg_count = ARRAY_SIZE(a50x_hwcg_regs), + .vbif = a530_vbif_regs, + .vbif_count = ARRAY_SIZE(a530_vbif_regs), +}; + static const struct adreno_a5xx_core adreno_gpu_core_a506 = { .base = { DEFINE_ADRENO_REV(ADRENO_REV_A506, 5, 0, 6, ANY_ID), @@ -919,7 +938,7 @@ static const struct adreno_a6xx_core adreno_gpu_core_a619 = { }, .prim_fifo_threshold = 0x0018000, .gmu_major = 1, - .gmu_minor = 10, + .gmu_minor = 11, .sqefw_name = "a630_sqe.fw", .gmufw_name = "a619_gmu.bin", .zap_name = "a615_zap", @@ -1496,6 +1515,7 @@ static const struct adreno_gpu_core *adreno_gpulist[] = { &adreno_gpu_core_a540v2.base, &adreno_gpu_core_a512.base, &adreno_gpu_core_a508.base, + &adreno_gpu_core_a504.base, &adreno_gpu_core_a630v1, /* Deprecated */ &adreno_gpu_core_a630v2.base, &adreno_gpu_core_a615.base, diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 957abd446ec7..730903b33d74 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2002,2007-2021, The Linux Foundation. All rights reserved. */ #include #include @@ -240,6 +240,56 @@ int adreno_efuse_read_u32(struct adreno_device *adreno_dev, unsigned int offset, return 0; } +void adreno_efuse_speed_bin_array(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + int ret, count, i = 0; + unsigned int val, vector_size = 3; + unsigned int *bin_vector; + + /* + * Here count is no of 32 bit elements in the + * speed-bin-vector array. If there are two fuses + * i.e If no of fuses are 2 then no of elements will be + * 2 * 3 = 6(elements of 32 bit each). + */ + count = of_property_count_u32_elems(device->pdev->dev.of_node, + "qcom,gpu-speed-bin-vectors"); + + if ((count <= 0) || (count % vector_size)) + return; + + bin_vector = kmalloc_array(count, sizeof(unsigned int), GFP_KERNEL); + if (bin_vector == NULL) + return; + + if (of_property_read_u32_array(device->pdev->dev.of_node, + "qcom,gpu-speed-bin-vectors", + bin_vector, count)) { + dev_err(device->dev, + "Speed-bin-vectors is invalid\n"); + kfree(bin_vector); + return; + } + + /* + * Final value of adreno_dev->speed_bin is the value formed by + * OR'ing the values read from all the fuses. 
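+	 *
+	 * [Editor's illustration, not part of this change; the offsets, masks
+	 *  and shifts below are hypothetical.] With a two-fuse entry such as
+	 *
+	 *     qcom,gpu-speed-bin-vectors = <0x41a0 0x00e00000 21
+	 *                                   0x41a4 0x00000007  0>;
+	 *
+	 * count is 6 (2 fuses * 3 u32s) and the loop below reduces to
+	 *
+	 *     speed_bin = ((fuse(0x41a0) & 0x00e00000) >> 21) |
+	 *                 ((fuse(0x41a4) & 0x00000007) >>  0);
+	 *
+	 * where fuse(off) stands for the value returned by
+	 * adreno_efuse_read_u32() at that offset.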
+ */ + while (i < count) { + ret = adreno_efuse_read_u32(adreno_dev, bin_vector[i], &val); + + if (ret < 0) + break; + + adreno_dev->speed_bin |= (val & bin_vector[i+1]) + >> bin_vector[i+2]; + i += vector_size; + } + + kfree(bin_vector); +} + static int _get_counter(struct adreno_device *adreno_dev, int group, int countable, unsigned int *lo, unsigned int *hi) diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 1423135b1089..02a34615ac22 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2008-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2008-2021, The Linux Foundation. All rights reserved. */ #ifndef __ADRENO_H #define __ADRENO_H @@ -202,6 +202,7 @@ enum adreno_gpurev { ADRENO_REV_A418 = 418, ADRENO_REV_A420 = 420, ADRENO_REV_A430 = 430, + ADRENO_REV_A504 = 504, ADRENO_REV_A505 = 505, ADRENO_REV_A506 = 506, ADRENO_REV_A508 = 508, @@ -1107,6 +1108,7 @@ int adreno_efuse_map(struct adreno_device *adreno_dev); int adreno_efuse_read_u32(struct adreno_device *adreno_dev, unsigned int offset, unsigned int *val); void adreno_efuse_unmap(struct adreno_device *adreno_dev); +void adreno_efuse_speed_bin_array(struct adreno_device *adreno_dev); bool adreno_is_cx_dbgc_register(struct kgsl_device *device, unsigned int offset); @@ -1150,6 +1152,7 @@ static inline int adreno_is_a5xx(struct adreno_device *adreno_dev) ADRENO_GPUREV(adreno_dev) < 600; } +ADRENO_TARGET(a504, ADRENO_REV_A504) ADRENO_TARGET(a505, ADRENO_REV_A505) ADRENO_TARGET(a506, ADRENO_REV_A506) ADRENO_TARGET(a508, ADRENO_REV_A508) @@ -1170,9 +1173,9 @@ static inline int adreno_is_a530v3(struct adreno_device *adreno_dev) (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 2); } -static inline int adreno_is_a505_or_a506(struct adreno_device *adreno_dev) +static inline int adreno_is_a504_to_a506(struct adreno_device *adreno_dev) { - return ADRENO_GPUREV(adreno_dev) >= 505 && + return ADRENO_GPUREV(adreno_dev) >= 504 && ADRENO_GPUREV(adreno_dev) <= 506; } diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index 7ac31898f326..9289054d5921 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -89,13 +89,34 @@ static void a530_efuse_speed_bin(struct adreno_device *adreno_dev) adreno_dev->speed_bin = (val & speed_bin[1]) >> speed_bin[2]; } +static void a5xx_efuse_speed_bin(struct adreno_device *adreno_dev) +{ + unsigned int val; + unsigned int speed_bin[3]; + struct kgsl_device *device = &adreno_dev->dev; + + if (of_get_property(device->pdev->dev.of_node, + "qcom,gpu-speed-bin-vectors", NULL)) { + adreno_efuse_speed_bin_array(adreno_dev); + return; + } + + if (!of_property_read_u32_array(device->pdev->dev.of_node, + "qcom,gpu-speed-bin", speed_bin, 3)) { + adreno_efuse_read_u32(adreno_dev, speed_bin[0], &val); + adreno_dev->speed_bin = (val & speed_bin[1]) >> speed_bin[2]; + return; + } +} + static const struct { int (*check)(struct adreno_device *adreno_dev); void (*func)(struct adreno_device *adreno_dev); } a5xx_efuse_funcs[] = { { adreno_is_a530, a530_efuse_leakage }, { adreno_is_a530, a530_efuse_speed_bin }, - { adreno_is_a505, a530_efuse_speed_bin }, + { adreno_is_a504, a5xx_efuse_speed_bin }, + { adreno_is_a505, a5xx_efuse_speed_bin }, { adreno_is_a512, a530_efuse_speed_bin }, { adreno_is_a508, a530_efuse_speed_bin }, }; @@ -119,7 +140,7 @@ static void a5xx_platform_setup(struct adreno_device *adreno_dev) { struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); - if (adreno_is_a505_or_a506(adreno_dev) || adreno_is_a508(adreno_dev)) { + if (adreno_is_a504_to_a506(adreno_dev) || adreno_is_a508(adreno_dev)) { gpudev->snapshot_data->sect_sizes->cp_meq = 32; gpudev->snapshot_data->sect_sizes->cp_merciu = 1024; gpudev->snapshot_data->sect_sizes->roq = 256; @@ -1525,7 +1546,7 @@ static void a5xx_start(struct adreno_device *adreno_dev) * Below CP registers are 0x0 by default, program init * values based on a5xx flavor. */ - if (adreno_is_a505_or_a506(adreno_dev) || adreno_is_a508(adreno_dev)) { + if (adreno_is_a504_to_a506(adreno_dev) || adreno_is_a508(adreno_dev)) { kgsl_regwrite(device, A5XX_CP_MEQ_THRESHOLDS, 0x20); kgsl_regwrite(device, A5XX_CP_MERCIU_SIZE, 0x400); kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030); @@ -1551,7 +1572,7 @@ static void a5xx_start(struct adreno_device *adreno_dev) * vtxFifo and primFifo thresholds default values * are different. */ - if (adreno_is_a505_or_a506(adreno_dev) || adreno_is_a508(adreno_dev)) + if (adreno_is_a504_to_a506(adreno_dev) || adreno_is_a508(adreno_dev)) kgsl_regwrite(device, A5XX_PC_DBG_ECO_CNTL, (0x100 << 11 | 0x100 << 22)); else if (adreno_is_a510(adreno_dev) || adreno_is_a512(adreno_dev)) @@ -1832,6 +1853,7 @@ static int _me_init_ucode_workarounds(struct adreno_device *adreno_dev) switch (ADRENO_GPUREV(adreno_dev)) { case ADRENO_REV_A510: return 0x00000001; /* Ucode workaround for token end syncs */ + case ADRENO_REV_A504: case ADRENO_REV_A505: case ADRENO_REV_A506: case ADRENO_REV_A530: diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index 6d18580125a8..5d0dd684788c 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -89,9 +89,27 @@ static u32 a6xx_ifpc_pwrup_reglist[] = { }; /* Applicable to a620 and a650 */ +static u32 a650_ifpc_pwrup_reglist[] = { + A6XX_CP_PROTECT_REG+32, + A6XX_CP_PROTECT_REG+33, + A6XX_CP_PROTECT_REG+34, + A6XX_CP_PROTECT_REG+35, + A6XX_CP_PROTECT_REG+36, + A6XX_CP_PROTECT_REG+37, + A6XX_CP_PROTECT_REG+38, + A6XX_CP_PROTECT_REG+39, + A6XX_CP_PROTECT_REG+40, + A6XX_CP_PROTECT_REG+41, + A6XX_CP_PROTECT_REG+42, + A6XX_CP_PROTECT_REG+43, + A6XX_CP_PROTECT_REG+44, + A6XX_CP_PROTECT_REG+45, + A6XX_CP_PROTECT_REG+46, + A6XX_CP_PROTECT_REG+47, +}; + static u32 a650_pwrup_reglist[] = { A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, - A6XX_CP_PROTECT_REG + 47, /* Programmed for infinite span */ A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1, A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2, @@ -351,14 +369,21 @@ struct a6xx_reglist_list { static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev) { - struct a6xx_reglist_list reglist[3]; + struct a6xx_reglist_list reglist[4]; void *ptr = adreno_dev->pwrup_reglist.hostptr; struct cpu_gpu_lock *lock = ptr; int items = 0, i, j; u32 *dest = ptr + sizeof(*lock); + u16 list_offset = 0; /* Static IFPC-only registers */ - reglist[items++] = REGLIST(a6xx_ifpc_pwrup_reglist); + reglist[items] = REGLIST(a6xx_ifpc_pwrup_reglist); + list_offset += reglist[items++].count * 2; + + if (adreno_is_a650_family(adreno_dev)) { + reglist[items] = REGLIST(a650_ifpc_pwrup_reglist); + list_offset += reglist[items++].count * 2; + } /* Static IFPC + preemption registers */ reglist[items++] = REGLIST(a6xx_pwrup_reglist); @@ -401,7 +426,7 @@ static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev) * all the lists and list_offset should be specified as the size in * dwords of the first entry in the list. */ - lock->list_offset = reglist[0].count * 2; + lock->list_offset = list_offset; } /* diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c index d67945724643..e3a46780a496 100644 --- a/drivers/gpu/msm/adreno_debugfs.c +++ b/drivers/gpu/msm/adreno_debugfs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2002,2008-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2002,2008-2021, The Linux Foundation. All rights reserved. */ #include @@ -147,11 +147,22 @@ static void sync_event_print(struct seq_file *s, break; } case KGSL_CMD_SYNCPOINT_TYPE_FENCE: { + struct event_fence_info *info = sync_event ? + sync_event->priv : NULL; int i; - for (i = 0; i < sync_event->info.num_fences; i++) + for (i = 0; info && i < info->num_fences; i++) seq_printf(s, "sync: %s", - sync_event->info.fences[i].name); + info->fences[i].name); + break; + } + case KGSL_CMD_SYNCPOINT_TYPE_TIMELINE: { + struct event_timeline_info *info = sync_event->priv; + int j; + + for (j = 0; info && info[j].timeline; j++) + seq_printf(s, "timeline: %d seqno: %d", + info[j].timeline, info[j].seqno); break; } default: diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c index 055afd6d6816..8d4b3e4e3bce 100644 --- a/drivers/gpu/msm/adreno_dispatch.c +++ b/drivers/gpu/msm/adreno_dispatch.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -8,6 +8,7 @@ #include "adreno.h" #include "adreno_trace.h" #include "kgsl_gmu_core.h" +#include "kgsl_timeline.h" #define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s)) @@ -276,6 +277,7 @@ static void _retire_timestamp(struct kgsl_drawobj *drawobj) KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp), drawobj->timestamp); + drawctxt->submitted_timestamp = drawobj->timestamp; /* Retire pending GPU events for the object */ kgsl_process_event_group(device, &context->events); @@ -342,12 +344,14 @@ static void _retire_sparseobj(struct kgsl_drawobj_sparse *sparseobj, _retire_timestamp(DRAWOBJ(sparseobj)); } -static int _retire_markerobj(struct kgsl_drawobj_cmd *cmdobj, +static int dispatch_retire_markerobj(struct kgsl_drawobj *drawobj, struct adreno_context *drawctxt) { + struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj); + if (_marker_expired(cmdobj)) { _pop_drawobj(drawctxt); - _retire_timestamp(DRAWOBJ(cmdobj)); + _retire_timestamp(drawobj); return 0; } @@ -363,12 +367,14 @@ static int _retire_markerobj(struct kgsl_drawobj_cmd *cmdobj, return test_bit(CMDOBJ_SKIP, &cmdobj->priv) ? 1 : -EAGAIN; } -static int _retire_syncobj(struct kgsl_drawobj_sync *syncobj, +static int dispatch_retire_syncobj(struct kgsl_drawobj *drawobj, struct adreno_context *drawctxt) { + struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj); + if (!kgsl_drawobj_events_pending(syncobj)) { _pop_drawobj(drawctxt); - kgsl_drawobj_destroy(DRAWOBJ(syncobj)); + kgsl_drawobj_destroy(drawobj); return 0; } @@ -384,6 +390,22 @@ static int _retire_syncobj(struct kgsl_drawobj_sync *syncobj, return -EAGAIN; } +static int drawqueue_retire_timelineobj(struct kgsl_drawobj *drawobj, + struct adreno_context *drawctxt) +{ + struct kgsl_drawobj_timeline *timelineobj = TIMELINEOBJ(drawobj); + int i; + + for (i = 0; i < timelineobj->count; i++) + kgsl_timeline_signal(timelineobj->timelines[i].timeline, + timelineobj->timelines[i].seqno); + + _pop_drawobj(drawctxt); + _retire_timestamp(drawobj); + + return 0; +} + /* * Retires all expired marker and sync objs from the context * queue and returns one of the below @@ -397,35 +419,40 @@ static struct kgsl_drawobj *_process_drawqueue_get_next_drawobj( { struct kgsl_drawobj *drawobj; unsigned int i = drawctxt->drawqueue_head; - int ret = 0; if (drawctxt->drawqueue_head == drawctxt->drawqueue_tail) return NULL; for (i = drawctxt->drawqueue_head; i != drawctxt->drawqueue_tail; i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE)) { + int ret = 0; drawobj = drawctxt->drawqueue[i]; - - if (drawobj == NULL) + if (!drawobj) return NULL; - if (drawobj->type == CMDOBJ_TYPE) + switch (drawobj->type) { + case CMDOBJ_TYPE: return drawobj; - else if (drawobj->type == MARKEROBJ_TYPE) { - ret = _retire_markerobj(CMDOBJ(drawobj), drawctxt); + case MARKEROBJ_TYPE: + ret = dispatch_retire_markerobj(drawobj, drawctxt); /* Special case where marker needs to be sent to GPU */ if (ret == 1) return drawobj; - } else if (drawobj->type == SYNCOBJ_TYPE) - ret = _retire_syncobj(SYNCOBJ(drawobj), drawctxt); - else - return ERR_PTR(-EINVAL); + break; + case SYNCOBJ_TYPE: + ret = dispatch_retire_syncobj(drawobj, drawctxt); + break; + case TIMELINEOBJ_TYPE: + ret = drawqueue_retire_timelineobj(drawobj, drawctxt); + break; + default: + ret = -EINVAL; + break; + } - if (ret == -EAGAIN) - return ERR_PTR(-EAGAIN); - - continue; + if (ret) + return ERR_PTR(ret); } return NULL; @@ -696,7 +723,7 @@ static struct kgsl_drawobj_sparse *_get_next_sparseobj( return NULL; if (drawobj->type == SYNCOBJ_TYPE) - ret = 
_retire_syncobj(SYNCOBJ(drawobj), drawctxt); + ret = dispatch_retire_syncobj(drawobj, drawctxt); else if (drawobj->type == SPARSEOBJ_TYPE) return SPARSEOBJ(drawobj); else @@ -1247,12 +1274,27 @@ static int _queue_sparseobj(struct adreno_device *adreno_dev, return 0; } +static int drawctxt_queue_auxobj(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt, struct kgsl_drawobj *drawobj, + u32 *timestamp, u32 user_ts) +{ + int ret; -static int _queue_markerobj(struct adreno_device *adreno_dev, - struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *markerobj, + ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts); + if (ret) + return ret; + + drawctxt->queued_timestamp = *timestamp; + _queue_drawobj(drawctxt, drawobj); + + return 0; +} + +static int drawctxt_queue_markerobj(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt, struct kgsl_drawobj *drawobj, uint32_t *timestamp, unsigned int user_ts) { - struct kgsl_drawobj *drawobj = DRAWOBJ(markerobj); + struct kgsl_drawobj_cmd *markerobj = CMDOBJ(drawobj); int ret; ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts); @@ -1285,11 +1327,11 @@ static int _queue_markerobj(struct adreno_device *adreno_dev, return 0; } -static int _queue_cmdobj(struct adreno_device *adreno_dev, - struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *cmdobj, +static int drawctxt_queue_cmdobj(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt, struct kgsl_drawobj *drawobj, uint32_t *timestamp, unsigned int user_ts) { - struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); + struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj); unsigned int j; int ret; @@ -1323,11 +1365,9 @@ static int _queue_cmdobj(struct adreno_device *adreno_dev, return 0; } -static void _queue_syncobj(struct adreno_context *drawctxt, - struct kgsl_drawobj_sync *syncobj, uint32_t *timestamp) +static void drawctxt_queue_syncobj(struct adreno_context *drawctxt, + struct kgsl_drawobj *drawobj, uint32_t *timestamp) { - struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); - *timestamp = 0; drawobj->timestamp = 0; @@ -1401,29 +1441,34 @@ int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv, switch (drawobj[i]->type) { case MARKEROBJ_TYPE: - ret = _queue_markerobj(adreno_dev, drawctxt, - CMDOBJ(drawobj[i]), - timestamp, user_ts); - if (ret == 1) { + ret = drawctxt_queue_markerobj(adreno_dev, drawctxt, + drawobj[i], timestamp, user_ts); + if (ret) spin_unlock(&drawctxt->lock); + + if (ret == 1) goto done; - } else if (ret) { - spin_unlock(&drawctxt->lock); + else if (ret) return ret; - } break; case CMDOBJ_TYPE: - ret = _queue_cmdobj(adreno_dev, drawctxt, - CMDOBJ(drawobj[i]), - timestamp, user_ts); + ret = drawctxt_queue_cmdobj(adreno_dev, drawctxt, + drawobj[i], timestamp, user_ts); if (ret) { spin_unlock(&drawctxt->lock); return ret; } break; case SYNCOBJ_TYPE: - _queue_syncobj(drawctxt, SYNCOBJ(drawobj[i]), - timestamp); + drawctxt_queue_syncobj(drawctxt, drawobj[i], timestamp); + break; + case TIMELINEOBJ_TYPE: + ret = drawctxt_queue_auxobj(adreno_dev, + drawctxt, drawobj[i], timestamp, user_ts); + if (ret) { + spin_unlock(&drawctxt->lock); + return ret; + } break; case SPARSEOBJ_TYPE: ret = _queue_sparseobj(adreno_dev, drawctxt, diff --git a/drivers/gpu/msm/adreno_ioctl.c b/drivers/gpu/msm/adreno_ioctl.c index 4d29c0ff0328..ee0dc9326490 100644 --- a/drivers/gpu/msm/adreno_ioctl.c +++ b/drivers/gpu/msm/adreno_ioctl.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2002,2007-2020, The Linux Foundation. 
All rights reserved. + * Copyright (c) 2002,2007-2021, The Linux Foundation. All rights reserved. */ #include @@ -183,11 +183,8 @@ long adreno_ioctl_helper(struct kgsl_device_private *dev_priv, break; } - if (i == len) { - dev_err(dev_priv->device->dev, - "invalid ioctl code 0x%08X\n", cmd); + if (i == len) return -ENOIOCTLCMD; - } if (_IOC_SIZE(cmds[i].cmd > sizeof(data))) { dev_err_ratelimited(dev_priv->device->dev, diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c index 780e89d58899..8529084eaaf0 100644 --- a/drivers/gpu/msm/adreno_snapshot.c +++ b/drivers/gpu/msm/adreno_snapshot.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. */ #include @@ -195,7 +195,17 @@ static inline void parse_ib(struct kgsl_device *device, * then push it into the static blob otherwise put it in the dynamic * list */ - if (gpuaddr == snapshot->ib1base) { + if (kgsl_addr_range_overlap(gpuaddr, dwords, + snapshot->ib1base, snapshot->ib1size)) { + /* + * During restore after preemption, ib1base in the register + * can be updated by CP. In such scenarios, to dump complete + * IB1 in snapshot, we should consider ib1base from ringbuffer. + */ + if (gpuaddr != snapshot->ib1base) { + snapshot->ib1base = gpuaddr; + snapshot->ib1size = dwords; + } kgsl_snapshot_push_object(device, process, gpuaddr, dwords); return; } @@ -309,16 +319,29 @@ static void snapshot_rb_ibs(struct kgsl_device *device, } if (adreno_cmd_is_ib(adreno_dev, rbptr[index])) { - if (ADRENO_LEGACY_PM4(adreno_dev)) { - if (rbptr[index + 1] == snapshot->ib1base) - break; - } else { - uint64_t ibaddr; + uint64_t ibaddr; + uint64_t ibsize; + if (ADRENO_LEGACY_PM4(adreno_dev)) { + ibaddr = rbptr[index + 1]; + ibsize = rbptr[index + 2]; + } else { ibaddr = rbptr[index + 2]; ibaddr = ibaddr << 32 | rbptr[index + 1]; - if (ibaddr == snapshot->ib1base) - break; + ibsize = rbptr[index + 3]; + } + + if (kgsl_addr_range_overlap(ibaddr, ibsize, + snapshot->ib1base, snapshot->ib1size)) { + /* + * During restore after preemption, ib1base in + * the register can be updated by CP. In such + * scenario, to dump complete IB1 in snapshot, + * we should consider ib1base from ringbuffer. + */ + snapshot->ib1base = ibaddr; + snapshot->ib1size = ibsize; + break; } } } while (index != rb->wptr); @@ -916,8 +939,7 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot, * figure how often this really happens. 
*/ - if (-ENOENT == find_object(snapshot->ib1base, snapshot->process) && - snapshot->ib1size) { + if (-ENOENT == find_object(snapshot->ib1base, snapshot->process)) { struct kgsl_mem_entry *entry; u64 ibsize; diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index c365b3e49e98..2244a9f3dcbd 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -293,8 +293,12 @@ static void remove_dmabuf_list(struct kgsl_dma_buf_meta *meta) } #ifdef CONFIG_DMA_SHARED_BUFFER -static void kgsl_destroy_ion(struct kgsl_dma_buf_meta *meta) +static void kgsl_destroy_ion(struct kgsl_memdesc *memdesc) { + struct kgsl_mem_entry *entry = container_of(memdesc, + struct kgsl_mem_entry, memdesc); + struct kgsl_dma_buf_meta *meta = entry->priv_data; + if (meta != NULL) { remove_dmabuf_list(meta); dma_buf_unmap_attachment(meta->attach, meta->table, @@ -303,14 +307,45 @@ static void kgsl_destroy_ion(struct kgsl_dma_buf_meta *meta) dma_buf_put(meta->dmabuf); kfree(meta); } -} -#else -static void kgsl_destroy_ion(struct kgsl_dma_buf_meta *meta) -{ + /* + * Ion takes care of freeing the sg_table for us so + * clear the sg table to ensure kgsl_sharedmem_free + * doesn't try to free it again + */ + memdesc->sgt = NULL; } + +static struct kgsl_memdesc_ops kgsl_dmabuf_ops = { + .free = kgsl_destroy_ion, +}; #endif +static void kgsl_destroy_anon(struct kgsl_memdesc *memdesc) +{ + int i = 0, j; + struct scatterlist *sg; + struct page *page; + + for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i) { + page = sg_page(sg); + for (j = 0; j < (sg->length >> PAGE_SHIFT); j++) { + /* + * Mark the page in the scatterlist as dirty if they + * were writable by the GPU. + */ + if (!(memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY)) + set_page_dirty_lock(nth_page(page, j)); + + /* + * Put the page reference taken using get_user_pages + * during memdesc_sg_virt. + */ + put_page(nth_page(page, j)); + } + } +} + static void mem_entry_destroy(struct kgsl_mem_entry *entry) { unsigned int memtype; @@ -329,41 +364,8 @@ static void mem_entry_destroy(struct kgsl_mem_entry *entry) atomic_long_sub(entry->memdesc.size, &kgsl_driver.stats.mapped); - /* - * Ion takes care of freeing the sg_table for us so - * clear the sg table before freeing the sharedmem - * so kgsl_sharedmem_free doesn't try to free it again - */ - if (memtype == KGSL_MEM_ENTRY_ION) - entry->memdesc.sgt = NULL; - - if ((memtype == KGSL_MEM_ENTRY_USER) - && !(entry->memdesc.flags & KGSL_MEMFLAGS_GPUREADONLY)) { - int i = 0, j; - struct scatterlist *sg; - struct page *page; - /* - * Mark all of pages in the scatterlist as dirty since they - * were writable by the GPU. 
- */ - for_each_sg(entry->memdesc.sgt->sgl, sg, - entry->memdesc.sgt->nents, i) { - page = sg_page(sg); - for (j = 0; j < (sg->length >> PAGE_SHIFT); j++) - set_page_dirty_lock(nth_page(page, j)); - } - } - kgsl_sharedmem_free(&entry->memdesc); - switch (memtype) { - case KGSL_MEM_ENTRY_ION: - kgsl_destroy_ion(entry->priv_data); - break; - default: - break; - } - kfree(entry); } @@ -696,8 +698,7 @@ void kgsl_context_detach(struct kgsl_context *context) /* Remove the event group from the list */ kgsl_del_event_group(&context->events); - kgsl_sync_timeline_put(context->ktimeline); - + kgsl_sync_timeline_detach(context->ktimeline); kgsl_context_put(context); } @@ -716,6 +717,8 @@ kgsl_context_destroy(struct kref *kref) */ BUG_ON(!kgsl_context_detached(context)); + kgsl_sync_timeline_put(context->ktimeline); + write_lock(&device->context_lock); if (context->id != KGSL_CONTEXT_INVALID) { @@ -739,7 +742,6 @@ kgsl_context_destroy(struct kref *kref) context->id = KGSL_CONTEXT_INVALID; } write_unlock(&device->context_lock); - kgsl_sync_timeline_destroy(context); kgsl_process_private_put(context->proc_priv); device->ftbl->drawctxt_destroy(context); @@ -904,11 +906,27 @@ static void kgsl_destroy_process_private(struct kref *kref) struct kgsl_process_private *private = container_of(kref, struct kgsl_process_private, refcount); + mutex_lock(&kgsl_driver.process_mutex); + + debugfs_remove_recursive(private->debug_root); + kgsl_process_uninit_sysfs(private); + + /* When using global pagetables, do not detach global pagetable */ + if (private->pagetable->name != KGSL_MMU_GLOBAL_PT) + kgsl_mmu_detach_pagetable(private->pagetable); + + /* Remove the process struct from the master list */ + spin_lock(&kgsl_driver.proclist_lock); + list_del(&private->list); + spin_unlock(&kgsl_driver.proclist_lock); + + mutex_unlock(&kgsl_driver.process_mutex); + put_pid(private->pid); idr_destroy(&private->mem_idr); idr_destroy(&private->syncsource_idr); - /* When using global pagetables, do not detach global pagetable */ + /* When using global pagetables, do not put global pagetable */ if (private->pagetable->name != KGSL_MMU_GLOBAL_PT) kgsl_mmu_putpagetable(private->pagetable); @@ -951,13 +969,6 @@ static struct kgsl_process_private *kgsl_process_private_new( struct kgsl_process_private *private; struct pid *cur_pid = get_task_pid(current->group_leader, PIDTYPE_PID); - /* - * Flush mem_workqueue to make sure that any lingering - * structs (process pagetable etc) are released before - * starting over again. 
- */ - flush_workqueue(kgsl_driver.mem_workqueue); - /* Search in the process list */ list_for_each_entry(private, &kgsl_driver.process_list, list) { if (private->pid == cur_pid) { @@ -1006,9 +1017,17 @@ static struct kgsl_process_private *kgsl_process_private_new( put_pid(private->pid); kfree(private); - private = ERR_PTR(err); + return ERR_PTR(err); } + /* create the debug directories and add it to the process list */ + kgsl_process_init_sysfs(device, private); + kgsl_process_init_debugfs(private); + + spin_lock(&kgsl_driver.proclist_lock); + list_add(&private->list, &kgsl_driver.process_list); + spin_unlock(&kgsl_driver.proclist_lock); + return private; } @@ -1057,30 +1076,13 @@ static void kgsl_process_private_close(struct kgsl_device_private *dev_priv, * directories and garbage collect any outstanding resources */ - kgsl_process_uninit_sysfs(private); + process_release_memory(private); /* Release all syncsource objects from process private */ kgsl_syncsource_process_release_syncsources(private); - /* When using global pagetables, do not detach global pagetable */ - if (private->pagetable->name != KGSL_MMU_GLOBAL_PT) - kgsl_mmu_detach_pagetable(private->pagetable); - - /* Remove the process struct from the master list */ - spin_lock(&kgsl_driver.proclist_lock); - list_del(&private->list); - spin_unlock(&kgsl_driver.proclist_lock); - - /* - * Unlock the mutex before releasing the memory and the debugfs - * nodes - this prevents deadlocks with the IOMMU and debugfs - * locks. - */ mutex_unlock(&kgsl_driver.process_mutex); - process_release_memory(private); - debugfs_remove_recursive(private->debug_root); - kgsl_process_private_put(private); } @@ -1090,25 +1092,20 @@ static struct kgsl_process_private *kgsl_process_private_open( { struct kgsl_process_private *private; + /* + * Flush mem_workqueue to make sure that any lingering + * structs (process pagetable etc) are released before + * starting over again. + */ + flush_workqueue(kgsl_driver.mem_workqueue); + mutex_lock(&kgsl_driver.process_mutex); private = kgsl_process_private_new(device); if (IS_ERR(private)) goto done; - /* - * If this is a new process create the debug directories and add it to - * the process list - */ - - if (private->fd_count++ == 0) { - kgsl_process_init_sysfs(device, private); - kgsl_process_init_debugfs(private); - - spin_lock(&kgsl_driver.proclist_lock); - list_add(&private->list, &kgsl_driver.process_list); - spin_unlock(&kgsl_driver.proclist_lock); - } + private->fd_count++; done: mutex_unlock(&kgsl_driver.process_mutex); @@ -2110,6 +2107,129 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv, return result; } +long kgsl_ioctl_gpu_aux_command(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + struct kgsl_gpu_aux_command *param = data; + struct kgsl_device *device = dev_priv->device; + struct kgsl_context *context; + struct kgsl_drawobj **drawobjs; + struct kgsl_drawobj_sync *tsobj; + void __user *cmdlist; + u32 queued, count; + int i, index = 0; + long ret; + struct kgsl_gpu_aux_command_generic generic; + + /* We support only one aux command */ + if (param->numcmds != 1) + return -EINVAL; + + if (!(param->flags & KGSL_GPU_AUX_COMMAND_TIMELINE)) + return -EINVAL; + + context = kgsl_context_get_owner(dev_priv, param->context_id); + if (!context) + return -EINVAL; + + /* + * param->numcmds is always one and we have one additional drawobj + * for the timestamp sync if KGSL_GPU_AUX_COMMAND_SYNC flag is passed. 
+ * On top of that we make an implicit sync object for the last queued + * timestamp on this context. + */ + count = (param->flags & KGSL_GPU_AUX_COMMAND_SYNC) ? 3 : 2; + + drawobjs = kvcalloc(count, sizeof(*drawobjs), GFP_KERNEL); + + if (!drawobjs) { + kgsl_context_put(context); + return -ENOMEM; + } + + trace_kgsl_aux_command(context->id, param->numcmds, param->flags, + param->timestamp); + + if (param->flags & KGSL_GPU_AUX_COMMAND_SYNC) { + struct kgsl_drawobj_sync *syncobj = + kgsl_drawobj_sync_create(device, context); + + if (IS_ERR(syncobj)) { + ret = PTR_ERR(syncobj); + goto err; + } + + drawobjs[index++] = DRAWOBJ(syncobj); + + ret = kgsl_drawobj_sync_add_synclist(device, syncobj, + u64_to_user_ptr(param->synclist), + param->syncsize, param->numsyncs); + if (ret) + goto err; + } + + kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED, &queued); + + /* + * Make an implicit sync object for the last queued timestamp on this + * context + */ + tsobj = kgsl_drawobj_create_timestamp_syncobj(device, + context, queued); + + if (IS_ERR(tsobj)) { + ret = PTR_ERR(tsobj); + goto err; + } + + drawobjs[index++] = DRAWOBJ(tsobj); + + cmdlist = u64_to_user_ptr(param->cmdlist); + + /* Create a draw object for KGSL_GPU_AUX_COMMAND_TIMELINE */ + if (kgsl_copy_struct_from_user(&generic, sizeof(generic), + cmdlist, param->cmdsize)) { + ret = -EFAULT; + goto err; + } + + if (generic.type == KGSL_GPU_AUX_COMMAND_TIMELINE) { + struct kgsl_drawobj_timeline *timelineobj; + + timelineobj = kgsl_drawobj_timeline_create(device, + context); + + if (IS_ERR(timelineobj)) { + ret = PTR_ERR(timelineobj); + goto err; + } + + drawobjs[index++] = DRAWOBJ(timelineobj); + + ret = kgsl_drawobj_add_timeline(dev_priv, timelineobj, + u64_to_user_ptr(generic.priv), generic.size); + if (ret) + goto err; + } else { + ret = -EINVAL; + goto err; + } + + ret = device->ftbl->queue_cmds(dev_priv, context, + drawobjs, index, ¶m->timestamp); + +err: + kgsl_context_put(context); + + if (ret && ret != -EPROTO) { + for (i = 0; i < count; i++) + kgsl_drawobj_destroy(drawobjs[i]); + } + + kvfree(drawobjs); + return ret; +} + long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) @@ -2501,6 +2621,10 @@ static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, unsigned long useraddr) return ret; } +static struct kgsl_memdesc_ops kgsl_usermem_ops = { + .free = kgsl_destroy_anon, +}; + static int kgsl_setup_anon_useraddr(struct kgsl_pagetable *pagetable, struct kgsl_mem_entry *entry, unsigned long hostptr, size_t offset, size_t size) @@ -2515,12 +2639,19 @@ static int kgsl_setup_anon_useraddr(struct kgsl_pagetable *pagetable, entry->memdesc.pagetable = pagetable; entry->memdesc.size = (uint64_t) size; entry->memdesc.flags |= (uint64_t)KGSL_MEMFLAGS_USERMEM_ADDR; + entry->memdesc.ops = &kgsl_usermem_ops; if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) { /* Register the address in the database */ ret = kgsl_mmu_set_svm_region(pagetable, (uint64_t) hostptr, (uint64_t) size); + /* if OOM, retry once after flushing mem_workqueue */ + if (ret == -ENOMEM) { + flush_workqueue(kgsl_driver.mem_workqueue); + ret = kgsl_mmu_set_svm_region(pagetable, + (uint64_t) hostptr, (uint64_t) size); + } if (ret) return ret; @@ -2834,11 +2965,6 @@ long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv, return 0; unmap: - if (kgsl_memdesc_usermem_type(&entry->memdesc) == KGSL_MEM_ENTRY_ION) { - kgsl_destroy_ion(entry->priv_data); - entry->memdesc.sgt = NULL; - } - 
kgsl_sharedmem_free(&entry->memdesc); out: @@ -2943,6 +3069,7 @@ static int kgsl_setup_dma_buf(struct kgsl_device *device, entry->priv_data = meta; entry->memdesc.pagetable = pagetable; entry->memdesc.size = 0; + entry->memdesc.ops = &kgsl_dmabuf_ops; /* USE_CPU_MAP is not impemented for ION. */ entry->memdesc.flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP); entry->memdesc.flags |= (uint64_t)KGSL_MEMFLAGS_USERMEM_ION; @@ -3148,14 +3275,6 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv, return result; error_attach: - switch (kgsl_memdesc_usermem_type(&entry->memdesc)) { - case KGSL_MEM_ENTRY_ION: - kgsl_destroy_ion(entry->priv_data); - entry->memdesc.sgt = NULL; - break; - default: - break; - } kgsl_sharedmem_free(&entry->memdesc); error: /* Clear gpuaddr here so userspace doesn't get any wrong ideas */ @@ -4894,6 +5013,11 @@ kgsl_get_unmapped_area(struct file *file, unsigned long addr, pgoff, len, (int) val); } else { val = _get_svm_area(private, entry, addr, len, flags); + /* if OOM, retry once after flushing mem_workqueue */ + if (val == -ENOMEM) { + flush_workqueue(kgsl_driver.mem_workqueue); + val = _get_svm_area(private, entry, addr, len, flags); + } if (IS_ERR_VALUE(val)) dev_err_ratelimited(device->dev, "_get_svm_area: pid %d mmap_base %lx addr %lx pgoff %lx len %ld failed error %d\n", @@ -5221,6 +5345,9 @@ int kgsl_device_platform_probe(struct kgsl_device *device) if (status) goto error_close_mmu; + idr_init(&device->timelines); + spin_lock_init(&device->timelines_lock); + /* * The default request type PM_QOS_REQ_ALL_CORES is * applicable to all CPU cores that are online and @@ -5295,6 +5422,7 @@ void kgsl_device_platform_remove(struct kgsl_device *device) pm_qos_remove_request(&device->pwrctrl.l2pc_cpus_qos); idr_destroy(&device->context_idr); + idr_destroy(&device->timelines); kgsl_mmu_close(device); diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h index 03183cc9208f..62ad5304171e 100644 --- a/drivers/gpu/msm/kgsl.h +++ b/drivers/gpu/msm/kgsl.h @@ -425,6 +425,20 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data); long kgsl_ioctl_gpuobj_set_info(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data); +long kgsl_ioctl_gpu_aux_command(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data); +long kgsl_ioctl_timeline_create(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data); +long kgsl_ioctl_timeline_wait(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data); +long kgsl_ioctl_timeline_query(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data); +long kgsl_ioctl_timeline_fence_get(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data); +long kgsl_ioctl_timeline_signal(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data); +long kgsl_ioctl_timeline_destroy(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data); long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data); diff --git a/drivers/gpu/msm/kgsl_compat.c b/drivers/gpu/msm/kgsl_compat.c index d38ceb0355a1..c9999495ea0b 100644 --- a/drivers/gpu/msm/kgsl_compat.c +++ b/drivers/gpu/msm/kgsl_compat.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2019, 2021, The Linux Foundation. All rights reserved. 
*/ #include "kgsl_device.h" @@ -352,6 +352,20 @@ static const struct kgsl_ioctl kgsl_compat_ioctl_funcs[] = { kgsl_ioctl_sparse_bind), KGSL_IOCTL_FUNC(IOCTL_KGSL_GPU_SPARSE_COMMAND, kgsl_ioctl_gpu_sparse_command), + KGSL_IOCTL_FUNC(IOCTL_KGSL_GPU_AUX_COMMAND, + kgsl_ioctl_gpu_aux_command), + KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMELINE_CREATE, + kgsl_ioctl_timeline_create), + KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMELINE_WAIT, + kgsl_ioctl_timeline_wait), + KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMELINE_FENCE_GET, + kgsl_ioctl_timeline_fence_get), + KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMELINE_QUERY, + kgsl_ioctl_timeline_query), + KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMELINE_SIGNAL, + kgsl_ioctl_timeline_signal), + KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMELINE_DESTROY, + kgsl_ioctl_timeline_destroy), }; long kgsl_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) @@ -370,8 +384,6 @@ long kgsl_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) if (ret == -ENOIOCTLCMD) { if (device->ftbl->compat_ioctl != NULL) return device->ftbl->compat_ioctl(dev_priv, cmd, arg); - - dev_err(device->dev, "invalid ioctl code 0x%08X\n", cmd); } return ret; diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h index 9cb255046d34..90f270c180dd 100644 --- a/drivers/gpu/msm/kgsl_device.h +++ b/drivers/gpu/msm/kgsl_device.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2002,2007-2021, The Linux Foundation. All rights reserved. */ #ifndef __KGSL_DEVICE_H #define __KGSL_DEVICE_H @@ -16,6 +16,73 @@ [_IOC_NR((_cmd))] = \ { .cmd = (_cmd), .func = (_func) } +/** + * kgsl_copy_struct_from_user: copy a struct from userspace + * @dst: Destination address, in kernel space. This buffer must be @ksize + * bytes long. + * @ksize: Size of @dst struct. + * @src: Source address, in userspace. + * @usize: (Alleged) size of @src struct. + * + * Copies a struct from userspace to kernel space, in a way that guarantees + * backwards-compatibility for struct syscall arguments (as long as future + * struct extensions are made such that all new fields are *appended* to the + * old struct, and zeroed-out new fields have the same meaning as the old + * struct). + * + * @ksize is just sizeof(*dst), and @usize should've been passed by userspace. + * The recommended usage is something like the following: + * + * SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize) + * { + * int err; + * struct foo karg = {}; + * + * if (usize > PAGE_SIZE) + * return -E2BIG; + * if (usize < FOO_SIZE_VER0) + * return -EINVAL; + * + * err = kgsl_copy_struct_from_user(&karg, sizeof(karg), uarg, usize); + * if (err) + * return err; + * + * // ... + * } + * + * There are three cases to consider: + * * If @usize == @ksize, then it's copied verbatim. + * * If @usize < @ksize, then the userspace has passed an old struct to a + * newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize) + * are to be zero-filled. + * * If @usize > @ksize, then the userspace has passed a new struct to an + * older kernel. The trailing bytes unknown to the kernel (@usize - @ksize) + * are checked to ensure they are zeroed, otherwise -E2BIG is returned. + * + * Returns (in all cases, some data may have been copied): + * * -E2BIG: (@usize > @ksize) and there are non-zero trailing bytes in @src. + * * -EFAULT: access to userspace failed. 
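+ * * 0: success (the struct was copied; if @usize < @ksize the trailing
+ *      kernel-side bytes are zero-filled).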
+ */ +static __always_inline __must_check int +kgsl_copy_struct_from_user(void *dst, size_t ksize, const void __user *src, + size_t usize) +{ + size_t size = min(ksize, usize); + size_t rest = max(ksize, usize) - size; + + /* Deal with trailing bytes. */ + if (usize < ksize) { + memset(dst + size, 0, rest); + } else if (usize > ksize) { + if (memchr_inv(src + size, 0, rest)) + return -E2BIG; + } + /* Copy the interoperable parts of the struct. */ + if (copy_from_user(dst, src, size)) + return -EFAULT; + return 0; +} + /* * KGSL device state is initialized to INIT when platform_probe * * successfully initialized the device. Once a device has been opened * @@ -322,6 +389,10 @@ struct kgsl_device { unsigned int num_l3_pwrlevels; /* store current L3 vote to determine if we should change our vote */ unsigned int cur_l3_pwrlevel; + /** @timelines: Iterator for assigning IDs to timelines */ + struct idr timelines; + /** @timelines_lock: Spinlock to protect the timelines idr */ + spinlock_t timelines_lock; }; #define KGSL_MMU_DEVICE(_mmu) \ diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c index ce3fd4b0972f..91b3e4e2602f 100644 --- a/drivers/gpu/msm/kgsl_drawobj.c +++ b/drivers/gpu/msm/kgsl_drawobj.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */ /* @@ -19,12 +19,14 @@ */ #include +#include #include "adreno_drawctxt.h" #include "kgsl_compat.h" #include "kgsl_device.h" #include "kgsl_drawobj.h" #include "kgsl_sync.h" +#include "kgsl_timeline.h" #include "kgsl_trace.h" /* @@ -34,41 +36,53 @@ static struct kmem_cache *memobjs_cache; static struct kmem_cache *sparseobjs_cache; -static void free_fence_names(struct kgsl_drawobj_sync *syncobj) +static void syncobj_destroy_object(struct kgsl_drawobj *drawobj) { - unsigned int i; + struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj); + int i; for (i = 0; i < syncobj->numsyncs; i++) { struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i]; - if (event->type == KGSL_CMD_SYNCPOINT_TYPE_FENCE) - kfree(event->info.fences); + if (event->type == KGSL_CMD_SYNCPOINT_TYPE_FENCE) { + struct event_fence_info *priv = event ? 
+ event->priv : NULL; + + if (priv) { + kfree(priv->fences); + kfree(priv); + } + } else if (event->type == KGSL_CMD_SYNCPOINT_TYPE_TIMELINE) { + kfree(event->priv); + } } + + kfree(syncobj->synclist); + kfree(syncobj); +} + +static void cmdobj_destroy_object(struct kgsl_drawobj *drawobj) +{ + kfree(CMDOBJ(drawobj)); +} + +static void timelineobj_destroy_object(struct kgsl_drawobj *drawobj) +{ + kfree(TIMELINEOBJ(drawobj)); +} + +static void sparseobj_destroy_object(struct kgsl_drawobj *drawobj) +{ + kfree(SPARSEOBJ(drawobj)); } void kgsl_drawobj_destroy_object(struct kref *kref) { struct kgsl_drawobj *drawobj = container_of(kref, struct kgsl_drawobj, refcount); - struct kgsl_drawobj_sync *syncobj; kgsl_context_put(drawobj->context); - - switch (drawobj->type) { - case SYNCOBJ_TYPE: - syncobj = SYNCOBJ(drawobj); - free_fence_names(syncobj); - kfree(syncobj->synclist); - kfree(syncobj); - break; - case CMDOBJ_TYPE: - case MARKEROBJ_TYPE: - kfree(CMDOBJ(drawobj)); - break; - case SPARSEOBJ_TYPE: - kfree(SPARSEOBJ(drawobj)); - break; - } + drawobj->destroy_object(drawobj); } void kgsl_dump_syncpoints(struct kgsl_device *device, @@ -99,13 +113,23 @@ void kgsl_dump_syncpoints(struct kgsl_device *device, } case KGSL_CMD_SYNCPOINT_TYPE_FENCE: { int j; - struct event_fence_info *info = &event->info; + struct event_fence_info *info = event ? + event->priv : NULL; - for (j = 0; j < info->num_fences; j++) + for (j = 0; info && j < info->num_fences; j++) dev_err(device->dev, "[%d] fence: %s\n", i, info->fences[j].name); break; } + case KGSL_CMD_SYNCPOINT_TYPE_TIMELINE: { + int j; + struct event_timeline_info *info = event->priv; + + for (j = 0; info && info[j].timeline; j++) + dev_err(device->dev, "[%d] timeline: %d seqno %lld\n", + i, info[j].timeline, info[j].seqno); + break; + } } } } @@ -156,13 +180,27 @@ static void syncobj_timer(struct timer_list *t) break; case KGSL_CMD_SYNCPOINT_TYPE_FENCE: { int j; - struct event_fence_info *info = &event->info; + struct event_fence_info *info = event ? + event->priv : NULL; - for (j = 0; j < info->num_fences; j++) + for (j = 0; info && j < info->num_fences; j++) dev_err(device->dev, " [%u] FENCE %s\n", i, info->fences[j].name); break; } + case KGSL_CMD_SYNCPOINT_TYPE_TIMELINE: { + int j; + struct event_timeline_info *info = event->priv; + + dev_err(device->dev, " [%u] FENCE %s\n", + i, dma_fence_is_signaled(event->fence) ? 
+ "signaled" : "not signaled"); + + for (j = 0; info && info[j].timeline; j++) + dev_err(device->dev, " TIMELINE %d SEQNO %lld\n", + info[j].timeline, info[j].seqno); + break; + } } } @@ -247,7 +285,30 @@ static void drawobj_destroy_sparse(struct kgsl_drawobj *drawobj) } } -static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj) +static void drawobj_sync_timeline_fence_work(struct irq_work *work) +{ + struct kgsl_drawobj_sync_event *event = container_of(work, + struct kgsl_drawobj_sync_event, work); + + dma_fence_put(event->fence); + kgsl_drawobj_put(&event->syncobj->base); +} + +static void drawobj_sync_timeline_fence_callback(struct dma_fence *f, + struct dma_fence_cb *cb) +{ + struct kgsl_drawobj_sync_event *event = container_of(cb, + struct kgsl_drawobj_sync_event, cb); + + /* + * Mark the event as synced and then fire off a worker to handle + * removing the fence + */ + if (drawobj_sync_expire(event->device, event)) + irq_work_queue(&event->work); +} + +static void syncobj_destroy(struct kgsl_drawobj *drawobj) { struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj); unsigned int i; @@ -285,6 +346,11 @@ static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj) kgsl_sync_fence_async_cancel(event->handle); kgsl_drawobj_put(drawobj); break; + case KGSL_CMD_SYNCPOINT_TYPE_TIMELINE: + dma_fence_remove_callback(event->fence, &event->cb); + dma_fence_put(event->fence); + kgsl_drawobj_put(drawobj); + break; } } @@ -299,7 +365,20 @@ static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj) } -static void drawobj_destroy_cmd(struct kgsl_drawobj *drawobj) +static void timelineobj_destroy(struct kgsl_drawobj *drawobj) +{ + struct kgsl_drawobj_timeline *timelineobj = TIMELINEOBJ(drawobj); + int i; + + for (i = 0; i < timelineobj->count; i++) + kgsl_timeline_put(timelineobj->timelines[i].timeline); + + kvfree(timelineobj->timelines); + timelineobj->timelines = NULL; + timelineobj->count = 0; +} + +static void cmdobj_destroy(struct kgsl_drawobj *drawobj) { struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj); @@ -330,17 +409,10 @@ static void drawobj_destroy_cmd(struct kgsl_drawobj *drawobj) */ void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj) { - if (!drawobj) + if (IS_ERR_OR_NULL(drawobj)) return; - if (drawobj->type & SYNCOBJ_TYPE) - drawobj_destroy_sync(drawobj); - else if (drawobj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) - drawobj_destroy_cmd(drawobj); - else if (drawobj->type == SPARSEOBJ_TYPE) - drawobj_destroy_sparse(drawobj); - else - return; + drawobj->destroy(drawobj); kgsl_drawobj_put(drawobj); } @@ -349,11 +421,12 @@ EXPORT_SYMBOL(kgsl_drawobj_destroy); static bool drawobj_sync_fence_func(void *priv) { struct kgsl_drawobj_sync_event *event = priv; + struct event_fence_info *info = event ? event->priv : NULL; int i; - for (i = 0; i < event->info.num_fences; i++) + for (i = 0; info && i < info->num_fences; i++) trace_syncpoint_fence_expire(event->syncobj, - event->info.fences[i].name); + info->fences[i].name); /* * Only call kgsl_drawobj_put() if it's not marked for cancellation @@ -366,21 +439,108 @@ static bool drawobj_sync_fence_func(void *priv) return false; } -/* drawobj_add_sync_fence() - Add a new sync fence syncpoint - * @device: KGSL device - * @syncobj: KGSL sync obj to add the sync point to - * @priv: Private structure passed by the user - * - * Add a new fence sync syncpoint to the sync obj. 
- */ -static int drawobj_add_sync_fence(struct kgsl_device *device, - struct kgsl_drawobj_sync *syncobj, void *priv) +static struct event_timeline_info * +drawobj_get_sync_timeline_priv(void __user *uptr, u64 usize, u32 count) { - struct kgsl_cmd_syncpoint_fence *sync = priv; + int i; + struct event_timeline_info *priv; + + /* Make sure we don't accidently overflow count */ + if (count == UINT_MAX) + return NULL; + + priv = kcalloc(count + 1, sizeof(*priv), GFP_KERNEL); + if (!priv) + return NULL; + + for (i = 0; i < count; i++, uptr += usize) { + struct kgsl_timeline_val val; + + if (kgsl_copy_struct_from_user(&val, sizeof(val), uptr, usize)) + continue; + + priv[i].timeline = val.timeline; + priv[i].seqno = val.seqno; + } + + priv[i].timeline = 0; + return priv; +} + +static int drawobj_add_sync_timeline(struct kgsl_device *device, + + struct kgsl_drawobj_sync *syncobj, void __user *uptr, + u64 usize) +{ + struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); + struct kgsl_cmd_syncpoint_timeline sync; + struct kgsl_drawobj_sync_event *event; + struct dma_fence *fence; + unsigned int id; + int ret; + + if (kgsl_copy_struct_from_user(&sync, sizeof(sync), uptr, usize)) + return -EFAULT; + + fence = kgsl_timelines_to_fence_array(device, sync.timelines, + sync.count, sync.timelines_size, false); + if (IS_ERR(fence)) + return PTR_ERR(fence); + + kref_get(&drawobj->refcount); + + id = syncobj->numsyncs++; + + event = &syncobj->synclist[id]; + + event->id = id; + event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMELINE; + event->syncobj = syncobj; + event->device = device; + event->context = NULL; + event->fence = fence; + init_irq_work(&event->work, drawobj_sync_timeline_fence_work); + + INIT_LIST_HEAD(&event->cb.node); + + event->priv = + drawobj_get_sync_timeline_priv(u64_to_user_ptr(sync.timelines), + sync.timelines_size, sync.count); + + /* Set pending flag before adding callback to avoid race */ + set_bit(event->id, &syncobj->pending); + + ret = dma_fence_add_callback(event->fence, + &event->cb, drawobj_sync_timeline_fence_callback); + + if (ret) { + clear_bit(event->id, &syncobj->pending); + + if (dma_fence_is_signaled(event->fence)) { + trace_syncpoint_fence_expire(syncobj, "signaled"); + dma_fence_put(event->fence); + ret = 0; + } + + kgsl_drawobj_put(drawobj); + } + + return ret; +} + +static int drawobj_add_sync_fence(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void __user *data, + u64 datasize) +{ + struct kgsl_cmd_syncpoint_fence sync; struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); struct kgsl_drawobj_sync_event *event; + struct event_fence_info *priv; unsigned int id, i; + if (kgsl_copy_struct_from_user(&sync, sizeof(sync), data, datasize)) + return -EFAULT; + kref_get(&drawobj->refcount); id = syncobj->numsyncs++; @@ -393,11 +553,14 @@ static int drawobj_add_sync_fence(struct kgsl_device *device, event->device = device; event->context = NULL; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + set_bit(event->id, &syncobj->pending); - event->handle = kgsl_sync_fence_async_wait(sync->fd, - drawobj_sync_fence_func, event, - &event->info); + event->handle = kgsl_sync_fence_async_wait(sync.fd, + drawobj_sync_fence_func, event, priv); + + event->priv = priv; if (IS_ERR_OR_NULL(event->handle)) { int ret = PTR_ERR(event->handle); @@ -417,8 +580,8 @@ static int drawobj_add_sync_fence(struct kgsl_device *device, return ret; } - for (i = 0; i < event->info.num_fences; i++) - trace_syncpoint_fence(syncobj, event->info.fences[i].name); + for (i = 0; priv && i < priv->num_fences; i++) + 
trace_syncpoint_fence(syncobj, priv->fences[i].name); return 0; } @@ -431,12 +594,13 @@ static int drawobj_add_sync_fence(struct kgsl_device *device, * Add a new sync point timestamp event to the sync obj. */ static int drawobj_add_sync_timestamp(struct kgsl_device *device, - struct kgsl_drawobj_sync *syncobj, void *priv) + struct kgsl_drawobj_sync *syncobj, + struct kgsl_cmd_syncpoint_timestamp *timestamp) + { - struct kgsl_cmd_syncpoint_timestamp *sync = priv; struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); struct kgsl_context *context = kgsl_context_get(device, - sync->context_id); + timestamp->context_id); struct kgsl_drawobj_sync_event *event; int ret = -EINVAL; unsigned int id; @@ -457,10 +621,10 @@ static int drawobj_add_sync_timestamp(struct kgsl_device *device, kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED, &queued); - if (timestamp_cmp(sync->timestamp, queued) > 0) { + if (timestamp_cmp(timestamp->timestamp, queued) > 0) { dev_err(device->dev, "Cannot create syncpoint for future timestamp %d (current %d)\n", - sync->timestamp, queued); + timestamp->timestamp, queued); goto done; } } @@ -475,19 +639,20 @@ static int drawobj_add_sync_timestamp(struct kgsl_device *device, event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP; event->syncobj = syncobj; event->context = context; - event->timestamp = sync->timestamp; + event->timestamp = timestamp->timestamp; event->device = device; set_bit(event->id, &syncobj->pending); - ret = kgsl_add_event(device, &context->events, sync->timestamp, + ret = kgsl_add_event(device, &context->events, timestamp->timestamp, drawobj_sync_func, event); if (ret) { clear_bit(event->id, &syncobj->pending); kgsl_drawobj_put(drawobj); } else { - trace_syncpoint_timestamp(syncobj, context, sync->timestamp); + trace_syncpoint_timestamp(syncobj, context, + timestamp->timestamp); } done: @@ -497,6 +662,19 @@ static int drawobj_add_sync_timestamp(struct kgsl_device *device, return ret; } +static int drawobj_add_sync_timestamp_from_user(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void __user *data, + u64 datasize) +{ + struct kgsl_cmd_syncpoint_timestamp timestamp; + + if (kgsl_copy_struct_from_user(×tamp, sizeof(timestamp), + data, datasize)) + return -EFAULT; + + return drawobj_add_sync_timestamp(device, syncobj, ×tamp); +} + /** * kgsl_drawobj_sync_add_sync() - Add a sync point to a command * batch @@ -511,44 +689,22 @@ int kgsl_drawobj_sync_add_sync(struct kgsl_device *device, struct kgsl_drawobj_sync *syncobj, struct kgsl_cmd_syncpoint *sync) { - void *priv; - int ret, psize; struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); - int (*func)(struct kgsl_device *device, - struct kgsl_drawobj_sync *syncobj, - void *priv); - switch (sync->type) { - case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: - psize = sizeof(struct kgsl_cmd_syncpoint_timestamp); - func = drawobj_add_sync_timestamp; - break; - case KGSL_CMD_SYNCPOINT_TYPE_FENCE: - psize = sizeof(struct kgsl_cmd_syncpoint_fence); - func = drawobj_add_sync_fence; - break; - default: - dev_err(device->dev, - "bad syncpoint type ctxt %d type 0x%x size %zu\n", - drawobj->context->id, sync->type, sync->size); - return -EINVAL; - } + if (sync->type == KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP) + return drawobj_add_sync_timestamp_from_user(device, + syncobj, sync->priv, sync->size); + else if (sync->type == KGSL_CMD_SYNCPOINT_TYPE_FENCE) + return drawobj_add_sync_fence(device, + syncobj, sync->priv, sync->size); + else if (sync->type == KGSL_CMD_SYNCPOINT_TYPE_TIMELINE) + return drawobj_add_sync_timeline(device, 
+ syncobj, sync->priv, sync->size); - if (sync->size != psize) { - dev_err(device->dev, - "bad syncpoint size ctxt %d type 0x%x size %zu\n", - drawobj->context->id, sync->type, sync->size); - return -EINVAL; - } + dev_err(device->dev, "bad syncpoint type %d for ctxt %d\n", + sync->type, drawobj->context->id); - priv = memdup_user(sync->priv, sync->size); - if (IS_ERR(priv)) - return PTR_ERR(priv); - - ret = func(device, syncobj, priv); - kfree(priv); - - return ret; + return -EINVAL; } static void add_profiling_buffer(struct kgsl_device *device, @@ -558,6 +714,7 @@ static void add_profiling_buffer(struct kgsl_device *device, { struct kgsl_mem_entry *entry; struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); + u64 start; if (!(drawobj->flags & KGSL_DRAWOBJ_PROFILING)) return; @@ -574,7 +731,14 @@ static void add_profiling_buffer(struct kgsl_device *device, gpuaddr); if (entry != NULL) { - if (!kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size)) { + start = id ? (entry->memdesc.gpuaddr + offset) : gpuaddr; + /* + * Make sure there is enough room in the object to store the + * entire profiling buffer object + */ + if (!kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size) || + !kgsl_gpuaddr_in_memdesc(&entry->memdesc, start, + sizeof(struct kgsl_drawobj_profiling_buffer))) { kgsl_mem_entry_put(entry); entry = NULL; } @@ -587,28 +751,7 @@ static void add_profiling_buffer(struct kgsl_device *device, return; } - - if (!id) { - cmdobj->profiling_buffer_gpuaddr = gpuaddr; - } else { - u64 off = offset + sizeof(struct kgsl_drawobj_profiling_buffer); - - /* - * Make sure there is enough room in the object to store the - * entire profiling buffer object - */ - if (off < offset || off >= entry->memdesc.size) { - dev_err(device->dev, - "ignore invalid profile offset ctxt %d id %d offset %lld gpuaddr %llx size %lld\n", - drawobj->context->id, id, offset, gpuaddr, size); - kgsl_mem_entry_put(entry); - return; - } - - cmdobj->profiling_buffer_gpuaddr = - entry->memdesc.gpuaddr + offset; - } - + cmdobj->profiling_buffer_gpuaddr = start; cmdobj->profiling_buf_entry = entry; } @@ -673,26 +816,16 @@ int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device, return 0; } -static void *_drawobj_create(struct kgsl_device *device, - struct kgsl_context *context, unsigned int size, - unsigned int type) +static int drawobj_init(struct kgsl_device *device, + struct kgsl_context *context, struct kgsl_drawobj *drawobj, + int type) { - void *obj = kzalloc(size, GFP_KERNEL); - struct kgsl_drawobj *drawobj; - - if (obj == NULL) - return ERR_PTR(-ENOMEM); - /* * Increase the reference count on the context so it doesn't disappear * during the lifetime of this object */ - if (!_kgsl_context_get(context)) { - kfree(obj); - return ERR_PTR(-ENOENT); - } - - drawobj = obj; + if (!_kgsl_context_get(context)) + return -ENOENT; kref_init(&drawobj->refcount); @@ -700,7 +833,92 @@ static void *_drawobj_create(struct kgsl_device *device, drawobj->context = context; drawobj->type = type; - return obj; + return 0; +} + +struct kgsl_drawobj_timeline * +kgsl_drawobj_timeline_create(struct kgsl_device *device, + struct kgsl_context *context) +{ + int ret; + struct kgsl_drawobj_timeline *timelineobj = + kzalloc(sizeof(*timelineobj), GFP_KERNEL); + + if (!timelineobj) + return ERR_PTR(-ENOMEM); + + ret = drawobj_init(device, context, &timelineobj->base, + TIMELINEOBJ_TYPE); + if (ret) { + kfree(timelineobj); + return ERR_PTR(ret); + } + + timelineobj->base.destroy = timelineobj_destroy; + timelineobj->base.destroy_object = 
timelineobj_destroy_object; + + return timelineobj; +} + +int kgsl_drawobj_add_timeline(struct kgsl_device_private *dev_priv, + struct kgsl_drawobj_timeline *timelineobj, + void __user *src, u64 cmdsize) +{ + struct kgsl_gpu_aux_command_timeline cmd; + int i, ret; + + if (kgsl_copy_struct_from_user(&cmd, sizeof(cmd), src, cmdsize)) + return -EFAULT; + + if (!cmd.count) + return -EINVAL; + + timelineobj->timelines = kvcalloc(cmd.count, + sizeof(*timelineobj->timelines), + GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); + if (!timelineobj->timelines) + return -ENOMEM; + + src = u64_to_user_ptr(cmd.timelines); + + for (i = 0; i < cmd.count; i++) { + struct kgsl_timeline_val val; + + if (kgsl_copy_struct_from_user(&val, sizeof(val), src, + cmd.timelines_size)) { + ret = -EFAULT; + goto err; + } + + if (val.padding) { + ret = -EINVAL; + goto err; + } + + timelineobj->timelines[i].timeline = + kgsl_timeline_by_id(dev_priv->device, + val.timeline); + + if (!timelineobj->timelines[i].timeline) { + ret = -ENODEV; + goto err; + } + + trace_kgsl_drawobj_timeline(val.timeline, val.seqno); + timelineobj->timelines[i].seqno = val.seqno; + + src += cmd.timelines_size; + } + + timelineobj->count = cmd.count; + return 0; +err: + for (i = 0; i < cmd.count; i++) + kgsl_timeline_put(timelineobj->timelines[i].timeline); + + kvfree(timelineobj->timelines); + timelineobj->timelines = NULL; + return ret; } /** @@ -715,11 +933,24 @@ struct kgsl_drawobj_sparse *kgsl_drawobj_sparse_create( struct kgsl_device *device, struct kgsl_context *context, unsigned int flags) { - struct kgsl_drawobj_sparse *sparseobj = _drawobj_create(device, - context, sizeof(*sparseobj), SPARSEOBJ_TYPE); + int ret; + struct kgsl_drawobj_sparse *sparseobj = + kzalloc(sizeof(*sparseobj), GFP_KERNEL); - if (!IS_ERR(sparseobj)) - INIT_LIST_HEAD(&sparseobj->sparselist); + if (!sparseobj) + return ERR_PTR(-ENOMEM); + + ret = drawobj_init(device, + context, &sparseobj->base, SPARSEOBJ_TYPE); + if (ret) { + kfree(sparseobj); + return ERR_PTR(ret); + } + + INIT_LIST_HEAD(&sparseobj->sparselist); + + sparseobj->base.destroy = drawobj_destroy_sparse; + sparseobj->base.destroy_object = sparseobj_destroy_object; return sparseobj; } @@ -735,12 +966,23 @@ struct kgsl_drawobj_sparse *kgsl_drawobj_sparse_create( struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device, struct kgsl_context *context) { - struct kgsl_drawobj_sync *syncobj = _drawobj_create(device, - context, sizeof(*syncobj), SYNCOBJ_TYPE); + struct kgsl_drawobj_sync *syncobj = + kzalloc(sizeof(*syncobj), GFP_KERNEL); + int ret; - /* Add a timer to help debug sync deadlocks */ - if (!IS_ERR(syncobj)) - timer_setup(&syncobj->timer, syncobj_timer, 0); + if (!syncobj) + return ERR_PTR(-ENOMEM); + + ret = drawobj_init(device, context, &syncobj->base, SYNCOBJ_TYPE); + if (ret) { + kfree(syncobj); + return ERR_PTR(ret); + } + + syncobj->base.destroy = syncobj_destroy; + syncobj->base.destroy_object = syncobj_destroy_object; + + timer_setup(&syncobj->timer, syncobj_timer, 0); return syncobj; } @@ -759,27 +1001,37 @@ struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device, struct kgsl_context *context, unsigned int flags, unsigned int type) { - struct kgsl_drawobj_cmd *cmdobj = _drawobj_create(device, - context, sizeof(*cmdobj), + struct kgsl_drawobj_cmd *cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL); + int ret; + + if (!cmdobj) + return ERR_PTR(-ENOMEM); + + ret = drawobj_init(device, context, &cmdobj->base, (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE))); - - if 
(!IS_ERR(cmdobj)) { - /* sanitize our flags for drawobj's */ - cmdobj->base.flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH - | KGSL_DRAWOBJ_MARKER - | KGSL_DRAWOBJ_END_OF_FRAME - | KGSL_DRAWOBJ_PWR_CONSTRAINT - | KGSL_DRAWOBJ_MEMLIST - | KGSL_DRAWOBJ_PROFILING - | KGSL_DRAWOBJ_PROFILING_KTIME); - - INIT_LIST_HEAD(&cmdobj->cmdlist); - INIT_LIST_HEAD(&cmdobj->memlist); - - if (type & CMDOBJ_TYPE) - atomic_inc(&context->proc_priv->cmd_count); + if (ret) { + kfree(cmdobj); + return ERR_PTR(ret); } + cmdobj->base.destroy = cmdobj_destroy; + cmdobj->base.destroy_object = cmdobj_destroy_object; + + /* sanitize our flags for drawobjs */ + cmdobj->base.flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH + | KGSL_DRAWOBJ_MARKER + | KGSL_DRAWOBJ_END_OF_FRAME + | KGSL_DRAWOBJ_PWR_CONSTRAINT + | KGSL_DRAWOBJ_MEMLIST + | KGSL_DRAWOBJ_PROFILING + | KGSL_DRAWOBJ_PROFILING_KTIME); + + INIT_LIST_HEAD(&cmdobj->cmdlist); + INIT_LIST_HEAD(&cmdobj->memlist); + + if (type & CMDOBJ_TYPE) + atomic_inc(&context->proc_priv->cmd_count); + return cmdobj; } @@ -1114,6 +1366,36 @@ int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device, return 0; } +struct kgsl_drawobj_sync * +kgsl_drawobj_create_timestamp_syncobj(struct kgsl_device *device, + struct kgsl_context *context, unsigned int timestamp) +{ + struct kgsl_drawobj_sync *syncobj; + struct kgsl_cmd_syncpoint_timestamp priv; + int ret; + + syncobj = kgsl_drawobj_sync_create(device, context); + if (IS_ERR(syncobj)) + return syncobj; + + syncobj->synclist = kzalloc(sizeof(*syncobj->synclist), GFP_KERNEL); + if (!syncobj->synclist) { + kgsl_drawobj_destroy(DRAWOBJ(syncobj)); + return ERR_PTR(-ENOMEM); + } + + priv.timestamp = timestamp; + priv.context_id = context->id; + + ret = drawobj_add_sync_timestamp(device, syncobj, &priv); + if (ret) { + kgsl_drawobj_destroy(DRAWOBJ(syncobj)); + return ERR_PTR(ret); + } + + return syncobj; +} + int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device, struct kgsl_drawobj_sync *syncobj, void __user *ptr, unsigned int size, unsigned int count) diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h index 7f3dd2624a5f..f61c5d6842c6 100644 --- a/drivers/gpu/msm/kgsl_drawobj.h +++ b/drivers/gpu/msm/kgsl_drawobj.h @@ -1,11 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2019, 2021, The Linux Foundation. All rights reserved. 
*/ #ifndef __KGSL_DRAWOBJ_H #define __KGSL_DRAWOBJ_H +#include +#include #include #define DRAWOBJ(obj) (&obj->base) @@ -15,11 +17,14 @@ container_of(obj, struct kgsl_drawobj_cmd, base) #define SPARSEOBJ(obj) \ container_of(obj, struct kgsl_drawobj_sparse, base) +#define TIMELINEOBJ(obj) \ + container_of(obj, struct kgsl_drawobj_timeline, base) #define CMDOBJ_TYPE BIT(0) #define MARKEROBJ_TYPE BIT(1) #define SYNCOBJ_TYPE BIT(2) #define SPARSEOBJ_TYPE BIT(3) +#define TIMELINEOBJ_TYPE BIT(4) /** * struct kgsl_drawobj - KGSL drawobj descriptor @@ -37,6 +42,10 @@ struct kgsl_drawobj { uint32_t timestamp; unsigned long flags; struct kref refcount; + /** @destroy: Callback function to take down the object */ + void (*destroy)(struct kgsl_drawobj *drawobj); + /** @destroy_object: Callback function to free the object memory */ + void (*destroy_object)(struct kgsl_drawobj *drawobj); }; /** @@ -100,6 +109,22 @@ struct kgsl_drawobj_sync { unsigned long timeout_jiffies; }; +/** + * struct kgsl_drawobj_timeline - KGSL timeline signal operation + */ +struct kgsl_drawobj_timeline { + /** @base: &struct kgsl_drawobj container */ + struct kgsl_drawobj base; + struct { + /** @timeline: Pointer to a &struct kgsl_timeline */ + struct kgsl_timeline *timeline; + /** @seqno: Sequence number to signal */ + u64 seqno; + } *timelines; + /** @count: Number of items in timelines */ + int count; +}; + #define KGSL_FENCE_NAME_LEN 74 struct fence_info { @@ -111,9 +136,14 @@ struct event_fence_info { int num_fences; }; +struct event_timeline_info { + u64 seqno; + u32 timeline; +}; + /** * struct kgsl_drawobj_sync_event - * @id: identifer (positiion within the pending bitmap) + * @id: Identifer (position within the pending bitmap) * @type: Syncpoint type * @syncobj: Pointer to the syncobj that owns the sync event * @context: KGSL context for whose timestamp we want to @@ -121,7 +151,6 @@ struct event_fence_info { * @timestamp: Pending timestamp for the event * @handle: Pointer to a sync fence handle * @device: Pointer to the KGSL device - * @info: structure to hold info about the fence */ struct kgsl_drawobj_sync_event { unsigned int id; @@ -131,7 +160,17 @@ struct kgsl_drawobj_sync_event { unsigned int timestamp; struct kgsl_sync_fence_cb *handle; struct kgsl_device *device; - struct event_fence_info info; + /** @priv: Type specific private information */ + void *priv; + /** + * @fence: Pointer to a dma fence for KGSL_CMD_SYNCPOINT_TYPE_TIMELINE + * events + */ + struct dma_fence *fence; + /** @cb: Callback struct for KGSL_CMD_SYNCPOINT_TYPE_TIMELINE */ + struct dma_fence_cb cb; + /** @work : irq worker for KGSL_CMD_SYNCPOINT_TYPE_TIMELINE */ + struct irq_work work; }; /** @@ -240,4 +279,44 @@ static inline void kgsl_drawobj_put(struct kgsl_drawobj *drawobj) kref_put(&drawobj->refcount, kgsl_drawobj_destroy_object); } +/** + * kgsl_drawobj_create_timestamp_syncobj - Create a syncobj for a timestamp + * @device: A GPU device handle + * @context: Draw context for the syncobj + * @timestamp: Timestamp to sync on + * + * Create a sync object for @timestamp on @context. + * Return: A pointer to the sync object + */ +struct kgsl_drawobj_sync * +kgsl_drawobj_create_timestamp_syncobj(struct kgsl_device *device, + struct kgsl_context *context, unsigned int timestamp); + + +/** + * kgsl_drawobj_timeline_create - Create a timeline draw object + * @device: A GPU device handle + * @context: Draw context for the drawobj + * + * Create a timeline draw object on @context. 
+ * Return: A pointer to the draw object + */ +struct kgsl_drawobj_timeline * +kgsl_drawobj_timeline_create(struct kgsl_device *device, + struct kgsl_context *context); + +/** + * kgsl_drawobj_add_timeline - Add a timeline to a timeline drawobj + * @dev_priv: Pointer to the process private data + * @timelineobj: Pointer to a timeline drawobject + * @src: Pointer to the &struct kgsl_timeline_val from userspace + * @cmdsize: size of the object in @src + * + * Add a timeline to a draw object. + * Return: 0 on success or negative on failure + */ +int kgsl_drawobj_add_timeline(struct kgsl_device_private *dev_priv, + struct kgsl_drawobj_timeline *timelineobj, + void __user *src, u64 cmdsize); + #endif /* __KGSL_DRAWOBJ_H */ diff --git a/drivers/gpu/msm/kgsl_ioctl.c b/drivers/gpu/msm/kgsl_ioctl.c index 8df9166bec50..5492f046e626 100644 --- a/drivers/gpu/msm/kgsl_ioctl.c +++ b/drivers/gpu/msm/kgsl_ioctl.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2008-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2008-2019, 2021, The Linux Foundation. All rights reserved. */ #include "kgsl_device.h" @@ -84,6 +84,20 @@ static const struct kgsl_ioctl kgsl_ioctl_funcs[] = { kgsl_ioctl_sparse_bind), KGSL_IOCTL_FUNC(IOCTL_KGSL_GPU_SPARSE_COMMAND, kgsl_ioctl_gpu_sparse_command), + KGSL_IOCTL_FUNC(IOCTL_KGSL_GPU_AUX_COMMAND, + kgsl_ioctl_gpu_aux_command), + KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMELINE_CREATE, + kgsl_ioctl_timeline_create), + KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMELINE_WAIT, + kgsl_ioctl_timeline_wait), + KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMELINE_FENCE_GET, + kgsl_ioctl_timeline_fence_get), + KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMELINE_QUERY, + kgsl_ioctl_timeline_query), + KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMELINE_SIGNAL, + kgsl_ioctl_timeline_signal), + KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMELINE_DESTROY, + kgsl_ioctl_timeline_destroy), }; long kgsl_ioctl_copy_in(unsigned int kernel_cmd, unsigned int user_cmd, @@ -167,8 +181,6 @@ long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) return device->ftbl->compat_ioctl(dev_priv, cmd, arg); else if (device->ftbl->ioctl != NULL) return device->ftbl->ioctl(dev_priv, cmd, arg); - - dev_err(device->dev, "invalid ioctl code 0x%08X\n", cmd); } return ret; diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c index 9c768458a7c7..ecb05b9b9a06 100644 --- a/drivers/gpu/msm/kgsl_pool.c +++ b/drivers/gpu/msm/kgsl_pool.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */ #include @@ -176,9 +176,7 @@ kgsl_pool_reduce(unsigned int target_pages, bool exit) if (!pool->allocation_allowed && !exit) continue; - total_pages -= pcount; - - nr_removed = total_pages - target_pages; + nr_removed = total_pages - target_pages - pcount; if (nr_removed <= 0) return pcount; diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c index a834ad1bd36a..621ab3db5a49 100644 --- a/drivers/gpu/msm/kgsl_sharedmem.c +++ b/drivers/gpu/msm/kgsl_sharedmem.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2002,2007-2021, The Linux Foundation. All rights reserved.
*/ #include @@ -171,7 +171,18 @@ imported_mem_show(struct kgsl_process_private *priv, } } - kgsl_mem_entry_put(entry); + /* + * If refcount on mem entry is the last refcount, we will + * call kgsl_mem_entry_destroy and detach it from process + * list. When there is no refcount on the process private, + * we will call kgsl_destroy_process_private to do cleanup. + * During cleanup, we will try to remove the same sysfs + * node which is in use by the current thread and this + * situation will end up in a deadloack. + * To avoid this situation, use a worker to put the refcount + * on mem entry. + */ + kgsl_mem_entry_put_deferred(entry); spin_lock(&priv->mem_lock); } spin_unlock(&priv->mem_lock); @@ -247,13 +258,9 @@ static ssize_t process_sysfs_store(struct kobject *kobj, return -EIO; } +/* Dummy release function - we have nothing to do here */ static void process_sysfs_release(struct kobject *kobj) { - struct kgsl_process_private *priv; - - priv = container_of(kobj, struct kgsl_process_private, kobj); - /* Put the refcount we got in kgsl_process_init_sysfs */ - kgsl_process_private_put(priv); } static const struct sysfs_ops process_sysfs_ops = { @@ -301,9 +308,6 @@ void kgsl_process_init_sysfs(struct kgsl_device *device, { int i; - /* Keep private valid until the sysfs enries are removed. */ - kgsl_process_private_get(private); - if (kobject_init_and_add(&private->kobj, &process_ktype, kgsl_driver.prockobj, "%d", pid_nr(private->pid))) { dev_err(device->dev, "Unable to add sysfs for process %d\n", @@ -1236,7 +1240,8 @@ void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc) if (memdesc->sgt) { sg_free_table(memdesc->sgt); - kvfree(memdesc->sgt); + kfree(memdesc->sgt); + memdesc->sgt = NULL; } memdesc->page_count = 0; diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c index a90b9e359571..229f8baa7822 100644 --- a/drivers/gpu/msm/kgsl_sync.c +++ b/drivers/gpu/msm/kgsl_sync.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. */ #include @@ -249,27 +249,41 @@ static void kgsl_sync_timeline_value_str(struct dma_fence *fence, { struct kgsl_sync_fence *kfence = (struct kgsl_sync_fence *)fence; struct kgsl_sync_timeline *ktimeline = kfence->parent; + struct kgsl_context *context = NULL; + unsigned long flags; + int ret = 0; - unsigned int timestamp_retired = 0; - unsigned int timestamp_queued = 0; + unsigned int timestamp_retired; + unsigned int timestamp_queued; if (!kref_get_unless_zero(&ktimeline->kref)) return; + if (!ktimeline->device) + goto put_timeline; - /* - * ktimeline->device might be NULL here but kgsl_readtimestamp() - * will handle that correctly - */ - kgsl_readtimestamp(ktimeline->device, ktimeline->context, - KGSL_TIMESTAMP_RETIRED, ×tamp_retired); + spin_lock_irqsave(&ktimeline->lock, flags); + ret = _kgsl_context_get(ktimeline->context); + context = ret ? 
ktimeline->context : NULL; + spin_unlock_irqrestore(&ktimeline->lock, flags); - kgsl_readtimestamp(ktimeline->device, ktimeline->context, - KGSL_TIMESTAMP_QUEUED, ×tamp_queued); + /* Get the last signaled timestamp if the context is not valid */ + timestamp_queued = ktimeline->last_timestamp; + timestamp_retired = timestamp_queued; + if (context) { + kgsl_readtimestamp(ktimeline->device, context, + KGSL_TIMESTAMP_RETIRED, ×tamp_retired); + + kgsl_readtimestamp(ktimeline->device, context, + KGSL_TIMESTAMP_QUEUED, ×tamp_queued); + + kgsl_context_put(context); + } snprintf(str, size, "%u queued:%u retired:%u", ktimeline->last_timestamp, timestamp_queued, timestamp_retired); +put_timeline: kgsl_sync_timeline_put(ktimeline); } @@ -298,7 +312,7 @@ int kgsl_sync_timeline_create(struct kgsl_context *context) { struct kgsl_sync_timeline *ktimeline; - /* Put context when timeline is released */ + /* Put context at detach time */ if (!_kgsl_context_get(context)) return -ENOENT; @@ -319,6 +333,11 @@ int kgsl_sync_timeline_create(struct kgsl_context *context) INIT_LIST_HEAD(&ktimeline->child_list_head); spin_lock_init(&ktimeline->lock); ktimeline->device = context->device; + + /* + * The context pointer is valid till detach time, where we put the + * refcount on the context + */ ktimeline->context = context; context->ktimeline = ktimeline; @@ -351,30 +370,31 @@ static void kgsl_sync_timeline_signal(struct kgsl_sync_timeline *ktimeline, kgsl_sync_timeline_put(ktimeline); } -void kgsl_sync_timeline_destroy(struct kgsl_context *context) +void kgsl_sync_timeline_detach(struct kgsl_sync_timeline *ktimeline) { - struct kgsl_sync_timeline *ktimeline = context->ktimeline; + unsigned long flags; + struct kgsl_context *context = ktimeline->context; + + /* Set context pointer to NULL and drop our refcount on the context */ + spin_lock_irqsave(&ktimeline->lock, flags); + ktimeline->context = NULL; + spin_unlock_irqrestore(&ktimeline->lock, flags); + kgsl_context_put(context); +} + +static void kgsl_sync_timeline_destroy(struct kref *kref) +{ + struct kgsl_sync_timeline *ktimeline = + container_of(kref, struct kgsl_sync_timeline, kref); kfree(ktimeline->name); kfree(ktimeline); } -static void kgsl_sync_timeline_release(struct kref *kref) -{ - struct kgsl_sync_timeline *ktimeline = - container_of(kref, struct kgsl_sync_timeline, kref); - - /* - * Only put the context refcount here. The context destroy function - * will call kgsl_sync_timeline_destroy() to kfree it - */ - kgsl_context_put(ktimeline->context); -} - void kgsl_sync_timeline_put(struct kgsl_sync_timeline *ktimeline) { if (ktimeline) - kref_put(&ktimeline->kref, kgsl_sync_timeline_release); + kref_put(&ktimeline->kref, kgsl_sync_timeline_destroy); } static const struct dma_fence_ops kgsl_sync_fence_ops = { diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h index 43209b1532cb..f49f9e04f255 100644 --- a/drivers/gpu/msm/kgsl_sync.h +++ b/drivers/gpu/msm/kgsl_sync.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2012-2014,2018-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2014,2018-2021 The Linux Foundation. All rights reserved. 
*/ #ifndef __KGSL_SYNC_H #define __KGSL_SYNC_H @@ -9,7 +9,8 @@ /** * struct kgsl_sync_timeline - A sync timeline associated with a kgsl context - * @kref: Refcount to keep the struct alive until all its fences are released + * @kref: Refcount to keep the struct alive until all its fences are signaled, + and as long as the context exists * @name: String to describe this timeline * @fence_context: Used by the fence driver to identify fences belonging to * this context @@ -80,7 +81,7 @@ int kgsl_add_fence_event(struct kgsl_device *device, int kgsl_sync_timeline_create(struct kgsl_context *context); -void kgsl_sync_timeline_destroy(struct kgsl_context *context); +void kgsl_sync_timeline_detach(struct kgsl_sync_timeline *ktimeline); void kgsl_sync_timeline_put(struct kgsl_sync_timeline *ktimeline); @@ -118,7 +119,8 @@ static inline int kgsl_sync_timeline_create(struct kgsl_context *context) return 0; } -static inline void kgsl_sync_timeline_destroy(struct kgsl_context *context) +static inline void kgsl_sync_timeline_detach( + struct kgsl_sync_timeline *ktimeline) { } diff --git a/drivers/gpu/msm/kgsl_timeline.c b/drivers/gpu/msm/kgsl_timeline.c new file mode 100644 index 000000000000..897fe61a0477 --- /dev/null +++ b/drivers/gpu/msm/kgsl_timeline.c @@ -0,0 +1,565 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include + +#include "kgsl_device.h" +#include "kgsl_sharedmem.h" +#include "kgsl_timeline.h" +#include "kgsl_trace.h" + +struct kgsl_timeline_fence { + struct dma_fence base; + struct kgsl_timeline *timeline; + struct list_head node; +}; + +struct dma_fence *kgsl_timelines_to_fence_array(struct kgsl_device *device, + u64 timelines, u32 count, u64 usize, bool any) +{ + void __user *uptr = u64_to_user_ptr(timelines); + struct dma_fence_array *array; + struct dma_fence **fences; + int i, ret = 0; + + if (!count || count > INT_MAX) + return ERR_PTR(-EINVAL); + + fences = kcalloc(count, sizeof(*fences), + GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); + + if (!fences) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < count; i++) { + struct kgsl_timeline_val val; + struct kgsl_timeline *timeline; + + if (kgsl_copy_struct_from_user(&val, sizeof(val), + uptr, usize)) { + ret = -EFAULT; + goto err; + } + + if (val.padding) { + ret = -EINVAL; + goto err; + } + + timeline = kgsl_timeline_by_id(device, val.timeline); + if (!timeline) { + ret = -ENOENT; + goto err; + } + + fences[i] = kgsl_timeline_fence_alloc(timeline, val.seqno); + kgsl_timeline_put(timeline); + + if (IS_ERR(fences[i])) { + ret = PTR_ERR(fences[i]); + goto err; + } + + uptr += usize; + } + + /* No need for a fence array for only one fence */ + if (count == 1) { + struct dma_fence *fence = fences[0]; + + kfree(fences); + return fence; + } + + array = dma_fence_array_create(count, fences, + dma_fence_context_alloc(1), 0, any); + + if (array) + return &array->base; + + ret = -ENOMEM; +err: + for (i = 0; i < count; i++) { + if (!IS_ERR_OR_NULL(fences[i])) + dma_fence_put(fences[i]); + } + + kfree(fences); + return ERR_PTR(ret); +} + +void kgsl_timeline_destroy(struct kref *kref) +{ + struct kgsl_timeline *timeline = container_of(kref, + struct kgsl_timeline, ref); + + WARN_ON(!list_empty(&timeline->fences)); + + trace_kgsl_timeline_destroy(timeline->id); + + kfree(timeline); +} + +struct kgsl_timeline *kgsl_timeline_get(struct kgsl_timeline *timeline) +{ + if (timeline) { + if (!kref_get_unless_zero(&timeline->ref)) + return 
NULL; + } + + return timeline; +} + +static struct kgsl_timeline *kgsl_timeline_alloc( + struct kgsl_device_private *dev_priv, u64 initial) +{ + struct kgsl_device *device = dev_priv->device; + struct kgsl_timeline *timeline; + int id; + + timeline = kzalloc(sizeof(*timeline), GFP_KERNEL); + if (!timeline) + return ERR_PTR(-ENOMEM); + + idr_preload(GFP_KERNEL); + spin_lock(&device->timelines_lock); + /* Allocate the ID but don't attach the pointer just yet */ + id = idr_alloc(&device->timelines, NULL, 1, 0, GFP_NOWAIT); + spin_unlock(&device->timelines_lock); + idr_preload_end(); + + if (id < 0) { + kfree(timeline); + return ERR_PTR(id); + } + + timeline->context = dma_fence_context_alloc(1); + timeline->id = id; + INIT_LIST_HEAD(&timeline->fences); + timeline->value = initial; + timeline->dev_priv = dev_priv; + + snprintf((char *) timeline->name, sizeof(timeline->name), + "kgsl-sw-timeline-%d", id); + + trace_kgsl_timeline_alloc(id, initial); + + spin_lock_init(&timeline->lock); + spin_lock_init(&timeline->fence_lock); + + kref_init(&timeline->ref); + + return timeline; +} + +static struct kgsl_timeline_fence *to_timeline_fence(struct dma_fence *fence) +{ + return container_of(fence, struct kgsl_timeline_fence, base); +} + +static void timeline_fence_release(struct dma_fence *fence) +{ + struct kgsl_timeline_fence *f = to_timeline_fence(fence); + struct kgsl_timeline *timeline = f->timeline; + struct kgsl_timeline_fence *cur, *temp; + unsigned long flags; + + spin_lock_irqsave(&timeline->fence_lock, flags); + + /* If the fence is still on the active list, remove it */ + list_for_each_entry_safe(cur, temp, &timeline->fences, node) { + if (f != cur) + continue; + + list_del_init(&f->node); + break; + } + spin_unlock_irqrestore(&timeline->fence_lock, flags); + + trace_kgsl_timeline_fence_release(f->timeline->id, fence->seqno); + + kgsl_timeline_put(f->timeline); + dma_fence_free(fence); +} + +static bool timeline_fence_signaled(struct dma_fence *fence) +{ + struct kgsl_timeline_fence *f = to_timeline_fence(fence); + + return !__dma_fence_is_later(fence->seqno, f->timeline->value); +} + +static bool timeline_fence_enable_signaling(struct dma_fence *fence) +{ + /* + * Return value of false indicates the fence already passed. + * When fence is not passed we return true indicating successful + * enabling. 
+ */ + return !timeline_fence_signaled(fence); +} + +static const char *timeline_get_driver_name(struct dma_fence *fence) +{ + return "kgsl-sw-timeline"; +} + +static const char *timeline_get_timeline_name(struct dma_fence *fence) +{ + struct kgsl_timeline_fence *f = to_timeline_fence(fence); + + return f->timeline->name; +} + +static void timeline_get_value_str(struct dma_fence *fence, + char *str, int size) +{ + struct kgsl_timeline_fence *f = to_timeline_fence(fence); + + snprintf(str, size, "%lld", f->timeline->value); +} + +static const struct dma_fence_ops timeline_fence_ops = { + .get_driver_name = timeline_get_driver_name, + .get_timeline_name = timeline_get_timeline_name, + .signaled = timeline_fence_signaled, + .release = timeline_fence_release, + .enable_signaling = timeline_fence_enable_signaling, + .timeline_value_str = timeline_get_value_str, +}; + +static void kgsl_timeline_add_fence(struct kgsl_timeline *timeline, + struct kgsl_timeline_fence *fence) +{ + struct kgsl_timeline_fence *entry; + unsigned long flags; + + spin_lock_irqsave(&timeline->fence_lock, flags); + list_for_each_entry(entry, &timeline->fences, node) { + if (fence->base.seqno < entry->base.seqno) { + list_add_tail(&fence->node, &entry->node); + spin_unlock_irqrestore(&timeline->fence_lock, flags); + return; + } + } + + list_add_tail(&fence->node, &timeline->fences); + spin_unlock_irqrestore(&timeline->fence_lock, flags); +} + +void kgsl_timeline_signal(struct kgsl_timeline *timeline, u64 seqno) +{ + struct kgsl_timeline_fence *fence, *tmp; + struct list_head temp; + + INIT_LIST_HEAD(&temp); + + spin_lock_irq(&timeline->lock); + + if (seqno < timeline->value) + goto unlock; + + trace_kgsl_timeline_signal(timeline->id, seqno); + + timeline->value = seqno; + + spin_lock(&timeline->fence_lock); + list_for_each_entry_safe(fence, tmp, &timeline->fences, node) + if (timeline_fence_signaled(&fence->base) && + kref_get_unless_zero(&fence->base.refcount)) + list_move(&fence->node, &temp); + spin_unlock(&timeline->fence_lock); + + list_for_each_entry_safe(fence, tmp, &temp, node) { + dma_fence_signal_locked(&fence->base); + dma_fence_put(&fence->base); + } + +unlock: + spin_unlock_irq(&timeline->lock); +} + +struct dma_fence *kgsl_timeline_fence_alloc(struct kgsl_timeline *timeline, + u64 seqno) +{ + struct kgsl_timeline_fence *fence; + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (!fence) + return ERR_PTR(-ENOMEM); + + fence->timeline = kgsl_timeline_get(timeline); + if (!fence->timeline) { + kfree(fence); + return ERR_PTR(-ENOENT); + } + + dma_fence_init(&fence->base, &timeline_fence_ops, + &timeline->lock, timeline->context, seqno); + + INIT_LIST_HEAD(&fence->node); + + /* + * Once fence is checked as not signaled, allow it to be added + * in the list before other thread such as kgsl_timeline_signal + * can get chance to signal. 
+ */ + spin_lock_irq(&timeline->lock); + if (!dma_fence_is_signaled_locked(&fence->base)) + kgsl_timeline_add_fence(timeline, fence); + + trace_kgsl_timeline_fence_alloc(timeline->id, seqno); + spin_unlock_irq(&timeline->lock); + + return &fence->base; +} + +long kgsl_ioctl_timeline_create(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + struct kgsl_device *device = dev_priv->device; + struct kgsl_timeline_create *param = data; + struct kgsl_timeline *timeline; + + timeline = kgsl_timeline_alloc(dev_priv, param->seqno); + if (IS_ERR(timeline)) + return PTR_ERR(timeline); + + /* Commit the pointer to the timeline in timline idr */ + spin_lock(&device->timelines_lock); + idr_replace(&device->timelines, timeline, timeline->id); + param->id = timeline->id; + spin_unlock(&device->timelines_lock); + return 0; +} + +struct kgsl_timeline *kgsl_timeline_by_id(struct kgsl_device *device, + u32 id) +{ + struct kgsl_timeline *timeline; + int ret = 0; + + spin_lock(&device->timelines_lock); + timeline = idr_find(&device->timelines, id); + + if (timeline) + ret = kref_get_unless_zero(&timeline->ref); + spin_unlock(&device->timelines_lock); + + return ret ? timeline : NULL; +} + +long kgsl_ioctl_timeline_wait(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + struct kgsl_device *device = dev_priv->device; + struct kgsl_timeline_wait *param = data; + struct dma_fence *fence; + unsigned long timeout; + signed long ret; + + if (param->flags != KGSL_TIMELINE_WAIT_ANY && + param->flags != KGSL_TIMELINE_WAIT_ALL) + return -EINVAL; + + if (param->padding) + return -EINVAL; + + fence = kgsl_timelines_to_fence_array(device, param->timelines, + param->count, param->timelines_size, + (param->flags == KGSL_TIMELINE_WAIT_ANY)); + + if (IS_ERR(fence)) + return PTR_ERR(fence); + + if (param->tv_sec >= KTIME_SEC_MAX) + timeout = MAX_SCHEDULE_TIMEOUT; + else { + ktime_t time = ktime_set(param->tv_sec, param->tv_nsec); + + timeout = msecs_to_jiffies(ktime_to_ms(time)); + } + + trace_kgsl_timeline_wait(param->flags, param->tv_sec, param->tv_nsec); + + /* secs.nsecs to jiffies */ + if (!timeout) + ret = dma_fence_is_signaled(fence) ? 
0 : -EBUSY; + else { + ret = dma_fence_wait_timeout(fence, true, timeout); + + if (!ret) + ret = -ETIMEDOUT; + else if (ret > 0) + ret = 0; + } + + dma_fence_put(fence); + + return ret; +} + +long kgsl_ioctl_timeline_query(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + struct kgsl_timeline_val *param = data; + struct kgsl_timeline *timeline; + + if (param->padding) + return -EINVAL; + + timeline = kgsl_timeline_by_id(dev_priv->device, param->timeline); + if (!timeline) + return -ENODEV; + + param->seqno = timeline->value; + kgsl_timeline_put(timeline); + + return 0; +} + +long kgsl_ioctl_timeline_fence_get(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + struct kgsl_device *device = dev_priv->device; + struct kgsl_timeline_fence_get *param = data; + struct kgsl_timeline *timeline; + struct sync_file *sync_file; + struct dma_fence *fence; + int ret = 0, fd; + + timeline = kgsl_timeline_by_id(device, param->timeline); + if (!timeline) + return -ENODEV; + + fence = kgsl_timeline_fence_alloc(timeline, param->seqno); + + if (IS_ERR(fence)) { + kgsl_timeline_put(timeline); + return PTR_ERR(fence); + } + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + ret = fd; + goto out; + } + + sync_file = sync_file_create(fence); + if (sync_file) { + fd_install(fd, sync_file->file); + param->handle = fd; + } else { + put_unused_fd(fd); + ret = -ENOMEM; + } + +out: + dma_fence_put(fence); + kgsl_timeline_put(timeline); + + return ret; +} + +long kgsl_ioctl_timeline_signal(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + struct kgsl_device *device = dev_priv->device; + struct kgsl_timeline_signal *param = data; + u64 timelines; + int i; + + if (!param->timelines_size) { + param->timelines_size = sizeof(struct kgsl_timeline_val); + return -EAGAIN; + } + + if (!param->count) + return -EINVAL; + + timelines = param->timelines; + + for (i = 0; i < param->count; i++) { + struct kgsl_timeline *timeline; + struct kgsl_timeline_val val; + + if (kgsl_copy_struct_from_user(&val, sizeof(val), + u64_to_user_ptr(timelines), param->timelines_size)) + return -EFAULT; + + if (val.padding) + return -EINVAL; + + timeline = kgsl_timeline_by_id(device, val.timeline); + if (!timeline) + return -ENODEV; + + kgsl_timeline_signal(timeline, val.seqno); + + kgsl_timeline_put(timeline); + + timelines += param->timelines_size; + } + + return 0; +} + +long kgsl_ioctl_timeline_destroy(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + struct kgsl_device *device = dev_priv->device; + struct kgsl_timeline_fence *fence, *tmp; + struct kgsl_timeline *timeline; + struct list_head temp; + u32 *param = data; + + if (*param == 0) + return -ENODEV; + + spin_lock(&device->timelines_lock); + timeline = idr_find(&device->timelines, *param); + + if (timeline == NULL) { + spin_unlock(&device->timelines_lock); + return -ENODEV; + } + + /* + * Validate that the id given is owned by the dev_priv + * instance that is passed in. If not, abort. 
+ */ + if (timeline->dev_priv != dev_priv) { + spin_unlock(&device->timelines_lock); + return -EINVAL; + } + + idr_remove(&device->timelines, timeline->id); + spin_unlock(&device->timelines_lock); + + INIT_LIST_HEAD(&temp); + + spin_lock(&timeline->fence_lock); + list_for_each_entry_safe(fence, tmp, &timeline->fences, node) + if (!kref_get_unless_zero(&fence->base.refcount)) + list_del_init(&fence->node); + list_replace_init(&timeline->fences, &temp); + spin_unlock(&timeline->fence_lock); + + spin_lock_irq(&timeline->lock); + list_for_each_entry_safe(fence, tmp, &temp, node) { + dma_fence_set_error(&fence->base, -ENOENT); + dma_fence_signal_locked(&fence->base); + dma_fence_put(&fence->base); + } + spin_unlock_irq(&timeline->lock); + + kgsl_timeline_put(timeline); + + return timeline ? 0 : -ENODEV; +} diff --git a/drivers/gpu/msm/kgsl_timeline.h b/drivers/gpu/msm/kgsl_timeline.h new file mode 100644 index 000000000000..a1eef003605d --- /dev/null +++ b/drivers/gpu/msm/kgsl_timeline.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef __KGSL_TIMELINE_H +#define __KGSL_TIMELINE_H + +/** + * struct kgsl_timeline - Container for a timeline object + */ +struct kgsl_timeline { + /** @context: dma-fence timeline context */ + u64 context; + /** @id: Timeline identifier */ + int id; + /** @value: Current value of the timeline */ + u64 value; + /** @fence_lock: Lock to protect @fences */ + spinlock_t fence_lock; + /** @lock: Lock to use for locking each fence in @fences */ + spinlock_t lock; + /** @ref: Reference count for the struct */ + struct kref ref; + /** @fences: sorted list of active fences */ + struct list_head fences; + /** @name: Name of the timeline for debugging */ + const char name[32]; + /** @dev_priv: pointer to the owning device instance */ + struct kgsl_device_private *dev_priv; +}; + +/** + * kgsl_timeline_signal - Signal the timeline + * @timeline: Pointer to a timeline container + * @seqno: Sequence number to signal + * + * Advance @timeline to sequence number @seqno and signal any fences that might + * have expired. + */ +void kgsl_timeline_signal(struct kgsl_timeline *timeline, u64 seqno); + +/** + * kgsl_timeline_destroy - Timeline destroy callback + * @kref: Refcount pointer for the timeline + * + * Reference count callback for the timeline called when all the object + * references have been released. + */ +void kgsl_timeline_destroy(struct kref *kref); + +/** + * kgsl_timeline_fence_alloc - Allocate a new fence on a timeline + * @timeline: Pointer to a timeline container + * @seqno: Sequence number for the new fence to wait for + * + * Create and return a new fence on the timeline that will expire when the + * timeline value is greater or equal to @seqno. + * Return: A pointer to the newly created fence + */ +struct dma_fence *kgsl_timeline_fence_alloc(struct kgsl_timeline *timeline, + u64 seqno); + +/** + * kgsl_timeline_by_id - Look up a timeline by an id + * @device: A KGSL device handle + * @id: Lookup identifier + * + * Find and return the timeline associated with identifier @id. + * Return: A pointer to a timeline or PTR_ERR() encoded error on failure. + */ +struct kgsl_timeline *kgsl_timeline_by_id(struct kgsl_device *device, + u32 id); + +/** + * kgsl_timeline_get - Get a reference to an existing timeline + * @timeline: Pointer to a timeline container + * + * Get a new reference to the timeline and return the pointer back to the user.
+ * Return: The pointer to the timeline or PTR_ERR encoded error on failure + */ +struct kgsl_timeline *kgsl_timeline_get(struct kgsl_timeline *timeline); + +/** + * kgsl_timeline_put - Release a reference to a timeline + * @timeline: Pointer to a timeline container + * + * Release a reference to a timeline and destroy it if there are no other + * references + */ +static inline void kgsl_timeline_put(struct kgsl_timeline *timeline) +{ + if (!IS_ERR_OR_NULL(timeline)) + kref_put(&timeline->ref, kgsl_timeline_destroy); +} + +/** + * kgsl_timelines_to_fence_array - Return a dma-fence array of timeline fences + * @device: A KGSL device handle + * @timelines: Userspace pointer to an array of &struct kgsl_timeline_val + * @count: Number of entries in @timelines + * @usize: Size of each entry in @timelines + * @any: True if the fence should expire on any timeline expiring or false if it + * should wait until all timelines have expired + * + * Give a list of &struct kgsl_timeline_val entries, create a dma-fence-array + * containing fences for each timeline/seqno pair. If @any is set the + * dma-fence-array will be set to expire if any of the encapsulated timeline + * fences expire. If @any is false, then the fence will wait for ALL of the + * encapsulated timeline fences to expire. + */ +struct dma_fence *kgsl_timelines_to_fence_array(struct kgsl_device *device, + u64 timelines, u32 count, u64 usize, bool any); + +#endif diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h index 501d97eb0e3a..60fbe5b03348 100644 --- a/drivers/gpu/msm/kgsl_trace.h +++ b/drivers/gpu/msm/kgsl_trace.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2011-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2021, The Linux Foundation. All rights reserved. 
*/ #if !defined(_KGSL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) @@ -1292,6 +1292,183 @@ TRACE_EVENT(kgsl_opp_notify, __entry->min_freq, __entry->max_freq ) ); + +TRACE_EVENT(kgsl_timeline_alloc, + TP_PROTO( + u32 id, + u64 seqno + ), + TP_ARGS( + id, + seqno + ), + TP_STRUCT__entry( + __field(u32, id) + __field(u64, seqno) + ), + TP_fast_assign( + __entry->id = id; + __entry->seqno = seqno; + ), + TP_printk("id=%u initial=%llu", + __entry->id, __entry->seqno + ) +); + +TRACE_EVENT(kgsl_timeline_destroy, + TP_PROTO( + u32 id + ), + TP_ARGS( + id + ), + TP_STRUCT__entry( + __field(u32, id) + ), + TP_fast_assign( + __entry->id = id; + ), + TP_printk("id=%u", + __entry->id + ) +); + + +TRACE_EVENT(kgsl_timeline_signal, + TP_PROTO( + u32 id, + u64 seqno + ), + TP_ARGS( + id, + seqno + ), + TP_STRUCT__entry( + __field(u32, id) + __field(u64, seqno) + ), + TP_fast_assign( + __entry->id = id; + __entry->seqno = seqno; + ), + TP_printk("id=%u seqno=%llu", + __entry->id, __entry->seqno + ) +); + +TRACE_EVENT(kgsl_timeline_fence_alloc, + TP_PROTO( + u32 timeline, + u64 seqno + ), + TP_ARGS( + timeline, + seqno + ), + TP_STRUCT__entry( + __field(u32, timeline) + __field(u64, seqno) + ), + TP_fast_assign( + __entry->timeline = timeline; + __entry->seqno = seqno; + ), + TP_printk("timeline=%u seqno=%llu", + __entry->timeline, __entry->seqno + ) +); + +TRACE_EVENT(kgsl_timeline_fence_release, + TP_PROTO( + u32 timeline, + u64 seqno + ), + TP_ARGS( + timeline, + seqno + ), + TP_STRUCT__entry( + __field(u32, timeline) + __field(u64, seqno) + ), + TP_fast_assign( + __entry->timeline = timeline; + __entry->seqno = seqno; + ), + TP_printk("timeline=%u seqno=%llu", + __entry->timeline, __entry->seqno + ) +); + + +TRACE_EVENT(kgsl_timeline_wait, + TP_PROTO( + u32 flags, + s64 tv_sec, + s64 tv_nsec + ), + TP_ARGS( + flags, + tv_sec, + tv_nsec + ), + TP_STRUCT__entry( + __field(u32, flags) + __field(s64, tv_sec) + __field(s64, tv_nsec) + ), + TP_fast_assign( + __entry->flags = flags; + __entry->tv_sec = tv_sec; + __entry->tv_nsec = tv_nsec; + ), + TP_printk("flags=0x%x tv_sec=%llu tv_nsec=%llu", + __entry->flags, __entry->tv_sec, __entry->tv_nsec + + ) +); + +TRACE_EVENT(kgsl_aux_command, + TP_PROTO(u32 drawctxt_id, u32 numcmds, u32 flags, u32 timestamp + ), + TP_ARGS(drawctxt_id, numcmds, flags, timestamp + ), + TP_STRUCT__entry( + __field(u32, drawctxt_id) + __field(u32, numcmds) + __field(u32, flags) + __field(u32, timestamp) + ), + TP_fast_assign( + __entry->drawctxt_id = drawctxt_id; + __entry->numcmds = numcmds; + __entry->flags = flags; + __entry->timestamp = timestamp; + ), + TP_printk("context=%u numcmds=%u flags=0x%x timestamp=%u", + __entry->drawctxt_id, __entry->numcmds, __entry->flags, + __entry->timestamp + ) +); + +TRACE_EVENT(kgsl_drawobj_timeline, + TP_PROTO(u32 timeline, u64 seqno + ), + TP_ARGS(timeline, seqno + ), + TP_STRUCT__entry( + __field(u32, timeline) + __field(u64, seqno) + ), + TP_fast_assign( + __entry->timeline = timeline; + __entry->seqno = seqno; + ), + TP_printk("timeline=%u seqno=%llu", + __entry->timeline, __entry->seqno + ) +); + #endif /* _KGSL_TRACE_H */ /* This part must be outside protection */ diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index e1e11d6e8a7d..1bba50925201 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -797,6 +797,27 @@ config HID_PLANTRONICS Say M here if you may ever plug in a Plantronics USB audio device. 
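Before the HID changes below, a minimal userspace sketch of the KGSL software-timeline ioctls registered above in kgsl_ioctl.c. The struct and field names follow the handlers in kgsl_timeline.c; the exact uapi layouts and ioctl definitions (assumed here to come from the msm_kgsl.h uapi header) and the /dev/kgsl-3d0 node path are not shown in this patch, so treat this purely as an illustration:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msm_kgsl.h>	/* assumed location of the timeline uapi */

/* Create a software timeline starting at 0, then advance it to seqno 1. */
static int kgsl_timeline_demo(void)
{
	struct kgsl_timeline_create create = { .seqno = 0 };
	struct kgsl_timeline_val val;
	struct kgsl_timeline_signal sig;
	int ret, fd = open("/dev/kgsl-3d0", O_RDWR);	/* assumed node path */

	if (fd < 0)
		return -1;

	if (ioctl(fd, IOCTL_KGSL_TIMELINE_CREATE, &create)) {
		close(fd);
		return -1;
	}

	val = (struct kgsl_timeline_val) {
		.timeline = create.id,	/* id handed back by the create ioctl */
		.seqno = 1,
	};
	sig = (struct kgsl_timeline_signal) {
		.timelines = (uintptr_t)&val,
		.count = 1,
		.timelines_size = sizeof(val),
	};

	/* Releases every fence on this timeline waiting for seqno <= 1. */
	ret = ioctl(fd, IOCTL_KGSL_TIMELINE_SIGNAL, &sig);
	close(fd);
	return ret;
}

IOCTL_KGSL_TIMELINE_WAIT and IOCTL_KGSL_TIMELINE_FENCE_GET follow the same pattern, taking an array of kgsl_timeline_val entries or returning a sync_file fd for a single timeline/seqno pair.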
+config HID_PLAYSTATION + tristate "PlayStation HID Driver" + depends on HID + select CRC32 + select POWER_SUPPLY + ---help--- + Provides support for Sony PS5 controllers including support for + its special functionalities e.g. touchpad, lights and motion + sensors. + +config PLAYSTATION_FF + bool "PlayStation force feedback support" + depends on HID_PLAYSTATION + select INPUT_FF_MEMLESS + help + Provides the force feedback support for Playstation game + controllers. + + Say Y here if you would like to enable force feedback support for + PlayStation game controllers. + config HID_PRIMAX tristate "Primax non-fully HID-compliant devices" depends on HID diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 3fb5a1b73a87..0ea94cbe0de1 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -87,6 +87,7 @@ hid-picolcd-$(CONFIG_HID_PICOLCD_CIR) += hid-picolcd_cir.o hid-picolcd-$(CONFIG_DEBUG_FS) += hid-picolcd_debugfs.o obj-$(CONFIG_HID_PLANTRONICS) += hid-plantronics.o +obj-$(CONFIG_HID_PLAYSTATION) += hid-playstation.o obj-$(CONFIG_HID_PRIMAX) += hid-primax.o obj-$(CONFIG_HID_REDRAGON) += hid-redragon.o obj-$(CONFIG_HID_RETRODE) += hid-retrode.o diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 05122167d9d8..bde5cef3290f 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -93,7 +93,7 @@ EXPORT_SYMBOL_GPL(hid_register_report); * Register a new field for this report. */ -static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values) +static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages) { struct hid_field *field; @@ -104,7 +104,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned field = kzalloc((sizeof(struct hid_field) + usages * sizeof(struct hid_usage) + - values * sizeof(unsigned)), GFP_KERNEL); + usages * sizeof(unsigned)), GFP_KERNEL); if (!field) return NULL; @@ -300,7 +300,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign usages = max_t(unsigned, parser->local.usage_index, parser->global.report_count); - field = hid_register_field(report, usages, parser->global.report_count); + field = hid_register_field(report, usages); if (!field) return 0; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 5791b01296e0..9badaf1e69d7 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -442,6 +442,10 @@ #define USB_VENDOR_ID_FRUCTEL 0x25B6 #define USB_DEVICE_ID_GAMETEL_MT_MODE 0x0002 +#define USB_VENDOR_ID_GAMEVICE 0x27F8 +#define USB_DEVICE_ID_GAMEVICE_GV186 0x0BBE +#define USB_DEVICE_ID_GAMEVICE_KISHI 0x0BBF + #define USB_VENDOR_ID_GAMERON 0x0810 #define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR 0x0001 #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002 @@ -1031,6 +1035,7 @@ #define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4 #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0 +#define USB_DEVICE_ID_SONY_PS5_CONTROLLER 0x0ce6 #define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5 #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f #define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002 diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c new file mode 100644 index 000000000000..4532d6b57174 --- /dev/null +++ b/drivers/hid/hid-playstation.c @@ -0,0 +1,1370 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * HID driver for Sony DualSense(TM) controller. 
+ * + * Copyright (c) 2020 Sony Interactive Entertainment + */ +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "hid-ids.h" + +/* List of connected playstation devices. */ +static DEFINE_MUTEX(ps_devices_lock); +static LIST_HEAD(ps_devices_list); + +static DEFINE_IDA(ps_player_id_allocator); + +#define HID_PLAYSTATION_VERSION_PATCH 0x8000 + +/* Base class for playstation devices. */ +struct ps_device { + struct list_head list; + struct hid_device *hdev; + spinlock_t lock; + + uint32_t player_id; + + struct power_supply_desc battery_desc; + struct power_supply *battery; + uint8_t battery_capacity; + int battery_status; + uint8_t mac_address[6]; /* Note: stored in little endian order. */ + int32_t hw_version; + int32_t fw_version; + int (*parse_report)(struct ps_device *dev, struct hid_report *report, u8 *data, int size); +}; + +/* Seed values for DualShock4 / DualSense CRC32 for different report types. */ +#define PS_INPUT_CRC32_SEED 0xA1 +#define PS_OUTPUT_CRC32_SEED 0xA2 +#define PS_FEATURE_CRC32_SEED 0xA3 + +struct ps_calibration_data { + int abs_code; + short bias; + int sens_numer; + int sens_denom; +}; + +#define DS_INPUT_REPORT_USB 0x01 +#define DS_INPUT_REPORT_USB_SIZE 64 + +#define DS_FEATURE_REPORT_CALIBRATION 0x05 +#define DS_FEATURE_REPORT_CALIBRATION_SIZE 41 + +#define DS_INPUT_REPORT_BT 0x31 +#define DS_INPUT_REPORT_BT_SIZE 78 +#define DS_OUTPUT_REPORT_USB 0x02 +#define DS_OUTPUT_REPORT_USB_SIZE 63 +#define DS_OUTPUT_REPORT_BT 0x31 +#define DS_OUTPUT_REPORT_BT_SIZE 78 + +#define DS_FEATURE_REPORT_PAIRING_INFO 0x09 +#define DS_FEATURE_REPORT_PAIRING_INFO_SIZE 20 + +#define DS_FEATURE_REPORT_FIRMWARE_INFO 0x20 +#define DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE 64 + +/* Button masks for DualSense input report. */ +#define DS_BUTTONS0_HAT_SWITCH GENMASK(3, 0) +#define DS_BUTTONS0_SQUARE BIT(4) +#define DS_BUTTONS0_CROSS BIT(5) +#define DS_BUTTONS0_CIRCLE BIT(6) +#define DS_BUTTONS0_TRIANGLE BIT(7) +#define DS_BUTTONS1_L1 BIT(0) +#define DS_BUTTONS1_R1 BIT(1) +#define DS_BUTTONS1_L2 BIT(2) +#define DS_BUTTONS1_R2 BIT(3) +#define DS_BUTTONS1_CREATE BIT(4) +#define DS_BUTTONS1_OPTIONS BIT(5) +#define DS_BUTTONS1_L3 BIT(6) +#define DS_BUTTONS1_R3 BIT(7) +#define DS_BUTTONS2_PS_HOME BIT(0) +#define DS_BUTTONS2_TOUCHPAD BIT(1) +#define DS_BUTTONS2_MIC_MUTE BIT(2) + +/* Status field of DualSense input report. */ +#define DS_STATUS_BATTERY_CAPACITY GENMASK(3, 0) +#define DS_STATUS_CHARGING GENMASK(7, 4) +#define DS_STATUS_CHARGING_SHIFT 4 + +/* ++ * Status of a DualSense touch point contact. ++ * Contact IDs, with highest bit set are 'inactive' ++ * and any associated data is then invalid. ++ */ +#define DS_TOUCH_POINT_INACTIVE BIT(7) + +/* Magic value required in tag field of Bluetooth output report. */ +#define DS_OUTPUT_TAG 0x10 +/* Flags for DualSense output report. 
*/ +#define DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION BIT(0) +#define DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT BIT(1) +#define DS_OUTPUT_VALID_FLAG1_MIC_MUTE_LED_CONTROL_ENABLE BIT(0) +#define DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE BIT(1) +#define DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE BIT(2) +#define DS_OUTPUT_VALID_FLAG1_RELEASE_LEDS BIT(3) +#define DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE BIT(4) +#define DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE BIT(1) +#define DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE BIT(4) +#define DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT BIT(1) + +#define DS_ACC_RES_PER_G 8192 +#define DS_ACC_RANGE (4*DS_ACC_RES_PER_G) +#define DS_GYRO_RES_PER_DEG_S 1024 +#define DS_GYRO_RANGE (2048*DS_GYRO_RES_PER_DEG_S) + +/* DualSense hardware limits */ +#define DS_TOUCHPAD_WIDTH 1920 +#define DS_TOUCHPAD_HEIGHT 1080 + +struct dualsense { + struct ps_device base; + struct input_dev *gamepad; + struct input_dev *sensors; + struct input_dev *touchpad; + + /* Calibration data for accelerometer and gyroscope. */ + struct ps_calibration_data accel_calib_data[3]; + struct ps_calibration_data gyro_calib_data[3]; + /* Timestamp for sensor data */ + bool sensor_timestamp_initialized; + uint32_t prev_sensor_timestamp; + uint32_t sensor_timestamp_us; + + /* Compatible rumble state */ + bool update_rumble; + uint8_t motor_left; + uint8_t motor_right; + + /* RGB lightbar */ + bool update_lightbar; + uint8_t lightbar_red; + uint8_t lightbar_green; + uint8_t lightbar_blue; + + /* Microphone */ + bool update_mic_mute; + bool mic_muted; + bool last_btn_mic_state; + + /* Player leds */ + bool update_player_leds; + uint8_t player_leds_state; + struct led_classdev player_leds[5]; + + struct work_struct output_worker; + void *output_report_dmabuf; + uint8_t output_seq; /* Sequence number for output report. */ +}; + +struct dualsense_touch_point { + uint8_t contact; + uint8_t x_lo; + uint8_t x_hi:4, y_lo:4; + uint8_t y_hi; +} __packed; + +/* Common data between DualSense BT/USB main output report. */ +struct dualsense_output_report_common { + uint8_t valid_flag0; + uint8_t valid_flag1; + + /* For DualShock 4 compatibility mode. */ + uint8_t motor_right; + uint8_t motor_left; + + /* Audio controls */ + uint8_t reserved[4]; + uint8_t mute_button_led; + + uint8_t power_save_control; + uint8_t reserved2[28]; + + /* LEDs and lightbar */ + uint8_t valid_flag2; + uint8_t reserved3[2]; + uint8_t lightbar_setup; + uint8_t led_brightness; + uint8_t player_leds; + uint8_t lightbar_red; + uint8_t lightbar_green; + uint8_t lightbar_blue; +} __packed; + +struct dualsense_output_report_bt { + uint8_t report_id; /* 0x31 */ + uint8_t seq_tag; + uint8_t tag; + struct dualsense_output_report_common common; + uint8_t reserved[24]; + __le32 crc32; +} __packed; + +struct dualsense_output_report_usb { + uint8_t report_id; /* 0x02 */ + struct dualsense_output_report_common common; + uint8_t reserved[15]; +} __packed; + +/* + * The DualSense has a main output report used to control most features. + * It is largely the same between Bluetooth and USB except for different + * headers and CRC. This structure hide the differences between the two to + * simplify sending output reports. + */ +struct dualsense_output_report { + uint8_t *data; /* Start of data */ + uint8_t len; /* Size of output report */ + + /* Points to Bluetooth data payload + * in case for a Bluetooth report else NULL. + */ + struct dualsense_output_report_bt *bt; + /* Points to USB data payload in case for a USB report else NULL. 
*/ + struct dualsense_output_report_usb *usb; + /* Points to common section of report, so past any headers. */ + struct dualsense_output_report_common *common; +}; + +/* Main DualSense input report excluding any BT/USB specific headers. */ +struct dualsense_input_report { + uint8_t x, y; + uint8_t rx, ry; + uint8_t z, rz; + uint8_t seq_number; + uint8_t buttons[4]; + uint8_t reserved[4]; + + /* Motion sensors */ + __le16 gyro[3]; /* x, y, z */ + __le16 accel[3]; /* x, y, z */ + __le32 sensor_timestamp; + uint8_t reserved2; + + /* Touchpad */ + struct dualsense_touch_point points[2]; + + uint8_t reserved3[12]; + uint8_t status; + uint8_t reserved4[10]; +} __packed; + +/* + * Common gamepad buttons across DualShock 3 / 4 and DualSense. + * Note: for device with a touchpad, touchpad button is not included + * as it will be part of the touchpad device. + */ +static const int ps_gamepad_buttons[] = { + BTN_WEST, /* Square */ + BTN_NORTH, /* Triangle */ + BTN_EAST, /* Circle */ + BTN_SOUTH, /* Cross */ + BTN_TL, /* L1 */ + BTN_TR, /* R1 */ + BTN_TL2, /* L2 */ + BTN_TR2, /* R2 */ + BTN_SELECT, /* Create (PS5) / Share (PS4) */ + BTN_START, /* Option */ + BTN_THUMBL, /* L3 */ + BTN_THUMBR, /* R3 */ + BTN_MODE, /* PS Home */ +}; + +static const struct {int x; int y; } ps_gamepad_hat_mapping[] = { + {0, -1}, {1, -1}, {1, 0}, {1, 1}, {0, 1}, {-1, 1}, {-1, 0}, {-1, -1}, + {0, 0}, +}; + +/* + * Add a new ps_device to ps_devices if it doesn't exist. + * Return error on duplicate device, which can happen if the same + * device is connected using both Bluetooth and USB. + */ +static int ps_devices_list_add(struct ps_device *dev) +{ + struct ps_device *entry; + + mutex_lock(&ps_devices_lock); + list_for_each_entry(entry, &ps_devices_list, list) { + if (!memcmp(entry->mac_address, dev->mac_address, sizeof(dev->mac_address))) { + hid_err(dev->hdev, "Duplicate device found for MAC address %pMR.\n", + dev->mac_address); + mutex_unlock(&ps_devices_lock); + return -EEXIST; + } + } + + list_add_tail(&dev->list, &ps_devices_list); + mutex_unlock(&ps_devices_lock); + return 0; +} + +static int ps_devices_list_remove(struct ps_device *dev) +{ + mutex_lock(&ps_devices_lock); + list_del(&dev->list); + mutex_unlock(&ps_devices_lock); + return 0; +} + +static int ps_device_set_player_id(struct ps_device *dev) +{ + int ret = ida_alloc(&ps_player_id_allocator, GFP_KERNEL); + + if (ret < 0) + return ret; + + dev->player_id = ret; + return 0; +} + +static void ps_device_release_player_id(struct ps_device *dev) +{ + ida_free(&ps_player_id_allocator, dev->player_id); + + dev->player_id = U32_MAX; +} + +static struct input_dev *ps_allocate_input_dev(struct hid_device *hdev, const char *name_suffix) +{ + struct input_dev *input_dev; + + input_dev = devm_input_allocate_device(&hdev->dev); + if (!input_dev) + return ERR_PTR(-ENOMEM); + + input_dev->id.bustype = hdev->bus; + input_dev->id.vendor = hdev->vendor; + input_dev->id.product = hdev->product; + input_dev->id.version = hdev->version; + input_dev->uniq = hdev->uniq; + + if (name_suffix) { + input_dev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name, + name_suffix); + if (!input_dev->name) + return ERR_PTR(-ENOMEM); + } else { + input_dev->name = hdev->name; + } + + input_set_drvdata(input_dev, hdev); + + return input_dev; +} + +/* Compute crc32 of HID data and compare against expected CRC. 
*/ +static bool ps_check_crc32(uint8_t seed, uint8_t *data, size_t len, uint32_t report_crc) +{ + uint32_t crc; + + crc = crc32_le(0xFFFFFFFF, &seed, 1); + crc = ~crc32_le(crc, data, len); + + return crc == report_crc; +} + +static enum power_supply_property ps_power_supply_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_SCOPE, +}; + +static int ps_battery_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct ps_device *dev = power_supply_get_drvdata(psy); + uint8_t battery_capacity; + int battery_status; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&dev->lock, flags); + battery_capacity = dev->battery_capacity; + battery_status = dev->battery_status; + spin_unlock_irqrestore(&dev->lock, flags); + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + val->intval = battery_status; + break; + case POWER_SUPPLY_PROP_PRESENT: + val->intval = 1; + break; + case POWER_SUPPLY_PROP_CAPACITY: + val->intval = battery_capacity; + break; + case POWER_SUPPLY_PROP_SCOPE: + val->intval = POWER_SUPPLY_SCOPE_DEVICE; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int ps_device_register_battery(struct ps_device *dev) +{ + struct power_supply *battery; + struct power_supply_config battery_cfg = { .drv_data = dev }; + int ret; + + dev->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY; + dev->battery_desc.properties = ps_power_supply_props; + dev->battery_desc.num_properties = ARRAY_SIZE(ps_power_supply_props); + dev->battery_desc.get_property = ps_battery_get_property; + dev->battery_desc.name = devm_kasprintf(&dev->hdev->dev, GFP_KERNEL, + "ps-controller-battery-%pMR", dev->mac_address); + if (!dev->battery_desc.name) + return -ENOMEM; + + battery = devm_power_supply_register(&dev->hdev->dev, &dev->battery_desc, &battery_cfg); + if (IS_ERR(battery)) { + ret = PTR_ERR(battery); + hid_err(dev->hdev, "Unable to register battery device: %d\n", ret); + return ret; + } + dev->battery = battery; + + ret = power_supply_powers(dev->battery, &dev->hdev->dev); + if (ret) { + hid_err(dev->hdev, "Unable to activate battery device: %d\n", ret); + return ret; + } + + return 0; +} + +static struct input_dev *ps_gamepad_create(struct hid_device *hdev, + int (*play_effect)(struct input_dev *, void *, struct ff_effect *)) +{ + struct input_dev *gamepad; + unsigned int i; + int ret; + + gamepad = ps_allocate_input_dev(hdev, NULL); + if (IS_ERR(gamepad)) + return ERR_CAST(gamepad); + + input_set_abs_params(gamepad, ABS_X, 0, 255, 0, 0); + input_set_abs_params(gamepad, ABS_Y, 0, 255, 0, 0); + input_set_abs_params(gamepad, ABS_Z, 0, 255, 0, 0); + input_set_abs_params(gamepad, ABS_RX, 0, 255, 0, 0); + input_set_abs_params(gamepad, ABS_RY, 0, 255, 0, 0); + input_set_abs_params(gamepad, ABS_RZ, 0, 255, 0, 0); + + input_set_abs_params(gamepad, ABS_HAT0X, -1, 1, 0, 0); + input_set_abs_params(gamepad, ABS_HAT0Y, -1, 1, 0, 0); + + for (i = 0; i < ARRAY_SIZE(ps_gamepad_buttons); i++) + input_set_capability(gamepad, EV_KEY, ps_gamepad_buttons[i]); + +#if IS_ENABLED(CONFIG_PLAYSTATION_FF) + if (play_effect) { + input_set_capability(gamepad, EV_FF, FF_RUMBLE); + input_ff_create_memless(gamepad, NULL, play_effect); + } +#endif + + ret = input_register_device(gamepad); + if (ret) + return ERR_PTR(ret); + + return gamepad; +} + +static int ps_get_report(struct hid_device *hdev, uint8_t report_id, + uint8_t *buf, size_t size) +{ + int ret; + + ret = 
hid_hw_raw_request(hdev, report_id, buf, size, HID_FEATURE_REPORT, + HID_REQ_GET_REPORT); + if (ret < 0) { + hid_err(hdev, "Failed to retrieve: reportID %d: %d\n", report_id, ret); + return ret; + } + + if (ret != size) { + hid_err(hdev, "Invalid byte count, expected %zu got %d\n", size, ret); + return -EINVAL; + } + + if (buf[0] != report_id) { + hid_err(hdev, "Invalid reportID: expected %d got %d\n", report_id, buf[0]); + return -EINVAL; + } + + if (hdev->bus == BUS_BLUETOOTH) { + /* Last 4 bytes contains crc32. */ + uint8_t crc_offset = size - 4; + uint32_t report_crc = get_unaligned_le32(&buf[crc_offset]); + + if (!ps_check_crc32(PS_FEATURE_CRC32_SEED, buf, crc_offset, report_crc)) { + hid_err(hdev, "CRC check failed for reportID=%d\n", report_id); + return -EILSEQ; + } + } + + return 0; +} + +static struct input_dev *ps_sensors_create(struct hid_device *hdev, + int accel_range, int accel_res, int gyro_range, int gyro_res) +{ + struct input_dev *sensors; + int ret; + + sensors = ps_allocate_input_dev(hdev, "Motion Sensors"); + if (IS_ERR(sensors)) + return ERR_CAST(sensors); + + __set_bit(INPUT_PROP_ACCELEROMETER, sensors->propbit); + __set_bit(EV_MSC, sensors->evbit); + __set_bit(MSC_TIMESTAMP, sensors->mscbit); + + /* Accelerometer */ + input_set_abs_params(sensors, ABS_X, -accel_range, accel_range, 16, 0); + input_set_abs_params(sensors, ABS_Y, -accel_range, accel_range, 16, 0); + input_set_abs_params(sensors, ABS_Z, -accel_range, accel_range, 16, 0); + input_abs_set_res(sensors, ABS_X, accel_res); + input_abs_set_res(sensors, ABS_Y, accel_res); + input_abs_set_res(sensors, ABS_Z, accel_res); + + /* Gyroscope */ + input_set_abs_params(sensors, ABS_RX, -gyro_range, gyro_range, 16, 0); + input_set_abs_params(sensors, ABS_RY, -gyro_range, gyro_range, 16, 0); + input_set_abs_params(sensors, ABS_RZ, -gyro_range, gyro_range, 16, 0); + input_abs_set_res(sensors, ABS_RX, gyro_res); + input_abs_set_res(sensors, ABS_RY, gyro_res); + input_abs_set_res(sensors, ABS_RZ, gyro_res); + + ret = input_register_device(sensors); + if (ret) + return ERR_PTR(ret); + + return sensors; +} + +static struct input_dev *ps_touchpad_create(struct hid_device *hdev, int width, int height, + unsigned int num_contacts) +{ + struct input_dev *touchpad; + int ret; + + touchpad = ps_allocate_input_dev(hdev, "Touchpad"); + if (IS_ERR(touchpad)) + return ERR_CAST(touchpad); + + /* Map button underneath touchpad to BTN_LEFT. 
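+	 * Together with the INPUT_PROP_BUTTONPAD property set below, this
+	 * exposes the touchpad to userspace as a clickpad.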
*/ + input_set_capability(touchpad, EV_KEY, BTN_LEFT); + __set_bit(INPUT_PROP_BUTTONPAD, touchpad->propbit); + + input_set_abs_params(touchpad, ABS_MT_POSITION_X, 0, width - 1, 0, 0); + input_set_abs_params(touchpad, ABS_MT_POSITION_Y, 0, height - 1, 0, 0); + + ret = input_mt_init_slots(touchpad, num_contacts, INPUT_MT_POINTER); + if (ret) + return ERR_PTR(ret); + + ret = input_register_device(touchpad); + if (ret) + return ERR_PTR(ret); + + return touchpad; +} + +static ssize_t firmware_version_show(struct device *dev, + struct device_attribute + *attr, char *buf) +{ + struct hid_device *hdev = to_hid_device(dev); + struct ps_device *ps_dev = hid_get_drvdata(hdev); + + return scnprintf(buf, PAGE_SIZE, "0x%08x\n", ps_dev->fw_version); +} + +static DEVICE_ATTR_RO(firmware_version); + +static ssize_t hardware_version_show(struct device *dev, + struct device_attribute + *attr, char *buf) +{ + struct hid_device *hdev = to_hid_device(dev); + struct ps_device *ps_dev = hid_get_drvdata(hdev); + + return scnprintf(buf, PAGE_SIZE, "0x%08x\n", ps_dev->hw_version); +} + +static DEVICE_ATTR_RO(hardware_version); + +static struct attribute *ps_device_attributes[] = { + &dev_attr_firmware_version.attr, + &dev_attr_hardware_version.attr, + NULL +}; + +static const struct attribute_group ps_device_attribute_group = { + .attrs = ps_device_attributes, +}; + +static int dualsense_get_calibration_data(struct dualsense *ds) +{ + short gyro_pitch_bias, gyro_pitch_plus, gyro_pitch_minus; + short gyro_yaw_bias, gyro_yaw_plus, gyro_yaw_minus; + short gyro_roll_bias, gyro_roll_plus, gyro_roll_minus; + short gyro_speed_plus, gyro_speed_minus; + short acc_x_plus, acc_x_minus; + short acc_y_plus, acc_y_minus; + short acc_z_plus, acc_z_minus; + int speed_2x; + int range_2g; + int ret = 0; + uint8_t *buf; + + buf = kzalloc(DS_FEATURE_REPORT_CALIBRATION_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_CALIBRATION, buf, + DS_FEATURE_REPORT_CALIBRATION_SIZE); + if (ret) { + hid_err(ds->base.hdev, "Failed: DualSense cal info: %d\n", ret); + goto err_free; + } + + gyro_pitch_bias = get_unaligned_le16(&buf[1]); + gyro_yaw_bias = get_unaligned_le16(&buf[3]); + gyro_roll_bias = get_unaligned_le16(&buf[5]); + gyro_pitch_plus = get_unaligned_le16(&buf[7]); + gyro_pitch_minus = get_unaligned_le16(&buf[9]); + gyro_yaw_plus = get_unaligned_le16(&buf[11]); + gyro_yaw_minus = get_unaligned_le16(&buf[13]); + gyro_roll_plus = get_unaligned_le16(&buf[15]); + gyro_roll_minus = get_unaligned_le16(&buf[17]); + gyro_speed_plus = get_unaligned_le16(&buf[19]); + gyro_speed_minus = get_unaligned_le16(&buf[21]); + acc_x_plus = get_unaligned_le16(&buf[23]); + acc_x_minus = get_unaligned_le16(&buf[25]); + acc_y_plus = get_unaligned_le16(&buf[27]); + acc_y_minus = get_unaligned_le16(&buf[29]); + acc_z_plus = get_unaligned_le16(&buf[31]); + acc_z_minus = get_unaligned_le16(&buf[33]); + + /* + * Set gyroscope calibration and normalization parameters. + * Data values will be normalized to 1/DS_GYRO_RES_PER_DEG_S degree/s. 
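+	 * (A calibrated sample is sens_numer * (raw - bias) / sens_denom,
+	 * applied via mult_frac() in dualsense_parse_report().)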
+ */ + speed_2x = (gyro_speed_plus + gyro_speed_minus); + ds->gyro_calib_data[0].abs_code = ABS_RX; + ds->gyro_calib_data[0].bias = gyro_pitch_bias; + ds->gyro_calib_data[0].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S; + ds->gyro_calib_data[0].sens_denom = gyro_pitch_plus - gyro_pitch_minus; + + ds->gyro_calib_data[1].abs_code = ABS_RY; + ds->gyro_calib_data[1].bias = gyro_yaw_bias; + ds->gyro_calib_data[1].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S; + ds->gyro_calib_data[1].sens_denom = gyro_yaw_plus - gyro_yaw_minus; + + ds->gyro_calib_data[2].abs_code = ABS_RZ; + ds->gyro_calib_data[2].bias = gyro_roll_bias; + ds->gyro_calib_data[2].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S; + ds->gyro_calib_data[2].sens_denom = gyro_roll_plus - gyro_roll_minus; + + /* + * Set accelerometer calibration and normalization parameters. + * Data values will be normalized to 1/DS_ACC_RES_PER_G g. + */ + range_2g = acc_x_plus - acc_x_minus; + ds->accel_calib_data[0].abs_code = ABS_X; + ds->accel_calib_data[0].bias = acc_x_plus - range_2g / 2; + ds->accel_calib_data[0].sens_numer = 2*DS_ACC_RES_PER_G; + ds->accel_calib_data[0].sens_denom = range_2g; + + range_2g = acc_y_plus - acc_y_minus; + ds->accel_calib_data[1].abs_code = ABS_Y; + ds->accel_calib_data[1].bias = acc_y_plus - range_2g / 2; + ds->accel_calib_data[1].sens_numer = 2*DS_ACC_RES_PER_G; + ds->accel_calib_data[1].sens_denom = range_2g; + + range_2g = acc_z_plus - acc_z_minus; + ds->accel_calib_data[2].abs_code = ABS_Z; + ds->accel_calib_data[2].bias = acc_z_plus - range_2g / 2; + ds->accel_calib_data[2].sens_numer = 2*DS_ACC_RES_PER_G; + ds->accel_calib_data[2].sens_denom = range_2g; + +err_free: + kfree(buf); + return ret; +} + +static int dualsense_get_firmware_info(struct dualsense *ds) +{ + uint8_t *buf; + int ret; + + buf = kzalloc(DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_FIRMWARE_INFO, buf, + DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE); + if (ret) { + hid_err(ds->base.hdev, "Failed to retrieve DualSense firmware info: %d\n", ret); + goto err_free; + } + + ds->base.hw_version = get_unaligned_le32(&buf[24]); + ds->base.fw_version = get_unaligned_le32(&buf[28]); + +err_free: + kfree(buf); + return ret; +} + +static int dualsense_get_mac_address(struct dualsense *ds) +{ + uint8_t *buf; + int ret = 0; + + buf = kzalloc(DS_FEATURE_REPORT_PAIRING_INFO_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_PAIRING_INFO, buf, + DS_FEATURE_REPORT_PAIRING_INFO_SIZE); + if (ret) { + hid_err(ds->base.hdev, "Failed to retrieve DualSense pair: %d\n", ret); + goto err_free; + } + + memcpy(ds->base.mac_address, &buf[1], sizeof(ds->base.mac_address)); + +err_free: + kfree(buf); + return ret; +} + +static void dualsense_init_output_report(struct dualsense *ds, + struct dualsense_output_report *rp, void *buf) +{ + struct hid_device *hdev = ds->base.hdev; + + if (hdev->bus == BUS_BLUETOOTH) { + struct dualsense_output_report_bt *bt = buf; + + memset(bt, 0, sizeof(*bt)); + bt->report_id = DS_OUTPUT_REPORT_BT; + bt->tag = DS_OUTPUT_TAG; /* Tag to be set. It is unclear. */ + + /* + * Highest 4-bit is a sequence number, which needs to be + * increased every report. Lowest 4-bit is tag and can be + * zero for now. 
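+		 * E.g. output_seq 3 yields seq_tag 0x30; the counter wraps
+		 * from 15 back to 0.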
+ */ + bt->seq_tag = (ds->output_seq << 4) | 0x0; + if (++ds->output_seq == 16) + ds->output_seq = 0; + + rp->data = buf; + rp->len = sizeof(*bt); + rp->bt = bt; + rp->usb = NULL; + rp->common = &bt->common; + } else { /* USB */ + struct dualsense_output_report_usb *usb = buf; + + memset(usb, 0, sizeof(*usb)); + usb->report_id = DS_OUTPUT_REPORT_USB; + + rp->data = buf; + rp->len = sizeof(*usb); + rp->bt = NULL; + rp->usb = usb; + rp->common = &usb->common; + } +} + +/* + * Helper function to send DualSense output reports. Applies a CRC + * at the end of a report for Bluetooth reports. + */ +static void dualsense_send_output_report(struct dualsense *ds, + struct dualsense_output_report *report) +{ + struct hid_device *hdev = ds->base.hdev; + + /* Bluetooth packets need to be signed + * with a CRC in the last 4 bytes. + */ + if (report->bt) { + uint32_t crc; + uint8_t seed = PS_OUTPUT_CRC32_SEED; + + crc = crc32_le(0xFFFFFFFF, &seed, 1); + crc = ~crc32_le(crc, report->data, report->len - 4); + + report->bt->crc32 = cpu_to_le32(crc); + } + + hid_hw_output_report(hdev, report->data, report->len); +} + +static void dualsense_output_worker(struct work_struct *work) +{ + struct dualsense *ds = container_of(work, struct dualsense, + output_worker); + struct dualsense_output_report report; + struct dualsense_output_report_common *common; + unsigned long flags; + + dualsense_init_output_report(ds, &report, ds->output_report_dmabuf); + common = report.common; + + spin_lock_irqsave(&ds->base.lock, flags); + + if (ds->update_rumble) { + /* Select classic rumble style haptics and enable it. */ + common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT; + common->valid_flag0 |= + DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION; + common->motor_left = ds->motor_left; + common->motor_right = ds->motor_right; + ds->update_rumble = false; + } + + if (ds->update_lightbar) { + common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE; + common->lightbar_red = ds->lightbar_red; + common->lightbar_green = ds->lightbar_green; + common->lightbar_blue = ds->lightbar_blue; + + ds->update_lightbar = false; + } + + if (ds->update_player_leds) { + common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE; + common->player_leds = ds->player_leds_state; + + ds->update_player_leds = false; + } + + if (ds->update_mic_mute) { + common->valid_flag1 |= + DS_OUTPUT_VALID_FLAG1_MIC_MUTE_LED_CONTROL_ENABLE; + common->mute_button_led = ds->mic_muted; + + if (ds->mic_muted) { + /* Disable microphone */ + common->valid_flag1 |= + DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE; + common->power_save_control |= + DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE; + } else { + /* Enable microphone */ + common->valid_flag1 |= + DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE; + common->power_save_control &= + ~DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE; + } + + ds->update_mic_mute = false; + } + + spin_unlock_irqrestore(&ds->base.lock, flags); + + dualsense_send_output_report(ds, &report); +} + +static int dualsense_parse_report(struct ps_device *ps_dev, + struct hid_report *report, u8 *data, int size) +{ + struct hid_device *hdev = ps_dev->hdev; + struct dualsense *ds = container_of(ps_dev, struct dualsense, base); + struct dualsense_input_report *ds_report; + uint8_t battery_data, battery_capacity, charging_status, value; + int battery_status; + unsigned long flags; + bool btn_mic_state; + uint32_t sensor_timestamp; + int i; + + /* + * DualSense in USB uses the full HID report for reportID 1, but + * Bluetooth uses a 
minimal HID report for reportID 1 and reports
+	 * the full report using reportID 49.
+	 */
+	if (hdev->bus == BUS_USB && report->id == DS_INPUT_REPORT_USB &&
+	    size == DS_INPUT_REPORT_USB_SIZE) {
+		ds_report = (struct dualsense_input_report *)&data[1];
+	} else if (hdev->bus == BUS_BLUETOOTH &&
+		   report->id == DS_INPUT_REPORT_BT &&
+		   size == DS_INPUT_REPORT_BT_SIZE) {
+		/* Last 4 bytes of input report contain crc32 */
+		uint32_t report_crc = get_unaligned_le32(&data[size - 4]);
+
+		if (!ps_check_crc32(PS_INPUT_CRC32_SEED, data, size - 4,
+				    report_crc)) {
+			hid_err(hdev, "DualSense input CRC's check failed\n");
+			return -EILSEQ;
+		}
+
+		ds_report = (struct dualsense_input_report *)&data[2];
+	} else {
+		hid_err(hdev, "Unhandled reportID=%d\n", report->id);
+		return -1;
+	}
+
+	input_report_abs(ds->gamepad, ABS_X, ds_report->x);
+	input_report_abs(ds->gamepad, ABS_Y, ds_report->y);
+	input_report_abs(ds->gamepad, ABS_RX, ds_report->rx);
+	input_report_abs(ds->gamepad, ABS_RY, ds_report->ry);
+	input_report_abs(ds->gamepad, ABS_Z, ds_report->z);
+	input_report_abs(ds->gamepad, ABS_RZ, ds_report->rz);
+
+	value = ds_report->buttons[0] & DS_BUTTONS0_HAT_SWITCH;
+	if (value >= ARRAY_SIZE(ps_gamepad_hat_mapping))
+		value = 8; /* center */
+	input_report_abs(ds->gamepad, ABS_HAT0X, ps_gamepad_hat_mapping[value].x);
+	input_report_abs(ds->gamepad, ABS_HAT0Y, ps_gamepad_hat_mapping[value].y);
+
+	input_report_key(ds->gamepad, BTN_WEST, ds_report->buttons[0] & DS_BUTTONS0_SQUARE);
+	input_report_key(ds->gamepad, BTN_SOUTH, ds_report->buttons[0] & DS_BUTTONS0_CROSS);
+	input_report_key(ds->gamepad, BTN_EAST, ds_report->buttons[0] & DS_BUTTONS0_CIRCLE);
+	input_report_key(ds->gamepad, BTN_NORTH, ds_report->buttons[0] & DS_BUTTONS0_TRIANGLE);
+	input_report_key(ds->gamepad, BTN_TL, ds_report->buttons[1] & DS_BUTTONS1_L1);
+	input_report_key(ds->gamepad, BTN_TR, ds_report->buttons[1] & DS_BUTTONS1_R1);
+	input_report_key(ds->gamepad, BTN_TL2, ds_report->buttons[1] & DS_BUTTONS1_L2);
+	input_report_key(ds->gamepad, BTN_TR2, ds_report->buttons[1] & DS_BUTTONS1_R2);
+	input_report_key(ds->gamepad, BTN_SELECT, ds_report->buttons[1] & DS_BUTTONS1_CREATE);
+	input_report_key(ds->gamepad, BTN_START, ds_report->buttons[1] & DS_BUTTONS1_OPTIONS);
+	input_report_key(ds->gamepad, BTN_THUMBL, ds_report->buttons[1] & DS_BUTTONS1_L3);
+	input_report_key(ds->gamepad, BTN_THUMBR, ds_report->buttons[1] & DS_BUTTONS1_R3);
+	input_report_key(ds->gamepad, BTN_MODE, ds_report->buttons[2] & DS_BUTTONS2_PS_HOME);
+	input_sync(ds->gamepad);
+
+	/*
+	 * The DualSense has an internal microphone, which can be muted
+	 * through a mute button on the device. The driver is expected
+	 * to read the button state and program the device
+	 * to mute/unmute audio at the hardware level.
+	 */
+	btn_mic_state = !!(ds_report->buttons[2] & DS_BUTTONS2_MIC_MUTE);
+	if (btn_mic_state && !ds->last_btn_mic_state) {
+		spin_lock_irqsave(&ps_dev->lock, flags);
+		ds->update_mic_mute = true;
+		ds->mic_muted = !ds->mic_muted; /* toggle */
+		spin_unlock_irqrestore(&ps_dev->lock, flags);
+
+		/* Schedule updating of microphone state at hardware level. */
+		schedule_work(&ds->output_worker);
+	}
+	ds->last_btn_mic_state = btn_mic_state;
+
+	/* Parse and calibrate gyroscope data.
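+	 * Each raw little-endian sample is adjusted by the bias and
+	 * sensitivity obtained in dualsense_get_calibration_data().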
*/ + for (i = 0; i < ARRAY_SIZE(ds_report->gyro); i++) { + int raw_data = (short)le16_to_cpu(ds_report->gyro[i]); + int calib_data = mult_frac(ds->gyro_calib_data[i].sens_numer, + raw_data - ds->gyro_calib_data[i].bias, + ds->gyro_calib_data[i].sens_denom); + + input_report_abs(ds->sensors, ds->gyro_calib_data[i].abs_code, + calib_data); + } + + /* Parse and calibrate accelerometer data. */ + for (i = 0; i < ARRAY_SIZE(ds_report->accel); i++) { + int raw_data = (short)le16_to_cpu(ds_report->accel[i]); + int calib_data = mult_frac(ds->accel_calib_data[i].sens_numer, + raw_data - ds->accel_calib_data[i].bias, + ds->accel_calib_data[i].sens_denom); + + input_report_abs(ds->sensors, ds->accel_calib_data[i].abs_code, + calib_data); + } + + /* Convert timestamp (in 0.33us unit) to timestamp_us */ + sensor_timestamp = le32_to_cpu(ds_report->sensor_timestamp); + if (!ds->sensor_timestamp_initialized) { + ds->sensor_timestamp_us = + DIV_ROUND_CLOSEST(sensor_timestamp, 3); + ds->sensor_timestamp_initialized = true; + } else { + uint32_t delta; + + if (ds->prev_sensor_timestamp > sensor_timestamp) + delta = (U32_MAX - ds->prev_sensor_timestamp + + sensor_timestamp + 1); + else + delta = sensor_timestamp - ds->prev_sensor_timestamp; + ds->sensor_timestamp_us += DIV_ROUND_CLOSEST(delta, 3); + } + ds->prev_sensor_timestamp = sensor_timestamp; + input_event(ds->sensors, EV_MSC, MSC_TIMESTAMP, + ds->sensor_timestamp_us); + input_sync(ds->sensors); + + for (i = 0; i < ARRAY_SIZE(ds_report->points); i++) { + struct dualsense_touch_point *point = &ds_report->points[i]; + bool active = (point->contact & + DS_TOUCH_POINT_INACTIVE) ? false : true; + + input_mt_slot(ds->touchpad, i); + input_mt_report_slot_state(ds->touchpad, MT_TOOL_FINGER, active); + + if (active) { + int x = (point->x_hi << 8) | point->x_lo; + int y = (point->y_hi << 4) | point->y_lo; + + input_report_abs(ds->touchpad, ABS_MT_POSITION_X, x); + input_report_abs(ds->touchpad, ABS_MT_POSITION_Y, y); + } + } + input_mt_sync_frame(ds->touchpad); + input_report_key(ds->touchpad, BTN_LEFT, ds_report->buttons[2] & DS_BUTTONS2_TOUCHPAD); + input_sync(ds->touchpad); + + battery_data = ds_report->status & DS_STATUS_BATTERY_CAPACITY; + charging_status = (ds_report->status & DS_STATUS_CHARGING) >> DS_STATUS_CHARGING_SHIFT; + + switch (charging_status) { + case 0x0: + /* + * Each unit of battery data corresponds to 10% + * 0 = 0-9%, 1 = 10-19%, .. 
and 10 = 100% + */ + battery_capacity = min(battery_data * 10 + 5, 100); + battery_status = POWER_SUPPLY_STATUS_DISCHARGING; + break; + case 0x1: + battery_capacity = min(battery_data * 10 + 5, 100); + battery_status = POWER_SUPPLY_STATUS_CHARGING; + break; + case 0x2: + battery_capacity = 100; + battery_status = POWER_SUPPLY_STATUS_FULL; + break; + case 0xa: /* voltage or temperature out of range */ + case 0xb: /* temperature error */ + battery_capacity = 0; + battery_status = POWER_SUPPLY_STATUS_NOT_CHARGING; + break; + case 0xf: /* charging error */ + default: + battery_capacity = 0; + battery_status = POWER_SUPPLY_STATUS_UNKNOWN; + } + + spin_lock_irqsave(&ps_dev->lock, flags); + ps_dev->battery_capacity = battery_capacity; + ps_dev->battery_status = battery_status; + spin_unlock_irqrestore(&ps_dev->lock, flags); + + return 0; +} + +static int dualsense_play_effect(struct input_dev *dev, + void *data, struct ff_effect *effect) +{ + struct hid_device *hdev = input_get_drvdata(dev); + struct dualsense *ds = hid_get_drvdata(hdev); + unsigned long flags; + + if (effect->type != FF_RUMBLE) + return 0; + + spin_lock_irqsave(&ds->base.lock, flags); + ds->update_rumble = true; + ds->motor_left = effect->u.rumble.strong_magnitude / 256; + ds->motor_right = effect->u.rumble.weak_magnitude / 256; + spin_unlock_irqrestore(&ds->base.lock, flags); + + schedule_work(&ds->output_worker); + return 0; +} + +static int dualsense_reset_leds(struct dualsense *ds) +{ + struct dualsense_output_report report; + uint8_t *buf; + + buf = kzalloc(sizeof(struct dualsense_output_report_bt), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + dualsense_init_output_report(ds, &report, buf); + /* + * On Bluetooth the DualSense outputs an animation on the lightbar + * during startup and maintains a color afterwards. We need to explicitly + * reconfigure the lightbar before we can do any programming later on. + * In USB the lightbar is not on by default, but redoing the setup there + * doesn't hurt. + */ + report.common->valid_flag2 = DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE; + report.common->lightbar_setup = DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT; /* Fade light out. */ + dualsense_send_output_report(ds, &report); + + kfree(buf); + return 0; +} + +static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t green, uint8_t blue) +{ + ds->update_lightbar = true; + ds->lightbar_red = red; + ds->lightbar_green = green; + ds->lightbar_blue = blue; + + schedule_work(&ds->output_worker); +} + +static void dualsense_set_player_leds(struct dualsense *ds) +{ + /* + * The DualSense controller has a row of 5 LEDs used for player ids. + * Behavior on the PlayStation 5 console is to center the player id + * across the LEDs, so e.g. player 1 would be "--x--" with x being 'on'. + * Follow a similar mapping here. 
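+	 * Player 2 maps to "-x-x-", player 3 to "x-x-x", and so on, as
+	 * encoded in player_ids[] below.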
+ */ + static const int player_ids[5] = { + BIT(2), + BIT(3) | BIT(1), + BIT(4) | BIT(2) | BIT(0), + BIT(4) | BIT(3) | BIT(1) | BIT(0), + BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0) + }; + + uint8_t player_id = ds->base.player_id % ARRAY_SIZE(player_ids); + + ds->update_player_leds = true; + ds->player_leds_state = player_ids[player_id]; + schedule_work(&ds->output_worker); +} + +static struct ps_device *dualsense_create(struct hid_device *hdev) +{ + struct dualsense *ds; + struct ps_device *ps_dev; + uint8_t max_output_report_size; + int ret; + + ds = devm_kzalloc(&hdev->dev, sizeof(*ds), GFP_KERNEL); + if (!ds) + return ERR_PTR(-ENOMEM); + + /* + * Patch version to allow userspace to distinguish between + * hid-generic vs hid-playstation axis and button mapping. + */ + hdev->version |= HID_PLAYSTATION_VERSION_PATCH; + + ps_dev = &ds->base; + ps_dev->hdev = hdev; + spin_lock_init(&ps_dev->lock); + ps_dev->battery_capacity = 100; /* initial value until parse_report. */ + ps_dev->battery_status = POWER_SUPPLY_STATUS_UNKNOWN; + ps_dev->parse_report = dualsense_parse_report; + INIT_WORK(&ds->output_worker, dualsense_output_worker); + hid_set_drvdata(hdev, ds); + + max_output_report_size = sizeof(struct dualsense_output_report_bt); + ds->output_report_dmabuf = devm_kzalloc(&hdev->dev, + max_output_report_size, GFP_KERNEL); + if (!ds->output_report_dmabuf) + return ERR_PTR(-ENOMEM); + + ret = dualsense_get_mac_address(ds); + if (ret) { + hid_err(hdev, "Failed to get MAC address from DualSense\n"); + return ERR_PTR(ret); + } + snprintf(hdev->uniq, sizeof(hdev->uniq), "%pMR", ds->base.mac_address); + + ret = dualsense_get_firmware_info(ds); + if (ret) { + hid_err(hdev, "Failed to get firmware info from DualSense\n"); + return ERR_PTR(ret); + } + + ret = ps_devices_list_add(ps_dev); + if (ret) + return ERR_PTR(ret); + + ret = dualsense_get_calibration_data(ds); + if (ret) { + hid_err(hdev, "Failed to get calibration data from DualSense\n"); + goto err; + } + + ds->gamepad = ps_gamepad_create(hdev, dualsense_play_effect); + if (IS_ERR(ds->gamepad)) { + ret = PTR_ERR(ds->gamepad); + goto err; + } + + ds->sensors = ps_sensors_create(hdev, DS_ACC_RANGE, DS_ACC_RES_PER_G, + DS_GYRO_RANGE, DS_GYRO_RES_PER_DEG_S); + if (IS_ERR(ds->sensors)) { + ret = PTR_ERR(ds->sensors); + goto err; + } + + ds->touchpad = ps_touchpad_create(hdev, DS_TOUCHPAD_WIDTH, + DS_TOUCHPAD_HEIGHT, 2); + if (IS_ERR(ds->touchpad)) { + ret = PTR_ERR(ds->touchpad); + goto err; + } + + ret = ps_device_register_battery(ps_dev); + if (ret) + goto err; + + /* + * The hardware may have control over the LEDs (e.g. in Bluetooth on startup). + * Reset the LEDs (lightbar, mute, player leds), so we can control them + * from software. + */ + ret = dualsense_reset_leds(ds); + if (ret) + goto err; + + dualsense_set_lightbar(ds, 0, 0, 128); /* blue */ + + ret = ps_device_set_player_id(ps_dev); + if (ret) { + hid_err(hdev, "Failed to assign player id for DualSense: %d\n", ret); + goto err; + } + + /* Set player LEDs to our player id. */ + dualsense_set_player_leds(ds); + + /* + * Reporting hardware and firmware is important as there are frequent updates, which + * can change behavior. 
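+	 * The same values are exposed through the firmware_version and
+	 * hardware_version sysfs attributes registered in ps_probe().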
+ */ + hid_info(hdev, "Registered DualSense controller hw_version=0x%08x fw_version=0x%08x\n", + ds->base.hw_version, ds->base.fw_version); + + return &ds->base; + +err: + ps_devices_list_remove(ps_dev); + return ERR_PTR(ret); +} + +static int ps_raw_event(struct hid_device *hdev, struct hid_report *report, + u8 *data, int size) +{ + struct ps_device *dev = hid_get_drvdata(hdev); + + if (dev && dev->parse_report) + return dev->parse_report(dev, report, data, size); + + return 0; +} + +static int ps_probe(struct hid_device *hdev, const struct hid_device_id *id) +{ + struct ps_device *dev; + int ret; + + ret = hid_parse(hdev); + if (ret) { + hid_err(hdev, "Parse failed\n"); + return ret; + } + + ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); + if (ret) { + hid_err(hdev, "Failed to start HID device\n"); + return ret; + } + + ret = hid_hw_open(hdev); + if (ret) { + hid_err(hdev, "Failed to open HID device\n"); + goto err_stop; + } + + if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER) { + dev = dualsense_create(hdev); + if (IS_ERR(dev)) { + hid_err(hdev, "Failed to create dualsense.\n"); + ret = PTR_ERR(dev); + goto err_close; + } + } + + ret = devm_device_add_group(&hdev->dev, &ps_device_attribute_group); + if (ret) { + hid_err(hdev, "Failed to register sysfs nodes.\n"); + goto err_close; + } + + return ret; + +err_close: + hid_hw_close(hdev); +err_stop: + hid_hw_stop(hdev); + return ret; +} + +static void ps_remove(struct hid_device *hdev) +{ + struct ps_device *dev = hid_get_drvdata(hdev); + + ps_devices_list_remove(dev); + ps_device_release_player_id(dev); + + hid_hw_close(hdev); + hid_hw_stop(hdev); +} + +static const struct hid_device_id ps_devices[] = { + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) }, + { } +}; +MODULE_DEVICE_TABLE(hid, ps_devices); + +static struct hid_driver ps_driver = { + .name = "playstation", + .id_table = ps_devices, + .probe = ps_probe, + .remove = ps_remove, + .raw_event = ps_raw_event, +}; + +static int __init ps_init(void) +{ + return hid_register_driver(&ps_driver); +} + +static void __exit ps_exit(void) +{ + hid_unregister_driver(&ps_driver); + ida_destroy(&ps_player_id_allocator); +} + +module_init(ps_init); +module_exit(ps_exit); + +MODULE_AUTHOR("Sony Interactive Entertainment"); +MODULE_DESCRIPTION("HID Driver for PlayStation peripherals."); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 8ae357bf5a09..6fc57d9f758e 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -86,6 +86,8 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD), HID_QUIRK_MULTI_INPUT }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_GAMEVICE, USB_DEVICE_ID_GAMEVICE_GV186), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_GAMEVICE, USB_DEVICE_ID_GAMEVICE_KISHI), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | 
HID_QUIRK_MULTI_INPUT }, diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.c b/drivers/hwtracing/coresight/coresight-byte-cntr.c index 48756a8607aa..936bafce3457 100644 --- a/drivers/hwtracing/coresight/coresight-byte-cntr.c +++ b/drivers/hwtracing/coresight/coresight-byte-cntr.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. * * Description: CoreSight Trace Memory Controller driver */ @@ -181,7 +181,8 @@ static int tmc_etr_byte_cntr_release(struct inode *in, struct file *fp) mutex_lock(&byte_cntr_data->byte_cntr_lock); byte_cntr_data->read_active = false; - coresight_csr_set_byte_cntr(byte_cntr_data->csr, 0); + if (byte_cntr_data->enable) + coresight_csr_set_byte_cntr(byte_cntr_data->csr, 0); mutex_unlock(&byte_cntr_data->byte_cntr_lock); return 0; @@ -546,15 +547,19 @@ void usb_bypass_notifier(void *priv, unsigned int event, if (!drvdata) return; - if (tmcdrvdata->out_mode != TMC_ETR_OUT_MODE_USB - || tmcdrvdata->mode == CS_MODE_DISABLED) { - dev_err(&tmcdrvdata->csdev->dev, - "%s: ETR is not USB mode, or ETR is disabled.\n", __func__); + if (tmcdrvdata->out_mode != TMC_ETR_OUT_MODE_USB) { + dev_err_ratelimited(&tmcdrvdata->csdev->dev, + "%s: ETR is not USB mode\n", __func__); return; } switch (event) { case USB_QDSS_CONNECT: + if (tmcdrvdata->mode == CS_MODE_DISABLED) { + dev_err_ratelimited(&tmcdrvdata->csdev->dev, + "%s: ETR is disabled.\n", __func__); + return; + } ret = usb_bypass_start(drvdata); if (ret < 0) return; @@ -564,6 +569,11 @@ void usb_bypass_notifier(void *priv, unsigned int event, break; case USB_QDSS_DISCONNECT: + if (tmcdrvdata->mode == CS_MODE_DISABLED) { + dev_err_ratelimited(&tmcdrvdata->csdev->dev, + "%s: ETR is disabled.\n", __func__); + return; + } usb_bypass_stop(drvdata); break; diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index 8bc9b50b4510..a756453deaa4 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -2014,15 +2014,17 @@ static int _tmc_disable_etr_sink(struct coresight_device *csdev, /* Complain if we (somehow) got out of sync */ WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED); if (drvdata->mode != CS_MODE_DISABLED) { + drvdata->mode = CS_MODE_DISABLED; if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) { if (!drvdata->byte_cntr->sw_usb) { __tmc_etr_disable_to_bam(drvdata); spin_unlock_irqrestore(&drvdata->spinlock, flags); tmc_etr_bam_disable(drvdata); + mutex_unlock(&drvdata->mem_lock); usb_qdss_close(drvdata->usbch); + mutex_lock(&drvdata->mem_lock); drvdata->usbch = NULL; - drvdata->mode = CS_MODE_DISABLED; goto out; } else { spin_unlock_irqrestore(&drvdata->spinlock, @@ -2034,12 +2036,10 @@ static int _tmc_disable_etr_sink(struct coresight_device *csdev, } else { tmc_etr_disable_hw(drvdata); } - drvdata->mode = CS_MODE_DISABLED; } /* Dissociate from monitored process. 
*/ drvdata->pid = -1; - drvdata->mode = CS_MODE_DISABLED; /* Reset perf specific data */ drvdata->perf_buf = NULL; @@ -2099,23 +2099,25 @@ int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode) return 0; } + mutex_unlock(&drvdata->mem_lock); coresight_disable_all_source_link(); + mutex_lock(&drvdata->mem_lock); _tmc_disable_etr_sink(drvdata->csdev, true); old_mode = drvdata->out_mode; drvdata->out_mode = new_mode; if (tmc_enable_etr_sink_sysfs(drvdata->csdev)) { drvdata->out_mode = old_mode; tmc_enable_etr_sink_sysfs(drvdata->csdev); + mutex_unlock(&drvdata->mem_lock); coresight_enable_all_source_link(); dev_err(drvdata->dev, "Switch to %s failed. Fall back to %s.\n", str_tmc_etr_out_mode[new_mode], str_tmc_etr_out_mode[old_mode]); - mutex_unlock(&drvdata->mem_lock); return -EINVAL; } - coresight_enable_all_source_link(); mutex_unlock(&drvdata->mem_lock); + coresight_enable_all_source_link(); return 0; } diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index de9577449593..1571a3f978a5 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2012,2017-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012,2017-2019,2021, The Linux Foundation. All rights reserved. * * Description: CoreSight Trace Memory Controller driver */ @@ -23,10 +23,13 @@ #include #include #include +#include #include "coresight-priv.h" #include "coresight-tmc.h" +#define TMC_REG_DUMP_MAGIC 0x42445953 + void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata) { /* Ensure formatter, unformatter and hardware fifo are empty */ @@ -56,10 +59,84 @@ void tmc_flush_and_stop(struct tmc_drvdata *drvdata) tmc_wait_for_tmcready(drvdata); } +static void __tmc_reg_dump(struct tmc_drvdata *drvdata) +{ + struct dump_vaddr_entry *dump_entry; + struct msm_dump_data *dump_data; + uint32_t *reg_buf; + + if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { + dump_entry = get_msm_dump_ptr(MSM_DUMP_DATA_TMC_ETR_REG); + dev_dbg(drvdata->dev, "%s: TMC ETR dump entry ptr is %pK\n", + __func__, dump_entry); + } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETB || + drvdata->config_type == TMC_CONFIG_TYPE_ETF) { + dump_entry = get_msm_dump_ptr(MSM_DUMP_DATA_TMC_ETF_REG); + dev_dbg(drvdata->dev, "%s: TMC ETF dump entry ptr is %pK\n", + __func__, dump_entry); + } else + return; + + if (dump_entry == NULL) + return; + + reg_buf = (uint32_t *)(dump_entry->dump_vaddr); + dump_data = dump_entry->dump_data_vaddr; + + if (reg_buf == NULL || dump_data == NULL) + return; + + dev_dbg(drvdata->dev, "%s: TMC dump reg ptr is %pK, dump_data is %pK\n", + __func__, reg_buf, dump_data); + + reg_buf[1] = readl_relaxed(drvdata->base + TMC_RSZ); + reg_buf[3] = readl_relaxed(drvdata->base + TMC_STS); + reg_buf[5] = readl_relaxed(drvdata->base + TMC_RRP); + reg_buf[6] = readl_relaxed(drvdata->base + TMC_RWP); + reg_buf[7] = readl_relaxed(drvdata->base + TMC_TRG); + reg_buf[8] = readl_relaxed(drvdata->base + TMC_CTL); + reg_buf[10] = readl_relaxed(drvdata->base + TMC_MODE); + reg_buf[11] = readl_relaxed(drvdata->base + TMC_LBUFLEVEL); + reg_buf[12] = readl_relaxed(drvdata->base + TMC_CBUFLEVEL); + reg_buf[13] = readl_relaxed(drvdata->base + TMC_BUFWM); + if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { + reg_buf[14] = readl_relaxed(drvdata->base + TMC_RRPHI); + reg_buf[15] = readl_relaxed(drvdata->base + TMC_RWPHI); + reg_buf[68] = readl_relaxed(drvdata->base + 
TMC_AXICTL); + reg_buf[70] = readl_relaxed(drvdata->base + TMC_DBALO); + reg_buf[71] = readl_relaxed(drvdata->base + TMC_DBAHI); + } + reg_buf[192] = readl_relaxed(drvdata->base + TMC_FFSR); + reg_buf[193] = readl_relaxed(drvdata->base + TMC_FFCR); + reg_buf[194] = readl_relaxed(drvdata->base + TMC_PSCR); + reg_buf[1000] = readl_relaxed(drvdata->base + CORESIGHT_CLAIMSET); + reg_buf[1001] = readl_relaxed(drvdata->base + CORESIGHT_CLAIMCLR); + reg_buf[1005] = readl_relaxed(drvdata->base + CORESIGHT_LSR); + reg_buf[1006] = readl_relaxed(drvdata->base + CORESIGHT_AUTHSTATUS); + reg_buf[1010] = readl_relaxed(drvdata->base + CORESIGHT_DEVID); + reg_buf[1011] = readl_relaxed(drvdata->base + CORESIGHT_DEVTYPE); + reg_buf[1012] = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR4); + reg_buf[1013] = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR5); + reg_buf[1014] = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR6); + reg_buf[1015] = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR7); + reg_buf[1016] = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR0); + reg_buf[1017] = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR1); + reg_buf[1018] = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR2); + reg_buf[1019] = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR3); + reg_buf[1020] = readl_relaxed(drvdata->base + CORESIGHT_COMPIDR0); + reg_buf[1021] = readl_relaxed(drvdata->base + CORESIGHT_COMPIDR1); + reg_buf[1022] = readl_relaxed(drvdata->base + CORESIGHT_COMPIDR2); + reg_buf[1023] = readl_relaxed(drvdata->base + CORESIGHT_COMPIDR3); + + dump_data->magic = TMC_REG_DUMP_MAGIC; +} + void tmc_enable_hw(struct tmc_drvdata *drvdata) { drvdata->enable = true; writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL); + if (drvdata->force_reg_dump) + __tmc_reg_dump(drvdata); } void tmc_disable_hw(struct tmc_drvdata *drvdata) @@ -650,6 +727,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) return -EPROBE_DEFER; } } + if (of_property_read_bool(drvdata->dev->of_node, "qcom,force-reg-dump")) + drvdata->force_reg_dump = true; desc.pdata = pdata; desc.dev = dev; diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h index 1ca22dd77b50..9583a8bf8cad 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.h +++ b/drivers/hwtracing/coresight/coresight-tmc.h @@ -268,6 +268,7 @@ struct tmc_drvdata { struct idr idr; struct mutex idr_mutex; struct etr_buf *perf_buf; + bool force_reg_dump; }; struct etr_buf_operations { diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index e7d4b78a451d..116d4914a799 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -165,7 +165,7 @@ struct geni_i2c_clk_fld { static struct geni_i2c_clk_fld geni_i2c_clk_map[] = { {KHz(100), 7, 10, 11, 26}, - {KHz(400), 2, 5, 12, 24}, + {KHz(400), 2, 7, 10, 24}, {KHz(1000), 1, 3, 9, 18}, }; diff --git a/drivers/i3c/master/i3c-master-qcom-geni.c b/drivers/i3c/master/i3c-master-qcom-geni.c index 3d9fc7858820..f882f534a40d 100644 --- a/drivers/i3c/master/i3c-master-qcom-geni.c +++ b/drivers/i3c/master/i3c-master-qcom-geni.c @@ -782,8 +782,8 @@ static int _i3c_geni_execute_command geni_setup_m_cmd(gi3c->se.base, xfer->m_cmd, xfer->m_param); GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, - "I3C cmd:0x%x param:0x%x READ len:%d, m_cmd: 0x%x\n", - xfer->m_cmd, xfer->m_param, len, + "Read_mode:%d cmd:0x%x param:0x%x len:%d m_cmd:0x%x\n", + xfer->mode, xfer->m_cmd, xfer->m_param, len, geni_read_reg(gi3c->se.base, SE_GENI_M_CMD0)); if (xfer->mode == SE_DMA) { @@ -794,6 +794,10 @@ static int _i3c_geni_execute_command GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, "DMA Err:%d, FIFO mode enabled\n", ret); xfer->mode = FIFO_MODE; + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "DMA Read Err:%d,Enabling FIFO mode\n", + ret); + WARN_ON(1); geni_se_select_mode(gi3c->se.base, xfer->mode); } } @@ -802,8 +806,8 @@ static int _i3c_geni_execute_command geni_setup_m_cmd(gi3c->se.base, xfer->m_cmd, xfer->m_param); GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, - "I3C cmd:0x%x param:0x%x WRITE len:%d, m_cmd: 0x%x\n", - xfer->m_cmd, xfer->m_param, len, + "Write_mode:%d cmd:0x%x param:0x%x len:%d m_cmd:0x%x\n", + xfer->mode, xfer->m_cmd, xfer->m_param, len, geni_read_reg(gi3c->se.base, SE_GENI_M_CMD0)); if (xfer->mode == SE_DMA) { @@ -814,6 +818,10 @@ static int _i3c_geni_execute_command GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, "DMA Err:%d, FIFO mode enabled\n", ret); xfer->mode = FIFO_MODE; + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "DMA Write Err:%d,Enabling FIFO mode\n", + ret); + WARN_ON(1); geni_se_select_mode(gi3c->se.base, xfer->mode); } } @@ -1139,7 +1147,7 @@ static int geni_i3c_master_priv_xfers for (i = 0; i < nxfers; i++) { bool stall = (i < (nxfers - 1)); - struct i3c_xfer_params xfer = { FIFO_MODE }; + struct i3c_xfer_params xfer = { SE_DMA }; xfer.m_param = (stall ? STOP_STRETCH : 0); xfer.m_param |= ((dev->info.dyn_addr & I3C_ADDR_MASK) diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c index 3680e0d47412..d27246cb3cb6 100644 --- a/drivers/iio/adc/qcom-spmi-vadc.c +++ b/drivers/iio/adc/qcom-spmi-vadc.c @@ -1,14 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * Copyright (c) 2012-2016,2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -560,19 +552,19 @@ static const struct vadc_channels vadc_chans[] = { VADC_CHAN_NO_SCALE(SPARE1_03, 1) VADC_CHAN_NO_SCALE(USB_ID_MV, 1) VADC_CHAN_VOLT(VCOIN, 1, SCALE_DEFAULT) - VADC_CHAN_NO_SCALE(VBAT_SNS, 1) + VADC_CHAN_VOLT(VBAT_SNS, 1, SCALE_DEFAULT) VADC_CHAN_VOLT(VSYS, 1, SCALE_DEFAULT) VADC_CHAN_TEMP(DIE_TEMP, 0, SCALE_PMIC_THERM) VADC_CHAN_VOLT(REF_625MV, 0, SCALE_DEFAULT) VADC_CHAN_VOLT(REF_1250MV, 0, SCALE_DEFAULT) VADC_CHAN_NO_SCALE(CHG_TEMP, 0) - VADC_CHAN_NO_SCALE(SPARE1, 0) + VADC_CHAN_VOLT(SPARE1, 0, SCALE_DEFAULT) VADC_CHAN_TEMP(SPARE2, 0, SCALE_PMI_CHG_TEMP) VADC_CHAN_VOLT(GND_REF, 0, SCALE_DEFAULT) VADC_CHAN_VOLT(VDD_VADC, 0, SCALE_DEFAULT) VADC_CHAN_NO_SCALE(P_MUX1_1_1, 0) - VADC_CHAN_NO_SCALE(P_MUX2_1_1, 0) + VADC_CHAN_TEMP(P_MUX2_1_1, 0, SCALE_THERM_100K_PULLUP) VADC_CHAN_NO_SCALE(P_MUX3_1_1, 0) VADC_CHAN_NO_SCALE(P_MUX4_1_1, 0) VADC_CHAN_NO_SCALE(P_MUX5_1_1, 0) @@ -606,18 +598,18 @@ static const struct vadc_channels vadc_chans[] = { VADC_CHAN_NO_SCALE(P_MUX16_1_3, 1) VADC_CHAN_NO_SCALE(LR_MUX1_BAT_THERM, 0) - VADC_CHAN_NO_SCALE(LR_MUX2_BAT_ID, 0) - VADC_CHAN_NO_SCALE(LR_MUX3_XO_THERM, 0) + VADC_CHAN_VOLT(LR_MUX2_BAT_ID, 0, SCALE_DEFAULT) + VADC_CHAN_TEMP(LR_MUX3_XO_THERM, 0, SCALE_THERM_100K_PULLUP) VADC_CHAN_NO_SCALE(LR_MUX4_AMUX_THM1, 0) VADC_CHAN_NO_SCALE(LR_MUX5_AMUX_THM2, 0) VADC_CHAN_NO_SCALE(LR_MUX6_AMUX_THM3, 0) - VADC_CHAN_NO_SCALE(LR_MUX7_HW_ID, 0) + VADC_CHAN_TEMP(LR_MUX7_HW_ID, 0, SCALE_THERM_100K_PULLUP) VADC_CHAN_NO_SCALE(LR_MUX8_AMUX_THM4, 0) VADC_CHAN_NO_SCALE(LR_MUX9_AMUX_THM5, 0) VADC_CHAN_NO_SCALE(LR_MUX10_USB_ID, 0) VADC_CHAN_NO_SCALE(AMUX_PU1, 0) VADC_CHAN_NO_SCALE(AMUX_PU2, 0) - VADC_CHAN_NO_SCALE(LR_MUX3_BUF_XO_THERM, 0) + VADC_CHAN_TEMP(LR_MUX3_BUF_XO_THERM, 0, SCALE_THERM_100K_PULLUP) VADC_CHAN_NO_SCALE(LR_MUX1_PU1_BAT_THERM, 0) VADC_CHAN_NO_SCALE(LR_MUX2_PU1_BAT_ID, 0) @@ -735,6 +727,12 @@ static int vadc_get_dt_channel_data(struct device *dev, else prop->calibration = VADC_CALIB_ABSOLUTE; + prop->scale_fn_type = -EINVAL; + ret = of_property_read_u32(node, "qcom,scale-fn-type", &value); + + if (!ret && value < SCALE_HW_CALIB_MAX) + prop->scale_fn_type = value; + dev_dbg(dev, "%02x name %s\n", chan, name); return 0; @@ -747,6 +745,7 @@ static int vadc_get_dt_data(struct vadc_priv *vadc, struct device_node *node) struct vadc_channel_prop prop; struct device_node *child; unsigned int index = 0; + bool scale_fn_type_from_dt = false; int ret; vadc->nchannels = of_get_available_child_count(node); @@ -772,14 +771,24 @@ static int vadc_get_dt_data(struct vadc_priv *vadc, struct device_node *node) return ret; } - prop.scale_fn_type = vadc_chans[prop.channel].scale_fn_type; + if (prop.scale_fn_type == -EINVAL) { + prop.scale_fn_type = + vadc_chans[prop.channel].scale_fn_type; + } else { + scale_fn_type_from_dt = true; + } + vadc->chan_props[index] = prop; vadc_chan = &vadc_chans[prop.channel]; iio_chan->channel = prop.channel; iio_chan->datasheet_name = vadc_chan->datasheet_name; - iio_chan->info_mask_separate = vadc_chan->info_mask; + if (!scale_fn_type_from_dt) + iio_chan->info_mask_separate = vadc_chan->info_mask; + else + iio_chan->info_mask_separate = + vadc_chan->info_mask | BIT(IIO_CHAN_INFO_PROCESSED); iio_chan->type = vadc_chan->type; iio_chan->indexed = 1; iio_chan->address = index++; diff --git a/drivers/iio/adc/qcom-vadc-common.c b/drivers/iio/adc/qcom-vadc-common.c index 1b2b6f003169..8467d0fbddf4 100644 --- a/drivers/iio/adc/qcom-vadc-common.c +++ b/drivers/iio/adc/qcom-vadc-common.c @@ -759,6 +759,61 @@ static const struct 
vadc_map_pt adcmap7_100k[] = { { 2420, 130048 } }; +/* Voltage to temperature */ +static const struct vadc_map_pt adcmap_batt_therm_qrd_215[] = { + {1575, -200}, + {1549, -180}, + {1522, -160}, + {1493, -140}, + {1463, -120}, + {1431, -100}, + {1398, -80}, + {1364, -60}, + {1329, -40}, + {1294, -20}, + {1258, 0}, + {1222, 20}, + {1187, 40}, + {1151, 60}, + {1116, 80}, + {1082, 100}, + {1049, 120}, + {1016, 140}, + {985, 160}, + {955, 180}, + {926, 200}, + {899, 220}, + {873, 240}, + {849, 260}, + {825, 280}, + {804, 300}, + {783, 320}, + {764, 340}, + {746, 360}, + {729, 380}, + {714, 400}, + {699, 420}, + {686, 440}, + {673, 460}, + {662, 480}, + {651, 500}, + {641, 520}, + {632, 540}, + {623, 560}, + {615, 580}, + {608, 600}, + {601, 620}, + {595, 640}, + {589, 660}, + {583, 680}, + {578, 700}, + {574, 720}, + {569, 740}, + {565, 760}, + {562, 780}, + {558, 800} +}; + static int qcom_vadc_map_voltage_temp(const struct vadc_map_pt *pts, u32 tablesize, s32 input, s64 *output) { @@ -830,6 +885,10 @@ static int qcom_vadc_scale_volt(const struct vadc_linear_graph *calib_graph, voltage = voltage * prescale->den; result = div64_s64(voltage, prescale->num); + + if (!absolute) + result *= 1000; + *result_uv = result; return 0; @@ -860,6 +919,29 @@ static int qcom_vadc_scale_therm(const struct vadc_linear_graph *calib_graph, return 0; } +static int qcom_vadc_scale_therm_qrd_215( + const struct vadc_linear_graph *calib_graph, + const struct vadc_prescale_ratio *prescale, + bool absolute, u16 adc_code, + int *result_mdec) +{ + s64 voltage = 0, result = 0; + int ret; + + qcom_vadc_scale_calib(calib_graph, adc_code, absolute, &voltage); + + ret = qcom_vadc_map_voltage_temp(adcmap_batt_therm_qrd_215, + ARRAY_SIZE(adcmap_batt_therm_qrd_215), + voltage, &result); + if (ret) + return ret; + + *result_mdec = result; + + return 0; +} + + static int qcom_vadc_scale_die_temp(const struct vadc_linear_graph *calib_graph, const struct vadc_prescale_ratio *prescale, bool absolute, @@ -1307,6 +1389,10 @@ int qcom_vadc_scale(enum vadc_scale_fn_type scaletype, return qcom_vadc_scale_therm(calib_graph, prescale, absolute, adc_code, result); + case SCALE_BATT_THERM_QRD_215: + return qcom_vadc_scale_therm_qrd_215(calib_graph, prescale, + absolute, adc_code, + result); case SCALE_PMIC_THERM: return qcom_vadc_scale_die_temp(calib_graph, prescale, absolute, adc_code, diff --git a/drivers/iio/adc/qcom-vadc-common.h b/drivers/iio/adc/qcom-vadc-common.h index ccebabb5a4f2..e0ff98bf5f15 100644 --- a/drivers/iio/adc/qcom-vadc-common.h +++ b/drivers/iio/adc/qcom-vadc-common.h @@ -198,6 +198,7 @@ enum vadc_scale_fn_type { SCALE_HW_CALIB_PM2250_S3_DIE_TEMP, SCALE_HW_CALIB_THERM_100K_PU_PM7, SCALE_HW_CALIB_PMIC_THERM_PM7, + SCALE_BATT_THERM_QRD_215, SCALE_HW_CALIB_MAX, }; diff --git a/drivers/input/misc/qti-haptics.c b/drivers/input/misc/qti-haptics.c index 6b89a10f8dc9..dac72b3e9e46 100644 --- a/drivers/input/misc/qti-haptics.c +++ b/drivers/input/misc/qti-haptics.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -1687,11 +1687,11 @@ static ssize_t brake_pattern_dbgfs_write(struct file *filep, { struct qti_hap_effect *effect = (struct qti_hap_effect *)filep->private_data; - char *kbuf, *token; + char *kbuf, *str, *token; int rc = 0, i = 0, j; u32 val; - kbuf = kmalloc(count + 1, GFP_KERNEL); + kbuf = kzalloc(count + 1, GFP_KERNEL); if (!kbuf) return -ENOMEM; @@ -1703,8 +1703,8 @@ static ssize_t brake_pattern_dbgfs_write(struct file *filep, kbuf[count] = '\0'; *ppos += count; - - while ((token = strsep(&kbuf, " ")) != NULL) { + str = kbuf; + while ((token = strsep(&str, " ")) != NULL) { rc = kstrtouint(token, 0, &val); if (rc < 0) { rc = -EINVAL; @@ -1770,11 +1770,11 @@ static ssize_t pattern_dbgfs_write(struct file *filep, { struct qti_hap_effect *effect = (struct qti_hap_effect *)filep->private_data; - char *kbuf, *token; + char *kbuf, *str, *token; int rc = 0, i = 0, j; u32 val; - kbuf = kmalloc(count + 1, GFP_KERNEL); + kbuf = kzalloc(count + 1, GFP_KERNEL); if (!kbuf) return -ENOMEM; @@ -1786,8 +1786,8 @@ static ssize_t pattern_dbgfs_write(struct file *filep, kbuf[count] = '\0'; *ppos += count; - - while ((token = strsep(&kbuf, " ")) != NULL) { + str = kbuf; + while ((token = strsep(&str, " ")) != NULL) { rc = kstrtouint(token, 0, &val); if (rc < 0) { rc = -EINVAL; diff --git a/drivers/input/touchscreen/nt36xxx/nt36xxx.c b/drivers/input/touchscreen/nt36xxx/nt36xxx.c index 0ade7d19d7d3..4c7f39ccee10 100644 --- a/drivers/input/touchscreen/nt36xxx/nt36xxx.c +++ b/drivers/input/touchscreen/nt36xxx/nt36xxx.c @@ -1121,8 +1121,8 @@ void nvt_stop_crc_reboot(void) * * return: * Executive outcomes. 0---NVT IC. -1---not NVT IC. - *******************************************************/ -static int8_t nvt_ts_check_chip_ver_trim(void) +*******************************************************/ +static int8_t nvt_ts_check_chip_ver_trim(uint32_t chip_ver_trim_addr) { uint8_t buf[8] = {0}; int32_t retry = 0; @@ -1142,9 +1142,9 @@ static int8_t nvt_ts_check_chip_ver_trim(void) CTP_I2C_WRITE(ts->client, I2C_HW_Address, buf, 2); msleep(10); - nvt_set_page(I2C_BLDR_Address, 0x1F64E); + nvt_set_page(I2C_BLDR_Address, chip_ver_trim_addr); - buf[0] = 0x4E; + buf[0] = chip_ver_trim_addr & 0xFF; buf[1] = 0x00; buf[2] = 0x00; buf[3] = 0x00; @@ -1292,11 +1292,15 @@ static int32_t nvt_ts_late_probe(struct i2c_client *client, } //---check chip version trim--- - ret = nvt_ts_check_chip_ver_trim(); + ret = nvt_ts_check_chip_ver_trim(CHIP_VER_TRIM_ADDR); if (ret) { - NVT_ERR("chip is not identified\n"); - ret = -EINVAL; - goto err_chipvertrim_failed; + NVT_LOG("try to check from old chip ver trim address\n"); + ret = nvt_ts_check_chip_ver_trim(CHIP_VER_TRIM_OLD_ADDR); + if (ret) { + NVT_ERR("chip is not identified\n"); + ret = -EINVAL; + goto err_chipvertrim_failed; + } } nvt_bootloader_reset(); diff --git a/drivers/input/touchscreen/nt36xxx/nt36xxx.h b/drivers/input/touchscreen/nt36xxx/nt36xxx.h index eef848bfe9dd..d3ae17038eaa 100644 --- a/drivers/input/touchscreen/nt36xxx/nt36xxx.h +++ b/drivers/input/touchscreen/nt36xxx/nt36xxx.h @@ -62,7 +62,7 @@ //---Touch info.--- #define TOUCH_DEFAULT_MAX_WIDTH 1080 -#define TOUCH_DEFAULT_MAX_HEIGHT 1920 +#define TOUCH_DEFAULT_MAX_HEIGHT 2408 #define TOUCH_MAX_FINGER_NUM 10 #define TOUCH_KEY_NUM 0 #if TOUCH_KEY_NUM > 0 @@ -82,7 +82,7 @@ extern const uint16_t touch_key_array[TOUCH_KEY_NUM]; #if WAKEUP_GESTURE extern const uint16_t gesture_key_array[]; #endif -#define BOOT_UPDATE_FIRMWARE 0 +#define BOOT_UPDATE_FIRMWARE 1 #define BOOT_UPDATE_FIRMWARE_NAME 
"novatek_ts_fw.bin" //---ESD Protect.--- diff --git a/drivers/input/touchscreen/nt36xxx/nt36xxx_fw_update.c b/drivers/input/touchscreen/nt36xxx/nt36xxx_fw_update.c index 52086dc58eae..24648eb75283 100644 --- a/drivers/input/touchscreen/nt36xxx/nt36xxx_fw_update.c +++ b/drivers/input/touchscreen/nt36xxx/nt36xxx_fw_update.c @@ -47,9 +47,15 @@ static int32_t nvt_get_fw_need_write_size(const struct firmware *fw_entry) for (i = total_sectors_to_check; i > 0; i--) { /* printk("current end flag address checked = 0x%X\n", i * FLASH_SECTOR_SIZE - NVT_FLASH_END_FLAG_LEN); */ /* check if there is end flag "NVT" at the end of this sector */ - if (memcmp(&fw_entry->data[i * FLASH_SECTOR_SIZE - NVT_FLASH_END_FLAG_LEN], "NVT", NVT_FLASH_END_FLAG_LEN) == 0) { + if ((memcmp((const char *)&fw_entry->data[i * + FLASH_SECTOR_SIZE - NVT_FLASH_END_FLAG_LEN], + "NVT", NVT_FLASH_END_FLAG_LEN) == 0) || + (memcmp((const char *)&fw_entry->data[i * + FLASH_SECTOR_SIZE - NVT_FLASH_END_FLAG_LEN], + "MOD", NVT_FLASH_END_FLAG_LEN) == 0)) { fw_need_write_size = i * FLASH_SECTOR_SIZE; - NVT_LOG("fw_need_write_size = %zu(0x%zx)\n", fw_need_write_size, fw_need_write_size); + NVT_LOG("fw_need_write_size = %zu(0x%zx)\n", + fw_need_write_size, fw_need_write_size); return 0; } } @@ -75,7 +81,7 @@ int32_t update_firmware_request(char *filename) NVT_LOG("filename is %s\n", filename); - ret = request_firmware_nowarn(&fw_entry, filename, &ts->client->dev); + ret = request_firmware(&fw_entry, filename, &ts->client->dev); if (ret) { NVT_ERR("firmware load failed, ret=%d\n", ret); return ret; @@ -966,10 +972,11 @@ int32_t nvt_check_flash_end_flag(void) } //buf[3:5] => NVT End Flag - strlcpy(nvt_end_flag, &buf[3], NVT_FLASH_END_FLAG_LEN); + strlcpy(nvt_end_flag, &buf[3], sizeof(nvt_end_flag)); NVT_LOG("nvt_end_flag=%s (%02X %02X %02X)\n", nvt_end_flag, buf[3], buf[4], buf[5]); - if (memcmp(nvt_end_flag, "NVT", NVT_FLASH_END_FLAG_LEN) == 0) { + if ((memcmp(nvt_end_flag, "NVT", NVT_FLASH_END_FLAG_LEN) == 0) || + (memcmp(nvt_end_flag, "MOD", NVT_FLASH_END_FLAG_LEN) == 0)) { return 0; } else { NVT_ERR("\"NVT\" end flag not found!\n"); @@ -994,6 +1001,11 @@ void Boot_Update_Firmware(struct work_struct *work) snprintf(firmware_name, sizeof(firmware_name), BOOT_UPDATE_FIRMWARE_NAME); + if (ts->nvt_pid == 0x5B0B) { + NVT_LOG("Skip Firmware Update\n"); + return; + } + // request bin file in "/etc/firmware" ret = update_firmware_request(firmware_name); if (ret) { diff --git a/drivers/input/touchscreen/nt36xxx/nt36xxx_mem_map.h b/drivers/input/touchscreen/nt36xxx/nt36xxx_mem_map.h index 94040a41c3d2..bdfb71aac99e 100644 --- a/drivers/input/touchscreen/nt36xxx/nt36xxx_mem_map.h +++ b/drivers/input/touchscreen/nt36xxx/nt36xxx_mem_map.h @@ -16,6 +16,8 @@ * more details. 
* */ +#define CHIP_VER_TRIM_ADDR 0x3F004 +#define CHIP_VER_TRIM_OLD_ADDR 0x1F64E struct nvt_ts_mem_map { uint32_t EVENT_BUF_ADDR; @@ -173,6 +175,14 @@ struct nvt_ts_trim_id_table { }; static const struct nvt_ts_trim_id_table trim_id_table[] = { + {.id = {0x20, 0xFF, 0xFF, 0x72, 0x66, 0x03}, .mask = {1, 0, 0, 1, 1, 1}, + .mmap = &NT36675_memory_map, .hwinfo = &NT36675_hw_info}, + {.id = {0x00, 0xFF, 0xFF, 0x80, 0x66, 0x03}, .mask = {1, 0, 0, 1, 1, 1}, + .mmap = &NT36675_memory_map, .hwinfo = &NT36675_hw_info}, + {.id = {0x0C, 0xFF, 0xFF, 0x25, 0x65, 0x03}, .mask = {1, 0, 0, 1, 1, 1}, + .mmap = &NT36672A_memory_map, .hwinfo = &NT36672A_hw_info}, + {.id = {0x0E, 0xFF, 0xFF, 0x72, 0x66, 0x03}, .mask = {1, 0, 0, 1, 1, 1}, + .mmap = &NT36675_memory_map, .hwinfo = &NT36675_hw_info}, {.id = {0x0C, 0xFF, 0xFF, 0x72, 0x66, 0x03}, .mask = {1, 0, 0, 1, 1, 1}, .mmap = &NT36675_memory_map, .hwinfo = &NT36675_hw_info}, {.id = {0xFF, 0xFF, 0xFF, 0x26, 0x65, 0x03}, .mask = {0, 0, 0, 1, 1, 1}, diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 61ae03a0a563..469649eb78e7 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -59,6 +59,7 @@ #include #include #include +#include #include @@ -266,6 +267,7 @@ struct arm_smmu_device { #define ARM_SMMU_OPT_STATIC_CB (1 << 6) #define ARM_SMMU_OPT_DISABLE_ATOS (1 << 7) #define ARM_SMMU_OPT_NO_DYNAMIC_ASID (1 << 8) +#define ARM_SMMU_OPT_HALT (1 << 9) u32 options; enum arm_smmu_arch_version version; enum arm_smmu_implementation model; @@ -307,6 +309,7 @@ struct arm_smmu_device { unsigned int num_impl_def_attach_registers; struct arm_smmu_power_resources *pwr; + struct notifier_block regulator_nb; spinlock_t atos_lock; @@ -451,6 +454,7 @@ static struct arm_smmu_option_prop arm_smmu_options[] = { { ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"}, { ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" }, { ARM_SMMU_OPT_NO_DYNAMIC_ASID, "qcom,no-dynamic-asid" }, + { ARM_SMMU_OPT_HALT, "qcom,enable-smmu-halt"}, { 0, NULL}, }; @@ -5161,6 +5165,71 @@ static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr) return 0; } +static int regulator_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + int ret = 0; + struct arm_smmu_device *smmu = container_of(nb, struct arm_smmu_device, + regulator_nb); + + if (event != REGULATOR_EVENT_PRE_DISABLE && + event != REGULATOR_EVENT_ENABLE) + return NOTIFY_OK; + + ret = arm_smmu_prepare_clocks(smmu->pwr); + if (ret) + goto out; + + ret = arm_smmu_power_on_atomic(smmu->pwr); + if (ret) + goto unprepare_clock; + + if (event == REGULATOR_EVENT_PRE_DISABLE) + qsmmuv2_halt(smmu); + else if (event == REGULATOR_EVENT_ENABLE) { + if (arm_smmu_restore_sec_cfg(smmu, 0)) + goto power_off; + qsmmuv2_resume(smmu); + } +power_off: + arm_smmu_power_off_atomic(smmu->pwr); +unprepare_clock: + arm_smmu_unprepare_clocks(smmu->pwr); +out: + return NOTIFY_OK; +} + +static int register_regulator_notifier(struct arm_smmu_device *smmu) +{ + struct device *dev = smmu->dev; + struct regulator_bulk_data *consumers; + int ret = 0, num_consumers; + struct arm_smmu_power_resources *pwr = smmu->pwr; + + if (!(smmu->options & ARM_SMMU_OPT_HALT)) + goto out; + + num_consumers = pwr->num_gdscs; + consumers = pwr->gdscs; + + if (!num_consumers) { + dev_info(dev, "no regulator info exist for %s\n", + dev_name(dev)); + goto out; + } + + smmu->regulator_nb.notifier_call = regulator_notifier; + /* registering the notifier against one gdsc is sufficient as + * we do enable/disable regulators in group. 
+ */ + ret = regulator_register_notifier(consumers[0].consumer, + &smmu->regulator_nb); + if (ret) + dev_err(dev, "Regulator notifier request failed\n"); +out: + return ret; +} + static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr) { const char *cname; @@ -5778,6 +5847,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) if (!using_legacy_binding) arm_smmu_bus_init(); + err = register_regulator_notifier(smmu); + if (err) + goto out_power_off; + return 0; out_power_off: diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c index e17409c17a6d..39aa562e5363 100644 --- a/drivers/iommu/dma-mapping-fast.c +++ b/drivers/iommu/dma-mapping-fast.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */ #include @@ -460,7 +460,7 @@ static void fast_smmu_unmap_sg(struct device *dev, struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev); unsigned long flags; dma_addr_t start; - size_t len; + size_t len, offset; struct scatterlist *tmp; int i; @@ -472,12 +472,14 @@ static void fast_smmu_unmap_sg(struct device *dev, * contiguous IOVA allocation, so this is incredibly easy. */ start = sg_dma_address(sg); + offset = start & ~FAST_PAGE_MASK; for_each_sg(sg_next(sg), tmp, nelems - 1, i) { if (sg_dma_len(tmp) == 0) break; sg = tmp; } - len = sg_dma_address(sg) + sg_dma_len(sg) - start; + len = ALIGN(sg_dma_address(sg) + sg_dma_len(sg) - (start - offset), + FAST_PAGE_SIZE); av8l_fast_unmap_public(mapping->pgtbl_ops, start, len); diff --git a/drivers/iommu/io-pgtable-msm-secure.c b/drivers/iommu/io-pgtable-msm-secure.c index 0d5025842e35..1481c9a604c0 100644 --- a/drivers/iommu/io-pgtable-msm-secure.c +++ b/drivers/iommu/io-pgtable-msm-secure.c @@ -66,6 +66,7 @@ int msm_iommu_sec_pgtbl_init(void) /* Now allocate memory for the secure page tables */ attrs = DMA_ATTR_NO_KERNEL_MAPPING; dev.coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8); + arch_setup_dma_ops(&dev, 0, 0, NULL, 0); cpu_addr = dma_alloc_attrs(&dev, psize[0], &paddr, GFP_KERNEL, attrs); if (!cpu_addr) { pr_err("%s: Failed to allocate %d bytes for PTBL\n", diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c index f4d1047e0e61..71f23d0a5d75 100644 --- a/drivers/iommu/io-pgtable.c +++ b/drivers/iommu/io-pgtable.c @@ -43,6 +43,9 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = { #ifdef CONFIG_IOMMU_IO_PGTABLE_FAST [ARM_V8L_FAST] = &io_pgtable_av8l_fast_init_fns, #endif +#ifdef CONFIG_MSM_TZ_SMMU + [ARM_MSM_SECURE] = &io_pgtable_arm_msm_secure_init_fns, +#endif }; static struct dentry *io_pgtable_top; diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 2c28f3a072b9..ff212b652f52 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -88,5 +88,5 @@ obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o obj-$(CONFIG_NDS32) += irq-ativic32.o obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o -obj-$(CONFIG_QCOM_MPM) += qcom-mpm.o qcom-mpm-bengal.o qcom-mpm-scuba.o qcom-mpm-sdm660.o qcom-mpm-msm8937.o qcom-mpm-msm8953.o +obj-$(CONFIG_QCOM_MPM) += qcom-mpm.o qcom-mpm-bengal.o qcom-mpm-scuba.o qcom-mpm-sdm660.o qcom-mpm-msm8937.o qcom-mpm-msm8953.o qcom-mpm-khaje.o obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o diff --git a/drivers/irqchip/qcom-mpm-khaje.c b/drivers/irqchip/qcom-mpm-khaje.c new file mode 100644 index 000000000000..f62edabb52d2 --- 
/dev/null +++ b/drivers/irqchip/qcom-mpm-khaje.c @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#include +const struct mpm_pin mpm_khaje_gic_chip_data[] = { + {2, 222}, + {12, 454}, /* b3_lfps_rxterm_irq */ + {86, 215}, /* mpm_wake,spmi_m */ + {91, 216}, /* eud_p0_dpse_int_mx */ + {90, 220}, /* eud_p0_dmse_int_mx */ + {5, 328}, /* lpass_irq_out_sdc */ + {24, 111}, /* bi_px_lpi_1_aoss_mx */ + {-1}, +}; diff --git a/drivers/irqchip/qcom-mpm.c b/drivers/irqchip/qcom-mpm.c index 7540a4600b16..95fc7fb218f0 100644 --- a/drivers/irqchip/qcom-mpm.c +++ b/drivers/irqchip/qcom-mpm.c @@ -604,6 +604,10 @@ static const struct of_device_id mpm_gic_chip_data_table[] = { .compatible = "qcom,mpm-gic-msm8953", .data = mpm_msm8953_gic_chip_data, }, + { + .compatible = "qcom,mpm-gic-khaje", + .data = mpm_khaje_gic_chip_data, + }, {} }; MODULE_DEVICE_TABLE(of, mpm_gic_chip_data_table); diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index a92e8970ceb4..e2b8820d125b 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -78,6 +78,13 @@ config LEDS_AS3645A controller. V4L2 flash API is provided as well if CONFIG_V4L2_FLASH_API is enabled. +config LEDS_AW2016 + tristate "LED Support for AW2016" + depends on LEDS_CLASS && I2C + help + this option enables support for the AW2016 RGB LED connected + through I2C. Say Y to enable support for the AW2016 LED + config LEDS_BCM6328 tristate "LED Support for Broadcom BCM6328" depends on LEDS_CLASS diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 43af291b9e1c..b306664b7932 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o obj-$(CONFIG_LEDS_AAT1290) += leds-aat1290.o obj-$(CONFIG_LEDS_APU) += leds-apu.o obj-$(CONFIG_LEDS_AS3645A) += leds-as3645a.o +obj-$(CONFIG_LEDS_AW2016) += leds-aw2016.o obj-$(CONFIG_LEDS_BCM6328) += leds-bcm6328.o obj-$(CONFIG_LEDS_BCM6358) += leds-bcm6358.o obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 112502a56e3e..77b7ea8291c5 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -57,6 +57,7 @@ static ssize_t brightness_store(struct device *dev, if (state == LED_OFF && !(led_cdev->flags & LED_KEEP_TRIGGER)) led_trigger_remove(led_cdev); led_set_brightness(led_cdev, state); + led_cdev->usr_brightness_req = state; ret = size; unlock: @@ -72,7 +73,24 @@ static ssize_t max_brightness_show(struct device *dev, return sprintf(buf, "%u\n", led_cdev->max_brightness); } -static DEVICE_ATTR_RO(max_brightness); + +static ssize_t max_brightness_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + unsigned long state; + ssize_t ret = -EINVAL; + + ret = kstrtoul(buf, 10, &state); + if (ret) + return ret; + + led_cdev->max_brightness = state; + led_set_brightness(led_cdev, led_cdev->usr_brightness_req); + + return size; +} +static DEVICE_ATTR_RW(max_brightness); #ifdef CONFIG_LEDS_TRIGGERS static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store); diff --git a/drivers/leds/leds-aw2016.c b/drivers/leds/leds-aw2016.c new file mode 100644 index 000000000000..908cfb59dfdf --- /dev/null +++ b/drivers/leds/leds-aw2016.c @@ -0,0 +1,688 @@ +/* + * Copyright (c) 2017, 2021, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "leds-aw2016.h" + +#define AW2016_DRIVER_VERSION "V1.0.3" + +/* register address */ +#define AW2016_REG_RESET 0x00 +#define AW2016_REG_GCR1 0x01 +#define AW2016_REG_STATUS 0x02 +#define AW2016_REG_PATST 0x03 +#define AW2016_REG_GCR2 0x04 +#define AW2016_REG_LEDEN 0x30 +#define AW2016_REG_LCFG1 0x31 +#define AW2016_REG_LCFG2 0x32 +#define AW2016_REG_LCFG3 0x33 +#define AW2016_REG_PWM1 0x34 +#define AW2016_REG_PWM2 0x35 +#define AW2016_REG_PWM3 0x36 +#define AW2016_REG_LED1T0 0x37 +#define AW2016_REG_LED1T1 0x38 +#define AW2016_REG_LED1T2 0x39 +#define AW2016_REG_LED2T0 0x3A +#define AW2016_REG_LED2T1 0x3B +#define AW2016_REG_LED2T2 0x3C +#define AW2016_REG_LED3T0 0x3D +#define AW2016_REG_LED3T1 0x3E +#define AW2016_REG_LED3T2 0x3F + +/* register bits */ +#define AW2016_CHIPID 0x09 +#define AW2016_CHIP_RESET_MASK 0x55 +#define AW2016_CHIP_DISABLE_MASK 0x00 +#define AW2016_CHIP_ENABLE_MASK 0x01 +#define AW2016_CHARGE_DISABLE_MASK 0x02 +#define AW2016_LED_BREATH_MODE_MASK 0x10 +#define AW2016_LED_MANUAL_MODE_MASK 0x00 +#define AW2016_LED_BREATHE_PWM_MASK 0xFF +#define AW2016_LED_MANUAL_PWM_MASK 0xFF +#define AW2016_LED_FADEIN_MODE_MASK 0x20 +#define AW2016_LED_FADEOUT_MODE_MASK 0x40 +#define AW2016_CHIP_STANDBY 0x02 + +#define MAX_RISE_TIME_MS 15 +#define MAX_HOLD_TIME_MS 15 +#define MAX_FALL_TIME_MS 15 +#define MAX_OFF_TIME_MS 15 + +/* aw2016 register read/write access*/ +#define REG_NONE_ACCESS 0 +#define REG_RD_ACCESS (1 << 0) +#define REG_WR_ACCESS (1 << 1) +#define AW2016_REG_MAX 0x7F + +const unsigned char aw2016_reg_access[AW2016_REG_MAX] = { + [AW2016_REG_RESET] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_GCR1] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_STATUS] = REG_RD_ACCESS, + [AW2016_REG_PATST] = REG_RD_ACCESS, + [AW2016_REG_GCR2] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_LEDEN] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_LCFG1] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_LCFG2] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_LCFG3] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_PWM1] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_PWM2] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_PWM3] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_LED1T0] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_LED1T1] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_LED1T2] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_LED2T0] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_LED2T1] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_LED2T2] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_LED3T0] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_LED3T1] = REG_RD_ACCESS | REG_WR_ACCESS, + [AW2016_REG_LED3T2] = REG_RD_ACCESS | REG_WR_ACCESS, +}; + +struct aw2016_led { + struct i2c_client *client; + struct led_classdev cdev; + struct aw2016_platform_data *pdata; + struct work_struct brightness_work; + struct work_struct blink_work; + struct mutex lock; + int num_leds; + int id; + int blinking; +}; + +static int aw2016_write(struct aw2016_led 
*led, u8 reg, u8 val) +{ + int ret = -EINVAL, retry_times = 0; + + do { + ret = i2c_smbus_write_byte_data(led->client, reg, val); + retry_times++; + if (retry_times == 5) + break; + } while (ret < 0); + + return ret; +} + +static int aw2016_read(struct aw2016_led *led, u8 reg, u8 *val) +{ + int ret = -EINVAL, retry_times = 0; + + do { + ret = i2c_smbus_read_byte_data(led->client, reg); + retry_times++; + if (retry_times == 5) + break; + } while (ret < 0); + + if (ret < 0) + return ret; + + *val = ret; + return 0; +} + +static void aw2016_soft_reset(struct aw2016_led *led) +{ + aw2016_write(led, AW2016_REG_RESET, AW2016_CHIP_RESET_MASK); + usleep_range(5000, 6000); +} + +static void aw2016_brightness_work(struct work_struct *work) +{ + struct aw2016_led *led = + container_of(work, struct aw2016_led, brightness_work); + u8 val; + + mutex_lock(&led->pdata->led->lock); + + /* enable aw2016 if disabled */ + aw2016_read(led, AW2016_REG_GCR1, &val); + if (!(val & AW2016_CHIP_ENABLE_MASK)) { + aw2016_write(led, AW2016_REG_GCR1, + AW2016_CHARGE_DISABLE_MASK | + AW2016_CHIP_ENABLE_MASK); + usleep_range(2000, 3000); + } + + if (led->cdev.brightness > 0) { + if (led->cdev.brightness > led->cdev.max_brightness) + led->cdev.brightness = led->cdev.max_brightness; + aw2016_write(led, AW2016_REG_GCR2, led->pdata->imax); + aw2016_write(led, AW2016_REG_LCFG1 + led->id, + (AW2016_LED_MANUAL_MODE_MASK | + led->pdata->led_current)); + aw2016_write(led, AW2016_REG_PWM1 + led->id, + led->cdev.brightness); + aw2016_read(led, AW2016_REG_LEDEN, &val); + aw2016_write(led, AW2016_REG_LEDEN, val | (1 << led->id)); + } else { + aw2016_read(led, AW2016_REG_LEDEN, &val); + aw2016_write(led, AW2016_REG_LEDEN, val & (~(1 << led->id))); + } + + /* + * If value in AW2016_REG_LEDEN is 0, it means the RGB leds are + * all off. So we need to power it off. + */ + aw2016_read(led, AW2016_REG_LEDEN, &val); + if (val == 0) { + aw2016_write(led, AW2016_REG_GCR1, + AW2016_CHARGE_DISABLE_MASK | + AW2016_CHIP_DISABLE_MASK); + mutex_unlock(&led->pdata->led->lock); + return; + } + + mutex_unlock(&led->pdata->led->lock); +} + +static void aw2016_blink_work(struct work_struct *work) +{ + struct aw2016_led *led = + container_of(work, struct aw2016_led, blink_work); + u8 val; + + mutex_lock(&led->pdata->led->lock); + + /* enable aw2016 if disabled */ + aw2016_read(led, AW2016_REG_GCR1, &val); + if (!(val & AW2016_CHIP_ENABLE_MASK)) { + aw2016_write(led, AW2016_REG_GCR1, + AW2016_CHARGE_DISABLE_MASK | + AW2016_CHIP_ENABLE_MASK); + usleep_range(2000, 3000); + } + + led->cdev.brightness = led->blinking ? led->cdev.max_brightness : 0; + + if (led->blinking > 0) { + aw2016_write(led, AW2016_REG_GCR2, led->pdata->imax); + aw2016_write(led, AW2016_REG_PWM1 + led->id, + led->cdev.brightness); + aw2016_write(led, AW2016_REG_LED1T0 + led->id * 3, + (led->pdata->rise_time_ms << 4 | + led->pdata->hold_time_ms)); + aw2016_write(led, AW2016_REG_LED1T1 + led->id * 3, + (led->pdata->fall_time_ms << 4 | + led->pdata->off_time_ms)); + aw2016_write(led, AW2016_REG_LCFG1 + led->id, + (AW2016_LED_BREATH_MODE_MASK | + led->pdata->led_current)); + aw2016_read(led, AW2016_REG_LEDEN, &val); + aw2016_write(led, AW2016_REG_LEDEN, val | (1 << led->id)); + } else { + aw2016_read(led, AW2016_REG_LEDEN, &val); + aw2016_write(led, AW2016_REG_LEDEN, val & (~(1 << led->id))); + } + + /* + * If value in AW2016_REG_LEDEN is 0, it means the RGB leds are + * all off. So we need to power it off. 
+ */ + aw2016_read(led, AW2016_REG_LEDEN, &val); + if (val == 0) { + aw2016_write(led, AW2016_REG_GCR1, + AW2016_CHARGE_DISABLE_MASK | + AW2016_CHIP_DISABLE_MASK); + } + + mutex_unlock(&led->pdata->led->lock); +} + +static enum led_brightness aw2016_get_brightness(struct led_classdev *cdev) +{ + return cdev->brightness; +} + +static void aw2016_set_brightness(struct led_classdev *cdev, + enum led_brightness brightness) +{ + struct aw2016_led *led = container_of(cdev, struct aw2016_led, cdev); + + led->cdev.brightness = brightness; + + schedule_work(&led->brightness_work); +} + +static ssize_t aw2016_breath_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct aw2016_led *led = container_of(led_cdev, struct aw2016_led, + cdev); + + return led->blinking; +} + +static ssize_t aw2016_breath_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + unsigned long blinking; + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct aw2016_led *led = + container_of(led_cdev, struct aw2016_led, cdev); + ssize_t ret = -EINVAL; + + ret = kstrtoul(buf, 10, &blinking); + if (ret) + return ret; + led->blinking = (int)blinking; + schedule_work(&led->blink_work); + + return len; +} + +static ssize_t led_time_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct aw2016_led *led = + container_of(led_cdev, struct aw2016_led, cdev); + + return snprintf(buf, PAGE_SIZE, "%d %d %d %d\n", + led->pdata->rise_time_ms, led->pdata->hold_time_ms, + led->pdata->fall_time_ms, led->pdata->off_time_ms); +} + +static ssize_t led_time_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct aw2016_led *led = + container_of(led_cdev, struct aw2016_led, cdev); + int rc, rise_time_ms, hold_time_ms, fall_time_ms, off_time_ms; + + rc = sscanf(buf, "%d %d %d %d", &rise_time_ms, &hold_time_ms, + &fall_time_ms, &off_time_ms); + + mutex_lock(&led->pdata->led->lock); + led->pdata->rise_time_ms = (rise_time_ms > MAX_RISE_TIME_MS) ? + MAX_RISE_TIME_MS : + rise_time_ms; + led->pdata->hold_time_ms = (hold_time_ms > MAX_HOLD_TIME_MS) ? + MAX_HOLD_TIME_MS : + hold_time_ms; + led->pdata->fall_time_ms = (fall_time_ms > MAX_FALL_TIME_MS) ? + MAX_FALL_TIME_MS : + fall_time_ms; + led->pdata->off_time_ms = + (off_time_ms > MAX_OFF_TIME_MS) ? 
MAX_OFF_TIME_MS : off_time_ms; + led->blinking = 1; + mutex_unlock(&led->pdata->led->lock); + + schedule_work(&led->blink_work); + + return len; +} + +static ssize_t reg_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct aw2016_led *led = + container_of(led_cdev, struct aw2016_led, cdev); + + unsigned char i, reg_val; + ssize_t len = 0; + + for (i = 0; i < AW2016_REG_MAX; i++) { + if (!(aw2016_reg_access[i] & REG_RD_ACCESS)) + continue; + aw2016_read(led, i, &reg_val); + len += snprintf(buf + len, PAGE_SIZE - len, + "reg:0x%02x=0x%02x\n", i, reg_val); + } + + return len; +} + +static ssize_t reg_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t len) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct aw2016_led *led = + container_of(led_cdev, struct aw2016_led, cdev); + + unsigned int databuf[2]; + + if (sscanf(buf, "%x %x", &databuf[0], &databuf[1]) == 2) { + aw2016_write(led, (unsigned char)databuf[0], + (unsigned char)databuf[1]); + } + + return len; +} + +static DEVICE_ATTR(breath, 0644, aw2016_breath_show, aw2016_breath_store); +static DEVICE_ATTR_RW(led_time); +static DEVICE_ATTR_RW(reg); + +static struct attribute *aw2016_led_attributes[] = { + &dev_attr_breath.attr, + &dev_attr_led_time.attr, + &dev_attr_reg.attr, + NULL, +}; + +static struct attribute_group aw2016_led_attr_group = { + .attrs = aw2016_led_attributes +}; +static int aw2016_check_chipid(struct aw2016_led *led) +{ + u8 val; + u8 cnt; + + for (cnt = 5; cnt > 0; cnt--) { + aw2016_read(led, AW2016_REG_RESET, &val); + dev_notice(&led->client->dev, "aw2016 chip id %0x", val); + if (val == AW2016_CHIPID) + return 0; + } + + return -EINVAL; +} + +static int aw2016_led_err_handle(struct aw2016_led *led_array, int parsed_leds) +{ + int i; + /* + * If probe fails, cannot free resource of all LEDs, only free + * resources of LEDs which have allocated these resource really.
+ */ + for (i = 0; i < parsed_leds; i++) { + sysfs_remove_group(&led_array[i].cdev.dev->kobj, + &aw2016_led_attr_group); + led_classdev_unregister(&led_array[i].cdev); + cancel_work_sync(&led_array[i].brightness_work); + cancel_work_sync(&led_array[i].blink_work); + devm_kfree(&led_array->client->dev, led_array[i].pdata); + led_array[i].pdata = NULL; + } + return i; +} + +static int aw2016_led_parse_child_node(struct aw2016_led *led_array, + struct device_node *node) +{ + struct aw2016_led *led; + struct device_node *temp; + struct aw2016_platform_data *pdata; + int rc = 0, parsed_leds = 0; + + for_each_child_of_node(node, temp) { + led = &led_array[parsed_leds]; + led->client = led_array->client; + + pdata = devm_kzalloc(&led->client->dev, + sizeof(struct aw2016_platform_data), + GFP_KERNEL); + if (!pdata) { + dev_err(&led->client->dev, + "Failed to allocate memory\n"); + goto free_err; + } + pdata->led = led_array; + led->pdata = pdata; + + rc = of_property_read_string(temp, "awinic,name", + &led->cdev.name); + if (rc < 0) { + dev_err(&led->client->dev, + "Failure reading led name, rc = %d\n", rc); + goto free_pdata; + } + + rc = of_property_read_u32(temp, "awinic,id", &led->id); + if (rc < 0) { + dev_err(&led->client->dev, + "Failure reading id, rc = %d\n", rc); + goto free_pdata; + } + + rc = of_property_read_u32(temp, "awinic,imax", + &led->pdata->imax); + if (rc < 0) { + dev_err(&led->client->dev, + "Failure reading imax, rc = %d\n", rc); + goto free_pdata; + } + + rc = of_property_read_u32(temp, "awinic,led-current", + &led->pdata->led_current); + if (rc < 0) { + dev_err(&led->client->dev, + "Failure reading led-current, rc = %d\n", rc); + goto free_pdata; + } + + rc = of_property_read_u32(temp, "awinic,max-brightness", + &led->cdev.max_brightness); + if (rc < 0) { + dev_err(&led->client->dev, + "Failure reading max-brightness, rc = %d\n", + rc); + goto free_pdata; + } + + rc = of_property_read_u32(temp, "awinic,rise-time-ms", + &led->pdata->rise_time_ms); + if (rc < 0) { + dev_err(&led->client->dev, + "Failure reading rise-time-ms, rc = %d\n", rc); + goto free_pdata; + } + + rc = of_property_read_u32(temp, "awinic,hold-time-ms", + &led->pdata->hold_time_ms); + if (rc < 0) { + dev_err(&led->client->dev, + "Failure reading hold-time-ms, rc = %d\n", rc); + goto free_pdata; + } + + rc = of_property_read_u32(temp, "awinic,fall-time-ms", + &led->pdata->fall_time_ms); + if (rc < 0) { + dev_err(&led->client->dev, + "Failure reading fall-time-ms, rc = %d\n", rc); + goto free_pdata; + } + + rc = of_property_read_u32(temp, "awinic,off-time-ms", + &led->pdata->off_time_ms); + if (rc < 0) { + dev_err(&led->client->dev, + "Failure reading off-time-ms, rc = %d\n", rc); + goto free_pdata; + } + + INIT_WORK(&led->brightness_work, aw2016_brightness_work); + INIT_WORK(&led->blink_work, aw2016_blink_work); + + led->cdev.brightness_set = aw2016_set_brightness; + led->cdev.brightness_get = aw2016_get_brightness; + + rc = led_classdev_register(&led->client->dev, &led->cdev); + if (rc) { + dev_err(&led->client->dev, + "unable to register led %d,rc=%d\n", led->id, + rc); + goto free_pdata; + } + + rc = sysfs_create_group(&led->cdev.dev->kobj, + &aw2016_led_attr_group); + if (rc) { + dev_err(&led->client->dev, "led sysfs rc: %d\n", rc); + goto free_class; + } + parsed_leds++; + } + + return 0; + +free_class: + aw2016_led_err_handle(led_array, parsed_leds); + led_classdev_unregister(&led_array[parsed_leds].cdev); + cancel_work_sync(&led_array[parsed_leds].brightness_work); + 
cancel_work_sync(&led_array[parsed_leds].blink_work); + devm_kfree(&led->client->dev, led_array[parsed_leds].pdata); + led_array[parsed_leds].pdata = NULL; + return rc; + +free_pdata: + aw2016_led_err_handle(led_array, parsed_leds); + devm_kfree(&led->client->dev, led_array[parsed_leds].pdata); + return rc; + +free_err: + aw2016_led_err_handle(led_array, parsed_leds); + return rc; +} + +static int aw2016_led_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct aw2016_led *led_array; + struct device_node *node; + int ret = -EINVAL, num_leds = 0; + + node = client->dev.of_node; + if (node == NULL) + return -EINVAL; + + num_leds = of_get_child_count(node); + + if (!num_leds) + return -EINVAL; + + led_array = devm_kzalloc(&client->dev, + (sizeof(struct aw2016_led) * num_leds), + GFP_KERNEL); + if (!led_array) + return -ENOMEM; + + led_array->client = client; + led_array->num_leds = num_leds; + + mutex_init(&led_array->lock); + + ret = aw2016_led_parse_child_node(led_array, node); + if (ret) { + dev_err(&client->dev, "parsed node error\n"); + goto free_led_arry; + } + + i2c_set_clientdata(client, led_array); + + ret = aw2016_check_chipid(led_array); + if (ret) { + dev_err(&client->dev, "Check chip id error\n"); + goto fail_parsed_node; + } + + /* soft rst */ + aw2016_soft_reset(led_array); + + return 0; + +fail_parsed_node: + aw2016_led_err_handle(led_array, num_leds); +free_led_arry: + mutex_destroy(&led_array->lock); + devm_kfree(&client->dev, led_array); + led_array = NULL; + return ret; +} + +static int aw2016_led_remove(struct i2c_client *client) +{ + struct aw2016_led *led_array = i2c_get_clientdata(client); + int i, parsed_leds = led_array->num_leds; + + for (i = 0; i < parsed_leds; i++) { + sysfs_remove_group(&led_array[i].cdev.dev->kobj, + &aw2016_led_attr_group); + led_classdev_unregister(&led_array[i].cdev); + cancel_work_sync(&led_array[i].brightness_work); + cancel_work_sync(&led_array[i].blink_work); + devm_kfree(&client->dev, led_array[i].pdata); + led_array[i].pdata = NULL; + } + mutex_destroy(&led_array->lock); + devm_kfree(&client->dev, led_array); + led_array = NULL; + return 0; +} + +static void aw2016_led_shutdown(struct i2c_client *client) +{ + struct aw2016_led *led = i2c_get_clientdata(client); + + aw2016_write(led, AW2016_REG_GCR1, AW2016_CHIP_STANDBY); +} + +static const struct i2c_device_id aw2016_led_id[] = { + { "aw2016_led", 0 }, + {}, +}; + +MODULE_DEVICE_TABLE(i2c, aw2016_led_id); + +static const struct of_device_id aw2016_match_table[] = { + { + .compatible = "awinic,aw2016_led", + }, + {}, +}; + +static struct i2c_driver aw2016_led_driver = { + .probe = aw2016_led_probe, + .remove = aw2016_led_remove, + .shutdown = aw2016_led_shutdown, + .driver = { + .name = "aw2016_led", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(aw2016_match_table), + }, + .id_table = aw2016_led_id, +}; + +static int __init aw2016_led_init(void) +{ + pr_info("%s: driver version: %s\n", __func__, AW2016_DRIVER_VERSION); + return i2c_add_driver(&aw2016_led_driver); +} +module_init(aw2016_led_init); + +static void __exit aw2016_led_exit(void) +{ + i2c_del_driver(&aw2016_led_driver); +} +module_exit(aw2016_led_exit); + +MODULE_AUTHOR(""); +MODULE_DESCRIPTION("AWINIC AW2016 LED driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/leds/leds-aw2016.h b/drivers/leds/leds-aw2016.h new file mode 100644 index 000000000000..2b6912cdbee5 --- /dev/null +++ b/drivers/leds/leds-aw2016.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2015, The Linux Foundation. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_AW2016_LED_H__ +#define __LINUX_AW2016_LED_H__ + +/* The definition of each time described as shown in figure. + * /-----------\ + * / | \ + * /| | |\ + * / | | | \----------- + * |hold_time_ms | | + * | | | + * rise_time_ms fall_time_ms | + * off_time_ms + */ + +struct aw2016_platform_data { + int imax; + int led_current; + int rise_time_ms; + int hold_time_ms; + int fall_time_ms; + int off_time_ms; + struct aw2016_led *led; +}; + +#endif diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c index ca8c8d92b23d..bef43900c0cc 100644 --- a/drivers/leds/leds-qpnp-flash-v2.c +++ b/drivers/leds/leds-qpnp-flash-v2.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */ #define pr_fmt(fmt) "flashv2: %s: " fmt, __func__ @@ -26,6 +26,8 @@ #include #include "leds.h" +#define FLASH_LED_REG_PERPH_SUBTYPE(base) (base + 0x05) + #define FLASH_LED_REG_LED_STATUS1(base) (base + 0x08) #define FLASH_LED_REG_LED_STATUS2(base) (base + 0x09) @@ -85,6 +87,14 @@ #define FLASH_LED_REG_THERMAL_DEBOUNCE(base) (base + 0x5A) #define FLASH_LED_THERMAL_DEBOUNCE_MASK GENMASK(1, 0) +#define FLASH_LED_REG_RGLR_RAMP_RATE(base) (base + 0x5B) +#define FLASH_LED_RAMP_UP_STEP_MASK GENMASK(6, 4) +#define FLASH_LED_RAMP_UP_STEP_SHIFT 4 +#define FLASH_LED_RAMP_DOWN_STEP_MASK GENMASK(2, 0) +#define FLASH_LED_RAMP_STEP_MIN_NS 200 +#define FLASH_LED_RAMP_STEP_MAX_NS 25600 +#define FLASH_LED_RAMP_STEP_DEFAULT_NS 6400 + #define FLASH_LED_REG_VPH_DROOP_THRESHOLD(base) (base + 0x61) #define FLASH_LED_VPH_DROOP_HYSTERESIS_MASK GENMASK(5, 4) #define FLASH_LED_VPH_DROOP_THRESHOLD_MASK GENMASK(2, 0) @@ -125,6 +135,9 @@ #define FLASH_LED_REG_CURRENT_DERATE_EN(base) (base + 0x76) #define FLASH_LED_CURRENT_DERATE_EN_MASK GENMASK(2, 0) +#define FLASH_LED_REG_CHICKEN_BITS(base) (base + 0x87) +#define FLASH_LED_EN_ITAR_FLY_BIT BIT(0) + #define VPH_DROOP_DEBOUNCE_US_TO_VAL(val_us) (val_us / 8) #define VPH_DROOP_HYST_MV_TO_VAL(val_mv) (val_mv / 25) #define VPH_DROOP_THRESH_VAL_TO_UV(val) ((val + 25) * 100000) @@ -188,6 +201,13 @@ /* notifier call chain for flash-led irqs */ static ATOMIC_NOTIFIER_HEAD(irq_notifier_list); +enum flash_led_subtype { + PMI8998_FLASH_SUBTYPE = 3, + PM660L_FLASH_SUBTYPE = 3, + PM6150L_FLASH_SUBTYPE, + PMI632_FLASH_SUBTYPE, +}; + enum flash_charger_mitigation { FLASH_DISABLE_CHARGER_MITIGATION, FLASH_HW_CHARGER_MITIGATION_BY_ILED_THRSHLD, @@ -278,6 +298,8 @@ struct flash_led_platform_data { int thermal_thrsh1; int thermal_thrsh2; int thermal_thrsh3; + int ramp_up_step; + int ramp_down_step; int hw_strobe_option; u32 led1n2_iclamp_low_ma; u32 led1n2_iclamp_mid_ma; @@ -321,6 +343,7 @@ struct qpnp_flash_led { u16 base; bool trigger_lmh; bool trigger_chgr; + bool torch_current_update; }; static int thermal_derate_slow_table[] = { @@ -631,6 +654,16 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) if (rc < 0) return rc; + 
val = led->pdata->ramp_up_step << FLASH_LED_RAMP_UP_STEP_SHIFT; + val |= led->pdata->ramp_down_step; + rc = qpnp_flash_led_masked_write(led, + FLASH_LED_REG_RGLR_RAMP_RATE(led->base), + FLASH_LED_RAMP_UP_STEP_MASK | + FLASH_LED_RAMP_DOWN_STEP_MASK, + val); + if (rc < 0) + return rc; + rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_VPH_DROOP_DEBOUNCE(led->base), FLASH_LED_VPH_DROOP_DEBOUNCE_MASK, @@ -682,6 +715,27 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) if (rc < 0) return rc; + rc = qpnp_flash_led_read(led, + FLASH_LED_REG_PERPH_SUBTYPE(led->base), + &val); + if (rc < 0) + return rc; + + /* + * Updating torch current on-the-fly is possible + * from PM6150L onwards. + */ + if (val >= PM6150L_FLASH_SUBTYPE) { + rc = qpnp_flash_led_masked_read(led, + FLASH_LED_REG_CHICKEN_BITS(led->base), + FLASH_LED_EN_ITAR_FLY_BIT, + &val); + if (rc < 0) + return rc; + + led->torch_current_update = !!val; + } + if (led->pdata->led1n2_iclamp_low_ma) { val = get_current_reg_code(led->pdata->led1n2_iclamp_low_ma, led->fnode[LED1].ires_ua); @@ -1560,13 +1614,28 @@ static int qpnp_flash_led_module_enable(struct flash_switch_data *snode) static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on) { struct qpnp_flash_led *led = dev_get_drvdata(&snode->pdev->dev); + struct flash_node_data fnode; int rc, i, addr_offset; u8 val, mask; + bool torch_current_update = false; if (snode->enabled == on) { - pr_debug("Switch node is already %s!\n", - on ? "enabled" : "disabled"); - return 0; + if (on && led->torch_current_update) { + for (i = 0; i < led->num_fnodes; i++) { + fnode = led->fnode[i]; + if (snode->led_mask & BIT(fnode.id) && + fnode.led_on) { + torch_current_update = (fnode.type == + FLASH_LED_TYPE_TORCH); + } + } + } + + if (!torch_current_update) { + pr_debug("Switch node is already %s!\n", + on ? 
"enabled" : "disabled"); + return 0; + } } if (!on) { @@ -1595,6 +1664,22 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on) if (rc < 0) return rc; + if (torch_current_update) { + for (i = 0; i < led->num_fnodes; i++) { + if (snode->led_mask & BIT(led->fnode[i].id) && + led->fnode[i].led_on) { + addr_offset = led->fnode[i].id; + rc = qpnp_flash_led_masked_write(led, + FLASH_LED_REG_TGR_CURRENT(led->base + + addr_offset), FLASH_LED_CURRENT_MASK, + led->fnode[i].current_reg_val); + if (rc < 0) + return rc; + } + } + return 0; + } + val = 0; for (i = 0; i < led->num_fnodes; i++) { if (!led->fnode[i].led_on || @@ -2874,6 +2959,30 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led, return rc; } + val = FLASH_LED_RAMP_STEP_DEFAULT_NS; + rc = of_property_read_u32(node, "qcom,ramp-up-step", &val); + if (!rc && (val < FLASH_LED_RAMP_STEP_MIN_NS || + val > FLASH_LED_RAMP_STEP_MAX_NS)) { + pr_err("Invalid ramp-up-step %d\n", val); + return -EINVAL; + } else if (rc && rc != -EINVAL) { + pr_err("Unable to read ramp-up-step, rc=%d\n", rc); + return rc; + } + led->pdata->ramp_up_step = ilog2(val / 100) - 1; + + val = FLASH_LED_RAMP_STEP_DEFAULT_NS; + rc = of_property_read_u32(node, "qcom,ramp-down-step", &val); + if (!rc && (val < FLASH_LED_RAMP_STEP_MIN_NS || + val > FLASH_LED_RAMP_STEP_MAX_NS)) { + pr_err("Invalid ramp-down-step %d\n", val); + return -EINVAL; + } else if (rc && rc != -EINVAL) { + pr_err("Unable to read ramp-down-step, rc=%d\n", rc); + return rc; + } + led->pdata->ramp_down_step = ilog2(val / 100) - 1; + rc = qpnp_flash_led_parse_battery_prop_dt(led, node); if (rc < 0) return rc; diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c index 76a1ff702842..f791c48dfed4 100644 --- a/drivers/md/dm-default-key.c +++ b/drivers/md/dm-default-key.c @@ -81,7 +81,8 @@ static void default_key_dtr(struct dm_target *ti) } static int default_key_ctr_optional(struct dm_target *ti, - unsigned int argc, char **argv) + unsigned int argc, char **argv, + bool is_legacy) { struct default_key_c *dkc = ti->private; struct dm_arg_set as; @@ -121,7 +122,7 @@ static int default_key_ctr_optional(struct dm_target *ti, iv_large_sectors = true; } else if (!strcmp(opt_string, "wrappedkey_v0")) { dkc->is_hw_wrapped = true; - } else if (!strcmp(opt_string, "set_dun")) { + } else if (!strcmp(opt_string, "set_dun") && is_legacy) { dkc->set_dun = true; } else { ti->error = "Invalid feature arguments"; @@ -166,7 +167,7 @@ static void default_key_adjust_sector_size_and_iv(char **argv, !strcmp((*dkc)->dev->bdev->bd_disk->disk_name, "mmcblk0"))) (*dkc)->sector_size = SECTOR_SIZE; - if (dev->bdev->bd_part) + if (dev->bdev->bd_part && !(*dkc)->set_dun) (*dkc)->iv_offset += dev->bdev->bd_part->start_sect; } } @@ -189,22 +190,31 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) unsigned long long tmpll; char dummy; int err; + int __argc; char *_argv[10]; bool is_legacy = false; if (argc >= 4 && !strcmp(argv[0], "AES-256-XTS")) { - argc = 0; - _argv[argc++] = "aes-xts-plain64"; - _argv[argc++] = argv[1]; - _argv[argc++] = "0"; - _argv[argc++] = argv[2]; - _argv[argc++] = argv[3]; - _argv[argc++] = "3"; - _argv[argc++] = "allow_discards"; - _argv[argc++] = "sector_size:4096"; - _argv[argc++] = "iv_large_sectors"; - _argv[argc] = NULL; + __argc = 0; + _argv[__argc++] = "aes-xts-plain64"; + _argv[__argc++] = argv[1]; + _argv[__argc++] = "0"; + _argv[__argc++] = argv[2]; + _argv[__argc++] = argv[3]; + if (argc > 4) + _argv[__argc++] = 
"4"; + else + _argv[__argc++] = "3"; + + _argv[__argc++] = "allow_discards"; + _argv[__argc++] = "sector_size:4096"; + _argv[__argc++] = "iv_large_sectors"; + if (argc > 4) + _argv[__argc++] = argv[5]; + + _argv[__argc] = NULL; argv = _argv; + argc = __argc; is_legacy = true; } @@ -276,7 +286,8 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) /* optional arguments */ dkc->sector_size = SECTOR_SIZE; if (argc > 5) { - err = default_key_ctr_optional(ti, argc - 5, &argv[5]); + err = default_key_ctr_optional(ti, argc - 5, &argv[5], + is_legacy); if (err) goto bad; } @@ -325,6 +336,13 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv) return err; } +static void default_key_map_dun(struct bio *bio, u64 *dun) +{ + dun[0] += 1; + memcpy(bio->bi_crypt_context->bc_dun, dun, + sizeof(bio->bi_crypt_context->bc_dun)); + bio->bi_crypt_context->is_ext4 = false; +} static int default_key_map(struct dm_target *ti, struct bio *bio) { const struct default_key_c *dkc = ti->private; @@ -349,14 +367,15 @@ static int default_key_map(struct dm_target *ti, struct bio *bio) * file's contents), or if it doesn't have any data (e.g. if it's a * DISCARD request), there's nothing more to do. */ - if (bio_should_skip_dm_default_key(bio) || !bio_has_data(bio)) + if ((bio_should_skip_dm_default_key(bio) && !dkc->set_dun) || + !bio_has_data(bio)) return DM_MAPIO_REMAPPED; /* * Else, dm-default-key needs to set this bio's encryption context. * It must not already have one. */ - if (WARN_ON_ONCE(bio_has_crypt_ctx(bio))) + if (WARN_ON_ONCE(bio_has_crypt_ctx(bio) && !dkc->set_dun)) return DM_MAPIO_KILL; /* Calculate the DUN and enforce data-unit (crypto sector) alignment. */ @@ -372,7 +391,14 @@ static int default_key_map(struct dm_target *ti, struct bio *bio) if (WARN_ON_ONCE(dun[0] > dkc->max_dun)) return DM_MAPIO_KILL; - bio_crypt_set_ctx(bio, &dkc->key, dun, GFP_NOIO); + if (!bio_has_crypt_ctx(bio)) { + bio_crypt_set_ctx(bio, &dkc->key, dun, GFP_NOIO); + if (dkc->set_dun) + default_key_map_dun(bio, dun); + } else { + if (dkc->set_dun && bio->bi_crypt_context->is_ext4) + default_key_map_dun(bio, dun); + } return DM_MAPIO_REMAPPED; } @@ -400,6 +426,8 @@ static void default_key_status(struct dm_target *ti, status_type_t type, num_feature_args += 2; if (dkc->is_hw_wrapped) num_feature_args += 1; + if (dkc->set_dun) + num_feature_args += 1; if (num_feature_args != 0) { DMEMIT(" %d", num_feature_args); if (ti->num_discard_bios) @@ -410,6 +438,8 @@ static void default_key_status(struct dm_target *ti, status_type_t type, } if (dkc->is_hw_wrapped) DMEMIT(" wrappedkey_v0"); + if (dkc->set_dun) + DMEMIT(" set_dun"); } break; } diff --git a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c index f57415658d09..27b7b5ef68d9 100644 --- a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c +++ b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2109,13 +2109,18 @@ static int cam_smmu_populate_sids(struct device *dev, struct cam_context_bank_info *cb) { int i, j, rc = 0; - unsigned int cnt = 0; + unsigned int cnt = 0, iommu_cells_cnt = 0; const void *property; /* set the name of the context bank */ property = of_get_property(dev->of_node, "iommus", &cnt); cnt /= 4; - for (i = 0, j = 0; i < cnt; i = i + 2, j++) { + rc = of_property_read_u32_index(dev->of_node, "iommu-cells", 0, + &iommu_cells_cnt); + if (rc < 0) + pr_err("invalid iommu-cells count : %d\n", rc); + iommu_cells_cnt++; + for (i = 0, j = 0; i < cnt; i = i + iommu_cells_cnt, j++) { rc = of_property_read_u32_index(dev->of_node, "iommus", i + 1, &cb->sids[j]); if (rc < 0) diff --git a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c index 821e1de288a2..1e9f732127dd 100644 --- a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c +++ b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -282,7 +282,7 @@ static void msm_buf_mngr_contq_listdel(struct msm_buf_mngr_device *dev, cont_save, &dev->cont_qhead, entry) { if ((cont_bufs->sessid == session) && (cont_bufs->strid == stream)) { - if (cnt && unmap) { + if (cnt == 1 && unmap) { /* dma_buf_vunmap ignored vaddr(2nd argument) */ dma_buf_vunmap(cont_bufs->dmabuf, cont_bufs->paddr); diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_10_0_0_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_10_0_0_hwreg.h index ccea30e0e181..f61c9ac52bfb 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_10_0_0_hwreg.h +++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_10_0_0_hwreg.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018, 2021, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -29,6 +29,9 @@ #define mask_enable_clk_B 0x2 #define mask_ctrl_1_A 0x5 #define mask_ctrl_1_B 0xA +#define mask_reset_A 0x1 +#define mask_reset_B 0x7 +#define mask_shutdown_A 0x3 #define mask_hs_freq_range 0x7F #define mask_osc_freq_2 0xFF #define mask_osc_freq_3 0xF00 @@ -57,8 +60,6 @@ static struct csiphy_reg_snps_parms_t csiphy_v10_0_0_snps = { {0x58C, 0xFF}, /* mipi_csiphy_irq_mask_ctrl_lane_0 */ {0x5C8, 0xFF}, /* mipi_csiphy_irq_mask_ctrl_lane_clk_0 */ {0x20, 0x0}, /* mipi_csiphy_rx_sys_7_00 */ - {0x28, 0x43}, /* mipi_csiphy_rx_sys_9_00 */ - {0x380, 0x0}, /* mipi_csiphy_rx_startup_ovr_0_00 */ {0x384, 0x0}, /* mipi_csiphy_rx_startup_ovr_1_00 */ {0x388, 0xCC}, /* mipi_csiphy_rx_startup_ovr_2_00 */ {0x38C, 0x1}, /* mipi_csiphy_rx_startup_ovr_3_00 */ @@ -73,6 +74,10 @@ static struct csiphy_reg_snps_parms_t csiphy_v10_0_0_snps = { {0x12c, 0x0}, /* mipi_csiphy_rx_lane_0_7_00 */ {0x220, 0x0}, /* mipi_csiphy_rx_lane_1_7_00 */ {0xCC, 0x0}, /* mipi_csiphy_rx_clk_lane_7_00 */ + {0x1F8, 0x20}, /* mipi_csiphy_rx_lane0_ddl_2_00 */ + {0x1FC, 0x10}, /* mipi_csiphy_rx_lane0_ddl_3_00 */ + {0x22C, 0x80}, /* mipi_csiphy_rx_lane_1_10_00 */ + {0x230, 0x10}, /* mipi_csiphy_rx_lane_1_11_00 */ }; static struct snps_freq_value snps_v100_freq_values[] = { diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c index dfa09920d955..2f86108d3ce8 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c +++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2011-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2021, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -210,7 +210,7 @@ static int msm_csiphy_snps_2_lane_config( diff = abs(snps_v100_freq_values[0].default_bit_rate - local_data_rate); /* ToDo: Can be optimized to a O(1) search */ - for (i = 1; i < ARRAY_SIZE(snps_v100_freq_values)/ + for (i = 1; i < sizeof(snps_v100_freq_values)/ sizeof(snps_v100_freq_values[0]);) { diff_i = abs(snps_v100_freq_values[i].default_bit_rate - local_data_rate); @@ -220,7 +220,7 @@ static int msm_csiphy_snps_2_lane_config( } diff = diff_i; i++; - if (ARRAY_SIZE(snps_v100_freq_values)/ + if (sizeof(snps_v100_freq_values)/ sizeof(snps_v100_freq_values[0]) == i) { i--; break; @@ -259,10 +259,13 @@ static int msm_csiphy_snps_2_lane_config( csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg .mipi_csiphy_rx_sys_7_00.addr + offset); - msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg - .mipi_csiphy_rx_sys_9_00.data, + value = msm_camera_io_r(csiphybase + + csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_rx_clk_lane_6_00.addr + offset); + value |= SET_THE_BIT(7); + msm_camera_io_w(value, csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg - .mipi_csiphy_rx_sys_9_00.addr + offset); + .mipi_csiphy_rx_clk_lane_6_00.addr + offset); msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg .mipi_csiphy_rx_startup_ovr_4_00.data, @@ -292,6 +295,32 @@ static int msm_csiphy_snps_2_lane_config( csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg .mipi_csiphy_rx_cb_2_00.addr + offset); + if (local_data_rate <= 1500) { + msm_camera_io_w( + csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_rx_lane0_ddl_2_00.data, + csiphybase + + csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_rx_lane0_ddl_2_00.addr + offset); + + msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_rx_lane0_ddl_3_00.data, + csiphybase + + csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_rx_lane0_ddl_3_00.addr + offset); + + msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_rx_lane_1_10_00.data, + csiphybase + + csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_rx_lane_1_10_00.addr + offset); + + msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_rx_lane_1_11_00.data, + csiphybase + + csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_rx_lane_1_11_00.addr + offset); + } return 0; } @@ -303,7 +332,7 @@ static int msm_csiphy_snps_lane_config( uint16_t lane_mask = 0; void __iomem *csiphybase; enum snps_csiphy_mode mode = INVALID_MODE; - uint32_t value, num_tries, num_lanes, offset; + uint32_t value, num_tries, num_lanes, offset = SNPS_INTERPHY_OFFSET; uint32_t clk_mux_reg = 0; csiphybase = csiphy_dev->base; @@ -481,17 +510,6 @@ static int msm_csiphy_snps_lane_config( .mipi_csiphy_rx_clk_lane_7_00.addr + SNPS_INTERPHY_OFFSET); - value = msm_camera_io_r(csiphybase + - csiphy_dev->ctrl_reg->csiphy_snps_reg - .mipi_csiphy_rx_startup_ovr_0_00.addr + - SNPS_INTERPHY_OFFSET); - value |= SET_THE_BIT(0); - value |= SET_THE_BIT(1); - msm_camera_io_w(value, - csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg - .mipi_csiphy_rx_startup_ovr_0_00.addr + - SNPS_INTERPHY_OFFSET); - value = msm_camera_io_r(csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg .mipi_csiphy_rx_startup_ovr_1_00.addr + @@ -507,6 +525,7 @@ static int msm_csiphy_snps_lane_config( csiphy_dev->ctrl_reg->csiphy_snps_reg .mipi_csiphy_rx_clk_lane_6_00.addr); value |= SET_THE_BIT(2); + value &= ~(SET_THE_BIT(7)); msm_camera_io_w(value, csiphybase + 
csiphy_dev->ctrl_reg->csiphy_snps_reg .mipi_csiphy_rx_clk_lane_6_00.addr); @@ -516,7 +535,7 @@ static int msm_csiphy_snps_lane_config( .mipi_csiphy_rx_clk_lane_6_00.addr + SNPS_INTERPHY_OFFSET); value |= SET_THE_BIT(3); - value |= SET_THE_BIT(7); + value &= ~(SET_THE_BIT(7)); value &= ~(SET_THE_BIT(2)); msm_camera_io_w(value, csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg @@ -578,36 +597,109 @@ static int msm_csiphy_snps_lane_config( csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg .mipi_csiphy_enable_clk.addr); - value = 0x0; - if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_A) - value |= mask_ctrl_1_A; - if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_B) - value |= mask_ctrl_1_B; - msm_camera_io_w(value, + if (mode == TWO_LANE_PHY_A) { + msm_camera_io_w(mask_reset_A, csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg .mipi_csiphy_ctrl_1.addr); - if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_A) - offset = 0x0; - else - offset = SNPS_INTERPHY_OFFSET; + msm_camera_io_w(mask_ctrl_1_A, + csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_ctrl_1.addr); - value = 0x0; - num_tries = 0; + value = 0x0; + num_tries = 0; - do { - num_tries++; - value = msm_camera_io_r(csiphybase + - csiphy_dev->ctrl_reg->csiphy_snps_reg - .mipi_csiphy_rx_startup_obs_2_00.addr + offset); - if ((value | SET_THE_BIT(4)) == value) - break; - usleep_range(100, 150); - } while (num_tries < 6); + do { + num_tries++; + value = msm_camera_io_r(csiphybase + + csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_rx_startup_obs_2_00.addr); + if ((value | SET_THE_BIT(4)) == value) + break; + usleep_range(100, 150); + } while (num_tries < 6); + if ((value | SET_THE_BIT(4)) != value) { + pr_err("%s: SNPS phy config failed\n", __func__); + return -EINVAL; + } + } - if ((value | SET_THE_BIT(4)) != value) { - pr_err("%s: SNPS phy config failed\n", __func__); - return -EINVAL; + if (mode == TWO_LANE_PHY_B) { + msm_camera_io_w(mask_reset_B, + csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_ctrl_1.addr); + + msm_camera_io_w(mask_ctrl_1_A|mask_ctrl_1_B, + csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_ctrl_1.addr); + + value = 0x0; + num_tries = 0; + + do { + num_tries++; + value = msm_camera_io_r(csiphybase + + csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_rx_startup_obs_2_00.addr + offset); + if ((value | SET_THE_BIT(4)) == value) + break; + usleep_range(100, 150); + } while (num_tries < 6); + + if ((value | SET_THE_BIT(4)) != value) { + pr_err("%s: SNPS phy config failed\n", __func__); + return -EINVAL; + } + } + + if (mode == AGGREGATE_MODE) { + msm_camera_io_w(mask_shutdown_A, + csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_ctrl_1.addr); + + msm_camera_io_w(mask_reset_B, + csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_ctrl_1.addr); + + value = 0x0; + num_tries = 0; + + do { + num_tries++; + value = msm_camera_io_r(csiphybase + + csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_rx_startup_obs_2_00.addr); + if ((value | SET_THE_BIT(4)) == value) + break; + usleep_range(100, 150); + } while (num_tries < 6); + + if ((value | SET_THE_BIT(4)) != value) { + pr_err("%s: SNPS phy config failed\n", __func__); + return -EINVAL; + } + + msm_camera_io_w(mask_ctrl_1_A|mask_ctrl_1_B, + csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg + .mipi_csiphy_ctrl_1.addr); + + value = 0x0; + num_tries = 0; + + do { + num_tries++; + value = msm_camera_io_r(csiphybase + + csiphy_dev->ctrl_reg->csiphy_snps_reg + 
.mipi_csiphy_rx_startup_obs_2_00.addr + offset); + if ((value | SET_THE_BIT(4)) == value) + break; + usleep_range(100, 150); + } while (num_tries < 6); + + if ((value | SET_THE_BIT(4)) != value) { + pr_err("%s: SNPS phy config failed\n", __func__); + return -EINVAL; + } } msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h index 7a068572d008..ff69f04f862b 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h +++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2011-2018, 2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2018, 2020-2021, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -84,8 +84,6 @@ struct csiphy_reg_snps_parms_t { struct csiphy_reg_t mipi_csiphy_irq_mask_ctrl_lane_0; struct csiphy_reg_t mipi_csiphy_irq_mask_ctrl_lane_clk_0; struct csiphy_reg_t mipi_csiphy_rx_sys_7_00; - struct csiphy_reg_t mipi_csiphy_rx_sys_9_00; - struct csiphy_reg_t mipi_csiphy_rx_startup_ovr_0_00; struct csiphy_reg_t mipi_csiphy_rx_startup_ovr_1_00; struct csiphy_reg_t mipi_csiphy_rx_startup_ovr_2_00; struct csiphy_reg_t mipi_csiphy_rx_startup_ovr_3_00; @@ -100,6 +98,10 @@ struct csiphy_reg_snps_parms_t { struct csiphy_reg_t mipi_csiphy_rx_lane_0_7_00; struct csiphy_reg_t mipi_csiphy_rx_lane_1_7_00; struct csiphy_reg_t mipi_csiphy_rx_clk_lane_7_00; + struct csiphy_reg_t mipi_csiphy_rx_lane0_ddl_2_00; + struct csiphy_reg_t mipi_csiphy_rx_lane0_ddl_3_00; + struct csiphy_reg_t mipi_csiphy_rx_lane_1_10_00; + struct csiphy_reg_t mipi_csiphy_rx_lane_1_11_00; }; struct csiphy_reg_3ph_parms_t { diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/Makefile b/drivers/media/platform/msm/camera_v2/sensor/flash/Makefile index edd6d64145ba..cb20f75ba93c 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/flash/Makefile +++ b/drivers/media/platform/msm/camera_v2/sensor/flash/Makefile @@ -4,3 +4,4 @@ ccflags-y += -Idrivers/media/platform/msm/camera_v2 ccflags-y += -Idrivers/media/platform/msm/camera_v2/common ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io obj-$(CONFIG_MSMB_CAMERA) += msm_flash.o +obj-$(CONFIG_MSMB_CAMERA) += qm215_gpio_flash.o diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c index 409e13e69ef9..c0af5ac313e6 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c +++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2009-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2009-2021, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -18,6 +18,7 @@ #include #include #include "msm_flash.h" +#include "msm_camera_io_util.h" #include "msm_camera_dt_util.h" #include "msm_cci.h" @@ -491,6 +492,8 @@ static int32_t msm_flash_init( __func__, __LINE__, flash_data->cfg.flash_init_info->flash_driver_type); } + if (flash_ctrl->platform_flash_init) + flash_ctrl->platform_flash_init(flash_ctrl, flash_data); if (flash_ctrl->func_tbl->camera_flash_init) { rc = flash_ctrl->func_tbl->camera_flash_init( @@ -577,6 +580,8 @@ static int32_t msm_flash_prepare( __func__, __LINE__, flash_ctrl->flash_state); if (flash_ctrl->switch_trigger == NULL) { + if (flash_ctrl->platform_flash_init) + return ret; pr_err("%s:%d Invalid argument\n", __func__, __LINE__); return -EINVAL; @@ -1263,6 +1268,8 @@ static int32_t msm_flash_platform_probe(struct platform_device *pdev) kfree(flash_ctrl); return -EINVAL; } + if (flash_ctrl->flash_driver_type == FLASH_DRIVER_GPIO) + platform_set_drvdata(pdev, flash_ctrl); flash_ctrl->flash_state = MSM_CAMERA_FLASH_RELEASE; flash_ctrl->power_info.dev = &flash_ctrl->pdev->dev; @@ -1312,6 +1319,11 @@ static int32_t msm_flash_platform_probe(struct platform_device *pdev) return rc; } +int32_t camera_flash_platform_probe(struct platform_device *pdev) +{ + return msm_flash_platform_probe(pdev); +} + MODULE_DEVICE_TABLE(of, msm_flash_dt_match); static struct platform_driver msm_flash_platform_driver = { diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h index 904dc6285ee1..419eaa3c3fd5 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h +++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2009-2016, 2018, 2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2009-2016, 2018, 2021, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -104,6 +104,8 @@ struct msm_flash_ctrl_t { /* flash state */ enum msm_camera_flash_state_t flash_state; + int32_t (*platform_flash_init)(struct msm_flash_ctrl_t *flash_ctrl, + struct msm_flash_cfg_data_t *flash_data); }; int msm_flash_i2c_probe(struct i2c_client *client, @@ -126,4 +128,6 @@ int msm_flash_led_release(struct msm_flash_ctrl_t *fctrl); int msm_flash_led_off(struct msm_flash_ctrl_t *fctrl); int msm_flash_led_low(struct msm_flash_ctrl_t *fctrl); int msm_flash_led_high(struct msm_flash_ctrl_t *fctrl); +int32_t camera_flash_platform_probe(struct platform_device *pdev); + #endif diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/qm215_gpio_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/qm215_gpio_flash.c new file mode 100644 index 000000000000..916a79f0dcce --- /dev/null +++ b/drivers/media/platform/msm/camera_v2/sensor/flash/qm215_gpio_flash.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include "msm_camera_io_util.h" +#include "msm_flash.h" + +#define FLASH_NAME "qcom,gpio-flash" + +#undef CDBG +#define CDBG(fmt, args...) pr_debug(fmt, ##args) + +static const struct of_device_id msm_gpio_flash_dt_match[] = { + {.compatible = "qcom,qm215-gpio-flash", .data = NULL}, + {} +}; +static struct msm_flash_table qm215_gpio_flash_table; +MODULE_DEVICE_TABLE(of, msm_flash_dt_match); + +static int32_t qm215_flash_low( + struct msm_flash_ctrl_t *flash_ctrl, + struct msm_flash_cfg_data_t *flash_data) +{ + struct msm_camera_power_ctrl_t *power_info = NULL; + struct msm_camera_gpio_num_info *gpio_num_info = NULL; + + if (!flash_ctrl) { + pr_err("device data NULL\n"); + return -EINVAL; + } + + CDBG("Enter"); + power_info = &flash_ctrl->power_info; + gpio_num_info = power_info->gpio_conf->gpio_num_info; + + if (flash_ctrl->flash_driver_type == FLASH_DRIVER_GPIO && + gpio_num_info->valid[SENSOR_GPIO_FL_NOW] && + gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN]) { + + CDBG("flash op low gpio num %d(state:%d) %d(state: %d)\n", + gpio_num_info->gpio_num[SENSOR_GPIO_FL_NOW], + GPIO_OUT_HIGH, + gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN], + GPIO_OUT_HIGH); + gpio_set_value_cansleep( + gpio_num_info->gpio_num[SENSOR_GPIO_FL_NOW], + GPIO_OUT_HIGH); + gpio_set_value_cansleep( + gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN], + GPIO_OUT_HIGH); + } + CDBG("Exit\n"); + return 0; +} + +static int32_t qm215_flash_high( + struct msm_flash_ctrl_t *flash_ctrl, + struct msm_flash_cfg_data_t *flash_data) +{ + struct msm_camera_power_ctrl_t *power_info = NULL; + struct msm_camera_gpio_num_info *gpio_num_info = NULL; + + if (!flash_ctrl) { + pr_err("device data NULL\n"); + return -EINVAL; + } + + CDBG("Enter\n"); + power_info = &flash_ctrl->power_info; + gpio_num_info = power_info->gpio_conf->gpio_num_info; + + if (flash_ctrl->flash_driver_type == FLASH_DRIVER_GPIO && + gpio_num_info->valid[SENSOR_GPIO_FL_NOW] && + gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN]) { + + CDBG("flash op high gpio num %d(state:%d) %d(state:%d)\n", + gpio_num_info->gpio_num[SENSOR_GPIO_FL_NOW], + GPIO_OUT_LOW, + gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN], + GPIO_OUT_HIGH); + gpio_set_value_cansleep( + gpio_num_info->gpio_num[SENSOR_GPIO_FL_NOW], + GPIO_OUT_LOW); + gpio_set_value_cansleep( + gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN], + GPIO_OUT_HIGH); + } + CDBG("Exit\n"); + + return 0; +} + +static int32_t qm215_flash_release( + struct msm_flash_ctrl_t *flash_ctrl) +{ + int32_t rc = 0; + + if (!flash_ctrl) { + pr_err("device data NULL\n"); + return -EINVAL; + } + + CDBG("Enter\n"); + rc = flash_ctrl->func_tbl->camera_flash_off(flash_ctrl, NULL); + if (rc < 0) { + pr_err("%s:%d camera_flash_init failed rc = %d\n", + __func__, __LINE__, rc); + return rc; + } + flash_ctrl->flash_state = MSM_CAMERA_FLASH_RELEASE; + CDBG("Exit\n"); + return 0; +} + +static int32_t qm215_flash_off(struct msm_flash_ctrl_t *flash_ctrl, + struct msm_flash_cfg_data_t *flash_data) +{ + struct msm_camera_power_ctrl_t *power_info = NULL; + struct msm_camera_gpio_num_info *gpio_num_info = NULL; + + if (!flash_ctrl) { + pr_err("device data NULL\n"); + return -EINVAL; + } + + CDBG("Enter\n"); + power_info = &flash_ctrl->power_info; + gpio_num_info = power_info->gpio_conf->gpio_num_info; + + if 
(flash_ctrl->flash_driver_type == FLASH_DRIVER_GPIO &&
+ gpio_num_info->valid[SENSOR_GPIO_FL_NOW] &&
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN]) {
+
+ CDBG("flash off gpio num %d(state:%d) %d(state: %d)\n",
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_NOW],
+ GPIO_OUT_LOW,
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN],
+ GPIO_OUT_LOW);
+ gpio_set_value_cansleep(
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_NOW],
+ GPIO_OUT_LOW);
+ gpio_set_value_cansleep(
+ gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN],
+ GPIO_OUT_LOW);
+ }
+
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t qm215_flash_gpio_init(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter");
+ rc = flash_ctrl->func_tbl->camera_flash_off(flash_ctrl, flash_data);
+
+ CDBG("Exit");
+ return rc;
+}
+
+
+static int32_t qm215_platform_flash_init(struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ if (!flash_ctrl) {
+ pr_err("device data NULL\n");
+ return -EINVAL;
+ }
+
+ if (flash_ctrl->flash_driver_type == FLASH_DRIVER_GPIO)
+ flash_ctrl->func_tbl = &qm215_gpio_flash_table.func_tbl;
+
+ return 0;
+}
+static int32_t qm215_flash_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ struct msm_flash_ctrl_t *flash_ctrl = NULL;
+ struct msm_camera_power_ctrl_t *power_info = NULL;
+ struct msm_camera_gpio_conf *gpio_conf = NULL;
+
+ if (!pdev->dev.of_node) {
+ pr_err("of_node NULL\n");
+ return -EINVAL;
+ }
+ CDBG("enter probe\n");
+ rc = camera_flash_platform_probe(pdev);
+ if (rc >= 0) {
+ flash_ctrl =
+ (struct msm_flash_ctrl_t *) platform_get_drvdata(pdev);
+ CDBG("device data %pK\n", flash_ctrl);
+ if (!flash_ctrl) {
+ pr_err("flash_ctrl NULL\n");
+ return -EINVAL;
+ }
+ power_info = &flash_ctrl->power_info;
+ gpio_conf = power_info->gpio_conf;
+ rc = msm_camera_request_gpio_table(gpio_conf->cam_gpio_req_tbl,
+ gpio_conf->cam_gpio_req_tbl_size, 1);
+ if (rc < 0) {
+ pr_err("%s: request gpio failed\n", __func__);
+ return rc;
+ }
+ flash_ctrl->platform_flash_init = qm215_platform_flash_init;
+ }
+ return rc;
+}
+
+static struct platform_driver msm_gpio_flash_platform_driver = {
+ .probe = qm215_flash_platform_probe,
+ .driver = {
+ .name = "qcom,camera-gpio-flash",
+ .of_match_table = msm_gpio_flash_dt_match,
+ },
+};
+
+static int __init qm215_gpio_flash_init_module(void)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+ rc = platform_driver_register(&msm_gpio_flash_platform_driver);
+ if (rc)
+ pr_err("platform probe for flash failed\n");
+
+ return rc;
+}
+
+static void __exit qm215_gpio_flash_exit_module(void)
+{
+ platform_driver_unregister(&msm_gpio_flash_platform_driver);
+}
+
+static struct msm_flash_table qm215_gpio_flash_table = {
+ .flash_driver_type = FLASH_DRIVER_GPIO,
+ .func_tbl = {
+ .camera_flash_init = qm215_flash_gpio_init,
+ .camera_flash_release = qm215_flash_release,
+ .camera_flash_off = qm215_flash_off,
+ .camera_flash_low = qm215_flash_low,
+ .camera_flash_high = qm215_flash_high,
+ .camera_flash_query_current = NULL,
+ },
+};
+
+module_init(qm215_gpio_flash_init_module);
+module_exit(qm215_gpio_flash_exit_module);
+MODULE_DESCRIPTION("MSM GPIO FLASH");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
index 5f72b555e0d2..e75fcadd3be0 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier:
GPL-2.0-only -/* Copyright (c) 2011-2018, 2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2018, 2021, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -205,6 +205,9 @@ int msm_sensor_power_up(struct msm_sensor_ctrl_t *s_ctrl) &msm_sensor_secure_func_tbl; } } +#if IS_ENABLED(CONFIG_ARCH_QM215) + msleep(60); +#endif rc = msm_camera_power_up(power_info, s_ctrl->sensor_device_type, sensor_i2c_client); if (rc < 0) diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h index 3538f762be28..d5be84b99b0c 100644 --- a/drivers/media/platform/msm/npu/npu_common.h +++ b/drivers/media/platform/msm/npu/npu_common.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. */ #ifndef _NPU_COMMON_H @@ -134,7 +134,7 @@ struct npu_mbox { }; /** - * struct npul_pwrlevel - Struct holding different pwrlevel info obtained from + * struct npu_pwrlevel - Struct holding different pwrlevel info obtained * from dtsi file * @pwr_level: NPU power level * @freq[]: NPU frequency vote in Hz @@ -246,6 +246,7 @@ struct mbox_bridge_data { struct npu_device { struct mutex dev_lock; + spinlock_t ipc_lock; struct platform_device *pdev; diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.c b/drivers/media/platform/msm/npu/npu_host_ipc.c index 64d91d9d87e4..0787406a830a 100644 --- a/drivers/media/platform/msm/npu/npu_host_ipc.c +++ b/drivers/media/platform/msm/npu/npu_host_ipc.c @@ -74,6 +74,8 @@ static int npu_host_ipc_init_hfi(struct npu_device *npu_dev) uint32_t q_size = 0; uint32_t cur_start_offset = 0; + spin_lock_init(&npu_dev->ipc_lock); + reg_val = REGR(npu_dev, REG_NPU_FW_CTRL_STATUS); /* @@ -140,6 +142,7 @@ static int npu_host_ipc_init_hfi(struct npu_device *npu_dev) reg_val = REGR(npu_dev, (uint32_t)REG_NPU_HOST_CTRL_STATUS); REGW(npu_dev, (uint32_t)REG_NPU_HOST_CTRL_STATUS, reg_val | HOST_CTRL_STATUS_IPC_ADDRESS_READY_VAL); + return status; } @@ -149,13 +152,17 @@ static int npu_host_ipc_send_cmd_hfi(struct npu_device *npu_dev, int status = 0; uint8_t is_rx_req_set = 0; uint32_t retry_cnt = 5; + unsigned long flags; + spin_lock_irqsave(&npu_dev->ipc_lock, flags); status = ipc_queue_write(npu_dev, q_idx, (uint8_t *)cmd_ptr, &is_rx_req_set); if (status == -ENOSPC) { do { + spin_unlock_irqrestore(&npu_dev->ipc_lock, flags); msleep(20); + spin_lock_irqsave(&npu_dev->ipc_lock, flags); status = ipc_queue_write(npu_dev, q_idx, (uint8_t *)cmd_ptr, &is_rx_req_set); } while ((status == -ENOSPC) && (--retry_cnt > 0)); @@ -165,6 +172,7 @@ static int npu_host_ipc_send_cmd_hfi(struct npu_device *npu_dev, if (is_rx_req_set == 1) status = INTERRUPT_RAISE_NPU(npu_dev); } + spin_unlock_irqrestore(&npu_dev->ipc_lock, flags); if (status) NPU_ERR("Cmd Msg put on Command Queue - FAILURE\n"); diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c index 32adf1babe58..8e93a71aa0e1 100644 --- a/drivers/media/platform/msm/npu/npu_mgr.c +++ b/drivers/media/platform/msm/npu/npu_mgr.c @@ -15,6 +15,7 @@ #include "npu_common.h" #include #include +#include /* ------------------------------------------------------------------------- * Defines @@ -279,6 +280,50 @@ int load_fw(struct npu_device *npu_dev) return 0; } +static void 
complete_pending_commands(struct npu_host_ctx *host_ctx) +{ + struct npu_network *network = NULL; + struct npu_kevent kevt; + struct npu_network_cmd *cmd; + struct npu_misc_cmd *misc_cmd; + int i; + + /* flush all pending npu cmds */ + for (i = 0; i < MAX_LOADED_NETWORK; i++) { + network = &host_ctx->networks[i]; + if (!network->is_valid || !network->fw_error) + continue; + + if (network->is_async) { + NPU_DBG("async cmd, queue ssr event\n"); + kevt.evt.type = MSM_NPU_EVENT_TYPE_SSR; + kevt.evt.u.ssr.network_hdl = + network->network_hdl; + if (npu_queue_event(network->client, &kevt)) + NPU_ERR("queue npu event failed\n"); + + while (!list_empty(&network->cmd_list)) { + cmd = list_first_entry(&network->cmd_list, + struct npu_network_cmd, list); + npu_dequeue_network_cmd(network, cmd); + npu_free_network_cmd(host_ctx, cmd); + } + } else { + list_for_each_entry(cmd, &network->cmd_list, list) { + NPU_INFO("complete network %llx trans_id %d\n", + network->id, cmd->trans_id); + complete(&cmd->cmd_done); + } + } + } + + list_for_each_entry(misc_cmd, &host_ctx->misc_cmd_list, list) { + NPU_INFO("complete misc cmd trans_id %d\n", + misc_cmd->trans_id); + complete(&misc_cmd->cmd_done); + } +} + int unload_fw(struct npu_device *npu_dev) { struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; @@ -294,7 +339,9 @@ int unload_fw(struct npu_device *npu_dev) mutex_unlock(&host_ctx->lock); return 0; } else if (host_ctx->fw_state == FW_ENABLED) { - NPU_ERR("fw is enabled now, can't be unloaded\n"); + NPU_ERR("fw is enabled now, device is shutting down?\n"); + host_ctx->dev_shuttingdown = true; + complete_pending_commands(host_ctx); mutex_unlock(&host_ctx->lock); return -EBUSY; } @@ -315,6 +362,11 @@ static int enable_fw_nolock(struct npu_device *npu_dev) int ret = 0; uint32_t reg_val; + if (host_ctx->dev_shuttingdown) { + NPU_ERR("device is shutting down, ignore enable request\n"); + return -EIO; + } + if (host_ctx->fw_state == FW_UNLOADED) { ret = load_fw_nolock(npu_dev, host_ctx->auto_pil_disable ? 
true : false);
@@ -470,6 +522,11 @@ static int disable_fw_nolock(struct npu_device *npu_dev)
 if (host_ctx->fw_ref_cnt > 0)
 return ret;
+ if (host_ctx->dev_shuttingdown) {
+ NPU_ERR("device is shutting down, ignore disable request\n");
+ return -EIO;
+ }
+
 /* turn on auto ACK for warm shuts down */
 npu_cc_reg_write(npu_dev, NPU_CC_NPU_CPC_RSC_CTRL, 3);
 reinit_completion(&host_ctx->fw_shutdown_done);
@@ -712,6 +769,24 @@ static int npu_panic_handler(struct notifier_block *this,
 return NOTIFY_DONE;
 }
+static int npu_reboot_handler(struct notifier_block *this,
+ unsigned long code, void *unused)
+{
+ struct npu_host_ctx *host_ctx =
+ container_of(this, struct npu_host_ctx, reboot_nb);
+
+ NPU_INFO("Device is rebooting with code %lu\n", code);
+
+ if ((code == NOTIFY_DONE) || (code == SYS_POWER_OFF)) {
+ mutex_lock(&host_ctx->lock);
+ host_ctx->dev_shuttingdown = true;
+ complete_pending_commands(host_ctx);
+ mutex_unlock(&host_ctx->lock);
+ }
+
+ return NOTIFY_DONE;
+}
+
 static void npu_update_pwr_work(struct work_struct *work)
 {
 int ret;
@@ -764,6 +839,13 @@ int npu_host_init(struct npu_device *npu_dev)
 goto fail;
 }
+ host_ctx->reboot_nb.notifier_call = npu_reboot_handler;
+ ret = register_reboot_notifier(&host_ctx->reboot_nb);
+ if (ret) {
+ NPU_ERR("register reboot notifier failed\n");
+ goto fail;
+ }
+
 host_ctx->panic_nb.notifier_call = npu_panic_handler;
 ret = atomic_notifier_chain_register(&panic_notifier_list,
 &host_ctx->panic_nb);
@@ -839,6 +921,7 @@ int npu_host_init(struct npu_device *npu_dev)
 if (host_ctx->notif_hdle)
 subsys_notif_unregister_notifier(host_ctx->notif_hdle,
 &host_ctx->nb);
+ unregister_reboot_notifier(&host_ctx->reboot_nb);
 mutex_destroy(&host_ctx->lock);
 return ret;
 }
@@ -854,6 +937,7 @@ void npu_host_deinit(struct npu_device *npu_dev)
 destroy_workqueue(host_ctx->wq);
 destroy_workqueue(host_ctx->wq_pri);
 subsys_notif_unregister_notifier(host_ctx->notif_hdle, &host_ctx->nb);
+ unregister_reboot_notifier(&host_ctx->reboot_nb);
 mutex_destroy(&host_ctx->lock);
 }
@@ -947,9 +1031,6 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 {
 struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 struct npu_network *network = NULL;
- struct npu_kevent kevt;
- struct npu_network_cmd *cmd;
- struct npu_misc_cmd *misc_cmd;
 bool fw_alive = true;
 int i, ret = 0;
@@ -961,6 +1042,12 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 return 0;
 }
+ if (host_ctx->dev_shuttingdown) {
+ NPU_INFO("device is shutting down, ignore error handler\n");
+ mutex_unlock(&host_ctx->lock);
+ return -EIO;
+ }
+
 if (host_ctx->wdg_irq_sts) {
 NPU_INFO("watchdog irq triggered\n");
 fw_alive = false;
@@ -1070,41 +1157,8 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 }
 complete(&host_ctx->fw_deinit_done);
+ complete_pending_commands(host_ctx);
- /* flush all pending npu cmds */
- for (i = 0; i < MAX_LOADED_NETWORK; i++) {
- network = &host_ctx->networks[i];
- if (!network->is_valid || !network->fw_error)
- continue;
-
- if (network->is_async) {
- NPU_DBG("async cmd, queue ssr event\n");
- kevt.evt.type = MSM_NPU_EVENT_TYPE_SSR;
- kevt.evt.u.ssr.network_hdl =
- network->network_hdl;
- if (npu_queue_event(network->client, &kevt))
- NPU_ERR("queue npu event failed\n");
-
- while (!list_empty(&network->cmd_list)) {
- cmd = list_first_entry(&network->cmd_list,
- struct npu_network_cmd, list);
- npu_dequeue_network_cmd(network, cmd);
- npu_free_network_cmd(host_ctx, cmd);
- }
- } else {
- list_for_each_entry(cmd, &network->cmd_list, list) {
- NPU_DBG("complete
network %llx trans_id %d\n", - network->id, cmd->trans_id); - complete(&cmd->cmd_done); - } - } - } - - list_for_each_entry(misc_cmd, &host_ctx->misc_cmd_list, list) { - NPU_DBG("complete misc cmd trans_id %d\n", - misc_cmd->trans_id); - complete(&misc_cmd->cmd_done); - } mutex_unlock(&host_ctx->lock); return ret; @@ -2086,6 +2140,7 @@ static int npu_send_network_cmd(struct npu_device *npu_dev, WARN_ON(!mutex_is_locked(&host_ctx->lock)); if (network->fw_error || host_ctx->fw_error || + host_ctx->dev_shuttingdown || (host_ctx->fw_state != FW_ENABLED)) { NPU_ERR("fw is in error state or disabled\n"); ret = -EIO; @@ -2111,7 +2166,8 @@ static int npu_send_misc_cmd(struct npu_device *npu_dev, uint32_t q_idx, WARN_ON(!mutex_is_locked(&host_ctx->lock)); - if (host_ctx->fw_error || (host_ctx->fw_state != FW_ENABLED)) { + if (host_ctx->fw_error || host_ctx->dev_shuttingdown || + (host_ctx->fw_state != FW_ENABLED)) { NPU_ERR("fw is in error state or disabled\n"); ret = -EIO; } else { @@ -2548,6 +2604,12 @@ int32_t npu_host_load_network_v2(struct npu_client *client, goto free_load_cmd; } + if (host_ctx->dev_shuttingdown) { + ret = -EIO; + NPU_ERR("device is shutting down\n"); + goto free_load_cmd; + } + if (!ret) { NPU_ERR("npu: NPU_IPC_CMD_LOAD time out %lld:%d\n", network->id, load_cmd->trans_id); @@ -2633,6 +2695,11 @@ int32_t npu_host_unload_network(struct npu_client *client, goto free_network; } + if (host_ctx->dev_shuttingdown) { + NPU_ERR("device is shutting down, skip unload network in fw\n"); + goto free_network; + } + NPU_DBG("Unload network %lld\n", network->id); /* prepare IPC packet for UNLOAD */ unload_packet.header.cmd_type = NPU_IPC_CMD_UNLOAD; @@ -2686,7 +2753,7 @@ int32_t npu_host_unload_network(struct npu_client *client, mutex_lock(&host_ctx->lock); - if (network->fw_error) { + if (network->fw_error || host_ctx->dev_shuttingdown) { ret = -EIO; NPU_ERR("fw is in error state during unload network\n"); goto free_network; @@ -2779,6 +2846,12 @@ int32_t npu_host_exec_network_v2(struct npu_client *client, goto exec_v2_done; } + if (host_ctx->dev_shuttingdown) { + NPU_ERR("device is shutting down\n"); + ret = -EIO; + goto exec_v2_done; + } + if (network->is_async && !async_ioctl) { NPU_ERR("network is in async mode\n"); ret = -EINVAL; @@ -2869,6 +2942,12 @@ int32_t npu_host_exec_network_v2(struct npu_client *client, goto free_exec_cmd; } + if (host_ctx->dev_shuttingdown) { + ret = -EIO; + NPU_ERR("device is shutting down during execute_v2 network\n"); + goto free_exec_cmd; + } + if (!ret) { NPU_ERR("npu: %llx:%d NPU_IPC_CMD_EXECUTE_V2 time out\n", network->id, exec_cmd->trans_id); diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h index e44fb38d827b..f22e721379c2 100644 --- a/drivers/media/platform/msm/npu/npu_mgr.h +++ b/drivers/media/platform/msm/npu/npu_mgr.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 
*/ #ifndef _NPU_MGR_H @@ -131,10 +131,12 @@ struct npu_host_ctx { uint32_t err_irq_sts; uint32_t wdg_irq_sts; bool fw_error; + bool dev_shuttingdown; bool cancel_work; bool app_crashed; struct notifier_block nb; struct notifier_block panic_nb; + struct notifier_block reboot_nb; void *notif_hdle; spinlock_t bridge_mbox_lock; bool bridge_mbox_pwr_on; diff --git a/drivers/media/platform/msm/synx/synx.c b/drivers/media/platform/msm/synx/synx.c index cd6795cb0bcb..4006ebbd4167 100644 --- a/drivers/media/platform/msm/synx/synx.c +++ b/drivers/media/platform/msm/synx/synx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */ #define pr_fmt(fmt) "synx: " fmt @@ -36,6 +36,12 @@ void synx_external_callback(s32 sync_obj, int status, void *data) pr_debug("signaling synx 0x%x from external callback %d\n", synx_obj, sync_obj); synx_signal_core(row, status); + + /* + * release the reference on the metadata + * obtained during callback registration. + */ + synx_release_handle(row); } else { pr_err("invalid callback from sync external obj %d\n", sync_obj); @@ -148,6 +154,7 @@ int synx_register_callback(s32 synx_obj, pr_err("duplicate registration for synx 0x%x\n", synx_obj); mutex_unlock(&synx_dev->row_locks[row->index]); + synx_release_handle(row); return -EALREADY; } } @@ -155,6 +162,7 @@ int synx_register_callback(s32 synx_obj, synx_cb = kzalloc(sizeof(*synx_cb), GFP_KERNEL); if (!synx_cb) { mutex_unlock(&synx_dev->row_locks[row->index]); + synx_release_handle(row); return -ENOMEM; } @@ -172,12 +180,13 @@ int synx_register_callback(s32 synx_obj, queue_work(synx_dev->work_queue, &synx_cb->cb_dispatch_work); mutex_unlock(&synx_dev->row_locks[row->index]); + synx_release_handle(row); return 0; } list_add_tail(&synx_cb->list, &row->callback_list); mutex_unlock(&synx_dev->row_locks[row->index]); - + synx_release_handle(row); return 0; } @@ -217,6 +226,7 @@ int synx_deregister_callback(s32 synx_obj, } mutex_unlock(&synx_dev->row_locks[row->index]); + synx_release_handle(row); return 0; } @@ -318,6 +328,7 @@ int synx_signal_core(struct synx_table_row *row, u32 status) if (!bind_ops) { pr_err("invalid bind ops for %u\n", bind_descs[i].external_desc.type); + synx_release_handle(row); kfree(data); continue; } @@ -327,9 +338,18 @@ int synx_signal_core(struct synx_table_row *row, u32 status) */ ret = bind_ops->deregister_callback( synx_external_callback, data, sync_id); - if (ret < 0) + if (ret < 0) { pr_err("de-registration fail on sync: %d, err: %d\n", sync_id, ret); + continue; + } + + /* + * release the reference on the metadata + * obtained during callback registration. 
+ */ + synx_release_handle(row); + pr_debug("signaling external sync: %d, status: %u\n", sync_id, status); /* optional function to enable external signaling */ @@ -361,6 +381,7 @@ int synx_signal_core(struct synx_table_row *row, u32 status) int synx_signal(s32 synx_obj, u32 status) { + int rc; struct synx_table_row *row = NULL; row = synx_from_handle(synx_obj); @@ -369,7 +390,9 @@ int synx_signal(s32 synx_obj, u32 status) return -EINVAL; } - return synx_signal_core(row, status); + rc = synx_signal_core(row, status); + synx_release_handle(row); + return rc; } int synx_merge(s32 *synx_objs, u32 num_objs, s32 *synx_merged) @@ -440,6 +463,29 @@ int synx_merge(s32 *synx_objs, u32 num_objs, s32 *synx_merged) return rc; } +static void synx_remove_handle(struct kref *kref) +{ + struct synx_obj_node *obj_node, *temp_obj_node; + struct synx_handle_entry *entry = + container_of(kref, struct synx_handle_entry, refcount); + struct synx_table_row *row = entry->row; + + idr_remove(&synx_dev->synx_ids, entry->synx_obj); + if (row) + list_for_each_entry_safe(obj_node, + temp_obj_node, &row->synx_obj_list, list) { + if (obj_node->synx_obj == entry->synx_obj) { + pr_debug("removed synx obj at 0x%x successful\n", + obj_node->synx_obj); + list_del_init(&obj_node->list); + kfree(obj_node); + } + } + + pr_debug("released handle entry %pK\n", entry); + kfree(entry); +} + static int synx_release_core(struct synx_table_row *row) { s32 idx; @@ -450,10 +496,10 @@ static int synx_release_core(struct synx_table_row *row) * (definitely for merged synx on invoing deinit) * be carefull while accessing the metadata */ - mutex_lock(&synx_dev->row_locks[row->index]); - fence = row->fence; idx = row->index; - if (!idx) { + mutex_lock(&synx_dev->row_locks[idx]); + fence = row->fence; + if ((!idx) || (!fence)) { mutex_unlock(&synx_dev->row_locks[idx]); pr_err("object already cleaned up at %d\n", idx); return -EINVAL; @@ -466,7 +512,7 @@ static int synx_release_core(struct synx_table_row *row) if (is_merged_synx(row)) synx_deinit_object(row); - /* do not reference fence and row in the function after this */ + /* remove the reference taken during create */ dma_fence_put(fence); mutex_unlock(&synx_dev->row_locks[idx]); pr_debug("Exit %s\n", __func__); @@ -476,7 +522,11 @@ static int synx_release_core(struct synx_table_row *row) int synx_release(s32 synx_obj) { + int rc; struct synx_table_row *row = NULL; + struct dma_fence *fence; + struct synx_handle_entry *entry; + unsigned long flags; pr_debug("Enter %s\n", __func__); @@ -486,7 +536,21 @@ int synx_release(s32 synx_obj) return -EINVAL; } - return synx_release_core(row); + spin_lock_irqsave(&synx_dev->idr_lock, flags); + entry = idr_find(&synx_dev->synx_ids, synx_obj); + if (entry) + kref_put(&entry->refcount, synx_remove_handle); + spin_unlock_irqrestore(&synx_dev->idr_lock, flags); + + if (!entry) { + pr_err("synx already released: 0x%x\n", synx_obj); + return -EINVAL; + } + + fence = row->fence; + rc = synx_release_core(row); + dma_fence_put(fence); + return rc; } int synx_wait(s32 synx_obj, u64 timeout_ms) @@ -507,6 +571,7 @@ int synx_wait(s32 synx_obj, u64 timeout_ms) mutex_unlock(&synx_dev->row_locks[row->index]); pr_err("object already cleaned up at %d\n", row->index); + synx_release_handle(row); return -EINVAL; } mutex_unlock(&synx_dev->row_locks[row->index]); @@ -515,14 +580,17 @@ int synx_wait(s32 synx_obj, u64 timeout_ms) msecs_to_jiffies(timeout_ms)); if (timeleft <= 0) { pr_err("timed out for synx obj 0x%x\n", synx_obj); + synx_release_handle(row); return -ETIMEDOUT; 
} if (synx_status(row) != SYNX_STATE_SIGNALED_SUCCESS) { pr_err("signaled error on synx obj 0x%x\n", synx_obj); + synx_release_handle(row); return -EINVAL; } + synx_release_handle(row); pr_debug("Exit %s\n", __func__); return 0; @@ -546,6 +614,7 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync) if (is_merged_synx(row)) { pr_err("cannot bind to merged fence: 0x%x\n", synx_obj); + synx_release_handle(row); return -EINVAL; } @@ -553,18 +622,22 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync) if (!bind_ops) { pr_err("invalid bind ops for %u\n", external_sync.type); + synx_release_handle(row); return -EINVAL; } data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) + if (!data) { + synx_release_handle(row); return -ENOMEM; + } mutex_lock(&synx_dev->row_locks[row->index]); if (synx_status(row) != SYNX_STATE_ACTIVE) { pr_err("bind to non-active synx is prohibited 0x%x\n", synx_obj); mutex_unlock(&synx_dev->row_locks[row->index]); + synx_release_handle(row); kfree(data); return -EINVAL; } @@ -573,6 +646,7 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync) pr_err("max number of bindings reached for synx_objs 0x%x\n", synx_obj); mutex_unlock(&synx_dev->row_locks[row->index]); + synx_release_handle(row); kfree(data); return -ENOMEM; } @@ -584,6 +658,7 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync) pr_err("duplicate binding for external sync %d\n", external_sync.id[0]); mutex_unlock(&synx_dev->row_locks[row->index]); + synx_release_handle(row); kfree(data); return -EALREADY; } @@ -593,12 +668,21 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync) data->synx_obj = synx_obj; data->secure_key = synx_generate_secure_key(row); + /* + * get reference to avoid cleanup of synx handle. + * reference would be released in the callback function. 
+ */ + dma_fence_get(row->fence); + rc = bind_ops->register_callback(synx_external_callback, data, external_sync.id[0]); if (rc < 0) { pr_err("callback registration failed for %d\n", external_sync.id[0]); + /* release the callback reference obtained */ + dma_fence_put(row->fence); mutex_unlock(&synx_dev->row_locks[row->index]); + synx_release_handle(row); kfree(data); return rc; } @@ -612,12 +696,14 @@ int synx_bind(s32 synx_obj, struct synx_external_desc external_sync) pr_debug("added external sync %d to bindings of 0x%x\n", external_sync.id[0], synx_obj); + synx_release_handle(row); pr_debug("Exit %s\n", __func__); return rc; } int synx_get_status(s32 synx_obj) { + int rc; struct synx_table_row *row = NULL; pr_debug("getting the status for synx 0x%x\n", synx_obj); @@ -628,12 +714,16 @@ int synx_get_status(s32 synx_obj) return SYNX_STATE_INVALID; } - return synx_status(row); + rc = synx_status(row); + synx_release_handle(row); + return rc; } int synx_addrefcount(s32 synx_obj, s32 count) { struct synx_table_row *row = NULL; + struct synx_handle_entry *entry; + unsigned long flags; row = synx_from_handle(synx_obj); if (!row) { @@ -647,11 +737,36 @@ int synx_addrefcount(s32 synx_obj, s32 count) return -EINVAL; } + spin_lock_irqsave(&synx_dev->idr_lock, flags); + entry = idr_find(&synx_dev->synx_ids, synx_obj); + if (entry) + kref_get(&entry->refcount); + spin_unlock_irqrestore(&synx_dev->idr_lock, flags); + + if (!entry) { + pr_err("invalid synx handle entry: 0x%x\n", synx_obj); + synx_release_handle(row); + return -EINVAL; + } + mutex_lock(&synx_dev->row_locks[row->index]); - while (count--) + while (count--) { dma_fence_get(row->fence); + /* + * since each addrefcount needs to be released + * explicitly through separate release call, + * additional references have to be taken on + * handle entry to prevent leak. + */ + kref_get(&entry->refcount); + } mutex_unlock(&synx_dev->row_locks[row->index]); + spin_lock_irqsave(&synx_dev->idr_lock, flags); + kref_put(&entry->refcount, synx_remove_handle); + spin_unlock_irqrestore(&synx_dev->idr_lock, flags); + + synx_release_handle(row); return 0; } @@ -714,8 +829,9 @@ int synx_import(s32 synx_obj, u32 import_key, s32 *new_synx_obj) int synx_export(s32 synx_obj, u32 *import_key) { - int rc; + int rc = 0; struct synx_table_row *row = NULL; + struct dma_fence *fence = NULL; pr_debug("Enter %s\n", __func__); @@ -723,10 +839,6 @@ int synx_export(s32 synx_obj, u32 *import_key) if (!row) return -EINVAL; - rc = synx_generate_import_key(row, synx_obj, import_key); - if (rc < 0) - return rc; - mutex_lock(&synx_dev->row_locks[row->index]); /* * to make sure the synx is not lost if the process dies or @@ -735,11 +847,29 @@ int synx_export(s32 synx_obj, u32 *import_key) * and account for the extra reference. Otherwise, this will * be a dangling reference and needs to be garbage collected. 
*/ - dma_fence_get(row->fence); + if (row->fence) { + dma_fence_get(row->fence); + fence = row->fence; + } else { + rc = -EINVAL; + } mutex_unlock(&synx_dev->row_locks[row->index]); + + if (rc) { + pr_err("invalid export of synx obj %d\n", synx_obj); + return rc; + } + + rc = synx_generate_import_key(row, synx_obj, import_key, fence); + if (rc < 0) { + dma_fence_put(fence); + pr_err("export of synx obj %d failed\n", synx_obj); + } + + synx_release_handle(row); pr_debug("Exit %s\n", __func__); - return 0; + return rc; } @@ -952,12 +1082,15 @@ static int synx_handle_register_user_payload( if (!client) { pr_err("invalid client for process %d\n", current->pid); + synx_release_handle(row); return -EINVAL; } user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL); - if (!user_payload_kernel) + if (!user_payload_kernel) { + synx_release_handle(row); return -ENOMEM; + } user_payload_kernel->client = client; user_payload_kernel->data.synx_obj = synx_obj; @@ -975,6 +1108,7 @@ static int synx_handle_register_user_payload( list_add_tail(&user_payload_kernel->list, &client->eventq); mutex_unlock(&client->eventq_lock); mutex_unlock(&synx_dev->row_locks[row->index]); + synx_release_handle(row); wake_up_all(&client->wq); return 0; } @@ -988,6 +1122,7 @@ static int synx_handle_register_user_payload( pr_err("callback already registered on 0x%x\n", synx_obj); mutex_unlock(&synx_dev->row_locks[row->index]); + synx_release_handle(row); kfree(user_payload_kernel); return -EALREADY; } @@ -995,6 +1130,7 @@ static int synx_handle_register_user_payload( list_add_tail(&user_payload_kernel->list, &row->user_payload_list); mutex_unlock(&synx_dev->row_locks[row->index]); + synx_release_handle(row); pr_debug("Exit %s\n", __func__); return 0; @@ -1030,6 +1166,7 @@ static int synx_handle_deregister_user_payload( if (!client) { pr_err("invalid client for process %d\n", current->pid); + synx_release_handle(row); return -EINVAL; } @@ -1078,6 +1215,7 @@ static int synx_handle_deregister_user_payload( wake_up_all(&client->wq); } + synx_release_handle(row); pr_debug("Exit %s\n", __func__); return 0; } diff --git a/drivers/media/platform/msm/synx/synx_private.h b/drivers/media/platform/msm/synx/synx_private.h index ac182af5b562..7a67fc610f54 100644 --- a/drivers/media/platform/msm/synx/synx_private.h +++ b/drivers/media/platform/msm/synx/synx_private.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. */ #ifndef __SYNX_PRIVATE_H__ @@ -153,6 +153,19 @@ struct synx_table_row { struct list_head user_payload_list; }; +/** + * struct synx_handle_entry - Entry saved in the IDR handle table + * + * @row : Pointer to synx object + * @synx_obj : Synx unique ID + * @refcount : Reference count + */ +struct synx_handle_entry { + struct synx_table_row *row; + s32 synx_obj; + struct kref refcount; +}; + /** * struct synx_registered_ops - External sync clients registered for bind * operations with synx driver diff --git a/drivers/media/platform/msm/synx/synx_util.c b/drivers/media/platform/msm/synx/synx_util.c index 21da43414eb1..6b0cf44ee6a6 100644 --- a/drivers/media/platform/msm/synx/synx_util.c +++ b/drivers/media/platform/msm/synx/synx_util.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. 
*/ #define pr_fmt(fmt) "synx: " fmt @@ -73,7 +73,7 @@ int synx_init_object(struct synx_table_row *table, mutex_unlock(&synx_dev->row_locks[idx]); pr_debug("synx obj init: id:0x%x state:%u fence: 0x%pK\n", - synx_status(row), fence); + id, synx_status(row), fence); return 0; } @@ -175,6 +175,7 @@ int synx_deinit_object(struct synx_table_row *row) struct synx_callback_info *synx_cb, *temp_cb; struct synx_cb_data *upayload_info, *temp_upayload; struct synx_obj_node *obj_node, *temp_obj_node; + struct synx_handle_entry *entry; unsigned long flags; if (!row || !synx_dev) @@ -184,17 +185,22 @@ int synx_deinit_object(struct synx_table_row *row) spin_lock_irqsave(&synx_dev->idr_lock, flags); list_for_each_entry_safe(obj_node, temp_obj_node, &row->synx_obj_list, list) { - if ((struct synx_table_row *)idr_remove(&synx_dev->synx_ids, - obj_node->synx_obj) != row) { + entry = idr_remove(&synx_dev->synx_ids, + obj_node->synx_obj); + if (!entry) { pr_err("removing data in idr table failed 0x%x\n", obj_node->synx_obj); - spin_unlock_irqrestore(&synx_dev->idr_lock, flags); - return -EINVAL; + list_del_init(&obj_node->list); + kfree(obj_node); + continue; } pr_debug("removed synx obj at 0x%x successful\n", obj_node->synx_obj); list_del_init(&obj_node->list); kfree(obj_node); + pr_debug("released handle entry %pK\n", + entry); + kfree(entry); } spin_unlock_irqrestore(&synx_dev->idr_lock, flags); @@ -358,6 +364,7 @@ s32 synx_merge_error(s32 *synx_objs, u32 num_objs) mutex_lock(&synx_dev->row_locks[row->index]); synx_release_reference(row->fence); mutex_unlock(&synx_dev->row_locks[row->index]); + synx_release_handle(row); } return 0; @@ -389,6 +396,7 @@ int synx_util_validate_merge(s32 *synx_objs, mutex_lock(&synx_dev->row_locks[row->index]); count += synx_add_reference(row->fence); mutex_unlock(&synx_dev->row_locks[row->index]); + synx_release_handle(row); } fences = kcalloc(count, sizeof(*fences), GFP_KERNEL); @@ -410,6 +418,7 @@ int synx_util_validate_merge(s32 *synx_objs, mutex_lock(&synx_dev->row_locks[row->index]); count += synx_fence_add(row->fence, fences, count); mutex_unlock(&synx_dev->row_locks[row->index]); + synx_release_handle(row); } /* eliminate duplicates */ @@ -556,15 +565,24 @@ u32 synx_status_locked(struct synx_table_row *row) void *synx_from_handle(s32 synx_obj) { s32 base; - struct synx_table_row *row; + struct synx_table_row *row = NULL; + struct synx_handle_entry *entry; unsigned long flags; if (!synx_dev) return NULL; spin_lock_irqsave(&synx_dev->idr_lock, flags); - row = (struct synx_table_row *) idr_find(&synx_dev->synx_ids, - synx_obj); + entry = idr_find(&synx_dev->synx_ids, synx_obj); + if (entry && entry->row) { + row = entry->row; + /* + * obtain additional reference at the start of each function + * so that release will not affect cleanup the object which + * still being used by other function. 
+ */ + dma_fence_get(row->fence); + } spin_unlock_irqrestore(&synx_dev->idr_lock, flags); if (!row) { @@ -584,18 +602,44 @@ void *synx_from_handle(s32 synx_obj) return row; } +void synx_release_handle(void *pObj) +{ + struct synx_table_row *row = pObj; + + if (!row) + return; + + dma_fence_put(row->fence); +} + s32 synx_create_handle(void *pObj) { s32 base = current->tgid << 16; s32 id; + struct synx_handle_entry *entry; unsigned long flags; if (!synx_dev) return -EINVAL; + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return -ENOMEM; + + /* + * handle entry is added to IDR table on create + * and is removed on release function, after which + * handle will not be available to clients. + * But release will not affect process which already + * own a reference. + */ + kref_init(&entry->refcount); + entry->row = pObj; + spin_lock_irqsave(&synx_dev->idr_lock, flags); - id = idr_alloc(&synx_dev->synx_ids, pObj, + id = idr_alloc(&synx_dev->synx_ids, entry, base, base + 0x10000, GFP_ATOMIC); + entry->synx_obj = id; spin_unlock_irqrestore(&synx_dev->idr_lock, flags); pr_debug("generated Id: 0x%x, base: 0x%x, client: 0x%x\n", @@ -659,7 +703,8 @@ struct synx_table_row *synx_from_import_key(s32 synx_obj, u32 key) int synx_generate_import_key(struct synx_table_row *row, s32 synx_obj, - u32 *key) + u32 *key, + struct dma_fence *fence) { bool bit; long idx = 0; @@ -712,7 +757,7 @@ int synx_generate_import_key(struct synx_table_row *row, new_row = synx_dev->synx_table + idx; /* both metadata points to same dma fence */ - new_row->fence = row->fence; + new_row->fence = fence; new_row->index = idx; INIT_LIST_HEAD(&new_row->synx_obj_list); INIT_LIST_HEAD(&new_row->callback_list); @@ -732,18 +777,23 @@ int synx_generate_import_key(struct synx_table_row *row, void *synx_from_key(s32 id, u32 secure_key) { struct synx_table_row *row = NULL; + struct synx_handle_entry *entry; + unsigned long flags; if (!synx_dev) return NULL; - spin_lock_bh(&synx_dev->idr_lock); - row = (struct synx_table_row *) idr_find(&synx_dev->synx_ids, id); + spin_lock_irqsave(&synx_dev->idr_lock, flags); + entry = idr_find(&synx_dev->synx_ids, id); + if (entry && entry->row) + row = entry->row; + spin_unlock_irqrestore(&synx_dev->idr_lock, flags); + if (!row) { - pr_err("invalid synx obj 0x%x\n", id); - spin_unlock_bh(&synx_dev->idr_lock); + pr_err( + "synx handle does not exist 0x%x\n", id); return NULL; } - spin_unlock_bh(&synx_dev->idr_lock); if (row->secure_key != secure_key) row = NULL; diff --git a/drivers/media/platform/msm/synx/synx_util.h b/drivers/media/platform/msm/synx/synx_util.h index 97859adc8b37..847140372a61 100644 --- a/drivers/media/platform/msm/synx/synx_util.h +++ b/drivers/media/platform/msm/synx/synx_util.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. 
*/
 #ifndef __SYNX_UTIL_H__
@@ -184,6 +184,9 @@ struct synx_table_row *synx_from_fence(struct dma_fence *fence);
 /**
 * @brief: Function to look up a synx handle
 *
+ * Function acquires reference on synx_table_row returned,
+ * which should be released through synx_release_handle function
+ *
 * @param synx_id : Synx handle
 *
 * @return The synx corresponding to the given handle or NULL if
@@ -191,6 +194,13 @@ struct synx_table_row *synx_from_fence(struct dma_fence *fence);
 */
 void *synx_from_handle(s32 synx_id);
+/**
+ * @brief: Function to release handle reference
+ *
+ * @param pObj : Pointer returned from synx_from_handle
+ */
+void synx_release_handle(void *pObj);
+
 /**
 * @brief: Function to create a new synx handle
 *
@@ -229,12 +239,14 @@ int synx_generate_secure_key(struct synx_table_row *row);
 * @param row : Pointer to the synx object row
 * @param synx_obj : Synx handle
 * @param key : Pointer to key (filled by the function)
+ * @param fence : Pointer to dma fence backing synx object row
 *
 * @return Status of operation. Negative in case of error. Zero otherwise.
 */
 int synx_generate_import_key(struct synx_table_row *row,
 s32 synx_obj,
- u32 *key);
+ u32 *key,
+ struct dma_fence *fence);
 /**
 * @brief: Function to authenticate requests for importing synx handle
diff --git a/drivers/media/platform/msm/vidc_3x/governors/msm_vidc_table_gov.c b/drivers/media/platform/msm/vidc_3x/governors/msm_vidc_table_gov.c
index 38816ff2208c..f4b1d10f959b 100644
--- a/drivers/media/platform/msm/vidc_3x/governors/msm_vidc_table_gov.c
+++ b/drivers/media/platform/msm/vidc_3x/governors/msm_vidc_table_gov.c
@@ -24,27 +24,6 @@
 #include "../vidc_hfi_api.h"
-enum bus_profile {
- VIDC_BUS_PROFILE_NORMAL = BIT(0),
- VIDC_BUS_PROFILE_LOW = BIT(1),
- VIDC_BUS_PROFILE_UBWC = BIT(2),
-};
-
-struct bus_profile_entry {
- struct {
- u32 load, freq;
- } *bus_table;
- u32 bus_table_size;
- u32 codec_mask;
- enum bus_profile profile;
-};
-
-struct msm_vidc_bus_table_gov {
- struct bus_profile_entry *bus_prof_entries;
- u32 count;
- struct devfreq_governor devfreq_gov;
-};
-
 static int __get_bus_freq(struct msm_vidc_bus_table_gov *gov,
 struct vidc_bus_vote_data *data,
 enum bus_profile profile)
@@ -85,31 +64,19 @@ static int __get_bus_freq(struct msm_vidc_bus_table_gov *gov,
 }
-static int msm_vidc_table_get_target_freq(struct devfreq *dev,
- unsigned long *frequency)
+int msm_vidc_table_get_target_freq(struct msm_vidc_bus_table_gov *gov,
+ struct msm_vidc_gov_data *vidc_data,
+ unsigned long *frequency)
 {
- struct devfreq_dev_status status = {0};
- struct msm_vidc_gov_data *vidc_data = NULL;
- struct msm_vidc_bus_table_gov *gov = NULL;
 enum bus_profile profile = 0;
 int i = 0;
- if (!dev || !frequency) {
- dprintk(VIDC_ERR, "%s: Invalid params %pK, %pK\n",
- __func__, dev, frequency);
+ if (!frequency || !gov || !vidc_data) {
+ dprintk(VIDC_ERR, "%s: Invalid params %pK\n",
+ __func__, frequency);
 return -EINVAL;
 }
- gov = container_of(dev->governor,
- struct msm_vidc_bus_table_gov, devfreq_gov);
- if (!gov) {
- dprintk(VIDC_ERR, "%s: governor not found\n", __func__);
- return -EINVAL;
- }
-
- dev->profile->get_dev_status(dev->dev.parent, &status);
- vidc_data = (struct msm_vidc_gov_data *)status.private_data;
-
 *frequency = 0;
 for (i = 0; i < vidc_data->data_count; i++) {
 struct vidc_bus_vote_data *data = &vidc_data->data[i];
@@ -149,232 +116,3 @@ static int msm_vidc_table_get_target_freq(struct devfreq *dev,
 exit:
 return 0;
 }
-
-int msm_vidc_table_event_handler(struct devfreq *devfreq,
- unsigned int event, void *data)
-{
-
int rc = 0; - - if (!devfreq) { - dprintk(VIDC_ERR, "%s: NULL devfreq\n", __func__); - return -EINVAL; - } - - switch (event) { - case DEVFREQ_GOV_START: - case DEVFREQ_GOV_RESUME: - mutex_lock(&devfreq->lock); - rc = update_devfreq(devfreq); - mutex_unlock(&devfreq->lock); - break; - } - - return rc; -} - -static int msm_vidc_free_bus_table(struct platform_device *pdev, - struct msm_vidc_bus_table_gov *data) -{ - int rc = 0, i = 0; - - if (!pdev || !data) { - dprintk(VIDC_ERR, "%s: invalid args %pK %pK\n", - __func__, pdev, data); - return -EINVAL; - } - - for (i = 0; i < data->count; i++) - data->bus_prof_entries[i].bus_table = NULL; - - data->bus_prof_entries = NULL; - data->count = 0; - - return rc; -} - -static int msm_vidc_load_bus_table(struct platform_device *pdev, - struct msm_vidc_bus_table_gov *data) -{ - int rc = 0, i = 0, j = 0; - const char *name = NULL; - struct bus_profile_entry *entry = NULL; - struct device_node *parent_node = NULL; - struct device_node *child_node = NULL; - - if (!pdev || !data) { - dprintk(VIDC_ERR, "%s: invalid args %pK %pK\n", - __func__, pdev, data); - return -EINVAL; - } - - of_property_read_string(pdev->dev.of_node, "name", &name); - if (strlen(name) > ARRAY_SIZE(data->devfreq_gov.name) - 1) { - dprintk(VIDC_ERR, - "%s: name is too long, max should be %zu chars\n", - __func__, ARRAY_SIZE(data->devfreq_gov.name) - 1); - return -EINVAL; - } - - strlcpy((char *)data->devfreq_gov.name, name, - ARRAY_SIZE(data->devfreq_gov.name)); - data->devfreq_gov.get_target_freq = msm_vidc_table_get_target_freq; - data->devfreq_gov.event_handler = msm_vidc_table_event_handler; - - parent_node = of_find_node_by_name(pdev->dev.of_node, - "qcom,bus-freq-table"); - if (!parent_node) { - dprintk(VIDC_DBG, "Node qcom,bus-freq-table not found.\n"); - return 0; - } - - data->count = of_get_child_count(parent_node); - if (!data->count) { - dprintk(VIDC_DBG, "No child nodes in qcom,bus-freq-table\n"); - return 0; - } - - data->bus_prof_entries = devm_kzalloc(&pdev->dev, - sizeof(*data->bus_prof_entries) * data->count, - GFP_KERNEL); - if (!data->bus_prof_entries) { - dprintk(VIDC_DBG, "no memory to allocate bus_prof_entries\n"); - return -ENOMEM; - } - - for_each_child_of_node(parent_node, child_node) { - - if (i >= data->count) { - dprintk(VIDC_ERR, - "qcom,bus-freq-table: invalid child node %d, max is %d\n", - i, data->count); - break; - } - entry = &data->bus_prof_entries[i]; - - if (of_find_property(child_node, "qcom,codec-mask", NULL)) { - rc = of_property_read_u32(child_node, - "qcom,codec-mask", &entry->codec_mask); - if (rc) { - dprintk(VIDC_ERR, - "qcom,codec-mask not found\n"); - break; - } - } - - if (of_find_property(child_node, "qcom,low-power-mode", NULL)) - entry->profile = VIDC_BUS_PROFILE_LOW; - else if (of_find_property(child_node, "qcom,ubwc-mode", NULL)) - entry->profile = VIDC_BUS_PROFILE_UBWC; - else - entry->profile = VIDC_BUS_PROFILE_NORMAL; - - if (of_find_property(child_node, - "qcom,load-busfreq-tbl", NULL)) { - rc = msm_vidc_load_u32_table(pdev, child_node, - "qcom,load-busfreq-tbl", - sizeof(*entry->bus_table), - (u32 **)&entry->bus_table, - &entry->bus_table_size); - if (rc) { - dprintk(VIDC_ERR, - "qcom,load-busfreq-tbl failed\n"); - break; - } - } else { - entry->bus_table = NULL; - entry->bus_table_size = 0; - } - - dprintk(VIDC_DBG, - "qcom,load-busfreq-tbl: size %d, codec_mask %#x, profile %#x\n", - entry->bus_table_size, entry->codec_mask, - entry->profile); - for (j = 0; j < entry->bus_table_size; j++) - dprintk(VIDC_DBG, " load %8d freq 
%8d\n", - entry->bus_table[j].load, - entry->bus_table[j].freq); - - i++; - } - - return rc; -} - -static int msm_vidc_bus_table_probe(struct platform_device *pdev) -{ - int rc = 0; - struct msm_vidc_bus_table_gov *gov = NULL; - - dprintk(VIDC_DBG, "%s\n", __func__); - - gov = devm_kzalloc(&pdev->dev, sizeof(*gov), GFP_KERNEL); - if (!gov) { - dprintk(VIDC_ERR, "%s: allocation failed\n", __func__); - return -ENOMEM; - } - - platform_set_drvdata(pdev, gov); - - rc = msm_vidc_load_bus_table(pdev, gov); - if (rc) - return rc; - - rc = devfreq_add_governor(&gov->devfreq_gov); - if (rc) - dprintk(VIDC_ERR, "%s: add governor failed\n", __func__); - - return rc; -} - -static int msm_vidc_bus_table_remove(struct platform_device *pdev) -{ - int rc = 0; - struct msm_vidc_bus_table_gov *gov = NULL; - - dprintk(VIDC_DBG, "%s\n", __func__); - - gov = platform_get_drvdata(pdev); - if (IS_ERR_OR_NULL(gov)) - return PTR_ERR(gov); - - rc = msm_vidc_free_bus_table(pdev, gov); - if (rc) - dprintk(VIDC_WARN, "%s: free bus table failed\n", __func__); - - rc = devfreq_remove_governor(&gov->devfreq_gov); - - return rc; -} - -static const struct of_device_id device_id[] = { - {.compatible = "qcom,msm-vidc,governor,table"}, - {} -}; - -static struct platform_driver msm_vidc_bus_table_driver = { - .probe = msm_vidc_bus_table_probe, - .remove = msm_vidc_bus_table_remove, - .driver = { - .name = "msm_vidc_bus_table_governor", - .of_match_table = device_id, - }, -}; - -static int __init msm_vidc_bus_table_init(void) -{ - - dprintk(VIDC_DBG, "%s\n", __func__); - - return platform_driver_register(&msm_vidc_bus_table_driver); -} - -module_init(msm_vidc_bus_table_init); - -static void __exit msm_vidc_bus_table_exit(void) -{ - dprintk(VIDC_DBG, "%s\n", __func__); - platform_driver_unregister(&msm_vidc_bus_table_driver); -} - -module_exit(msm_vidc_bus_table_exit); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/vidc_3x/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc_3x/msm_v4l2_vidc.c index 00a35099babd..a94198f5b3ab 100644 --- a/drivers/media/platform/msm/vidc_3x/msm_v4l2_vidc.c +++ b/drivers/media/platform/msm/vidc_3x/msm_v4l2_vidc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -334,7 +334,6 @@ static int msm_vidc_initialize_core(struct platform_device *pdev, init_completion(&core->completions[i]); } - INIT_DELAYED_WORK(&core->fw_unload_work, msm_vidc_fw_unload_handler); return rc; } diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc.c b/drivers/media/platform/msm/vidc_3x/msm_vidc.c index eb5cda963c54..f141ff9b5a36 100644 --- a/drivers/media/platform/msm/vidc_3x/msm_vidc.c +++ b/drivers/media/platform/msm/vidc_3x/msm_vidc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -533,7 +533,7 @@ int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b) } dprintk(VIDC_DBG, "%s: [MAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n", - __func__, binfo, i, binfo->smem[i], + __func__, binfo, i, &binfo->smem[i], &binfo->device_addr[i], binfo->fd[i], binfo->buff_off[i], binfo->mapped[i]); } @@ -586,7 +586,7 @@ int unmap_and_deregister_buf(struct msm_vidc_inst *inst, for (i = 0; i < temp->num_planes; i++) { dprintk(VIDC_DBG, "%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n", - __func__, temp, i, temp->smem[i], + __func__, temp, i, &temp->smem[i], &temp->device_addr[i], temp->fd[i], temp->buff_off[i], temp->mapped[i]); /* @@ -902,7 +902,7 @@ int msm_vidc_release_buffers(void *instance, int buffer_type) if (bi->mapped[i]) { dprintk(VIDC_DBG, "%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n", - __func__, bi, i, bi->smem[i], + __func__, bi, i, &bi->smem[i], &bi->device_addr[i], bi->fd[i], bi->buff_off[i], bi->mapped[i]); msm_comm_smem_free(inst, diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c b/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c index 3dcb707a7d2a..bae2b39a088f 100644 --- a/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_common.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2831,35 +2831,15 @@ static int msm_vidc_deinit_core(struct msm_vidc_inst *inst) if (core->state == VIDC_CORE_UNINIT) { dprintk(VIDC_INFO, "Video core: %d is already in state: %d\n", core->id, core->state); + mutex_unlock(&core->lock); goto core_already_uninited; } mutex_unlock(&core->lock); msm_comm_scale_clocks_and_bus(inst); - mutex_lock(&core->lock); - - if (!core->resources.never_unload_fw) { - cancel_delayed_work(&core->fw_unload_work); - - /* - * Delay unloading of firmware. This is useful - * in avoiding firmware download delays in cases where we - * will have a burst of back to back video playback sessions - * e.g. thumbnail generation. - */ - schedule_delayed_work(&core->fw_unload_work, - msecs_to_jiffies(core->state == VIDC_CORE_INVALID ? - 0 : msm_vidc_firmware_unload_delay)); - - dprintk(VIDC_DBG, "firmware unload delayed by %u ms\n", - core->state == VIDC_CORE_INVALID ? 
- 0 : msm_vidc_firmware_unload_delay); - } - core_already_uninited: change_inst_state(inst, MSM_VIDC_CORE_UNINIT); - mutex_unlock(&core->lock); return 0; } @@ -5313,43 +5293,6 @@ int msm_comm_smem_cache_operations(struct msm_vidc_inst *inst, mem->size, cache_ops); } -void msm_vidc_fw_unload_handler(struct work_struct *work) -{ - struct msm_vidc_core *core = NULL; - struct hfi_device *hdev = NULL; - int rc = 0; - - core = container_of(work, struct msm_vidc_core, fw_unload_work.work); - if (!core || !core->device) { - dprintk(VIDC_ERR, "%s - invalid work or core handle\n", - __func__); - return; - } - - hdev = core->device; - - mutex_lock(&core->lock); - if (list_empty(&core->instances) && - core->state != VIDC_CORE_UNINIT) { - if (core->state > VIDC_CORE_INIT) { - dprintk(VIDC_DBG, "Calling vidc_hal_core_release\n"); - rc = call_hfi_op(hdev, core_release, - hdev->hfi_device_data); - if (rc) { - dprintk(VIDC_ERR, - "Failed to release core, id = %d\n", - core->id); - mutex_unlock(&core->lock); - return; - } - } - core->state = VIDC_CORE_UNINIT; - kfree(core->capabilities); - core->capabilities = NULL; - } - mutex_unlock(&core->lock); -} - int msm_comm_set_color_format(struct msm_vidc_inst *inst, enum hal_buffer buffer_type, int fourcc) { diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h b/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h index c008d8ff3cb9..5b2daa3c4394 100644 --- a/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h +++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -262,7 +262,6 @@ struct msm_vidc_core { u32 dec_codec_supported; u32 codec_count; struct msm_vidc_capability *capabilities; - struct delayed_work fw_unload_work; bool smmu_fault_handled; }; @@ -414,7 +413,6 @@ struct msm_smem *msm_smem_user_to_kernel(struct msm_vidc_inst *inst, int fd, u32 offset, u32 size, enum hal_buffer buffer_type); -void msm_vidc_fw_unload_handler(struct work_struct *work); /* XXX: normally should be in msm_vidc.h, but that's meant for public APIs, * whereas this is private */ diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc_3x/msm_vidc_res_parse.c index ae245f4b5d30..6874a5dcb89d 100644 --- a/drivers/media/platform/msm/vidc_3x/msm_vidc_res_parse.c +++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_res_parse.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -72,7 +72,24 @@ static inline enum imem_type read_imem_type(struct platform_device *pdev)
 IMEM_NONE;
 }
+static inline void msm_vidc_free_bus_table(
+ struct msm_vidc_platform_resources *res)
+{
+ int i = 0;
+ struct msm_vidc_bus_table_gov *data = res->gov_data;
+ if (!data) {
+ dprintk(VIDC_ERR, "%s: invalid args %pK\n",
+ __func__, data);
+ return;
+ }
+
+ for (i = 0; i < data->count; i++)
+ data->bus_prof_entries[i].bus_table = NULL;
+
+ data->bus_prof_entries = NULL;
+ data->count = 0;
+}
 static inline void msm_vidc_free_allowed_clocks_table(
 struct msm_vidc_platform_resources *res)
 {
@@ -174,6 +190,7 @@ void msm_vidc_free_platform_resources(
 struct msm_vidc_platform_resources *res)
 {
 msm_vidc_free_clock_table(res);
+ msm_vidc_free_bus_table(res);
 msm_vidc_free_regulator_table(res);
 msm_vidc_free_freq_table(res);
 msm_vidc_free_platform_version_table(res);
@@ -820,6 +837,8 @@ static int msm_vidc_populate_bus(struct device *dev,
 &bus->mode);
 if (!rc && !strcmp(bus->mode, PERF_GOV))
 bus->is_prfm_gov_used = true;
+ else
+ bus->is_prfm_gov_used = false;
 rc = of_property_read_u32_array(dev->of_node,
 "qcom,bus-range-kbps", range,
 ARRAY_SIZE(range));
@@ -1063,7 +1082,108 @@ static int msm_vidc_load_clock_table(
 err_load_clk_table_fail:
 return rc;
 }
+static int msm_vidc_load_bus_table(struct msm_vidc_platform_resources *res)
+{
+ int rc = 0, i = 0, j = 0;
+ struct bus_profile_entry *entry = NULL;
+ struct device_node *parent_node = NULL;
+ struct device_node *child_node = NULL;
+ struct msm_vidc_bus_table_gov *gov_data;
+ struct platform_device *pdev = res->pdev;
+ dprintk(VIDC_DBG, "%s\n", __func__);
+ if (!pdev) {
+ dprintk(VIDC_ERR, "%s: invalid args %pK\n",
+ __func__, pdev);
+ return -EINVAL;
+ }
+
+ res->gov_data = devm_kzalloc(&pdev->dev, sizeof(*gov_data), GFP_KERNEL);
+ if (!res->gov_data) {
+ dprintk(VIDC_ERR, "%s: allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ gov_data = res->gov_data;
+ parent_node = of_find_node_by_name(pdev->dev.of_node,
+ "qcom,bus-freq-table");
+ if (!parent_node) {
+ dprintk(VIDC_DBG, "Node qcom,bus-freq-table not found.\n");
+ return 0;
+ }
+
+ gov_data->count = of_get_child_count(parent_node);
+ if (!gov_data->count) {
+ dprintk(VIDC_DBG, "No child nodes in qcom,bus-freq-table\n");
+ return 0;
+ }
+
+ gov_data->bus_prof_entries = devm_kzalloc(&pdev->dev,
+ sizeof(*gov_data->bus_prof_entries) * gov_data->count,
+ GFP_KERNEL);
+ if (!gov_data->bus_prof_entries) {
+ dprintk(VIDC_DBG, "no memory to allocate bus_prof_entries\n");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(parent_node, child_node) {
+
+ if (i >= gov_data->count) {
+ dprintk(VIDC_ERR,
+ "qcom,bus-freq-table: invalid child node %d, max is %d\n",
+ i, gov_data->count);
+ break;
+ }
+ entry = &gov_data->bus_prof_entries[i];
+
+ if (of_find_property(child_node, "qcom,codec-mask", NULL)) {
+ rc = of_property_read_u32(child_node,
+ "qcom,codec-mask", &entry->codec_mask);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "qcom,codec-mask not found\n");
+ break;
+ }
+ }
+
+ if (of_find_property(child_node, "qcom,low-power-mode", NULL))
+ entry->profile = VIDC_BUS_PROFILE_LOW;
+ else if (of_find_property(child_node, "qcom,ubwc-mode", NULL))
+ entry->profile = VIDC_BUS_PROFILE_UBWC;
+ else
+ entry->profile = VIDC_BUS_PROFILE_NORMAL;
+
+ if (of_find_property(child_node,
+ "qcom,load-busfreq-tbl", NULL)) {
+ rc = msm_vidc_load_u32_table(pdev, child_node,
+
"qcom,load-busfreq-tbl", + sizeof(*entry->bus_table), + (u32 **)&entry->bus_table, + &entry->bus_table_size); + if (rc) { + dprintk(VIDC_ERR, + "qcom,load-busfreq-tbl failed\n"); + break; + } + } else { + entry->bus_table = NULL; + entry->bus_table_size = 0; + } + + dprintk(VIDC_DBG, + "qcom,load-busfreq-tbl: size %d, codec_mask %#x, profile %#x\n", + entry->bus_table_size, entry->codec_mask, + entry->profile); + for (j = 0; j < entry->bus_table_size; j++) + dprintk(VIDC_DBG, " load %8d freq %8d\n", + entry->bus_table[j].load, + entry->bus_table[j].freq); + + i++; + } + + return rc; +} int read_platform_resources_from_dt( struct msm_vidc_platform_resources *res) { @@ -1183,6 +1303,13 @@ int read_platform_resources_from_dt( goto err_load_allowed_clocks_table; } + rc = msm_vidc_load_bus_table(res); + if (rc) { + dprintk(VIDC_ERR, + "Failed to load bus table: %d\n", rc); + goto err_load_bus_table; + } + rc = of_property_read_u32(pdev->dev.of_node, "qcom,max-hw-load", &res->max_load); if (rc) { @@ -1214,9 +1341,6 @@ int read_platform_resources_from_dt( dprintk(VIDC_DBG, "Power collapse supported = %s\n", res->sw_power_collapsible ? "yes" : "no"); - res->never_unload_fw = of_property_read_bool(pdev->dev.of_node, - "qcom,never-unload-fw"); - of_property_read_u32(pdev->dev.of_node, "qcom,pm-qos-latency-us", &res->pm_qos_latency_us); @@ -1234,6 +1358,8 @@ int read_platform_resources_from_dt( err_setup_legacy_cb: err_load_max_hw_load: msm_vidc_free_allowed_clocks_table(res); +err_load_bus_table: + msm_vidc_free_bus_table(res); err_load_allowed_clocks_table: msm_vidc_free_cycles_per_mb_table(res); err_load_cycles_per_mb_table: diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_resources.h b/drivers/media/platform/msm/vidc_3x/msm_vidc_resources.h index 11ffeee70789..fd4709649fac 100644 --- a/drivers/media/platform/msm/vidc_3x/msm_vidc_resources.h +++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_resources.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -149,6 +149,26 @@ struct clock_freq_table { u32 count; }; +enum bus_profile { + VIDC_BUS_PROFILE_NORMAL = BIT(0), + VIDC_BUS_PROFILE_LOW = BIT(1), + VIDC_BUS_PROFILE_UBWC = BIT(2), +}; + +struct bus_profile_entry { + struct { + u32 load, freq; + } *bus_table; + u32 bus_table_size; + u32 codec_mask; + enum bus_profile profile; +}; + +struct msm_vidc_bus_table_gov { + struct bus_profile_entry *bus_prof_entries; + u32 count; +}; + struct msm_vidc_platform_resources { phys_addr_t firmware_base; phys_addr_t register_base; @@ -176,6 +196,7 @@ struct msm_vidc_platform_resources { struct platform_device *pdev; struct regulator_set regulator_set; struct clock_set clock_set; + struct msm_vidc_bus_table_gov *gov_data; struct bus_set bus_set; bool use_non_secure_pil; bool sw_power_collapsible; @@ -185,7 +206,6 @@ struct msm_vidc_platform_resources { bool thermal_mitigable; const char *fw_name; const char *hfi_version; - bool never_unload_fw; uint32_t pm_qos_latency_us; uint32_t max_inst_count; uint32_t max_secure_inst_count; diff --git a/drivers/media/platform/msm/vidc_3x/venus_hfi.c b/drivers/media/platform/msm/vidc_3x/venus_hfi.c index 12231205d528..113fcd299e37 100644 --- a/drivers/media/platform/msm/vidc_3x/venus_hfi.c +++ b/drivers/media/platform/msm/vidc_3x/venus_hfi.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2016, 2018-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2016, 2018-2021, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -113,7 +113,10 @@ static inline void __strict_check(struct venus_hfi_device *device) WARN_ON(VIDC_DBG_WARN_ENABLE); } } - +static inline bool is_clock_bus_voted(struct venus_hfi_device *device) +{ + return (device->bus_vote.total_bw_ddr && device->clk_freq); +} static inline void __set_state(struct venus_hfi_device *device, enum venus_hfi_state state) { @@ -132,7 +135,7 @@ static void __dump_packet(u8 *packet) /* row must contain enough for 0xdeadbaad * 8 to be converted into * "de ad ba ab " * 8 + '\0' */ - char row[3 * row_size]; + char row[96]; /*char row[3 * row_size];*/ for (c = 0; c * row_size < packet_size; ++c) { int bytes_to_read = ((c + 1) * row_size > packet_size) ? 
@@ -769,22 +772,14 @@ static int __unvote_buses(struct venus_hfi_device *device) { int rc = 0; struct bus_info *bus = NULL; - unsigned long freq = 0, zero = 0; + unsigned long freq = 0; venus_hfi_for_each_bus(device, bus) { - if (!bus->is_prfm_gov_used) { - freq = __calc_bw(bus, &device->bus_vote); rc = __vote_bandwidth(bus, &freq); - } else - rc = __vote_bandwidth(bus, &zero); - - if (rc) - goto err_unknown_device; + if (rc) + goto err_unknown_device; } - if (rc) - dprintk(VIDC_WARN, "Failed to unvote some buses\n"); - err_unknown_device: return rc; } @@ -795,7 +790,7 @@ static int __vote_buses(struct venus_hfi_device *device, int rc = 0; struct bus_info *bus = NULL; struct vidc_bus_vote_data *new_data = NULL; - unsigned long freq = 0, zero = 0; + unsigned long freq = 0; if (!num_data) { dprintk(VIDC_DBG, "No vote data available\n"); @@ -820,11 +815,18 @@ static int __vote_buses(struct venus_hfi_device *device, venus_hfi_for_each_bus(device, bus) { if (!bus->is_prfm_gov_used) { - freq = __calc_bw(bus, &device->bus_vote); - rc = __vote_bandwidth(bus, &freq); + rc = msm_vidc_table_get_target_freq( + device->res->gov_data, + &device->bus_vote, &freq); + if (rc) { + dprintk(VIDC_ERR, "unable to get freq\n"); + return rc; + } + device->bus_vote.total_bw_ddr = freq; } else - rc = __vote_bandwidth(bus, &zero); + freq = bus->range[1]; + rc = __vote_bandwidth(bus, &freq); if (rc) return rc; } @@ -1533,6 +1535,12 @@ static int __iface_cmdq_write_relaxed(struct venus_hfi_device *device, goto err_q_write; } + if (cmd_packet->packet_type == HFI_CMD_SESSION_EMPTY_BUFFER && + !is_clock_bus_voted(device)) + dprintk(VIDC_ERR, "%s: bus %llu bps or clock %lu MHz\n", + __func__, device->bus_vote.total_bw_ddr, + device->clk_freq); + if (!__write_queue(q_info, (u8 *)pkt, requires_interrupt)) { if (device->res->sw_power_collapsible) { cancel_delayed_work(&venus_hfi_pm_work); diff --git a/drivers/media/platform/msm/vidc_3x/venus_hfi.h b/drivers/media/platform/msm/vidc_3x/venus_hfi.h index 30e4a1dfdb2d..83a984062d6c 100644 --- a/drivers/media/platform/msm/vidc_3x/venus_hfi.h +++ b/drivers/media/platform/msm/vidc_3x/venus_hfi.h @@ -52,6 +52,10 @@ extern unsigned long __calc_bw(struct bus_info *bus, struct msm_vidc_gov_data *vidc_data); + +extern int msm_vidc_table_get_target_freq(struct msm_vidc_bus_table_gov *gov, + struct msm_vidc_gov_data *vidc_data, + unsigned long *frequency); struct hfi_queue_table_header { u32 qtbl_version; u32 qtbl_size; diff --git a/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h b/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h index 6e3bf2eca6eb..c6c0b261694a 100644 --- a/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h +++ b/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
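The table governor consumed by the reworked __vote_buses() above is only declared here (the extern msm_vidc_table_get_target_freq() in venus_hfi.h); its implementation is outside this diff. As a hedged sketch of how such a lookup could behave, using the msm_vidc_bus_table_gov / bus_profile_entry structures added to msm_vidc_resources.h above, with an illustrative function name and the assumption that table rows are sorted by ascending load:

    /*
     * Illustrative sketch only; not the driver's actual
     * msm_vidc_table_get_target_freq() implementation.
     */
    static unsigned long example_bus_table_lookup(struct msm_vidc_bus_table_gov *gov,
                                                  u32 codecs, enum bus_profile profile,
                                                  u32 load)
    {
        u32 i, j;

        for (i = 0; i < gov->count; i++) {
            struct bus_profile_entry *e = &gov->bus_prof_entries[i];

            if (!(e->codec_mask & codecs) || e->profile != profile)
                continue;

            /* first row whose rated load covers the requested load */
            for (j = 0; j < e->bus_table_size; j++)
                if (e->bus_table[j].load >= load)
                    return e->bus_table[j].freq;

            /* otherwise fall back to the largest entry in the table */
            if (e->bus_table_size)
                return e->bus_table[e->bus_table_size - 1].freq;
        }

        return 0; /* no matching profile entry */
    }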
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1434,6 +1434,7 @@ struct msm_vidc_gov_data { struct vidc_bus_vote_data *data; u32 data_count; int imem_size; + unsigned long total_bw_ddr; }; enum msm_vidc_power_mode { diff --git a/drivers/media/radio/radio-iris-transport.c b/drivers/media/radio/radio-iris-transport.c index db429e0b9ca4..389398bd1df0 100644 --- a/drivers/media/radio/radio-iris-transport.c +++ b/drivers/media/radio/radio-iris-transport.c @@ -14,15 +14,19 @@ #include #include #include +#include #include -#include #include #include + + struct radio_data { struct radio_hci_dev *hdev; struct tasklet_struct rx_task; - struct smd_channel *fm_channel; + struct rpmsg_endpoint *fm_channel; + unsigned char *data; + int length; }; struct radio_data hs; DEFINE_MUTEX(fm_smd_enable); @@ -46,44 +50,29 @@ static void radio_hci_smd_recv_event(unsigned long temp) int rc; struct sk_buff *skb; unsigned char *buf; - struct radio_data *hsmd = &hs; - - len = smd_read_avail(hsmd->fm_channel); - - while (len) { - skb = alloc_skb(len, GFP_ATOMIC); - if (!skb) { - FMDERR("Memory not allocated for the socket\n"); - return; - } - - buf = kmalloc(len, GFP_ATOMIC); - if (!buf) { - kfree_skb(skb); - return; - } - - rc = smd_read(hsmd->fm_channel, (void *)buf, len); - - memcpy(skb_put(skb, len), buf, len); - - skb_orphan(skb); - skb->dev = (struct net_device *)hs.hdev; - - rc = radio_hci_recv_frame(skb); - - kfree(buf); - len = smd_read_avail(hsmd->fm_channel); + FMDBG("smd_recv event: is called\n"); + len = hs.length; + buf = hs.data; + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) { + FMDERR("Memory not allocated for the socket\n"); + return; } + + memcpy(skb_put(skb, len), buf, len); + skb_orphan(skb); + skb->dev = (struct net_device *)hs.hdev; + rc = radio_hci_recv_frame(skb); + kfree(buf); } static int radio_hci_smd_send_frame(struct sk_buff *skb) { int len = 0; + FMDBG("hci_send_frame: is called\n"); + FM_INFO("skb %pK\n", skb); - FMDBG("skb %pK\n", skb); - - len = smd_write(hs.fm_channel, skb->data, skb->len); + len = rpmsg_send(hs.fm_channel, skb->data, skb->len); if (len < skb->len) { FMDERR("Failed to write Data %d\n", len); kfree_skb(skb); @@ -118,107 +107,128 @@ static void send_disable_event(struct work_struct *worker) kfree(worker); } -static void radio_hci_smd_notify_cmd(void *data, unsigned int event) -{ - struct radio_hci_dev *hdev = (struct radio_hci_dev *)data; - - FMDBG("data %p event %u\n", data, event); - - if (!hdev) { - FMDERR("Frame for unknown HCI device (hdev=NULL)\n"); - return; - } - - switch (event) { - case SMD_EVENT_DATA: - tasklet_schedule(&hs.rx_task); - break; - case SMD_EVENT_OPEN: - break; - case SMD_EVENT_CLOSE: - reset_worker = kzalloc(sizeof(*reset_worker), GFP_ATOMIC); - if (reset_worker) { - INIT_WORK(reset_worker, send_disable_event); - schedule_work(reset_worker); - } - break; - default: - break; - } -} - static int radio_hci_smd_register_dev(struct radio_data *hsmd) { struct radio_hci_dev *hdev; - int rc; - - FMDBG("hsmd: %pK\n", hsmd); - + FMDBG("smd_register event: is called\n"); if (hsmd == NULL) return -ENODEV; - hdev = kmalloc(sizeof(struct radio_hci_dev), GFP_KERNEL); if (hdev == NULL) return -ENODEV; tasklet_init(&hsmd->rx_task, radio_hci_smd_recv_event, - (unsigned long) hsmd); - hdev->send = radio_hci_smd_send_frame; + (unsigned long) hsmd); + hdev->send = radio_hci_smd_send_frame; hdev->destruct = radio_hci_smd_destruct; hdev->close_smd = radio_hci_smd_exit; 
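The remainder of this file's changes (below) replace the SMD channel with an rpmsg endpoint: the new callback duplicates the payload with kmemdup(GFP_ATOMIC) and defers the real work to the existing tasklet, keeping the rpmsg callback itself short. A minimal sketch of that handoff pattern follows; the example_* names are illustrative, not the driver's, and the tasklet is assumed to have been set up with tasklet_init() at probe time.

    #include <linux/rpmsg.h>
    #include <linux/interrupt.h>
    #include <linux/slab.h>

    struct example_ctx {
        struct tasklet_struct rx_task;
        unsigned char *data;
        int length;
    };

    static struct example_ctx ectx; /* file-scope context, like the driver's 'hs' */

    static void example_rx_tasklet(unsigned long arg)
    {
        struct example_ctx *ctx = (struct example_ctx *)arg;

        /* process ctx->data / ctx->length outside the rpmsg callback */
        kfree(ctx->data);
    }

    static int example_rpmsg_cb(struct rpmsg_device *rpdev, void *data, int len,
                                void *priv, u32 addr)
    {
        if (!len)
            return -EINVAL;

        /* the rpmsg buffer is not ours to keep, so copy it atomically */
        ectx.data = kmemdup(data, len, GFP_ATOMIC);
        if (!ectx.data)
            return -ENOMEM;

        ectx.length = len;
        tasklet_schedule(&ectx.rx_task);
        return 0;
    }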
- /* Open the SMD Channel and device and register the callback function */ - rc = smd_named_open_on_edge("APPS_FM", SMD_APPS_WCNSS, - &hsmd->fm_channel, hdev, radio_hci_smd_notify_cmd); - - if (rc < 0) { - FMDERR("Cannot open the command channel\n"); - hsmd->hdev = NULL; - kfree(hdev); - return -ENODEV; - } - - smd_disable_read_intr(hsmd->fm_channel); - if (radio_hci_register_dev(hdev) < 0) { FMDERR("Can't register HCI device\n"); - smd_close(hsmd->fm_channel); hsmd->hdev = NULL; kfree(hdev); return -ENODEV; } - hsmd->hdev = hdev; return 0; } static void radio_hci_smd_deregister(void) { + FM_INFO("smd_deregister: is called\n"); radio_hci_unregister_dev(); kfree(hs.hdev); hs.hdev = NULL; - - smd_close(hs.fm_channel); hs.fm_channel = 0; fmsmd_set = 0; } -static int radio_hci_smd_init(void) + +static int qcom_smd_fm_callback(struct rpmsg_device *rpdev, + void *data, int len, void *priv, u32 addr) +{ + FM_INFO("fm_callback: is called\n"); + if (!len) { + FMDERR("length received is NULL\n"); + return -EINVAL; + } + + hs.data = kmemdup((unsigned char *)data, len, GFP_ATOMIC); + if (!hs.data) { + FMDERR("Memory not allocated\n"); + return -ENOMEM; + } + + hs.length = len; + tasklet_schedule(&hs.rx_task); + return 0; +} + +static int qcom_smd_fm_probe(struct rpmsg_device *rpdev) { int ret; + FM_INFO("fm_probe: is called\n"); if (chan_opened) { FMDBG("Channel is already opened\n"); return 0; } - /* this should be called with fm_smd_enable lock held */ + hs.fm_channel = rpdev->ept; ret = radio_hci_smd_register_dev(&hs); if (ret < 0) { - FMDERR("Failed to register smd device\n"); + FMDERR("Failed to register with rpmsg device\n"); chan_opened = false; return ret; } + FMDBG("probe succeeded\n"); + chan_opened = true; + return ret; +} + +static void qcom_smd_fm_remove(struct rpmsg_device *rpdev) +{ + FM_INFO("fm_remove: is called\n"); + reset_worker = kzalloc(sizeof(*reset_worker), GFP_ATOMIC); + if (reset_worker) { + INIT_WORK(reset_worker, send_disable_event); + schedule_work(reset_worker); + } +} + + +static const struct rpmsg_device_id qcom_smd_fm_match[] = { + { "APPS_FM" }, + {} +}; + + +static struct rpmsg_driver qcom_smd_fm_driver = { + .probe = qcom_smd_fm_probe, + .remove = qcom_smd_fm_remove, + .callback = qcom_smd_fm_callback, + .id_table = qcom_smd_fm_match, + .drv = { + .name = "qcom_smd_fm", + }, +}; + +static int radio_hci_smd_init(void) +{ + int ret = 0; + + FM_INFO("smd_init : is called\n"); + if (chan_opened) { + FMDBG("Channel is already opened\n"); + return 0; + } + + ret = register_rpmsg_driver(&qcom_smd_fm_driver); + if (ret < 0) { + FMDERR("%s: Failed to register with rpmsg\n", __func__); + return ret; + } + chan_opened = true; return ret; } @@ -232,6 +242,7 @@ static void radio_hci_smd_exit(void) /* this should be called with fm_smd_enable lock held */ radio_hci_smd_deregister(); + unregister_rpmsg_driver(&qcom_smd_fm_driver); chan_opened = false; } @@ -256,7 +267,11 @@ static int hcismd_fm_set_enable(const char *val, const struct kernel_param *kp) } done: mutex_unlock(&fm_smd_enable); + return ret; } + +MODULE_ALIAS("rpmsg:APPS_FM"); + MODULE_DESCRIPTION("FM SMD driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/radio/rtc6226/radio-rtc6226-common.c b/drivers/media/radio/rtc6226/radio-rtc6226-common.c index d3020e201eba..2766bfe0c88d 100644 --- a/drivers/media/radio/rtc6226/radio-rtc6226-common.c +++ b/drivers/media/radio/rtc6226/radio-rtc6226-common.c @@ -312,6 +312,7 @@ void rtc6226_scan(struct work_struct *work) struct kfifo *data_b; int len = 0; u32 
next_freq_khz; + u8 factor; int retval = 0; int i, rssi; @@ -427,7 +428,29 @@ void rtc6226_scan(struct work_struct *work) TUNE_STEP_SIZE) == next_freq_khz) { FMDERR("%s Seek one more time if lower freq is valid\n", __func__); - retval = rtc6226_set_seek(radio, SRCH_UP, WRAP_ENABLE); + // Tuned to band low limit + chan spacing then seek + // down with bandlimit config + if (radio->space == 0) + factor = 20; + else if (radio->space == 1) + factor = 10; + else + factor = 5; + retval = rtc6226_set_freq(radio, + (radio->recv_conf.band_low_limit + factor) + * TUNE_STEP_SIZE); + if (retval < 0) + goto seek_tune_fail; + /* wait for tune to complete. */ + if (!wait_for_completion_timeout(&radio->completion, + msecs_to_jiffies(TUNE_TIMEOUT_MSEC))) { + FMDERR("In %s, didn't receive STC for tune\n", + __func__); + rtc6226_q_event(radio, RTC6226_EVT_ERROR); + return; + } + retval = rtc6226_set_seek(radio, SRCH_DOWN, + WRAP_DISABLE); if (retval < 0) { FMDERR("%s seek fail %d\n", __func__, retval); goto seek_tune_fail; @@ -454,10 +477,13 @@ void rtc6226_scan(struct work_struct *work) rssi = radio->registers[RSSI] & RSSI_RSSI; FMDBG("%s freq %d, rssi %d rssi threshold %d\n", __func__, next_freq_khz, rssi, radio->rssi_th); - if ((radio->recv_conf.band_low_limit * - TUNE_STEP_SIZE) == - next_freq_khz && - rssi >= radio->rssi_th) { + // Reach to band low limit, + // if SF is 0, means frequency at low limit is a + // commercial station Or a invalid channel + if (((radio->registers[STATUS] & + STATUS_SF) == 0) || + (radio->recv_conf.band_high_limit * + TUNE_STEP_SIZE) == next_freq_khz) { FMDERR("lower band freq is valid\n"); rtc6226_q_event(radio, RTC6226_EVT_TUNE_SUCC); diff --git a/drivers/media/radio/rtc6226/radio-rtc6226.h b/drivers/media/radio/rtc6226/radio-rtc6226.h index 8410e88b5687..cfaf13bc91b4 100644 --- a/drivers/media/radio/rtc6226/radio-rtc6226.h +++ b/drivers/media/radio/rtc6226/radio-rtc6226.h @@ -200,7 +200,7 @@ #define SCAN_PENDING 3 #define START_SCAN 1 #define TUNE_TIMEOUT_MSEC 3000 -#define SEEK_TIMEOUT_MSEC 15000 +#define SEEK_TIMEOUT_MSEC 30000 #define RTC6226_MIN_SRCH_MODE 0x00 #define RTC6226_MAX_SRCH_MODE 0x02 diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index ffffb66d51a0..ce64c58aceb3 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -26,6 +26,8 @@ #include "uvcvideo.h" +#define CONFIG_DMA_NONCOHERENT 1 + /* ------------------------------------------------------------------------ * UVC Controls */ diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 8914ddb693c2..fc40ca9af080 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -1456,7 +1456,8 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg) if (!file->f_op->unlocked_ioctl) return ret; - if (_IOC_TYPE(cmd) == 'V' && _IOC_NR(cmd) < BASE_VIDIOC_PRIVATE) + if (((_IOC_TYPE(cmd) == 'V') || (_IOC_TYPE(cmd) == 'U')) && + _IOC_NR(cmd) < BASE_VIDIOC_PRIVATE) ret = do_video_ioctl(file, cmd, arg); else if (vdev->fops->compat_ioctl32) ret = vdev->fops->compat_ioctl32(file, cmd, arg); diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 1a25bf1dfe1b..be5c1e8095aa 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -77,4 +77,3 @@ obj-$(CONFIG_QTI_MAXIM_FAN_CONTROLLER) += max31760.o obj-$(CONFIG_QTI_XR_SMRTVWR_MISC) += qxr-stdalonevwr.o obj-$(CONFIG_FPR_FPC) += fpr_FingerprintCard/ obj-y += 
qrc/ -obj-$(CONFIG_AW862XX_HAPTIC) += aw862xx_haptic/ diff --git a/drivers/misc/max31760.c b/drivers/misc/max31760.c index 2479583fd2f5..3d7b1726faf9 100644 --- a/drivers/misc/max31760.c +++ b/drivers/misc/max31760.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */ #include @@ -121,7 +121,7 @@ static ssize_t fan_show(struct device *dev, struct device_attribute *attr, static ssize_t fan_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - long val; + long val, val1; struct max31760 *pdata; pdata = dev_get_drvdata(dev); @@ -131,8 +131,15 @@ static ssize_t fan_store(struct device *dev, struct device_attribute *attr, } kstrtol(buf, 0, &val); - pr_debug("%s, count:%d val:%lx, buf:%s\n", - __func__, count, val, buf); + val1 = val >> 8; + pr_debug("%s, count:%d val:%lx, val1:%lx, buf:%s\n", + __func__, count, val, val1, buf); + if (val1 == 0x50) { + val1 = val & 0xFF; + pr_debug("%s, reg value val1:%lx\n", __func__, val1); + max31760_i2c_reg_set(pdata, 0x50, val1); + return count; + } if (val == 0xff) { turn_gpio(pdata, false); diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index 5d2c707b27cc..1cc9db100d59 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -91,7 +91,7 @@ #define TWO 2 #define QSEECOM_UFS_ICE_CE_NUM 10 #define QSEECOM_SDCC_ICE_CE_NUM 20 -#define QSEECOM_ICE_FDE_KEY_INDEX 0 +#define QSEECOM_ICE_FDE_KEY_INDEX 31 #define PHY_ADDR_4G (1ULL<<32) @@ -2623,12 +2623,6 @@ static int __qseecom_reentrancy_process_incomplete_cmd( case QSEOS_RESULT_CBACK_REQUEST: pr_warn("get cback req app_id = %d, resp->data = %d\n", data->client.app_id, resp->data); - resp->resp_type = SMCINVOKE_RESULT_INBOUND_REQ_NEEDED; - /* We are here because scm call sent to TZ has requested - * for another callback request. This call has been a - * success and hence setting result = 0 - */ - resp->result = 0; break; default: pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n", @@ -3710,8 +3704,8 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data, (uint32_t)(__qseecom_uvirt_to_kphys( data, (uintptr_t)req->resp_buf)); } else { - send_data_req.req_ptr = (uint32_t)req->cmd_req_buf; - send_data_req.rsp_ptr = (uint32_t)req->resp_buf; + send_data_req.req_ptr = (uintptr_t)req->cmd_req_buf; + send_data_req.rsp_ptr = (uintptr_t)req->resp_buf; } send_data_req.req_len = req->cmd_req_len; diff --git a/drivers/misc/qxr-stdalonevwr.c b/drivers/misc/qxr-stdalonevwr.c index c3fd763ae6c6..b69caf7e2b9e 100644 --- a/drivers/misc/qxr-stdalonevwr.c +++ b/drivers/misc/qxr-stdalonevwr.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
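A side note on the qseecom change above, where the (uint32_t) casts of the request and response buffer pointers become (uintptr_t): on a 64-bit kernel a 32-bit cast silently drops the upper half of the address. A tiny standalone illustration (ordinary userspace C, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        long x = 0;
        void *p = &x;

        /* a 32-bit cast keeps only the low half of a 64-bit address */
        printf("full:      %#lx\n", (unsigned long)(uintptr_t)p);
        printf("truncated: %#x\n", (uint32_t)(uintptr_t)p);
        return 0;
    }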
*/ #include #include @@ -15,7 +15,8 @@ struct qxr_stdalonevwr { struct platform_device *pdev; struct regulator *reg_imu; - int ndi_5v_en; + struct regulator *reg_dmic; + /*int ndi_5v_en;*/ bool initDone; }; @@ -24,7 +25,6 @@ static struct qxr_stdalonevwr *pdata; static int qxr_stdalonevwr_allocate_res(void) { int rc = -EINVAL; - bool gpioEnabled = false; if (pdata->initDone) { pr_debug("%s init is done already\n", __func__); @@ -35,27 +35,20 @@ static int qxr_stdalonevwr_allocate_res(void) if (!IS_ERR(pdata->reg_imu)) { regulator_set_load(pdata->reg_imu, 600000); rc = regulator_enable(pdata->reg_imu); - if (rc < 0) { + if (rc < 0) pr_err("%s IMU rail pm8150a_l11 failed\n", __func__); - devm_regulator_put(pdata->reg_imu); - } } - if (gpio_is_valid(pdata->ndi_5v_en)) { - rc = gpio_request(pdata->ndi_5v_en, "ndi_5v_en"); - if (!rc) { - rc = gpio_direction_output(pdata->ndi_5v_en, 0); - if (!rc) { - gpio_set_value(pdata->ndi_5v_en, 1); - gpioEnabled = true; - msleep(20); - } - } - } - if (!gpioEnabled) { - pr_err("%s NDI_5V_EN gpio failed to allocate\n", __func__); - gpio_free(pdata->ndi_5v_en); + /* Oracle MIC BIAS Voltage regulator */ + pdata->reg_dmic = devm_regulator_get(&pdata->pdev->dev, "pm8150_l10"); + if (!IS_ERR(pdata->reg_dmic)) { + regulator_set_load(pdata->reg_dmic, 600000); + rc = regulator_enable(pdata->reg_dmic); + if (rc < 0) + pr_err("%s Oracle MIC BIAS reg pm8150_l10 failed\n", + __func__); } + pdata->initDone = true; pr_debug("%s rc:%d\n", __func__, rc); return rc; @@ -64,11 +57,10 @@ static int qxr_stdalonevwr_allocate_res(void) static void qxr_stdalonevwr_free_res(void) { if (pdata->initDone) { - if (pdata->reg_imu) { + if (pdata->reg_imu) regulator_disable(pdata->reg_imu); - devm_regulator_put(pdata->reg_imu); - } - gpio_free(pdata->ndi_5v_en); + if (pdata->reg_dmic) + regulator_disable(pdata->reg_dmic); pdata->initDone = false; } pr_debug("%s initDone:%d\n", __func__, pdata->initDone); @@ -82,7 +74,7 @@ static int qxr_stdalonevwr_probe(struct platform_device *pdev) return -ENOMEM; pdata->pdev = pdev; - pdata->ndi_5v_en = 1237; + /*pdata->ndi_5v_en = 1237;*/ pdata->initDone = false; qxr_stdalonevwr_allocate_res(); pr_info("%s done\n", __func__); diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index a78d2efc3b9b..e23a6bdbb9cb 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -205,6 +205,18 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) if (rocr && !mmc_host_is_spi(host)) *rocr = cmd.resp[0]; + /* + * As per design, internal CRC error flag will be cleared after 3 + * MCLK once clear command issued. Since the MCLK will be running + * at 400KHz during initialization, design is taking max of 7.5us + * to clear the status. So if the CMD_CRC_CHECK_EN bit is enabled + * before the source is cleared, CRC INTR bit will be set in the 17th + * bit of INTR status register. So it expected to issue the next + * command and enable CMD_CRC_CHK_EN after 7.5us (3*MCLK) delay. + */ + if (!err) + udelay(8); + return err; } diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c index 47056d8d1bac..5accfb354612 100644 --- a/drivers/mmc/core/sd_ops.c +++ b/drivers/mmc/core/sd_ops.c @@ -180,6 +180,18 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) if (rocr && !mmc_host_is_spi(host)) *rocr = cmd.resp[0]; + /* + * As per design, internal CRC error flag will be cleared after 3 + * MCLK once clear command issued. 
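For reference, the arithmetic behind the 7.5 us figure quoted in these two hunks (mmc_ops.c above and sd_ops.c here): one MCLK period at the 400 kHz initialization clock is 1 / 400 kHz = 2.5 us, so 3 MCLK = 3 x 2.5 us = 7.5 us, and udelay(8) simply rounds that up to the next whole microsecond.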
Since the MCLK will be running + * at 400KHz during initialization, design is taking max of 7.5us + * to clear the status. So if the CMD_CRC_CHECK_EN bit is enabled + * before the source is cleared, CRC INTR bit will be set in the 17th + * bit of INTR status register. So it expected to issue the next + * command and enable CMD_CRC_CHK_EN after 7.5us (3*MCLK) delay. + */ + if (!err) + udelay(8); + return err; } diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index ef31feaac8c9..393bc94b48e6 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -1086,6 +1086,8 @@ static int mmc_sdio_resume(struct mmc_host *host) host->ops->enable_sdio_irq(host, 1); } + mmc_retune_needed(host); + out: mmc_log_string(host, "Exit err: %d\n", err); mmc_release_host(host); diff --git a/drivers/mmc/host/cqhci-crypto-qti.c b/drivers/mmc/host/cqhci-crypto-qti.c index 67a9747b2230..cca5e46d07b4 100644 --- a/drivers/mmc/host/cqhci-crypto-qti.c +++ b/drivers/mmc/host/cqhci-crypto-qti.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2020, Linux Foundation. All rights reserved. + * Copyright (c) 2020-2021, Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -290,7 +290,8 @@ int cqhci_crypto_qti_init_crypto(struct cqhci_host *host, if (!cqhci_ice_memres) { pr_debug("%s ICE not supported\n", __func__); host->icemmio = NULL; - return PTR_ERR(cqhci_ice_memres); + host->caps &= ~CQHCI_CAP_CRYPTO_SUPPORT; + return err; } host->icemmio = devm_ioremap(&msm_host->pdev->dev, @@ -370,6 +371,9 @@ int cqhci_crypto_qti_prep_desc(struct cqhci_host *host, struct mmc_request *mrq, if (!(atomic_read(&keycache) & (1 << bc->bc_keyslot))) { if (bc->is_ext4) cmdq_use_default_du_size = true; + else + cmdq_use_default_du_size = false; + ret = cqhci_crypto_qti_keyslot_program(host->ksm, bc->bc_key, bc->bc_keyslot); if (ret) { diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c index b89f2b795fe3..71a3ef820dc3 100644 --- a/drivers/mmc/host/cqhci.c +++ b/drivers/mmc/host/cqhci.c @@ -281,7 +281,6 @@ static void __cqhci_enable(struct cqhci_host *cq_host) cqcfg |= CQHCI_TASK_DESC_SZ; if (cqhci_host_is_crypto_supported(cq_host)) { - cqhci_crypto_enable(cq_host); cqcfg |= CQHCI_ICE_ENABLE; /* For SDHC v5.0 onwards, ICE 3.0 specific registers are added * in CQ register space, due to which few CQ registers are @@ -325,9 +324,6 @@ static void __cqhci_disable(struct cqhci_host *cq_host) { u32 cqcfg; - if (cqhci_host_is_crypto_supported(cq_host)) - cqhci_crypto_disable(cq_host); - cqcfg = cqhci_readl(cq_host, CQHCI_CFG); cqcfg &= ~CQHCI_ENABLE; cqhci_writel(cq_host, cqcfg, CQHCI_CFG); @@ -373,8 +369,14 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card) if (err) return err; + if (cqhci_host_is_crypto_supported(cq_host)) + cqhci_crypto_enable(cq_host); + __cqhci_enable(cq_host); + if (cq_host->ops->enhanced_strobe_mask) + cq_host->ops->enhanced_strobe_mask(mmc, true); + cq_host->enabled = true; #ifdef DEBUG @@ -428,8 +430,14 @@ static void cqhci_disable(struct mmc_host *mmc) cqhci_off(mmc); + if (cqhci_host_is_crypto_supported(cq_host)) + cqhci_crypto_disable(cq_host); + __cqhci_disable(cq_host); + if (cq_host->ops->enhanced_strobe_mask) + cq_host->ops->enhanced_strobe_mask(mmc, false); + dmam_free_coherent(mmc_dev(mmc), cq_host->data_size, cq_host->trans_desc_base, cq_host->trans_desc_dma_base); diff --git a/drivers/mmc/host/cqhci.h 
b/drivers/mmc/host/cqhci.h index 8c54e978c405..6ad4d2a6301c 100644 --- a/drivers/mmc/host/cqhci.h +++ b/drivers/mmc/host/cqhci.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -339,6 +339,7 @@ struct cqhci_host_ops { u32 (*read_l)(struct cqhci_host *host, int reg); void (*enable)(struct mmc_host *mmc); void (*disable)(struct mmc_host *mmc, bool recovery); + void (*enhanced_strobe_mask)(struct mmc_host *mmc, bool set); }; static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg) diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 09fa4dd7ad7c..deb39f7d92f7 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. * * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform * driver source file @@ -2201,6 +2201,23 @@ static int sdhci_msm_dt_parse_hsr_info(struct device *dev, return ret; } +int sdhci_msm_parse_reset_data(struct device *dev, + struct sdhci_msm_host *msm_host) +{ + int ret = 0; + + msm_host->core_reset = devm_reset_control_get(dev, + "core_reset"); + if (IS_ERR(msm_host->core_reset)) { + ret = PTR_ERR(msm_host->core_reset); + dev_err(dev, "core_reset unavailable,err = %d\n", + ret); + msm_host->core_reset = NULL; + } + + return ret; +} + static int sdhci_msm_parse_regulator_info(struct device *dev, struct sdhci_msm_pltfm_data *pdata) { @@ -2226,34 +2243,11 @@ static int sdhci_msm_parse_regulator_info(struct device *dev, goto out; } - if (sdhci_msm_dt_parse_vreg_info(dev, - &pdata->vreg_data->vdd_io_bias_data, - "vdd-io-bias")) { - dev_err(dev, "No vdd-io-bias regulator data\n"); - } - return ret; out: return -EINVAL; } -int sdhci_msm_parse_reset_data(struct device *dev, - struct sdhci_msm_host *msm_host) -{ - int ret = 0; - - msm_host->core_reset = devm_reset_control_get(dev, - "core_reset"); - if (IS_ERR(msm_host->core_reset)) { - ret = PTR_ERR(msm_host->core_reset); - dev_err(dev, "core_reset unavailable,err = %d\n", - ret); - msm_host->core_reset = NULL; - } - - return ret; -} - /* Parse platform data */ static struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev, @@ -2349,6 +2343,7 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev, MMC_SCALING_LOWER_DDR52_MODE; } + /* Add func to avoid merge warnings*/ if (sdhci_msm_parse_regulator_info(dev, pdata)) goto out; @@ -2392,6 +2387,9 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev, pdata->largeaddressbus = of_property_read_bool(np, "qcom,large-address-bus"); + msm_host->vbias_skip_wa = + of_property_read_bool(np, "qcom,vbias-skip-wa"); + sdhci_msm_pm_qos_parse(dev, pdata); if (of_get_property(np, "qcom,core_3_0v_support", NULL)) @@ -2553,10 +2551,46 @@ void sdhci_msm_cqe_sdhci_dumpregs(struct mmc_host *mmc) sdhci_dumpregs(host); } +/* + * sdhci_msm_enhanced_strobe_mask :- + * Before running CMDQ transfers in HS400 Enhanced Strobe mode, + * SW should write 3 to + * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register. + * The default reset value of this register is 2. 
+ */ +static void sdhci_msm_enhanced_strobe_mask(struct mmc_host *mmc, bool set) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + const struct sdhci_msm_offset *msm_host_offset = + msm_host->offset; + + if (!msm_host->enhanced_strobe || + !mmc_card_strobe(msm_host->mmc->card)) { + pr_debug("%s: host/card does not support hs400 enhanced strobe\n", + mmc_hostname(host->mmc)); + return; + } + + if (set) { + writel_relaxed((readl_relaxed(host->ioaddr + + msm_host_offset->CORE_VENDOR_SPEC3) + | CORE_CMDEN_HS400_INPUT_MASK_CNT), + host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3); + } else { + writel_relaxed((readl_relaxed(host->ioaddr + + msm_host_offset->CORE_VENDOR_SPEC3) + & ~CORE_CMDEN_HS400_INPUT_MASK_CNT), + host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3); + } +} + static const struct cqhci_host_ops sdhci_msm_cqhci_ops = { .enable = sdhci_msm_cqe_enable, .disable = sdhci_msm_cqe_disable, .dumpregs = sdhci_msm_cqe_sdhci_dumpregs, + .enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask, }; #ifdef CONFIG_MMC_CQHCI @@ -2863,7 +2897,7 @@ static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata, { int ret = 0, i; struct sdhci_msm_slot_reg_data *curr_slot; - struct sdhci_msm_reg_data *vreg_table[3]; + struct sdhci_msm_reg_data *vreg_table[2]; curr_slot = pdata->vreg_data; if (!curr_slot) { @@ -2874,7 +2908,6 @@ static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata, vreg_table[0] = curr_slot->vdd_data; vreg_table[1] = curr_slot->vdd_io_data; - vreg_table[2] = curr_slot->vdd_io_bias_data; for (i = 0; i < ARRAY_SIZE(vreg_table); i++) { if (vreg_table[i]) { @@ -2897,8 +2930,7 @@ static int sdhci_msm_vreg_init(struct device *dev, { int ret = 0; struct sdhci_msm_slot_reg_data *curr_slot; - struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg, - *curr_vdd_io_bias_reg; + struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg; curr_slot = pdata->vreg_data; if (!curr_slot) @@ -2906,11 +2938,10 @@ static int sdhci_msm_vreg_init(struct device *dev, curr_vdd_reg = curr_slot->vdd_data; curr_vdd_io_reg = curr_slot->vdd_io_data; - curr_vdd_io_bias_reg = curr_slot->vdd_io_bias_data; if (!is_init) /* Deregister all regulators from regulator framework */ - goto vdd_io_bias_reg_deinit; + goto vdd_io_reg_deinit; /* * Get the regulator handle from voltage regulator framework @@ -2926,19 +2957,11 @@ static int sdhci_msm_vreg_init(struct device *dev, if (ret) goto vdd_reg_deinit; } - if (curr_vdd_io_bias_reg) { - ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_bias_reg); - if (ret) - goto vdd_io_reg_deinit; - } if (ret) dev_err(dev, "vreg reset failed (%d)\n", ret); goto out; -vdd_io_bias_reg_deinit: - if (curr_vdd_io_bias_reg) - sdhci_msm_vreg_deinit_reg(curr_vdd_io_bias_reg); vdd_io_reg_deinit: if (curr_vdd_io_reg) sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg); @@ -3110,6 +3133,45 @@ static int sdhci_msm_clear_pwrctl_status(struct sdhci_host *host, u8 value) return ret; } +static void sdhci_msm_vbias_bypass_wa(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + const struct sdhci_msm_offset *msm_host_offset = + msm_host->offset; + struct mmc_host *mmc = host->mmc; + u32 config; + int card_detect = 0; + + if (mmc->ops->get_cd) + card_detect = mmc->ops->get_cd(mmc); + + config = readl_relaxed(host->ioaddr + + msm_host_offset->CORE_VENDOR_SPEC); + /* + * Following cases are covered. + * 1. 
Card Probe + * 2. Card suspend + * 3. Card Resume + * 4. Card remove + */ + if ((mmc->card == NULL) && card_detect && + (mmc->ios.power_mode == MMC_POWER_UP)) + config &= ~CORE_IO_PAD_PWR_SWITCH; + else if (mmc->card && card_detect && + (mmc->ios.power_mode == MMC_POWER_OFF)) + config |= CORE_IO_PAD_PWR_SWITCH; + else if (mmc->card && card_detect && + (mmc->ios.power_mode == MMC_POWER_UP)) + config &= ~CORE_IO_PAD_PWR_SWITCH; + else if (mmc->card == NULL && !card_detect && + (mmc->ios.power_mode == MMC_POWER_OFF)) + config |= CORE_IO_PAD_PWR_SWITCH; + + writel_relaxed(config, host->ioaddr + + msm_host_offset->CORE_VENDOR_SPEC); +} + static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data) { struct sdhci_host *host = (struct sdhci_host *)data; @@ -3122,8 +3184,6 @@ static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data) int ret = 0; int pwr_state = 0, io_level = 0; unsigned long flags; - struct sdhci_msm_reg_data *vreg_io_bias = - msm_host->pdata->vreg_data->vdd_io_bias_data; irq_status = sdhci_msm_readb_relaxed(host, msm_host_offset->CORE_PWRCTL_STATUS); @@ -3170,8 +3230,6 @@ static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data) if (irq_status & CORE_PWRCTL_IO_LOW) { /* Switch voltage Low */ ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0); - if (!ret && vreg_io_bias) - ret = sdhci_msm_vreg_disable(vreg_io_bias); if (ret) irq_ack |= CORE_PWRCTL_IO_FAIL; else @@ -3202,17 +3260,21 @@ static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data) mb(); if ((io_level & REQ_IO_HIGH) && (msm_host->caps_0 & CORE_3_0V_SUPPORT) && - !msm_host->core_3_0v_support) - writel_relaxed((readl_relaxed(host->ioaddr + - msm_host_offset->CORE_VENDOR_SPEC) & - ~CORE_IO_PAD_PWR_SWITCH), host->ioaddr + - msm_host_offset->CORE_VENDOR_SPEC); - else if ((io_level & REQ_IO_LOW) || - (msm_host->caps_0 & CORE_1_8V_SUPPORT)) + !msm_host->core_3_0v_support) { + if (msm_host->vbias_skip_wa) + sdhci_msm_vbias_bypass_wa(host); + else + writel_relaxed((readl_relaxed(host->ioaddr + + msm_host_offset->CORE_VENDOR_SPEC) & + ~CORE_IO_PAD_PWR_SWITCH), host->ioaddr + + msm_host_offset->CORE_VENDOR_SPEC); + } else if ((io_level & REQ_IO_LOW) || + (msm_host->caps_0 & CORE_1_8V_SUPPORT)) { writel_relaxed((readl_relaxed(host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC) | CORE_IO_PAD_PWR_SWITCH), host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC); + } /* * SDHC has core_mem and hc_mem device memory and these memory * addresses do not fall within 1KB region. Hence, any update to @@ -4360,40 +4422,6 @@ static void sdhci_msm_reset(struct sdhci_host *host, u8 mask) cqhci_suspend(host->mmc); } -/* - * sdhci_msm_enhanced_strobe_mask :- - * Before running CMDQ transfers in HS400 Enhanced Strobe mode, - * SW should write 3 to - * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register. - * The default reset value of this register is 2. 
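The four branches of sdhci_msm_vbias_bypass_wa() added above condense to the following state table (a restatement of the code, not additional behaviour):

    mmc->card   get_cd   ios.power_mode   CORE_IO_PAD_PWR_SWITCH
    NULL        1        MMC_POWER_UP     cleared   (card probe)
    set         1        MMC_POWER_OFF    set       (card suspend)
    set         1        MMC_POWER_UP     cleared   (card resume)
    NULL        0        MMC_POWER_OFF    set       (card remove)

Any combination not listed writes the register back unchanged.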
- */ -static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_msm_host *msm_host = pltfm_host->priv; - const struct sdhci_msm_offset *msm_host_offset = - msm_host->offset; - - if (!msm_host->enhanced_strobe || - !mmc_card_strobe(msm_host->mmc->card)) { - pr_debug("%s: host/card does not support hs400 enhanced strobe\n", - mmc_hostname(host->mmc)); - return; - } - - if (set) { - writel_relaxed((readl_relaxed(host->ioaddr + - msm_host_offset->CORE_VENDOR_SPEC3) - | CORE_CMDEN_HS400_INPUT_MASK_CNT), - host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3); - } else { - writel_relaxed((readl_relaxed(host->ioaddr + - msm_host_offset->CORE_VENDOR_SPEC3) - & ~CORE_CMDEN_HS400_INPUT_MASK_CNT), - host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3); - } -} - static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -5059,7 +5087,6 @@ static struct sdhci_ops sdhci_msm_ops = { .set_bus_width = sdhci_set_bus_width, .reset = sdhci_msm_reset, .clear_set_dumpregs = sdhci_msm_clear_set_dumpregs, - .enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask, .reset_workaround = sdhci_msm_reset_workaround, .init = sdhci_msm_init, .pre_req = sdhci_msm_pre_req, @@ -5148,7 +5175,7 @@ static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host, * starts coming. */ if ((major == 1) && ((minor == 0x42) || (minor == 0x46) || - (minor == 0x49) || (minor >= 0x6b))) + (minor == 0x49) || (minor == 0x4D) || (minor >= 0x6b))) msm_host->use_14lpp_dll = true; /* Fake 3.0V support for SDIO devices which requires such voltage */ diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h index 0dd973cec8e1..bd2220785591 100644 --- a/drivers/mmc/host/sdhci-msm.h +++ b/drivers/mmc/host/sdhci-msm.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. 
* */ @@ -104,8 +104,6 @@ struct sdhci_msm_slot_reg_data { struct sdhci_msm_reg_data *vdd_data; /* keeps VDD IO regulator info */ struct sdhci_msm_reg_data *vdd_io_data; - /* Keeps VDD IO parent regulator info*/ - struct sdhci_msm_reg_data *vdd_io_bias_data; }; struct sdhci_msm_gpio { @@ -324,6 +322,7 @@ struct sdhci_msm_host { u32 ice_clk_rate; bool debug_mode_enabled; bool reg_store; + bool vbias_skip_wa; struct reset_control *core_reset; u32 minor; }; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index ca3fb4b5f155..5787a17605fd 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -3376,6 +3376,14 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) } else { pr_msg = true; } + + if (host->mmc->ops->get_cd && + !host->mmc->ops->get_cd(host->mmc)) { + pr_msg = false; + pr_err("%s: Got data error(%d) during card removal\n", + mmc_hostname(host->mmc), host->data->error); + } + if (pr_msg && __ratelimit(&host->dbg_dump_rs)) { pr_err("%s: data txfr (0x%08x) error: %d after %lld ms\n", mmc_hostname(host->mmc), intmask, diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 0e179f59a9a7..44f631a4b1a4 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -733,7 +733,6 @@ struct sdhci_ops { bool enable, u32 type); int (*enable_controller_clock)(struct sdhci_host *host); void (*clear_set_dumpregs)(struct sdhci_host *host, bool set); - void (*enhanced_strobe_mask)(struct sdhci_host *host, bool set); void (*dump_vendor_regs)(struct sdhci_host *host); void (*voltage_switch)(struct sdhci_host *host); int (*select_drive_strength)(struct sdhci_host *host, diff --git a/drivers/net/can/spi/mcp25xxfd.c b/drivers/net/can/spi/mcp25xxfd.c index 3ba46637ae75..32f1854dd575 100644 --- a/drivers/net/can/spi/mcp25xxfd.c +++ b/drivers/net/can/spi/mcp25xxfd.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2021, The Linux Foundation. All rights reserved. * * CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface * @@ -4148,7 +4148,6 @@ static void mcp25xxfd_debugfs_remove(struct mcp25xxfd_priv *priv) #else static void mcp25xxfd_debugfs_add(struct mcp25xxfd_priv *priv) { - return 0; } static void mcp25xxfd_debugfs_remove(struct mcp25xxfd_priv *priv) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index f239139647bc..16bb2b25c763 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. 
* * RMNET Data ingress/egress handler * @@ -370,7 +370,10 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, required_headroom = sizeof(struct rmnet_map_header); csum_type = 0; - if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) { + if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV3) { + additional_header_len = sizeof(struct rmnet_map_ul_csum_header); + csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV3; + } else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) { additional_header_len = sizeof(struct rmnet_map_ul_csum_header); csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4; } else if ((port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) || diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c index ea3bfeeceeb2..add4348d35fb 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c @@ -208,6 +208,31 @@ static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr) } } +static void +rmnet_map_ipv4_ul_csumv3_header(void *iphdr, + struct rmnet_map_ul_csum_header *ul_header, + struct sk_buff *skb) +{ + struct iphdr *ip4h = (struct iphdr *)iphdr; + __be16 *net_hdr = (__be16 *)ul_header, offset; + u16 *host_hdr = (u16 *)ul_header; + + offset = htons((u16)(skb_transport_header(skb) - + (unsigned char *)iphdr)); + ul_header->csum_start_offset = offset; + ul_header->csum_insert_offset = skb->csum_offset; + ul_header->csum_enabled = 1; + if (ip4h->protocol == IPPROTO_UDP) + ul_header->udp_ind = 1; + else + ul_header->udp_ind = 0; + + /* Changing remaining fields to network order */ + net_hdr[1] = htons(host_hdr[1]); + + skb->ip_summed = CHECKSUM_NONE; +} + static void rmnet_map_ipv4_ul_csum_header(void *iphdr, struct rmnet_map_ul_csum_header *ul_header, @@ -277,6 +302,32 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr, rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr); } + +static void +rmnet_map_ipv6_ul_csumv3_header(void *ip6hdr, + struct rmnet_map_ul_csum_header *ul_header, + struct sk_buff *skb) +{ + struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr; + __be16 *net_hdr = (__be16 *)ul_header, offset; + u16 *host_hdr = (u16 *)ul_header; + + offset = htons((u16)(skb_transport_header(skb) - + (unsigned char *)ip6hdr)); + ul_header->csum_start_offset = offset; + ul_header->csum_insert_offset = skb->csum_offset; + ul_header->csum_enabled = 1; + + if (ip6h->nexthdr == IPPROTO_UDP) + ul_header->udp_ind = 1; + else + ul_header->udp_ind = 0; + + /* Changing remaining fields to network order */ + net_hdr[1] = htons(host_hdr[1]); + + skb->ip_summed = CHECKSUM_NONE; +} #endif /* Adds MAP header to front of skb->data @@ -436,6 +487,51 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len) } EXPORT_SYMBOL(rmnet_map_checksum_downlink_packet); +void rmnet_map_v3_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev) +{ + struct rmnet_priv *priv = netdev_priv(orig_dev); + struct rmnet_map_ul_csum_header *ul_header; + void *iphdr; + + ul_header = (struct rmnet_map_ul_csum_header *) + skb_push(skb, sizeof(struct rmnet_map_ul_csum_header)); + + if (unlikely(!(orig_dev->features & + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))) + goto sw_csum; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + iphdr = (char *)ul_header + + sizeof(struct rmnet_map_ul_csum_header); + + if (skb->protocol == htons(ETH_P_IP)) { + rmnet_map_ipv4_ul_csumv3_header(iphdr, ul_header, skb); + priv->stats.csum_hw++; + return; + } else if (skb->protocol == 
htons(ETH_P_IPV6)) { +#if IS_ENABLED(CONFIG_IPV6) + rmnet_map_ipv6_ul_csumv3_header(iphdr, ul_header, skb); + priv->stats.csum_hw++; + return; +#else + priv->stats.csum_err_invalid_ip_version++; + goto sw_csum; +#endif + } else { + priv->stats.csum_err_invalid_ip_version++; + } + } + +sw_csum: + ul_header->csum_start_offset = 0; + ul_header->csum_insert_offset = 0; + ul_header->csum_enabled = 0; + ul_header->udp_ind = 0; + + priv->stats.csum_sw++; +} + void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb, struct net_device *orig_dev) { @@ -556,6 +652,9 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, int csum_type) { switch (csum_type) { + case RMNET_FLAGS_EGRESS_MAP_CKSUMV3: + rmnet_map_v3_checksum_uplink_packet(skb, orig_dev); + break; case RMNET_FLAGS_EGRESS_MAP_CKSUMV4: rmnet_map_v4_checksum_uplink_packet(skb, orig_dev); break; diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index e1acafed7ad9..7ac7755f60f5 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -1050,8 +1050,6 @@ static int wil_cfg80211_change_iface(struct wiphy *wiphy, compressed_rx_status = wil->use_compressed_rx_status; if (type == NL80211_IFTYPE_MONITOR) wil->use_compressed_rx_status = false; - else if (wdev->iftype == NL80211_IFTYPE_MONITOR) - wil->use_compressed_rx_status = true; /* do not reset FW when there are active VIFs, * because it can cause significant disruption diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 875281f710ff..a8eb58ce1559 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: ISC /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. */ #include @@ -313,6 +313,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) /* statistics */ memset(&sta->stats, 0, sizeof(sta->stats)); sta->stats.tx_latency_min_us = U32_MAX; + wil_sta_info_amsdu_init(sta); } static void _wil6210_disconnect_complete(struct wil6210_vif *vif, @@ -723,6 +724,13 @@ void wil_bcast_fini_all(struct wil6210_priv *wil) } } +void wil_sta_info_amsdu_init(struct wil_sta_info *sta) +{ + sta->amsdu_drop_sn = -1; + sta->amsdu_drop_tid = -1; + sta->amsdu_drop = 0; +} + int wil_priv_init(struct wil6210_priv *wil) { uint i; diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index d326f99a5ad9..e94c90e6b73a 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: ISC /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. - * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. 
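As a concrete example of the MAP v3 uplink checksum header filled in by rmnet_map_ipv4_ul_csumv3_header() above: for a CHECKSUM_PARTIAL TCP segment behind a 20-byte IPv4 header, skb_transport_header(skb) sits 20 bytes past the IP header and skb->csum_offset is 16 (the offset of the TCP checksum field), so the header is written with csum_start_offset = htons(20), csum_insert_offset = 16, csum_enabled = 1 and udp_ind = 0, after which the trailing htons() flips the second 16-bit word to network order; a UDP datagram would instead carry csum_offset 6 and udp_ind = 1.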
*/ #include @@ -98,7 +98,6 @@ int wil_set_capabilities(struct wil6210_priv *wil) set_bit(hw_capa_no_flash, wil->hw_capa); wil->use_enhanced_dma_hw = true; wil->use_rx_hw_reordering = true; - wil->use_compressed_rx_status = true; if (wil_ipa_offload()) /* IPA offload must use single MSI */ n_msi = 1; diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index e204a60c2af6..474361b1784e 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: ISC /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. */ #include @@ -935,7 +935,9 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid, dev_kfree_skb(skb); goto stats; } - } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) { + } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate && + /* pass EAPOL packets to local net stack only */ + (wil_skb_get_protocol(skb) != htons(ETH_P_PAE))) { if (mcast) { /* send multicast frames both to higher layers in * local net stack and back to the wireless medium @@ -1003,6 +1005,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) { int cid, security; struct wil6210_priv *wil = ndev_to_wil(ndev); + struct wil6210_vif *vif = ndev_to_vif(ndev); struct wil_net_stats *stats; wil->txrx_ops.get_netif_rx_params(skb, &cid, &security); @@ -1011,6 +1014,18 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) skb_orphan(skb); + /* pass only EAPOL packets as plaintext */ + if (vif->privacy && !security && + wil_skb_get_protocol(skb) != htons(ETH_P_PAE)) { + wil_dbg_txrx(wil, + "Rx drop plaintext frame with %d bytes in secure network\n", + skb->len); + dev_kfree_skb(skb); + ndev->stats.rx_dropped++; + stats->rx_dropped++; + return; + } + if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) { dev_kfree_skb(skb); ndev->stats.rx_dropped++; diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index f3b557bad76c..8184d5bef767 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. */ #ifndef WIL6210_TXRX_H @@ -625,6 +625,13 @@ static inline u8 *wil_skb_get_sa(struct sk_buff *skb) return eth->h_source; } +static inline __be16 wil_skb_get_protocol(struct sk_buff *skb) +{ + struct ethhdr *eth = (void *)skb->data; + + return eth->h_proto; +} + static inline bool wil_need_txstat(struct sk_buff *skb) { const u8 *da = wil_skb_get_da(skb); diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index c1fe8add5054..9960a14e95f8 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: ISC /* - * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021 The Linux Foundation. All rights reserved. 
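On the two wil6210 txrx.c changes above: ETH_P_PAE (0x888E) is the 802.1X/EAPOL ethertype, so the first hunk keeps an AP from reflecting EAPOL frames back onto the wireless medium (they go to the local stack only), and the second lets only EAPOL bypass the new "no plaintext in a protected network" rx check. A condensed, illustrative restatement of that second rule, not the driver's code:

    #include <linux/if_ether.h>
    #include <linux/types.h>

    /* illustrative only; the real check lives in wil_netif_rx_any() above */
    static bool example_rx_plaintext_allowed(bool vif_privacy, bool frame_encrypted,
                                             __be16 proto)
    {
        if (vif_privacy && !frame_encrypted && proto != htons(ETH_P_PAE))
            return false; /* drop plaintext in a protected BSS */
        return true;
    }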
*/ #include @@ -811,6 +811,104 @@ static int wil_tx_ring_modify_edma(struct wil6210_vif *vif, int ring_id, return -EOPNOTSUPP; } +static int wil_check_amsdu(struct wil6210_priv *wil, void *msg, int cid, + struct wil_ring_rx_data *rxdata, + struct sk_buff *skb) +{ + u8 *sa, *da; + int mid, tid; + u16 seq; + struct wil6210_vif *vif; + struct net_device *ndev; + struct wil_sta_info *sta; + + /* drop all WDS packets - not supported */ + if (wil_rx_status_get_ds_type(wil, msg) == WIL_RX_EDMA_DS_TYPE_WDS) { + wil_dbg_txrx(wil, "WDS is not supported"); + return -EAGAIN; + } + + /* check amsdu packets */ + sta = &wil->sta[cid]; + if (!wil_rx_status_is_basic_amsdu(msg)) { + if (sta->amsdu_drop_sn != -1) + wil_sta_info_amsdu_init(sta); + return 0; + } + + mid = wil_rx_status_get_mid(msg); + tid = wil_rx_status_get_tid(msg); + seq = le16_to_cpu(wil_rx_status_get_seq(wil, msg)); + vif = wil->vifs[mid]; + + if (unlikely(!vif)) { + wil_dbg_txrx(wil, "amsdu with invalid mid %d", mid); + return -EAGAIN; + } + + if (unlikely(sta->amsdu_drop)) { + if (sta->amsdu_drop_sn == seq && sta->amsdu_drop_tid == tid) { + wil_dbg_txrx(wil, "Drop AMSDU sub frame, sn=%d\n", + seq); + return -EAGAIN; + } + + /* previous AMSDU finished - clear drop amsdu flag */ + sta->amsdu_drop = 0; + } + + da = wil_skb_get_da(skb); + /* for all sub frame of the AMSDU, check that the SA or DA are valid + * compared with client/AP mac addresses + */ + switch (vif->wdev.iftype) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + /* check if the MSDU (a sub-frame of AMSDU) is multicast */ + if (is_multicast_ether_addr(da)) + return 0; + + /* check if the current AMSDU (MPDU) frame is a multicast. + * If so we have unicast sub frame as part of a multicast + * AMSDU. Current frame and all sub frames should be dropped. + */ + if (wil_rx_status_get_mcast(msg)) { + wil_dbg_txrx(wil, + "Found unicast sub frame in a multicast mpdu. Drop it\n"); + goto out; + } + + /* On client side, DA should be the client mac address */ + ndev = vif_to_ndev(vif); + if (ether_addr_equal(ndev->dev_addr, da)) + return 0; + break; + + case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_AP: + sa = wil_skb_get_sa(skb); + /* On AP side, the packet SA should be the client mac address. + * check also the DA is not rfc 1042 header + */ + if (ether_addr_equal(sta->addr, sa) && + !ether_addr_equal(rfc1042_header, da)) + return 0; + break; + default: + return 0; + } + +out: + sta->amsdu_drop_sn = seq; + sta->amsdu_drop_tid = tid; + sta->amsdu_drop = 1; + wil_dbg_txrx(wil, + "Drop AMSDU frame, sn=%d tid=%d. 
Drop this and all next sub frames\n", + seq, tid); + + return -EAGAIN; +} + /* This function is used only for RX SW reorder */ static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid, struct sk_buff *skb, struct wil_net_stats *stats) @@ -1159,6 +1257,12 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil, wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb_headlen(skb), false); + if (!wil->use_compressed_rx_status && + wil_check_amsdu(wil, msg, cid, rxdata, skb)) { + kfree_skb(skb); + goto again; + } + /* use radiotap header only if required */ if (ndev->type == ARPHRD_IEEE80211_RADIOTAP) wil_rx_add_radiotap_header_edma(wil, msg, skb); diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h index af6de2927ce7..75d658dc01bc 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.h +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: ISC */ -/* Copyright (c) 2012-2016,2018-2019, The Linux Foundation. +/* Copyright (c) 2012-2016,2018-2021, The Linux Foundation. * All rights reserved. */ @@ -47,6 +47,9 @@ #define WIL_RX_EDMA_MID_VALID_BIT BIT(22) +#define WIL_RX_EDMA_AMSDU_BASIC_MASK 0x1 +#define WIL_RX_EDMA_DS_TYPE_WDS 0x3 + #define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS 16 #define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_LEN 6 @@ -369,7 +372,7 @@ static inline u16 wil_rx_status_get_flow_id(void *msg) static inline u8 wil_rx_status_get_mcast(void *msg) { return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0, - 26, 26); + 25, 26); } /** @@ -457,6 +460,21 @@ static inline int wil_rx_status_get_fc1(struct wil6210_priv *wil, void *msg) 0, 5) << 2; } +static inline int wil_rx_status_get_ds_type(struct wil6210_priv *wil, void *msg) +{ + if (wil->use_compressed_rx_status) + return 0; + + return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d0, + 19, 20); +} + +static inline int wil_rx_status_is_basic_amsdu(void *msg) +{ + return (WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1, + 28, 29) == WIL_RX_EDMA_AMSDU_BASIC_MASK); +} + static inline __le16 wil_rx_status_get_seq(struct wil6210_priv *wil, void *msg) { if (wil->use_compressed_rx_status) diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index f82bdcad582a..135f781c08b1 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. 
*/ #ifndef __WIL6210_H__ @@ -797,6 +797,11 @@ struct wil_sta_info { struct wil_tid_crypto_rx group_crypto_rx; u8 aid; /* 1-254; 0 if unknown/not reported */ u8 fst_link_loss; + + /* amsdu frame related info to check if the frame is valid */ + int amsdu_drop_sn; + int amsdu_drop_tid; + u8 amsdu_drop; }; enum { @@ -1615,4 +1620,5 @@ int wmi_set_cqm_rssi_config(struct wil6210_priv *wil, int wmi_set_fst_config(struct wil6210_priv *wil, const u8 *bssid, u8 enabled, u8 entry_mcs, u8 exit_mcs, u8 slevel); int wmi_ut_update_txlatency_base(struct wil6210_priv *wil); +void wil_sta_info_amsdu_init(struct wil_sta_info *sta); #endif /* __WIL6210_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 862eedf76caa..e33020f4dba4 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: ISC /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. */ #include @@ -1102,6 +1102,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len) ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid); wil->sta[evt->cid].mid = vif->mid; wil->sta[evt->cid].status = wil_sta_conn_pending; + wil_sta_info_amsdu_init(&wil->sta[evt->cid]); rc = wil_ring_init_tx(vif, evt->cid); if (rc) { diff --git a/drivers/net/wireless/cnss2/bus.c b/drivers/net/wireless/cnss2/bus.c index 211451e536ee..3e7fb390d3d5 100644 --- a/drivers/net/wireless/cnss2/bus.c +++ b/drivers/net/wireless/cnss2/bus.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. */ #include "bus.h" #include "debug.h" @@ -435,6 +435,23 @@ int cnss_bus_update_status(struct cnss_plat_data *plat_priv, } } +int cnss_bus_update_uevent(struct cnss_plat_data *plat_priv, + enum cnss_driver_status status, void *data) +{ + if (!plat_priv) + return -ENODEV; + + switch (plat_priv->bus_type) { + case CNSS_BUS_PCI: + return cnss_pci_call_driver_uevent(plat_priv->bus_priv, + status, data); + default: + cnss_pr_err("Unsupported bus type: %d\n", + plat_priv->bus_type); + return -EINVAL; + } +} + int cnss_bus_is_device_down(struct cnss_plat_data *plat_priv) { if (!plat_priv) diff --git a/drivers/net/wireless/cnss2/bus.h b/drivers/net/wireless/cnss2/bus.h index 2be106881ad2..86b90eb6b6fa 100644 --- a/drivers/net/wireless/cnss2/bus.h +++ b/drivers/net/wireless/cnss2/bus.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. 
*/ #ifndef _CNSS_BUS_H #define _CNSS_BUS_H @@ -50,6 +50,8 @@ int cnss_bus_call_driver_modem_status(struct cnss_plat_data *plat_priv, int modem_current_status); int cnss_bus_update_status(struct cnss_plat_data *plat_priv, enum cnss_driver_status status); +int cnss_bus_update_uevent(struct cnss_plat_data *plat_priv, + enum cnss_driver_status status, void *data); int cnss_bus_is_device_down(struct cnss_plat_data *plat_priv); int cnss_bus_check_link_status(struct cnss_plat_data *plat_priv); int cnss_bus_recover_link_down(struct cnss_plat_data *plat_priv); diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c index adfcad97430b..eff54c674e43 100644 --- a/drivers/net/wireless/cnss2/main.c +++ b/drivers/net/wireless/cnss2/main.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */ #include #include @@ -41,6 +41,7 @@ #define CNSS_QMI_TIMEOUT_DEFAULT 10000 #define CNSS_BDF_TYPE_DEFAULT CNSS_BDF_ELF #define CNSS_TIME_SYNC_PERIOD_DEFAULT 900000 +#define CNSS_MIN_TIME_SYNC_PERIOD 2000 static struct cnss_plat_data *plat_env; @@ -606,6 +607,38 @@ int cnss_driver_event_post(struct cnss_plat_data *plat_priv, return ret; } +/** + * cnss_get_timeout - Get timeout for corresponding type. + * @plat_priv: Pointer to platform driver context. + * @cnss_timeout_type: Timeout type. + * + * Return: Timeout in milliseconds. + */ +unsigned int cnss_get_timeout(struct cnss_plat_data *plat_priv, + enum cnss_timeout_type timeout_type) +{ + unsigned int qmi_timeout = cnss_get_qmi_timeout(plat_priv); + + switch (timeout_type) { + case CNSS_TIMEOUT_QMI: + return qmi_timeout; + case CNSS_TIMEOUT_POWER_UP: + return (qmi_timeout << 2); + case CNSS_TIMEOUT_IDLE_RESTART: + return ((qmi_timeout << 1) + WLAN_WD_TIMEOUT_MS); + case CNSS_TIMEOUT_CALIBRATION: + return (qmi_timeout << 2); + case CNSS_TIMEOUT_WLAN_WATCHDOG: + return ((qmi_timeout << 1) + WLAN_WD_TIMEOUT_MS); + case CNSS_TIMEOUT_RDDM: + return CNSS_RDDM_TIMEOUT_MS; + case CNSS_TIMEOUT_RECOVERY: + return RECOVERY_TIMEOUT; + default: + return qmi_timeout; + } +} + unsigned int cnss_get_boot_timeout(struct device *dev) { struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); @@ -615,7 +648,7 @@ unsigned int cnss_get_boot_timeout(struct device *dev) return 0; } - return cnss_get_qmi_timeout(plat_priv); + return cnss_get_timeout(plat_priv, CNSS_TIMEOUT_QMI); } EXPORT_SYMBOL(cnss_get_boot_timeout); @@ -641,13 +674,14 @@ int cnss_power_up(struct device *dev) if (plat_priv->device_id == QCA6174_DEVICE_ID) goto out; - timeout = cnss_get_boot_timeout(dev); + timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_POWER_UP); reinit_completion(&plat_priv->power_up_complete); ret = wait_for_completion_timeout(&plat_priv->power_up_complete, - msecs_to_jiffies(timeout) << 2); + msecs_to_jiffies(timeout)); if (!ret) { - cnss_pr_err("Timeout waiting for power up to complete\n"); + cnss_pr_err("Timeout (%ums) waiting for power up to complete\n", + timeout); ret = -EAGAIN; goto out; } @@ -713,10 +747,9 @@ int cnss_idle_restart(struct device *dev) goto out; } - timeout = cnss_get_boot_timeout(dev); + timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_IDLE_RESTART); ret = wait_for_completion_timeout(&plat_priv->power_up_complete, - msecs_to_jiffies((timeout << 1) + - WLAN_WD_TIMEOUT_MS)); + msecs_to_jiffies(timeout)); if (plat_priv->power_up_error) { ret = plat_priv->power_up_error; 
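For reference, the timeout derivation that cnss_get_timeout() introduces above boils down to scaling the base QMI timeout per use case: power-up and calibration wait four times the QMI timeout, idle restart and the unload watchdog wait twice the QMI timeout plus the watchdog allowance, and RDDM/recovery keep their dedicated constants. A minimal standalone sketch of that mapping (the three millisecond constants below are assumed placeholders for illustration, not values taken from the driver headers):

/*
 * Standalone sketch of the cnss_get_timeout() scaling above. The three
 * millisecond constants are assumed placeholders, not the driver's values.
 */
#include <stdio.h>

#define WLAN_WD_TIMEOUT_MS	20000	/* assumed watchdog allowance */
#define CNSS_RDDM_TIMEOUT_MS	5000	/* assumed RDDM dump window */
#define RECOVERY_TIMEOUT	60000	/* assumed recovery window */

enum timeout_type {
	T_QMI, T_POWER_UP, T_IDLE_RESTART, T_CALIBRATION,
	T_WLAN_WATCHDOG, T_RDDM, T_RECOVERY,
};

static unsigned int scale_timeout(unsigned int qmi_ms, enum timeout_type type)
{
	switch (type) {
	case T_POWER_UP:
	case T_CALIBRATION:
		return qmi_ms << 2;				/* 4x base QMI timeout */
	case T_IDLE_RESTART:
	case T_WLAN_WATCHDOG:
		return (qmi_ms << 1) + WLAN_WD_TIMEOUT_MS;	/* 2x base + watchdog */
	case T_RDDM:
		return CNSS_RDDM_TIMEOUT_MS;
	case T_RECOVERY:
		return RECOVERY_TIMEOUT;
	case T_QMI:
	default:
		return qmi_ms;
	}
}

int main(void)
{
	printf("power up: %u ms\n", scale_timeout(10000, T_POWER_UP));
	printf("idle restart: %u ms\n", scale_timeout(10000, T_IDLE_RESTART));
	return 0;
}

With a 10 s QMI timeout this gives 40 s for power-up and calibration and 20 s plus the watchdog allowance for the idle-restart and unload waits, matching the shifts that were previously applied inline at each call site.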
clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state); @@ -726,7 +759,8 @@ int cnss_idle_restart(struct device *dev) } if (!ret) { - cnss_pr_err("Timeout waiting for idle restart to complete\n"); + cnss_pr_err("Timeout (%ums) waiting for idle restart to complete\n", + timeout); ret = -ETIMEDOUT; goto out; } @@ -750,6 +784,7 @@ EXPORT_SYMBOL(cnss_idle_restart); int cnss_idle_shutdown(struct device *dev) { struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + unsigned int timeout; int ret; if (!plat_priv) { @@ -769,10 +804,12 @@ int cnss_idle_shutdown(struct device *dev) goto skip_wait; reinit_completion(&plat_priv->recovery_complete); + timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY); ret = wait_for_completion_timeout(&plat_priv->recovery_complete, - msecs_to_jiffies(RECOVERY_TIMEOUT)); + msecs_to_jiffies(timeout)); if (!ret) { - cnss_pr_err("Timeout waiting for recovery to complete\n"); + cnss_pr_err("Timeout (%ums) waiting for recovery to complete\n", + timeout); CNSS_ASSERT(0); } @@ -1314,6 +1351,7 @@ EXPORT_SYMBOL(cnss_force_fw_assert); int cnss_force_collect_rddm(struct device *dev) { struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + unsigned int timeout; int ret = 0; if (!plat_priv) { @@ -1349,11 +1387,14 @@ int cnss_force_collect_rddm(struct device *dev) return ret; reinit_completion(&plat_priv->rddm_complete); - ret = wait_for_completion_timeout - (&plat_priv->rddm_complete, - msecs_to_jiffies(CNSS_RDDM_TIMEOUT_MS)); - if (!ret) + timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RDDM); + ret = wait_for_completion_timeout(&plat_priv->rddm_complete, + msecs_to_jiffies(timeout)); + if (!ret) { + cnss_pr_err("Timeout (%ums) waiting for RDDM to complete\n", + timeout); ret = -ETIMEDOUT; + } return ret; } @@ -2289,6 +2330,33 @@ static void cnss_unregister_bus_scale(struct cnss_plat_data *plat_priv) msm_bus_scale_unregister_client(bus_bw_info->bus_client); } +static ssize_t qtime_sync_period_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cnss_plat_data *plat_priv = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%u\n", + plat_priv->ctrl_params.time_sync_period); +} + +static ssize_t qtime_sync_period_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int qtime_sync_period = 0; + + if (sscanf(buf, "%du", &qtime_sync_period) != 1) { + cnss_pr_err("Invalid qtime sync sysfs command\n"); + return -EINVAL; + } + + if (qtime_sync_period >= CNSS_MIN_TIME_SYNC_PERIOD) + cnss_pci_update_qtime_sync_period(dev, qtime_sync_period); + + return count; +} + static ssize_t recovery_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -2379,11 +2447,13 @@ static ssize_t fs_ready_store(struct device *dev, static DEVICE_ATTR_WO(fs_ready); static DEVICE_ATTR_WO(shutdown); static DEVICE_ATTR_WO(recovery); +static DEVICE_ATTR_RW(qtime_sync_period); static struct attribute *cnss_attrs[] = { &dev_attr_fs_ready.attr, &dev_attr_shutdown.attr, &dev_attr_recovery.attr, + &dev_attr_qtime_sync_period.attr, NULL, }; diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h index 3eb58332f963..4bc74a3e6801 100644 --- a/drivers/net/wireless/cnss2/main.h +++ b/drivers/net/wireless/cnss2/main.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. 
*/ #ifndef _CNSS_MAIN_H #define _CNSS_MAIN_H @@ -84,6 +84,7 @@ struct cnss_pinctrl_info { struct pinctrl_state *wlan_en_active; struct pinctrl_state *wlan_en_sleep; int bt_en_gpio; + int sw_ctrl_gpio; }; #ifdef CONFIG_MSM_SUBSYSTEM_RESTART @@ -331,6 +332,16 @@ enum cnss_ce_index { CNSS_CE_COMMON, }; +enum cnss_timeout_type { + CNSS_TIMEOUT_QMI, + CNSS_TIMEOUT_POWER_UP, + CNSS_TIMEOUT_IDLE_RESTART, + CNSS_TIMEOUT_CALIBRATION, + CNSS_TIMEOUT_WLAN_WATCHDOG, + CNSS_TIMEOUT_RDDM, + CNSS_TIMEOUT_RECOVERY, +}; + struct cnss_plat_data { struct platform_device *plat_dev; void *bus_priv; @@ -364,6 +375,7 @@ struct cnss_plat_data { struct wlfw_rf_board_info board_info; struct wlfw_soc_info soc_info; struct wlfw_fw_version_info fw_version_info; + struct cnss_dev_mem_info dev_mem_info[CNSS_MAX_DEV_MEM_NUM]; char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN + 1]; u32 otp_version; u32 fw_mem_seg_len; @@ -466,5 +478,10 @@ int cnss_minidump_add_region(struct cnss_plat_data *plat_priv, int cnss_minidump_remove_region(struct cnss_plat_data *plat_priv, enum cnss_fw_dump_type type, int seg_no, void *va, phys_addr_t pa, size_t size); +unsigned int cnss_get_timeout(struct cnss_plat_data *plat_priv, + enum cnss_timeout_type); +int cnss_pci_update_qtime_sync_period(struct device *dev, + unsigned int qtime_sync_period); +int cnss_get_gpio_value(struct cnss_plat_data *plat_priv, int gpio_num); #endif /* _CNSS_MAIN_H */ diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c index 1d9f8a3b3dc3..5e59a7153541 100644 --- a/drivers/net/wireless/cnss2/pci.c +++ b/drivers/net/wireless/cnss2/pci.c @@ -43,6 +43,7 @@ #define DEFAULT_FW_FILE_NAME "amss.bin" #define FW_V2_FILE_NAME "amss20.bin" #define FW_V2_NUMBER 2 +#define DEVICE_MAJOR_VERSION_MASK 0xF #define WAKE_MSI_NAME "WAKE" @@ -129,6 +130,13 @@ static struct cnss_pci_reg qdss_csr[] = { { NULL }, }; +static struct cnss_pci_reg pci_scratch[] = { + { "PCIE_SCRATCH_0", PCIE_SCRATCH_0_SOC_PCIE_REG }, + { "PCIE_SCRATCH_1", PCIE_SCRATCH_1_SOC_PCIE_REG }, + { "PCIE_SCRATCH_2", PCIE_SCRATCH_2_SOC_PCIE_REG }, + { NULL }, +}; + static struct cnss_misc_reg wcss_reg_access_seq[] = { {0, QCA6390_GCC_DEBUG_CLK_CTL, 0}, {1, QCA6390_GCC_DEBUG_CLK_CTL, 0x802}, @@ -364,6 +372,31 @@ static struct cnss_misc_reg wlaon_reg_access_seq[] = { #define PCIE_REG_SIZE ARRAY_SIZE(pcie_reg_access_seq) #define WLAON_REG_SIZE ARRAY_SIZE(wlaon_reg_access_seq) +#if IS_ENABLED(CONFIG_PCI_MSM) +/** + * _cnss_pci_get_reg_dump() - Dump PCIe RC registers for debug + * @pci_priv: driver PCI bus context pointer + * @buf: destination buffer pointer + * @len: length of the buffer + * + * This function shall call corresponding PCIe root complex driver API + * to dump PCIe RC registers for debug purpose. 
+ * + * Return: 0 for success, negative value for error + */ +static int _cnss_pci_get_reg_dump(struct cnss_pci_data *pci_priv, + u8 *buf, u32 len) +{ + return msm_pcie_reg_dump(pci_priv->pci_dev, buf, len); +} +#else +static int _cnss_pci_get_reg_dump(struct cnss_pci_data *pci_priv, + u8 *buf, u32 len) +{ + return 0; +} +#endif /* CONFIG_PCI_MSM */ + int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv) { u16 device_id; @@ -371,7 +404,7 @@ int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv) if (pci_priv->pci_link_state == PCI_LINK_DOWN) { cnss_pr_dbg("%ps: PCIe link is in suspend state\n", (void *)_RET_IP_); - return -EIO; + return -EACCES; } if (pci_priv->pci_link_down_ind) { @@ -696,6 +729,35 @@ static int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up) return ret; } +static void cnss_pci_soc_scratch_reg_dump(struct cnss_pci_data *pci_priv) +{ + u32 reg_offset, val; + int i; + + switch (pci_priv->device_id) { + case QCA6490_DEVICE_ID: + break; + default: + return; + } + + if (in_interrupt() || irqs_disabled()) + return; + + if (cnss_pci_check_link_status(pci_priv)) + return; + + cnss_pr_dbg("Start to dump SOC Scratch registers\n"); + + for (i = 0; pci_scratch[i].name; i++) { + reg_offset = pci_scratch[i].offset; + if (cnss_pci_reg_read(pci_priv, reg_offset, &val)) + return; + cnss_pr_dbg("PCIE_SOC_REG_%s = 0x%x\n", + pci_scratch[i].name, val); + } +} + int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv) { int ret = 0; @@ -721,6 +783,9 @@ int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv) cnss_pr_err("Failed to set D3Hot, err = %d\n", ret); } + /* Always do PCIe L2 suspend during power off/PCIe link recovery */ + pci_priv->drv_connected_last = 0; + ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN); if (ret) goto out; @@ -812,6 +877,7 @@ int cnss_pci_recover_link_down(struct cnss_pci_data *pci_priv) jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT)); mhi_debug_reg_dump(pci_priv->mhi_ctrl); + cnss_pci_soc_scratch_reg_dump(pci_priv); return 0; } @@ -864,6 +930,17 @@ void cnss_pci_allow_l1(struct device *dev) } EXPORT_SYMBOL(cnss_pci_allow_l1); +static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv, + enum cnss_bus_event_type type, + void *data) +{ + struct cnss_bus_event bus_event; + + bus_event.etype = type; + bus_event.event_data = data; + cnss_pci_call_driver_uevent(pci_priv, CNSS_BUS_EVENT, &bus_event); +} + static void cnss_pci_handle_linkdown(struct cnss_pci_data *pci_priv) { struct cnss_plat_data *plat_priv = pci_priv->plat_priv; @@ -885,9 +962,18 @@ static void cnss_pci_handle_linkdown(struct cnss_pci_data *pci_priv) reinit_completion(&pci_priv->wake_event); + /* Notify MHI about link down */ + mhi_control_error(pci_priv->mhi_ctrl); + if (pci_dev->device == QCA6174_DEVICE_ID) disable_irq(pci_dev->irq); + /* Notify bus related event. Now for all supported chips. + * Here PCIe LINK_DOWN notification taken care. + * uevent buffer can be extended later, to cover more bus info. 
+ */ + cnss_pci_update_link_event(pci_priv, BUS_EVENT_PCI_LINK_DOWN, NULL); + cnss_fatal_err("PCI link down, schedule recovery\n"); cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN); } @@ -930,15 +1016,21 @@ EXPORT_SYMBOL(cnss_pci_link_down); int cnss_pci_get_reg_dump(struct device *dev, uint8_t *buffer, uint32_t len) { struct pci_dev *pci_dev = to_pci_dev(dev); + struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); - if (!pci_dev) { - cnss_pr_err("pci_dev is NULL\n"); - return -EINVAL; + if (!pci_priv) { + cnss_pr_err("pci_priv is NULL\n"); + return -ENODEV; } - cnss_pr_dbg("Get pci reg dump for hang data\n"); + if (pci_priv->pci_link_state == PCI_LINK_DOWN) { + cnss_pr_dbg("No PCIe reg dump since PCIe device is suspended(D3)\n"); + return -EACCES; + } - return msm_pcie_reg_dump(pci_dev, buffer, len); + cnss_pr_dbg("Start to get PCIe reg dump\n"); + + return _cnss_pci_get_reg_dump(pci_priv, buffer, len); } EXPORT_SYMBOL(cnss_pci_get_reg_dump); @@ -1471,6 +1563,24 @@ static void cnss_pci_stop_time_sync_update(struct cnss_pci_data *pci_priv) cancel_delayed_work_sync(&pci_priv->time_sync_work); } +int cnss_pci_update_qtime_sync_period(struct device *dev, + unsigned int qtime_sync_period) +{ + struct cnss_plat_data *plat_priv = dev_get_drvdata(dev); + struct cnss_pci_data *pci_priv = plat_priv->bus_priv; + + if (!plat_priv || !pci_priv) + return -ENODEV; + + cnss_pci_stop_time_sync_update(pci_priv); + plat_priv->ctrl_params.time_sync_period = qtime_sync_period; + cnss_pci_start_time_sync_update(pci_priv); + cnss_pr_dbg("WLAN qtime sync period %u\n", + plat_priv->ctrl_params.time_sync_period); + + return 0; +} + int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv) { int ret = 0; @@ -1685,6 +1795,7 @@ static void cnss_pci_dump_misc_reg(struct cnss_pci_data *pci_priv) return; mhi_debug_reg_dump(pci_priv->mhi_ctrl); + cnss_pci_soc_scratch_reg_dump(pci_priv); cnss_pci_misc_reg_dump(pci_priv, pci_priv->wcss_reg, pci_priv->wcss_reg_size, "wcss"); cnss_pci_misc_reg_dump(pci_priv, pci_priv->pcie_reg, @@ -1702,6 +1813,7 @@ static void cnss_pci_dump_mhi_reg(struct cnss_pci_data *pci_priv) return; mhi_debug_reg_dump(pci_priv->mhi_ctrl); + cnss_pci_soc_scratch_reg_dump(pci_priv); } static void cnss_pci_dump_shadow_reg(struct cnss_pci_data *pci_priv) @@ -1812,9 +1924,9 @@ static void cnss_pci_dump_qca6390_sram_mem(struct cnss_pci_data *pci_priv) sbl_log_size = (sbl_log_size > QCA6390_DEBUG_SBL_LOG_SRAM_MAX_SIZE ? QCA6390_DEBUG_SBL_LOG_SRAM_MAX_SIZE : sbl_log_size); - if (sbl_log_start < QCA6390_V2_SBL_DATA_START || - sbl_log_start > QCA6390_V2_SBL_DATA_END || - (sbl_log_start + sbl_log_size) > QCA6390_V2_SBL_DATA_END) + if (sbl_log_start < SRAM_START || + sbl_log_start > SRAM_END || + (sbl_log_start + sbl_log_size) > SRAM_END) goto out; cnss_pr_dbg("Dumping SBL log data\n"); @@ -1882,17 +1994,11 @@ static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv) sbl_log_size = (sbl_log_size > QCA6490_DEBUG_SBL_LOG_SRAM_MAX_SIZE ? 
QCA6490_DEBUG_SBL_LOG_SRAM_MAX_SIZE : sbl_log_size); - if (plat_priv->device_version.major_version == FW_V2_NUMBER) { - if (sbl_log_start < QCA6490_V2_SBL_DATA_START || - sbl_log_start > QCA6490_V2_SBL_DATA_END || - (sbl_log_start + sbl_log_size) > QCA6490_V2_SBL_DATA_END) - goto out; - } else { - if (sbl_log_start < QCA6490_V1_SBL_DATA_START || - sbl_log_start > QCA6490_V1_SBL_DATA_END || - (sbl_log_start + sbl_log_size) > QCA6490_V1_SBL_DATA_END) - goto out; - } + + if (sbl_log_start < SRAM_START || + sbl_log_start > SRAM_END || + (sbl_log_start + sbl_log_size) > SRAM_END) + goto out; cnss_pr_dbg("Dumping SBL log data"); for (i = 0; i < sbl_log_size; i += sizeof(val)) { @@ -1988,7 +2094,7 @@ static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv) int ret = 0; struct cnss_plat_data *plat_priv = pci_priv->plat_priv; unsigned int timeout; - int retry = 0; + int retry = 0, sw_ctrl_gpio = plat_priv->pinctrl_info.sw_ctrl_gpio; if (plat_priv->ramdump_info_v2.dump_data_valid) { cnss_pci_clear_dump_info(pci_priv); @@ -2005,6 +2111,8 @@ static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv) ret = cnss_resume_pci_link(pci_priv); if (ret) { + cnss_pr_dbg("Value of SW_CNTRL GPIO: %d\n", + cnss_get_gpio_value(plat_priv, sw_ctrl_gpio)); cnss_pr_err("Failed to resume PCI link, err = %d\n", ret); if (test_bit(IGNORE_PCI_LINK_FAILURE, &plat_priv->ctrl_params.quirks)) { @@ -2015,6 +2123,9 @@ static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv) if (ret == -EAGAIN && retry++ < POWER_ON_RETRY_MAX_TIMES) { cnss_power_off_device(plat_priv); cnss_pr_dbg("Retry to resume PCI link #%d\n", retry); + cnss_pr_dbg("Value of SW_CNTRL GPIO: %d\n", + cnss_get_gpio_value(plat_priv, + sw_ctrl_gpio)); msleep(POWER_ON_RETRY_DELAY_MS * retry); goto retry; } @@ -2024,7 +2135,7 @@ static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv) } cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false, false); - timeout = cnss_get_boot_timeout(&pci_priv->pci_dev->dev); + timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_QMI); ret = cnss_pci_start_mhi(pci_priv); if (ret) { @@ -2088,7 +2199,8 @@ static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv) if ((test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) || test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) || test_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state) || - test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) && + test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state) || + test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state)) && test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) { del_timer(&pci_priv->dev_rddm_timer); cnss_pci_collect_dump(pci_priv); @@ -2310,11 +2422,12 @@ int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops) cnss_pr_dbg("Start to wait for calibration to complete\n"); - timeout = cnss_get_boot_timeout(&pci_priv->pci_dev->dev); + timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_CALIBRATION); ret = wait_for_completion_timeout(&plat_priv->cal_complete, - msecs_to_jiffies(timeout) << 2); + msecs_to_jiffies(timeout)); if (!ret) { - cnss_pr_err("Timeout waiting for calibration to complete\n"); + cnss_pr_err("Timeout (%ums) waiting for calibration to complete\n", + timeout); if (!test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) { cnss_pci_dump_bl_sram_mem(pci_priv); CNSS_ASSERT(0); @@ -2362,12 +2475,12 @@ void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops) if (plat_priv->device_id == QCA6174_DEVICE_ID) goto skip_wait_power_up; - timeout = 
cnss_get_qmi_timeout(plat_priv); + timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_WLAN_WATCHDOG); ret = wait_for_completion_timeout(&plat_priv->power_up_complete, - msecs_to_jiffies((timeout << 1) + - WLAN_WD_TIMEOUT_MS)); + msecs_to_jiffies(timeout)); if (!ret) { - cnss_pr_err("Timeout waiting for driver power up to complete\n"); + cnss_pr_err("Timeout (%ums) waiting for driver power up to complete\n", + timeout); CNSS_ASSERT(0); } @@ -2377,10 +2490,12 @@ void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops) goto skip_wait_recovery; reinit_completion(&plat_priv->recovery_complete); + timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY); ret = wait_for_completion_timeout(&plat_priv->recovery_complete, - msecs_to_jiffies(RECOVERY_TIMEOUT)); + msecs_to_jiffies(timeout)); if (!ret) { - cnss_pr_err("Timeout waiting for recovery to complete\n"); + cnss_pr_err("Timeout (%ums) waiting for recovery to complete\n", + timeout); CNSS_ASSERT(0); } @@ -3328,8 +3443,7 @@ int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv) if (!fw_mem[i].va) { cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n", fw_mem[i].size, fw_mem[i].type); - - return -ENOMEM; + BUG(); } } } @@ -3752,6 +3866,8 @@ int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info) sizeof(info->fw_build_timestamp)); memcpy(&info->device_version, &plat_priv->device_version, sizeof(info->device_version)); + memcpy(&info->dev_mem_info, &plat_priv->dev_mem_info, + sizeof(info->dev_mem_info)); return 0; } @@ -4096,6 +4212,7 @@ static void cnss_pci_dump_registers(struct cnss_pci_data *pci_priv) return; mhi_debug_reg_dump(pci_priv->mhi_ctrl); + cnss_pci_soc_scratch_reg_dump(pci_priv); cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_COMMON); cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_09); cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_10); @@ -4186,8 +4303,8 @@ static void cnss_pci_remove_dump_seg(struct cnss_pci_data *pci_priv, cnss_minidump_remove_region(plat_priv, type, seg_no, va, pa, size); } -int cnss_call_driver_uevent(struct cnss_pci_data *pci_priv, - enum cnss_driver_status status, void *data) +int cnss_pci_call_driver_uevent(struct cnss_pci_data *pci_priv, + enum cnss_driver_status status, void *data) { struct cnss_uevent_data uevent_data; struct cnss_wlan_driver *driver_ops; @@ -4247,7 +4364,7 @@ static void cnss_pci_send_hang_event(struct cnss_pci_data *pci_priv) } } - cnss_call_driver_uevent(pci_priv, CNSS_HANG_EVENT, &hang_event); + cnss_pci_call_driver_uevent(pci_priv, CNSS_HANG_EVENT, &hang_event); kfree(hang_event.hang_event_data); hang_event.hang_event_data = NULL; @@ -4272,8 +4389,29 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic) return; } - if (cnss_pci_check_link_status(pci_priv)) + if (!cnss_is_device_powered_on(plat_priv)) { + cnss_pr_dbg("Device is already powered off, skip\n"); return; + } + + if (!in_panic) { + mutex_lock(&pci_priv->bus_lock); + ret = cnss_pci_check_link_status(pci_priv); + if (ret) { + if (ret != -EACCES) { + mutex_unlock(&pci_priv->bus_lock); + return; + } + if (cnss_pci_resume_bus(pci_priv)) { + mutex_unlock(&pci_priv->bus_lock); + return; + } + } + mutex_unlock(&pci_priv->bus_lock); + } else { + if (cnss_pci_check_link_status(pci_priv)) + return; + } cnss_pci_dump_misc_reg(pci_priv); cnss_pci_dump_shadow_reg(pci_priv); @@ -4455,6 +4593,9 @@ static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv) plat_priv->device_version.major_version, plat_priv->device_version.minor_version); + /* Only keep lower 4 bits as real 
device major version */ + plat_priv->device_version.major_version &= DEVICE_MAJOR_VERSION_MASK; + switch (pci_priv->device_id) { case QCA6390_DEVICE_ID: if (plat_priv->device_version.major_version < FW_V2_NUMBER) { @@ -4536,6 +4677,7 @@ static void cnss_dev_rddm_timeout_hdlr(struct timer_list *t) cnss_pr_err("Unable to collect ramdumps due to abrupt reset\n"); mhi_debug_reg_dump(mhi_ctrl); + cnss_pci_soc_scratch_reg_dump(pci_priv); cnss_schedule_recovery(&pci_priv->pci_dev->dev, CNSS_REASON_TIMEOUT); } diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h index 37064b05ce0d..51dee4adf402 100644 --- a/drivers/net/wireless/cnss2/pci.h +++ b/drivers/net/wireless/cnss2/pci.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */ #ifndef _CNSS_PCI_H #define _CNSS_PCI_H @@ -228,8 +228,8 @@ void cnss_pci_pm_runtime_put_noidle(struct cnss_pci_data *pci_priv, void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv); int cnss_pci_update_status(struct cnss_pci_data *pci_priv, enum cnss_driver_status status); -int cnss_call_driver_uevent(struct cnss_pci_data *pci_priv, - enum cnss_driver_status status, void *data); +int cnss_pci_call_driver_uevent(struct cnss_pci_data *pci_priv, + enum cnss_driver_status status, void *data); int cnss_pcie_is_device_down(struct cnss_pci_data *pci_priv); int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv); int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv); diff --git a/drivers/net/wireless/cnss2/power.c b/drivers/net/wireless/cnss2/power.c index 13e92c3be166..1a8d88c3a465 100644 --- a/drivers/net/wireless/cnss2/power.c +++ b/drivers/net/wireless/cnss2/power.c @@ -1,13 +1,13 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. 
*/ #include #include #include +#include #include #include #include -#include #include "main.h" #include "debug.h" @@ -42,6 +42,7 @@ static struct cnss_clk_cfg cnss_clk_list[] = { #define BOOTSTRAP_GPIO "qcom,enable-bootstrap-gpio" #define BOOTSTRAP_ACTIVE "bootstrap_active" #define WLAN_EN_GPIO "wlan-en-gpio" +#define SW_CTRL_GPIO "qcom,sw-ctrl-gpio" #define BT_EN_GPIO "qcom,bt-en-gpio" #define WLAN_EN_ACTIVE "wlan_en_active" #define WLAN_EN_SLEEP "wlan_en_sleep" @@ -712,6 +713,16 @@ int cnss_get_pinctrl(struct cnss_plat_data *plat_priv) pinctrl_info->bt_en_gpio = -EINVAL; } + if (of_find_property(dev->of_node, SW_CTRL_GPIO, NULL)) { + pinctrl_info->sw_ctrl_gpio = of_get_named_gpio(dev->of_node, + SW_CTRL_GPIO, + 0); + cnss_pr_dbg("Switch control GPIO: %d\n", + pinctrl_info->sw_ctrl_gpio); + } else { + pinctrl_info->sw_ctrl_gpio = -EINVAL; + } + return 0; out: return ret; @@ -789,10 +800,17 @@ static int cnss_select_pinctrl_enable(struct cnss_plat_data *plat_priv) int ret = 0, bt_en_gpio = plat_priv->pinctrl_info.bt_en_gpio; u8 wlan_en_state = 0; - if (bt_en_gpio < 0 || plat_priv->device_id != QCA6490_DEVICE_ID || - plat_priv->device_id != QCA6390_DEVICE_ID) + if (bt_en_gpio < 0) goto set_wlan_en; + switch (plat_priv->device_id) { + case QCA6390_DEVICE_ID: + case QCA6490_DEVICE_ID: + break; + default: + goto set_wlan_en; + } + if (gpio_get_value(bt_en_gpio)) { cnss_pr_dbg("BT_EN_GPIO State: On\n"); ret = cnss_select_pinctrl_state(plat_priv, true); @@ -808,7 +826,9 @@ static int cnss_select_pinctrl_enable(struct cnss_plat_data *plat_priv) cnss_select_pinctrl_state(plat_priv, false); wlan_en_state = 0; } - /* 100 ms delay for BT_EN and WLAN_EN QCA6490 PMU sequencing */ + /* 100 ms delay for BT_EN and WLAN_EN QCA6490/QCA6390 PMU + * sequencing. + */ msleep(100); } set_wlan_en: @@ -817,6 +837,23 @@ static int cnss_select_pinctrl_enable(struct cnss_plat_data *plat_priv) return ret; } +int cnss_get_gpio_value(struct cnss_plat_data *plat_priv, int gpio_num) +{ + int ret = 0; + + if (gpio_num < 0) + return -EINVAL; + + ret = gpio_direction_input(gpio_num); + if (ret) { + cnss_pr_err("Failed to set direction of the GPIO(%d), err %d", + gpio_num, ret); + return -EINVAL; + } + + return gpio_get_value(gpio_num); +} + int cnss_power_on_device(struct cnss_plat_data *plat_priv) { int ret = 0; diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c index da9ba280f2e7..07ed7a1c31cd 100644 --- a/drivers/net/wireless/cnss2/qmi.c +++ b/drivers/net/wireless/cnss2/qmi.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. 
*/ #include #include @@ -14,11 +14,16 @@ #define WLFW_CLIENT_ID 0x4b4e454c #define BDF_FILE_NAME_PREFIX "bdwlan" #define ELF_BDF_FILE_NAME "bdwlan.elf" +#define ELF_BDF_FILE_NAME_GF "bdwlang.elf" #define ELF_BDF_FILE_NAME_PREFIX "bdwlan.e" +#define ELF_BDF_FILE_NAME_GF_PREFIX "bdwlang.e" #define BIN_BDF_FILE_NAME "bdwlan.bin" +#define BIN_BDF_FILE_NAME_GF "bdwlang.bin" #define BIN_BDF_FILE_NAME_PREFIX "bdwlan.b" +#define BIN_BDF_FILE_NAME_GF_PREFIX "bdwlang.b" #define REGDB_FILE_NAME "regdb.bin" #define DUMMY_BDF_FILE_NAME "bdwlan.dmy" +#define CHIP_ID_GF_MASK 0x10 #define QMI_WLFW_TIMEOUT_MS (plat_priv->ctrl_params.qmi_timeout) #define QMI_WLFW_TIMEOUT_JF msecs_to_jiffies(QMI_WLFW_TIMEOUT_MS) @@ -375,7 +380,7 @@ int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv) struct wlfw_cap_resp_msg_v01 *resp; struct qmi_txn txn; char *fw_build_timestamp; - int ret = 0; + int ret = 0, i; cnss_pr_dbg("Sending target capability message, state: 0x%lx\n", plat_priv->driver_state); @@ -460,7 +465,17 @@ int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv) } if (resp->otp_version_valid) plat_priv->otp_version = resp->otp_version; - + if (resp->dev_mem_info_valid) { + for (i = 0; i < QMI_WLFW_MAX_DEV_MEM_NUM_V01; i++) { + plat_priv->dev_mem_info[i].start = + resp->dev_mem_info[i].start; + plat_priv->dev_mem_info[i].size = + resp->dev_mem_info[i].size; + cnss_pr_dbg("Device memory info[%d]: start = 0x%llx, size = 0x%llx\n", + i, plat_priv->dev_mem_info[i].start, + plat_priv->dev_mem_info[i].size); + } + } if (resp->fw_caps_valid) plat_priv->fw_pcie_gen_switch = !!(resp->fw_caps & QMI_WLFW_HOST_PCIE_GEN_SWITCH_V01); @@ -494,30 +509,53 @@ static int cnss_get_bdf_file_name(struct cnss_plat_data *plat_priv, switch (bdf_type) { case CNSS_BDF_ELF: - if (plat_priv->board_info.board_id == 0xFF) - snprintf(filename_tmp, filename_len, ELF_BDF_FILE_NAME); - else if (plat_priv->board_info.board_id < 0xFF) - snprintf(filename_tmp, filename_len, - ELF_BDF_FILE_NAME_PREFIX "%02x", - plat_priv->board_info.board_id); - else + /* Board ID will be equal or less than 0xFF in GF mask case */ + if (plat_priv->board_info.board_id == 0xFF) { + if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK) + snprintf(filename_tmp, filename_len, + ELF_BDF_FILE_NAME_GF); + else + snprintf(filename_tmp, filename_len, + ELF_BDF_FILE_NAME); + } else if (plat_priv->board_info.board_id < 0xFF) { + if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK) + snprintf(filename_tmp, filename_len, + ELF_BDF_FILE_NAME_GF_PREFIX "%02x", + plat_priv->board_info.board_id); + else + snprintf(filename_tmp, filename_len, + ELF_BDF_FILE_NAME_PREFIX "%02x", + plat_priv->board_info.board_id); + } else { snprintf(filename_tmp, filename_len, BDF_FILE_NAME_PREFIX "%02x.e%02x", plat_priv->board_info.board_id >> 8 & 0xFF, plat_priv->board_info.board_id & 0xFF); + } break; case CNSS_BDF_BIN: - if (plat_priv->board_info.board_id == 0xFF) - snprintf(filename_tmp, filename_len, BIN_BDF_FILE_NAME); - else if (plat_priv->board_info.board_id < 0xFF) - snprintf(filename_tmp, filename_len, - BIN_BDF_FILE_NAME_PREFIX "%02x", - plat_priv->board_info.board_id); - else + if (plat_priv->board_info.board_id == 0xFF) { + if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK) + snprintf(filename_tmp, filename_len, + BIN_BDF_FILE_NAME_GF); + else + snprintf(filename_tmp, filename_len, + BIN_BDF_FILE_NAME); + } else if (plat_priv->board_info.board_id < 0xFF) { + if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK) + snprintf(filename_tmp, filename_len, + 
BIN_BDF_FILE_NAME_GF_PREFIX "%02x", + plat_priv->board_info.board_id); + else + snprintf(filename_tmp, filename_len, + BIN_BDF_FILE_NAME_PREFIX "%02x", + plat_priv->board_info.board_id); + } else { snprintf(filename_tmp, filename_len, BDF_FILE_NAME_PREFIX "%02x.b%02x", plat_priv->board_info.board_id >> 8 & 0xFF, plat_priv->board_info.board_id & 0xFF); + } break; case CNSS_BDF_REGDB: snprintf(filename_tmp, filename_len, REGDB_FILE_NAME); @@ -1779,8 +1817,6 @@ int cnss_wlfw_get_info_send_sync(struct cnss_plat_data *plat_priv, int type, unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv) { - cnss_pr_dbg("QMI timeout is %u ms\n", QMI_WLFW_TIMEOUT_MS); - return QMI_WLFW_TIMEOUT_MS; } diff --git a/drivers/net/wireless/cnss2/reg.h b/drivers/net/wireless/cnss2/reg.h index 1c8c12e72a15..3bfbd7a652ff 100644 --- a/drivers/net/wireless/cnss2/reg.h +++ b/drivers/net/wireless/cnss2/reg.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */ #ifndef _CNSS_REG_H #define _CNSS_REG_H @@ -267,12 +267,8 @@ #define QCA6390_SYSPM_DBG_BUS_SEL_REG 0x1F82008 #define QCA6390_SYSPM_WCSSAON_SR_STATUS 0x1F8200C -#define QCA6490_DEBUG_PBL_LOG_SRAM_START 0x1403D58 +#define QCA6490_DEBUG_PBL_LOG_SRAM_START 0x01403DA0 #define QCA6490_DEBUG_PBL_LOG_SRAM_MAX_SIZE 40 -#define QCA6490_V1_SBL_DATA_START 0x143b000 -#define QCA6490_V1_SBL_DATA_END (0x143b000 + 0x00011000) -#define QCA6490_V2_SBL_DATA_START 0x1435000 -#define QCA6490_V2_SBL_DATA_END (0x1435000 + 0x00011000) #define QCA6490_DEBUG_SBL_LOG_SRAM_MAX_SIZE 48 #define QCA6490_TCSR_PBL_LOGGING_REG 0x01B000F8 #define QCA6490_PCIE_BHI_ERRDBG2_REG 0x01E0E238 @@ -282,12 +278,18 @@ #define QCA6390_DEBUG_PBL_LOG_SRAM_START 0x01403D58 #define QCA6390_DEBUG_PBL_LOG_SRAM_MAX_SIZE 80 -#define QCA6390_V2_SBL_DATA_START 0x016c8580 -#define QCA6390_V2_SBL_DATA_END (0x016c8580 + 0x00011000) #define QCA6390_DEBUG_SBL_LOG_SRAM_MAX_SIZE 44 #define QCA6390_TCSR_PBL_LOGGING_REG 0x01B000F8 #define QCA6390_PCIE_BHI_ERRDBG2_REG 0x01E0E238 #define QCA6390_PCIE_BHI_ERRDBG3_REG 0x01E0E23C #define QCA6390_PBL_WLAN_BOOT_CFG 0x01E22B34 #define QCA6390_PBL_BOOTSTRAP_STATUS 0x01910008 + +#define SRAM_START 0x01400000 +#define SRAM_END 0x01800000 + +/* PCIE SOC scratch registers, address same for QCA6390 & QCA6490*/ +#define PCIE_SCRATCH_0_SOC_PCIE_REG 0x1E04040 +#define PCIE_SCRATCH_1_SOC_PCIE_REG 0x1E04044 +#define PCIE_SCRATCH_2_SOC_PCIE_REG 0x1E0405C #endif diff --git a/drivers/net/wireless/cnss_utils/cnss_utils.c b/drivers/net/wireless/cnss_utils/cnss_utils.c index ba7c5301f3f4..c7c81b483dc7 100644 --- a/drivers/net/wireless/cnss_utils/cnss_utils.c +++ b/drivers/net/wireless/cnss_utils/cnss_utils.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2017, 2019-2021 The Linux Foundation. All rights reserved. 
*/ #define pr_fmt(fmt) "cnss_utils: " fmt @@ -10,7 +10,7 @@ #include #include -#define CNSS_MAX_CH_NUM 45 +#define CNSS_MAX_CH_NUM 157 struct cnss_unsafe_channel_list { u16 unsafe_ch_count; u16 unsafe_ch_list[CNSS_MAX_CH_NUM]; diff --git a/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.c b/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.c index 616dd1ffdc8e..5419b1b219c0 100644 --- a/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.c +++ b/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2021, The Linux Foundation. All rights reserved. */ #include "wlan_firmware_service_v01.h" @@ -464,7 +464,8 @@ static struct qmi_elem_info wlfw_m3_segment_info_s_v01_ei[] = { .elem_size = sizeof(enum wlfw_m3_segment_type_v01), .array_type = NO_ARRAY, .tlv_type = 0, - .offset = offsetof(struct wlfw_m3_segment_info_s_v01, + .offset = offsetof(struct + wlfw_m3_segment_info_s_v01, type), }, { @@ -473,7 +474,8 @@ static struct qmi_elem_info wlfw_m3_segment_info_s_v01_ei[] = { .elem_size = sizeof(u64), .array_type = NO_ARRAY, .tlv_type = 0, - .offset = offsetof(struct wlfw_m3_segment_info_s_v01, + .offset = offsetof(struct + wlfw_m3_segment_info_s_v01, addr), }, { @@ -482,7 +484,8 @@ static struct qmi_elem_info wlfw_m3_segment_info_s_v01_ei[] = { .elem_size = sizeof(u64), .array_type = NO_ARRAY, .tlv_type = 0, - .offset = offsetof(struct wlfw_m3_segment_info_s_v01, + .offset = offsetof(struct + wlfw_m3_segment_info_s_v01, size), }, { @@ -491,7 +494,8 @@ static struct qmi_elem_info wlfw_m3_segment_info_s_v01_ei[] = { .elem_size = sizeof(char), .array_type = NO_ARRAY, .tlv_type = 0, - .offset = offsetof(struct wlfw_m3_segment_info_s_v01, + .offset = offsetof(struct + wlfw_m3_segment_info_s_v01, name), }, { @@ -501,6 +505,34 @@ static struct qmi_elem_info wlfw_m3_segment_info_s_v01_ei[] = { }, }; +static struct qmi_elem_info wlfw_dev_mem_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct + wlfw_dev_mem_info_s_v01, + start), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct + wlfw_dev_mem_info_s_v01, + size), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { { .data_type = QMI_OPT_FLAG, @@ -851,7 +883,8 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .array_type = NO_ARRAY, .tlv_type = 0x23, .offset = - offsetof(struct wlfw_ind_register_req_msg_v01, + offsetof(struct + wlfw_ind_register_req_msg_v01, m3_dump_upload_segments_req_enable_valid), }, { @@ -860,7 +893,8 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = { .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x23, - .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + .offset = offsetof(struct + wlfw_ind_register_req_msg_v01, m3_dump_upload_segments_req_enable), }, { @@ -1524,6 +1558,27 @@ struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { .offset = offsetof(struct wlfw_cap_resp_msg_v01, rd_card_chain_cap), }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct + wlfw_cap_resp_msg_v01, + 
dev_mem_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_DEV_MEM_NUM_V01, + .elem_size = sizeof(struct wlfw_dev_mem_info_s_v01), + .array_type = STATIC_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct + wlfw_cap_resp_msg_v01, + dev_mem_info), + .ei_array = wlfw_dev_mem_info_s_v01_ei, + }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, @@ -1759,6 +1814,26 @@ struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = { .offset = offsetof(struct wlfw_cal_report_req_msg_v01, cal_remove_supported), }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + wlfw_cal_report_req_msg_v01, + cal_file_download_size_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + wlfw_cal_report_req_msg_v01, + cal_file_download_size), + }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, @@ -2996,7 +3071,8 @@ struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = { .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, - .offset = offsetof(struct wlfw_fw_init_done_ind_msg_v01, + .offset = offsetof(struct + wlfw_fw_init_done_ind_msg_v01, hang_data_addr_offset_valid), }, { @@ -3005,7 +3081,8 @@ struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = { .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x10, - .offset = offsetof(struct wlfw_fw_init_done_ind_msg_v01, + .offset = offsetof(struct + wlfw_fw_init_done_ind_msg_v01, hang_data_addr_offset), }, { @@ -3014,7 +3091,8 @@ struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = { .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, - .offset = offsetof(struct wlfw_fw_init_done_ind_msg_v01, + .offset = offsetof(struct + wlfw_fw_init_done_ind_msg_v01, hang_data_length_valid), }, { @@ -3023,7 +3101,8 @@ struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = { .elem_size = sizeof(u16), .array_type = NO_ARRAY, .tlv_type = 0x11, - .offset = offsetof(struct wlfw_fw_init_done_ind_msg_v01, + .offset = offsetof(struct + wlfw_fw_init_done_ind_msg_v01, hang_data_length), }, { @@ -3289,6 +3368,26 @@ struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[] = { }; struct qmi_elem_info wlfw_cal_done_ind_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_cal_done_ind_msg_v01, + cal_file_upload_size_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_cal_done_ind_msg_v01, + cal_file_upload_size), + }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, @@ -4323,6 +4422,46 @@ struct qmi_elem_info wlfw_device_info_resp_msg_v01_ei[] = { .offset = offsetof(struct wlfw_device_info_resp_msg_v01, bar_size), }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + wlfw_device_info_resp_msg_v01, + mhi_state_info_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + wlfw_device_info_resp_msg_v01, + mhi_state_info_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 
0x13, + .offset = offsetof(struct + wlfw_device_info_resp_msg_v01, + mhi_state_info_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct + wlfw_device_info_resp_msg_v01, + mhi_state_info_size), + }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, @@ -4685,7 +4824,8 @@ struct qmi_elem_info wlfw_m3_dump_upload_segments_req_ind_msg_v01_ei[] = { .array_type = NO_ARRAY, .tlv_type = 0x01, .offset = - offsetof(struct wlfw_m3_dump_upload_segments_req_ind_msg_v01, + offsetof(struct + wlfw_m3_dump_upload_segments_req_ind_msg_v01, pdev_id), }, { @@ -4695,7 +4835,8 @@ struct qmi_elem_info wlfw_m3_dump_upload_segments_req_ind_msg_v01_ei[] = { .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = - offsetof(struct wlfw_m3_dump_upload_segments_req_ind_msg_v01, + offsetof(struct + wlfw_m3_dump_upload_segments_req_ind_msg_v01, no_of_valid_segments), }, { @@ -4705,7 +4846,8 @@ struct qmi_elem_info wlfw_m3_dump_upload_segments_req_ind_msg_v01_ei[] = { .array_type = STATIC_ARRAY, .tlv_type = 0x03, .offset = - offsetof(struct wlfw_m3_dump_upload_segments_req_ind_msg_v01, + offsetof(struct + wlfw_m3_dump_upload_segments_req_ind_msg_v01, m3_segment), .ei_array = wlfw_m3_segment_info_s_v01_ei, }, diff --git a/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.h index 9fe450a74dc0..b5ed622b2d5e 100644 --- a/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.h +++ b/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2021, The Linux Foundation. All rights reserved. 
*/ #ifndef WLAN_FIRMWARE_SERVICE_V01_H #define WLAN_FIRMWARE_SERVICE_V01_H @@ -99,7 +99,7 @@ #define QMI_WLFW_MAX_M3_SEGMENTS_SIZE_V01 10 #define QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01 2 -#define QMI_WLFW_MAX_NUM_MEM_SEG_V01 32 +#define QMI_WLFW_MAX_NUM_MEM_SEG_V01 52 #define QMI_WLFW_MAX_NUM_CAL_V01 5 #define QMI_WLFW_MAX_DATA_SIZE_V01 6144 #define QMI_WLFW_FUNCTION_NAME_LEN_V01 128 @@ -115,6 +115,7 @@ #define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24 #define QMI_WLFW_MAC_ADDR_SIZE_V01 6 #define QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01 36 +#define QMI_WLFW_MAX_DEV_MEM_NUM_V01 4 #define QMI_WLFW_MAX_PLATFORM_NAME_LEN_V01 64 #define QMI_WLFW_MAX_NUM_SVC_V01 24 @@ -348,6 +349,11 @@ struct wlfw_m3_segment_info_s_v01 { char name[QMI_WLFW_MAX_STR_LEN_V01 + 1]; }; +struct wlfw_dev_mem_info_s_v01 { + u64 start; + u64 size; +}; + struct wlfw_ind_register_req_msg_v01 { u8 fw_ready_enable_valid; u8 fw_ready_enable; @@ -390,7 +396,6 @@ struct wlfw_ind_register_req_msg_v01 { u8 m3_dump_upload_segments_req_enable_valid; u8 m3_dump_upload_segments_req_enable; }; - #define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 86 extern struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[]; @@ -506,9 +511,11 @@ struct wlfw_cap_resp_msg_v01 { u64 fw_caps; u8 rd_card_chain_cap_valid; enum wlfw_rd_card_chain_cap_v01 rd_card_chain_cap; + u8 dev_mem_info_valid; + struct wlfw_dev_mem_info_s_v01 + dev_mem_info[QMI_WLFW_MAX_DEV_MEM_NUM_V01]; }; - -#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 253 +#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 320 extern struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[]; struct wlfw_bdf_download_req_msg_v01 { @@ -546,8 +553,11 @@ struct wlfw_cal_report_req_msg_v01 { u8 xo_cal_data; u8 cal_remove_supported_valid; u8 cal_remove_supported; + u8 cal_file_download_size_valid; + u64 cal_file_download_size; }; -#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 32 + +#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 43 extern struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[]; struct wlfw_cal_report_resp_msg_v01 { @@ -775,14 +785,16 @@ struct wlfw_request_mem_ind_msg_v01 { u32 mem_seg_len; struct wlfw_mem_seg_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01]; }; -#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 1124 + +#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 1824 extern struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[]; struct wlfw_respond_mem_req_msg_v01 { u32 mem_seg_len; struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01]; }; -#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 548 + +#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 888 extern struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[]; struct wlfw_respond_mem_resp_msg_v01 { @@ -803,7 +815,6 @@ struct wlfw_fw_init_done_ind_msg_v01 { u8 hang_data_length_valid; u16 hang_data_length; }; - #define WLFW_FW_INIT_DONE_IND_MSG_V01_MAX_MSG_LEN 12 extern struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[]; @@ -869,28 +880,33 @@ struct wlfw_xo_cal_ind_msg_v01 { extern struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[]; struct wlfw_cal_done_ind_msg_v01 { - char placeholder; + u8 cal_file_upload_size_valid; + u64 cal_file_upload_size; }; -#define WLFW_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0 + +#define WLFW_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 11 extern struct qmi_elem_info wlfw_cal_done_ind_msg_v01_ei[]; struct wlfw_qdss_trace_req_mem_ind_msg_v01 { u32 mem_seg_len; struct wlfw_mem_seg_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01]; }; -#define WLFW_QDSS_TRACE_REQ_MEM_IND_MSG_V01_MAX_MSG_LEN 1124 + +#define WLFW_QDSS_TRACE_REQ_MEM_IND_MSG_V01_MAX_MSG_LEN 1824 
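The bumped *_MAX_MSG_LEN constants above follow directly from the larger mem_seg array: assuming the only change is QMI_WLFW_MAX_NUM_MEM_SEG_V01 going from 32 to 52 with a fixed per-element wire cost, both the old and new values are reproduced by the same linear formula. A small check (the per-segment byte counts are inferred from the old constants, not taken from the QMI IDL):

/*
 * Sanity check on the new maximum message lengths. The per-segment byte
 * costs below are inferred from the old constants, not from the QMI IDL.
 */
#include <stdio.h>

static unsigned int max_msg_len(unsigned int nsegs, unsigned int per_seg,
				unsigned int overhead)
{
	return overhead + nsegs * per_seg;
}

int main(void)
{
	/* wlfw_mem_seg_s: 35 bytes/segment reproduces 1124 and 1824 */
	printf("request_mem ind: 32 segs -> %u, 52 segs -> %u\n",
	       max_msg_len(32, 35, 4), max_msg_len(52, 35, 4));

	/* wlfw_mem_seg_resp_s: 17 bytes/segment reproduces 548 and 888 */
	printf("respond_mem req: 32 segs -> %u, 52 segs -> %u\n",
	       max_msg_len(32, 17, 4), max_msg_len(52, 17, 4));
	return 0;
}

The same arithmetic accounts for the QDSS trace indication lengths further down, which carry the same segment arrays.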
extern struct qmi_elem_info wlfw_qdss_trace_req_mem_ind_msg_v01_ei[]; struct wlfw_qdss_trace_mem_info_req_msg_v01 { u32 mem_seg_len; struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01]; }; -#define WLFW_QDSS_TRACE_MEM_INFO_REQ_MSG_V01_MAX_MSG_LEN 548 + +#define WLFW_QDSS_TRACE_MEM_INFO_REQ_MSG_V01_MAX_MSG_LEN 888 extern struct qmi_elem_info wlfw_qdss_trace_mem_info_req_msg_v01_ei[]; struct wlfw_qdss_trace_mem_info_resp_msg_v01 { struct qmi_response_type_v01 resp; }; + #define WLFW_QDSS_TRACE_MEM_INFO_RESP_MSG_V01_MAX_MSG_LEN 7 extern struct qmi_elem_info wlfw_qdss_trace_mem_info_resp_msg_v01_ei[]; @@ -903,7 +919,8 @@ struct wlfw_qdss_trace_save_ind_msg_v01 { u8 file_name_valid; char file_name[QMI_WLFW_MAX_STR_LEN_V01 + 1]; }; -#define WLFW_QDSS_TRACE_SAVE_IND_MSG_V01_MAX_MSG_LEN 581 + +#define WLFW_QDSS_TRACE_SAVE_IND_MSG_V01_MAX_MSG_LEN 921 extern struct qmi_elem_info wlfw_qdss_trace_save_ind_msg_v01_ei[]; struct wlfw_qdss_trace_data_req_msg_v01 { @@ -969,7 +986,8 @@ struct wlfw_qdss_trace_free_ind_msg_v01 { u32 mem_seg_len; struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01]; }; -#define WLFW_QDSS_TRACE_FREE_IND_MSG_V01_MAX_MSG_LEN 548 + +#define WLFW_QDSS_TRACE_FREE_IND_MSG_V01_MAX_MSG_LEN 888 extern struct qmi_elem_info wlfw_qdss_trace_free_ind_msg_v01_ei[]; struct wlfw_shutdown_req_msg_v01 { @@ -1048,6 +1066,7 @@ extern struct qmi_elem_info wlfw_get_info_req_msg_v01_ei[]; struct wlfw_get_info_resp_msg_v01 { struct qmi_response_type_v01 resp; }; + #define WLFW_GET_INFO_RESP_MSG_V01_MAX_MSG_LEN 7 extern struct qmi_elem_info wlfw_get_info_resp_msg_v01_ei[]; @@ -1076,8 +1095,13 @@ struct wlfw_device_info_resp_msg_v01 { u64 bar_addr; u8 bar_size_valid; u32 bar_size; + u8 mhi_state_info_addr_valid; + u64 mhi_state_info_addr; + u8 mhi_state_info_size_valid; + u32 mhi_state_info_size; }; -#define WLFW_DEVICE_INFO_RESP_MSG_V01_MAX_MSG_LEN 25 + +#define WLFW_DEVICE_INFO_RESP_MSG_V01_MAX_MSG_LEN 43 extern struct qmi_elem_info wlfw_device_info_resp_msg_v01_ei[]; struct wlfw_m3_dump_upload_req_ind_msg_v01 { @@ -1174,7 +1198,6 @@ struct wlfw_m3_dump_upload_segments_req_ind_msg_v01 { struct wlfw_m3_segment_info_s_v01 m3_segment[QMI_WLFW_MAX_M3_SEGMENTS_SIZE_V01]; }; - #define WLFW_M3_DUMP_UPLOAD_SEGMENTS_REQ_IND_MSG_V01_MAX_MSG_LEN 387 extern struct qmi_elem_info wlfw_m3_dump_upload_segments_req_ind_msg_v01_ei[]; diff --git a/drivers/pci/controller/pci-msm.c b/drivers/pci/controller/pci-msm.c index 6809741c4c47..89656663fa32 100644 --- a/drivers/pci/controller/pci-msm.c +++ b/drivers/pci/controller/pci-msm.c @@ -6884,6 +6884,7 @@ int msm_pci_probe(struct pci_dev *pci_dev, static struct pci_device_id msm_pci_device_id[] = { {PCI_DEVICE(0x17cb, 0x0108)}, {PCI_DEVICE(0x17cb, 0x010b)}, + {PCI_DEVICE(0x1b21, 0x2806)}, {0}, }; diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index 4be8b71bcf42..868c30989a7a 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -212,6 +212,14 @@ config PINCTRL_BENGAL Qualcomm Technologies Inc TLMM block found on the Qualcomm Technologies Inc BENGAL platform. +config PINCTRL_KHAJE + tristate "Qualcomm Technologies, Inc KHAJE pin controller driver" + depends on GPIOLIB && OF + select PINCTRL_MSM + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + QTI TLMM block found on the QTI KHAJE platform. 
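Separately, the cnss_get_bdf_file_name() changes earlier in this patch reduce to one decision: append a "g" to the base name whenever the chip reports the GF fuse bit (chip_id & CHIP_ID_GF_MASK), except in the two-byte board-ID fallback, which has no GF variant. A standalone sketch of the ELF case (constants mirror the hunk; the function itself is illustrative only, not a drop-in replacement):

/*
 * Illustrative re-statement of the ELF BDF name selection added to
 * cnss_get_bdf_file_name() above; not a drop-in replacement.
 */
#include <stdio.h>

#define CHIP_ID_GF_MASK 0x10

static void bdf_elf_name(char *buf, size_t len, unsigned int board_id,
			 unsigned int chip_id)
{
	int gf = !!(chip_id & CHIP_ID_GF_MASK);

	if (board_id == 0xFF)		/* board ID not programmed */
		snprintf(buf, len, "%s", gf ? "bdwlang.elf" : "bdwlan.elf");
	else if (board_id < 0xFF)	/* single-byte board ID */
		snprintf(buf, len, gf ? "bdwlang.e%02x" : "bdwlan.e%02x",
			 board_id);
	else				/* two-byte board ID, no GF variant */
		snprintf(buf, len, "bdwlan%02x.e%02x",
			 (board_id >> 8) & 0xFF, board_id & 0xFF);
}

int main(void)
{
	char name[32];

	bdf_elf_name(name, sizeof(name), 0x24, CHIP_ID_GF_MASK);
	printf("%s\n", name);	/* prints bdwlang.e24 */
	return 0;
}

The BIN case in the patch follows the same pattern with the .bin and .b prefixes.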
+ config PINCTRL_SCUBA tristate "Qualcomm Technologies Inc SCUBA pin controller driver" depends on GPIOLIB && OF diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile index 82932fcb3db9..d5ea857844ba 100644 --- a/drivers/pinctrl/qcom/Makefile +++ b/drivers/pinctrl/qcom/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o obj-$(CONFIG_PINCTRL_KONA) += pinctrl-kona.o obj-$(CONFIG_PINCTRL_LITO) += pinctrl-lito.o obj-$(CONFIG_PINCTRL_BENGAL) += pinctrl-bengal.o +obj-$(CONFIG_PINCTRL_KHAJE) += pinctrl-khaje.o obj-$(CONFIG_PINCTRL_LAGOON) += pinctrl-lagoon.o obj-$(CONFIG_PINCTRL_SCUBA) += pinctrl-scuba.o obj-$(CONFIG_PINCTRL_SDM660) += pinctrl-sdm660.o diff --git a/drivers/pinctrl/qcom/pinctrl-khaje.c b/drivers/pinctrl/qcom/pinctrl-khaje.c new file mode 100644 index 000000000000..b739361a8bf7 --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-khaje.c @@ -0,0 +1,1587 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include + +#include "pinctrl-msm.h" + +#define FUNCTION(fname) \ + [msm_mux_##fname] = { \ + .name = #fname, \ + .groups = fname##_groups, \ + .ngroups = ARRAY_SIZE(fname##_groups), \ + } + +#define SOUTH 0x00500000 +#define WEST 0x00100000 +#define EAST 0x00900000 +#define DUMMY 0x0 +#define REG_SIZE 0x1000 +#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9, wake_off, bit) \ + { \ + .name = "gpio" #id, \ + .pins = gpio##id##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \ + .funcs = (int[]){ \ + msm_mux_gpio, /* gpio mode */ \ + msm_mux_##f1, \ + msm_mux_##f2, \ + msm_mux_##f3, \ + msm_mux_##f4, \ + msm_mux_##f5, \ + msm_mux_##f6, \ + msm_mux_##f7, \ + msm_mux_##f8, \ + msm_mux_##f9 \ + }, \ + .nfuncs = 10, \ + .ctl_reg = base + REG_SIZE * id, \ + .io_reg = base + 0x4 + REG_SIZE * id, \ + .intr_cfg_reg = base + 0x8 + REG_SIZE * id, \ + .intr_status_reg = base + 0xc + REG_SIZE * id, \ + .intr_target_reg = base + 0x8 + REG_SIZE * id, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .egpio_enable = 12, \ + .egpio_present = 11, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_target_bit = 5, \ + .intr_target_kpss_val = 3, \ + .intr_raw_status_bit = 4, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 2, \ + .wake_reg = base + wake_off, \ + .wake_bit = bit, \ + } + +#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = pull, \ + .drv_bit = drv, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +#define UFS_RESET(pg_name, offset) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = offset, \ + .io_reg = offset + 0x4, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = 3, \ + .drv_bit = 0, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = 0, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = 
-1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +static const struct pinctrl_pin_desc khaje_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), + PINCTRL_PIN(94, "GPIO_94"), + PINCTRL_PIN(95, "GPIO_95"), + PINCTRL_PIN(96, "GPIO_96"), + PINCTRL_PIN(97, "GPIO_97"), + PINCTRL_PIN(98, "GPIO_98"), + PINCTRL_PIN(99, "GPIO_99"), + PINCTRL_PIN(100, "GPIO_100"), + PINCTRL_PIN(101, "GPIO_101"), + PINCTRL_PIN(102, "GPIO_102"), + PINCTRL_PIN(103, "GPIO_103"), + PINCTRL_PIN(104, "GPIO_104"), + PINCTRL_PIN(105, "GPIO_105"), + PINCTRL_PIN(106, "GPIO_106"), + PINCTRL_PIN(107, "GPIO_107"), + PINCTRL_PIN(108, "GPIO_108"), + PINCTRL_PIN(109, "GPIO_109"), + PINCTRL_PIN(110, "GPIO_110"), + PINCTRL_PIN(111, "GPIO_111"), + 
PINCTRL_PIN(112, "GPIO_112"), + PINCTRL_PIN(113, "SDC1_RCLK"), + PINCTRL_PIN(114, "SDC1_CLK"), + PINCTRL_PIN(115, "SDC1_CMD"), + PINCTRL_PIN(116, "SDC1_DATA"), + PINCTRL_PIN(117, "SDC2_CLK"), + PINCTRL_PIN(118, "SDC2_CMD"), + PINCTRL_PIN(119, "SDC2_DATA"), + PINCTRL_PIN(120, "UFS_RESET"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) \ +static const unsigned int gpio##pin##_pins[] = { pin } +DECLARE_MSM_GPIO_PINS(0); +DECLARE_MSM_GPIO_PINS(1); +DECLARE_MSM_GPIO_PINS(2); +DECLARE_MSM_GPIO_PINS(3); +DECLARE_MSM_GPIO_PINS(4); +DECLARE_MSM_GPIO_PINS(5); +DECLARE_MSM_GPIO_PINS(6); +DECLARE_MSM_GPIO_PINS(7); +DECLARE_MSM_GPIO_PINS(8); +DECLARE_MSM_GPIO_PINS(9); +DECLARE_MSM_GPIO_PINS(10); +DECLARE_MSM_GPIO_PINS(11); +DECLARE_MSM_GPIO_PINS(12); +DECLARE_MSM_GPIO_PINS(13); +DECLARE_MSM_GPIO_PINS(14); +DECLARE_MSM_GPIO_PINS(15); +DECLARE_MSM_GPIO_PINS(16); +DECLARE_MSM_GPIO_PINS(17); +DECLARE_MSM_GPIO_PINS(18); +DECLARE_MSM_GPIO_PINS(19); +DECLARE_MSM_GPIO_PINS(20); +DECLARE_MSM_GPIO_PINS(21); +DECLARE_MSM_GPIO_PINS(22); +DECLARE_MSM_GPIO_PINS(23); +DECLARE_MSM_GPIO_PINS(24); +DECLARE_MSM_GPIO_PINS(25); +DECLARE_MSM_GPIO_PINS(26); +DECLARE_MSM_GPIO_PINS(27); +DECLARE_MSM_GPIO_PINS(28); +DECLARE_MSM_GPIO_PINS(29); +DECLARE_MSM_GPIO_PINS(30); +DECLARE_MSM_GPIO_PINS(31); +DECLARE_MSM_GPIO_PINS(32); +DECLARE_MSM_GPIO_PINS(33); +DECLARE_MSM_GPIO_PINS(34); +DECLARE_MSM_GPIO_PINS(35); +DECLARE_MSM_GPIO_PINS(36); +DECLARE_MSM_GPIO_PINS(37); +DECLARE_MSM_GPIO_PINS(38); +DECLARE_MSM_GPIO_PINS(39); +DECLARE_MSM_GPIO_PINS(40); +DECLARE_MSM_GPIO_PINS(41); +DECLARE_MSM_GPIO_PINS(42); +DECLARE_MSM_GPIO_PINS(43); +DECLARE_MSM_GPIO_PINS(44); +DECLARE_MSM_GPIO_PINS(45); +DECLARE_MSM_GPIO_PINS(46); +DECLARE_MSM_GPIO_PINS(47); +DECLARE_MSM_GPIO_PINS(48); +DECLARE_MSM_GPIO_PINS(49); +DECLARE_MSM_GPIO_PINS(50); +DECLARE_MSM_GPIO_PINS(51); +DECLARE_MSM_GPIO_PINS(52); +DECLARE_MSM_GPIO_PINS(53); +DECLARE_MSM_GPIO_PINS(54); +DECLARE_MSM_GPIO_PINS(55); +DECLARE_MSM_GPIO_PINS(56); +DECLARE_MSM_GPIO_PINS(57); +DECLARE_MSM_GPIO_PINS(58); +DECLARE_MSM_GPIO_PINS(59); +DECLARE_MSM_GPIO_PINS(60); +DECLARE_MSM_GPIO_PINS(61); +DECLARE_MSM_GPIO_PINS(62); +DECLARE_MSM_GPIO_PINS(63); +DECLARE_MSM_GPIO_PINS(64); +DECLARE_MSM_GPIO_PINS(65); +DECLARE_MSM_GPIO_PINS(66); +DECLARE_MSM_GPIO_PINS(67); +DECLARE_MSM_GPIO_PINS(68); +DECLARE_MSM_GPIO_PINS(69); +DECLARE_MSM_GPIO_PINS(70); +DECLARE_MSM_GPIO_PINS(71); +DECLARE_MSM_GPIO_PINS(72); +DECLARE_MSM_GPIO_PINS(73); +DECLARE_MSM_GPIO_PINS(74); +DECLARE_MSM_GPIO_PINS(75); +DECLARE_MSM_GPIO_PINS(76); +DECLARE_MSM_GPIO_PINS(77); +DECLARE_MSM_GPIO_PINS(78); +DECLARE_MSM_GPIO_PINS(79); +DECLARE_MSM_GPIO_PINS(80); +DECLARE_MSM_GPIO_PINS(81); +DECLARE_MSM_GPIO_PINS(82); +DECLARE_MSM_GPIO_PINS(83); +DECLARE_MSM_GPIO_PINS(84); +DECLARE_MSM_GPIO_PINS(85); +DECLARE_MSM_GPIO_PINS(86); +DECLARE_MSM_GPIO_PINS(87); +DECLARE_MSM_GPIO_PINS(88); +DECLARE_MSM_GPIO_PINS(89); +DECLARE_MSM_GPIO_PINS(90); +DECLARE_MSM_GPIO_PINS(91); +DECLARE_MSM_GPIO_PINS(92); +DECLARE_MSM_GPIO_PINS(93); +DECLARE_MSM_GPIO_PINS(94); +DECLARE_MSM_GPIO_PINS(95); +DECLARE_MSM_GPIO_PINS(96); +DECLARE_MSM_GPIO_PINS(97); +DECLARE_MSM_GPIO_PINS(98); +DECLARE_MSM_GPIO_PINS(99); +DECLARE_MSM_GPIO_PINS(100); +DECLARE_MSM_GPIO_PINS(101); +DECLARE_MSM_GPIO_PINS(102); +DECLARE_MSM_GPIO_PINS(103); +DECLARE_MSM_GPIO_PINS(104); +DECLARE_MSM_GPIO_PINS(105); +DECLARE_MSM_GPIO_PINS(106); +DECLARE_MSM_GPIO_PINS(107); +DECLARE_MSM_GPIO_PINS(108); +DECLARE_MSM_GPIO_PINS(109); +DECLARE_MSM_GPIO_PINS(110); +DECLARE_MSM_GPIO_PINS(111); 
+DECLARE_MSM_GPIO_PINS(112); + +static const unsigned int sdc1_rclk_pins[] = { 113 }; +static const unsigned int sdc1_clk_pins[] = { 114 }; +static const unsigned int sdc1_cmd_pins[] = { 115 }; +static const unsigned int sdc1_data_pins[] = { 116 }; +static const unsigned int sdc2_clk_pins[] = { 117 }; +static const unsigned int sdc2_cmd_pins[] = { 118 }; +static const unsigned int sdc2_data_pins[] = { 119 }; +static const unsigned int ufs_reset_pins[] = { 120 }; + +enum khaje_functions { + msm_mux_gpio, + msm_mux_qup0, + msm_mux_usb2phy_ac, + msm_mux_ddr_bist, + msm_mux_m_voc, + msm_mux_phase_flag0, + msm_mux_qdss_gpio8, + msm_mux_atest_tsens, + msm_mux_mpm_pwr, + msm_mux_phase_flag1, + msm_mux_qdss_gpio9, + msm_mux_atest_tsens2, + msm_mux_phase_flag2, + msm_mux_qdss_gpio10, + msm_mux_dac_calib0, + msm_mux_atest_usb10, + msm_mux_phase_flag3, + msm_mux_qdss_gpio11, + msm_mux_dac_calib1, + msm_mux_atest_usb11, + msm_mux_qup1, + msm_mux_CRI_TRNG0, + msm_mux_phase_flag4, + msm_mux_dac_calib2, + msm_mux_atest_usb12, + msm_mux_CRI_TRNG1, + msm_mux_phase_flag5, + msm_mux_dac_calib3, + msm_mux_atest_usb13, + msm_mux_qup2, + msm_mux_phase_flag6, + msm_mux_dac_calib4, + msm_mux_atest_usb1, + msm_mux_qup3, + msm_mux_pbs_out, + msm_mux_PLL_BIST, + msm_mux_qdss_gpio, + msm_mux_tsense_pwm, + msm_mux_AGERA_PLL, + msm_mux_pbs0, + msm_mux_qdss_gpio0, + msm_mux_pbs1, + msm_mux_qdss_gpio1, + msm_mux_qup4, + msm_mux_tgu_ch0, + msm_mux_tgu_ch1, + msm_mux_qup5, + msm_mux_tgu_ch2, + msm_mux_phase_flag7, + msm_mux_qdss_gpio4, + msm_mux_dac_calib5, + msm_mux_tgu_ch3, + msm_mux_phase_flag8, + msm_mux_qdss_gpio5, + msm_mux_dac_calib6, + msm_mux_phase_flag9, + msm_mux_qdss_gpio6, + msm_mux_dac_calib7, + msm_mux_phase_flag10, + msm_mux_qdss_gpio7, + msm_mux_dac_calib8, + msm_mux_SDC2_TB, + msm_mux_CRI_TRNG, + msm_mux_pbs2, + msm_mux_qdss_gpio2, + msm_mux_SDC1_TB, + msm_mux_pbs3, + msm_mux_qdss_gpio3, + msm_mux_cam_mclk, + msm_mux_pbs4, + msm_mux_adsp_ext, + msm_mux_pbs5, + msm_mux_cci_i2c, + msm_mux_prng_rosc, + msm_mux_pbs6, + msm_mux_phase_flag11, + msm_mux_dac_calib9, + msm_mux_atest_usb20, + msm_mux_pbs7, + msm_mux_phase_flag12, + msm_mux_dac_calib10, + msm_mux_atest_usb21, + msm_mux_CCI_TIMER1, + msm_mux_GCC_GP1, + msm_mux_pbs8, + msm_mux_phase_flag13, + msm_mux_dac_calib11, + msm_mux_atest_usb22, + msm_mux_cci_async, + msm_mux_CCI_TIMER0, + msm_mux_pbs9, + msm_mux_phase_flag14, + msm_mux_dac_calib12, + msm_mux_atest_usb23, + msm_mux_pbs10, + msm_mux_phase_flag15, + msm_mux_dac_calib13, + msm_mux_atest_usb2, + msm_mux_vsense_trigger, + msm_mux_qdss_cti, + msm_mux_CCI_TIMER2, + msm_mux_phase_flag16, + msm_mux_dac_calib14, + msm_mux_atest_char, + msm_mux_phase_flag17, + msm_mux_dac_calib15, + msm_mux_atest_char0, + msm_mux_GP_PDM0, + msm_mux_phase_flag18, + msm_mux_dac_calib16, + msm_mux_atest_char1, + msm_mux_CCI_TIMER3, + msm_mux_GP_PDM1, + msm_mux_phase_flag19, + msm_mux_dac_calib17, + msm_mux_atest_char2, + msm_mux_GP_PDM2, + msm_mux_phase_flag20, + msm_mux_dac_calib18, + msm_mux_atest_char3, + msm_mux_phase_flag21, + msm_mux_qdss_gpio14, + msm_mux_phase_flag22, + msm_mux_qdss_gpio15, + msm_mux_NAV_GPIO, + msm_mux_phase_flag23, + msm_mux_phase_flag24, + msm_mux_phase_flag25, + msm_mux_pbs14, + msm_mux_vfr_1, + msm_mux_pbs15, + msm_mux_PA_INDICATOR, + msm_mux_gsm1_tx, + msm_mux_SSBI_WTR1, + msm_mux_pll_clk, + msm_mux_pll_bypassnl, + msm_mux_pll_reset, + msm_mux_phase_flag26, + msm_mux_ddr_pxi0, + msm_mux_gsm0_tx, + msm_mux_phase_flag27, + msm_mux_GCC_GP2, + msm_mux_qdss_gpio12, + msm_mux_ddr_pxi1, + 
msm_mux_GCC_GP3, + msm_mux_qdss_gpio13, + msm_mux_dbg_out, + msm_mux_uim2_data, + msm_mux_uim2_clk, + msm_mux_uim2_reset, + msm_mux_uim2_present, + msm_mux_uim1_data, + msm_mux_uim1_clk, + msm_mux_uim1_reset, + msm_mux_uim1_present, + msm_mux_dac_calib19, + msm_mux_mdp_vsync, + msm_mux_mdp_vsync_out_0, + msm_mux_mdp_vsync_out_1, + msm_mux_dac_calib20, + msm_mux_dac_calib21, + msm_mux_atest_bbrx1, + msm_mux_pbs11, + msm_mux_usb_phy, + msm_mux_atest_bbrx0, + msm_mux_mss_lte, + msm_mux_pbs12, + msm_mux_pbs13, + msm_mux_wlan1_adc0, + msm_mux_wlan1_adc1, + msm_mux_sd_write, + msm_mux_JITTER_BIST, + msm_mux_atest_gpsadc_dtest0_native, + msm_mux_atest_gpsadc_dtest1_native, + msm_mux_phase_flag28, + msm_mux_dac_calib22, + msm_mux_ddr_pxi2, + msm_mux_phase_flag29, + msm_mux_dac_calib23, + msm_mux_phase_flag30, + msm_mux_dac_calib24, + msm_mux_ddr_pxi3, + msm_mux_phase_flag31, + msm_mux_dac_calib25, + msm_mux_NA, +}; + +static const char * const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", + "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", + "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", + "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", + "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", + "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", + "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", + "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", + "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", + "gpio111", "gpio112", +}; +static const char * const qup0_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio82", "gpio86", +}; +static const char * const usb2phy_ac_groups[] = { + "gpio0", +}; +static const char * const ddr_bist_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", +}; +static const char * const m_voc_groups[] = { + "gpio0", +}; +static const char * const phase_flag0_groups[] = { + "gpio0", +}; +static const char * const qdss_gpio8_groups[] = { + "gpio0", "gpio24", +}; +static const char * const atest_tsens_groups[] = { + "gpio0", +}; +static const char * const mpm_pwr_groups[] = { + "gpio1", +}; +static const char * const phase_flag1_groups[] = { + "gpio1", +}; +static const char * const qdss_gpio9_groups[] = { + "gpio1", "gpio25", +}; +static const char * const atest_tsens2_groups[] = { + "gpio1", +}; +static const char * const phase_flag2_groups[] = { + "gpio2", +}; +static const char * const qdss_gpio10_groups[] = { + "gpio2", "gpio26", +}; +static const char * const dac_calib0_groups[] = { + "gpio2", +}; +static const char * const atest_usb10_groups[] = { + "gpio2", +}; +static const char * const phase_flag3_groups[] = { + "gpio3", +}; +static const char * const qdss_gpio11_groups[] = { + "gpio3", "gpio87", +}; +static const char * const dac_calib1_groups[] = { + "gpio3", +}; +static const char * const atest_usb11_groups[] = { + "gpio3", +}; +static const char * const qup1_groups[] = { + 
"gpio4", "gpio5", "gpio69", "gpio70", +}; +static const char * const CRI_TRNG0_groups[] = { + "gpio4", +}; +static const char * const phase_flag4_groups[] = { + "gpio4", +}; +static const char * const dac_calib2_groups[] = { + "gpio4", +}; +static const char * const atest_usb12_groups[] = { + "gpio4", +}; +static const char * const CRI_TRNG1_groups[] = { + "gpio5", +}; +static const char * const phase_flag5_groups[] = { + "gpio5", +}; +static const char * const dac_calib3_groups[] = { + "gpio5", +}; +static const char * const atest_usb13_groups[] = { + "gpio5", +}; +static const char * const qup2_groups[] = { + "gpio6", "gpio7", "gpio71", "gpio80", +}; +static const char * const phase_flag6_groups[] = { + "gpio6", +}; +static const char * const dac_calib4_groups[] = { + "gpio6", +}; +static const char * const atest_usb1_groups[] = { + "gpio6", +}; +static const char * const qup3_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio11", +}; +static const char * const pbs_out_groups[] = { + "gpio8", "gpio9", "gpio52", +}; +static const char * const PLL_BIST_groups[] = { + "gpio8", "gpio9", +}; +static const char * const qdss_gpio_groups[] = { + "gpio8", "gpio9", "gpio105", "gpio106", +}; +static const char * const tsense_pwm_groups[] = { + "gpio8", +}; +static const char * const AGERA_PLL_groups[] = { + "gpio10", "gpio11", +}; +static const char * const pbs0_groups[] = { + "gpio10", +}; +static const char * const qdss_gpio0_groups[] = { + "gpio10", "gpio107", +}; +static const char * const pbs1_groups[] = { + "gpio11", +}; +static const char * const qdss_gpio1_groups[] = { + "gpio11", "gpio104", +}; +static const char * const qup4_groups[] = { + "gpio12", "gpio13", "gpio96", "gpio97", +}; +static const char * const tgu_ch0_groups[] = { + "gpio12", +}; +static const char * const tgu_ch1_groups[] = { + "gpio13", +}; +static const char * const qup5_groups[] = { + "gpio14", "gpio15", "gpio16", "gpio17", +}; +static const char * const tgu_ch2_groups[] = { + "gpio14", +}; +static const char * const phase_flag7_groups[] = { + "gpio14", +}; +static const char * const qdss_gpio4_groups[] = { + "gpio14", "gpio20", +}; +static const char * const dac_calib5_groups[] = { + "gpio14", +}; +static const char * const tgu_ch3_groups[] = { + "gpio15", +}; +static const char * const phase_flag8_groups[] = { + "gpio15", +}; +static const char * const qdss_gpio5_groups[] = { + "gpio15", "gpio21", +}; +static const char * const dac_calib6_groups[] = { + "gpio15", +}; +static const char * const phase_flag9_groups[] = { + "gpio16", +}; +static const char * const qdss_gpio6_groups[] = { + "gpio16", "gpio22", +}; +static const char * const dac_calib7_groups[] = { + "gpio16", +}; +static const char * const phase_flag10_groups[] = { + "gpio17", +}; +static const char * const qdss_gpio7_groups[] = { + "gpio17", "gpio23", +}; +static const char * const dac_calib8_groups[] = { + "gpio17", +}; +static const char * const SDC2_TB_groups[] = { + "gpio18", +}; +static const char * const CRI_TRNG_groups[] = { + "gpio18", +}; +static const char * const pbs2_groups[] = { + "gpio18", +}; +static const char * const qdss_gpio2_groups[] = { + "gpio18", "gpio109", +}; +static const char * const SDC1_TB_groups[] = { + "gpio19", +}; +static const char * const pbs3_groups[] = { + "gpio19", +}; +static const char * const qdss_gpio3_groups[] = { + "gpio19", "gpio110", +}; +static const char * const cam_mclk_groups[] = { + "gpio20", "gpio21", "gpio27", "gpio28", +}; +static const char * const pbs4_groups[] = { + "gpio20", +}; +static const char * 
const adsp_ext_groups[] = { + "gpio21", +}; +static const char * const pbs5_groups[] = { + "gpio21", +}; +static const char * const cci_i2c_groups[] = { + "gpio22", "gpio23", "gpio29", "gpio30", +}; +static const char * const prng_rosc_groups[] = { + "gpio22", "gpio23", +}; +static const char * const pbs6_groups[] = { + "gpio22", +}; +static const char * const phase_flag11_groups[] = { + "gpio22", +}; +static const char * const dac_calib9_groups[] = { + "gpio22", +}; +static const char * const atest_usb20_groups[] = { + "gpio22", +}; +static const char * const pbs7_groups[] = { + "gpio23", +}; +static const char * const phase_flag12_groups[] = { + "gpio23", +}; +static const char * const dac_calib10_groups[] = { + "gpio23", +}; +static const char * const atest_usb21_groups[] = { + "gpio23", +}; +static const char * const CCI_TIMER1_groups[] = { + "gpio24", +}; +static const char * const GCC_GP1_groups[] = { + "gpio24", "gpio86", +}; +static const char * const pbs8_groups[] = { + "gpio24", +}; +static const char * const phase_flag13_groups[] = { + "gpio24", +}; +static const char * const dac_calib11_groups[] = { + "gpio24", +}; +static const char * const atest_usb22_groups[] = { + "gpio24", +}; +static const char * const cci_async_groups[] = { + "gpio25", +}; +static const char * const CCI_TIMER0_groups[] = { + "gpio25", +}; +static const char * const pbs9_groups[] = { + "gpio25", +}; +static const char * const phase_flag14_groups[] = { + "gpio25", +}; +static const char * const dac_calib12_groups[] = { + "gpio25", +}; +static const char * const atest_usb23_groups[] = { + "gpio25", +}; +static const char * const pbs10_groups[] = { + "gpio26", +}; +static const char * const phase_flag15_groups[] = { + "gpio26", +}; +static const char * const dac_calib13_groups[] = { + "gpio26", +}; +static const char * const atest_usb2_groups[] = { + "gpio26", +}; +static const char * const vsense_trigger_groups[] = { + "gpio26", +}; +static const char * const qdss_cti_groups[] = { + "gpio27", "gpio28", "gpio72", "gpio73", "gpio96", "gpio97", +}; +static const char * const CCI_TIMER2_groups[] = { + "gpio28", +}; +static const char * const phase_flag16_groups[] = { + "gpio29", +}; +static const char * const dac_calib14_groups[] = { + "gpio29", +}; +static const char * const atest_char_groups[] = { + "gpio29", +}; +static const char * const phase_flag17_groups[] = { + "gpio30", +}; +static const char * const dac_calib15_groups[] = { + "gpio30", +}; +static const char * const atest_char0_groups[] = { + "gpio30", +}; +static const char * const GP_PDM0_groups[] = { + "gpio31", "gpio95", +}; +static const char * const phase_flag18_groups[] = { + "gpio31", +}; +static const char * const dac_calib16_groups[] = { + "gpio31", +}; +static const char * const atest_char1_groups[] = { + "gpio31", +}; +static const char * const CCI_TIMER3_groups[] = { + "gpio32", +}; +static const char * const GP_PDM1_groups[] = { + "gpio32", "gpio96", +}; +static const char * const phase_flag19_groups[] = { + "gpio32", +}; +static const char * const dac_calib17_groups[] = { + "gpio32", +}; +static const char * const atest_char2_groups[] = { + "gpio32", +}; +static const char * const GP_PDM2_groups[] = { + "gpio33", "gpio97", +}; +static const char * const phase_flag20_groups[] = { + "gpio33", +}; +static const char * const dac_calib18_groups[] = { + "gpio33", +}; +static const char * const atest_char3_groups[] = { + "gpio33", +}; +static const char * const phase_flag21_groups[] = { + "gpio35", +}; +static const char * const 
qdss_gpio14_groups[] = { + "gpio35", "gpio94", +}; +static const char * const phase_flag22_groups[] = { + "gpio36", +}; +static const char * const qdss_gpio15_groups[] = { + "gpio36", "gpio95", +}; +static const char * const NAV_GPIO_groups[] = { + "gpio42", "gpio47", "gpio52", "gpio95", "gpio96", "gpio97", "gpio106", + "gpio107", "gpio108", +}; +static const char * const phase_flag23_groups[] = { + "gpio43", +}; +static const char * const phase_flag24_groups[] = { + "gpio44", +}; +static const char * const phase_flag25_groups[] = { + "gpio45", +}; +static const char * const pbs14_groups[] = { + "gpio47", +}; +static const char * const vfr_1_groups[] = { + "gpio48", +}; +static const char * const pbs15_groups[] = { + "gpio48", +}; +static const char * const PA_INDICATOR_groups[] = { + "gpio49", +}; +static const char * const gsm1_tx_groups[] = { + "gpio53", +}; +static const char * const SSBI_WTR1_groups[] = { + "gpio59", "gpio60", +}; +static const char * const pll_clk_groups[] = { + "gpio61", +}; +static const char * const pll_bypassnl_groups[] = { + "gpio62", +}; +static const char * const pll_reset_groups[] = { + "gpio63", +}; +static const char * const phase_flag26_groups[] = { + "gpio63", +}; +static const char * const ddr_pxi0_groups[] = { + "gpio63", "gpio64", +}; +static const char * const gsm0_tx_groups[] = { + "gpio64", +}; +static const char * const phase_flag27_groups[] = { + "gpio64", +}; +static const char * const GCC_GP2_groups[] = { + "gpio69", "gpio107", +}; +static const char * const qdss_gpio12_groups[] = { + "gpio69", "gpio90", +}; +static const char * const ddr_pxi1_groups[] = { + "gpio69", "gpio70", +}; +static const char * const GCC_GP3_groups[] = { + "gpio70", "gpio106", +}; +static const char * const qdss_gpio13_groups[] = { + "gpio70", "gpio91", +}; +static const char * const dbg_out_groups[] = { + "gpio71", +}; +static const char * const uim2_data_groups[] = { + "gpio72", +}; +static const char * const uim2_clk_groups[] = { + "gpio73", +}; +static const char * const uim2_reset_groups[] = { + "gpio74", +}; +static const char * const uim2_present_groups[] = { + "gpio75", +}; +static const char * const uim1_data_groups[] = { + "gpio76", +}; +static const char * const uim1_clk_groups[] = { + "gpio77", +}; +static const char * const uim1_reset_groups[] = { + "gpio78", +}; +static const char * const uim1_present_groups[] = { + "gpio79", +}; +static const char * const dac_calib19_groups[] = { + "gpio80", +}; +static const char * const mdp_vsync_groups[] = { + "gpio81", "gpio96", "gpio97", +}; +static const char * const mdp_vsync_out_0_groups[] = { + "gpio81", +}; +static const char * const mdp_vsync_out_1_groups[] = { + "gpio81", +}; +static const char * const dac_calib20_groups[] = { + "gpio81", +}; +static const char * const dac_calib21_groups[] = { + "gpio82", +}; +static const char * const atest_bbrx1_groups[] = { + "gpio86", +}; +static const char * const pbs11_groups[] = { + "gpio87", +}; +static const char * const usb_phy_groups[] = { + "gpio89", +}; +static const char * const atest_bbrx0_groups[] = { + "gpio89", +}; +static const char * const mss_lte_groups[] = { + "gpio90", "gpio91", +}; +static const char * const pbs12_groups[] = { + "gpio90", +}; +static const char * const pbs13_groups[] = { + "gpio91", +}; +static const char * const wlan1_adc0_groups[] = { + "gpio94", +}; +static const char * const wlan1_adc1_groups[] = { + "gpio95", +}; +static const char * const sd_write_groups[] = { + "gpio96", +}; +static const char * const JITTER_BIST_groups[] = { + 
"gpio96", "gpio97", +}; +static const char * const atest_gpsadc_dtest0_native_groups[] = { + "gpio100", +}; +static const char * const atest_gpsadc_dtest1_native_groups[] = { + "gpio101", +}; +static const char * const phase_flag28_groups[] = { + "gpio102", +}; +static const char * const dac_calib22_groups[] = { + "gpio102", +}; +static const char * const ddr_pxi2_groups[] = { + "gpio102", "gpio103", +}; +static const char * const phase_flag29_groups[] = { + "gpio103", +}; +static const char * const dac_calib23_groups[] = { + "gpio103", +}; +static const char * const phase_flag30_groups[] = { + "gpio104", +}; +static const char * const dac_calib24_groups[] = { + "gpio104", +}; +static const char * const ddr_pxi3_groups[] = { + "gpio104", "gpio105", +}; +static const char * const phase_flag31_groups[] = { + "gpio105", +}; +static const char * const dac_calib25_groups[] = { + "gpio105", +}; + +static const struct msm_function khaje_functions[] = { + FUNCTION(gpio), + FUNCTION(qup0), + FUNCTION(usb2phy_ac), + FUNCTION(ddr_bist), + FUNCTION(m_voc), + FUNCTION(phase_flag0), + FUNCTION(qdss_gpio8), + FUNCTION(atest_tsens), + FUNCTION(mpm_pwr), + FUNCTION(phase_flag1), + FUNCTION(qdss_gpio9), + FUNCTION(atest_tsens2), + FUNCTION(phase_flag2), + FUNCTION(qdss_gpio10), + FUNCTION(dac_calib0), + FUNCTION(atest_usb10), + FUNCTION(phase_flag3), + FUNCTION(qdss_gpio11), + FUNCTION(dac_calib1), + FUNCTION(atest_usb11), + FUNCTION(qup1), + FUNCTION(CRI_TRNG0), + FUNCTION(phase_flag4), + FUNCTION(dac_calib2), + FUNCTION(atest_usb12), + FUNCTION(CRI_TRNG1), + FUNCTION(phase_flag5), + FUNCTION(dac_calib3), + FUNCTION(atest_usb13), + FUNCTION(qup2), + FUNCTION(phase_flag6), + FUNCTION(dac_calib4), + FUNCTION(atest_usb1), + FUNCTION(qup3), + FUNCTION(pbs_out), + FUNCTION(PLL_BIST), + FUNCTION(qdss_gpio), + FUNCTION(tsense_pwm), + FUNCTION(AGERA_PLL), + FUNCTION(pbs0), + FUNCTION(qdss_gpio0), + FUNCTION(pbs1), + FUNCTION(qdss_gpio1), + FUNCTION(qup4), + FUNCTION(tgu_ch0), + FUNCTION(tgu_ch1), + FUNCTION(qup5), + FUNCTION(tgu_ch2), + FUNCTION(phase_flag7), + FUNCTION(qdss_gpio4), + FUNCTION(dac_calib5), + FUNCTION(tgu_ch3), + FUNCTION(phase_flag8), + FUNCTION(qdss_gpio5), + FUNCTION(dac_calib6), + FUNCTION(phase_flag9), + FUNCTION(qdss_gpio6), + FUNCTION(dac_calib7), + FUNCTION(phase_flag10), + FUNCTION(qdss_gpio7), + FUNCTION(dac_calib8), + FUNCTION(SDC2_TB), + FUNCTION(CRI_TRNG), + FUNCTION(pbs2), + FUNCTION(qdss_gpio2), + FUNCTION(SDC1_TB), + FUNCTION(pbs3), + FUNCTION(qdss_gpio3), + FUNCTION(cam_mclk), + FUNCTION(pbs4), + FUNCTION(adsp_ext), + FUNCTION(pbs5), + FUNCTION(cci_i2c), + FUNCTION(prng_rosc), + FUNCTION(pbs6), + FUNCTION(phase_flag11), + FUNCTION(dac_calib9), + FUNCTION(atest_usb20), + FUNCTION(pbs7), + FUNCTION(phase_flag12), + FUNCTION(dac_calib10), + FUNCTION(atest_usb21), + FUNCTION(CCI_TIMER1), + FUNCTION(GCC_GP1), + FUNCTION(pbs8), + FUNCTION(phase_flag13), + FUNCTION(dac_calib11), + FUNCTION(atest_usb22), + FUNCTION(cci_async), + FUNCTION(CCI_TIMER0), + FUNCTION(pbs9), + FUNCTION(phase_flag14), + FUNCTION(dac_calib12), + FUNCTION(atest_usb23), + FUNCTION(pbs10), + FUNCTION(phase_flag15), + FUNCTION(dac_calib13), + FUNCTION(atest_usb2), + FUNCTION(vsense_trigger), + FUNCTION(qdss_cti), + FUNCTION(CCI_TIMER2), + FUNCTION(phase_flag16), + FUNCTION(dac_calib14), + FUNCTION(atest_char), + FUNCTION(phase_flag17), + FUNCTION(dac_calib15), + FUNCTION(atest_char0), + FUNCTION(GP_PDM0), + FUNCTION(phase_flag18), + FUNCTION(dac_calib16), + FUNCTION(atest_char1), + FUNCTION(CCI_TIMER3), + 
FUNCTION(GP_PDM1), + FUNCTION(phase_flag19), + FUNCTION(dac_calib17), + FUNCTION(atest_char2), + FUNCTION(GP_PDM2), + FUNCTION(phase_flag20), + FUNCTION(dac_calib18), + FUNCTION(atest_char3), + FUNCTION(phase_flag21), + FUNCTION(qdss_gpio14), + FUNCTION(phase_flag22), + FUNCTION(qdss_gpio15), + FUNCTION(NAV_GPIO), + FUNCTION(phase_flag23), + FUNCTION(phase_flag24), + FUNCTION(phase_flag25), + FUNCTION(pbs14), + FUNCTION(vfr_1), + FUNCTION(pbs15), + FUNCTION(PA_INDICATOR), + FUNCTION(gsm1_tx), + FUNCTION(SSBI_WTR1), + FUNCTION(pll_clk), + FUNCTION(pll_bypassnl), + FUNCTION(pll_reset), + FUNCTION(phase_flag26), + FUNCTION(ddr_pxi0), + FUNCTION(gsm0_tx), + FUNCTION(phase_flag27), + FUNCTION(GCC_GP2), + FUNCTION(qdss_gpio12), + FUNCTION(ddr_pxi1), + FUNCTION(GCC_GP3), + FUNCTION(qdss_gpio13), + FUNCTION(dbg_out), + FUNCTION(uim2_data), + FUNCTION(uim2_clk), + FUNCTION(uim2_reset), + FUNCTION(uim2_present), + FUNCTION(uim1_data), + FUNCTION(uim1_clk), + FUNCTION(uim1_reset), + FUNCTION(uim1_present), + FUNCTION(dac_calib19), + FUNCTION(mdp_vsync), + FUNCTION(mdp_vsync_out_0), + FUNCTION(mdp_vsync_out_1), + FUNCTION(dac_calib20), + FUNCTION(dac_calib21), + FUNCTION(atest_bbrx1), + FUNCTION(pbs11), + FUNCTION(usb_phy), + FUNCTION(atest_bbrx0), + FUNCTION(mss_lte), + FUNCTION(pbs12), + FUNCTION(pbs13), + FUNCTION(wlan1_adc0), + FUNCTION(wlan1_adc1), + FUNCTION(sd_write), + FUNCTION(JITTER_BIST), + FUNCTION(atest_gpsadc_dtest0_native), + FUNCTION(atest_gpsadc_dtest1_native), + FUNCTION(phase_flag28), + FUNCTION(dac_calib22), + FUNCTION(ddr_pxi2), + FUNCTION(phase_flag29), + FUNCTION(dac_calib23), + FUNCTION(phase_flag30), + FUNCTION(dac_calib24), + FUNCTION(ddr_pxi3), + FUNCTION(phase_flag31), + FUNCTION(dac_calib25), +}; + +/* Every pin is maintained as a single group, and missing or non-existing pin + * would be maintained as dummy group to synchronize pin group index with + * pin descriptor registered with pinctrl core. + * Clients would not be able to request these dummy pin groups. 
+ */ +static const struct msm_pingroup khaje_groups[] = { + [0] = PINGROUP(0, WEST, qup0, usb2phy_ac, m_voc, ddr_bist, NA, + phase_flag0, qdss_gpio8, atest_tsens, NA, 0x71000, 1), + [1] = PINGROUP(1, WEST, qup0, mpm_pwr, ddr_bist, NA, phase_flag1, + qdss_gpio9, atest_tsens2, NA, NA, 0, -1), + [2] = PINGROUP(2, WEST, qup0, ddr_bist, NA, phase_flag2, qdss_gpio10, + dac_calib0, atest_usb10, NA, NA, 0, -1), + [3] = PINGROUP(3, WEST, qup0, ddr_bist, NA, phase_flag3, qdss_gpio11, + dac_calib1, atest_usb11, NA, NA, 0x71000, 2), + [4] = PINGROUP(4, WEST, qup1, CRI_TRNG0, NA, phase_flag4, dac_calib2, + atest_usb12, NA, NA, NA, 0x71000, 3), + [5] = PINGROUP(5, WEST, qup1, CRI_TRNG1, NA, phase_flag5, dac_calib3, + atest_usb13, NA, NA, NA, 0, -1), + [6] = PINGROUP(6, WEST, qup2, NA, phase_flag6, dac_calib4, atest_usb1, + NA, NA, NA, NA, 0x71000, 4), + [7] = PINGROUP(7, WEST, qup2, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [8] = PINGROUP(8, EAST, qup3, pbs_out, PLL_BIST, NA, qdss_gpio, NA, + tsense_pwm, NA, NA, 0x71000, 0), + [9] = PINGROUP(9, EAST, qup3, pbs_out, PLL_BIST, NA, qdss_gpio, NA, NA, + NA, NA, 0, -1), + [10] = PINGROUP(10, EAST, qup3, AGERA_PLL, NA, pbs0, qdss_gpio0, NA, + NA, NA, NA, 0, -1), + [11] = PINGROUP(11, EAST, qup3, AGERA_PLL, NA, pbs1, qdss_gpio1, NA, + NA, NA, NA, 0x71000, 1), + [12] = PINGROUP(12, WEST, qup4, tgu_ch0, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [13] = PINGROUP(13, WEST, qup4, tgu_ch1, NA, NA, NA, NA, NA, NA, NA, + 0x71000, 5), + [14] = PINGROUP(14, WEST, qup5, tgu_ch2, NA, phase_flag7, qdss_gpio4, + dac_calib5, NA, NA, NA, 0x71000, 6), + [15] = PINGROUP(15, WEST, qup5, tgu_ch3, NA, phase_flag8, qdss_gpio5, + dac_calib6, NA, NA, NA, 0, -1), + [16] = PINGROUP(16, WEST, qup5, NA, phase_flag9, qdss_gpio6, + dac_calib7, NA, NA, NA, NA, 0, -1), + [17] = PINGROUP(17, WEST, qup5, NA, phase_flag10, qdss_gpio7, + dac_calib8, NA, NA, NA, NA, 0x71000, 7), + [18] = PINGROUP(18, EAST, SDC2_TB, CRI_TRNG, pbs2, qdss_gpio2, NA, NA, + NA, NA, NA, 0x71000, 2), + [19] = PINGROUP(19, EAST, SDC1_TB, pbs3, qdss_gpio3, NA, NA, NA, NA, + NA, NA, 0x71000, 3), + [20] = PINGROUP(20, EAST, cam_mclk, pbs4, qdss_gpio4, NA, NA, NA, NA, + NA, NA, 0, -1), + [21] = PINGROUP(21, EAST, cam_mclk, adsp_ext, pbs5, qdss_gpio5, NA, NA, + NA, NA, NA, 0, -1), + [22] = PINGROUP(22, EAST, cci_i2c, prng_rosc, NA, pbs6, phase_flag11, + qdss_gpio6, dac_calib9, atest_usb20, NA, 0, -1), + [23] = PINGROUP(23, EAST, cci_i2c, prng_rosc, NA, pbs7, phase_flag12, + qdss_gpio7, dac_calib10, atest_usb21, NA, 0, -1), + [24] = PINGROUP(24, EAST, CCI_TIMER1, GCC_GP1, NA, pbs8, phase_flag13, + qdss_gpio8, dac_calib11, atest_usb22, NA, 0x71000, 4), + [25] = PINGROUP(25, EAST, cci_async, CCI_TIMER0, NA, pbs9, + phase_flag14, qdss_gpio9, dac_calib12, atest_usb23, NA, + 0x71000, 5), + [26] = PINGROUP(26, EAST, NA, pbs10, phase_flag15, qdss_gpio10, + dac_calib13, atest_usb2, vsense_trigger, NA, NA, 0, -1), + [27] = PINGROUP(27, EAST, cam_mclk, qdss_cti, NA, NA, NA, NA, NA, NA, + NA, 0x71000, 6), + [28] = PINGROUP(28, EAST, cam_mclk, CCI_TIMER2, qdss_cti, NA, NA, NA, + NA, NA, NA, 0x71000, 7), + [29] = PINGROUP(29, EAST, cci_i2c, NA, phase_flag16, dac_calib14, + atest_char, NA, NA, NA, NA, 0, -1), + [30] = PINGROUP(30, EAST, cci_i2c, NA, phase_flag17, dac_calib15, + atest_char0, NA, NA, NA, NA, 0, -1), + [31] = PINGROUP(31, EAST, GP_PDM0, NA, phase_flag18, dac_calib16, + atest_char1, NA, NA, NA, NA, 0x71000, 8), + [32] = PINGROUP(32, EAST, CCI_TIMER3, GP_PDM1, NA, phase_flag19, + dac_calib17, atest_char2, NA, NA, NA, 0x71000, 9), + 
[33] = PINGROUP(33, EAST, GP_PDM2, NA, phase_flag20, dac_calib18, + atest_char3, NA, NA, NA, NA, 0x71000, 10), + [34] = PINGROUP(34, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x71000, 11), + [35] = PINGROUP(35, EAST, NA, phase_flag21, qdss_gpio14, NA, NA, NA, + NA, NA, NA, 0x71000, 12), + [36] = PINGROUP(36, EAST, NA, phase_flag22, qdss_gpio15, NA, NA, NA, + NA, NA, NA, 0x71000, 13), + [37] = PINGROUP(37, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [38] = PINGROUP(38, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [39] = PINGROUP(39, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x71000, 14), + [40] = PINGROUP(40, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [41] = PINGROUP(41, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [42] = PINGROUP(42, EAST, NA, NAV_GPIO, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [43] = PINGROUP(43, EAST, NA, phase_flag23, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [44] = PINGROUP(44, EAST, NA, NA, phase_flag24, NA, NA, NA, NA, NA, NA, + 0, -1), + [45] = PINGROUP(45, EAST, NA, NA, phase_flag25, NA, NA, NA, NA, NA, NA, + 0, -1), + [46] = PINGROUP(46, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x71000, 15), + [47] = PINGROUP(47, EAST, NA, NAV_GPIO, NA, pbs14, NA, NA, NA, NA, NA, + 0, -1), + [48] = PINGROUP(48, EAST, NA, vfr_1, NA, pbs15, NA, NA, NA, NA, NA, + 0, -1), + [49] = PINGROUP(49, EAST, NA, PA_INDICATOR, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [50] = PINGROUP(50, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [51] = PINGROUP(51, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [52] = PINGROUP(52, EAST, NA, NAV_GPIO, pbs_out, NA, NA, NA, NA, NA, + NA, 0, -1), + [53] = PINGROUP(53, EAST, NA, gsm1_tx, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [54] = PINGROUP(54, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [55] = PINGROUP(55, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [56] = PINGROUP(56, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [57] = PINGROUP(57, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [58] = PINGROUP(58, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [59] = PINGROUP(59, EAST, NA, SSBI_WTR1, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [60] = PINGROUP(60, EAST, NA, SSBI_WTR1, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [61] = PINGROUP(61, EAST, NA, pll_clk, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [62] = PINGROUP(62, EAST, NA, pll_bypassnl, NA, NA, NA, NA, NA, NA, NA, + 0x71000, 16), + [63] = PINGROUP(63, EAST, pll_reset, NA, phase_flag26, ddr_pxi0, NA, + NA, NA, NA, NA, 0x71000, 17), + [64] = PINGROUP(64, EAST, gsm0_tx, NA, phase_flag27, ddr_pxi0, NA, NA, + NA, NA, NA, 0x71000, 18), + [65] = PINGROUP(65, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x71000, 8), + [66] = PINGROUP(66, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x71000, 9), + [67] = PINGROUP(67, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x71000, 10), + [68] = PINGROUP(68, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [69] = PINGROUP(69, WEST, qup1, GCC_GP2, qdss_gpio12, ddr_pxi1, NA, NA, + NA, NA, NA, 0x71000, 11), + [70] = PINGROUP(70, WEST, qup1, GCC_GP3, qdss_gpio13, ddr_pxi1, NA, NA, + NA, NA, NA, 0x71000, 12), + [71] = PINGROUP(71, WEST, qup2, dbg_out, NA, NA, NA, NA, NA, NA, NA, + 0x71000, 18), + [72] = PINGROUP(72, SOUTH, uim2_data, qdss_cti, NA, NA, NA, NA, NA, NA, + NA, 0x71000, 3), + [73] = PINGROUP(73, SOUTH, uim2_clk, qdss_cti, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [74] = PINGROUP(74, SOUTH, uim2_reset, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [75] = PINGROUP(75, SOUTH, uim2_present, NA, NA, NA, NA, NA, NA, NA, 
+ NA, 0x71000, 4), + [76] = PINGROUP(76, SOUTH, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [77] = PINGROUP(77, SOUTH, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [78] = PINGROUP(78, SOUTH, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [79] = PINGROUP(79, SOUTH, uim1_present, NA, NA, NA, NA, NA, NA, NA, + NA, 0x71000, 5), + [80] = PINGROUP(80, WEST, qup2, dac_calib19, NA, NA, NA, NA, NA, NA, + NA, 0x71000, 13), + [81] = PINGROUP(81, WEST, mdp_vsync_out_0, mdp_vsync_out_1, mdp_vsync, + dac_calib20, NA, NA, NA, NA, NA, 0x71000, 14), + [82] = PINGROUP(82, WEST, qup0, dac_calib21, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [83] = PINGROUP(83, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x71000, 15), + [84] = PINGROUP(84, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x71000, 16), + [85] = PINGROUP(85, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x71000, 17), + [86] = PINGROUP(86, WEST, qup0, GCC_GP1, NA, atest_bbrx1, NA, NA, NA, + NA, NA, 0, -1), + [87] = PINGROUP(87, EAST, pbs11, qdss_gpio11, NA, NA, NA, NA, NA, NA, + NA, 0x71000, 19), + [88] = PINGROUP(88, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x71000, 20), + [89] = PINGROUP(89, WEST, usb_phy, atest_bbrx0, NA, NA, NA, NA, NA, NA, + NA, 0x71000, 19), + [90] = PINGROUP(90, EAST, mss_lte, pbs12, qdss_gpio12, NA, NA, NA, NA, + NA, NA, 0, -1), + [91] = PINGROUP(91, EAST, mss_lte, pbs13, qdss_gpio13, NA, NA, NA, NA, + NA, NA, 0x71000, 21), + [92] = PINGROUP(92, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [93] = PINGROUP(93, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x71000, 20), + [94] = PINGROUP(94, WEST, NA, NA, qdss_gpio14, wlan1_adc0, NA, NA, NA, + NA, NA, 0x71000, 21), + [95] = PINGROUP(95, WEST, NAV_GPIO, GP_PDM0, NA, qdss_gpio15, + wlan1_adc1, NA, NA, NA, NA, 0x71000, 22), + [96] = PINGROUP(96, WEST, qup4, NAV_GPIO, mdp_vsync, GP_PDM1, sd_write, + JITTER_BIST, NA, qdss_cti, qdss_cti, 0x71000, 23), + [97] = PINGROUP(97, WEST, qup4, NAV_GPIO, mdp_vsync, GP_PDM2, + JITTER_BIST, NA, qdss_cti, qdss_cti, NA, 0x71000, 24), + [98] = PINGROUP(98, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [99] = PINGROUP(99, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x71000, 6), + [100] = PINGROUP(100, SOUTH, atest_gpsadc_dtest0_native, NA, NA, NA, + NA, NA, NA, NA, NA, 0, -1), + [101] = PINGROUP(101, SOUTH, atest_gpsadc_dtest1_native, NA, NA, NA, + NA, NA, NA, NA, NA, 0, -1), + [102] = PINGROUP(102, SOUTH, phase_flag28, dac_calib22, ddr_pxi2, NA, + NA, NA, NA, NA, NA, 0x71000, 7), + [103] = PINGROUP(103, SOUTH, phase_flag29, dac_calib23, ddr_pxi2, NA, + NA, NA, NA, NA, NA, 0x71000, 8), + [104] = PINGROUP(104, SOUTH, phase_flag30, qdss_gpio1, dac_calib24, + ddr_pxi3, NA, NA, NA, NA, NA, 0x71000, 9), + [105] = PINGROUP(105, SOUTH, phase_flag31, qdss_gpio, dac_calib25, + ddr_pxi3, NA, NA, NA, NA, NA, 0x71000, 10), + [106] = PINGROUP(106, SOUTH, NAV_GPIO, GCC_GP3, qdss_gpio, NA, NA, NA, + NA, NA, NA, 0x71000, 11), + [107] = PINGROUP(107, SOUTH, NAV_GPIO, GCC_GP2, qdss_gpio0, NA, NA, NA, + NA, NA, NA, 0x71000, 12), + [108] = PINGROUP(108, SOUTH, NAV_GPIO, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [109] = PINGROUP(109, SOUTH, qdss_gpio2, NA, NA, NA, NA, NA, NA, NA, + NA, 0x71000, 13), + [110] = PINGROUP(110, SOUTH, qdss_gpio3, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [111] = PINGROUP(111, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [112] = PINGROUP(112, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x71000, 14), + [113] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x175000, 15, 0), + [114] = 
SDC_QDSD_PINGROUP(sdc1_clk, 0x175000, 13, 6), + [115] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x175000, 11, 3), + [116] = SDC_QDSD_PINGROUP(sdc1_data, 0x175000, 9, 0), + [117] = SDC_QDSD_PINGROUP(sdc2_clk, 0x573000, 14, 6), + [118] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x573000, 11, 3), + [119] = SDC_QDSD_PINGROUP(sdc2_data, 0x573000, 9, 0), + [120] = UFS_RESET(ufs_reset, 0x178000), +}; + +static const int khaje_reserved_gpios[] = { + 0, 1, 2, 3, -1 +}; + +static const struct msm_pinctrl_soc_data khaje_pinctrl = { + .pins = khaje_pins, + .npins = ARRAY_SIZE(khaje_pins), + .functions = khaje_functions, + .nfunctions = ARRAY_SIZE(khaje_functions), + .groups = khaje_groups, + .ngroups = ARRAY_SIZE(khaje_groups), + .reserved_gpios = khaje_reserved_gpios, + .ngpios = 113, +}; + +static int khaje_pinctrl_probe(struct platform_device *pdev) +{ + return msm_pinctrl_probe(pdev, &khaje_pinctrl); +} + +static const struct of_device_id khaje_pinctrl_of_match[] = { + { .compatible = "qcom,khaje-pinctrl", }, + { }, +}; + +static struct platform_driver khaje_pinctrl_driver = { + .driver = { + .name = "khaje-pinctrl", + .of_match_table = khaje_pinctrl_of_match, + }, + .probe = khaje_pinctrl_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init khaje_pinctrl_init(void) +{ + return platform_driver_register(&khaje_pinctrl_driver); +} +arch_initcall(khaje_pinctrl_init); + +static void __exit khaje_pinctrl_exit(void) +{ + platform_driver_unregister(&khaje_pinctrl_driver); +} +module_exit(khaje_pinctrl_exit); + +MODULE_DESCRIPTION("QTI khaje pinctrl driver"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(of, khaje_pinctrl_of_match); diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c index ad15e0d3fe71..5615d01faf4e 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c @@ -356,6 +356,7 @@ static int ipa_uc_ntn_alloc_conn_smmu_info(struct ipa_ntn_setup_info *dest, source->buff_pool_base_sgt); if (result) { kfree(dest->data_buff_list); + dest->data_buff_list = NULL; return result; } @@ -363,6 +364,7 @@ static int ipa_uc_ntn_alloc_conn_smmu_info(struct ipa_ntn_setup_info *dest, source->ring_base_sgt); if (result) { kfree(dest->data_buff_list); + dest->data_buff_list = NULL; ipa_smmu_free_sgt(&dest->buff_pool_base_sgt); return result; } @@ -373,6 +375,7 @@ static int ipa_uc_ntn_alloc_conn_smmu_info(struct ipa_ntn_setup_info *dest, static void ipa_uc_ntn_free_conn_smmu_info(struct ipa_ntn_setup_info *params) { kfree(params->data_buff_list); + params->data_buff_list = NULL; ipa_smmu_free_sgt(&params->buff_pool_base_sgt); ipa_smmu_free_sgt(&params->ring_base_sgt); } diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c index 26f25ed3614f..ccbd39c7a0b2 100644 --- a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c +++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
*/ #include @@ -77,7 +77,8 @@ int ipa_rm_dep_graph_get_resource( goto bail; } resource_index = ipa_rm_dep_get_index(resource_name); - if (resource_index == IPA_RM_INDEX_INVALID) { + if (resource_index == IPA_RM_INDEX_INVALID || + resource_index >= IPA_RM_RESOURCE_MAX) { result = -EINVAL; goto bail; } @@ -109,7 +110,8 @@ int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph, goto bail; } resource_index = ipa_rm_dep_get_index(resource->name); - if (resource_index == IPA_RM_INDEX_INVALID) { + if (resource_index == IPA_RM_INDEX_INVALID || + resource_index >= IPA_RM_RESOURCE_MAX) { result = -EINVAL; goto bail; } diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c index 7e8e083d8715..9a528dea30cf 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2018, 2020-2021, The Linux Foundation. All rights reserved. */ #include @@ -654,15 +654,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) sizeof(struct ipa_ioc_nat_dma_cmd) + pre_entry * sizeof(struct ipa_ioc_nat_dma_one); param = memdup_user((const void __user *)arg, pyld_sz); - if (!param) { + if (IS_ERR(param)) { retval = -ENOMEM; break; } - if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { - retval = -EFAULT; - break; - } /* add check in case user-space module compromised */ if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries != pre_entry)) { @@ -702,14 +698,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) sizeof(struct ipa_ioc_add_hdr) + pre_entry * sizeof(struct ipa_hdr_add); param = memdup_user((const void __user *)arg, pyld_sz); - if (!param) { + if (IS_ERR(param)) { retval = -ENOMEM; break; } - if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { - retval = -EFAULT; - break; - } /* add check in case user-space module compromised */ if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs != pre_entry)) { @@ -742,14 +734,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) sizeof(struct ipa_ioc_del_hdr) + pre_entry * sizeof(struct ipa_hdr_del); param = memdup_user((const void __user *)arg, pyld_sz); - if (!param) { + if (IS_ERR(param)) { retval = -ENOMEM; break; } - if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { - retval = -EFAULT; - break; - } /* add check in case user-space module compromised */ if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls != pre_entry)) { @@ -782,14 +770,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) sizeof(struct ipa_ioc_add_rt_rule) + pre_entry * sizeof(struct ipa_rt_rule_add); param = memdup_user((const void __user *)arg, pyld_sz); - if (!param) { + if (IS_ERR(param)) { retval = -ENOMEM; break; } - if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { - retval = -EFAULT; - break; - } /* add check in case user-space module compromised */ if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules != pre_entry)) { @@ -823,14 +807,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) sizeof(struct ipa_ioc_mdfy_rt_rule) + pre_entry * sizeof(struct ipa_rt_rule_mdfy); param = memdup_user((const void __user *)arg, pyld_sz); - if (!param) { + if (IS_ERR(param)) { retval = -ENOMEM; break; } - if (copy_from_user(param, (const void __user *)arg, pyld_sz)) 
{ - retval = -EFAULT; - break; - } /* add check in case user-space module compromised */ if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules != pre_entry)) { @@ -863,14 +843,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) sizeof(struct ipa_ioc_del_rt_rule) + pre_entry * sizeof(struct ipa_rt_rule_del); param = memdup_user((const void __user *)arg, pyld_sz); - if (!param) { + if (IS_ERR(param)) { retval = -ENOMEM; break; } - if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { - retval = -EFAULT; - break; - } /* add check in case user-space module compromised */ if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls != pre_entry)) { @@ -902,14 +878,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) sizeof(struct ipa_ioc_add_flt_rule) + pre_entry * sizeof(struct ipa_flt_rule_add); param = memdup_user((const void __user *)arg, pyld_sz); - if (!param) { + if (IS_ERR(param)) { retval = -ENOMEM; break; } - if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { - retval = -EFAULT; - break; - } /* add check in case user-space module compromised */ if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules != pre_entry)) { @@ -943,14 +915,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) sizeof(struct ipa_ioc_del_flt_rule) + pre_entry * sizeof(struct ipa_flt_rule_del); param = memdup_user((const void __user *)arg, pyld_sz); - if (!param) { + if (IS_ERR(param)) { retval = -ENOMEM; break; } - if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { - retval = -EFAULT; - break; - } /* add check in case user-space module compromised */ if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls != pre_entry)) { @@ -983,14 +951,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) sizeof(struct ipa_ioc_mdfy_flt_rule) + pre_entry * sizeof(struct ipa_flt_rule_mdfy); param = memdup_user((const void __user *)arg, pyld_sz); - if (!param) { + if (IS_ERR(param)) { retval = -ENOMEM; break; } - if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { - retval = -EFAULT; - break; - } /* add check in case user-space module compromised */ if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules != pre_entry)) { @@ -1120,14 +1084,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) pyld_sz = sz + pre_entry * sizeof(struct ipa_ioc_tx_intf_prop); param = memdup_user((const void __user *)arg, pyld_sz); - if (!param) { + if (IS_ERR(param)) { retval = -ENOMEM; break; } - if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { - retval = -EFAULT; - break; - } /* add check in case user-space module compromised */ if (unlikely(((struct ipa_ioc_query_intf_tx_props *) param)->num_tx_props @@ -1166,14 +1126,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) pyld_sz = sz + pre_entry * sizeof(struct ipa_ioc_rx_intf_prop); param = memdup_user((const void __user *)arg, pyld_sz); - if (!param) { + if (IS_ERR(param)) { retval = -ENOMEM; break; } - if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { - retval = -EFAULT; - break; - } /* add check in case user-space module compromised */ if (unlikely(((struct ipa_ioc_query_intf_rx_props *) param)->num_rx_props != pre_entry)) { @@ -1211,14 +1167,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) pyld_sz = sz + pre_entry * sizeof(struct ipa_ioc_ext_intf_prop); param = 
memdup_user((const void __user *)arg, pyld_sz); - if (!param) { + if (IS_ERR(param)) { retval = -ENOMEM; break; } - if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { - retval = -EFAULT; - break; - } /* add check in case user-space module compromised */ if (unlikely(((struct ipa_ioc_query_intf_ext_props *) param)->num_ext_props != pre_entry)) { @@ -1249,14 +1201,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) pyld_sz = sizeof(struct ipa_msg_meta) + pre_entry; param = memdup_user((const void __user *)arg, pyld_sz); - if (!param) { + if (IS_ERR(param)) { retval = -ENOMEM; break; } - if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { - retval = -EFAULT; - break; - } /* add check in case user-space module compromised */ if (unlikely(((struct ipa_msg_meta *)param)->msg_len != pre_entry)) { @@ -1389,14 +1337,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) sizeof(struct ipa_ioc_add_hdr_proc_ctx) + pre_entry * sizeof(struct ipa_hdr_proc_ctx_add); param = memdup_user((const void __user *)arg, pyld_sz); - if (!param) { + if (IS_ERR(param)) { retval = -ENOMEM; break; } - if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { - retval = -EFAULT; - break; - } /* add check in case user-space module compromised */ if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *) param)->num_proc_ctxs != pre_entry)) { @@ -1428,14 +1372,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) sizeof(struct ipa_ioc_del_hdr_proc_ctx) + pre_entry * sizeof(struct ipa_hdr_proc_ctx_del); param = memdup_user((const void __user *)arg, pyld_sz); - if (!param) { + if (IS_ERR(param)) { retval = -ENOMEM; break; } - if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { - retval = -EFAULT; - break; - } /* add check in case user-space module compromised */ if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *) param)->num_hdls != pre_entry)) { @@ -1490,7 +1430,8 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return -ENOTTY; } - kfree(param); + if (!IS_ERR(param)) + kfree(param); IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c index b3f66161f263..417de103f49b 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2018, 2020-2021, The Linux Foundation. All rights reserved. */ #include @@ -2600,6 +2600,7 @@ static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb, } } + sys->free_skb(skb); out: return 0; } diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c index 418a142a2911..e642ed081016 100644 --- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2014-2018, 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2018, 2020-2021, The Linux Foundation. All rights reserved. 
*/ /* @@ -2837,6 +2837,16 @@ static int rmnet_ipa_query_tethering_stats_modem( struct ipa_get_data_stats_resp_msg_v01 *resp; int pipe_len, rc; + if (data != NULL) { + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + data->tetherIface[IFNAMSIZ-1] = '\0'; + } else if (!reset) { + /* only reset can have data == NULL*/ + IPAWANERR("query without allocate tether_stats strucutre\n"); + return -EINVAL; + } + req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01), GFP_KERNEL); if (!req) { @@ -2860,7 +2870,13 @@ static int rmnet_ipa_query_tethering_stats_modem( IPAWANDBG("reset the pipe stats\n"); } else { /* print tethered-client enum */ - IPAWANDBG_LOW("Tethered-client enum(%d)\n", data->ipa_client); + if (data == NULL) { + kfree(req); + kfree(resp); + return -EINVAL; + } + IPAWANDBG_LOW("Tethered-client enum(%d)\n", + data->ipa_client); } rc = ipa_qmi_get_data_stats(req, resp); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index 7f03e6d17bc1..eb4925edec70 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. */ #include @@ -393,6 +393,7 @@ static void ipa3_active_clients_log_destroy(void) kfree(active_clients_table_buf); active_clients_table_buf = NULL; kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]); + ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = NULL; ipa3_ctx->ipa3_active_clients_logging.log_head = 0; ipa3_ctx->ipa3_active_clients_logging.log_tail = IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1; @@ -708,6 +709,7 @@ static int ipa3_ioctl_add_rt_rule_v2(unsigned long arg) u32 pyld_sz; u64 uptr = 0; u8 *param = NULL; + u8 *param2 = NULL; u8 *kptr = NULL; if (copy_from_user(header, (const void __user *)arg, @@ -746,6 +748,24 @@ static int ipa3_ioctl_add_rt_rule_v2(unsigned long arg) retval = -EFAULT; goto free_param_kptr; } + + param2 = memdup_user((const void __user *)arg, + sizeof(struct ipa_ioc_add_rt_rule_v2)); + if (IS_ERR(param2)) { + retval = -EFAULT; + goto free_param_kptr; + } + + + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_rt_rule_v2 *)param2)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_rt_rule_v2 *)param2)-> + num_rules, pre_entry); + retval = -EFAULT; + goto free_param_kptr; + } /* alloc kernel pointer with actual payload size */ kptr = kzalloc(pyld_sz, GFP_KERNEL); if (!kptr) { @@ -785,6 +805,8 @@ static int ipa3_ioctl_add_rt_rule_v2(unsigned long arg) free_param_kptr: if (!IS_ERR(param)) kfree(param); + if (!IS_ERR(param2)) + kfree(param2); kfree(kptr); return retval; @@ -800,6 +822,7 @@ static int ipa3_ioctl_add_rt_rule_ext_v2(unsigned long arg) u32 pyld_sz; u64 uptr = 0; u8 *param = NULL; + u8 *param2 = NULL; u8 *kptr = NULL; if (copy_from_user(header, @@ -841,6 +864,24 @@ static int ipa3_ioctl_add_rt_rule_ext_v2(unsigned long arg) retval = -EFAULT; goto free_param_kptr; } + + param2 = memdup_user((const void __user *)arg, + sizeof(struct ipa_ioc_add_rt_rule_ext_v2)); + if (IS_ERR(param2)) { + retval = -EFAULT; + goto free_param_kptr; + } + + + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_rt_rule_ext_v2 *)param2)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct 
ipa_ioc_add_rt_rule_ext_v2 *)param2)-> + num_rules, pre_entry); + retval = -EFAULT; + goto free_param_kptr; + } /* alloc kernel pointer with actual payload size */ kptr = kzalloc(pyld_sz, GFP_KERNEL); if (!kptr) { @@ -882,6 +923,8 @@ static int ipa3_ioctl_add_rt_rule_ext_v2(unsigned long arg) free_param_kptr: if (!IS_ERR(param)) kfree(param); + if (!IS_ERR(param2)) + kfree(param2); kfree(kptr); return retval; @@ -897,6 +940,7 @@ static int ipa3_ioctl_add_rt_rule_after_v2(unsigned long arg) u32 pyld_sz; u64 uptr = 0; u8 *param = NULL; + u8 *param2 = NULL; u8 *kptr = NULL; if (copy_from_user(header, (const void __user *)arg, @@ -937,6 +981,23 @@ static int ipa3_ioctl_add_rt_rule_after_v2(unsigned long arg) retval = -EFAULT; goto free_param_kptr; } + + param2 = memdup_user((const void __user *)arg, + sizeof(struct ipa_ioc_add_rt_rule_after_v2)); + if (IS_ERR(param2)) { + retval = -EFAULT; + goto free_param_kptr; + } + + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_rt_rule_after_v2 *)param2)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_rt_rule_after_v2 *)param2)-> + num_rules, pre_entry); + retval = -EFAULT; + goto free_param_kptr; + } /* alloc kernel pointer with actual payload size */ kptr = kzalloc(pyld_sz, GFP_KERNEL); if (!kptr) { @@ -976,6 +1037,8 @@ static int ipa3_ioctl_add_rt_rule_after_v2(unsigned long arg) free_param_kptr: if (!IS_ERR(param)) kfree(param); + if (!IS_ERR(param2)) + kfree(param2); kfree(kptr); return retval; @@ -991,6 +1054,7 @@ static int ipa3_ioctl_mdfy_rt_rule_v2(unsigned long arg) u32 pyld_sz; u64 uptr = 0; u8 *param = NULL; + u8 *param2 = NULL; u8 *kptr = NULL; if (copy_from_user(header, (const void __user *)arg, @@ -1031,6 +1095,23 @@ static int ipa3_ioctl_mdfy_rt_rule_v2(unsigned long arg) retval = -EFAULT; goto free_param_kptr; } + + param2 = memdup_user((const void __user *)arg, + sizeof(struct ipa_ioc_mdfy_rt_rule_v2)); + if (IS_ERR(param2)) { + retval = -EFAULT; + goto free_param_kptr; + } + + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_mdfy_rt_rule_v2 *)param2)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_mdfy_rt_rule_v2 *)param2)-> + num_rules, pre_entry); + retval = -EFAULT; + goto free_param_kptr; + } /* alloc kernel pointer with actual payload size */ kptr = kzalloc(pyld_sz, GFP_KERNEL); if (!kptr) { @@ -1070,6 +1151,8 @@ static int ipa3_ioctl_mdfy_rt_rule_v2(unsigned long arg) free_param_kptr: if (!IS_ERR(param)) kfree(param); + if (!IS_ERR(param2)) + kfree(param2); kfree(kptr); return retval; @@ -1085,6 +1168,7 @@ static int ipa3_ioctl_add_flt_rule_v2(unsigned long arg) u32 pyld_sz; u64 uptr = 0; u8 *param = NULL; + u8 *param2 = NULL; u8 *kptr = NULL; if (copy_from_user(header, (const void __user *)arg, @@ -1124,6 +1208,23 @@ static int ipa3_ioctl_add_flt_rule_v2(unsigned long arg) retval = -EFAULT; goto free_param_kptr; } + + param2 = memdup_user((const void __user *)arg, + sizeof(struct ipa_ioc_add_flt_rule_v2)); + if (IS_ERR(param2)) { + retval = -EFAULT; + goto free_param_kptr; + } + + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_flt_rule_v2 *)param2)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_flt_rule_v2 *)param2)-> + num_rules, pre_entry); + retval = -EFAULT; + goto free_param_kptr; + } /* alloc kernel pointer with actual payload size */ kptr = kzalloc(pyld_sz, GFP_KERNEL); if (!kptr) { @@ 
-1162,6 +1263,8 @@ static int ipa3_ioctl_add_flt_rule_v2(unsigned long arg) free_param_kptr: if (!IS_ERR(param)) kfree(param); + if (!IS_ERR(param2)) + kfree(param2); kfree(kptr); return retval; @@ -1177,6 +1280,7 @@ static int ipa3_ioctl_add_flt_rule_after_v2(unsigned long arg) u32 pyld_sz; u64 uptr = 0; u8 *param = NULL; + u8 *param2 = NULL; u8 *kptr = NULL; if (copy_from_user(header, (const void __user *)arg, @@ -1217,6 +1321,23 @@ static int ipa3_ioctl_add_flt_rule_after_v2(unsigned long arg) retval = -EFAULT; goto free_param_kptr; } + + param2 = memdup_user((const void __user *)arg, + sizeof(struct ipa_ioc_add_flt_rule_after_v2)); + if (IS_ERR(param2)) { + retval = -EFAULT; + goto free_param_kptr; + } + + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_flt_rule_after_v2 *)param2)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_flt_rule_after_v2 *)param2)-> + num_rules, pre_entry); + retval = -EFAULT; + goto free_param_kptr; + } /* alloc kernel pointer with actual payload size */ kptr = kzalloc(pyld_sz, GFP_KERNEL); if (!kptr) { @@ -1256,6 +1377,8 @@ static int ipa3_ioctl_add_flt_rule_after_v2(unsigned long arg) free_param_kptr: if (!IS_ERR(param)) kfree(param); + if (!IS_ERR(param2)) + kfree(param2); kfree(kptr); return retval; @@ -1271,6 +1394,7 @@ static int ipa3_ioctl_mdfy_flt_rule_v2(unsigned long arg) u32 pyld_sz; u64 uptr = 0; u8 *param = NULL; + u8 *param2 = NULL; u8 *kptr = NULL; if (copy_from_user(header, (const void __user *)arg, @@ -1311,6 +1435,23 @@ static int ipa3_ioctl_mdfy_flt_rule_v2(unsigned long arg) retval = -EFAULT; goto free_param_kptr; } + + param2 = memdup_user((const void __user *)arg, + sizeof(struct ipa_ioc_mdfy_flt_rule_v2)); + if (IS_ERR(param2)) { + retval = -EFAULT; + goto free_param_kptr; + } + + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_mdfy_flt_rule_v2 *)param2)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_mdfy_flt_rule_v2 *)param2)-> + num_rules, pre_entry); + retval = -EFAULT; + goto free_param_kptr; + } /* alloc kernel pointer with actual payload size */ kptr = kzalloc(pyld_sz, GFP_KERNEL); if (!kptr) { @@ -1350,6 +1491,8 @@ static int ipa3_ioctl_mdfy_flt_rule_v2(unsigned long arg) free_param_kptr: if (!IS_ERR(param)) kfree(param); + if (!IS_ERR(param2)) + kfree(param2); kfree(kptr); return retval; @@ -7121,8 +7264,10 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, fail_init_mem_partition: fail_bind: kfree(ipa3_ctx->ctrl); + ipa3_ctx->ctrl = NULL; fail_mem_ctrl: kfree(ipa3_ctx->ipa_tz_unlock_reg); + ipa3_ctx->ipa_tz_unlock_reg = NULL; fail_tz_unlock_reg: if (ipa3_ctx->logbuf) ipc_log_context_destroy(ipa3_ctx->logbuf); @@ -7561,6 +7706,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev, IPAERR("failed to read register addresses\n"); kfree(ipa_tz_unlock_reg); kfree(ipa_drv_res->ipa_tz_unlock_reg); + ipa_drv_res->ipa_tz_unlock_reg = NULL; return -EFAULT; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c index 4d771cb56b6a..5ba3830ccd83 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -73,6 +73,11 @@ int ipa3_enable_data_path(u32 clnt_hdl) holb_cfg.tmr_val = IPA_HOLB_TMR_VAL; else holb_cfg.tmr_val = IPA_HOLB_TMR_VAL_4_5; + } else if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5 && + ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ && + ep->client == IPA_CLIENT_USB_CONS) { + holb_cfg.en = IPA_HOLB_TMR_EN; + holb_cfg.tmr_val = IPA_HOLB_TMR_VAL_4_5; } else { holb_cfg.en = IPA_HOLB_TMR_DIS; holb_cfg.tmr_val = 0; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c index 327122bd9c0d..bb96cfb6be6a 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. */ #ifdef CONFIG_DEBUG_FS @@ -1249,8 +1249,9 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf, "lan_rx_empty=%u\n" "lan_repl_rx_empty=%u\n" "flow_enable=%u\n" - "flow_disable=%u\n", - "rx_page_drop_cnt=%u\n", + "flow_disable=%u\n" + "rx_page_drop_cnt=%u\n" + "zero_len_frag_pkt_cnt=%u\n", ipa3_ctx->stats.tx_sw_pkts, ipa3_ctx->stats.tx_hw_pkts, ipa3_ctx->stats.tx_non_linear, @@ -1267,7 +1268,8 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf, ipa3_ctx->stats.lan_repl_rx_empty, ipa3_ctx->stats.flow_enable, ipa3_ctx->stats.flow_disable, - ipa3_ctx->stats.rx_page_drop_cnt); + ipa3_ctx->stats.rx_page_drop_cnt, + ipa3_ctx->stats.zero_len_frag_pkt_cnt); cnt += nbytes; for (i = 0; i < IPAHAL_PKT_STATUS_EXCEPTION_MAX; i++) { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c index 5d6191ebd1d4..12ad78e3607b 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c @@ -283,19 +283,21 @@ static void ipa3_send_nop_desc(struct work_struct *work) return; } list_add_tail(&tx_pkt->link, &sys->head_desc_list); - sys->nop_pending = false; memset(&nop_xfer, 0, sizeof(nop_xfer)); nop_xfer.type = GSI_XFER_ELEM_NOP; nop_xfer.flags = GSI_XFER_FLAG_EOT; nop_xfer.xfer_user_data = tx_pkt; if (gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1, &nop_xfer, true)) { + list_del(&tx_pkt->link); + kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt); spin_unlock_bh(&sys->spinlock); IPAERR("gsi_queue_xfer for ch:%lu failed\n", sys->ep->gsi_chan_hdl); queue_work(sys->wq, &sys->work); return; } + sys->nop_pending = false; spin_unlock_bh(&sys->spinlock); /* make sure TAG process is sent before clocks are gated */ @@ -371,6 +373,12 @@ int ipa3_send(struct ipa3_sys_context *sys, spin_lock_bh(&sys->spinlock); + if (unlikely(atomic_read(&sys->ep->disconnect_in_progress))) { + IPAERR("Pipe disconnect in progress dropping the packet\n"); + spin_unlock_bh(&sys->spinlock); + return -EFAULT; + } + for (i = 0; i < num_desc; i++) { tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, GFP_ATOMIC); @@ -1289,10 +1297,12 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl) ep->sys->repl_hdlr = ipa3_replenish_rx_cache; ep->sys->repl->capacity = 0; kfree(ep->sys->repl); + ep->sys->repl = NULL; fail_page_recycle_repl: if (ep->sys->page_recycle_repl) { ep->sys->page_recycle_repl->capacity = 0; kfree(ep->sys->page_recycle_repl); + ep->sys->page_recycle_repl = NULL; } fail_gen2: ipa_pm_deregister(ep->sys->pm_hdl); @@ -1340,6 +1350,7 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl) if (IPA_CLIENT_IS_PROD(ep->client)) { do { 
spin_lock_bh(&ep->sys->spinlock); + atomic_set(&ep->disconnect_in_progress, 1); empty = list_empty(&ep->sys->head_desc_list); spin_unlock_bh(&ep->sys->spinlock); if (!empty) @@ -1718,22 +1729,29 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, data_idx++; for (f = 0; f < num_frags; f++) { - desc[data_idx + f].frag = &skb_shinfo(skb)->frags[f]; - desc[data_idx + f].type = IPA_DATA_DESC_SKB_PAGED; - desc[data_idx + f].len = - skb_frag_size(desc[data_idx + f].frag); + if (skb_frag_size(&skb_shinfo(skb)->frags[f]) != 0) { + desc[data_idx].frag = + &skb_shinfo(skb)->frags[f]; + desc[data_idx].type = + IPA_DATA_DESC_SKB_PAGED; + desc[data_idx].len = + skb_frag_size(desc[data_idx].frag); + data_idx++; + } else { + IPAERR_RL("Received zero len SKB frag pkt\n"); + IPA_STATS_INC_CNT( + ipa3_ctx->stats.zero_len_frag_pkt_cnt); + } } /* don't free skb till frag mappings are released */ if (num_frags) { - desc[data_idx + f - 1].callback = - desc[skb_idx].callback; - desc[data_idx + f - 1].user1 = desc[skb_idx].user1; - desc[data_idx + f - 1].user2 = desc[skb_idx].user2; + desc[data_idx - 1].callback = desc[skb_idx].callback; + desc[data_idx - 1].user1 = desc[skb_idx].user1; + desc[data_idx - 1].user2 = desc[skb_idx].user2; desc[skb_idx].callback = NULL; } - if (unlikely(ipa3_send(sys, num_frags + data_idx, - desc, true))) { + if (unlikely(ipa3_send(sys, data_idx, desc, true))) { IPAERR_RL("fail to send skb %pK num_frags %u SWP\n", skb, num_frags); goto fail_send; @@ -2687,6 +2705,7 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys) kfree(sys->repl->cache); kfree(sys->repl); + sys->repl = NULL; } if (sys->page_recycle_repl) { for (i = 0; i < sys->page_recycle_repl->capacity; i++) { @@ -2705,6 +2724,7 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys) } kfree(sys->page_recycle_repl->cache); kfree(sys->page_recycle_repl); + sys->page_recycle_repl = NULL; } } @@ -3486,17 +3506,21 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) { rx_skb = alloc_skb(0, GFP_ATOMIC); if (unlikely(!rx_skb)) { - IPAERR("skb alloc failure\n"); - list_del(&rx_pkt->link); - if (!rx_page.is_tmp_alloc) { - init_page_count(rx_page.page); - } else { - dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr, - rx_pkt->len, DMA_FROM_DEVICE); - __free_pages(rx_pkt->page_data.page, + IPAERR("skb alloc failure, free all pending pages\n"); + list_for_each_entry_safe(rx_pkt, tmp, head, link) { + rx_page = rx_pkt->page_data; + list_del_init(&rx_pkt->link); + if (!rx_page.is_tmp_alloc) { + init_page_count(rx_page.page); + } else { + dma_unmap_page(ipa3_ctx->pdev, + rx_page.dma_addr, + rx_pkt->len, DMA_FROM_DEVICE); + __free_pages(rx_pkt->page_data.page, IPA_WAN_PAGE_ORDER); + } + rx_pkt->sys->free_rx_wrapper(rx_pkt); } - rx_pkt->sys->free_rx_wrapper(rx_pkt); IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_page_drop_cnt); return NULL; } @@ -3756,7 +3780,9 @@ static void ipa3_set_aggr_limit(struct ipa_sys_connect_params *in, sys->ep->status.status_en = false; sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(adjusted_sz); - if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) + if (in->client == IPA_CLIENT_APPS_WAN_COAL_CONS || + (in->client == IPA_CLIENT_APPS_WAN_CONS && + ipa3_ctx->ipa_hw_type <= IPA_HW_v4_2)) in->ipa_ep_cfg.aggr.aggr_hard_byte_limit_en = 1; *aggr_byte_limit = sys->rx_buff_sz < *aggr_byte_limit ? 
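The ipa3_tx_dp() rework above stops handing zero-length skb fragments to the hardware: instead of indexing descriptors as desc[data_idx + f], data_idx now advances only when a fragment actually carries data, zero-length fragments just bump the new zero_len_frag_pkt_cnt counter, and the completion callback moves to the last slot that was really filled (desc[data_idx - 1]). A small stand-alone illustration of that compaction, with made-up fragment sizes in place of skb_shinfo() data:

#include <stdio.h>

struct demo_desc {
	unsigned int len;
	int has_callback;
};

int main(void)
{
	unsigned int frag_len[] = { 1500, 0, 800, 0, 64 };	/* two empty frags */
	struct demo_desc descs[8] = { { 0, 0 } };
	unsigned int data_idx = 1;	/* slot 0 holds the linear part of the skb */
	unsigned int skipped = 0;
	unsigned int f;

	for (f = 0; f < sizeof(frag_len) / sizeof(frag_len[0]); f++) {
		if (frag_len[f] == 0) {
			skipped++;	/* the driver only increments a stats counter here */
			continue;
		}
		descs[data_idx].len = frag_len[f];
		data_idx++;
	}
	descs[data_idx - 1].has_callback = 1;	/* callback on the last populated slot */

	printf("descriptors used: %u, zero-length frags skipped: %u\n",
	       data_idx, skipped);	/* 4 used instead of 6, 2 skipped */
	return 0;
}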
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c index d425bbb963e7..03ba4c473903 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. */ #include "ipa_i.h" @@ -222,6 +222,9 @@ static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip, /* only body (no header) */ tbl_mem.size = tbl->sz[rlt] - ipahal_get_hw_tbl_hdr_width(); + /* Add prefetech buf size. */ + tbl_mem.size += + ipahal_get_hw_prefetch_buf_size(); if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) { IPAERR("fail to alloc sys tbl of size %d\n", tbl_mem.size); @@ -1070,7 +1073,7 @@ static int __ipa_del_flt_rule(u32 rule_hdl) list_del(&entry->link); entry->tbl->rule_cnt--; - if (entry->rt_tbl) + if (entry->rt_tbl && !ipa3_check_idr_if_freed(entry->rt_tbl)) entry->rt_tbl->ref_cnt--; IPADBG("del flt rule rule_cnt=%d rule_id=%d\n", entry->tbl->rule_cnt, entry->rule_id); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c index d64a4bff4504..79527916e5e7 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2019,2021 The Linux Foundation. All rights reserved. */ #include "ipa_i.h" @@ -90,6 +90,15 @@ static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem, 0 : 1; } } + /* Check the pointer and header length to avoid + * dangerous overflow in HW + */ + if (unlikely(!entry->hdr || !entry->hdr->offset_entry || + !entry->offset_entry || + entry->hdr->hdr_len == 0 || + entry->hdr->hdr_len > + ipa_hdr_bin_sz[IPA_HDR_BIN_MAX - 1])) + return -EINVAL; ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base, entry->offset_entry->offset, @@ -747,7 +756,7 @@ int __ipa3_del_hdr(u32 hdr_hdl, bool by_user) return 0; } - if (entry->is_hdr_proc_ctx) { + if (entry->is_hdr_proc_ctx || entry->proc_ctx) { dma_unmap_single(ipa3_ctx->pdev, entry->phys_base, entry->hdr_len, @@ -1076,6 +1085,7 @@ int ipa3_reset_hdr(bool user_only) if (ipa3_id_find(entry->id) == NULL) { mutex_unlock(&ipa3_ctx->lock); + IPAERR_RL("Invalid header ID\n"); WARN_ON_RATELIMIT_IPA(1); return -EFAULT; } @@ -1086,6 +1096,7 @@ int ipa3_reset_hdr(bool user_only) entry->phys_base, entry->hdr_len, DMA_TO_DEVICE); + entry->proc_ctx->hdr = NULL; entry->proc_ctx = NULL; } else { /* move the offset entry to free list */ @@ -1143,6 +1154,7 @@ int ipa3_reset_hdr(bool user_only) if (ipa3_id_find(ctx_entry->id) == NULL) { mutex_unlock(&ipa3_ctx->lock); + IPAERR_RL("Invalid proc header ID\n"); WARN_ON_RATELIMIT_IPA(1); return -EFAULT; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index 29a8fbdb0e1b..e9572d3d43a0 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
*/ #ifndef _IPA3_I_H_ @@ -85,6 +85,10 @@ #define NAPI_WEIGHT 64 +/* Bit alignment for IPA4.5 GSI rings */ +#define IPA_LOW_16_BIT_MASK (0xFFFF) +#define IPA4_5_GSI_RING_SIZE_ALIGN (16 * PAGE_SIZE) + #define IPADBG(fmt, args...) \ do { \ pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ @@ -272,7 +276,7 @@ enum { #define IPA_AGGR_MAX_STR_LENGTH (10) -#define CLEANUP_TAG_PROCESS_TIMEOUT 1000 +#define CLEANUP_TAG_PROCESS_TIMEOUT 5000 #define IPA_AGGR_STR_IN_BYTES(str) \ (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1) @@ -1377,6 +1381,7 @@ struct ipa3_stats { u32 flow_disable; u32 tx_non_linear; u32 rx_page_drop_cnt; + u32 zero_len_frag_pkt_cnt; struct ipa3_page_recycle_stats page_recycle_stats[2]; }; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c index 938bb36c191c..94526698bf0a 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c @@ -250,6 +250,7 @@ int ipa3_send_adpl_msg(unsigned long skb_data) list_add_tail(&msg->link, &ipa3_odl_ctx->adpl_msg_list); atomic_inc(&ipa3_odl_ctx->stats.numer_in_queue); mutex_unlock(&ipa3_odl_ctx->adpl_msg_lock); + wake_up(&ipa3_odl_ctx->adpl_msg_waitq); IPA_STATS_INC_CNT(ipa3_odl_ctx->stats.odl_rx_pkt); return 0; @@ -534,7 +535,9 @@ static ssize_t ipa_adpl_read(struct file *filp, char __user *buf, size_t count, int ret = 0; char __user *start = buf; struct ipa3_push_msg_odl *msg; + DEFINE_WAIT_FUNC(wait, woken_wake_function); + add_wait_queue(&ipa3_odl_ctx->adpl_msg_waitq, &wait); while (1) { IPADBG_LOW("Writing message to adpl pipe\n"); if (!ipa3_odl_ctx->odl_state.odl_open) @@ -579,9 +582,6 @@ static ssize_t ipa_adpl_read(struct file *filp, char __user *buf, size_t count, IPA_STATS_INC_CNT(ipa3_odl_ctx->stats.odl_tx_diag_pkt); kfree(msg); msg = NULL; - } else { - ret = -EAGAIN; - break; } ret = -EAGAIN; @@ -594,9 +594,9 @@ static ssize_t ipa_adpl_read(struct file *filp, char __user *buf, size_t count, if (start != buf) break; - + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } - + remove_wait_queue(&ipa3_odl_ctx->adpl_msg_waitq, &wait); if (start != buf && ret != -EFAULT) ret = buf - start; @@ -672,6 +672,7 @@ int ipa_odl_init(void) odl_cdev = ipa3_odl_ctx->odl_cdev; INIT_LIST_HEAD(&ipa3_odl_ctx->adpl_msg_list); + init_waitqueue_head(&ipa3_odl_ctx->adpl_msg_waitq); mutex_init(&ipa3_odl_ctx->adpl_msg_lock); mutex_init(&ipa3_odl_ctx->pipe_lock); @@ -769,6 +770,7 @@ int ipa_odl_init(void) class_destroy(odl_cdev[0].class); create_char_dev0_fail: kfree(ipa3_odl_ctx); + ipa3_odl_ctx = NULL; fail_mem_ctx: return result; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h index 4e876d167246..0976d8c8b3f7 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.h @@ -58,6 +58,7 @@ struct ipa_odl_context { bool odl_ctl_msg_wq_flag; struct ipa3_odlstats stats; u32 odl_pm_hdl; + wait_queue_head_t adpl_msg_waitq; }; struct ipa3_push_msg_odl { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c index 5409af242471..efd3e16123e7 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c @@ -635,6 +635,7 @@ int ipa_pm_init(struct ipa_pm_init_params *params) if (!ipa_pm_ctx->wq) { IPA_PM_ERR("create workqueue failed\n"); kfree(ipa_pm_ctx); + ipa_pm_ctx = NULL; return -ENOMEM; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c 
b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c index 61fea015ef5c..40254a4b89fb 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved. */ #include @@ -484,8 +484,11 @@ static int ipa3_qmi_send_req_wait(struct qmi_handle *client_handle, req_desc->ei_array, req); - if (unlikely(!ipa_q6_clnt)) + if (unlikely(!ipa_q6_clnt)) { + mutex_unlock(&ipa3_qmi_lock); return -EINVAL; + } + mutex_unlock(&ipa3_qmi_lock); if (ret < 0) { @@ -930,7 +933,10 @@ int ipa3_qmi_add_offload_request_send( } /* check if the filter rules from IPACM is valid */ - if (req->filter_spec_ex2_list_len == 0) { + if (req->filter_spec_ex2_list_len < 0) { + IPAWANERR("IPACM pass invalid num of rules\n"); + return -EINVAL; + } else if (req->filter_spec_ex2_list_len == 0) { IPAWANDBG("IPACM pass zero rules to Q6\n"); } else { IPAWANDBG("IPACM pass %u rules to Q6\n", @@ -938,9 +944,10 @@ int ipa3_qmi_add_offload_request_send( } /* currently set total max to 64 */ - if (req->filter_spec_ex2_list_len + - ipa3_qmi_ctx->num_ipa_offload_connection - >= QMI_IPA_MAX_FILTERS_V01) { + if ((ipa3_qmi_ctx->num_ipa_offload_connection < 0) || + (req->filter_spec_ex2_list_len >= + (QMI_IPA_MAX_FILTERS_V01 - + ipa3_qmi_ctx->num_ipa_offload_connection))) { IPAWANDBG( "cur(%d), req(%d), exceed limit (%d)\n", ipa3_qmi_ctx->num_ipa_offload_connection, diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c index 19d610e4b89c..12633b7c5498 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. */ #include @@ -169,6 +169,9 @@ static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip, /* only body (no header) */ tbl_mem.size = tbl->sz[rlt] - ipahal_get_hw_tbl_hdr_width(); + /* Add prefetech buf size. */ + tbl_mem.size += + ipahal_get_hw_prefetch_buf_size(); if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) { IPAERR_RL("fail to alloc sys tbl of size %d\n", tbl_mem.size); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c index 19ead0f676d8..f5607b87a4c6 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
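The ipa3_qmi_add_offload_request_send() hunk above rewrites the capacity test so that the untrusted filter_spec_ex2_list_len is never added to the running connection count: once num_ipa_offload_connection is known to be non-negative, comparing the request against the remaining headroom (QMI_IPA_MAX_FILTERS_V01 - count) expresses the same limit but cannot wrap the way the old sum could. A small stand-alone version of the same idea (names and the extra upper-bound check are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_FILTERS 64		/* stands in for QMI_IPA_MAX_FILTERS_V01 */

static bool exceeds_filter_limit(int current_cnt, uint32_t requested)
{
	/* reject corrupted state before it is used on the right-hand side */
	if (current_cnt < 0 || current_cnt > MAX_FILTERS)
		return true;
	/*
	 * Same condition as "requested + current_cnt >= MAX_FILTERS", but the
	 * untrusted value never takes part in an addition that could overflow.
	 */
	return requested >= (uint32_t)(MAX_FILTERS - current_cnt);
}

int main(void)
{
	printf("%d\n", exceeds_filter_limit(10, 4000000000u));	/* 1: rejected */
	printf("%d\n", exceeds_filter_limit(10, 50));		/* 0: 60 < 64, accepted */
	return 0;
}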
*/ #include "ipa_i.h" @@ -1195,7 +1195,7 @@ int ipa3_uc_debug_stats_alloc( result = ipa3_uc_send_cmd((u32)(cmd.phys_base), command, IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, - false, 10 * HZ); + false, 20 * HZ); if (result) { IPAERR("fail to alloc offload stats\n"); goto cleanup; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c index 7f85a9ebf9df..73a7ebb6c7bb 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c @@ -635,6 +635,17 @@ static int ipa_create_ap_smmu_mapping_sgt(struct sg_table *sgt, start_iova = va; } + /* + * In IPA4.5, GSI HW has such requirement: + * Lower 16_bits of Ring base + ring length can’t exceed 16 bits + */ + if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_5 && + ((u32)(va & IPA_LOW_16_BIT_MASK) + len) >= + IPA4_5_GSI_RING_SIZE_ALIGN) { + va = roundup(cb->next_addr, IPA4_5_GSI_RING_SIZE_ALIGN); + start_iova = va; + } + for_each_sg(sgt->sgl, sg, sgt->nents, i) { /* directly get sg_tbl PA from wlan-driver */ phys = sg->dma_address; @@ -742,6 +753,7 @@ static void ipa_release_ap_smmu_mappings(enum ipa_client_type client) ipa3_ctx->wdi_map_cnt--; } kfree(wdi_res[i].res); + wdi_res[i].res = NULL; wdi_res[i].valid = false; } } @@ -778,6 +790,7 @@ static void ipa_release_uc_smmu_mappings(enum ipa_client_type client) ipa3_ctx->wdi_map_cnt--; } kfree(wdi_res[i].res); + wdi_res[i].res = NULL; wdi_res[i].valid = false; } } @@ -929,6 +942,7 @@ void ipa3_release_wdi3_gsi_smmu_mappings(u8 dir) ipa3_ctx->wdi_map_cnt--; } kfree(wdi_res[i].res); + wdi_res[i].res = NULL; wdi_res[i].valid = false; } } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c index 9e2ec20c6193..f85c80403b85 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. */ #include @@ -44,6 +44,7 @@ * @flt_parse_hw_rule: Parse flt rule read from H/W * @eq_bitfield: Array of the bit fields of the support equations. 
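The ipa_create_ap_smmu_mapping_sgt() hunk above enforces the IPA v4.5 GSI rule quoted in its comment: the low 16 bits of the ring base address plus the ring length must still fit in 16 bits. With IPA_LOW_16_BIT_MASK = 0xFFFF and IPA4_5_GSI_RING_SIZE_ALIGN = 16 * PAGE_SIZE (0x10000 with 4 KB pages, an assumption in the sketch below), an address whose low bits would overflow is simply restarted at the next 64 KB boundary:

#include <stdint.h>
#include <stdio.h>

#define ASSUMED_PAGE_SIZE	4096u				/* assumption: 4 KB pages */
#define RING_SIZE_ALIGN		(16u * ASSUMED_PAGE_SIZE)	/* 0x10000 */
#define LOW_16_BIT_MASK		0xFFFFu

static uint64_t place_ring(uint64_t va, uint32_t len)
{
	if (((uint32_t)(va & LOW_16_BIT_MASK) + len) >= RING_SIZE_ALIGN) {
		/* round up to the next 64 KB boundary so the low 16 bits restart at 0 */
		va = (va + RING_SIZE_ALIGN - 1) & ~(uint64_t)(RING_SIZE_ALIGN - 1);
	}
	return va;
}

int main(void)
{
	/* 0xC000 + 0x8000 = 0x14000 needs 17 bits -> moved to 0x12350000 */
	printf("0x%llx\n", (unsigned long long)place_ring(0x1234C000ull, 0x8000));
	/* 0x1000 + 0x8000 = 0x9000 still fits -> left at 0x12341000 */
	printf("0x%llx\n", (unsigned long long)place_ring(0x12341000ull, 0x8000));
	return 0;
}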
* 0xFF means the equation is not supported + * @prefetech_buf_size: Prefetch buf size; */ struct ipahal_fltrt_obj { bool support_hash; @@ -73,6 +74,7 @@ struct ipahal_fltrt_obj { int (*rt_parse_hw_rule)(u8 *addr, struct ipahal_rt_rule_entry *rule); int (*flt_parse_hw_rule)(u8 *addr, struct ipahal_flt_rule_entry *rule); u8 eq_bitfield[IPA_EQ_MAX]; + u32 prefetech_buf_size; }; @@ -654,6 +656,7 @@ static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = { [IPA_IS_FRAG] = 15, [IPA_IS_PURE_ACK] = 0xFF, }, + IPA3_0_HW_RULE_PREFETCH_BUF_SIZE, }, /* IPAv4 */ @@ -699,6 +702,7 @@ static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = { [IPA_IS_FRAG] = 15, [IPA_IS_PURE_ACK] = 0xFF, }, + IPA3_0_HW_RULE_PREFETCH_BUF_SIZE, }, /* IPAv4.2 */ @@ -744,6 +748,7 @@ static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = { [IPA_IS_FRAG] = 15, [IPA_IS_PURE_ACK] = 0xFF, }, + IPA3_0_HW_RULE_PREFETCH_BUF_SIZE, }, /* IPAv4.5 */ @@ -789,6 +794,7 @@ static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = { [IPA_IS_FRAG] = 15, [IPA_IS_PURE_ACK] = 0, }, + IPA3_0_HW_RULE_PREFETCH_BUF_SIZE, }, }; @@ -3593,6 +3599,12 @@ u32 ipahal_get_lcl_tbl_addr_alignment(void) return ipahal_fltrt_objs[ipahal_ctx->hw_type].lcladdr_alignment; } +/* Get the H/W (flt/rt) prefetch buf size */ +u32 ipahal_get_hw_prefetch_buf_size(void) +{ + return ipahal_fltrt_objs[ipahal_ctx->hw_type].prefetech_buf_size; +} + /* * Rule priority is used to distinguish rules order * at the integrated table consisting from hashable and diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h index f18700d0c78c..ad2394e52856 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */ #ifndef _IPAHAL_FLTRT_H_ @@ -140,6 +140,9 @@ u32 ipahal_get_hw_tbl_hdr_width(void); */ u32 ipahal_get_lcl_tbl_addr_alignment(void); +/* Get the H/W (flt/rt) prefetch buf size */ +u32 ipahal_get_hw_prefetch_buf_size(void); + /* * Rule priority is used to distinguish rules order * at the integrated table consisting from hashable and diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h index c8a6a3023864..283786236a07 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. */ #ifndef _IPAHAL_FLTRT_I_H_ @@ -44,6 +44,7 @@ enum ipa_fltrt_equations { #define IPA3_0_HW_TBL_ADDR_MASK (127) #define IPA3_0_HW_RULE_BUF_SIZE (256) #define IPA3_0_HW_RULE_START_ALIGNMENT (7) +#define IPA3_0_HW_RULE_PREFETCH_BUF_SIZE (128) /* diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c index 3edca59dcf8c..560ee549d410 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2017-2019, 2021, The Linux Foundation. All rights reserved. */ #include "ipahal.h" @@ -225,7 +225,7 @@ static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_flt_rt_v4_5( void *params, bool is_atomic_ctx) { struct ipahal_stats_init_pyld *pyld; - int num = (int)(params); + int num = (uintptr_t)(params); if (num > IPA_MAX_FLT_RT_CNT_INDEX || num <= 0) { diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index e178b8ef2eeb..a0284deed24f 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. */ /* @@ -3268,6 +3268,16 @@ static int rmnet_ipa3_query_tethering_stats_modem( int pipe_len, rc; struct ipa_pipe_stats_info_type_v01 *stat_ptr; + if (data != NULL) { + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + data->tetherIface[IFNAMSIZ-1] = '\0'; + } else if (!reset) { + /* only reset can have data == NULL */ + IPAWANERR("query without allocate tether_stats strucutre\n"); + return -EINVAL; + } + req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01), GFP_KERNEL); if (!req) diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c index b8666893f168..032143296522 100644 --- a/drivers/platform/msm/qcom-geni-se.c +++ b/drivers/platform/msm/qcom-geni-se.c @@ -1566,6 +1566,10 @@ void geni_se_dump_dbg_regs(struct se_geni_rsc *rsc, void __iomem *base, u32 se_dma_rx_len_in = 0; u32 se_dma_tx_len = 0; u32 se_dma_tx_len_in = 0; + u32 geni_m_irq_en = 0; + u32 geni_s_irq_en = 0; + u32 geni_dma_tx_irq_en = 0; + u32 geni_dma_rx_irq_en = 0; struct geni_se_device *geni_se_dev; if (!ipc) @@ -1596,6 +1600,10 @@ void geni_se_dump_dbg_regs(struct se_geni_rsc *rsc, void __iomem *base, se_dma_rx_len_in = geni_read_reg(base, SE_DMA_RX_LEN_IN); se_dma_tx_len = geni_read_reg(base, SE_DMA_TX_LEN); se_dma_tx_len_in = geni_read_reg(base, SE_DMA_TX_LEN_IN); + geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN); + geni_s_irq_en = geni_read_reg(base, SE_GENI_S_IRQ_EN); + geni_dma_tx_irq_en = geni_read_reg(base, SE_DMA_TX_IRQ_EN); + geni_dma_rx_irq_en = geni_read_reg(base, SE_DMA_RX_IRQ_EN); GENI_SE_DBG(ipc, true, rsc->ctrl_dev, "%s: m_cmd0:0x%x, m_irq_status:0x%x, s_irq_status:0x%x, geni_status:0x%x, geni_ios:0x%x\n", @@ -1608,6 +1616,9 @@ void geni_se_dump_dbg_regs(struct se_geni_rsc *rsc, void __iomem *base, se_dma_dbg, m_cmd_ctrl, se_dma_rx_len, se_dma_rx_len_in); GENI_SE_DBG(ipc, true, rsc->ctrl_dev, "dma_txlen:0x%x, dma_txlen_in:0x%x\n", se_dma_tx_len, se_dma_tx_len_in); + GENI_SE_DBG(ipc, false, NULL, + "dma_txirq_en:0x%x, dma_rxirq_en:0x%x geni_m_irq_en:0x%x geni_s_irq_en:0x%x\n", + geni_dma_tx_irq_en, geni_dma_rx_irq_en, geni_m_irq_en, geni_s_irq_en); } EXPORT_SYMBOL(geni_se_dump_dbg_regs); diff --git a/drivers/platform/msm/qpnp-revid.c b/drivers/platform/msm/qpnp-revid.c index 2d2980fffade..a7529487ba5d 100644 --- a/drivers/platform/msm/qpnp-revid.c +++ b/drivers/platform/msm/qpnp-revid.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -64,6 +64,7 @@ static const char *const pmic_names[] = { [PMR735B_SUBTYPE] = "PMR735B", [PM6125_SUBTYPE] = "PM6125", [PM8008_SUBTYPE] = "PM8008", + [PM8010_SUBTYPE] = "PM8010", [SMB1355_SUBTYPE] = "SMB1355", [SMB1390_SUBTYPE] = "SMB1390", }; diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c index 157385855c43..a304574418d4 100644 --- a/drivers/platform/msm/usb_bam.c +++ b/drivers/platform/msm/usb_bam.c @@ -2876,7 +2876,7 @@ static struct msm_usb_bam_data *usb_bam_dt_to_data( struct device_node *node = pdev->dev.of_node; int rc = 0; u8 i = 0; - u32 bam; + u32 bam = DWC3_CTRL; u32 addr = 0; u32 threshold, max_connections = 0; static struct usb_bam_pipe_connect *usb_bam_connections; @@ -2886,12 +2886,9 @@ static struct msm_usb_bam_data *usb_bam_dt_to_data( if (!usb_bam_data) return NULL; - rc = of_property_read_u32(node, "qcom,bam-type", &bam); - if (rc) { - log_event_err("%s: bam type is missing in device tree\n", - __func__); - return NULL; - } + /* override bam-type if specified, default is dwc3 */ + of_property_read_u32(node, "qcom,bam-type", &bam); + if (bam >= MAX_BAMS) { log_event_err("%s: Invalid bam type %d in device tree\n", __func__, bam); diff --git a/drivers/power/supply/qcom/Kconfig b/drivers/power/supply/qcom/Kconfig index c52014ece27c..9581455fc4d2 100644 --- a/drivers/power/supply/qcom/Kconfig +++ b/drivers/power/supply/qcom/Kconfig @@ -40,6 +40,25 @@ config QPNP_SMBLITE as fuel gauge and USB. VBUS regulator is registered for supporting OTG. +config QPNP_VM_BMS + tristate "QPNP Voltage-Mode Battery Monitoring System driver" + depends on MFD_SPMI_PMIC + help + Say Y here to enable support for QPNP chip vm-bms device. + The voltage-mode (vm) BMS driver uses periodic VBATT + readings from the battery to calculate the State of + Charge. + +config QPNP_LINEAR_CHARGER + tristate "QPNP Linear Charger driver" + depends on MFD_SPMI_PMIC + help + Say Y here to enable the Linear battery charger which supports USB + detection and charging. The driver also offers relevant information + to userspace via the power supply framework. + The power supply framework is used to communicate battery and + usb properties to userspace and other driver consumers like USB. + config SMB138X_CHARGER tristate "SMB138X Battery Charger" depends on MFD_I2C_PMIC @@ -73,6 +92,17 @@ config SMB1351_USB_CHARGER notification support. The driver controls SMB1351 via I2C and supports device-tree interface. +config SMB1360_CHARGER_FG + tristate "SMB1360 Charger and Fuel Gauge" + depends on I2C + help + Say Y to include support for SMB1360 Charger and Fuel Gauge. + SMB1360 is a single path switching mode charger capable of charging + the battery with 1.5Amps of current. It supports a fuel gauge which + uses voltage and coloumb counting for state of charge reporting. + The driver reports the status via the power supply framework. + A status change triggers an IRQ via the device STAT pin. 
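The new Kconfig entries above (QPNP_VM_BMS, QPNP_LINEAR_CHARGER and SMB1360_CHARGER_FG) only make the drivers selectable; a board that wants them still has to turn the symbols on in its defconfig, and the Makefile hunk that follows adds the matching obj-$(CONFIG_...) lines. A hypothetical defconfig fragment (built-in here, though all three are tristate and could equally be =m):

CONFIG_SMB1360_CHARGER_FG=y
CONFIG_QPNP_VM_BMS=y
CONFIG_QPNP_LINEAR_CHARGER=y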
+ config SMB1355_SLAVE_CHARGER tristate "SMB1355 Slave Battery Charger" depends on MFD_I2C_PMIC diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile index 397decf8753a..b9cc5e6b00f2 100644 --- a/drivers/power/supply/qcom/Makefile +++ b/drivers/power/supply/qcom/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only - +obj-$(CONFIG_SMB1360_CHARGER_FG) += smb1360-charger-fg.o obj-$(CONFIG_QPNP_SMB5) += step-chg-jeita.o battery.o qpnp-smb5.o smb5-lib.o pmic-voter.o storm-watch.o schgm-flash.o obj-$(CONFIG_SMB1390_CHARGE_PUMP_PSY) += smb1390-charger-psy.o pmic-voter.o obj-$(CONFIG_SMB1355_SLAVE_CHARGER) += smb1355-charger.o pmic-voter.o @@ -13,3 +13,5 @@ obj-$(CONFIG_QPNP_QG) += qpnp-qg.o pmic-voter.o qg-util.o qg-soc.o qg-sdam.o q obj-$(CONFIG_HL6111R) += hl6111r.o obj-$(CONFIG_SMB1398_CHARGER) += smb1398-charger.o pmic-voter.o obj-$(CONFIG_QPNP_SMBLITE) += step-chg-jeita.o battery.o qpnp-smblite.o smblite-lib.o pmic-voter.o storm-watch.o schgm-flashlite.o +obj-$(CONFIG_QPNP_VM_BMS) += qpnp-vm-bms.o batterydata-lib.o batterydata-interface.o +obj-$(CONFIG_QPNP_LINEAR_CHARGER) += qpnp-linear-charger.o diff --git a/drivers/power/supply/qcom/battery.h b/drivers/power/supply/qcom/battery.h index c61454480ddf..5cb3d635fe9a 100644 --- a/drivers/power/supply/qcom/battery.h +++ b/drivers/power/supply/qcom/battery.h @@ -11,6 +11,7 @@ struct charger_param { u32 fcc_step_size_ua; u32 smb_version; u32 hvdcp2_max_icl_ua; + u32 hvdcp2_12v_max_icl_ua; u32 hvdcp3_max_icl_ua; u32 forced_main_fcc; u32 qc4_max_icl_ua; diff --git a/drivers/power/supply/qcom/batterydata-interface.c b/drivers/power/supply/qcom/batterydata-interface.c new file mode 100644 index 000000000000..a6c78805db50 --- /dev/null +++ b/drivers/power/supply/qcom/batterydata-interface.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014, 2018, 2021, The Linux Foundation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "BATTERY: %s: " fmt, __func__ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct battery_data { + dev_t dev_no; + struct class *battery_class; + struct device *battery_device; + struct cdev battery_cdev; + struct bms_battery_data *profile; +}; +static struct battery_data *the_battery; + +static int battery_data_open(struct inode *inode, struct file *file) +{ + struct battery_data *battery = container_of(inode->i_cdev, + struct battery_data, battery_cdev); + + pr_debug("battery_data device opened\n"); + + file->private_data = battery; + + return 0; +} + +static long battery_data_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct battery_data *battery = file->private_data; + struct battery_params __user *bp_user = + (struct battery_params __user *)arg; + struct battery_params bp; + int soc, rbatt_sf, slope, fcc_mah; + int rc = 0; + + if (!battery->profile) { + pr_err("Battery data not set!\n"); + return -EINVAL; + } + if (copy_from_user(&bp, bp_user, sizeof(bp))) { + pr_err("copy_from_user failed\n"); + return -EFAULT; + } + + switch (cmd) { + case BPIOCXSOC: + soc = interpolate_pc(battery->profile->pc_temp_ocv_lut, + bp.batt_temp, bp.ocv_uv / 1000); + rc = put_user(soc, &bp_user->soc); + if (rc) { + pr_err("BPIOCXSOC: Failed to 'put_user' rc=%d\n", rc); + goto ret_err; + } + pr_debug("BPIOCXSOC: ocv=%d batt_temp=%d soc=%d\n", + bp.ocv_uv / 1000, bp.batt_temp, soc); + break; + case BPIOCXRBATT: + rbatt_sf = interpolate_scalingfactor( + battery->profile->rbatt_sf_lut, + bp.batt_temp, bp.soc); + rc = put_user(rbatt_sf, &bp_user->rbatt_sf); + if (rc) { + pr_err("BPIOCXRBATT: Failed to 'put_user' rc=%d\n", rc); + goto ret_err; + } + pr_debug("BPIOCXRBATT: soc=%d batt_temp=%d rbatt_sf=%d\n", + bp.soc, bp.batt_temp, rbatt_sf); + break; + case BPIOCXSLOPE: + slope = interpolate_slope(battery->profile->pc_temp_ocv_lut, + bp.batt_temp, bp.soc); + rc = put_user(slope, &bp_user->slope); + if (rc) { + pr_err("BPIOCXSLOPE: Failed to 'put_user' rc=%d\n", rc); + goto ret_err; + } + pr_debug("BPIOCXSLOPE: soc=%d batt_temp=%d slope=%d\n", + bp.soc, bp.batt_temp, slope); + break; + case BPIOCXFCC: + fcc_mah = interpolate_fcc(battery->profile->fcc_temp_lut, + bp.batt_temp); + rc = put_user(fcc_mah, &bp_user->fcc_mah); + if (rc) { + pr_err("BPIOCXFCC: Failed to 'put_user' rc=%d\n", rc); + goto ret_err; + } + pr_debug("BPIOCXFCC: batt_temp=%d fcc_mah=%d\n", + bp.batt_temp, fcc_mah); + break; + default: + pr_err("IOCTL %d not supported\n", cmd); + rc = -EINVAL; + + } +ret_err: + return rc; +} + +static int battery_data_release(struct inode *inode, struct file *file) +{ + pr_debug("battery_data device closed\n"); + + return 0; +} + +static const struct file_operations battery_data_fops = { + .owner = THIS_MODULE, + .open = battery_data_open, + .unlocked_ioctl = battery_data_ioctl, + .compat_ioctl = battery_data_ioctl, + .release = battery_data_release, +}; + +int config_battery_data(struct bms_battery_data *profile) +{ + if (!the_battery) { + pr_err("Battery data not initialized\n"); + return -ENODEV; + } + + the_battery->profile = profile; + + pr_debug("Battery profile set - %s\n", + the_battery->profile->battery_type); + + return 0; +} + +static int batterydata_init(void) +{ + int rc; + struct battery_data *battery; + + battery = kzalloc(sizeof(*battery), GFP_KERNEL); + if (!battery) + return -ENOMEM; + + /* character device to access the battery-data from userspace */ + rc = 
alloc_chrdev_region(&battery->dev_no, 0, 1, "battery_data"); + if (rc) { + pr_err("Unable to allocate chrdev rc=%d\n", rc); + return rc; + } + cdev_init(&battery->battery_cdev, &battery_data_fops); + rc = cdev_add(&battery->battery_cdev, battery->dev_no, 1); + if (rc) { + pr_err("Unable to add battery_cdev rc=%d\n", rc); + goto unregister_chrdev; + } + + battery->battery_class = class_create(THIS_MODULE, "battery_data"); + if (IS_ERR_OR_NULL(battery->battery_class)) { + pr_err("Fail to create battery class\n"); + rc = -ENODEV; + goto delete_cdev; + } + + battery->battery_device = device_create(battery->battery_class, + NULL, battery->dev_no, + NULL, "battery_data"); + if (IS_ERR(battery->battery_device)) { + pr_err("Fail to create battery_device device\n"); + rc = -ENODEV; + goto delete_cdev; + } + + the_battery = battery; + + pr_info("Battery-data device created!\n"); + + return 0; + +delete_cdev: + cdev_del(&battery->battery_cdev); +unregister_chrdev: + unregister_chrdev_region(battery->dev_no, 1); + the_battery = NULL; + return rc; +} +subsys_initcall(batterydata_init); + +static void batterydata_exit(void) +{ + if (the_battery) { + device_destroy(the_battery->battery_class, the_battery->dev_no); + cdev_del(&the_battery->battery_cdev); + unregister_chrdev_region(the_battery->dev_no, 1); + } + kfree(the_battery); + the_battery = NULL; +} +module_exit(batterydata_exit); + +MODULE_DESCRIPTION("Battery-data Interface driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/power/supply/qcom/batterydata-lib.c b/drivers/power/supply/qcom/batterydata-lib.c new file mode 100644 index 000000000000..2f68c04a8f02 --- /dev/null +++ b/drivers/power/supply/qcom/batterydata-lib.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2014, 2018, 2021, The Linux Foundation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include + +int linear_interpolate(int y0, int x0, int y1, int x1, int x) +{ + if (y0 == y1 || x == x0) + return y0; + if (x1 == x0 || x == x1) + return y1; + + return y0 + ((y1 - y0) * (x - x0) / (x1 - x0)); +} + +static int interpolate_single_lut_scaled(struct single_row_lut *lut, + int x, int scale) +{ + int i, result; + + if (x < lut->x[0] * scale) { + pr_debug("x %d less than known range return y = %d lut = %pS\n", + x, lut->y[0], lut); + return lut->y[0]; + } + if (x > lut->x[lut->cols - 1] * scale) { + pr_debug("x %d more than known range return y = %d lut = %pS\n", + x, lut->y[lut->cols - 1], lut); + return lut->y[lut->cols - 1]; + } + + for (i = 0; i < lut->cols; i++) + if (x <= lut->x[i] * scale) + break; + if (x == lut->x[i] * scale) { + result = lut->y[i]; + } else { + result = linear_interpolate( + lut->y[i - 1], + lut->x[i - 1] * scale, + lut->y[i], + lut->x[i] * scale, + x); + } + return result; +} + +int interpolate_fcc(struct single_row_lut *fcc_temp_lut, int batt_temp) +{ + return interpolate_single_lut_scaled(fcc_temp_lut, + batt_temp, + DEGC_SCALE); +} + +int interpolate_scalingfactor_fcc(struct single_row_lut *fcc_sf_lut, + int cycles) +{ + /* + * sf table could be null when no battery aging data is available, in + * that case return 100% + */ + if (fcc_sf_lut) + return interpolate_single_lut_scaled(fcc_sf_lut, cycles, 1); + else + return 100; +} + +int interpolate_scalingfactor(struct sf_lut *sf_lut, int row_entry, int pc) +{ + int i, scalefactorrow1, scalefactorrow2, scalefactor, rows, cols; + int row1 = 0; + int row2 = 0; + + /* + * sf table could be null when no battery aging data is available, in + * that case return 100% + */ + if (!sf_lut) + return 100; + + rows = sf_lut->rows; + cols = sf_lut->cols; + if (pc > sf_lut->percent[0]) { + pr_debug("pc %d greater than known pc ranges for sfd\n", pc); + row1 = 0; + row2 = 0; + } else if (pc < sf_lut->percent[rows - 1]) { + pr_debug("pc %d less than known pc ranges for sf\n", pc); + row1 = rows - 1; + row2 = rows - 1; + } else { + for (i = 0; i < rows; i++) { + if (pc == sf_lut->percent[i]) { + row1 = i; + row2 = i; + break; + } + if (pc > sf_lut->percent[i]) { + row1 = i - 1; + row2 = i; + break; + } + } + } + + if (row_entry < sf_lut->row_entries[0] * DEGC_SCALE) + row_entry = sf_lut->row_entries[0] * DEGC_SCALE; + if (row_entry > sf_lut->row_entries[cols - 1] * DEGC_SCALE) + row_entry = sf_lut->row_entries[cols - 1] * DEGC_SCALE; + + for (i = 0; i < cols; i++) + if (row_entry <= sf_lut->row_entries[i] * DEGC_SCALE) + break; + if (row_entry == sf_lut->row_entries[i] * DEGC_SCALE) { + scalefactor = linear_interpolate( + sf_lut->sf[row1][i], + sf_lut->percent[row1], + sf_lut->sf[row2][i], + sf_lut->percent[row2], + pc); + return scalefactor; + } + + scalefactorrow1 = linear_interpolate( + sf_lut->sf[row1][i - 1], + sf_lut->row_entries[i - 1] * DEGC_SCALE, + sf_lut->sf[row1][i], + sf_lut->row_entries[i] * DEGC_SCALE, + row_entry); + + scalefactorrow2 = linear_interpolate( + sf_lut->sf[row2][i - 1], + sf_lut->row_entries[i - 1] * DEGC_SCALE, + sf_lut->sf[row2][i], + sf_lut->row_entries[i] * DEGC_SCALE, + row_entry); + + scalefactor = linear_interpolate( + scalefactorrow1, + sf_lut->percent[row1], + scalefactorrow2, + sf_lut->percent[row2], + pc); + + return scalefactor; +} + +/* get ocv given a soc -- reverse lookup */ +int interpolate_ocv(struct pc_temp_ocv_lut *pc_temp_ocv, + int batt_temp, int pc) +{ + int i, ocvrow1, ocvrow2, ocv, rows, cols; + int row1 = 
0; + int row2 = 0; + + rows = pc_temp_ocv->rows; + cols = pc_temp_ocv->cols; + if (pc > pc_temp_ocv->percent[0]) { + pr_debug("pc %d greater than known pc ranges for sfd\n", pc); + row1 = 0; + row2 = 0; + } else if (pc < pc_temp_ocv->percent[rows - 1]) { + pr_debug("pc %d less than known pc ranges for sf\n", pc); + row1 = rows - 1; + row2 = rows - 1; + } else { + for (i = 0; i < rows; i++) { + if (pc == pc_temp_ocv->percent[i]) { + row1 = i; + row2 = i; + break; + } + if (pc > pc_temp_ocv->percent[i]) { + row1 = i - 1; + row2 = i; + break; + } + } + } + + if (batt_temp < pc_temp_ocv->temp[0] * DEGC_SCALE) + batt_temp = pc_temp_ocv->temp[0] * DEGC_SCALE; + if (batt_temp > pc_temp_ocv->temp[cols - 1] * DEGC_SCALE) + batt_temp = pc_temp_ocv->temp[cols - 1] * DEGC_SCALE; + + for (i = 0; i < cols; i++) + if (batt_temp <= pc_temp_ocv->temp[i] * DEGC_SCALE) + break; + if (batt_temp == pc_temp_ocv->temp[i] * DEGC_SCALE) { + ocv = linear_interpolate( + pc_temp_ocv->ocv[row1][i], + pc_temp_ocv->percent[row1], + pc_temp_ocv->ocv[row2][i], + pc_temp_ocv->percent[row2], + pc); + return ocv; + } + + ocvrow1 = linear_interpolate( + pc_temp_ocv->ocv[row1][i - 1], + pc_temp_ocv->temp[i - 1] * DEGC_SCALE, + pc_temp_ocv->ocv[row1][i], + pc_temp_ocv->temp[i] * DEGC_SCALE, + batt_temp); + + ocvrow2 = linear_interpolate( + pc_temp_ocv->ocv[row2][i - 1], + pc_temp_ocv->temp[i - 1] * DEGC_SCALE, + pc_temp_ocv->ocv[row2][i], + pc_temp_ocv->temp[i] * DEGC_SCALE, + batt_temp); + + ocv = linear_interpolate( + ocvrow1, + pc_temp_ocv->percent[row1], + ocvrow2, + pc_temp_ocv->percent[row2], + pc); + + return ocv; +} + +int interpolate_pc(struct pc_temp_ocv_lut *pc_temp_ocv, + int batt_temp, int ocv) +{ + int i, j, pcj, pcj_minus_one, pc; + int rows = pc_temp_ocv->rows; + int cols = pc_temp_ocv->cols; + + if (batt_temp < pc_temp_ocv->temp[0] * DEGC_SCALE) { + pr_debug("batt_temp %d < known temp range\n", batt_temp); + batt_temp = pc_temp_ocv->temp[0] * DEGC_SCALE; + } + + if (batt_temp > pc_temp_ocv->temp[cols - 1] * DEGC_SCALE) { + pr_debug("batt_temp %d > known temp range\n", batt_temp); + batt_temp = pc_temp_ocv->temp[cols - 1] * DEGC_SCALE; + } + + for (j = 0; j < cols; j++) + if (batt_temp <= pc_temp_ocv->temp[j] * DEGC_SCALE) + break; + if (batt_temp == pc_temp_ocv->temp[j] * DEGC_SCALE) { + /* found an exact match for temp in the table */ + if (ocv >= pc_temp_ocv->ocv[0][j]) + return pc_temp_ocv->percent[0]; + if (ocv <= pc_temp_ocv->ocv[rows - 1][j]) + return pc_temp_ocv->percent[rows - 1]; + for (i = 0; i < rows; i++) { + if (ocv >= pc_temp_ocv->ocv[i][j]) { + if (ocv == pc_temp_ocv->ocv[i][j]) + return pc_temp_ocv->percent[i]; + pc = linear_interpolate( + pc_temp_ocv->percent[i], + pc_temp_ocv->ocv[i][j], + pc_temp_ocv->percent[i - 1], + pc_temp_ocv->ocv[i - 1][j], + ocv); + return pc; + } + } + } + + /* + * batt_temp is within temperature for + * column j-1 and j + */ + if (ocv >= pc_temp_ocv->ocv[0][j]) + return pc_temp_ocv->percent[0]; + if (ocv <= pc_temp_ocv->ocv[rows - 1][j - 1]) + return pc_temp_ocv->percent[rows - 1]; + + pcj_minus_one = 0; + pcj = 0; + for (i = 0; i < rows-1; i++) { + if (pcj == 0 + && is_between(pc_temp_ocv->ocv[i][j], + pc_temp_ocv->ocv[i+1][j], ocv)) { + pcj = linear_interpolate( + pc_temp_ocv->percent[i], + pc_temp_ocv->ocv[i][j], + pc_temp_ocv->percent[i + 1], + pc_temp_ocv->ocv[i+1][j], + ocv); + } + + if (pcj_minus_one == 0 + && is_between(pc_temp_ocv->ocv[i][j-1], + pc_temp_ocv->ocv[i+1][j-1], ocv)) { + pcj_minus_one = linear_interpolate( + pc_temp_ocv->percent[i], + 
pc_temp_ocv->ocv[i][j-1], + pc_temp_ocv->percent[i + 1], + pc_temp_ocv->ocv[i+1][j-1], + ocv); + } + + if (pcj && pcj_minus_one) { + pc = linear_interpolate( + pcj_minus_one, + pc_temp_ocv->temp[j-1] * DEGC_SCALE, + pcj, + pc_temp_ocv->temp[j] * DEGC_SCALE, + batt_temp); + return pc; + } + } + + if (pcj) + return pcj; + + if (pcj_minus_one) + return pcj_minus_one; + + pr_debug("%d ocv wasn't found for temp %d in the LUT returning 100%%\n", + ocv, batt_temp); + return 100; +} + +int interpolate_slope(struct pc_temp_ocv_lut *pc_temp_ocv, + int batt_temp, int pc) +{ + int i, ocvrow1, ocvrow2, rows, cols; + int row1 = 0; + int row2 = 0; + int slope; + + rows = pc_temp_ocv->rows; + cols = pc_temp_ocv->cols; + if (pc >= pc_temp_ocv->percent[0]) { + pr_debug("pc %d >= max pc range - use the slope at pc=%d\n", + pc, pc_temp_ocv->percent[0]); + row1 = 0; + row2 = 1; + } else if (pc <= pc_temp_ocv->percent[rows - 1]) { + pr_debug("pc %d is <= min pc range - use the slope at pc=%d\n", + pc, pc_temp_ocv->percent[rows - 1]); + row1 = rows - 2; + row2 = rows - 1; + } else { + for (i = 0; i < rows; i++) { + if (pc == pc_temp_ocv->percent[i]) { + row1 = i - 1; + row2 = i; + break; + } + if (pc > pc_temp_ocv->percent[i]) { + row1 = i - 1; + row2 = i; + break; + } + } + } + + if (batt_temp < pc_temp_ocv->temp[0] * DEGC_SCALE) + batt_temp = pc_temp_ocv->temp[0] * DEGC_SCALE; + if (batt_temp > pc_temp_ocv->temp[cols - 1] * DEGC_SCALE) + batt_temp = pc_temp_ocv->temp[cols - 1] * DEGC_SCALE; + + for (i = 0; i < cols; i++) + if (batt_temp <= pc_temp_ocv->temp[i] * DEGC_SCALE) + break; + + if (batt_temp == pc_temp_ocv->temp[i] * DEGC_SCALE) { + slope = (pc_temp_ocv->ocv[row1][i] - + pc_temp_ocv->ocv[row2][i]); + if (slope <= 0) { + pr_warn("Slope=%d for pc=%d, using 1\n", slope, pc); + slope = 1; + } + slope *= 1000; + slope /= (pc_temp_ocv->percent[row1] - + pc_temp_ocv->percent[row2]); + return slope; + } + ocvrow1 = linear_interpolate( + pc_temp_ocv->ocv[row1][i - 1], + pc_temp_ocv->temp[i - 1] * DEGC_SCALE, + pc_temp_ocv->ocv[row1][i], + pc_temp_ocv->temp[i] * DEGC_SCALE, + batt_temp); + + ocvrow2 = linear_interpolate( + pc_temp_ocv->ocv[row2][i - 1], + pc_temp_ocv->temp[i - 1] * DEGC_SCALE, + pc_temp_ocv->ocv[row2][i], + pc_temp_ocv->temp[i] * DEGC_SCALE, + batt_temp); + + slope = (ocvrow1 - ocvrow2); + if (slope <= 0) { + pr_warn("Slope=%d for pc=%d, using 1\n", slope, pc); + slope = 1; + } + slope *= 1000; + slope /= (pc_temp_ocv->percent[row1] - pc_temp_ocv->percent[row2]); + + return slope; +} + +int interpolate_acc(struct ibat_temp_acc_lut *ibat_acc_lut, + int batt_temp, int ibat) +{ + int i, accrow1, accrow2, rows, cols; + int row1 = 0; + int row2 = 0; + int acc; + + rows = ibat_acc_lut->rows; + cols = ibat_acc_lut->cols; + + if (ibat > ibat_acc_lut->ibat[rows - 1]) { + pr_debug("ibatt(%d) > max range(%d)\n", ibat, + ibat_acc_lut->ibat[rows - 1]); + row1 = rows - 1; + row2 = rows - 2; + } else if (ibat < ibat_acc_lut->ibat[0]) { + pr_debug("ibatt(%d) < max range(%d)\n", ibat, + ibat_acc_lut->ibat[0]); + row1 = 0; + row2 = 0; + } else { + for (i = 0; i < rows; i++) { + if (ibat == ibat_acc_lut->ibat[i]) { + row1 = i; + row2 = i; + break; + } + if (ibat < ibat_acc_lut->ibat[i]) { + row1 = i; + row2 = i - 1; + break; + } + } + } + + if (batt_temp < ibat_acc_lut->temp[0] * DEGC_SCALE) + batt_temp = ibat_acc_lut->temp[0] * DEGC_SCALE; + if (batt_temp > ibat_acc_lut->temp[cols - 1] * DEGC_SCALE) + batt_temp = ibat_acc_lut->temp[cols - 1] * DEGC_SCALE; + + for (i = 0; i < cols; i++) + if (batt_temp <= 
ibat_acc_lut->temp[i] * DEGC_SCALE) + break; + + if (batt_temp == (ibat_acc_lut->temp[i] * DEGC_SCALE)) { + acc = linear_interpolate( + ibat_acc_lut->acc[row1][i], + ibat_acc_lut->ibat[row1], + ibat_acc_lut->acc[row2][i], + ibat_acc_lut->ibat[row2], + ibat); + return acc; + } + + accrow1 = linear_interpolate( + ibat_acc_lut->acc[row1][i - 1], + ibat_acc_lut->temp[i - 1] * DEGC_SCALE, + ibat_acc_lut->acc[row1][i], + ibat_acc_lut->temp[i] * DEGC_SCALE, + batt_temp); + + accrow2 = linear_interpolate( + ibat_acc_lut->acc[row2][i - 1], + ibat_acc_lut->temp[i - 1] * DEGC_SCALE, + ibat_acc_lut->acc[row2][i], + ibat_acc_lut->temp[i] * DEGC_SCALE, + batt_temp); + + acc = linear_interpolate(accrow1, + ibat_acc_lut->ibat[row1], + accrow2, + ibat_acc_lut->ibat[row2], + ibat); + + if (acc < 0) + acc = 0; + + return acc; +} diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h index fb28230db608..6b7432746fbe 100644 --- a/drivers/power/supply/qcom/qg-core.h +++ b/drivers/power/supply/qcom/qg-core.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. */ #ifndef __QG_CORE_H__ @@ -145,6 +145,7 @@ struct qpnp_qg { bool fvss_active; bool tcss_active; bool bass_active; + bool first_profile_load; int charge_status; int charge_type; int chg_iterm_ma; diff --git a/drivers/power/supply/qcom/qpnp-linear-charger.c b/drivers/power/supply/qcom/qpnp-linear-charger.c new file mode 100644 index 000000000000..a87d6d27ce59 --- /dev/null +++ b/drivers/power/supply/qcom/qpnp-linear-charger.c @@ -0,0 +1,3683 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2013-2015, 2017-2021, The Linux Foundation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "CHG: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_MASK(NUM_BITS, POS) \ + ((unsigned char) (((1 << (NUM_BITS)) - 1) << (POS))) +#define LBC_MASK(MSB_BIT, LSB_BIT) \ + CREATE_MASK(MSB_BIT - LSB_BIT + 1, LSB_BIT) + +/* Interrupt offsets */ +#define INT_RT_STS_REG 0x10 +#define FAST_CHG_ON_IRQ BIT(5) +#define OVERTEMP_ON_IRQ BIT(4) +#define BAT_TEMP_OK_IRQ BIT(1) +#define BATT_PRES_IRQ BIT(0) + +/* USB CHARGER PATH peripheral register offsets */ +#define USB_IN_VALID_MASK BIT(1) +#define CHG_GONE_BIT BIT(2) +#define USB_SUSP_REG 0x47 +#define USB_SUSPEND_BIT BIT(0) +#define USB_COMP_OVR1_REG 0xEA +#define USBIN_LLIMIT_OK_MASK LBC_MASK(1, 0) +#define USBIN_LLIMIT_OK_NO_OVERRIDE 0x00 +#define USBIN_LLIMIT_OK_OVERRIDE_1 0x03 +#define USB_OVP_TST5_REG 0xE7 +#define CHG_GONE_OK_EN_BIT BIT(2) + +/* CHARGER peripheral register offset */ +#define CHG_OPTION_REG 0x08 +#define CHG_OPTION_MASK BIT(7) +#define CHG_STATUS_REG 0x09 +#define CHG_ON_BIT BIT(0) +#define CHG_VDD_LOOP_BIT BIT(1) +#define VINMIN_LOOP_BIT BIT(3) +#define CHG_VDD_MAX_REG 0x40 +#define CHG_VDD_SAFE_REG 0x41 +#define CHG_IBAT_MAX_REG 0x44 +#define CHG_IBAT_SAFE_REG 0x45 +#define CHG_VIN_MIN_REG 0x47 +#define CHG_CTRL_REG 0x49 +#define CHG_ENABLE BIT(7) +#define CHG_FORCE_BATT_ON BIT(0) +#define CHG_EN_MASK (BIT(7) | BIT(0)) +#define CHG_FAILED_REG 0x4A +#define CHG_FAILED_BIT BIT(7) +#define CHG_VBAT_WEAK_REG 0x52 +#define CHG_IBATTERM_EN_REG 0x5B +#define CHG_USB_ENUM_T_STOP_REG 0x4E +#define CHG_TCHG_MAX_EN_REG 0x60 +#define CHG_TCHG_MAX_EN_BIT BIT(7) +#define CHG_TCHG_MAX_MASK LBC_MASK(6, 0) +#define CHG_TCHG_MAX_REG 0x61 +#define CHG_WDOG_EN_REG 0x65 +#define CHG_PERPH_RESET_CTRL3_REG 0xDA +#define CHG_COMP_OVR1 0xEE +#define CHG_VBAT_DET_OVR_MASK LBC_MASK(1, 0) +#define CHG_TEST_LOOP_REG 0xE5 +#define VIN_MIN_LOOP_DISABLE_BIT BIT(0) +#define OVERRIDE_0 0x2 +#define OVERRIDE_NONE 0x0 + +/* BATTIF peripheral register offset */ +#define BAT_IF_PRES_STATUS_REG 0x08 +#define BATT_PRES_MASK BIT(7) +#define BAT_IF_TEMP_STATUS_REG 0x09 +#define BATT_TEMP_HOT_MASK BIT(6) +#define BATT_TEMP_COLD_MASK LBC_MASK(7, 6) +#define BATT_TEMP_OK_MASK BIT(7) +#define BAT_IF_VREF_BAT_THM_CTRL_REG 0x4A +#define VREF_BATT_THERM_FORCE_ON LBC_MASK(7, 6) +#define VREF_BAT_THM_ENABLED_FSM BIT(7) +#define BAT_IF_BPD_CTRL_REG 0x48 +#define BATT_BPD_CTRL_SEL_MASK LBC_MASK(1, 0) +#define BATT_BPD_OFFMODE_EN BIT(3) +#define BATT_THM_EN BIT(1) +#define BATT_ID_EN BIT(0) +#define BAT_IF_BTC_CTRL 0x49 +#define BTC_COMP_EN_MASK BIT(7) +#define BTC_COLD_MASK BIT(1) +#define BTC_HOT_MASK BIT(0) +#define BTC_COMP_OVERRIDE_REG 0xE5 + +/* MISC peripheral register offset */ +#define MISC_REV2_REG 0x01 +#define MISC_BOOT_DONE_REG 0x42 +#define MISC_BOOT_DONE BIT(7) +#define MISC_TRIM3_REG 0xF3 +#define MISC_TRIM3_VDD_MASK LBC_MASK(5, 4) +#define MISC_TRIM4_REG 0xF4 +#define MISC_TRIM4_VDD_MASK BIT(4) + +#define PERP_SUBTYPE_REG 0x05 +#define SEC_ACCESS 0xD0 + +/* Linear peripheral subtype values */ +#define LBC_CHGR_SUBTYPE 0x15 +#define LBC_BAT_IF_SUBTYPE 0x16 +#define LBC_USB_PTH_SUBTYPE 0x17 +#define LBC_MISC_SUBTYPE 0x18 + +#define QPNP_CHG_I_MAX_MIN_90 90 + +/* Feature flags */ +#define VDD_TRIM_SUPPORTED BIT(0) + +#define QPNP_CHARGER_DEV_NAME "qcom,qpnp-linear-charger" + +/* usb_interrupts */ + +struct qpnp_lbc_irq { + int irq; + unsigned long disabled; + bool 
is_wake; +}; + +enum { + USBIN_VALID = 0, + USB_OVER_TEMP, + USB_CHG_GONE, + BATT_PRES, + BATT_TEMPOK, + CHG_DONE, + CHG_FAILED, + CHG_FAST_CHG, + CHG_VBAT_DET_LO, + MAX_IRQS, +}; + +enum { + USER = BIT(0), + THERMAL = BIT(1), + CURRENT = BIT(2), + SOC = BIT(3), + PARALLEL = BIT(4), + COLLAPSE = BIT(5), + DEBUG_BOARD = BIT(6), +}; + +enum bpd_type { + BPD_TYPE_BAT_ID, + BPD_TYPE_BAT_THM, + BPD_TYPE_BAT_THM_BAT_ID, +}; + +static const char * const bpd_label[] = { + [BPD_TYPE_BAT_ID] = "bpd_id", + [BPD_TYPE_BAT_THM] = "bpd_thm", + [BPD_TYPE_BAT_THM_BAT_ID] = "bpd_thm_id", +}; + +enum btc_type { + HOT_THD_25_PCT = 25, + HOT_THD_35_PCT = 35, + COLD_THD_70_PCT = 70, + COLD_THD_80_PCT = 80, +}; + +static u8 btc_value[] = { + [HOT_THD_25_PCT] = 0x0, + [HOT_THD_35_PCT] = BIT(0), + [COLD_THD_70_PCT] = 0x0, + [COLD_THD_80_PCT] = BIT(1), +}; + +static inline int get_bpd(const char *name) +{ + int i = 0; + + for (i = 0; i < ARRAY_SIZE(bpd_label); i++) { + if (strcmp(bpd_label[i], name) == 0) + return i; + } + + return -EINVAL; +} + +static enum power_supply_property msm_batt_power_props[] = { + POWER_SUPPLY_PROP_CHARGING_ENABLED, + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_CHARGE_TYPE, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CHARGE_COUNTER, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_DEBUG_BATTERY, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_COOL_TEMP, + POWER_SUPPLY_PROP_WARM_TEMP, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, + POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, +}; + +static char *pm_batt_supplied_to[] = { + "bms", +}; + +struct vddtrim_map { + int trim_uv; + int trim_val; +}; + +/* + * VDDTRIM is a 3 bit value which is split across two + * register TRIM3(bit 5:4) -> VDDTRIM bit(2:1) + * register TRIM4(bit 4) -> VDDTRIM bit(0) + */ +#define TRIM_CENTER 4 +#define MAX_VDD_EA_TRIM_CFG 8 +#define VDD_TRIM3_MASK LBC_MASK(2, 1) +#define VDD_TRIM3_SHIFT 3 +#define VDD_TRIM4_MASK BIT(0) +#define VDD_TRIM4_SHIFT 4 +#define AVG(VAL1, VAL2) ((VAL1 + VAL2) / 2) + +/* + * VDDTRIM table containing map of trim voltage and + * corresponding trim value. + */ +static struct vddtrim_map vddtrim_map[] = { + {36700, 0x00}, + {28000, 0x01}, + {19800, 0x02}, + {10760, 0x03}, + {0, 0x04}, + {-8500, 0x05}, + {-16800, 0x06}, + {-25440, 0x07}, +}; + +static const unsigned int qpnp_lbc_extcon_cable[] = { + EXTCON_USB, + EXTCON_USB_HOST, + EXTCON_NONE, +}; + +/* + * struct qpnp_lbc_chip - device information + * @dev: device pointer to access the parent + * @pdev: pdev pointer to access platform information + * @chgr_base: charger peripheral base address + * @bat_if_base: battery interface peripheral base address + * @usb_chgpth_base: USB charge path peripheral base address + * @misc_base: misc peripheral base address + * @bat_is_cool: indicates that battery is cool + * @bat_is_warm: indicates that battery is warm + * @chg_done: indicates that charging is completed + * @usb_present: present status of USB + * @batt_present: present status of battery + * @cfg_charging_disabled: disable drawing current from USB. 
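The vddtrim_map comment above describes how the 3-bit VDDTRIM value is scattered over two registers: VDDTRIM bits 2:1 live in TRIM3 bits 5:4 and VDDTRIM bit 0 lives in TRIM4 bit 4, which is what VDD_TRIM3_MASK/VDD_TRIM3_SHIFT and VDD_TRIM4_MASK/VDD_TRIM4_SHIFT encode. A stand-alone worked example of that split (not the driver's own helper) for trim_val = 0x05, the -8500 uV row of the table:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define LBC_MASK(msb, lsb)	((uint8_t)(((1u << ((msb) - (lsb) + 1)) - 1) << (lsb)))

#define MISC_TRIM3_VDD_MASK	LBC_MASK(5, 4)
#define MISC_TRIM4_VDD_MASK	BIT(4)
#define VDD_TRIM3_MASK		LBC_MASK(2, 1)
#define VDD_TRIM3_SHIFT		3
#define VDD_TRIM4_MASK		BIT(0)
#define VDD_TRIM4_SHIFT		4

int main(void)
{
	uint8_t trim_val = 0x05;	/* -8500 uV row of vddtrim_map */

	/* VDDTRIM bits 2:1 shifted up into the TRIM3 register field (bits 5:4) */
	uint8_t trim3 = (uint8_t)((trim_val & VDD_TRIM3_MASK) << VDD_TRIM3_SHIFT);
	/* VDDTRIM bit 0 shifted up into the TRIM4 register field (bit 4) */
	uint8_t trim4 = (uint8_t)((trim_val & VDD_TRIM4_MASK) << VDD_TRIM4_SHIFT);

	printf("TRIM3 field = 0x%02x (mask 0x%02x), TRIM4 field = 0x%02x (mask 0x%02x)\n",
	       trim3, MISC_TRIM3_VDD_MASK, trim4, MISC_TRIM4_VDD_MASK);
	/* prints: TRIM3 field = 0x20 (mask 0x30), TRIM4 field = 0x10 (mask 0x10) */
	return 0;
}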
+ * @cfg_use_fake_battery: flag to report default battery properties
+ * @fastchg_on: indicate charger in fast charge mode
+ * @cfg_btc_disabled: flag to disable btc (disables hot and cold
+ * irqs)
+ * @cfg_max_voltage_mv: the max volts the batt should be charged up to
+ * @cfg_min_voltage_mv: VIN_MIN configuration
+ * @cfg_batt_weak_voltage_uv: weak battery voltage threshold
+ * @cfg_warm_bat_chg_ma: warm battery maximum charge current in mA
+ * @cfg_cool_bat_chg_ma: cool battery maximum charge current in mA
+ * @cfg_safe_voltage_mv: safe voltage to which battery can charge
+ * @cfg_warm_bat_mv: warm temperature battery target voltage
+ * @cfg_cool_bat_mv: cool temperature battery target voltage
+ * @cfg_soc_resume_limit: SOC at which battery resumes charging
+ * @cfg_float_charge: enable float charging
+ * @charger_disabled: maintain USB path state.
+ * @cfg_charger_detect_eoc: charger can detect end of charging
+ * @cfg_disable_vbatdet_based_recharge: keep VBATDET comparator overridden to
+ * low and VBATDET irq disabled.
+ * @cfg_collapsible_chgr_support: support collapsible charger
+ * @cfg_chgr_led_support: support charger led work.
+ * @cfg_safe_current: battery safety current setting
+ * @cfg_hot_batt_p: hot battery threshold setting
+ * @cfg_cold_batt_p: cold battery threshold setting
+ * @cfg_warm_bat_decidegc: warm battery temperature in deci-degrees Celsius
+ * @cfg_cool_bat_decidegc: cool battery temperature in deci-degrees Celsius
+ * @fake_battery_soc: SOC value to be reported to userspace
+ * @cfg_tchg_mins: maximum allowed software initiated charge time
+ * @chg_failed_count: counter to maintain the number of times charging
+ * failed
+ * @cfg_bpd_detection: battery present detection mechanism selection
+ * @cfg_thermal_levels: number of thermal mitigation levels
+ * @cfg_thermal_mitigation: thermal mitigation level values
+ * @therm_lvl_sel: thermal mitigation level selection
+ * @jeita_configure_lock: lock to serialize jeita configuration request
+ * @hw_access_lock: lock to serialize access to charger registers
+ * @ibat_change_lock: lock to serialize ibat change requests from
+ * USB and thermal.
+ * @irq_lock lock to serialize enabling/disabling of irq + * @supported_feature_flag bitmask for all supported features + * @vddtrim_alarm alarm to schedule trim work at regular + * interval + * @vddtrim_work work to perform actual vddmax trimming + * @init_trim_uv initial trim voltage at bootup + * @delta_vddmax_uv current vddmax trim voltage + * @chg_enable_lock: lock to serialize charging enable/disable for + * SOC based resume charging + * @usb_psy: power supply to export information to + * userspace + * @bms_psy: power supply to export information to + * userspace + * @batt_psy: power supply to export information to + * userspace + */ +struct qpnp_lbc_chip { + struct device *dev; + struct platform_device *pdev; + struct regmap *regmap; + u16 chgr_base; + u16 bat_if_base; + u16 usb_chgpth_base; + u16 misc_base; + bool bat_is_cool; + bool bat_is_warm; + bool chg_done; + bool usb_present; + bool batt_present; + bool cfg_charging_disabled; + bool cfg_btc_disabled; + bool cfg_use_fake_battery; + bool fastchg_on; + bool cfg_use_external_charger; + bool cfg_chgr_led_support; + bool non_collapsible_chgr_detected; + bool debug_board; + unsigned int cfg_warm_bat_chg_ma; + unsigned int cfg_cool_bat_chg_ma; + unsigned int cfg_safe_voltage_mv; + unsigned int cfg_max_voltage_mv; + unsigned int cfg_min_voltage_mv; + unsigned int cfg_charger_detect_eoc; + unsigned int cfg_disable_vbatdet_based_recharge; + unsigned int cfg_batt_weak_voltage_uv; + unsigned int cfg_collapsible_chgr_support; + unsigned int cfg_warm_bat_mv; + unsigned int cfg_cool_bat_mv; + unsigned int cfg_hot_batt_p; + unsigned int cfg_cold_batt_p; + unsigned int cfg_thermal_levels; + unsigned int therm_lvl_sel; + unsigned int *thermal_mitigation; + unsigned int cfg_safe_current; + unsigned int cfg_volt_cutoff_mv; + unsigned int cutoff_threshold_uv; + unsigned int cfg_tchg_mins; + unsigned int chg_failed_count; + unsigned int supported_feature_flag; + int usb_online; + int cfg_bpd_detection; + int cfg_warm_bat_decidegc; + int cfg_cool_bat_decidegc; + int fake_battery_soc; + int cfg_soc_resume_limit; + int cfg_float_charge; + int charger_disabled; + int prev_max_ma; + int usb_psy_ma; + int delta_vddmax_uv; + int init_trim_uv; + enum power_supply_type usb_supply_type; + struct delayed_work collapsible_detection_work; + + /* parallel-chg params */ + int parallel_charging_enabled; + int lbc_max_chg_current; + int ichg_now; + int current_soc; + int cutoff_count; + + struct alarm vddtrim_alarm; + struct work_struct vddtrim_work; + struct qpnp_lbc_irq irqs[MAX_IRQS]; + struct mutex jeita_configure_lock; + struct mutex chg_enable_lock; + spinlock_t ibat_change_lock; + spinlock_t hw_access_lock; + spinlock_t irq_lock; + struct power_supply *usb_psy; + struct power_supply_desc usb_psy_d; + struct power_supply *bms_psy; + struct power_supply *batt_psy; + struct power_supply_desc batt_psy_d; + struct qpnp_adc_tm_btm_param adc_param; + struct iio_channel *vbat_sns; + struct iio_channel *lr_mux1_batt_therm; + struct qpnp_adc_tm_chip *adc_tm_dev; + struct led_classdev led_cdev; + struct dentry *debug_root; + struct work_struct debug_board_work; + + /* parallel-chg params */ + struct power_supply *parallel_psy; + struct power_supply_desc parallel_psy_d; + struct delayed_work parallel_work; + struct extcon_dev *extcon; +}; + +static void qpnp_lbc_enable_irq(struct qpnp_lbc_chip *chip, + struct qpnp_lbc_irq *irq) +{ + unsigned long flags; + + spin_lock_irqsave(&chip->irq_lock, flags); + if (__test_and_clear_bit(0, &irq->disabled)) { + pr_debug("number = 
%d\n", irq->irq); + enable_irq(irq->irq); + if (irq->is_wake) + enable_irq_wake(irq->irq); + } + spin_unlock_irqrestore(&chip->irq_lock, flags); +} + +static void qpnp_lbc_disable_irq(struct qpnp_lbc_chip *chip, + struct qpnp_lbc_irq *irq) +{ + unsigned long flags; + + spin_lock_irqsave(&chip->irq_lock, flags); + if (!__test_and_set_bit(0, &irq->disabled)) { + pr_debug("number = %d\n", irq->irq); + disable_irq_nosync(irq->irq); + if (irq->is_wake) + disable_irq_wake(irq->irq); + } + spin_unlock_irqrestore(&chip->irq_lock, flags); +} + +static int __qpnp_lbc_read(struct qpnp_lbc_chip *chip, u16 base, + u8 *val, int count) +{ + int rc = 0; + + if (base == 0) { + pr_err("base addr cannot be zero\n"); + return -EINVAL; + } + + rc = regmap_bulk_read(chip->regmap, base, val, count); + if (rc) + pr_err("SPMI read failed base=0x%02x rc=%d\n", base, rc); + + return rc; +} + +static int __qpnp_lbc_write(struct qpnp_lbc_chip *chip, u16 base, + u8 *val, int count) +{ + int rc; + + if (base == 0) { + pr_err("base addr cannot be zero\n"); + return -EINVAL; + } + + rc = regmap_bulk_write(chip->regmap, base, val, count); + if (rc) + pr_err("SPMI write failed base=0x%02x rc=%d\n", base, rc); + + return rc; +} + +static int __qpnp_lbc_secure_write(struct qpnp_lbc_chip *chip, u16 base, + u16 offset, u8 *val, int count) +{ + int rc; + u8 reg_val; + + reg_val = 0xA5; + rc = __qpnp_lbc_write(chip, base + SEC_ACCESS, ®_val, 1); + if (rc) + return rc; + + return __qpnp_lbc_write(chip, base + offset, val, 1); +} + +static int qpnp_lbc_read(struct qpnp_lbc_chip *chip, u16 base, + u8 *val, int count) +{ + int rc = 0; + unsigned long flags; + + if (base == 0) { + pr_err("base addr cannot be zero\n"); + return -EINVAL; + } + + spin_lock_irqsave(&chip->hw_access_lock, flags); + rc = __qpnp_lbc_read(chip, base, val, count); + spin_unlock_irqrestore(&chip->hw_access_lock, flags); + + return rc; +} + +static int qpnp_lbc_write(struct qpnp_lbc_chip *chip, u16 base, + u8 *val, int count) +{ + int rc = 0; + unsigned long flags; + + if (base == 0) { + pr_err("base addr cannot be zero\n"); + return -EINVAL; + } + + spin_lock_irqsave(&chip->hw_access_lock, flags); + rc = __qpnp_lbc_write(chip, base, val, count); + spin_unlock_irqrestore(&chip->hw_access_lock, flags); + + return rc; +} + +static int qpnp_lbc_masked_write(struct qpnp_lbc_chip *chip, u16 base, + u8 mask, u8 val) +{ + int rc; + u8 reg_val; + unsigned long flags; + + spin_lock_irqsave(&chip->hw_access_lock, flags); + rc = __qpnp_lbc_read(chip, base, ®_val, 1); + if (rc) + goto out; + + pr_debug("addr = 0x%x read 0x%x\n", base, reg_val); + + reg_val &= ~mask; + reg_val |= val & mask; + + pr_debug("writing to base=%x val=%x\n", base, reg_val); + + rc = __qpnp_lbc_write(chip, base, ®_val, 1); + +out: + spin_unlock_irqrestore(&chip->hw_access_lock, flags); + return rc; +} + +static int __qpnp_lbc_secure_masked_write(struct qpnp_lbc_chip *chip, u16 base, + u16 offset, u8 mask, u8 val) +{ + int rc; + u8 reg_val, reg_val1; + + rc = __qpnp_lbc_read(chip, base + offset, ®_val, 1); + if (rc) + return rc; + + pr_debug("addr = 0x%x read 0x%x\n", base, reg_val); + + reg_val &= ~mask; + reg_val |= val & mask; + pr_debug("writing to base=%x val=%x\n", base, reg_val); + + reg_val1 = 0xA5; + rc = __qpnp_lbc_write(chip, base + SEC_ACCESS, ®_val1, 1); + if (rc) + return rc; + + rc = __qpnp_lbc_write(chip, base + offset, ®_val, 1); + + return rc; +} + +static int qpnp_lbc_get_trim_voltage(u8 trim_reg) +{ + int i; + + for (i = 0; i < MAX_VDD_EA_TRIM_CFG; i++) + if (trim_reg == 
vddtrim_map[i].trim_val) + return vddtrim_map[i].trim_uv; + + pr_err("Invalid trim reg reg_val=%x\n", trim_reg); + return -EINVAL; +} + +static u8 qpnp_lbc_get_trim_val(struct qpnp_lbc_chip *chip) +{ + int i, sign; + int delta_uv; + + sign = (chip->delta_vddmax_uv >= 0) ? -1 : 1; + + switch (sign) { + case -1: + for (i = TRIM_CENTER; i >= 0; i--) { + if (vddtrim_map[i].trim_uv > chip->delta_vddmax_uv) { + delta_uv = AVG(vddtrim_map[i].trim_uv, + vddtrim_map[i + 1].trim_uv); + if (chip->delta_vddmax_uv >= delta_uv) + return vddtrim_map[i].trim_val; + else + return vddtrim_map[i + 1].trim_val; + } + } + i = 0; + break; + case 1: + for (i = TRIM_CENTER; i < ARRAY_SIZE(vddtrim_map); i++) { + if (vddtrim_map[i].trim_uv < chip->delta_vddmax_uv) { + delta_uv = AVG(vddtrim_map[i].trim_uv, + vddtrim_map[i - 1].trim_uv); + if (chip->delta_vddmax_uv >= delta_uv) + return vddtrim_map[i - 1].trim_val; + else + return vddtrim_map[i].trim_val; + } + } + i = ARRAY_SIZE(vddtrim_map) - 1; + break; + } + + return vddtrim_map[i].trim_val; +} + +static int qpnp_lbc_is_usb_chg_plugged_in(struct qpnp_lbc_chip *chip) +{ + u8 usbin_valid_rt_sts; + int rc; + + rc = qpnp_lbc_read(chip, chip->usb_chgpth_base + INT_RT_STS_REG, + &usbin_valid_rt_sts, 1); + if (rc) + return rc; + + pr_debug("rt_sts 0x%x\n", usbin_valid_rt_sts); + + return (usbin_valid_rt_sts & USB_IN_VALID_MASK) ? 1 : 0; +} + +static int qpnp_lbc_is_chg_gone(struct qpnp_lbc_chip *chip) +{ + u8 rt_sts; + int rc; + + rc = qpnp_lbc_read(chip, chip->usb_chgpth_base + INT_RT_STS_REG, + &rt_sts, 1); + if (rc) + return rc; + + pr_debug("rt_sts 0x%x\n", rt_sts); + + return (rt_sts & CHG_GONE_BIT) ? 1 : 0; +} + +static int qpnp_lbc_charger_enable(struct qpnp_lbc_chip *chip, int reason, + int enable) +{ + int disabled = chip->charger_disabled; + u8 reg_val; + int rc = 0; + + pr_debug("reason=%d requested_enable=%d disabled_status=%d\n", + reason, enable, disabled); + if (enable) + disabled &= ~reason; + else + disabled |= reason; + + if (!!chip->charger_disabled == !!disabled) + goto skip; + + reg_val = !!disabled ? CHG_FORCE_BATT_ON : CHG_ENABLE; + rc = qpnp_lbc_masked_write(chip, chip->chgr_base + CHG_CTRL_REG, + CHG_EN_MASK, reg_val); + if (rc) { + pr_err("Failed to %s charger\n", + reg_val ? "enable" : "disable"); + return rc; + } +skip: + chip->charger_disabled = disabled; + return rc; +} + +static int qpnp_lbc_is_batt_present(struct qpnp_lbc_chip *chip) +{ + u8 batt_pres_rt_sts; + int rc; + + rc = qpnp_lbc_read(chip, chip->bat_if_base + INT_RT_STS_REG, + &batt_pres_rt_sts, 1); + if (rc) + return rc; + + return (batt_pres_rt_sts & BATT_PRES_IRQ) ? 
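+	/*
+	 * Illustration of how qpnp_lbc_charger_enable() above accumulates
+	 * disable reasons (hypothetical call sequence, not driver state):
+	 *
+	 *   qpnp_lbc_charger_enable(chip, THERMAL, 0); // disabled = THERMAL, CHG_FORCE_BATT_ON written
+	 *   qpnp_lbc_charger_enable(chip, CURRENT, 0); // disabled = THERMAL | CURRENT, register untouched
+	 *   qpnp_lbc_charger_enable(chip, CURRENT, 1); // disabled = THERMAL, register untouched
+	 *   qpnp_lbc_charger_enable(chip, THERMAL, 1); // disabled = 0, CHG_ENABLE written
+	 *
+	 * The CHG_CTRL_REG write only happens when the disable mask moves
+	 * between zero and non-zero; intermediate reason changes just update
+	 * the bookkeeping in chip->charger_disabled.
+	 */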
1 : 0; +} + +static int qpnp_lbc_bat_if_configure_btc(struct qpnp_lbc_chip *chip) +{ + u8 btc_cfg = 0, mask = 0, rc; + + /* Do nothing if battery peripheral not present */ + if (!chip->bat_if_base) + return 0; + + if ((chip->cfg_hot_batt_p == HOT_THD_25_PCT) + || (chip->cfg_hot_batt_p == HOT_THD_35_PCT)) { + btc_cfg |= btc_value[chip->cfg_hot_batt_p]; + mask |= BTC_HOT_MASK; + } + + if ((chip->cfg_cold_batt_p == COLD_THD_70_PCT) || + (chip->cfg_cold_batt_p == COLD_THD_80_PCT)) { + btc_cfg |= btc_value[chip->cfg_cold_batt_p]; + mask |= BTC_COLD_MASK; + } + + mask |= BTC_COMP_EN_MASK; + if (!chip->cfg_btc_disabled) + btc_cfg |= BTC_COMP_EN_MASK; + + pr_debug("BTC configuration mask=%x\n", btc_cfg); + + rc = qpnp_lbc_masked_write(chip, + chip->bat_if_base + BAT_IF_BTC_CTRL, + mask, btc_cfg); + if (rc) + pr_err("Failed to configure BTC\n"); + + return rc; +} + +static int qpnp_chg_collapsible_chgr_config(struct qpnp_lbc_chip *chip, + bool enable) +{ + u8 reg_val; + int rc; + + pr_debug("Configure for %scollapsible charger\n", + enable ? "" : "non-"); + /* + * The flow to enable/disable the collapsible charger configuration: + * Enable: Override USBIN_LLIMIT_OK --> + * Disable VIN_MIN comparator --> + * Enable CHG_GONE comparator + * Disable: Enable VIN_MIN comparator --> + * Enable USBIN_LLIMIT_OK --> + * Disable CHG_GONE comparator + */ + if (enable) { + /* Override USBIN_LLIMIT_OK */ + reg_val = USBIN_LLIMIT_OK_OVERRIDE_1; + rc = __qpnp_lbc_secure_masked_write(chip, + chip->usb_chgpth_base, + USB_COMP_OVR1_REG, + USBIN_LLIMIT_OK_MASK, reg_val); + if (rc) { + pr_err("Failed to override USB_LLIMIT_OK\n"); + return rc; + } + } + + /* Configure VIN_MIN comparator */ + rc = __qpnp_lbc_secure_masked_write(chip, + chip->chgr_base, CHG_TEST_LOOP_REG, + VIN_MIN_LOOP_DISABLE_BIT, + enable ? VIN_MIN_LOOP_DISABLE_BIT : 0); + if (rc) { + pr_err("Failed to %s VIN_MIN comparator\n", + enable ? "disable" : "enable"); + return rc; + } + + if (!enable) { + /* Enable USBIN_LLIMIT_OK */ + reg_val = USBIN_LLIMIT_OK_NO_OVERRIDE; + rc = __qpnp_lbc_secure_masked_write(chip, + chip->usb_chgpth_base, + USB_COMP_OVR1_REG, + USBIN_LLIMIT_OK_MASK, reg_val); + if (rc) { + pr_err("Failed to override USB_LLIMIT_OK\n"); + return rc; + } + } + + /* Configure CHG_GONE comparator */ + reg_val = enable ? 
CHG_GONE_OK_EN_BIT : 0; + rc = __qpnp_lbc_secure_masked_write(chip, + chip->usb_chgpth_base, USB_OVP_TST5_REG, + CHG_GONE_OK_EN_BIT, reg_val); + if (rc) { + pr_err("Failed to write CHG_GONE comparator\n"); + return rc; + } + + return 0; +} + +#define QPNP_LBC_VBATWEAK_MIN_UV 3000000 +#define QPNP_LBC_VBATWEAK_MAX_UV 3581250 +#define QPNP_LBC_VBATWEAK_STEP_UV 18750 +static int qpnp_lbc_vbatweak_set(struct qpnp_lbc_chip *chip, int voltage) +{ + u8 reg_val; + int rc; + + if (voltage < QPNP_LBC_VBATWEAK_MIN_UV || + voltage > QPNP_LBC_VBATWEAK_MAX_UV) { + rc = -EINVAL; + } else { + reg_val = (voltage - QPNP_LBC_VBATWEAK_MIN_UV) / + QPNP_LBC_VBATWEAK_STEP_UV; + pr_debug("VBAT_WEAK=%d setting %02x\n", + chip->cfg_batt_weak_voltage_uv, reg_val); + rc = qpnp_lbc_write(chip, chip->chgr_base + CHG_VBAT_WEAK_REG, + ®_val, 1); + if (rc) + pr_err("Failed to set VBAT_WEAK\n"); + } + + return rc; +} + +#define QPNP_LBC_VBAT_MIN_MV 4000 +#define QPNP_LBC_VBAT_MAX_MV 4775 +#define QPNP_LBC_VBAT_STEP_MV 25 +static int qpnp_lbc_vddsafe_set(struct qpnp_lbc_chip *chip, int voltage) +{ + u8 reg_val; + int rc; + + if (voltage < QPNP_LBC_VBAT_MIN_MV + || voltage > QPNP_LBC_VBAT_MAX_MV) { + pr_err("Invalid vddsafe voltage mV=%d min=%d max=%d\n", + voltage, QPNP_LBC_VBAT_MIN_MV, + QPNP_LBC_VBAT_MAX_MV); + return -EINVAL; + } + reg_val = (voltage - QPNP_LBC_VBAT_MIN_MV) / QPNP_LBC_VBAT_STEP_MV; + pr_debug("voltage=%d setting %02x\n", voltage, reg_val); + rc = qpnp_lbc_write(chip, chip->chgr_base + CHG_VDD_SAFE_REG, + ®_val, 1); + if (rc) + pr_err("Failed to set VDD_SAFE\n"); + + return rc; +} + +static int qpnp_lbc_vddmax_set(struct qpnp_lbc_chip *chip, int voltage) +{ + u8 reg_val; + int rc, trim_val; + unsigned long flags; + + if (voltage < QPNP_LBC_VBAT_MIN_MV + || voltage > QPNP_LBC_VBAT_MAX_MV) { + pr_err("Invalid vddmax voltage mV=%d min=%d max=%d\n", + voltage, QPNP_LBC_VBAT_MIN_MV, + QPNP_LBC_VBAT_MAX_MV); + return -EINVAL; + } + + spin_lock_irqsave(&chip->hw_access_lock, flags); + reg_val = (voltage - QPNP_LBC_VBAT_MIN_MV) / QPNP_LBC_VBAT_STEP_MV; + pr_debug("voltage=%d setting %02x\n", voltage, reg_val); + rc = __qpnp_lbc_write(chip, chip->chgr_base + CHG_VDD_MAX_REG, + ®_val, 1); + if (rc) { + pr_err("Failed to set VDD_MAX\n"); + goto out; + } + + /* Update trim value */ + if (chip->supported_feature_flag & VDD_TRIM_SUPPORTED) { + trim_val = qpnp_lbc_get_trim_val(chip); + reg_val = (trim_val & VDD_TRIM3_MASK) << VDD_TRIM3_SHIFT; + rc = __qpnp_lbc_secure_masked_write(chip, + chip->misc_base, MISC_TRIM3_REG, + MISC_TRIM3_VDD_MASK, reg_val); + if (rc) { + pr_err("Failed to set MISC_TRIM3_REG\n"); + goto out; + } + + reg_val = (trim_val & VDD_TRIM4_MASK) << VDD_TRIM4_SHIFT; + rc = __qpnp_lbc_secure_masked_write(chip, + chip->misc_base, MISC_TRIM4_REG, + MISC_TRIM4_VDD_MASK, reg_val); + if (rc) { + pr_err("Failed to set MISC_TRIM4_REG\n"); + goto out; + } + + chip->delta_vddmax_uv = qpnp_lbc_get_trim_voltage(trim_val); + if (chip->delta_vddmax_uv == -EINVAL) { + pr_err("Invalid trim voltage=%d\n", + chip->delta_vddmax_uv); + rc = -EINVAL; + goto out; + } + + pr_debug("VDD_MAX delta=%d trim value=%x\n", + chip->delta_vddmax_uv, trim_val); + } + +out: + spin_unlock_irqrestore(&chip->hw_access_lock, flags); + return rc; +} + +static int qpnp_lbc_set_appropriate_vddmax(struct qpnp_lbc_chip *chip) +{ + int rc; + + if (chip->bat_is_cool) + rc = qpnp_lbc_vddmax_set(chip, chip->cfg_cool_bat_mv); + else if (chip->bat_is_warm) + rc = qpnp_lbc_vddmax_set(chip, chip->cfg_warm_bat_mv); + else + rc = 
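+	/*
+	 * Worked example for the VDD_SAFE/VDD_MAX encoding above (voltages
+	 * are illustrative, not board defaults): with a 4000 mV floor and
+	 * 25 mV steps,
+	 *
+	 *   4200 mV -> (4200 - 4000) / 25 = 8  -> reg_val 0x08
+	 *   4350 mV -> (4350 - 4000) / 25 = 14 -> reg_val 0x0E
+	 *
+	 * Anything outside 4000..4775 mV is rejected with -EINVAL before the
+	 * register write.
+	 */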
qpnp_lbc_vddmax_set(chip, chip->cfg_max_voltage_mv); + if (rc) + pr_err("Failed to set appropriate vddmax\n"); + + return rc; +} + +#define QPNP_LBC_MIN_DELTA_UV 13000 +static void qpnp_lbc_adjust_vddmax(struct qpnp_lbc_chip *chip, int vbat_uv) +{ + int delta_uv, prev_delta_uv, rc; + + prev_delta_uv = chip->delta_vddmax_uv; + delta_uv = (int)(chip->cfg_max_voltage_mv * 1000) - vbat_uv; + + /* + * If delta_uv is positive, apply trim if delta_uv > 13mv + * If delta_uv is negative always apply trim. + */ + if (delta_uv > 0 && delta_uv < QPNP_LBC_MIN_DELTA_UV) { + pr_debug("vbat is not low enough to increase vdd\n"); + return; + } + + pr_debug("vbat=%d current delta_uv=%d prev delta_vddmax_uv=%d\n", + vbat_uv, delta_uv, chip->delta_vddmax_uv); + chip->delta_vddmax_uv = delta_uv + chip->delta_vddmax_uv; + pr_debug("new delta_vddmax_uv %d\n", chip->delta_vddmax_uv); + rc = qpnp_lbc_set_appropriate_vddmax(chip); + if (rc) + chip->delta_vddmax_uv = prev_delta_uv; +} + +#define QPNP_LBC_VINMIN_MIN_MV 4200 +#define QPNP_LBC_VINMIN_MAX_MV 5037 +#define QPNP_LBC_VINMIN_STEP_MV 27 +static int qpnp_lbc_vinmin_set(struct qpnp_lbc_chip *chip, int voltage) +{ + u8 reg_val; + int rc; + + if ((voltage < QPNP_LBC_VINMIN_MIN_MV) + || (voltage > QPNP_LBC_VINMIN_MAX_MV)) { + pr_err("Invalid vinmin voltage mV=%d min=%d max=%d\n", + voltage, QPNP_LBC_VINMIN_MIN_MV, + QPNP_LBC_VINMIN_MAX_MV); + return -EINVAL; + } + + reg_val = (voltage - QPNP_LBC_VINMIN_MIN_MV) / QPNP_LBC_VINMIN_STEP_MV; + pr_debug("VIN_MIN=%d setting %02x\n", voltage, reg_val); + rc = qpnp_lbc_write(chip, chip->chgr_base + CHG_VIN_MIN_REG, + ®_val, 1); + if (rc) + pr_err("Failed to set VIN_MIN\n"); + + return rc; +} + +#define QPNP_LBC_IBATSAFE_MIN_MA 90 +#define QPNP_LBC_IBATSAFE_MAX_MA 1440 +#define QPNP_LBC_I_STEP_MA 90 +static int qpnp_lbc_ibatsafe_set(struct qpnp_lbc_chip *chip, int safe_current) +{ + u8 reg_val; + int rc; + + if (safe_current < QPNP_LBC_IBATSAFE_MIN_MA + || safe_current > QPNP_LBC_IBATSAFE_MAX_MA) { + pr_err("Invalid safecurrent mA=%d min=%d max=%d\n", + safe_current, QPNP_LBC_IBATSAFE_MIN_MA, + QPNP_LBC_IBATSAFE_MAX_MA); + return -EINVAL; + } + + reg_val = (safe_current - QPNP_LBC_IBATSAFE_MIN_MA) + / QPNP_LBC_I_STEP_MA; + pr_debug("Ibate_safe=%d setting %02x\n", safe_current, reg_val); + + rc = qpnp_lbc_write(chip, chip->chgr_base + CHG_IBAT_SAFE_REG, + ®_val, 1); + if (rc) + pr_err("Failed to set IBAT_SAFE\n"); + + return rc; +} + +#define QPNP_LBC_IBATMAX_MIN 90 +#define QPNP_LBC_IBATMAX_MAX 1440 +/* + * Set maximum current limit from charger + * ibat = System current + charging current + */ +static int qpnp_lbc_ibatmax_set(struct qpnp_lbc_chip *chip, int chg_current) +{ + u8 reg_val; + int rc; + + if (chg_current > QPNP_LBC_IBATMAX_MAX) + pr_debug("Invalid max charge current mA=%d max=%d\n", + chg_current, + QPNP_LBC_IBATMAX_MAX); + + chg_current = clamp(chg_current, QPNP_LBC_IBATMAX_MIN, + QPNP_LBC_IBATMAX_MAX); + reg_val = (chg_current - QPNP_LBC_IBATMAX_MIN) / QPNP_LBC_I_STEP_MA; + + rc = qpnp_lbc_write(chip, chip->chgr_base + CHG_IBAT_MAX_REG, + ®_val, 1); + if (rc) + pr_err("Failed to set IBAT_MAX\n"); + else + chip->prev_max_ma = chg_current; + + return rc; +} + +#define QPNP_LBC_TCHG_MIN 4 +#define QPNP_LBC_TCHG_MAX 512 +#define QPNP_LBC_TCHG_STEP 4 +static int qpnp_lbc_tchg_max_set(struct qpnp_lbc_chip *chip, int minutes) +{ + u8 reg_val = 0; + int rc; + + /* Disable timer */ + rc = qpnp_lbc_masked_write(chip, chip->chgr_base + CHG_TCHG_MAX_EN_REG, + CHG_TCHG_MAX_EN_BIT, 0); + if (rc) { + pr_err("Failed to 
write tchg_max_en\n"); + return rc; + } + + /* If minutes is 0, just disable timer */ + if (!minutes) { + pr_debug("Charger safety timer disabled\n"); + return rc; + } + + minutes = clamp(minutes, QPNP_LBC_TCHG_MIN, QPNP_LBC_TCHG_MAX); + + reg_val = (minutes / QPNP_LBC_TCHG_STEP) - 1; + + pr_debug("TCHG_MAX=%d mins setting %x\n", minutes, reg_val); + rc = qpnp_lbc_masked_write(chip, chip->chgr_base + CHG_TCHG_MAX_REG, + CHG_TCHG_MAX_MASK, reg_val); + if (rc) { + pr_err("Failed to write tchg_max_reg\n"); + return rc; + } + + /* Enable timer */ + rc = qpnp_lbc_masked_write(chip, chip->chgr_base + CHG_TCHG_MAX_EN_REG, + CHG_TCHG_MAX_EN_BIT, CHG_TCHG_MAX_EN_BIT); + if (rc) + pr_err("Failed to write tchg_max_en\n"); + + return rc; +} + +#define LBC_CHGR_LED 0x4D +#define CHGR_LED_ON BIT(0) +#define CHGR_LED_OFF 0x0 +#define CHGR_LED_STAT_MASK LBC_MASK(1, 0) +static void qpnp_lbc_chgr_led_brightness_set(struct led_classdev *cdev, + enum led_brightness value) +{ + struct qpnp_lbc_chip *chip = container_of(cdev, struct qpnp_lbc_chip, + led_cdev); + u8 reg; + int rc; + + if (value > LED_FULL) + value = LED_FULL; + + pr_debug("set the charger led brightness to value=%d\n", value); + reg = (value > LED_OFF) ? CHGR_LED_ON : CHGR_LED_OFF; + + rc = qpnp_lbc_masked_write(chip, chip->chgr_base + LBC_CHGR_LED, + CHGR_LED_STAT_MASK, reg); + if (rc) + pr_err("Failed to write charger led\n"); +} + +static enum +led_brightness qpnp_lbc_chgr_led_brightness_get(struct led_classdev *cdev) +{ + + struct qpnp_lbc_chip *chip = container_of(cdev, struct qpnp_lbc_chip, + led_cdev); + u8 reg_val, chgr_led_sts; + int rc; + + rc = qpnp_lbc_read(chip, chip->chgr_base + LBC_CHGR_LED, + ®_val, 1); + if (rc) { + pr_err("Failed to read charger led\n"); + return rc; + } + + chgr_led_sts = reg_val & CHGR_LED_STAT_MASK; + pr_debug("charger led brightness chgr_led_sts=%d\n", chgr_led_sts); + + return (chgr_led_sts == CHGR_LED_ON) ? 
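+	/*
+	 * Worked examples for the current and safety-timer encodings above
+	 * (illustrative values only): IBAT_MAX is clamped to 90..1440 mA in
+	 * 90 mA steps, and TCHG_MAX to 4..512 minutes in 4 minute steps:
+	 *
+	 *   450 mA  -> (450 - 90) / 90 = 4    -> CHG_IBAT_MAX_REG = 0x04
+	 *   1440 mA -> (1440 - 90) / 90 = 15  -> CHG_IBAT_MAX_REG = 0x0F
+	 *   240 min -> (240 / 4) - 1 = 59     -> CHG_TCHG_MAX_REG = 0x3B
+	 *   512 min -> (512 / 4) - 1 = 127    -> CHG_TCHG_MAX_REG = 0x7F
+	 *
+	 * The timer is disabled before reprogramming and re-enabled
+	 * afterwards, so a 0 minute request simply leaves it off.
+	 */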
LED_FULL : LED_OFF;
+}
+
+static int qpnp_lbc_register_chgr_led(struct qpnp_lbc_chip *chip)
+{
+	int rc;
+
+	chip->led_cdev.name = "red";
+	chip->led_cdev.brightness_set = qpnp_lbc_chgr_led_brightness_set;
+	chip->led_cdev.brightness_get = qpnp_lbc_chgr_led_brightness_get;
+
+	rc = led_classdev_register(chip->dev, &chip->led_cdev);
+	if (rc)
+		pr_err("unable to register charger led, rc=%d\n", rc);
+
+	return rc;
+};
+
+static int is_vinmin_set(struct qpnp_lbc_chip *chip)
+{
+	u8 reg;
+	int rc;
+
+	rc = qpnp_lbc_read(chip, chip->chgr_base + CHG_STATUS_REG, &reg, 1);
+	if (rc) {
+		pr_err("Unable to read charger status\n");
+		return false;
+	}
+	pr_debug("chg_status=0x%x\n", reg);
+
+	return !!(reg & VINMIN_LOOP_BIT);
+
+}
+
+static int is_battery_charging(struct qpnp_lbc_chip *chip)
+{
+	u8 reg;
+	int rc;
+
+	rc = qpnp_lbc_read(chip, chip->chgr_base + CHG_STATUS_REG, &reg, 1);
+	if (rc) {
+		pr_err("Unable to read charger status\n");
+		return false;
+	}
+	pr_debug("chg_status=0x%x\n", reg);
+
+	return !!(reg & CHG_ON_BIT);
+}
+
+static int qpnp_lbc_vbatdet_override(struct qpnp_lbc_chip *chip, int ovr_val)
+{
+	int rc;
+	u8 reg_val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chip->hw_access_lock, flags);
+
+	rc = __qpnp_lbc_read(chip, chip->chgr_base + CHG_COMP_OVR1,
+				&reg_val, 1);
+	if (rc)
+		goto out;
+
+	pr_debug("addr = 0x%x read 0x%x\n", chip->chgr_base, reg_val);
+
+	reg_val &= ~CHG_VBAT_DET_OVR_MASK;
+	reg_val |= ovr_val & CHG_VBAT_DET_OVR_MASK;
+
+	pr_debug("writing to base=%x val=%x\n", chip->chgr_base, reg_val);
+
+	rc = __qpnp_lbc_secure_write(chip, chip->chgr_base, CHG_COMP_OVR1,
+				&reg_val, 1);
+
+out:
+	spin_unlock_irqrestore(&chip->hw_access_lock, flags);
+	return rc;
+}
+
+static int get_prop_battery_voltage_now(struct qpnp_lbc_chip *chip)
+{
+	int rc = 0, batt_volt;
+
+	rc = iio_read_channel_processed(chip->vbat_sns, &batt_volt);
+	if (rc < 0) {
+		pr_err("Unable to read vbat rc=%d\n", rc);
+		return 0;
+	}
+
+	return batt_volt;
+}
+
+static int get_prop_batt_present(struct qpnp_lbc_chip *chip)
+{
+	u8 reg_val;
+	int rc;
+
+	rc = qpnp_lbc_read(chip, chip->bat_if_base + BAT_IF_PRES_STATUS_REG,
+				&reg_val, 1);
+	if (rc) {
+		pr_err("Failed to read battery present status\n");
+		return 0;
+	}
+
+	return (reg_val & BATT_PRES_MASK) ?
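+	/*
+	 * Note on qpnp_lbc_vbatdet_override() above: CHG_COMP_OVR1 is a
+	 * secured register (written through SEC_ACCESS), and only bits 1:0
+	 * are touched. As used in this driver, OVERRIDE_0 (0x2) overrides
+	 * the VBAT_DET_LO comparator so charging can (re)start even when
+	 * VBAT is above the VBAT_DET threshold, and OVERRIDE_NONE (0x0)
+	 * hands the comparator back to hardware, e.g.:
+	 *
+	 *   qpnp_lbc_vbatdet_override(chip, OVERRIDE_0);    // resume charging
+	 *   qpnp_lbc_vbatdet_override(chip, OVERRIDE_NONE); // back to HW control
+	 */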
1 : 0; +} + +static int get_prop_batt_health(struct qpnp_lbc_chip *chip) +{ + u8 reg_val; + int rc; + + rc = qpnp_lbc_read(chip, chip->bat_if_base + BAT_IF_TEMP_STATUS_REG, + ®_val, 1); + if (rc) { + pr_err("Failed to read battery health\n"); + return POWER_SUPPLY_HEALTH_UNKNOWN; + } + + if (BATT_TEMP_HOT_MASK & reg_val) + return POWER_SUPPLY_HEALTH_OVERHEAT; + if (!(BATT_TEMP_COLD_MASK & reg_val)) + return POWER_SUPPLY_HEALTH_COLD; + if (chip->bat_is_cool) + return POWER_SUPPLY_HEALTH_COOL; + if (chip->bat_is_warm) + return POWER_SUPPLY_HEALTH_WARM; + + return POWER_SUPPLY_HEALTH_GOOD; +} + +static int get_prop_charge_type(struct qpnp_lbc_chip *chip) +{ + int rc; + u8 reg_val; + + if (!get_prop_batt_present(chip)) + return POWER_SUPPLY_CHARGE_TYPE_NONE; + + rc = qpnp_lbc_read(chip, chip->chgr_base + INT_RT_STS_REG, + ®_val, 1); + if (rc) { + pr_err("Failed to read interrupt sts\n"); + return POWER_SUPPLY_CHARGE_TYPE_NONE; + } + + if (reg_val & FAST_CHG_ON_IRQ) + return POWER_SUPPLY_CHARGE_TYPE_FAST; + + return POWER_SUPPLY_CHARGE_TYPE_NONE; +} + +static int get_prop_current_now(struct qpnp_lbc_chip *chip) +{ + union power_supply_propval ret = {0,}; + + if (chip->bms_psy) { + power_supply_get_property(chip->bms_psy, + POWER_SUPPLY_PROP_CURRENT_NOW, &ret); + return ret.intval; + } + + pr_debug("No BMS supply registered return 0\n"); + return 0; +} + +#define DEFAULT_CAPACITY 50 +static int get_prop_capacity(struct qpnp_lbc_chip *chip) +{ + union power_supply_propval ret = {0,}; + int soc; + + if (!chip->bms_psy) + chip->bms_psy = power_supply_get_by_name("bms"); + + if (chip->fake_battery_soc >= 0) + return chip->fake_battery_soc; + + if (chip->cfg_use_fake_battery || !get_prop_batt_present(chip)) + return DEFAULT_CAPACITY; + + if (chip->bms_psy) { + power_supply_get_property(chip->bms_psy, + POWER_SUPPLY_PROP_CAPACITY, &ret); + soc = ret.intval; + if (soc == 0) { + if (!qpnp_lbc_is_usb_chg_plugged_in(chip)) + pr_warn_ratelimited("Batt 0, CHG absent\n"); + } + return soc; + } + pr_debug("No BMS supply registered return %d\n", DEFAULT_CAPACITY); + + /* + * Return default capacity to avoid userspace + * from shutting down unecessarily + */ + return DEFAULT_CAPACITY; +} + +#define CUTOFF_COUNT 3 +static int get_prop_batt_status(struct qpnp_lbc_chip *chip) +{ + int rc, curr_now, soc_now, curr_volt; + u8 reg_val; + + /* + * If SOC = 0 and we are discharging with input connected, report + * the battery status as DISCHARGING. 
+ */ + soc_now = chip->current_soc; + curr_now = get_prop_current_now(chip); + curr_volt = get_prop_battery_voltage_now(chip); + if (qpnp_lbc_is_usb_chg_plugged_in(chip) && soc_now == 0) { + if ((curr_now > 0) && (curr_volt < chip->cutoff_threshold_uv)) { + if (chip->cutoff_count > CUTOFF_COUNT) + return POWER_SUPPLY_STATUS_DISCHARGING; + chip->cutoff_count++; + } else { + chip->cutoff_count = 0; + } + } else { + chip->cutoff_count = 0; + } + + if (qpnp_lbc_is_usb_chg_plugged_in(chip) && chip->chg_done) + return POWER_SUPPLY_STATUS_FULL; + + rc = qpnp_lbc_read(chip, chip->chgr_base + INT_RT_STS_REG, + ®_val, 1); + if (rc) { + pr_err("Failed to read interrupt sts\n"); + return POWER_SUPPLY_CHARGE_TYPE_NONE; + } + + if (reg_val & FAST_CHG_ON_IRQ) + return POWER_SUPPLY_STATUS_CHARGING; + + return POWER_SUPPLY_STATUS_DISCHARGING; +} + +static int get_bms_property(struct qpnp_lbc_chip *chip, + enum power_supply_property psy_prop) +{ + union power_supply_propval ret = {0,}; + + if (!chip->bms_psy) + chip->bms_psy = power_supply_get_by_name("bms"); + + if (chip->bms_psy) { + power_supply_get_property(chip->bms_psy, psy_prop, &ret); + return ret.intval; + } + pr_debug("No BMS supply registered\n"); + + return -EINVAL; +} + +static int get_prop_charge_count(struct qpnp_lbc_chip *chip) +{ + union power_supply_propval ret = {0,}; + + if (!chip->bms_psy) + chip->bms_psy = power_supply_get_by_name("bms"); + + if (chip->bms_psy) { + power_supply_get_property(chip->bms_psy, + POWER_SUPPLY_PROP_CHARGE_COUNTER, &ret); + } else { + pr_debug("No BMS supply registered return 0\n"); + } + + return ret.intval; +} + +#define DEFAULT_TEMP 250 +static int get_prop_batt_temp(struct qpnp_lbc_chip *chip) +{ + int rc = 0, batt_temp; + + if (chip->cfg_use_fake_battery || !get_prop_batt_present(chip)) + return DEFAULT_TEMP; + + rc = iio_read_channel_processed(chip->lr_mux1_batt_therm, &batt_temp); + if (rc < 0) { + pr_debug("Unable to read batt temperature rc=%d\n", rc); + return DEFAULT_TEMP; + } + pr_debug("get_bat_temp %d\n", batt_temp); + + return batt_temp; +} + +static void qpnp_lbc_set_appropriate_current(struct qpnp_lbc_chip *chip) +{ + unsigned int chg_current = chip->usb_psy_ma; + + if (chip->bat_is_cool && chip->cfg_cool_bat_chg_ma) + chg_current = min(chg_current, chip->cfg_cool_bat_chg_ma); + if (chip->bat_is_warm && chip->cfg_warm_bat_chg_ma) + chg_current = min(chg_current, chip->cfg_warm_bat_chg_ma); + if (chip->therm_lvl_sel != 0 && chip->thermal_mitigation) + chg_current = min(chg_current, + chip->thermal_mitigation[chip->therm_lvl_sel]); + + pr_debug("setting charger current %d mA\n", chg_current); + qpnp_lbc_ibatmax_set(chip, chg_current); +} + +static void qpnp_batt_external_power_changed(struct power_supply *psy) +{ + struct qpnp_lbc_chip *chip = power_supply_get_drvdata(psy); + + if (chip->bat_if_base && chip->batt_psy) { + pr_debug("power supply changed batt_psy\n"); + power_supply_changed(chip->batt_psy); + } +} + +static int qpnp_lbc_system_temp_level_set(struct qpnp_lbc_chip *chip, + int lvl_sel) +{ + int rc = 0; + int prev_therm_lvl; + unsigned long flags; + + if (!chip->thermal_mitigation) { + pr_err("Thermal mitigation not supported\n"); + return -EINVAL; + } + + if (lvl_sel < 0) { + pr_err("Unsupported level selected %d\n", lvl_sel); + return -EINVAL; + } + + if (lvl_sel >= chip->cfg_thermal_levels) { + pr_err("Unsupported level selected %d forcing %d\n", lvl_sel, + chip->cfg_thermal_levels - 1); + lvl_sel = chip->cfg_thermal_levels - 1; + } + + if (lvl_sel == chip->therm_lvl_sel) + return 
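+	/*
+	 * Sketch of the thermal mitigation data this function consumes (the
+	 * table comes from the "qcom,thermal-mitigation" DT property; the
+	 * values below are hypothetical). Each entry is a charge-current cap
+	 * in mA, indexed by the requested level:
+	 *
+	 *   unsigned int thermal_mitigation[] = { 1440, 900, 630, 90 };
+	 *
+	 * Level 0 applies no extra cap, intermediate levels clamp IBAT_MAX
+	 * via min(usb_psy_ma, thermal_mitigation[lvl]), and the highest
+	 * level disables charging altogether using the THERMAL reason.
+	 */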
0; + + spin_lock_irqsave(&chip->ibat_change_lock, flags); + prev_therm_lvl = chip->therm_lvl_sel; + chip->therm_lvl_sel = lvl_sel; + if (chip->therm_lvl_sel == (chip->cfg_thermal_levels - 1)) { + /* Disable charging if highest value selected by */ + rc = qpnp_lbc_charger_enable(chip, THERMAL, 0); + if (rc < 0) + pr_err("Failed to set disable charging\n"); + goto out; + } + + qpnp_lbc_set_appropriate_current(chip); + + if (prev_therm_lvl == chip->cfg_thermal_levels - 1) { + /* + * If previously highest value was selected charging must have + * been disabed. Enable charging. + */ + rc = qpnp_lbc_charger_enable(chip, THERMAL, 1); + if (rc < 0) + pr_err("Failed to enable charging\n"); + } +out: + spin_unlock_irqrestore(&chip->ibat_change_lock, flags); + return rc; +} + +#define MIN_COOL_TEMP -300 +#define MAX_WARM_TEMP 1000 +#define HYSTERISIS_DECIDEGC 20 + +static int qpnp_lbc_configure_jeita(struct qpnp_lbc_chip *chip, + enum power_supply_property psp, int temp_degc) +{ + int rc = 0; + + if ((temp_degc < MIN_COOL_TEMP) || (temp_degc > MAX_WARM_TEMP)) { + pr_err("Invalid temp range=%d min=%d max=%d\n", + temp_degc, MIN_COOL_TEMP, MAX_WARM_TEMP); + return -EINVAL; + } + + if (chip->cfg_use_fake_battery || chip->debug_board) + return 0; + + mutex_lock(&chip->jeita_configure_lock); + switch (psp) { + case POWER_SUPPLY_PROP_COOL_TEMP: + if (temp_degc >= + (chip->cfg_warm_bat_decidegc - HYSTERISIS_DECIDEGC)) { + pr_err("Can't set cool %d higher than warm %d - hysterisis %d\n", + temp_degc, + chip->cfg_warm_bat_decidegc, + HYSTERISIS_DECIDEGC); + rc = -EINVAL; + goto mutex_unlock; + } + if (chip->bat_is_cool) + chip->adc_param.high_temp = + temp_degc + HYSTERISIS_DECIDEGC; + else if (!chip->bat_is_warm) + chip->adc_param.low_temp = temp_degc; + + chip->cfg_cool_bat_decidegc = temp_degc; + break; + case POWER_SUPPLY_PROP_WARM_TEMP: + if (temp_degc <= + (chip->cfg_cool_bat_decidegc + HYSTERISIS_DECIDEGC)) { + pr_err("Can't set warm %d higher than cool %d + hysterisis %d\n", + temp_degc, + chip->cfg_warm_bat_decidegc, + HYSTERISIS_DECIDEGC); + rc = -EINVAL; + goto mutex_unlock; + } + if (chip->bat_is_warm) + chip->adc_param.low_temp = + temp_degc - HYSTERISIS_DECIDEGC; + else if (!chip->bat_is_cool) + chip->adc_param.high_temp = temp_degc; + + chip->cfg_warm_bat_decidegc = temp_degc; + break; + default: + rc = -EINVAL; + goto mutex_unlock; + } + + if (qpnp_adc_tm_channel_measure(chip->adc_tm_dev, &chip->adc_param)) + pr_err("request ADC error\n"); + +mutex_unlock: + mutex_unlock(&chip->jeita_configure_lock); + return rc; +} + +static void qpnp_lbc_debug_board_work_fn(struct work_struct *work) +{ + struct qpnp_lbc_chip *chip = container_of(work, struct qpnp_lbc_chip, + debug_board_work); + int rc = 0; + + if (chip->adc_param.channel == LR_MUX1_BATT_THERM + && chip->debug_board) { + pr_debug("Disable adc-tm notifications for debug board\n"); + rc = qpnp_adc_tm_disable_chan_meas(chip->adc_tm_dev, + &chip->adc_param); + if (rc < 0) + pr_err("failed to disable tm %d\n", rc); + } +} + +static int qpnp_batt_property_is_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + case POWER_SUPPLY_PROP_CAPACITY: + case POWER_SUPPLY_PROP_CHARGING_ENABLED: + case POWER_SUPPLY_PROP_COOL_TEMP: + case POWER_SUPPLY_PROP_VOLTAGE_MIN: + case POWER_SUPPLY_PROP_WARM_TEMP: + case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT: + return 1; + default: + break; + } + + return 0; +} + +/* + * End of charge happens only when BMS reports the battery status as full. 
For + * charging to end the s/w must put the usb path in suspend. Note that there + * is no battery fet and usb path suspend is the only control to prevent any + * current going in to the battery (and the system) + * Charging can begin only when VBATDET comparator outputs 0. This indicates + * that the battery is a at a lower voltage than 4% of the vddmax value. + * S/W can override this comparator to output a favourable value - this is + * used while resuming charging when the battery hasnt fallen below 4% but + * the SOC has fallen below the resume threshold. + * + * In short, when SOC resume happens: + * a. override the comparator to output 0 + * b. enable charging + * + * When vbatdet based resume happens: + * a. enable charging + * + * When end of charge happens: + * a. disable the overrides in the comparator + * (may be from a previous soc resume) + * b. disable charging + */ +static int qpnp_batt_power_set_property(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val) +{ + struct qpnp_lbc_chip *chip = power_supply_get_drvdata(psy); + int rc = 0; + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + mutex_lock(&chip->chg_enable_lock); + switch (val->intval) { + case POWER_SUPPLY_STATUS_FULL: + if (chip->cfg_float_charge) + break; + /* Disable charging */ + rc = qpnp_lbc_charger_enable(chip, SOC, 0); + if (!rc) + chip->chg_done = true; + + /* + * Enable VBAT_DET based charging: + * To enable charging when VBAT falls below VBAT_DET + * and device stays suspended after EOC. + */ + if (!chip->cfg_disable_vbatdet_based_recharge) { + /* No override for VBAT_DET_LO comp */ + rc = qpnp_lbc_vbatdet_override(chip, + OVERRIDE_NONE); + if (rc) + pr_err("Failed to override VBAT_DET rc=%d\n", + rc); + else + qpnp_lbc_enable_irq(chip, + &chip->irqs[CHG_VBAT_DET_LO]); + } + break; + case POWER_SUPPLY_STATUS_CHARGING: + chip->chg_done = false; + pr_debug("resuming charging by bms\n"); + if (!chip->cfg_disable_vbatdet_based_recharge) + qpnp_lbc_vbatdet_override(chip, OVERRIDE_0); + + qpnp_lbc_charger_enable(chip, SOC, 1); + break; + case POWER_SUPPLY_STATUS_DISCHARGING: + chip->chg_done = false; + pr_debug("status = DISCHARGING chg_done = %d\n", + chip->chg_done); + break; + default: + break; + } + mutex_unlock(&chip->chg_enable_lock); + break; + case POWER_SUPPLY_PROP_COOL_TEMP: + rc = qpnp_lbc_configure_jeita(chip, psp, val->intval); + break; + case POWER_SUPPLY_PROP_WARM_TEMP: + rc = qpnp_lbc_configure_jeita(chip, psp, val->intval); + break; + case POWER_SUPPLY_PROP_CAPACITY: + chip->fake_battery_soc = val->intval; + pr_debug("power supply changed batt_psy\n"); + break; + case POWER_SUPPLY_PROP_CHARGING_ENABLED: + chip->cfg_charging_disabled = !(val->intval); + rc = qpnp_lbc_charger_enable(chip, USER, + !chip->cfg_charging_disabled); + break; + case POWER_SUPPLY_PROP_DEBUG_BATTERY: + chip->debug_board = val->intval; + schedule_work(&chip->debug_board_work); + rc = qpnp_lbc_charger_enable(chip, DEBUG_BOARD, + !(val->intval)); + break; + case POWER_SUPPLY_PROP_VOLTAGE_MIN: + qpnp_lbc_vinmin_set(chip, val->intval / 1000); + break; + case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT: + qpnp_lbc_system_temp_level_set(chip, val->intval); + break; + default: + return -EINVAL; + } + + if (chip->bat_if_base && chip->batt_psy) + power_supply_changed(chip->batt_psy); + + return rc; +} + +static int qpnp_batt_power_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct qpnp_lbc_chip *chip = 
power_supply_get_drvdata(psy); + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + val->intval = get_prop_batt_status(chip); + break; + case POWER_SUPPLY_PROP_CHARGE_TYPE: + val->intval = get_prop_charge_type(chip); + break; + case POWER_SUPPLY_PROP_HEALTH: + val->intval = get_prop_batt_health(chip); + break; + case POWER_SUPPLY_PROP_PRESENT: + val->intval = get_prop_batt_present(chip); + break; + case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: + val->intval = chip->cfg_max_voltage_mv * 1000; + break; + case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: + val->intval = chip->cfg_min_voltage_mv * 1000; + break; + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + val->intval = chip->cfg_max_voltage_mv * 1000; + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + val->intval = get_prop_battery_voltage_now(chip); + break; + case POWER_SUPPLY_PROP_TEMP: + val->intval = get_prop_batt_temp(chip); + break; + case POWER_SUPPLY_PROP_COOL_TEMP: + val->intval = chip->cfg_cool_bat_decidegc; + break; + case POWER_SUPPLY_PROP_WARM_TEMP: + val->intval = chip->cfg_warm_bat_decidegc; + break; + case POWER_SUPPLY_PROP_CAPACITY: + val->intval = get_prop_capacity(chip); + chip->current_soc = val->intval; + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: + val->intval = get_prop_current_now(chip); + break; + case POWER_SUPPLY_PROP_CHARGE_COUNTER: + val->intval = get_prop_charge_count(chip); + break; + case POWER_SUPPLY_PROP_CYCLE_COUNT: + val->intval = get_bms_property(chip, psp); + break; + case POWER_SUPPLY_PROP_CHARGE_FULL: + val->intval = get_bms_property(chip, psp); + break; + case POWER_SUPPLY_PROP_CHARGING_ENABLED: + val->intval = !(chip->cfg_charging_disabled); + break; + case POWER_SUPPLY_PROP_DEBUG_BATTERY: + val->intval = chip->debug_board; + break; + case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT: + val->intval = chip->therm_lvl_sel; + break; + case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX: + val->intval = chip->cfg_thermal_levels; + break; + default: + return -EINVAL; + } + + return 0; +} + +#define VINMIN_DELAY msecs_to_jiffies(500) +static void qpnp_lbc_parallel_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct qpnp_lbc_chip *chip = container_of(dwork, + struct qpnp_lbc_chip, parallel_work); + + if (is_vinmin_set(chip)) { + /* vinmin-loop triggered - stop ibat increase */ + pr_debug("vinmin_loop triggered ichg_now=%d\n", chip->ichg_now); + goto exit_work; + } else { + int temp = chip->ichg_now + QPNP_LBC_I_STEP_MA; + + if (temp > chip->lbc_max_chg_current) { + pr_debug("ichg_now=%d beyond max_chg_limit=%d - stopping\n", + temp, chip->lbc_max_chg_current); + goto exit_work; + } + chip->ichg_now = temp; + qpnp_lbc_ibatmax_set(chip, chip->ichg_now); + pr_debug("ichg_now increased to %d\n", chip->ichg_now); + } + + schedule_delayed_work(&chip->parallel_work, VINMIN_DELAY); + + return; + +exit_work: + pm_relax(chip->dev); +} + +static int qpnp_lbc_parallel_charging_config(struct qpnp_lbc_chip *chip, + int enable) +{ + chip->parallel_charging_enabled = !!enable; + + if (enable) { + /* Prevent sleep until charger is configured */ + chip->ichg_now = QPNP_LBC_IBATMAX_MIN; + qpnp_lbc_ibatmax_set(chip, chip->ichg_now); + qpnp_lbc_charger_enable(chip, PARALLEL, 1); + pm_stay_awake(chip->dev); + schedule_delayed_work(&chip->parallel_work, VINMIN_DELAY); + } else { + cancel_delayed_work_sync(&chip->parallel_work); + pm_relax(chip->dev); + /* set minimum charging current and disable charging */ + chip->ichg_now = 0; + chip->lbc_max_chg_current = 0; + qpnp_lbc_ibatmax_set(chip, 0); + 
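+		/*
+		 * Timing sketch for the enable path above (numbers are
+		 * illustrative): qpnp_lbc_parallel_work() raises IBAT_MAX by
+		 * QPNP_LBC_I_STEP_MA (90 mA) every 500 ms, so with
+		 * lbc_max_chg_current = 450 the ramp is
+		 *
+		 *   90 -> 180 -> 270 -> 360 -> 450 mA (about 2 s),
+		 *
+		 * stopping earlier if the VIN_MIN loop asserts, and holding a
+		 * wakeup source (pm_stay_awake) until the ramp exits.
+		 */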
qpnp_lbc_charger_enable(chip, PARALLEL, 0); + } + + pr_debug("charging=%d ichg_now=%d max_chg_current=%d\n", + enable, chip->ichg_now, chip->lbc_max_chg_current); + + return 0; +} + +static void qpnp_lbc_set_current(struct qpnp_lbc_chip *chip, int current_ma) +{ + pr_debug("USB present=%d current_ma=%dmA\n", chip->usb_present, + current_ma); + + if (current_ma <= 2 && get_prop_batt_present(chip)) { + qpnp_lbc_charger_enable(chip, CURRENT, 0); + chip->usb_psy_ma = QPNP_CHG_I_MAX_MIN_90; + qpnp_lbc_set_appropriate_current(chip); + } else { + chip->usb_psy_ma = current_ma; + qpnp_lbc_set_appropriate_current(chip); + qpnp_lbc_charger_enable(chip, CURRENT, 1); + } +} + +static enum power_supply_property qpnp_lbc_usb_properties[] = { + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_CURRENT_MAX, + POWER_SUPPLY_PROP_TYPE, + POWER_SUPPLY_PROP_REAL_TYPE, + POWER_SUPPLY_PROP_SDP_CURRENT_MAX, + POWER_SUPPLY_PROP_VOLTAGE_MAX, +}; +#define MICRO_5V 5000000 +static int qpnp_lbc_usb_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct qpnp_lbc_chip *chip = power_supply_get_drvdata(psy); + + switch (psp) { + case POWER_SUPPLY_PROP_SDP_CURRENT_MAX: + case POWER_SUPPLY_PROP_CURRENT_MAX: + val->intval = chip->usb_psy_ma * 1000; + break; + case POWER_SUPPLY_PROP_PRESENT: + val->intval = chip->usb_present; + break; + case POWER_SUPPLY_PROP_ONLINE: + if (is_battery_charging(chip)) + val->intval = 1; + else + val->intval = 0; + break; + case POWER_SUPPLY_PROP_TYPE: + val->intval = POWER_SUPPLY_TYPE_USB; + if (chip->usb_present && + (chip->usb_supply_type != POWER_SUPPLY_TYPE_UNKNOWN)) + val->intval = chip->usb_supply_type; + break; + case POWER_SUPPLY_PROP_REAL_TYPE: + val->intval = POWER_SUPPLY_TYPE_UNKNOWN; + if (chip->usb_present && + (chip->usb_supply_type != POWER_SUPPLY_TYPE_UNKNOWN)) + val->intval = chip->usb_supply_type; + break; + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + if (chip->usb_present) + val->intval = MICRO_5V; + else + val->intval = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int qpnp_lbc_usb_set_property(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val) +{ + struct qpnp_lbc_chip *chip = power_supply_get_drvdata(psy); + + switch (psp) { + case POWER_SUPPLY_PROP_SDP_CURRENT_MAX: + case POWER_SUPPLY_PROP_CURRENT_MAX: + qpnp_lbc_set_current(chip, (val->intval / 1000)); + break; + case POWER_SUPPLY_PROP_TYPE: + case POWER_SUPPLY_PROP_REAL_TYPE: + chip->usb_supply_type = val->intval; + break; + default: + return -EINVAL; + } + + power_supply_changed(psy); + return 0; +} +static int qpnp_lbc_usb_is_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + switch (psp) { + case POWER_SUPPLY_PROP_CURRENT_MAX: + return 1; + default: + break; + } + return 0; +} + +static enum power_supply_property qpnp_lbc_parallel_properties[] = { + POWER_SUPPLY_PROP_CHARGING_ENABLED, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CHARGE_TYPE, + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION, +}; + +static int qpnp_lbc_parallel_set_property(struct power_supply *psy, + enum power_supply_property prop, + const union power_supply_propval *val) +{ + int rc = 0; + struct qpnp_lbc_chip *chip = power_supply_get_drvdata(psy); + + switch (prop) { + case POWER_SUPPLY_PROP_CHARGING_ENABLED: + qpnp_lbc_parallel_charging_config(chip, !!val->intval); + break; + case 
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: + chip->lbc_max_chg_current = val->intval / 1000; + pr_debug("lbc_max_current=%d\n", chip->lbc_max_chg_current); + break; + default: + return -EINVAL; + } + + return rc; +} + +static int qpnp_lbc_parallel_is_writeable(struct power_supply *psy, + enum power_supply_property prop) +{ + int rc; + + switch (prop) { + case POWER_SUPPLY_PROP_CHARGING_ENABLED: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: + rc = 1; + break; + default: + rc = 0; + break; + } + return rc; +} + +static int qpnp_lbc_parallel_get_property(struct power_supply *psy, + enum power_supply_property prop, + union power_supply_propval *val) +{ + struct qpnp_lbc_chip *chip = power_supply_get_drvdata(psy); + + switch (prop) { + case POWER_SUPPLY_PROP_CHARGING_ENABLED: + val->intval = chip->parallel_charging_enabled; + break; + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: + val->intval = chip->lbc_max_chg_current * 1000; + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: + val->intval = chip->ichg_now * 1000; + break; + case POWER_SUPPLY_PROP_CHARGE_TYPE: + val->intval = get_prop_charge_type(chip); + break; + case POWER_SUPPLY_PROP_STATUS: + val->intval = get_prop_batt_status(chip); + break; + case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION: + val->intval = is_vinmin_set(chip); + break; + default: + return -EINVAL; + } + return 0; +} + + +static void qpnp_lbc_jeita_adc_notification(enum qpnp_tm_state state, void *ctx) +{ + struct qpnp_lbc_chip *chip = ctx; + bool bat_warm = false, bat_cool = false; + int temp; + unsigned long flags; + + if (state >= ADC_TM_STATE_NUM) { + pr_err("invalid notification %d\n", state); + return; + } + + temp = get_prop_batt_temp(chip); + + pr_debug("temp = %d state = %s\n", temp, + state == ADC_TM_WARM_STATE ? 
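+	/*
+	 * Worked example for the threshold handling below (temperatures are
+	 * hypothetical DT values, in decidegrees C): with
+	 * qcom,cool-bat-decidegc = 100, qcom,warm-bat-decidegc = 450 and a
+	 * 20 decidegree hysteresis, the battery is flagged warm at >= 45.0 C
+	 * and only returns to normal at <= 43.0 C; it is flagged cool at
+	 * <= 10.0 C and only returns to normal at >= 12.0 C. The adc_param
+	 * thresholds are re-armed accordingly on every notification.
+	 */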
"warm" : "cool"); + + if (state == ADC_TM_WARM_STATE) { + if (temp >= chip->cfg_warm_bat_decidegc) { + /* Normal to warm */ + bat_warm = true; + bat_cool = false; + chip->adc_param.low_temp = + chip->cfg_warm_bat_decidegc + - HYSTERISIS_DECIDEGC; + chip->adc_param.state_request = + ADC_TM_COOL_THR_ENABLE; + } else if (temp >= + chip->cfg_cool_bat_decidegc + HYSTERISIS_DECIDEGC) { + /* Cool to normal */ + bat_warm = false; + bat_cool = false; + + chip->adc_param.low_temp = + chip->cfg_cool_bat_decidegc; + chip->adc_param.high_temp = + chip->cfg_warm_bat_decidegc; + chip->adc_param.state_request = + ADC_TM_HIGH_LOW_THR_ENABLE; + } + } else { + if (temp <= chip->cfg_cool_bat_decidegc) { + /* Normal to cool */ + bat_warm = false; + bat_cool = true; + chip->adc_param.high_temp = + chip->cfg_cool_bat_decidegc + + HYSTERISIS_DECIDEGC; + chip->adc_param.state_request = + ADC_TM_WARM_THR_ENABLE; + } else if (temp <= (chip->cfg_warm_bat_decidegc - + HYSTERISIS_DECIDEGC)){ + /* Warm to normal */ + bat_warm = false; + bat_cool = false; + + chip->adc_param.low_temp = + chip->cfg_cool_bat_decidegc; + chip->adc_param.high_temp = + chip->cfg_warm_bat_decidegc; + chip->adc_param.state_request = + ADC_TM_HIGH_LOW_THR_ENABLE; + } + } + + if (chip->bat_is_cool ^ bat_cool || chip->bat_is_warm ^ bat_warm) { + spin_lock_irqsave(&chip->ibat_change_lock, flags); + chip->bat_is_cool = bat_cool; + chip->bat_is_warm = bat_warm; + qpnp_lbc_set_appropriate_vddmax(chip); + qpnp_lbc_set_appropriate_current(chip); + spin_unlock_irqrestore(&chip->ibat_change_lock, flags); + } + + pr_debug("warm %d, cool %d, low = %d deciDegC, high = %d deciDegC\n", + chip->bat_is_warm, chip->bat_is_cool, + chip->adc_param.low_temp, chip->adc_param.high_temp); + + if (qpnp_adc_tm_channel_measure(chip->adc_tm_dev, &chip->adc_param)) + pr_err("request ADC error\n"); +} + +#define IBAT_TERM_EN_MASK BIT(3) +static int qpnp_lbc_chg_init(struct qpnp_lbc_chip *chip) +{ + int rc; + u8 reg_val; + + qpnp_lbc_vbatweak_set(chip, chip->cfg_batt_weak_voltage_uv); + rc = qpnp_lbc_vinmin_set(chip, chip->cfg_min_voltage_mv); + if (rc) { + pr_err("Failed to set vin_min rc=%d\n", rc); + return rc; + } + rc = qpnp_lbc_vddsafe_set(chip, chip->cfg_safe_voltage_mv); + if (rc) { + pr_err("Failed to set vdd_safe rc=%d\n", rc); + return rc; + } + rc = qpnp_lbc_vddmax_set(chip, chip->cfg_max_voltage_mv); + if (rc) { + pr_err("Failed to set vdd_safe rc=%d\n", rc); + return rc; + } + rc = qpnp_lbc_ibatsafe_set(chip, chip->cfg_safe_current); + if (rc) { + pr_err("Failed to set ibat_safe rc=%d\n", rc); + return rc; + } + + if (of_find_property(chip->dev->of_node, "qcom,tchg-mins", NULL)) { + rc = qpnp_lbc_tchg_max_set(chip, chip->cfg_tchg_mins); + if (rc) { + pr_err("Failed to set tchg_mins rc=%d\n", rc); + return rc; + } + } + + /* + * Override VBAT_DET comparator to enable charging + * irrespective of VBAT above VBAT_DET. + */ + rc = qpnp_lbc_vbatdet_override(chip, OVERRIDE_0); + if (rc) { + pr_err("Failed to override comp rc=%d\n", rc); + return rc; + } + + /* + * Disable iterm comparator of linear charger to disable charger + * detecting end of charge condition based on DT configuration + * and float charge configuration. 
+ */ + if (!chip->cfg_charger_detect_eoc || chip->cfg_float_charge) { + rc = qpnp_lbc_masked_write(chip, + chip->chgr_base + CHG_IBATTERM_EN_REG, + IBAT_TERM_EN_MASK, 0); + if (rc) { + pr_err("Failed to disable EOC comp rc=%d\n", rc); + return rc; + } + } + + /* Disable charger watchdog */ + reg_val = 0; + rc = qpnp_lbc_write(chip, chip->chgr_base + CHG_WDOG_EN_REG, + ®_val, 1); + + return rc; +} + +static int qpnp_lbc_bat_if_init(struct qpnp_lbc_chip *chip) +{ + u8 reg_val; + int rc; + + /* Select battery presence detection */ + switch (chip->cfg_bpd_detection) { + case BPD_TYPE_BAT_THM: + reg_val = BATT_THM_EN; + break; + case BPD_TYPE_BAT_ID: + reg_val = BATT_ID_EN; + break; + case BPD_TYPE_BAT_THM_BAT_ID: + reg_val = BATT_THM_EN | BATT_ID_EN; + break; + default: + reg_val = BATT_THM_EN; + break; + } + + rc = qpnp_lbc_masked_write(chip, + chip->bat_if_base + BAT_IF_BPD_CTRL_REG, + BATT_BPD_CTRL_SEL_MASK, reg_val); + if (rc) { + pr_err("Failed to choose BPD rc=%d\n", rc); + return rc; + } + + /* Force on VREF_BAT_THM */ + reg_val = VREF_BATT_THERM_FORCE_ON; + rc = qpnp_lbc_write(chip, + chip->bat_if_base + BAT_IF_VREF_BAT_THM_CTRL_REG, + ®_val, 1); + if (rc) { + pr_err("Failed to force on VREF_BAT_THM rc=%d\n", rc); + return rc; + } + + return 0; +} + +static int qpnp_lbc_usb_path_init(struct qpnp_lbc_chip *chip) +{ + int rc; + u8 reg_val; + + if (qpnp_lbc_is_usb_chg_plugged_in(chip)) { + reg_val = 0; + rc = qpnp_lbc_write(chip, + chip->usb_chgpth_base + CHG_USB_ENUM_T_STOP_REG, + ®_val, 1); + if (rc) { + pr_err("Failed to write enum stop rc=%d\n", rc); + return -ENXIO; + } + } + + if (chip->cfg_charging_disabled) { + rc = qpnp_lbc_charger_enable(chip, USER, 0); + if (rc) + pr_err("Failed to disable charging rc=%d\n", rc); + + /* + * Disable follow-on-reset if charging is explicitly disabled, + * this forces the charging to be disabled across reset. + * Note: Explicitly disabling charging is only a debug/test + * configuration + */ + reg_val = 0x0; + rc = __qpnp_lbc_secure_write(chip, chip->chgr_base, + CHG_PERPH_RESET_CTRL3_REG, ®_val, 1); + if (rc) + pr_err("Failed to configure PERPH_CTRL3 rc=%d\n", rc); + else + pr_debug("Charger is not following PMIC reset\n"); + } else { + /* + * Enable charging explicitly, + * because not sure the default behavior. 
+ */ + reg_val = CHG_ENABLE; + rc = qpnp_lbc_masked_write(chip, chip->chgr_base + CHG_CTRL_REG, + CHG_EN_MASK, reg_val); + if (rc) + pr_err("Failed to enable charger rc=%d\n", rc); + } + + return rc; +} + +#define LBC_MISC_DIG_VERSION_1 0x01 +static int qpnp_lbc_misc_init(struct qpnp_lbc_chip *chip) +{ + int rc; + u8 reg_val, reg_val1, trim_center; + + /* Check if this LBC MISC version supports VDD trimming */ + rc = qpnp_lbc_read(chip, chip->misc_base + MISC_REV2_REG, + ®_val, 1); + if (rc) { + pr_err("Failed to read VDD_EA TRIM3 reg rc=%d\n", rc); + return rc; + } + + if (reg_val >= LBC_MISC_DIG_VERSION_1) { + chip->supported_feature_flag |= VDD_TRIM_SUPPORTED; + /* Read initial VDD trim value */ + rc = qpnp_lbc_read(chip, chip->misc_base + MISC_TRIM3_REG, + ®_val, 1); + if (rc) { + pr_err("Failed to read VDD_EA TRIM3 reg rc=%d\n", rc); + return rc; + } + + rc = qpnp_lbc_read(chip, chip->misc_base + MISC_TRIM4_REG, + ®_val1, 1); + if (rc) { + pr_err("Failed to read VDD_EA TRIM3 reg rc=%d\n", rc); + return rc; + } + + trim_center = ((reg_val & MISC_TRIM3_VDD_MASK) + >> VDD_TRIM3_SHIFT) + | ((reg_val1 & MISC_TRIM4_VDD_MASK) + >> VDD_TRIM4_SHIFT); + chip->init_trim_uv = qpnp_lbc_get_trim_voltage(trim_center); + chip->delta_vddmax_uv = chip->init_trim_uv; + pr_debug("Initial trim center %x trim_uv %d\n", + trim_center, chip->init_trim_uv); + } + + pr_debug("Setting BOOT_DONE\n"); + reg_val = MISC_BOOT_DONE; + rc = qpnp_lbc_write(chip, chip->misc_base + MISC_BOOT_DONE_REG, + ®_val, 1); + + return rc; +} + +static int show_lbc_config(struct seq_file *m, void *data) +{ + struct qpnp_lbc_chip *chip = m->private; + + seq_printf(m, "cfg_charging_disabled\t=\t%d\n" + "cfg_btc_disabled\t=\t%d\n" + "cfg_use_fake_battery\t=\t%d\n" + "cfg_use_external_charger\t=\t%d\n" + "cfg_chgr_led_support\t=\t%d\n" + "cfg_warm_bat_chg_ma\t=\t%d\n" + "cfg_cool_bat_chg_ma\t=\t%d\n" + "cfg_safe_voltage_mv\t=\t%d\n" + "cfg_max_voltage_mv\t=\t%d\n" + "cfg_min_voltage_mv\t=\t%d\n" + "cfg_charger_detect_eoc\t=\t%d\n" + "cfg_disable_vbatdet_based_recharge\t=\t%d\n" + "cfg_collapsible_chgr_support\t=\t%d\n" + "cfg_batt_weak_voltage_uv\t=\t%d\n" + "cfg_warm_bat_mv\t=\t%d\n" + "cfg_cool_bat_mv\t=\t%d\n" + "cfg_hot_batt_p\t=\t%d\n" + "cfg_cold_batt_p\t=\t%d\n" + "cfg_thermal_levels\t=\t%d\n" + "cfg_safe_current\t=\t%d\n" + "cfg_tchg_mins\t=\t%d\n" + "cfg_bpd_detection\t=\t%d\n" + "cfg_warm_bat_decidegc\t=\t%d\n" + "cfg_cool_bat_decidegc\t=\t%d\n" + "cfg_soc_resume_limit\t=\t%d\n" + "cfg_float_charge\t=\t%d\n", + chip->cfg_charging_disabled, + chip->cfg_btc_disabled, + chip->cfg_use_fake_battery, + chip->cfg_use_external_charger, + chip->cfg_chgr_led_support, + chip->cfg_warm_bat_chg_ma, + chip->cfg_cool_bat_chg_ma, + chip->cfg_safe_voltage_mv, + chip->cfg_max_voltage_mv, + chip->cfg_min_voltage_mv, + chip->cfg_charger_detect_eoc, + chip->cfg_disable_vbatdet_based_recharge, + chip->cfg_collapsible_chgr_support, + chip->cfg_batt_weak_voltage_uv, + chip->cfg_warm_bat_mv, + chip->cfg_cool_bat_mv, + chip->cfg_hot_batt_p, + chip->cfg_cold_batt_p, + chip->cfg_thermal_levels, + chip->cfg_safe_current, + chip->cfg_tchg_mins, + chip->cfg_bpd_detection, + chip->cfg_warm_bat_decidegc, + chip->cfg_cool_bat_decidegc, + chip->cfg_soc_resume_limit, + chip->cfg_float_charge); + + return 0; +} + +static int qpnp_lbc_config_open(struct inode *inode, struct file *file) +{ + struct qpnp_lbc_chip *chip = inode->i_private; + + return single_open(file, show_lbc_config, chip); +} + +static const struct file_operations qpnp_lbc_config_debugfs_ops = { + 
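+	/*
+	 * Worked example for the trim reconstruction in qpnp_lbc_misc_init()
+	 * above (register values are hypothetical): with MISC_TRIM3 = 0x20
+	 * and MISC_TRIM4 = 0x10,
+	 *
+	 *   ((0x20 & MISC_TRIM3_VDD_MASK) >> VDD_TRIM3_SHIFT) = 0x04
+	 *   ((0x10 & MISC_TRIM4_VDD_MASK) >> VDD_TRIM4_SHIFT) = 0x01
+	 *
+	 * giving trim_center = 0x05, which vddtrim_map[] translates to an
+	 * initial VDD_MAX trim of -8500 uV.
+	 */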
.owner = THIS_MODULE, + .open = qpnp_lbc_config_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#define OF_PROP_READ(chip, prop, qpnp_dt_property, retval, optional) \ +do { \ + if (retval) \ + break; \ + \ + retval = of_property_read_u32(chip->dev->of_node, \ + "qcom," qpnp_dt_property, \ + &chip->prop); \ + \ + if ((retval == -EINVAL) && optional) \ + retval = 0; \ + else if (retval) \ + pr_err("Error reading " #qpnp_dt_property \ + " property rc = %d\n", rc); \ +} while (0) + +#define DEFAULT_CUTOFF_MV 3400 +static int qpnp_charger_read_dt_props(struct qpnp_lbc_chip *chip) +{ + int rc = 0; + const char *bpd; + + OF_PROP_READ(chip, cfg_max_voltage_mv, "vddmax-mv", rc, 0); + OF_PROP_READ(chip, cfg_safe_voltage_mv, "vddsafe-mv", rc, 0); + OF_PROP_READ(chip, cfg_min_voltage_mv, "vinmin-mv", rc, 0); + OF_PROP_READ(chip, cfg_safe_current, "ibatsafe-ma", rc, 0); + OF_PROP_READ(chip, cfg_volt_cutoff_mv, "v-cutoff-mv", rc, 0); + if (rc) + pr_err("Error reading required property rc=%d\n", rc); + + if (!chip->cfg_volt_cutoff_mv) + chip->cfg_volt_cutoff_mv = DEFAULT_CUTOFF_MV; + + chip->cutoff_threshold_uv = (chip->cfg_volt_cutoff_mv - 100) * 1000; + + OF_PROP_READ(chip, cfg_tchg_mins, "tchg-mins", rc, 1); + OF_PROP_READ(chip, cfg_warm_bat_decidegc, "warm-bat-decidegc", rc, 1); + OF_PROP_READ(chip, cfg_cool_bat_decidegc, "cool-bat-decidegc", rc, 1); + OF_PROP_READ(chip, cfg_hot_batt_p, "batt-hot-percentage", rc, 1); + OF_PROP_READ(chip, cfg_cold_batt_p, "batt-cold-percentage", rc, 1); + OF_PROP_READ(chip, cfg_batt_weak_voltage_uv, "vbatweak-uv", rc, 1); + OF_PROP_READ(chip, cfg_soc_resume_limit, "resume-soc", rc, 1); + if (rc) { + pr_err("Error reading optional property rc=%d\n", rc); + return rc; + } + + rc = of_property_read_string(chip->dev->of_node, + "qcom,bpd-detection", &bpd); + if (rc) { + + chip->cfg_bpd_detection = BPD_TYPE_BAT_THM; + rc = 0; + } else { + chip->cfg_bpd_detection = get_bpd(bpd); + if (chip->cfg_bpd_detection < 0) { + pr_err("Failed to determine bpd schema rc=%d\n", rc); + return -EINVAL; + } + } + + /* + * Look up JEITA compliance parameters if cool and warm temp + * provided + */ + if (chip->cfg_cool_bat_decidegc || chip->cfg_warm_bat_decidegc) { + chip->adc_tm_dev = qpnp_get_adc_tm(chip->dev, "chg"); + if (IS_ERR(chip->adc_tm_dev)) { + rc = PTR_ERR(chip->adc_tm_dev); + if (rc != -EPROBE_DEFER) + pr_err("Failed to get adc-tm rc=%d\n", rc); + return rc; + } + + OF_PROP_READ(chip, cfg_warm_bat_chg_ma, "ibatmax-warm-ma", + rc, 1); + OF_PROP_READ(chip, cfg_cool_bat_chg_ma, "ibatmax-cool-ma", + rc, 1); + OF_PROP_READ(chip, cfg_warm_bat_mv, "warm-bat-mv", rc, 1); + OF_PROP_READ(chip, cfg_cool_bat_mv, "cool-bat-mv", rc, 1); + if (rc) { + pr_err("Error reading battery temp prop rc=%d\n", rc); + return rc; + } + } + + /* Get the btc-disabled property */ + chip->cfg_btc_disabled = of_property_read_bool( + chip->dev->of_node, "qcom,btc-disabled"); + + /* Get the charging-disabled property */ + chip->cfg_charging_disabled = + of_property_read_bool(chip->dev->of_node, + "qcom,charging-disabled"); + + /* Get the fake-batt-values property */ + chip->cfg_use_fake_battery = + of_property_read_bool(chip->dev->of_node, + "qcom,use-default-batt-values"); + + /* Get the float charging property */ + chip->cfg_float_charge = + of_property_read_bool(chip->dev->of_node, + "qcom,float-charge"); + + /* Get the charger EOC detect property */ + chip->cfg_charger_detect_eoc = + of_property_read_bool(chip->dev->of_node, + "qcom,charger-detect-eoc"); + + /* Get the 
vbatdet disable property */ + chip->cfg_disable_vbatdet_based_recharge = + of_property_read_bool(chip->dev->of_node, + "qcom,disable-vbatdet-based-recharge"); + + /* Get the charger led support property */ + chip->cfg_chgr_led_support = + of_property_read_bool(chip->dev->of_node, + "qcom,chgr-led-support"); + + /* Get the collapsible charger support property */ + chip->cfg_collapsible_chgr_support = + of_property_read_bool(chip->dev->of_node, + "qcom,collapsible-chgr-support"); + + /* Disable charging when faking battery values */ + if (chip->cfg_use_fake_battery) + chip->cfg_charging_disabled = true; + + chip->cfg_use_external_charger = of_property_read_bool( + chip->dev->of_node, "qcom,use-external-charger"); + + if (of_find_property(chip->dev->of_node, + "qcom,thermal-mitigation", + &chip->cfg_thermal_levels)) { + chip->thermal_mitigation = devm_kzalloc(chip->dev, + chip->cfg_thermal_levels, + GFP_KERNEL); + + if (chip->thermal_mitigation == NULL) { + pr_err("thermal mitigation kzalloc() failed.\n"); + return -ENOMEM; + } + + chip->cfg_thermal_levels /= sizeof(int); + rc = of_property_read_u32_array(chip->dev->of_node, + "qcom,thermal-mitigation", + chip->thermal_mitigation, + chip->cfg_thermal_levels); + if (rc) { + pr_err("Failed to read threm limits rc = %d\n", rc); + return rc; + } + } + + pr_debug("vddmax-mv=%d, vddsafe-mv=%d, vinmin-mv=%d, v-cutoff-mv=%d, ibatsafe-ma=$=%d\n", + chip->cfg_max_voltage_mv, + chip->cfg_safe_voltage_mv, + chip->cfg_min_voltage_mv, + chip->cfg_volt_cutoff_mv, + chip->cfg_safe_current); + pr_debug("warm-bat-decidegc=%d, cool-bat-decidegc=%d, batt-hot-percentage=%d, batt-cold-percentage=%d\n", + chip->cfg_warm_bat_decidegc, + chip->cfg_cool_bat_decidegc, + chip->cfg_hot_batt_p, + chip->cfg_cold_batt_p); + pr_debug("tchg-mins=%d, vbatweak-uv=%d, resume-soc=%d\n", + chip->cfg_tchg_mins, + chip->cfg_batt_weak_voltage_uv, + chip->cfg_soc_resume_limit); + pr_debug("bpd-detection=%d, ibatmax-warm-ma=%d, ibatmax-cool-ma=%d, warm-bat-mv=%d, cool-bat-mv=%d\n", + chip->cfg_bpd_detection, + chip->cfg_warm_bat_chg_ma, + chip->cfg_cool_bat_chg_ma, + chip->cfg_warm_bat_mv, + chip->cfg_cool_bat_mv); + pr_debug("btc-disabled=%d, charging-disabled=%d, use-default-batt-values=%d, float-charge=%d\n", + chip->cfg_btc_disabled, + chip->cfg_charging_disabled, + chip->cfg_use_fake_battery, + chip->cfg_float_charge); + pr_debug("charger-detect-eoc=%d, disable-vbatdet-based-recharge=%d, chgr-led-support=%d\n", + chip->cfg_charger_detect_eoc, + chip->cfg_disable_vbatdet_based_recharge, + chip->cfg_chgr_led_support); + pr_debug("collapsible-chg-support=%d, use-external-charger=%d, thermal_levels=%d\n", + chip->cfg_collapsible_chgr_support, + chip->cfg_use_external_charger, + chip->cfg_thermal_levels); + return rc; +} + +#define CHG_REMOVAL_DETECT_DLY_MS 300 +static irqreturn_t qpnp_lbc_chg_gone_irq_handler(int irq, void *_chip) +{ + struct qpnp_lbc_chip *chip = _chip; + int chg_gone; + + if (chip->cfg_collapsible_chgr_support) { + chg_gone = qpnp_lbc_is_chg_gone(chip); + pr_debug("chg-gone triggered, rt_sts: %d\n", chg_gone); + if (chg_gone) { + /* + * Disable charger to prevent fastchg irq storming + * if a non-collapsible charger is being used. + */ + pr_debug("disable charging for non-collapsbile charger\n"); + qpnp_lbc_charger_enable(chip, COLLAPSE, 0); + qpnp_lbc_disable_irq(chip, &chip->irqs[USBIN_VALID]); + qpnp_lbc_disable_irq(chip, &chip->irqs[USB_CHG_GONE]); + qpnp_chg_collapsible_chgr_config(chip, 0); + /* + * Check after a delay if the charger is still + * inserted. 
It decides if a non-collapsible + * charger is being used, or charger has been + * removed. + */ + schedule_delayed_work(&chip->collapsible_detection_work, + msecs_to_jiffies(CHG_REMOVAL_DETECT_DLY_MS)); + } + } + + return IRQ_HANDLED; +} + +static irqreturn_t qpnp_lbc_usbin_valid_irq_handler(int irq, void *_chip) +{ + struct qpnp_lbc_chip *chip = _chip; + int usb_present; + unsigned long flags; + + usb_present = qpnp_lbc_is_usb_chg_plugged_in(chip); + pr_debug("usbin-valid triggered: %d\n", usb_present); + + if (chip->usb_present ^ usb_present) { + chip->usb_present = usb_present; + if (!usb_present) { + chip->usb_supply_type = POWER_SUPPLY_TYPE_UNKNOWN; + qpnp_lbc_charger_enable(chip, CURRENT, 0); + spin_lock_irqsave(&chip->ibat_change_lock, flags); + chip->usb_psy_ma = QPNP_CHG_I_MAX_MIN_90; + qpnp_lbc_set_appropriate_current(chip); + spin_unlock_irqrestore(&chip->ibat_change_lock, + flags); + if (chip->cfg_collapsible_chgr_support) + chip->non_collapsible_chgr_detected = false; + + if (chip->supported_feature_flag & VDD_TRIM_SUPPORTED) + alarm_try_to_cancel(&chip->vddtrim_alarm); + } else { + /* + * Override VBAT_DET comparator to start charging + * even if VBAT > VBAT_DET. + */ + if (!chip->cfg_disable_vbatdet_based_recharge) + qpnp_lbc_vbatdet_override(chip, OVERRIDE_0); + + /* + * If collapsible charger supported, enable chgr_gone + * irq, and configure for collapsible charger. + */ + if (chip->cfg_collapsible_chgr_support && + !chip->non_collapsible_chgr_detected) { + qpnp_lbc_enable_irq(chip, + &chip->irqs[USB_CHG_GONE]); + qpnp_chg_collapsible_chgr_config(chip, 1); + } + /* + * Enable SOC based charging to make sure + * charging gets enabled on USB insertion + * irrespective of battery SOC above resume_soc. + */ + qpnp_lbc_charger_enable(chip, SOC, 1); + } + + pr_debug("Updating usb_psy PRESENT property\n"); + if (chip->usb_present) + extcon_set_state_sync(chip->extcon, + EXTCON_USB, true); + else + extcon_set_state_sync(chip->extcon, + EXTCON_USB, false); + } + + power_supply_changed(chip->usb_psy); + if (chip->bat_if_base) { + pr_debug("power supply changed batt_psy\n"); + power_supply_changed(chip->batt_psy); + } + + return IRQ_HANDLED; +} + +static int qpnp_lbc_is_batt_temp_ok(struct qpnp_lbc_chip *chip) +{ + u8 reg_val; + int rc; + + rc = qpnp_lbc_read(chip, chip->bat_if_base + INT_RT_STS_REG, + ®_val, 1); + if (rc) { + pr_err("reg read failed: addr=%03X, rc=%d\n", + chip->bat_if_base + INT_RT_STS_REG, rc); + return rc; + } + + return (reg_val & BAT_TEMP_OK_IRQ) ? 
1 : 0; +} + +static irqreturn_t qpnp_lbc_batt_temp_irq_handler(int irq, void *_chip) +{ + struct qpnp_lbc_chip *chip = _chip; + int batt_temp_good; + + batt_temp_good = qpnp_lbc_is_batt_temp_ok(chip); + pr_debug("batt-temp triggered: %d\n", batt_temp_good); + + pr_debug("power supply changed batt_psy\n"); + power_supply_changed(chip->batt_psy); + return IRQ_HANDLED; +} + +static irqreturn_t qpnp_lbc_batt_pres_irq_handler(int irq, void *_chip) +{ + struct qpnp_lbc_chip *chip = _chip; + int batt_present; + + if (chip->debug_board) + return IRQ_HANDLED; + + batt_present = qpnp_lbc_is_batt_present(chip); + pr_debug("batt-pres triggered: %d\n", batt_present); + + if (chip->batt_present ^ batt_present) { + chip->batt_present = batt_present; + pr_debug("power supply changed batt_psy\n"); + power_supply_changed(chip->batt_psy); + + if ((chip->cfg_cool_bat_decidegc + || chip->cfg_warm_bat_decidegc) + && batt_present && !chip->cfg_use_fake_battery) { + pr_debug("enabling vadc notifications\n"); + if (qpnp_adc_tm_channel_measure(chip->adc_tm_dev, + &chip->adc_param)) + pr_err("request ADC error\n"); + } else if ((chip->cfg_cool_bat_decidegc + || chip->cfg_warm_bat_decidegc) + && !batt_present && !chip->cfg_use_fake_battery) { + qpnp_adc_tm_disable_chan_meas(chip->adc_tm_dev, + &chip->adc_param); + pr_debug("disabling vadc notifications\n"); + } + } + return IRQ_HANDLED; +} + +static irqreturn_t qpnp_lbc_chg_failed_irq_handler(int irq, void *_chip) +{ + struct qpnp_lbc_chip *chip = _chip; + int rc; + u8 reg_val = CHG_FAILED_BIT; + + pr_debug("chg_failed triggered count=%u\n", ++chip->chg_failed_count); + rc = qpnp_lbc_write(chip, chip->chgr_base + CHG_FAILED_REG, + &reg_val, 1); + if (rc) + pr_err("Failed to write chg_fail clear bit rc=%d\n", rc); + + if (chip->bat_if_base) { + pr_debug("power supply changed batt_psy\n"); + power_supply_changed(chip->batt_psy); + } + + return IRQ_HANDLED; +} + +static int qpnp_lbc_is_fastchg_on(struct qpnp_lbc_chip *chip) +{ + u8 reg_val; + int rc; + + rc = qpnp_lbc_read(chip, chip->chgr_base + INT_RT_STS_REG, + &reg_val, 1); + if (rc) { + pr_err("Failed to read interrupt status rc=%d\n", rc); + return rc; + } + pr_debug("charger status %x\n", reg_val); + return (reg_val & FAST_CHG_ON_IRQ) ? 1 : 0; +} + +#define TRIM_PERIOD_NS (50LL * NSEC_PER_SEC) +static irqreturn_t qpnp_lbc_fastchg_irq_handler(int irq, void *_chip) +{ + ktime_t kt; + struct qpnp_lbc_chip *chip = _chip; + bool fastchg_on = false; + + fastchg_on = qpnp_lbc_is_fastchg_on(chip); + + pr_debug("FAST_CHG IRQ triggered, fastchg_on: %d\n", fastchg_on); + + if (chip->fastchg_on ^ fastchg_on) { + chip->fastchg_on = fastchg_on; + if (fastchg_on) { + mutex_lock(&chip->chg_enable_lock); + chip->chg_done = false; + mutex_unlock(&chip->chg_enable_lock); + /* + * Start alarm timer to periodically calculate + * and update VDD_MAX trim value.
+ */ + if (chip->supported_feature_flag & + VDD_TRIM_SUPPORTED) { + kt = ns_to_ktime(TRIM_PERIOD_NS); + alarm_start_relative(&chip->vddtrim_alarm, + kt); + } + } + } + + if (chip->bat_if_base) { + pr_debug("power supply changed batt_psy\n"); + power_supply_changed(chip->batt_psy); + } + + return IRQ_HANDLED; +} + +static irqreturn_t qpnp_lbc_chg_done_irq_handler(int irq, void *_chip) +{ + struct qpnp_lbc_chip *chip = _chip; + + pr_debug("charging done triggered\n"); + + chip->chg_done = true; + pr_debug("power supply changed batt_psy\n"); + power_supply_changed(chip->batt_psy); + + return IRQ_HANDLED; +} + +static irqreturn_t qpnp_lbc_vbatdet_lo_irq_handler(int irq, void *_chip) +{ + struct qpnp_lbc_chip *chip = _chip; + int rc; + + pr_debug("vbatdet-lo triggered\n"); + + /* + * Disable vbatdet irq to prevent interrupt storm when VBAT is + * close to VBAT_DET. + */ + qpnp_lbc_disable_irq(chip, &chip->irqs[CHG_VBAT_DET_LO]); + + /* + * Override VBAT_DET comparator to 0 to fix comparator toggling + * near VBAT_DET threshold. + */ + qpnp_lbc_vbatdet_override(chip, OVERRIDE_0); + + /* + * Battery has fallen below the vbatdet threshold and it is + * time to resume charging. + */ + rc = qpnp_lbc_charger_enable(chip, SOC, 1); + if (rc) + pr_err("Failed to enable charging\n"); + + return IRQ_HANDLED; +} + +static int qpnp_lbc_is_overtemp(struct qpnp_lbc_chip *chip) +{ + u8 reg_val; + int rc; + + rc = qpnp_lbc_read(chip, chip->usb_chgpth_base + INT_RT_STS_REG, + ®_val, 1); + if (rc) { + pr_err("Failed to read interrupt status rc=%d\n", rc); + return rc; + } + + pr_debug("OVERTEMP rt status %x\n", reg_val); + return (reg_val & OVERTEMP_ON_IRQ) ? 1 : 0; +} + +static irqreturn_t qpnp_lbc_usb_overtemp_irq_handler(int irq, void *_chip) +{ + struct qpnp_lbc_chip *chip = _chip; + int overtemp = qpnp_lbc_is_overtemp(chip); + + pr_warn_ratelimited("charger %s temperature limit\n", + overtemp ? 
"exceeds" : "within"); + + return IRQ_HANDLED; +} + +static int qpnp_disable_lbc_charger(struct qpnp_lbc_chip *chip) +{ + int rc; + u8 reg; + + reg = CHG_FORCE_BATT_ON; + rc = qpnp_lbc_masked_write(chip, chip->chgr_base + CHG_CTRL_REG, + CHG_EN_MASK, reg); + /* disable BTC */ + rc |= qpnp_lbc_masked_write(chip, chip->bat_if_base + BAT_IF_BTC_CTRL, + BTC_COMP_EN_MASK, 0); + /* Enable BID and disable THM based BPD */ + reg = BATT_ID_EN | BATT_BPD_OFFMODE_EN; + rc |= qpnp_lbc_write(chip, chip->bat_if_base + BAT_IF_BPD_CTRL_REG, + ®, 1); + return rc; +} + +#define REQUEST_IRQ(chip, idx, rc, irq_name, threaded, flags, wake)\ +do { \ + if (rc) \ + break; \ + if (chip->irqs[idx].irq) { \ + if (threaded) \ + rc = devm_request_threaded_irq(chip->dev, \ + chip->irqs[idx].irq, NULL, \ + qpnp_lbc_##irq_name##_irq_handler, \ + flags, #irq_name, chip); \ + else \ + rc = devm_request_irq(chip->dev, \ + chip->irqs[idx].irq, \ + qpnp_lbc_##irq_name##_irq_handler, \ + flags, #irq_name, chip); \ + if (rc < 0) { \ + pr_err("Unable to request " #irq_name " %d\n", \ + rc); \ + } else { \ + rc = 0; \ + if (wake) { \ + enable_irq_wake(chip->irqs[idx].irq); \ + chip->irqs[idx].is_wake = true; \ + } \ + } \ + } \ +} while (0) + +static inline void get_irq_resource(struct qpnp_lbc_chip *chip, int idx, + const char *name, struct device_node *child) +{ + int rc = 0; + + rc = of_irq_get_byname(child, name); + if (rc < 0) + pr_err("Unable to get irq resource for %s - %d\n", name, rc); + else + chip->irqs[idx].irq = rc; +} + +static int qpnp_lbc_request_irqs(struct qpnp_lbc_chip *chip) +{ + int rc = 0; + + REQUEST_IRQ(chip, CHG_FAILED, rc, chg_failed, 0, + IRQF_TRIGGER_RISING, 1); + + REQUEST_IRQ(chip, CHG_FAST_CHG, rc, fastchg, 1, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING + | IRQF_ONESHOT, 1); + + REQUEST_IRQ(chip, CHG_DONE, rc, chg_done, 0, + IRQF_TRIGGER_RISING, 0); + + REQUEST_IRQ(chip, CHG_VBAT_DET_LO, rc, vbatdet_lo, 0, + IRQF_TRIGGER_FALLING, 1); + + REQUEST_IRQ(chip, BATT_PRES, rc, batt_pres, 1, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING + | IRQF_ONESHOT, 1); + + REQUEST_IRQ(chip, BATT_TEMPOK, rc, batt_temp, 0, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 1); + + REQUEST_IRQ(chip, USBIN_VALID, rc, usbin_valid, 1, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING + | IRQF_ONESHOT, 1); + + REQUEST_IRQ(chip, USB_CHG_GONE, rc, chg_gone, 0, + IRQF_TRIGGER_RISING, 1); + + REQUEST_IRQ(chip, USB_OVER_TEMP, rc, usb_overtemp, 0, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 0); + + return 0; +} + +static int qpnp_lbc_get_irqs(struct qpnp_lbc_chip *chip, u8 subtype, + struct device_node *child) +{ + switch (subtype) { + case LBC_CHGR_SUBTYPE: + get_irq_resource(chip, CHG_FAST_CHG, "fast-chg-on", child); + get_irq_resource(chip, CHG_FAILED, "chg-failed", child); + + if (!chip->cfg_disable_vbatdet_based_recharge) + get_irq_resource(chip, CHG_VBAT_DET_LO, + "vbat-det-lo", child); + if (chip->cfg_charger_detect_eoc) + get_irq_resource(chip, CHG_DONE, "chg-done", child); + break; + + case LBC_BAT_IF_SUBTYPE: + get_irq_resource(chip, BATT_PRES, "batt-pres", child); + get_irq_resource(chip, BATT_TEMPOK, "bat-temp-ok", child); + break; + + case LBC_USB_PTH_SUBTYPE: + get_irq_resource(chip, USBIN_VALID, "usbin-valid", child); + get_irq_resource(chip, USB_OVER_TEMP, "usb-over-temp", child); + get_irq_resource(chip, USB_CHG_GONE, "chg-gone", child); + break; + + default: + return -EINVAL; + } + + return 0; +} + +/* Get/Set initial state of charger */ +static void determine_initial_status(struct qpnp_lbc_chip *chip) +{ + 
chip->usb_present = qpnp_lbc_is_usb_chg_plugged_in(chip); + power_supply_changed(chip->usb_psy); + /* + * Set USB psy online to avoid userspace from shutting down if battery + * capacity is at zero and no chargers online. + */ + if (chip->usb_present) { + if (chip->cfg_collapsible_chgr_support && + !chip->non_collapsible_chgr_detected) { + qpnp_lbc_enable_irq(chip, + &chip->irqs[USB_CHG_GONE]); + qpnp_chg_collapsible_chgr_config(chip, 1); + } + extcon_set_state_sync(chip->extcon, EXTCON_USB, true); + } else { + extcon_set_state_sync(chip->extcon, EXTCON_USB, false); + } + power_supply_changed(chip->usb_psy); +} + +static void qpnp_lbc_collapsible_detection_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct qpnp_lbc_chip *chip = container_of(dwork, + struct qpnp_lbc_chip, + collapsible_detection_work); + + if (qpnp_lbc_is_usb_chg_plugged_in(chip)) { + chip->non_collapsible_chgr_detected = true; + pr_debug("Non-collapsible charger detected\n"); + } else { + chip->non_collapsible_chgr_detected = false; + pr_debug("Charger removal detected\n"); + } + qpnp_lbc_charger_enable(chip, COLLAPSE, 1); + qpnp_lbc_enable_irq(chip, &chip->irqs[USBIN_VALID]); +} + +#define IBAT_TRIM -300 +static void qpnp_lbc_vddtrim_work_fn(struct work_struct *work) +{ + int rc, vbat_now_uv, ibat_now; + u8 reg_val; + ktime_t kt; + struct qpnp_lbc_chip *chip = container_of(work, struct qpnp_lbc_chip, + vddtrim_work); + + vbat_now_uv = get_prop_battery_voltage_now(chip); + ibat_now = get_prop_current_now(chip) / 1000; + pr_debug("vbat %d ibat %d capacity %d\n", + vbat_now_uv, ibat_now, get_prop_capacity(chip)); + + /* + * Stop trimming under following condition: + * USB removed + * Charging Stopped + */ + if (!qpnp_lbc_is_fastchg_on(chip) || + !qpnp_lbc_is_usb_chg_plugged_in(chip)) { + pr_debug("stop trim charging stopped\n"); + goto exit; + } else { + rc = qpnp_lbc_read(chip, chip->chgr_base + CHG_STATUS_REG, + ®_val, 1); + if (rc) { + pr_err("Failed to read chg status rc=%d\n", rc); + goto out; + } + + /* + * Update VDD trim voltage only if following conditions are + * met: + * If charger is in VDD loop AND + * If ibat is between 0 ma and -300 ma + */ + if ((reg_val & CHG_VDD_LOOP_BIT) && + ((ibat_now < 0) && (ibat_now > IBAT_TRIM))) + qpnp_lbc_adjust_vddmax(chip, vbat_now_uv); + } + +out: + kt = ns_to_ktime(TRIM_PERIOD_NS); + alarm_start_relative(&chip->vddtrim_alarm, kt); +exit: + pm_relax(chip->dev); +} + +static enum alarmtimer_restart vddtrim_callback(struct alarm *alarm, + ktime_t now) +{ + struct qpnp_lbc_chip *chip = container_of(alarm, struct qpnp_lbc_chip, + vddtrim_alarm); + + pm_stay_awake(chip->dev); + schedule_work(&chip->vddtrim_work); + + return ALARMTIMER_NORESTART; +} + +static int qpnp_lbc_parallel_charger_init(struct qpnp_lbc_chip *chip) +{ + u8 reg_val; + int rc; + + rc = qpnp_lbc_vinmin_set(chip, chip->cfg_min_voltage_mv); + if (rc) { + pr_err("Failed to set vin_min rc=%d\n", rc); + return rc; + } + rc = qpnp_lbc_vddsafe_set(chip, chip->cfg_max_voltage_mv); + if (rc) { + pr_err("Failed to set vdd_safe rc=%d\n", rc); + return rc; + } + rc = qpnp_lbc_vddmax_set(chip, chip->cfg_max_voltage_mv); + if (rc) { + pr_err("Failed to set vdd_max rc=%d\n", rc); + return rc; + } + + /* set the minimum charging current */ + rc = qpnp_lbc_ibatmax_set(chip, 0); + if (rc) { + pr_err("Failed to set IBAT_MAX to 0 rc=%d\n", rc); + return rc; + } + + /* disable charging */ + rc = qpnp_lbc_charger_enable(chip, PARALLEL, 0); + if (rc) { + pr_err("Unable to disable charging 
rc=%d\n", rc); + return 0; + } + + /* Enable BID and disable THM based BPD */ + reg_val = BATT_ID_EN | BATT_BPD_OFFMODE_EN; + rc = qpnp_lbc_write(chip, chip->bat_if_base + BAT_IF_BPD_CTRL_REG, + ®_val, 1); + if (rc) + pr_err("Failed to override BPD configuration rc=%d\n", rc); + + /* Disable and override BTC */ + reg_val = 0x2A; + rc = __qpnp_lbc_secure_write(chip, chip->bat_if_base, + BTC_COMP_OVERRIDE_REG, ®_val, 1); + if (rc) + pr_err("Failed to disable BTC override rc=%d\n", rc); + + reg_val = 0; + rc = qpnp_lbc_write(chip, + chip->bat_if_base + BAT_IF_BTC_CTRL, ®_val, 1); + if (rc) + pr_err("Failed to disable BTC rc=%d\n", rc); + + /* override VBAT_DET */ + rc = qpnp_lbc_vbatdet_override(chip, OVERRIDE_0); + if (rc) + pr_err("Failed to override VBAT_DET rc=%d\n", rc); + + /* Set BOOT_DONE and ENUM complete */ + reg_val = 0; + rc = qpnp_lbc_write(chip, + chip->usb_chgpth_base + CHG_USB_ENUM_T_STOP_REG, + ®_val, 1); + if (rc) + pr_err("Failed to stop enum-timer rc=%d\n", rc); + + reg_val = MISC_BOOT_DONE; + rc = qpnp_lbc_write(chip, chip->misc_base + MISC_BOOT_DONE_REG, + ®_val, 1); + if (rc) + pr_err("Failed to set boot-done rc=%d\n", rc); + + return rc; +} + +static int qpnp_lbc_parse_resources(struct qpnp_lbc_chip *chip) +{ + u8 subtype; + int rc = 0; + struct platform_device *pdev = chip->pdev; + struct device_node *child; + unsigned int base; + + if (of_get_available_child_count(pdev->dev.of_node) == 0) { + pr_err("no child nodes\n"); + goto fail_charger_enable; + } + + for_each_available_child_of_node(pdev->dev.of_node, child) { + rc = of_property_read_u32(child, "reg", &base); + pr_debug("register address = %#X rc = %d\n", base, rc); + if (rc < 0) { + pr_err("Couldn`t find reg in node = %s rc = %d\n", + child->full_name, rc); + goto fail_charger_enable; + } + + rc = qpnp_lbc_read(chip, base + PERP_SUBTYPE_REG, &subtype, 1); + if (rc) { + pr_err("Peripheral subtype read failed rc=%d\n", rc); + return rc; + } + + switch (subtype) { + case LBC_CHGR_SUBTYPE: + chip->chgr_base = base; + rc = qpnp_lbc_get_irqs(chip, subtype, child); + if (rc) { + pr_err("Failed to get CHGR irqs rc=%d\n", rc); + return rc; + } + break; + case LBC_USB_PTH_SUBTYPE: + chip->usb_chgpth_base = base; + rc = qpnp_lbc_get_irqs(chip, subtype, child); + if (rc) { + pr_err("Failed to get USB_PTH irqs rc=%d\n", + rc); + return rc; + } + break; + case LBC_BAT_IF_SUBTYPE: + chip->bat_if_base = base; + rc = qpnp_lbc_get_irqs(chip, subtype, child); + if (rc) { + pr_err("Failed to get BAT_IF irqs rc=%d\n", rc); + return rc; + } + break; + case LBC_MISC_SUBTYPE: + chip->misc_base = base; + break; + default: + pr_err("Invalid peripheral subtype=0x%x\n", subtype); + rc = -EINVAL; + } + } + + pr_debug("chgr_base=%x usb_chgpth_base=%x bat_if_base=%x misc_base=%x\n", + chip->chgr_base, chip->usb_chgpth_base, + chip->bat_if_base, chip->misc_base); + + return rc; + +fail_charger_enable: + dev_set_drvdata(&pdev->dev, NULL); + return -ENXIO; +} + +static int qpnp_lbc_parallel_probe(struct platform_device *pdev) +{ + int rc = 0; + struct qpnp_lbc_chip *chip; + struct power_supply_config parallel_psy_cfg = {}; + + chip = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_lbc_chip), + GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!chip->regmap) { + pr_err("Couldn't get parent's regmap\n"); + return -EINVAL; + } + + chip->dev = &pdev->dev; + chip->pdev = pdev; + dev_set_drvdata(&pdev->dev, chip); + device_init_wakeup(&pdev->dev, 1); + spin_lock_init(&chip->hw_access_lock); + 
spin_lock_init(&chip->ibat_change_lock); + INIT_DELAYED_WORK(&chip->parallel_work, qpnp_lbc_parallel_work); + + OF_PROP_READ(chip, cfg_max_voltage_mv, "vddmax-mv", rc, 0); + if (rc) + return rc; + OF_PROP_READ(chip, cfg_min_voltage_mv, "vinmin-mv", rc, 0); + if (rc) + return rc; + + rc = qpnp_lbc_parse_resources(chip); + if (rc) { + pr_err("Unable to parse LBC(parallel) resources rc=%d\n", rc); + return rc; + } + + rc = qpnp_lbc_parallel_charger_init(chip); + if (rc) { + pr_err("Unable to initialize LBC(parallel) rc=%d\n", rc); + return rc; + } + + chip->parallel_psy_d.name = "parallel"; + chip->parallel_psy_d.type = POWER_SUPPLY_TYPE_PARALLEL; + chip->parallel_psy_d.get_property = qpnp_lbc_parallel_get_property; + chip->parallel_psy_d.set_property = qpnp_lbc_parallel_set_property; + chip->parallel_psy_d.properties = qpnp_lbc_parallel_properties; + chip->parallel_psy_d.property_is_writeable = + qpnp_lbc_parallel_is_writeable; + chip->parallel_psy_d.num_properties = + ARRAY_SIZE(qpnp_lbc_parallel_properties); + + parallel_psy_cfg.drv_data = chip; + parallel_psy_cfg.num_supplicants = 0; + + chip->parallel_psy = devm_power_supply_register(chip->dev, + &chip->parallel_psy_d, + &parallel_psy_cfg); + if (IS_ERR(chip->parallel_psy)) { + pr_err("Unable to register LBC parallel_psy rc = %ld\n", + PTR_ERR(chip->parallel_psy)); + return PTR_ERR(chip->parallel_psy); + } + + pr_debug("LBC (parallel) registered successfully!\n"); + + return 0; +} + +static int qpnp_lbc_main_probe(struct platform_device *pdev) +{ + ktime_t kt; + struct qpnp_lbc_chip *chip; + struct power_supply_config batt_psy_cfg = {}; + struct power_supply_config usb_psy_cfg = {}; + int rc = 0; + + chip = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_lbc_chip), + GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!chip->regmap) + return -EINVAL; + + chip->dev = &pdev->dev; + chip->pdev = pdev; + dev_set_drvdata(&pdev->dev, chip); + device_init_wakeup(&pdev->dev, 1); + chip->fake_battery_soc = -EINVAL; + chip->current_soc = DEFAULT_CAPACITY; + chip->usb_supply_type = POWER_SUPPLY_TYPE_UNKNOWN; + + chip->extcon = devm_extcon_dev_allocate(chip->dev, + qpnp_lbc_extcon_cable); + if (IS_ERR(chip->extcon)) { + pr_err("failed to allocate extcon device\n"); + rc = PTR_ERR(chip->extcon); + return rc; + } + + rc = devm_extcon_dev_register(chip->dev, chip->extcon); + if (rc) { + pr_err("failed to register extcon device\n"); + return rc; + } + + mutex_init(&chip->jeita_configure_lock); + mutex_init(&chip->chg_enable_lock); + spin_lock_init(&chip->hw_access_lock); + spin_lock_init(&chip->ibat_change_lock); + spin_lock_init(&chip->irq_lock); + INIT_WORK(&chip->vddtrim_work, qpnp_lbc_vddtrim_work_fn); + alarm_init(&chip->vddtrim_alarm, ALARM_REALTIME, vddtrim_callback); + INIT_DELAYED_WORK(&chip->collapsible_detection_work, + qpnp_lbc_collapsible_detection_work); + INIT_WORK(&chip->debug_board_work, qpnp_lbc_debug_board_work_fn); + /* Get all device-tree properties */ + rc = qpnp_charger_read_dt_props(chip); + if (rc) { + pr_err("Failed to read DT properties rc=%d\n", rc); + return rc; + } + + rc = qpnp_lbc_parse_resources(chip); + if (rc) { + pr_err("Unable to parse LBC resources rc=%d\n", rc); + goto fail_chg_enable; + } + + if (chip->cfg_use_external_charger) { + pr_warn("Disabling Linear Charger (e-external-charger = 1)\n"); + rc = qpnp_disable_lbc_charger(chip); + if (rc) + pr_err("Unable to disable charger rc=%d\n", rc); + return -ENODEV; + } + + chip->usb_psy_d.name = "usb"; +
chip->usb_psy_d.type = POWER_SUPPLY_TYPE_USB; + chip->usb_psy_d.properties = qpnp_lbc_usb_properties; + chip->usb_psy_d.num_properties = ARRAY_SIZE(qpnp_lbc_usb_properties); + chip->usb_psy_d.get_property = qpnp_lbc_usb_get_property; + chip->usb_psy_d.set_property = qpnp_lbc_usb_set_property; + chip->usb_psy_d.property_is_writeable = qpnp_lbc_usb_is_writeable; + + usb_psy_cfg.drv_data = chip; + usb_psy_cfg.num_supplicants = 0; + + chip->usb_psy = devm_power_supply_register(chip->dev, + &chip->usb_psy_d, &usb_psy_cfg); + if (IS_ERR(chip->usb_psy)) { + pr_err("Unable to register usb_psy rc = %ld\n", + PTR_ERR(chip->usb_psy)); + rc = PTR_ERR(chip->usb_psy); + goto fail_chg_enable; + } + + + chip->vbat_sns = iio_channel_get(&pdev->dev, "vbat_sns"); + if (IS_ERR(chip->vbat_sns)) { + if (PTR_ERR(chip->vbat_sns) != -EPROBE_DEFER) + pr_err("vbat_sns unavailable %ld\n", + PTR_ERR(chip->vbat_sns)); + rc = PTR_ERR(chip->vbat_sns); + chip->vbat_sns = NULL; + goto fail_chg_enable; + } + + chip->lr_mux1_batt_therm = iio_channel_get(&pdev->dev, "batt_therm"); + if (IS_ERR(chip->lr_mux1_batt_therm)) { + if (PTR_ERR(chip->lr_mux1_batt_therm) != -EPROBE_DEFER) + pr_err("lr_mux1_batt_therm unavailable %ld\n", + PTR_ERR(chip->lr_mux1_batt_therm)); + rc = PTR_ERR(chip->lr_mux1_batt_therm); + chip->lr_mux1_batt_therm = NULL; + goto fail_chg_enable; + } + + /* Initialize h/w */ + rc = qpnp_lbc_misc_init(chip); + if (rc) { + pr_err("unable to initialize LBC MISC rc=%d\n", rc); + return rc; + } + rc = qpnp_lbc_chg_init(chip); + if (rc) { + pr_err("unable to initialize LBC charger rc=%d\n", rc); + return rc; + } + rc = qpnp_lbc_bat_if_init(chip); + if (rc) { + pr_err("unable to initialize LBC BAT_IF rc=%d\n", rc); + return rc; + } + rc = qpnp_lbc_usb_path_init(chip); + if (rc) { + pr_err("unable to initialize LBC USB path rc=%d\n", rc); + return rc; + } + + if (chip->cfg_chgr_led_support) { + rc = qpnp_lbc_register_chgr_led(chip); + if (rc) { + pr_err("unable to register charger led rc=%d\n", rc); + return rc; + } + } + + if (chip->bat_if_base) { + chip->batt_present = qpnp_lbc_is_batt_present(chip); + chip->batt_psy_d.name = "battery"; + chip->batt_psy_d.type = POWER_SUPPLY_TYPE_BATTERY; + chip->batt_psy_d.properties = msm_batt_power_props; + chip->batt_psy_d.num_properties = + ARRAY_SIZE(msm_batt_power_props); + chip->batt_psy_d.get_property = qpnp_batt_power_get_property; + chip->batt_psy_d.set_property = qpnp_batt_power_set_property; + chip->batt_psy_d.external_power_changed = + qpnp_batt_external_power_changed; + chip->batt_psy_d.property_is_writeable = + qpnp_batt_property_is_writeable; + + batt_psy_cfg.drv_data = chip; + batt_psy_cfg.supplied_to = pm_batt_supplied_to; + batt_psy_cfg.num_supplicants = + ARRAY_SIZE(pm_batt_supplied_to); + + chip->batt_psy = devm_power_supply_register(chip->dev, + &chip->batt_psy_d, + &batt_psy_cfg); + if (IS_ERR(chip->batt_psy)) { + pr_err("Unable to register LBC batt_psy rc = %ld\n", + PTR_ERR(chip->batt_psy)); + goto fail_chg_enable; + } + } + + if ((chip->cfg_cool_bat_decidegc || chip->cfg_warm_bat_decidegc) + && chip->bat_if_base && !chip->cfg_use_fake_battery) { + chip->adc_param.low_temp = chip->cfg_cool_bat_decidegc; + chip->adc_param.high_temp = chip->cfg_warm_bat_decidegc; + chip->adc_param.timer_interval = ADC_MEAS1_INTERVAL_1S; + chip->adc_param.state_request = ADC_TM_HIGH_LOW_THR_ENABLE; + chip->adc_param.btm_ctx = chip; + chip->adc_param.threshold_notification = + qpnp_lbc_jeita_adc_notification; + chip->adc_param.channel = LR_MUX1_BATT_THERM; + + if 
(get_prop_batt_present(chip)) { + rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev, + &chip->adc_param); + if (rc) { + pr_err("request ADC error rc=%d\n", rc); + goto unregister_batt; + } + } + } + + rc = qpnp_lbc_bat_if_configure_btc(chip); + if (rc) { + pr_err("Failed to configure btc rc=%d\n", rc); + goto unregister_batt; + } + + /* Get/Set charger's initial status */ + determine_initial_status(chip); + + rc = qpnp_lbc_request_irqs(chip); + if (rc) { + pr_err("unable to initialize LBC MISC rc=%d\n", rc); + goto unregister_batt; + } + + if (chip->cfg_charging_disabled && !get_prop_batt_present(chip)) + pr_info("Battery absent and charging disabled\n"); + + /* Configure initial alarm for VDD trim */ + if ((chip->supported_feature_flag & VDD_TRIM_SUPPORTED) && + qpnp_lbc_is_fastchg_on(chip)) { + kt = ns_to_ktime(TRIM_PERIOD_NS); + alarm_start_relative(&chip->vddtrim_alarm, kt); + } + + chip->debug_root = debugfs_create_dir("qpnp_lbc", NULL); + if (!chip->debug_root) + pr_err("Couldn't create debug dir\n"); + + if (chip->debug_root) { + struct dentry *ent; + + ent = debugfs_create_file("lbc_config", S_IFREG | 0444, + chip->debug_root, chip, + &qpnp_lbc_config_debugfs_ops); + if (!ent) + pr_err("Couldn't create lbc_config debug file\n"); + } + + pr_debug("Probe chg_dis=%d bpd=%d usb=%d batt_pres=%d batt_volt=%d soc=%d\n", + chip->cfg_charging_disabled, + chip->cfg_bpd_detection, + qpnp_lbc_is_usb_chg_plugged_in(chip), + get_prop_batt_present(chip), + get_prop_battery_voltage_now(chip), + get_prop_capacity(chip)); + + return 0; + +unregister_batt: + if (chip->bat_if_base) + power_supply_unregister(chip->batt_psy); +fail_chg_enable: + power_supply_unregister(chip->usb_psy); + dev_set_drvdata(&pdev->dev, NULL); + return rc; +} + +static int is_parallel_charger(struct platform_device *pdev) +{ + return of_property_read_bool(pdev->dev.of_node, + "qcom,parallel-charger"); +} + +static int qpnp_lbc_probe(struct platform_device *pdev) +{ + if (is_parallel_charger(pdev)) + return qpnp_lbc_parallel_probe(pdev); + else + return qpnp_lbc_main_probe(pdev); +} + + +static int qpnp_lbc_remove(struct platform_device *pdev) +{ + struct qpnp_lbc_chip *chip = dev_get_drvdata(&pdev->dev); + + if (chip->supported_feature_flag & VDD_TRIM_SUPPORTED) { + alarm_cancel(&chip->vddtrim_alarm); + cancel_work_sync(&chip->vddtrim_work); + } + cancel_work_sync(&chip->debug_board_work); + cancel_delayed_work_sync(&chip->collapsible_detection_work); + debugfs_remove_recursive(chip->debug_root); + if (chip->bat_if_base) + power_supply_unregister(chip->batt_psy); + power_supply_unregister(chip->usb_psy); + mutex_destroy(&chip->jeita_configure_lock); + mutex_destroy(&chip->chg_enable_lock); + dev_set_drvdata(&pdev->dev, NULL); + return 0; +} + +static const struct of_device_id qpnp_lbc_match_table[] = { + { .compatible = QPNP_CHARGER_DEV_NAME, }, + {} +}; + +static struct platform_driver qpnp_lbc_driver = { + .probe = qpnp_lbc_probe, + .remove = qpnp_lbc_remove, + .driver = { + .name = QPNP_CHARGER_DEV_NAME, + .of_match_table = qpnp_lbc_match_table, + }, +}; + +/* + * qpnp_lbc_init() - register platform driver for qpnp-chg + */ +static int __init qpnp_lbc_init(void) +{ + return platform_driver_register(&qpnp_lbc_driver); +} +module_init(qpnp_lbc_init); + +static void __exit qpnp_lbc_exit(void) +{ + platform_driver_unregister(&qpnp_lbc_driver); +} +module_exit(qpnp_lbc_exit); + +MODULE_DESCRIPTION("QPNP Linear charger driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" QPNP_CHARGER_DEV_NAME); diff --git 
a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c index 49d9caefa1ab..0032df497d2a 100644 --- a/drivers/power/supply/qcom/qpnp-qg.c +++ b/drivers/power/supply/qcom/qpnp-qg.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. */ #define pr_fmt(fmt) "QG-K: %s: " fmt, __func__ @@ -2122,6 +2122,9 @@ static int qg_psy_set_property(struct power_supply *psy, if (chip->sp) soh_profile_update(chip->sp, chip->soh); break; + case POWER_SUPPLY_PROP_CLEAR_SOH: + chip->first_profile_load = pval->intval; + break; case POWER_SUPPLY_PROP_ESR_ACTUAL: chip->esr_actual = pval->intval; break; @@ -2254,6 +2257,9 @@ static int qg_psy_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_SOH: pval->intval = chip->soh; break; + case POWER_SUPPLY_PROP_CLEAR_SOH: + pval->intval = chip->first_profile_load; + break; case POWER_SUPPLY_PROP_CC_SOC: rc = qg_get_cc_soc(chip, &pval->intval); break; @@ -2296,6 +2302,7 @@ static int qg_property_is_writeable(struct power_supply *psy, case POWER_SUPPLY_PROP_SOH: case POWER_SUPPLY_PROP_FG_RESET: case POWER_SUPPLY_PROP_BATT_AGE_LEVEL: + case POWER_SUPPLY_PROP_CLEAR_SOH: return 1; default: break; @@ -2334,6 +2341,7 @@ static enum power_supply_property qg_psy_props[] = { POWER_SUPPLY_PROP_ESR_ACTUAL, POWER_SUPPLY_PROP_ESR_NOMINAL, POWER_SUPPLY_PROP_SOH, + POWER_SUPPLY_PROP_CLEAR_SOH, POWER_SUPPLY_PROP_CC_SOC, POWER_SUPPLY_PROP_FG_RESET, POWER_SUPPLY_PROP_VOLTAGE_AVG, @@ -3430,6 +3438,7 @@ static int qg_sanitize_sdam(struct qpnp_qg *chip) rc = qg_sdam_write(SDAM_MAGIC, SDAM_MAGIC_NUMBER); if (!rc) qg_dbg(chip, QG_DEBUG_PON, "First boot. SDAM initilized\n"); + chip->first_profile_load = true; } else { /* SDAM has invalid value */ rc = qg_sdam_clear(); @@ -3437,6 +3446,7 @@ static int qg_sanitize_sdam(struct qpnp_qg *chip) pr_err("SDAM uninitialized, SDAM reset\n"); rc = qg_sdam_write(SDAM_MAGIC, SDAM_MAGIC_NUMBER); } + chip->first_profile_load = true; } if (rc < 0) @@ -4366,7 +4376,7 @@ static int qg_parse_dt(struct qpnp_qg *chip) else chip->dt.esr_low_temp_threshold = (int)temp; - rc = of_property_read_u32(node, "qcom,shutdown_soc_threshold", &temp); + rc = of_property_read_u32(node, "qcom,shutdown-soc-threshold", &temp); if (rc < 0) chip->dt.shutdown_soc_threshold = -EINVAL; else diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c index 3bd5f9eab737..ce5a63dfb3b2 100644 --- a/drivers/power/supply/qcom/qpnp-smb5.c +++ b/drivers/power/supply/qcom/qpnp-smb5.c @@ -610,6 +610,12 @@ static int smb5_parse_dt_misc(struct smb5 *chip, struct device_node *node) if (chg->chg_param.hvdcp2_max_icl_ua <= 0) chg->chg_param.hvdcp2_max_icl_ua = MICRO_3PA; + of_property_read_u32(node, "qcom,hvdcp2-12v-max-icl-ua", + &chg->chg_param.hvdcp2_12v_max_icl_ua); + if (chg->chg_param.hvdcp2_12v_max_icl_ua <= 0) + chg->chg_param.hvdcp2_12v_max_icl_ua = + chg->chg_param.hvdcp2_max_icl_ua; + /* Used only in Adapter CV mode of operation */ of_property_read_u32(node, "qcom,qc4-max-icl-ua", &chg->chg_param.qc4_max_icl_ua); diff --git a/drivers/power/supply/qcom/qpnp-vm-bms.c b/drivers/power/supply/qcom/qpnp-vm-bms.c new file mode 100644 index 000000000000..2b30c05fd9cd --- /dev/null +++ b/drivers/power/supply/qcom/qpnp-vm-bms.c @@ -0,0 +1,4654 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014-2016, 2018-2021, The Linux Foundation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "VBMS: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define _BMS_MASK(BITS, POS) \ + ((unsigned char)(((1 << (BITS)) - 1) << (POS))) +#define BMS_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \ + _BMS_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, \ + (RIGHT_BIT_POS)) + +/* Config / Data registers */ +#define REVISION1_REG 0x0 +#define STATUS1_REG 0x8 +#define FSM_STATE_MASK BMS_MASK(5, 3) +#define FSM_STATE_SHIFT 3 + +#define STATUS2_REG 0x9 +#define FIFO_CNT_SD_MASK BMS_MASK(7, 4) +#define FIFO_CNT_SD_SHIFT 4 + +#define MODE_CTL_REG 0x40 +#define FORCE_S3_MODE BIT(0) +#define ENABLE_S3_MODE BIT(1) +#define FORCE_S2_MODE BIT(2) +#define ENABLE_S2_MODE BIT(3) +#define S2_MODE_MASK BMS_MASK(3, 2) +#define S3_MODE_MASK BMS_MASK(1, 0) + +#define DATA_CTL1_REG 0x42 +#define MASTER_HOLD_BIT BIT(0) + +#define DATA_CTL2_REG 0x43 +#define FIFO_CNT_SD_CLR_BIT BIT(2) +#define ACC_DATA_SD_CLR_BIT BIT(1) +#define ACC_CNT_SD_CLR_BIT BIT(0) + +#define S3_OCV_TOL_CTL_REG 0x44 + +#define EN_CTL_REG 0x46 +#define BMS_EN_BIT BIT(7) + +#define FIFO_LENGTH_REG 0x47 +#define S1_FIFO_LENGTH_MASK BMS_MASK(3, 0) +#define S2_FIFO_LENGTH_MASK BMS_MASK(7, 4) +#define S2_FIFO_LENGTH_SHIFT 4 + +#define S1_SAMPLE_INTVL_REG 0x55 +#define S2_SAMPLE_INTVL_REG 0x56 +#define S3_SAMPLE_INTVL_REG 0x57 + +#define S1_ACC_CNT_REG 0x5E +#define S2_ACC_CNT_REG 0x5F +#define ACC_CNT_MASK BMS_MASK(2, 0) + +#define ACC_DATA0_SD_REG 0x63 +#define ACC_CNT_SD_REG 0x67 +#define OCV_DATA0_REG 0x6A +#define FIFO_0_LSB_REG 0xC0 + +#define BMS_SOC_REG 0xB0 +#define BMS_OCV_REG 0xB1 /* B1 & B2 */ +#define SOC_STORAGE_MASK 0xFE + +#define CHARGE_INCREASE_STORAGE 0xB3 +#define CHARGE_CYCLE_STORAGE_LSB 0xB4 /* B4 & B5 */ + +#define SEC_ACCESS 0xD0 + +#define QPNP_CHARGER_PRESENT BIT(7) + +/* Constants */ +#define OCV_TOL_LSB_UV 300 +#define MAX_OCV_TOL_THRESHOLD (OCV_TOL_LSB_UV * 0xFF) +#define MAX_SAMPLE_COUNT 256 +#define MAX_SAMPLE_INTERVAL 2550 +#define BMS_READ_TIMEOUT 500 +#define BMS_DEFAULT_TEMP 250 +#define OCV_INVALID 0xFFFF +#define SOC_INVALID 0xFF +#define OCV_UNINITIALIZED 0xFFFF +#define VBATT_ERROR_MARGIN 20000 +#define CV_DROP_MARGIN 10000 +#define MIN_OCV_UV 2000000 +#define TIME_PER_PERCENT_UUC 60 +#define IAVG_SAMPLES 16 +#define MIN_SOC_UUC 3 + +#define QPNP_VM_BMS_DEV_NAME "qcom,qpnp-vm-bms" + +#define DEBUG_BATT_ID_LOW 6500 +#define DEBUG_BATT_ID_HIGH 8500 + +#define VADC1_LC_USR_BASE 0x3100 +#define INT_TEST_VAL_OFFSET 0xE1 +#define QPNP_VBAT_COEFF_1 3000 +#define QPNP_VBAT_COEFF_2 45810000 +#define QPNP_VBAT_COEFF_3 100000 +#define QPNP_VBAT_COEFF_4 3500 +#define QPNP_VBAT_COEFF_5 80000000 +#define QPNP_VBAT_COEFF_6 4400 +#define QPNP_VBAT_COEFF_7 32200000 +#define QPNP_VBAT_COEFF_8 3880 +#define QPNP_VBAT_COEFF_9 5770 +#define QPNP_VBAT_COEFF_10 3660 +#define QPNP_VBAT_COEFF_11 5320 +#define QPNP_VBAT_COEFF_12 8060000 +#define QPNP_VBAT_COEFF_13 102640000 +#define QPNP_VBAT_COEFF_14 22220000 +#define QPNP_VBAT_COEFF_15 83060000 +#define QPNP_VBAT_COEFF_16 2810 +#define QPNP_VBAT_COEFF_17 5260 +#define QPNP_VBAT_COEFF_18 8027 +#define QPNP_VBAT_COEFF_19 2347 +#define QPNP_VBAT_COEFF_20 6043 +#define QPNP_VBAT_COEFF_21 1914 +#define QPNP_VBAT_OFFSET_SMIC 9446 +#define QPNP_VBAT_OFFSET_GF 9441 +#define QPNP_OCV_OFFSET_SMIC 4596 +#define QPNP_OCV_OFFSET_GF 
5896 +#define QPNP_VBAT_COEFF_22 6800 +#define QPNP_VBAT_COEFF_23 3500 +#define QPNP_VBAT_COEFF_24 4360 +#define QPNP_VBAT_COEFF_25 8060 +#define QPNP_VBAT_COEFF_26 7895 +#define QPNP_VBAT_COEFF_27 5658 +#define QPNP_VBAT_COEFF_28 5760 +#define QPNP_VBAT_COEFF_29 7900 +#define QPNP_VBAT_COEFF_30 5660 +#define QPNP_VBAT_COEFF_31 3620 +#define QPNP_VBAT_COEFF_32 1230 +#define QPNP_VBAT_COEFF_33 5760 +#define QPNP_VBAT_COEFF_34 4080 +#define QPNP_VBAT_COEFF_35 7000 +#define QPNP_VBAT_COEFF_36 3040 +#define QPNP_VBAT_COEFF_37 3850 +#define QPNP_VBAT_COEFF_38 5000 +#define QPNP_VBAT_COEFF_39 2610 +#define QPNP_VBAT_COEFF_40 4190 +#define QPNP_VBAT_COEFF_41 5800 +#define QPNP_VBAT_COEFF_42 2620 +#define QPNP_VBAT_COEFF_43 4030 +#define QPNP_VBAT_COEFF_44 3230 +#define QPNP_VBAT_COEFF_45 3450 +#define QPNP_VBAT_COEFF_46 2120 +#define QPNP_VBAT_COEFF_47 3560 +#define QPNP_VBAT_COEFF_48 2190 +#define QPNP_VBAT_COEFF_49 4180 +#define QPNP_VBAT_COEFF_50 27800000 +#define QPNP_VBAT_COEFF_51 5110 +#define QPNP_VBAT_COEFF_52 34444000 + +#define COMP_ID_GF 0 +#define COMP_ID_SMIC 1 +#define COMP_ID_TSMC 2 +#define COMP_ID_NUM 3 + +/* indicates the state of BMS */ +enum { + IDLE_STATE, + S1_STATE, + S2_STATE, + S3_STATE, + S7_STATE, +}; + +enum { + WRKARND_PON_OCV_COMP = BIT(0), +}; + +struct bms_irq { + int irq; + unsigned long disabled; +}; + +struct bms_wakeup_source { + struct wakeup_source *source; + unsigned long disabled; +}; + +struct temp_curr_comp_map { + int temp_decideg; + int current_ma; +}; + +struct bms_dt_cfg { + bool cfg_report_charger_eoc; + bool cfg_force_bms_active_on_charger; + bool cfg_force_s3_on_suspend; + bool cfg_ignore_shutdown_soc; + bool cfg_use_voltage_soc; + int cfg_v_cutoff_uv; + int cfg_max_voltage_uv; + int cfg_r_conn_mohm; + int cfg_shutdown_soc_valid_limit; + int cfg_low_soc_calc_threshold; + int cfg_low_soc_calculate_soc_ms; + int cfg_low_voltage_threshold; + int cfg_low_voltage_calculate_soc_ms; + int cfg_low_soc_fifo_length; + int cfg_calculate_soc_ms; + int cfg_s1_sample_interval_ms; + int cfg_s2_sample_interval_ms; + int cfg_s1_sample_count; + int cfg_s2_sample_count; + int cfg_s1_fifo_length; + int cfg_s2_fifo_length; + int cfg_disable_bms; + int cfg_s3_ocv_tol_uv; + int cfg_soc_resume_limit; + int cfg_low_temp_threshold; + int cfg_ibat_avg_samples; + int cfg_battery_aging_comp; + bool cfg_use_reported_soc; +}; + +struct qpnp_bms_chip { + struct device *dev; + struct platform_device *pdev; + struct regmap *regmap; + dev_t dev_no; + u16 base; + u8 revision[2]; + u32 batt_pres_addr; + u32 chg_pres_addr; + + /* status variables */ + u8 current_fsm_state; + bool last_soc_invalid; + bool warm_reset; + bool bms_psy_registered; + bool battery_full; + bool bms_dev_open; + bool data_ready; + bool apply_suspend_config; + bool in_cv_state; + bool low_soc_fifo_set; + int battery_status; + int calculated_soc; + int current_now; + int prev_current_now; + int prev_voltage_based_soc; + int calculate_soc_ms; + int voltage_soc_uv; + int battery_present; + int last_soc; + int last_soc_unbound; + int last_soc_change_sec; + int charge_start_tm_sec; + int catch_up_time_sec; + int delta_time_s; + int uuc_delta_time_s; + int ocv_at_100; + int last_ocv_uv; + int s2_fifo_length; + int last_acc; + int hi_power_state; + unsigned int vadc_v0625; + unsigned int vadc_v1250; + unsigned long tm_sec; + unsigned long workaround_flag; + unsigned long uuc_tm_sec; + u32 seq_num; + u8 shutdown_soc; + bool shutdown_soc_invalid; + u16 last_ocv_raw; + u32 shutdown_ocv; + bool suspend_data_valid; + int 
iavg_num_samples; + unsigned int iavg_index; + int iavg_samples_ma[IAVG_SAMPLES]; + int iavg_ma; + int prev_soc_uuc; + int eoc_reported; + u8 charge_increase; + u16 charge_cycles; + unsigned int start_soc; + unsigned int end_soc; + unsigned int chg_start_soc; + + struct bms_battery_data *batt_data; + struct bms_dt_cfg dt; + + struct dentry *debug_root; + struct bms_wakeup_source vbms_lv_wake_source; + struct bms_wakeup_source vbms_cv_wake_source; + struct bms_wakeup_source vbms_soc_wake_source; + wait_queue_head_t bms_wait_q; + struct delayed_work monitor_soc_work; + struct mutex bms_data_mutex; + struct mutex bms_device_mutex; + struct mutex last_soc_mutex; + struct mutex state_change_mutex; + struct class *bms_class; + struct device *bms_device; + struct cdev bms_cdev; + struct qpnp_vm_bms_data bms_data; + struct iio_channel *ref_625mv; + struct iio_channel *ref_125v; + struct iio_channel *vbat_sns; + struct iio_channel *lr_mux1_batt_therm; + struct iio_channel *die_temp; + struct iio_channel *lr_mux2_batt_id; + struct qpnp_adc_tm_chip *adc_tm_dev; + struct pmic_revid_data *revid_data; + struct qpnp_adc_tm_btm_param vbat_monitor_params; + struct bms_irq fifo_update_done_irq; + struct bms_irq fsm_state_change_irq; + struct power_supply_desc bms_psy_d; + struct power_supply *bms_psy; + struct power_supply *batt_psy; + struct power_supply *usb_psy; + struct notifier_block nb; + bool reported_soc_in_use; + bool charger_removed_since_full; + bool charger_reinserted; + bool reported_soc_high_current; + int reported_soc; + int reported_soc_change_sec; + int reported_soc_delta; + int batt_id_ohm; + int fab_id; +}; + +static struct qpnp_bms_chip *the_chip; + +static struct temp_curr_comp_map temp_curr_comp_lut[] = { + {-300, 15}, + {250, 17}, + {850, 28}, +}; + +static void disable_bms_irq(struct bms_irq *irq) +{ + if (!__test_and_set_bit(0, &irq->disabled)) { + disable_irq(irq->irq); + pr_debug("disabled irq %d\n", irq->irq); + } +} + +static void enable_bms_irq(struct bms_irq *irq) +{ + if (__test_and_clear_bit(0, &irq->disabled)) { + enable_irq(irq->irq); + pr_debug("enable irq %d\n", irq->irq); + } +} + +static void bms_stay_awake(struct bms_wakeup_source *source) +{ + if (__test_and_clear_bit(0, &source->disabled)) { + __pm_stay_awake(source->source); + pr_debug("enabled source %s\n", source->source->name); + } +} + +static void bms_relax(struct bms_wakeup_source *source) +{ + if (!__test_and_set_bit(0, &source->disabled)) { + __pm_relax(source->source); + pr_debug("disabled source %s\n", source->source->name); + } +} + +static bool bms_wake_active(struct bms_wakeup_source *source) +{ + return !source->disabled; +} + +static int bound_soc(int soc) +{ + soc = max(0, soc); + soc = min(100, soc); + + return soc; +} + +static char *qpnp_vm_bms_supplicants[] = { + "battery", +}; + +static int qpnp_read_wrapper(struct qpnp_bms_chip *chip, u8 *val, + u16 base, int count) +{ + int rc; + + rc = regmap_bulk_read(chip->regmap, base, val, count); + if (rc) + pr_err("Regmap read failed rc=%d\n", rc); + + return rc; +} + +static int qpnp_write_wrapper(struct qpnp_bms_chip *chip, u8 *val, + u16 base, int count) +{ + int rc; + + rc = regmap_bulk_write(chip->regmap, base, val, count); + if (rc) + pr_err("Regmap write failed rc=%d\n", rc); + + return rc; +} + +static int qpnp_masked_write_base(struct qpnp_bms_chip *chip, u16 addr, + u8 mask, u8 val) +{ + int rc; + u8 reg; + + rc = qpnp_read_wrapper(chip, ®, addr, 1); + if (rc) { + pr_err("read failed addr = %03X, rc = %d\n", addr, rc); + return rc; + } + reg 
&= ~mask; + reg |= val & mask; + rc = qpnp_write_wrapper(chip, ®, addr, 1); + if (rc) + pr_err("write failed addr = %03X, val = %02x, mask = %02x, reg = %02x, rc = %d\n", + addr, val, mask, reg, rc); + + return rc; +} + +static int qpnp_secure_write_wrapper(struct qpnp_bms_chip *chip, u8 *val, + u16 base) +{ + int rc; + u8 reg; + + reg = 0xA5; + rc = qpnp_write_wrapper(chip, ®, chip->base + SEC_ACCESS, 1); + if (rc) { + pr_err("Error %d writing 0xA5 to 0x%x reg\n", + rc, SEC_ACCESS); + return rc; + } + rc = qpnp_write_wrapper(chip, val, base, 1); + if (rc) + pr_err("Error %d writing %d to 0x%x reg\n", rc, *val, base); + + return rc; +} + +static int backup_ocv_soc(struct qpnp_bms_chip *chip, int ocv_uv, int soc) +{ + int rc; + u16 ocv_mv = ocv_uv / 1000; + + rc = qpnp_write_wrapper(chip, (u8 *)&ocv_mv, + chip->base + BMS_OCV_REG, 2); + if (rc) + pr_err("Unable to backup OCV rc=%d\n", rc); + + rc = qpnp_masked_write_base(chip, chip->base + BMS_SOC_REG, + SOC_STORAGE_MASK, (soc + 1) << 1); + if (rc) + pr_err("Unable to backup SOC rc=%d\n", rc); + + pr_debug("ocv_mv=%d soc=%d\n", ocv_mv, soc); + + return rc; +} + +static int get_current_time(unsigned long *now_tm_sec) +{ + struct rtc_time tm; + struct rtc_device *rtc; + int rc; + + rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); + if (rtc == NULL) { + pr_err("%s: unable to open rtc device (%s)\n", + __FILE__, CONFIG_RTC_HCTOSYS_DEVICE); + return -EINVAL; + } + + rc = rtc_read_time(rtc, &tm); + if (rc) { + pr_err("Error reading rtc device (%s) : %d\n", + CONFIG_RTC_HCTOSYS_DEVICE, rc); + goto close_time; + } + + rc = rtc_valid_tm(&tm); + if (rc) { + pr_err("Invalid RTC time (%s): %d\n", + CONFIG_RTC_HCTOSYS_DEVICE, rc); + goto close_time; + } + rtc_tm_to_time(&tm, now_tm_sec); + +close_time: + rtc_class_close(rtc); + return rc; +} + +static int calculate_delta_time(unsigned long *time_stamp, int *delta_time_s) +{ + unsigned long now_tm_sec = 0; + + /* default to delta time = 0 if anything fails */ + *delta_time_s = 0; + + if (get_current_time(&now_tm_sec)) { + pr_err("RTC read failed\n"); + return 0; + } + + *delta_time_s = (now_tm_sec - *time_stamp); + + /* remember this time */ + *time_stamp = now_tm_sec; + return 0; +} + +static bool is_debug_batt_id(struct qpnp_bms_chip *chip) +{ + if (is_between(DEBUG_BATT_ID_LOW, DEBUG_BATT_ID_HIGH, + chip->batt_id_ohm)) + return true; + + return false; +} + +static int bms_notifier_cb(struct notifier_block *nb, + unsigned long event, void *data) +{ + union power_supply_propval ret = {0,}; + struct power_supply *psy = data; + struct qpnp_bms_chip *chip = container_of(nb, struct qpnp_bms_chip, nb); + + if (event != PSY_EVENT_PROP_CHANGED) + return NOTIFY_OK; + + if ((strcmp(psy->desc->name, "battery") == 0)) { + if (chip->batt_psy == NULL) + chip->batt_psy = power_supply_get_by_name("battery"); + if (chip->batt_psy) { + if (is_debug_batt_id(chip)) { + power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_DEBUG_BATTERY, &ret); + if (!ret.intval) { + ret.intval = 1; + power_supply_set_property( + chip->batt_psy, + POWER_SUPPLY_PROP_DEBUG_BATTERY, + &ret); + } + } + } + } + + return NOTIFY_OK; +} +static bool is_charger_present(struct qpnp_bms_chip *chip) +{ + union power_supply_propval ret = {0,}; + + if (chip->usb_psy == NULL) + chip->usb_psy = power_supply_get_by_name("usb"); + if (chip->usb_psy) { + power_supply_get_property(chip->usb_psy, + POWER_SUPPLY_PROP_PRESENT, &ret); + return ret.intval; + } + + return false; +} + +static bool is_battery_charging(struct qpnp_bms_chip *chip) +{ + union 
power_supply_propval ret = {0,}; + + if (chip->batt_psy == NULL) + chip->batt_psy = power_supply_get_by_name("battery"); + if (chip->batt_psy) { + /* if battery has been registered, use the type property */ + power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_CHARGE_TYPE, &ret); + return ret.intval != POWER_SUPPLY_CHARGE_TYPE_NONE; + } + + /* Default to false if the battery power supply is not registered. */ + pr_debug("battery power supply is not registered\n"); + return false; +} + +#define BAT_PRES_BIT BIT(7) +static bool is_battery_present(struct qpnp_bms_chip *chip) +{ + union power_supply_propval ret = {0,}; + int rc; + u8 batt_pres; + + /* first try to use the batt_pres register if given */ + if (chip->batt_pres_addr) { + rc = qpnp_read_wrapper(chip, &batt_pres, + chip->batt_pres_addr, 1); + if (!rc && (batt_pres & BAT_PRES_BIT)) + return true; + else + return false; + } + if (chip->batt_psy == NULL) + chip->batt_psy = power_supply_get_by_name("battery"); + if (chip->batt_psy) { + /* if battery has been registered, use the present property */ + power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_PRESENT, &ret); + return ret.intval; + } + + /* Default to false if the battery power supply is not registered. */ + pr_debug("battery power supply is not registered\n"); + return false; +} + +#define BAT_REMOVED_OFFMODE_BIT BIT(6) +static bool is_battery_replaced_in_offmode(struct qpnp_bms_chip *chip) +{ + u8 batt_pres; + int rc; + + if (chip->batt_pres_addr) { + rc = qpnp_read_wrapper(chip, &batt_pres, + chip->batt_pres_addr, 1); + pr_debug("offmode removed: %02x\n", batt_pres); + if (!rc && (batt_pres & BAT_REMOVED_OFFMODE_BIT)) + return true; + } + + return false; +} + +static bool is_battery_taper_charging(struct qpnp_bms_chip *chip) +{ + union power_supply_propval ret = {0,}; + + if (chip->batt_psy == NULL) + chip->batt_psy = power_supply_get_by_name("battery"); + + if (chip->batt_psy) { + power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_CHARGE_TYPE, &ret); + return ret.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER; + } + + return false; +} + +static int master_hold_control(struct qpnp_bms_chip *chip, bool enable) +{ + u8 reg = 0; + int rc; + + reg = enable ? 
MASTER_HOLD_BIT : 0; + + rc = qpnp_secure_write_wrapper(chip, ®, + chip->base + DATA_CTL1_REG); + if (rc) + pr_err("Unable to write reg=%x rc=%d\n", DATA_CTL1_REG, rc); + + return rc; +} + +static int force_fsm_state(struct qpnp_bms_chip *chip, u8 state) +{ + int rc; + u8 mode_ctl = 0; + + switch (state) { + case S2_STATE: + mode_ctl = (FORCE_S2_MODE | ENABLE_S2_MODE); + break; + case S3_STATE: + mode_ctl = (FORCE_S3_MODE | ENABLE_S3_MODE); + break; + default: + pr_debug("Invalid state %d\n", state); + return -EINVAL; + } + + rc = qpnp_secure_write_wrapper(chip, &mode_ctl, + chip->base + MODE_CTL_REG); + if (rc) { + pr_err("Unable to write reg=%x rc=%d\n", MODE_CTL_REG, rc); + return rc; + } + /* delay for the FSM state to take affect in hardware */ + usleep_range(500, 600); + + pr_debug("force_mode=%d mode_cntl_reg=%x\n", state, mode_ctl); + + return 0; +} + +static int get_sample_interval(struct qpnp_bms_chip *chip, + u8 fsm_state, u32 *interval) +{ + int rc; + u8 val = 0, reg; + + *interval = 0; + + switch (fsm_state) { + case S1_STATE: + reg = S1_SAMPLE_INTVL_REG; + break; + case S2_STATE: + reg = S2_SAMPLE_INTVL_REG; + break; + case S3_STATE: + reg = S3_SAMPLE_INTVL_REG; + break; + default: + pr_err("Invalid state %d\n", fsm_state); + return -EINVAL; + } + + rc = qpnp_read_wrapper(chip, &val, chip->base + reg, 1); + if (rc) { + pr_err("Failed to get state(%d) sample_interval, rc=%d\n", + fsm_state, rc); + return rc; + } + + *interval = val * 10; + + return 0; +} + +static int get_sample_count(struct qpnp_bms_chip *chip, + u8 fsm_state, u32 *count) +{ + int rc; + u8 val = 0, reg; + + *count = 0; + + switch (fsm_state) { + case S1_STATE: + reg = S1_ACC_CNT_REG; + break; + case S2_STATE: + reg = S2_ACC_CNT_REG; + break; + default: + pr_err("Invalid state %d\n", fsm_state); + return -EINVAL; + } + + rc = qpnp_read_wrapper(chip, &val, chip->base + reg, 1); + if (rc) { + pr_err("Failed to get state(%d) sample_count, rc=%d\n", + fsm_state, rc); + return rc; + } + val &= ACC_CNT_MASK; + + *count = val ? 
(1 << (val + 1)) : 1; + + return 0; +} + +static int get_fifo_length(struct qpnp_bms_chip *chip, + u8 fsm_state, u32 *fifo_length) +{ + int rc; + u8 val = 0, reg, mask = 0, shift = 0; + + *fifo_length = 0; + + switch (fsm_state) { + case S1_STATE: + reg = FIFO_LENGTH_REG; + mask = S1_FIFO_LENGTH_MASK; + shift = 0; + break; + case S2_STATE: + reg = FIFO_LENGTH_REG; + mask = S2_FIFO_LENGTH_MASK; + shift = S2_FIFO_LENGTH_SHIFT; + break; + default: + pr_err("Invalid state %d\n", fsm_state); + return -EINVAL; + } + + rc = qpnp_read_wrapper(chip, &val, chip->base + reg, 1); + if (rc) { + pr_err("Failed to get state(%d) fifo_length, rc=%d\n", + fsm_state, rc); + return rc; + } + + val &= mask; + val >>= shift; + + *fifo_length = val; + + return 0; +} + +static int set_fifo_length(struct qpnp_bms_chip *chip, + u8 fsm_state, u32 fifo_length) +{ + int rc; + u8 reg, mask = 0, shift = 0; + + /* fifo_length of 1 is not supported due to a hardware issue */ + if ((fifo_length <= 1) || (fifo_length > MAX_FIFO_REGS)) { + pr_err("Invalid FIFO length = %d\n", fifo_length); + return -EINVAL; + } + + switch (fsm_state) { + case S1_STATE: + reg = FIFO_LENGTH_REG; + mask = S1_FIFO_LENGTH_MASK; + shift = 0; + break; + case S2_STATE: + reg = FIFO_LENGTH_REG; + mask = S2_FIFO_LENGTH_MASK; + shift = S2_FIFO_LENGTH_SHIFT; + break; + default: + pr_err("Invalid state %d\n", fsm_state); + return -EINVAL; + } + + rc = master_hold_control(chip, true); + if (rc) + pr_err("Unable to apply master_hold rc=%d\n", rc); + + rc = qpnp_masked_write_base(chip, chip->base + reg, mask, + fifo_length << shift); + if (rc) + pr_err("Unable to set fifo length rc=%d\n", rc); + + rc = master_hold_control(chip, false); + if (rc) + pr_err("Unable to apply master_hold rc=%d\n", rc); + + return rc; +} + +static int get_fsm_state(struct qpnp_bms_chip *chip, u8 *state) +{ + int rc; + + /* + * To read the STATUS1 register, write a value(any) to this register, + * wait for 10ms and then read the register. + */ + *state = 0; + rc = qpnp_write_wrapper(chip, state, chip->base + STATUS1_REG, 1); + if (rc) { + pr_err("Unable to write STATUS1_REG rc=%d\n", rc); + return rc; + } + usleep_range(10000, 11000); + + /* read the current FSM state */ + rc = qpnp_read_wrapper(chip, state, chip->base + STATUS1_REG, 1); + if (rc) { + pr_err("Unable to read STATUS1_REG rc=%d\n", rc); + return rc; + } + *state = (*state & FSM_STATE_MASK) >> FSM_STATE_SHIFT; + + return rc; +} + +static int update_fsm_state(struct qpnp_bms_chip *chip) +{ + u8 state = 0; + int rc; + + mutex_lock(&chip->state_change_mutex); + rc = get_fsm_state(chip, &state); + if (rc) { + pr_err("Unable to get fsm_state rc=%d\n", rc); + goto fail_fsm; + } + + chip->current_fsm_state = state; + +fail_fsm: + mutex_unlock(&chip->state_change_mutex); + return rc; +} + +static int backup_charge_cycle(struct qpnp_bms_chip *chip) +{ + int rc = 0; + + if (chip->charge_increase >= 0) { + rc = qpnp_write_wrapper(chip, &chip->charge_increase, + chip->base + CHARGE_INCREASE_STORAGE, 1); + if (rc) + pr_err("Unable to backup charge_increase rc=%d\n", rc); + } + + if (chip->charge_cycles >= 0) { + rc = qpnp_write_wrapper(chip, (u8 *)&chip->charge_cycles, + chip->base + CHARGE_CYCLE_STORAGE_LSB, 2); + if (rc) + pr_err("Unable to backup charge_cycles rc=%d\n", rc); + } + + pr_debug("%s storing charge_increase=%u charge_cycle=%u\n", + rc ? 
"Unable to" : "Successfully", + chip->charge_increase, chip->charge_cycles); + + return rc; +} + +static int read_chgcycle_data_from_backup(struct qpnp_bms_chip *chip) +{ + int rc; + uint16_t temp_u16 = 0; + u8 temp_u8 = 0; + + rc = qpnp_read_wrapper(chip, &temp_u8, + chip->base + CHARGE_INCREASE_STORAGE, 1); + if (rc) { + pr_err("Unable to read charge_increase rc=%d\n", rc); + return rc; + } + + rc = qpnp_read_wrapper(chip, (u8 *)&temp_u16, + chip->base + CHARGE_CYCLE_STORAGE_LSB, 2); + if (rc) { + pr_err("Unable to read charge_cycle rc=%d\n", rc); + return rc; + } + + if ((temp_u8 == 0xFF) || (temp_u16 == 0xFFFF)) { + chip->charge_cycles = 0; + chip->charge_increase = 0; + pr_info("rejecting aging data charge_increase=%u charge_cycle=%u\n", + temp_u8, temp_u16); + rc = backup_charge_cycle(chip); + if (rc) + pr_err("Unable to reset charge cycles rc=%d\n", rc); + } else { + chip->charge_increase = temp_u8; + chip->charge_cycles = temp_u16; + } + + pr_debug("charge_increase=%u charge_cycle=%u\n", + chip->charge_increase, chip->charge_cycles); + return rc; +} + +static int calculate_uuc_iavg(struct qpnp_bms_chip *chip) +{ + int i; + int iavg_ma = chip->current_now / 1000; + + /* only continue if ibat has changed */ + if (chip->current_now == chip->prev_current_now) + goto ibat_unchanged; + else + chip->prev_current_now = chip->current_now; + + chip->iavg_samples_ma[chip->iavg_index] = iavg_ma; + chip->iavg_index = (chip->iavg_index + 1) % + chip->dt.cfg_ibat_avg_samples; + chip->iavg_num_samples++; + if (chip->iavg_num_samples >= chip->dt.cfg_ibat_avg_samples) + chip->iavg_num_samples = chip->dt.cfg_ibat_avg_samples; + + if (chip->iavg_num_samples) { + iavg_ma = 0; + /* maintain a 16 sample average of ibat */ + for (i = 0; i < chip->iavg_num_samples; i++) { + pr_debug("iavg_samples_ma[%d] = %d\n", i, + chip->iavg_samples_ma[i]); + iavg_ma += chip->iavg_samples_ma[i]; + } + + chip->iavg_ma = DIV_ROUND_CLOSEST(iavg_ma, + chip->iavg_num_samples); + } + +ibat_unchanged: + pr_debug("current_now_ma=%d averaged_iavg_ma=%d\n", + chip->current_now / 1000, chip->iavg_ma); + + return chip->iavg_ma; +} + +static int adjust_uuc(struct qpnp_bms_chip *chip, int soc_uuc) +{ + int max_percent_change; + + calculate_delta_time(&chip->uuc_tm_sec, &chip->uuc_delta_time_s); + + /* make sure that the UUC changes 1% at a time */ + max_percent_change = max(chip->uuc_delta_time_s + / TIME_PER_PERCENT_UUC, 1); + + if (chip->prev_soc_uuc == -EINVAL) { + /* start with a minimum UUC if the initial UUC is high */ + if (soc_uuc > MIN_SOC_UUC) + chip->prev_soc_uuc = MIN_SOC_UUC; + else + chip->prev_soc_uuc = soc_uuc; + } else { + if (abs(chip->prev_soc_uuc - soc_uuc) <= max_percent_change) + chip->prev_soc_uuc = soc_uuc; + else if (soc_uuc > chip->prev_soc_uuc) + chip->prev_soc_uuc += max_percent_change; + else + chip->prev_soc_uuc -= max_percent_change; + } + + pr_debug("soc_uuc=%d new_soc_uuc=%d\n", soc_uuc, chip->prev_soc_uuc); + + return chip->prev_soc_uuc; +} + +static int lookup_soc_ocv(struct qpnp_bms_chip *chip, int ocv_uv, int batt_temp) +{ + int soc_ocv = 0, soc_cutoff = 0, soc_final = 0; + int fcc, acc, soc_uuc = 0, soc_acc = 0, iavg_ma = 0; + + soc_ocv = interpolate_pc(chip->batt_data->pc_temp_ocv_lut, + batt_temp, ocv_uv / 1000); + soc_cutoff = interpolate_pc(chip->batt_data->pc_temp_ocv_lut, + batt_temp, chip->dt.cfg_v_cutoff_uv / 1000); + + soc_final = DIV_ROUND_CLOSEST(100 * (soc_ocv - soc_cutoff), + (100 - soc_cutoff)); + + if (chip->batt_data->ibat_acc_lut) { + /* Apply ACC logic only if we discharging */ 
+ if (chip->current_now > 0) { + + /* + * IBAT averaging is disabled at low temp. + * allowing the SOC to catcup quickly. + */ + if (batt_temp > chip->dt.cfg_low_temp_threshold) + iavg_ma = calculate_uuc_iavg(chip); + else + iavg_ma = chip->current_now / 1000; + + fcc = interpolate_fcc(chip->batt_data->fcc_temp_lut, + batt_temp); + acc = interpolate_acc(chip->batt_data->ibat_acc_lut, + batt_temp, iavg_ma); + if (acc <= 0) { + if (chip->last_acc) + acc = chip->last_acc; + else + acc = fcc; + } + soc_uuc = ((fcc - acc) * 100) / fcc; + + if (batt_temp > chip->dt.cfg_low_temp_threshold) + soc_uuc = adjust_uuc(chip, soc_uuc); + + soc_acc = DIV_ROUND_CLOSEST(100 * (soc_ocv - soc_uuc), + (100 - soc_uuc)); + + pr_debug("fcc=%d acc=%d soc_final=%d soc_uuc=%d soc_acc=%d current_now=%d iavg_ma=%d\n", + fcc, acc, soc_final, soc_uuc, + soc_acc, chip->current_now / 1000, iavg_ma); + + soc_final = soc_acc; + chip->last_acc = acc; + } else { + /* charging - reset all the counters */ + chip->last_acc = 0; + chip->iavg_num_samples = 0; + chip->iavg_index = 0; + chip->iavg_ma = 0; + chip->prev_current_now = 0; + chip->prev_soc_uuc = -EINVAL; + } + } + + soc_final = bound_soc(soc_final); + + pr_debug("soc_final=%d soc_ocv=%d soc_cutoff=%d ocv_uv=%u batt_temp=%d\n", + soc_final, soc_ocv, soc_cutoff, ocv_uv, batt_temp); + + return soc_final; +} + +#define V_PER_BIT_MUL_FACTOR 97656 +#define V_PER_BIT_DIV_FACTOR 1000 +#define VADC_INTRINSIC_OFFSET 0x6000 +static int vadc_reading_to_uv(int reading, bool vadc_bms) +{ + int64_t value; + + if (!vadc_bms) { + /* + * All the BMS H/W VADC values are pre-compensated + * for VADC_INTRINSIC_OFFSET, subtract this offset + * only if this reading is not obtained from BMS + */ + + if (reading <= VADC_INTRINSIC_OFFSET) + return 0; + + reading -= VADC_INTRINSIC_OFFSET; + } + + value = (reading * V_PER_BIT_MUL_FACTOR); + + return div_u64(value, (u32)V_PER_BIT_DIV_FACTOR); +} + +static int get_calculation_delay_ms(struct qpnp_bms_chip *chip) +{ + if (bms_wake_active(&chip->vbms_lv_wake_source)) + return chip->dt.cfg_low_voltage_calculate_soc_ms; + if (chip->calculated_soc < chip->dt.cfg_low_soc_calc_threshold) + return chip->dt.cfg_low_soc_calculate_soc_ms; + else + return chip->dt.cfg_calculate_soc_ms; +} + +#define VADC_CALIB_UV 625000 +#define VBATT_MUL_FACTOR 3 +static int adjust_vbatt_reading(struct qpnp_bms_chip *chip, int reading_uv) +{ + s64 numerator, denominator; + + if (reading_uv == 0) + return 0; + + /* don't adjust if not calibrated */ + if (chip->vadc_v0625 == 0 || chip->vadc_v1250 == 0) { + pr_debug("No cal yet return %d\n", + VBATT_MUL_FACTOR * reading_uv); + return VBATT_MUL_FACTOR * reading_uv; + } + + numerator = ((s64)reading_uv - chip->vadc_v0625) * VADC_CALIB_UV; + denominator = (s64)chip->vadc_v1250 - chip->vadc_v0625; + + if (denominator == 0) + return reading_uv * VBATT_MUL_FACTOR; + + return (VADC_CALIB_UV + div_s64(numerator, denominator)) + * VBATT_MUL_FACTOR; +} + +static int calib_vadc(struct qpnp_bms_chip *chip) +{ + int rc, raw_0625, raw_1250; + + rc = iio_read_channel_processed(chip->ref_625mv, &raw_0625); + if (rc < 0) { + pr_debug("raw_0625 channel read failed with rc = %d\n", rc); + return rc; + } + + rc = iio_read_channel_processed(chip->ref_125v, &raw_1250); + if (rc < 0) { + pr_debug("raw_1250 channel read failed with rc = %d\n", rc); + return rc; + } + + chip->vadc_v0625 = raw_0625; + chip->vadc_v1250 = raw_1250; + + pr_debug("vadc calib: 0625=%d raw (%d uv), 1250=%d raw (%d uv)\n", + raw_0625, chip->vadc_v0625, raw_1250, 
chip->vadc_v1250); + + return 0; +} + +static int32_t get_ocv_comp(int64_t *result, + struct qpnp_bms_chip *chip, int64_t die_temp) +{ + int64_t temp_var = 0, offset = 0; + int64_t old = *result; + int version; + + version = qpnp_adc_get_revid_version(chip->dev); + if (version == -EINVAL) + return 0; + switch (version) { + case QPNP_REV_ID_8916_1_0: + switch (chip->fab_id) { + case COMP_ID_SMIC: + if (die_temp < 25000) + temp_var = QPNP_VBAT_COEFF_26; + else + temp_var = QPNP_VBAT_COEFF_27; + temp_var = (die_temp - 25000) * temp_var; + break; + default: + case COMP_ID_GF: + offset = QPNP_OCV_OFFSET_GF; + if (die_temp < 25000) + temp_var = QPNP_VBAT_COEFF_26; + else + temp_var = QPNP_VBAT_COEFF_27; + temp_var = (die_temp - 25000) * temp_var; + break; + } + break; + case QPNP_REV_ID_8916_1_1: + switch (chip->fab_id) { + /* FAB_ID is zero */ + case COMP_ID_GF: + if (die_temp < 25000) + temp_var = QPNP_VBAT_COEFF_29; + else + temp_var = QPNP_VBAT_COEFF_30; + temp_var = (die_temp - 25000) * temp_var; + break; + /* FAB_ID is non-zero */ + default: + if (die_temp < 25000) + temp_var = QPNP_VBAT_COEFF_31; + else + temp_var = (-QPNP_VBAT_COEFF_32); + temp_var = (die_temp - 25000) * temp_var; + break; + } + break; + case QPNP_REV_ID_8916_2_0: + switch (chip->fab_id) { + case COMP_ID_SMIC: + offset = (-QPNP_VBAT_COEFF_38); + if (die_temp < 0) + temp_var = die_temp * QPNP_VBAT_COEFF_36; + else if (die_temp > 40000) + temp_var = ((die_temp - 40000) * + (-QPNP_VBAT_COEFF_37)); + break; + case COMP_ID_TSMC: + if (die_temp < 10000) + temp_var = ((die_temp - 10000) * + QPNP_VBAT_COEFF_41); + else if (die_temp > 50000) + temp_var = ((die_temp - 50000) * + (-QPNP_VBAT_COEFF_42)); + break; + default: + case COMP_ID_GF: + if (die_temp < 20000) + temp_var = ((die_temp - 20000) * + QPNP_VBAT_COEFF_45); + else if (die_temp > 40000) + temp_var = ((die_temp - 40000) * + (-QPNP_VBAT_COEFF_46)); + break; + } + break; + default: + temp_var = 0; + break; + } + temp_var = div64_s64(temp_var, QPNP_VBAT_COEFF_3); + + temp_var = 1000000 + temp_var; + + *result = *result * temp_var; + + if (offset) + *result -= offset; + + *result = div64_s64(*result, 1000000); + pr_debug("%lld compensated into %lld\n", old, *result); + + return 0; +} + +static int32_t get_vbat_sns_comp(int64_t *result, + struct qpnp_bms_chip *chip, int64_t die_temp) +{ + int64_t temp_var = 0, offset = 0; + int64_t old = *result; + int version; + + version = qpnp_adc_get_revid_version(chip->dev); + if (version == -EINVAL) + return 0; + + switch (version) { + case QPNP_REV_ID_8916_1_0: + switch (chip->fab_id) { + case COMP_ID_SMIC: + temp_var = ((die_temp - 25000) * + (QPNP_VBAT_COEFF_28)); + break; + default: + case COMP_ID_GF: + temp_var = ((die_temp - 25000) * + (QPNP_VBAT_COEFF_28)); + break; + } + break; + case QPNP_REV_ID_8916_1_1: + switch (chip->fab_id) { + /* FAB_ID is zero */ + case COMP_ID_GF: + temp_var = ((die_temp - 25000) * + (QPNP_VBAT_COEFF_33)); + break; + /* FAB_ID is non-zero */ + default: + offset = QPNP_VBAT_COEFF_35; + if (die_temp > 50000) { + temp_var = ((die_temp - 25000) * + (QPNP_VBAT_COEFF_34)); + } + break; + } + break; + case QPNP_REV_ID_8916_2_0: + switch (chip->fab_id) { + case COMP_ID_SMIC: + if (die_temp < 0) { + temp_var = (die_temp * + QPNP_VBAT_COEFF_39); + } else if (die_temp > 40000) { + temp_var = ((die_temp - 40000) * + (-QPNP_VBAT_COEFF_40)); + } + break; + case COMP_ID_TSMC: + if (die_temp < 10000) + temp_var = ((die_temp - 10000) * + QPNP_VBAT_COEFF_43); + else if (die_temp > 50000) + temp_var = ((die_temp - 
50000) * + (-QPNP_VBAT_COEFF_44)); + break; + default: + case COMP_ID_GF: + if (die_temp < 20000) + temp_var = ((die_temp - 20000) * + QPNP_VBAT_COEFF_47); + else if (die_temp > 40000) + temp_var = ((die_temp - 40000) * + (-QPNP_VBAT_COEFF_48)); + break; + } + break; + default: + temp_var = 0; + break; + } + + temp_var = div64_s64(temp_var, QPNP_VBAT_COEFF_3); + + temp_var = 1000000 + temp_var; + + *result = *result * temp_var; + + if (offset) + *result -= offset; + + *result = div64_s64(*result, 1000000); + pr_debug("%lld compensated into %lld\n", old, *result); + + return 0; +} + +int32_t get_vbat_sns_comp_result(struct qpnp_bms_chip *chip, + int64_t *result, bool is_pon_ocv) +{ + int rc, die_temp_result; + + rc = iio_read_channel_processed(chip->die_temp, &die_temp_result); + if (rc < 0) { + pr_err("error reading die temperature rc=%d\n", rc); + return rc; + } + + pr_debug("die-temp = %d\n", die_temp_result); + + if (is_pon_ocv) + rc = get_ocv_comp(result, chip, die_temp_result); + else + rc = get_vbat_sns_comp(result, chip, + die_temp_result); + + if (rc < 0) + pr_err("Error with vbat compensation\n"); + + return rc; +} + + +static int convert_vbatt_raw_to_uv(struct qpnp_bms_chip *chip, + u16 reading, bool is_pon_ocv) +{ + int64_t uv, vbatt; + int rc; + + uv = vadc_reading_to_uv(reading, true); + pr_debug("%u raw converted into %lld uv\n", reading, uv); + + uv = adjust_vbatt_reading(chip, uv); + pr_debug("adjusted into %lld uv\n", uv); + + vbatt = uv; + rc = get_vbat_sns_comp_result(chip, &uv, is_pon_ocv); + if (rc) { + pr_debug("Vbatt compensation failed rc = %d\n", rc); + uv = vbatt; + } else { + pr_debug("temp-compensated %lld into %lld uv\n", vbatt, uv); + } + + return uv; +} + +static void convert_and_store_ocv(struct qpnp_bms_chip *chip, + int batt_temp, bool is_pon_ocv) +{ + int rc; + + rc = calib_vadc(chip); + if (rc) + pr_err("Vadc reference voltage read failed, rc = %d\n", rc); + + chip->last_ocv_uv = convert_vbatt_raw_to_uv(chip, + chip->last_ocv_raw, is_pon_ocv); + + pr_debug("last_ocv_uv = %d\n", chip->last_ocv_uv); +} + +static int read_and_update_ocv(struct qpnp_bms_chip *chip, int batt_temp, + bool is_pon_ocv) +{ + int rc, ocv_uv; + u16 ocv_data = 0; + + /* read the BMS h/w OCV */ + rc = qpnp_read_wrapper(chip, (u8 *)&ocv_data, + chip->base + OCV_DATA0_REG, 2); + if (rc) { + pr_err("Error reading ocv: rc = %d\n", rc); + return -ENXIO; + } + + /* check if OCV is within limits */ + ocv_uv = convert_vbatt_raw_to_uv(chip, ocv_data, is_pon_ocv); + if (ocv_uv < MIN_OCV_UV) { + pr_err("OCV too low or invalid (%d)- rejecting it\n", ocv_uv); + return 0; + } + + if ((chip->last_ocv_raw == OCV_UNINITIALIZED) || + (chip->last_ocv_raw != ocv_data)) { + pr_debug("new OCV!\n"); + chip->last_ocv_raw = ocv_data; + convert_and_store_ocv(chip, batt_temp, is_pon_ocv); + } + + pr_debug("ocv_raw=0x%x last_ocv_raw=0x%x last_ocv_uv=%d\n", + ocv_data, chip->last_ocv_raw, chip->last_ocv_uv); + + return 0; +} + +static int get_battery_voltage(struct qpnp_bms_chip *chip, int *result_uv) +{ + int rc; + + rc = iio_read_channel_processed(chip->vbat_sns, result_uv); + if (rc < 0) { + pr_err("error reading vbat_sns channel rc = %d\n", rc); + return rc; + } + pr_debug("mvolts phy=%d\n", result_uv); + return 0; +} + +static int get_battery_status(struct qpnp_bms_chip *chip) +{ + union power_supply_propval ret = {0,}; + + if (chip->batt_psy == NULL) + chip->batt_psy = power_supply_get_by_name("battery"); + if (chip->batt_psy) { + /* if battery has been registered, use the status property */ + 
power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_STATUS, &ret); + return ret.intval; + } + + /* Default to false if the battery power supply is not registered. */ + pr_debug("battery power supply is not registered\n"); + return POWER_SUPPLY_STATUS_UNKNOWN; +} + +static int get_batt_therm(struct qpnp_bms_chip *chip, int *batt_temp) +{ + int rc; + + rc = iio_read_channel_processed(chip->lr_mux1_batt_therm, batt_temp); + if (rc < 0) { + pr_err("lr_mux1_batt_therm channel read error : rc = %d\n", rc); + return rc; + } + return 0; +} + +static int get_prop_bms_rbatt(struct qpnp_bms_chip *chip) +{ + return chip->batt_data->default_rbatt_mohm; +} + +static int get_rbatt(struct qpnp_bms_chip *chip, int soc, int batt_temp) +{ + int rbatt_mohm, scalefactor; + + rbatt_mohm = chip->batt_data->default_rbatt_mohm; + if (chip->batt_data->rbatt_sf_lut == NULL) { + pr_debug("RBATT = %d\n", rbatt_mohm); + return rbatt_mohm; + } + + scalefactor = interpolate_scalingfactor( + chip->batt_data->rbatt_sf_lut, + batt_temp, soc); + rbatt_mohm = (rbatt_mohm * scalefactor) / 100; + + if (chip->dt.cfg_r_conn_mohm > 0) + rbatt_mohm += chip->dt.cfg_r_conn_mohm; + + return rbatt_mohm; +} + +static void charging_began(struct qpnp_bms_chip *chip) +{ + int rc; + u8 state; + + mutex_lock(&chip->last_soc_mutex); + + chip->charge_start_tm_sec = 0; + chip->catch_up_time_sec = 0; + chip->start_soc = chip->last_soc; + + /* + * reset ocv_at_100 to -EINVAL to indicate + * start of charging. + */ + chip->ocv_at_100 = -EINVAL; + + mutex_unlock(&chip->last_soc_mutex); + + /* + * If the BMS state is not in S2, force it in S2. Such + * a condition can only occur if we are coming out of + * suspend. + */ + mutex_lock(&chip->state_change_mutex); + rc = get_fsm_state(chip, &state); + if (rc) + pr_err("Unable to get FSM state rc=%d\n", rc); + if (rc || (state != S2_STATE)) { + pr_debug("Forcing S2 state\n"); + rc = force_fsm_state(chip, S2_STATE); + if (rc) + pr_err("Unable to set FSM state rc=%d\n", rc); + } + mutex_unlock(&chip->state_change_mutex); +} + +static void charging_ended(struct qpnp_bms_chip *chip) +{ + u8 state; + int rc, status = get_battery_status(chip); + + mutex_lock(&chip->last_soc_mutex); + + chip->charge_start_tm_sec = 0; + chip->catch_up_time_sec = 0; + chip->end_soc = chip->last_soc; + + if (status == POWER_SUPPLY_STATUS_FULL) + chip->last_soc_invalid = true; + + mutex_unlock(&chip->last_soc_mutex); + + /* + * If the BMS state is not in S2, force it in S2. Such + * a condition can only occur if we are coming out of + * suspend. 
+ */ + mutex_lock(&chip->state_change_mutex); + rc = get_fsm_state(chip, &state); + if (rc) + pr_err("Unable to get FSM state rc=%d\n", rc); + if (rc || (state != S2_STATE)) { + pr_debug("Forcing S2 state\n"); + rc = force_fsm_state(chip, S2_STATE); + if (rc) + pr_err("Unable to set FSM state rc=%d\n", rc); + } + mutex_unlock(&chip->state_change_mutex); + + /* Calculate charge accumulated and update charge cycle */ + if (chip->dt.cfg_battery_aging_comp && + (chip->end_soc > chip->start_soc)) { + chip->charge_increase += (chip->end_soc - chip->start_soc); + if (chip->charge_increase > 100) { + chip->charge_cycles++; + chip->charge_increase %= 100; + } + pr_debug("start_soc=%u end_soc=%u charge_cycles=%u charge_increase=%u\n", + chip->start_soc, chip->end_soc, + chip->charge_cycles, chip->charge_increase); + rc = backup_charge_cycle(chip); + if (rc) + pr_err("Unable to store charge cycles rc=%d\n", rc); + } +} + +static int estimate_ocv(struct qpnp_bms_chip *chip) +{ + int i, rc, vbatt = 0, vbatt_final = 0; + + for (i = 0; i < 5; i++) { + rc = get_battery_voltage(chip, &vbatt); + if (rc) { + pr_err("Unable to read battery-voltage rc=%d\n", rc); + return rc; + } + /* + * Conservatively select the lowest vbatt to avoid reporting + * a higher ocv due to variations in bootup current. + */ + + if (i == 0) + vbatt_final = vbatt; + else if (vbatt < vbatt_final) + vbatt_final = vbatt; + + msleep(20); + } + + /* + * TODO: Revisit the OCV calcuations to use approximate ibatt + * and rbatt. + */ + return vbatt_final; +} + +static int scale_soc_while_chg(struct qpnp_bms_chip *chip, int chg_time_sec, + int catch_up_sec, int new_soc, int prev_soc) +{ + int scaled_soc; + int numerator; + + /* + * Don't report a high value immediately slowly scale the + * value from prev_soc to the new soc based on a charge time + * weighted average + */ + pr_debug("cts=%d catch_up_sec=%d\n", chg_time_sec, catch_up_sec); + if (catch_up_sec == 0) + return new_soc; + + if (chg_time_sec > catch_up_sec) + return new_soc; + + numerator = (catch_up_sec - chg_time_sec) * prev_soc + + chg_time_sec * new_soc; + scaled_soc = numerator / catch_up_sec; + + pr_debug("cts=%d new_soc=%d prev_soc=%d scaled_soc=%d\n", + chg_time_sec, new_soc, prev_soc, scaled_soc); + + return scaled_soc; +} + +static int report_eoc(struct qpnp_bms_chip *chip) +{ + int rc = -EINVAL; + union power_supply_propval ret = {0,}; + + if (chip->batt_psy == NULL) + chip->batt_psy = power_supply_get_by_name("battery"); + if (chip->batt_psy) { + rc = power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_STATUS, &ret); + if (rc) { + pr_err("Unable to get battery 'STATUS' rc=%d\n", rc); + } else if (ret.intval != POWER_SUPPLY_STATUS_FULL) { + pr_debug("Report EOC to charger\n"); + ret.intval = POWER_SUPPLY_STATUS_FULL; + rc = power_supply_set_property(chip->batt_psy, + POWER_SUPPLY_PROP_STATUS, &ret); + if (rc) { + pr_err("Unable to set 'STATUS' rc=%d\n", rc); + return rc; + } + chip->eoc_reported = true; + } + } else { + pr_err("battery psy not registered\n"); + } + + return rc; +} + +static void check_recharge_condition(struct qpnp_bms_chip *chip) +{ + int rc; + union power_supply_propval ret = {0,}; + int status = get_battery_status(chip); + + if (chip->last_soc > chip->dt.cfg_soc_resume_limit) + return; + + if (status == POWER_SUPPLY_STATUS_UNKNOWN) { + pr_debug("Unable to read battery status\n"); + return; + } + + /* Report recharge to charger for SOC based resume of charging */ + if ((status != POWER_SUPPLY_STATUS_CHARGING) && chip->eoc_reported) { + 
ret.intval = POWER_SUPPLY_STATUS_CHARGING; + rc = power_supply_set_property(chip->batt_psy, + POWER_SUPPLY_PROP_STATUS, &ret); + if (rc < 0) { + pr_err("Unable to set battery property rc=%d\n", rc); + } else { + pr_info("soc dropped below resume_soc soc=%d resume_soc=%d, restart charging\n", + chip->last_soc, + chip->dt.cfg_soc_resume_limit); + chip->eoc_reported = false; + } + } +} + +static void check_eoc_condition(struct qpnp_bms_chip *chip) +{ + int rc; + int status = get_battery_status(chip); + union power_supply_propval ret = {0,}; + + if (status == POWER_SUPPLY_STATUS_UNKNOWN) { + pr_err("Unable to read battery status\n"); + return; + } + + /* + * Check battery status: + * if last_soc is 100 and battery status is still charging + * reset ocv_at_100 and force reporting of eoc to charger. + */ + if ((chip->last_soc == 100) && + (status == POWER_SUPPLY_STATUS_CHARGING)) + chip->ocv_at_100 = -EINVAL; + + /* + * Store the OCV value at 100. If the new ocv is greater than + * ocv_at_100 (battery settles), update ocv_at_100. Else + * if the SOC drops, reset ocv_at_100. + */ + if (chip->ocv_at_100 == -EINVAL) { + if (chip->last_soc == 100) { + if (chip->dt.cfg_report_charger_eoc) { + rc = report_eoc(chip); + if (!rc) { + /* + * update ocv_at_100 only if EOC is + * reported successfully. + */ + chip->ocv_at_100 = chip->last_ocv_uv; + pr_debug("Battery FULL\n"); + } else { + pr_err("Unable to report eoc rc=%d\n", + rc); + chip->ocv_at_100 = -EINVAL; + } + } + if (chip->dt.cfg_use_reported_soc) { + /* begin reported_soc process */ + chip->reported_soc_in_use = true; + chip->charger_removed_since_full = false; + chip->charger_reinserted = false; + chip->reported_soc = 100; + pr_debug("Begin reported_soc process\n"); + } + } + } else { + if (chip->last_ocv_uv >= chip->ocv_at_100) { + pr_debug("new_ocv(%d) > ocv_at_100(%d) maintaining SOC to 100\n", + chip->last_ocv_uv, chip->ocv_at_100); + chip->ocv_at_100 = chip->last_ocv_uv; + chip->last_soc = 100; + } else if (chip->last_soc != 100) { + /* + * Report that the battery is discharging. + * This gets called once when the SOC falls + * below 100. + */ + if (chip->reported_soc_in_use + && chip->reported_soc == 100) { + pr_debug("reported_soc=100, last_soc=%d, do not send DISCHARING status\n", + chip->last_soc); + } else { + ret.intval = POWER_SUPPLY_STATUS_DISCHARGING; + power_supply_set_property(chip->batt_psy, + POWER_SUPPLY_PROP_STATUS, &ret); + } + pr_debug("SOC dropped (%d) discarding ocv_at_100\n", + chip->last_soc); + chip->ocv_at_100 = -EINVAL; + } + } +} + +static int report_voltage_based_soc(struct qpnp_bms_chip *chip) +{ + pr_debug("Reported voltage based soc = %d\n", + chip->prev_voltage_based_soc); + return chip->prev_voltage_based_soc; +} + +static int prepare_reported_soc(struct qpnp_bms_chip *chip) +{ + if (!chip->charger_removed_since_full) { + /* + * charger is not removed since full, + * keep reported_soc as 100 and calculate the delta soc + * between reported_soc and last_soc + */ + chip->reported_soc = 100; + chip->reported_soc_delta = 100 - chip->last_soc; + pr_debug("Keep at reported_soc 100, reported_soc_delta=%d, last_soc=%d\n", + chip->reported_soc_delta, + chip->last_soc); + } else { + /* charger is removed since full */ + if (chip->charger_reinserted) { + /* + * charger reinserted, keep the reported_soc + * until it equals to last_soc. 
+ */ + if (chip->reported_soc == chip->last_soc) { + chip->reported_soc_in_use = false; + chip->reported_soc_high_current = false; + pr_debug("reported_soc equals to last_soc, stop reported_soc process\n"); + } + chip->reported_soc_change_sec = 0; + } + } + pr_debug("Reporting reported_soc=%d, last_soc=%d\n", + chip->reported_soc, chip->last_soc); + return chip->reported_soc; +} + +#define SOC_CATCHUP_SEC_MAX 600 +#define SOC_CATCHUP_SEC_PER_PERCENT 60 +#define MAX_CATCHUP_SOC (SOC_CATCHUP_SEC_MAX / SOC_CATCHUP_SEC_PER_PERCENT) +#define SOC_CHANGE_PER_SEC 5 +static int report_vm_bms_soc(struct qpnp_bms_chip *chip) +{ + int soc, soc_change, batt_temp, rc; + int time_since_last_change_sec = 0, charge_time_sec = 0; + unsigned long last_change_sec; + bool charging; + + soc = chip->calculated_soc; + + last_change_sec = chip->last_soc_change_sec; + calculate_delta_time(&last_change_sec, &time_since_last_change_sec); + + charging = is_battery_charging(chip); + + pr_debug("charging=%d last_soc=%d last_soc_unbound=%d\n", + charging, chip->last_soc, chip->last_soc_unbound); + /* + * account for charge time - limit it to SOC_CATCHUP_SEC to + * avoid overflows when charging continues for extended periods + */ + if (charging && chip->last_soc != -EINVAL) { + if (chip->charge_start_tm_sec == 0 || + (chip->catch_up_time_sec == 0 && + (abs(soc - chip->last_soc) >= MIN_SOC_UUC))) { + /* + * calculating soc for the first time + * after start of chg. Initialize catchup time + */ + if (abs(soc - chip->last_soc) < MAX_CATCHUP_SOC) + chip->catch_up_time_sec = + (soc - chip->last_soc) + * SOC_CATCHUP_SEC_PER_PERCENT; + else + chip->catch_up_time_sec = SOC_CATCHUP_SEC_MAX; + + chip->chg_start_soc = chip->last_soc; + + if (chip->catch_up_time_sec < 0) + chip->catch_up_time_sec = 0; + chip->charge_start_tm_sec = last_change_sec; + + pr_debug("chg_start_soc=%d charge_start_tm_sec=%d catch_up_time_sec=%d\n", + chip->chg_start_soc, chip->charge_start_tm_sec, + chip->catch_up_time_sec); + } + + charge_time_sec = min(SOC_CATCHUP_SEC_MAX, (int)last_change_sec + - chip->charge_start_tm_sec); + + /* end catchup if calculated soc and last soc are same */ + if (chip->last_soc == soc) { + chip->catch_up_time_sec = 0; + chip->chg_start_soc = chip->last_soc; + } + } + + if (chip->last_soc != -EINVAL) { + /* + * last_soc < soc ... if we have not been charging at all + * since the last time this was called, report previous SoC. + * Otherwise, scale and catch up. + */ + rc = get_batt_therm(chip, &batt_temp); + if (rc) + batt_temp = BMS_DEFAULT_TEMP; + + if (chip->last_soc < soc && !charging) + soc = chip->last_soc; + else if (chip->last_soc < soc && soc != 100) + soc = scale_soc_while_chg(chip, charge_time_sec, + chip->catch_up_time_sec, + soc, chip->chg_start_soc); + + /* + * if the battery is close to cutoff or if the batt_temp + * is under the low-temp threshold allow bigger change + */ + if (bms_wake_active(&chip->vbms_lv_wake_source) || + (batt_temp <= chip->dt.cfg_low_temp_threshold)) + soc_change = min((int)abs(chip->last_soc - soc), + time_since_last_change_sec); + else + soc_change = min((int)abs(chip->last_soc - soc), + time_since_last_change_sec + / SOC_CHANGE_PER_SEC); + + if (chip->last_soc_unbound) { + chip->last_soc_unbound = false; + } else { + /* + * if soc have not been unbound by resume, + * only change reported SoC by 1. 
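+ * (For example, if last_soc is 80 and the newly calculated soc is 76, this pass reports 79 and later passes catch up the remainder.)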
+ */ + soc_change = min(1, soc_change); + } + + if (soc < chip->last_soc && soc != 0) + soc = chip->last_soc - soc_change; + if (soc > chip->last_soc && soc != 100) + soc = chip->last_soc + soc_change; + } + + if (chip->last_soc != soc && !chip->last_soc_unbound) + chip->last_soc_change_sec = last_change_sec; + + /* + * Check/update eoc under following condition: + * if there is change in soc: + * soc != chip->last_soc + * during bootup if soc is 100: + */ + soc = bound_soc(soc); + if ((soc != chip->last_soc) || (soc == 100)) { + chip->last_soc = soc; + check_eoc_condition(chip); + if ((chip->dt.cfg_soc_resume_limit > 0) && !charging) + check_recharge_condition(chip); + } + + pr_debug("last_soc=%d calculated_soc=%d soc=%d time_since_last_change=%d\n", + chip->last_soc, chip->calculated_soc, + soc, time_since_last_change_sec); + + /* + * Backup the actual ocv (last_ocv_uv) and not the + * last_soc-interpolated ocv. This makes sure that + * the BMS algorithm always uses the correct ocv and + * can catch up on the last_soc (across reboots). + * We do not want the algorithm to be based of a wrong + * initial OCV. + */ + + backup_ocv_soc(chip, chip->last_ocv_uv, chip->last_soc); + + if (chip->reported_soc_in_use) + return prepare_reported_soc(chip); + + pr_debug("Reported SOC=%d\n", chip->last_soc); + + return chip->last_soc; +} + +static int report_state_of_charge(struct qpnp_bms_chip *chip) +{ + int soc; + + mutex_lock(&chip->last_soc_mutex); + + if (chip->dt.cfg_use_voltage_soc) + soc = report_voltage_based_soc(chip); + else + soc = report_vm_bms_soc(chip); + + mutex_unlock(&chip->last_soc_mutex); + + return soc; +} + +static void btm_notify_vbat(enum qpnp_tm_state state, void *ctx) +{ + struct qpnp_bms_chip *chip = ctx; + int vbat_uv; + int rc; + + rc = get_battery_voltage(chip, &vbat_uv); + if (rc) { + pr_err("error reading vbat_sns adc channel rc=%d\n", rc); + goto out; + } + + pr_debug("vbat is at %d, state is at %d\n", vbat_uv, state); + + if (state == ADC_TM_LOW_STATE) { + pr_debug("low voltage btm notification triggered\n"); + if (vbat_uv <= (chip->vbat_monitor_params.low_thr + + VBATT_ERROR_MARGIN)) { + if (!bms_wake_active(&chip->vbms_lv_wake_source)) + bms_stay_awake(&chip->vbms_lv_wake_source); + + chip->vbat_monitor_params.state_request = + ADC_TM_HIGH_THR_ENABLE; + } else { + pr_debug("faulty btm trigger, discarding\n"); + goto out; + } + } else if (state == ADC_TM_HIGH_STATE) { + pr_debug("high voltage btm notification triggered\n"); + if (vbat_uv > chip->vbat_monitor_params.high_thr) { + chip->vbat_monitor_params.state_request = + ADC_TM_LOW_THR_ENABLE; + if (bms_wake_active(&chip->vbms_lv_wake_source)) + bms_relax(&chip->vbms_lv_wake_source); + } else { + pr_debug("faulty btm trigger, discarding\n"); + goto out; + } + } else { + pr_debug("unknown voltage notification state: %d\n", state); + goto out; + } + + if (chip->bms_psy_registered) + power_supply_changed(chip->bms_psy); + +out: + qpnp_adc_tm_channel_measure(chip->adc_tm_dev, + &chip->vbat_monitor_params); +} + +static int reset_vbat_monitoring(struct qpnp_bms_chip *chip) +{ + int rc; + + chip->vbat_monitor_params.state_request = ADC_TM_HIGH_LOW_THR_DISABLE; + rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev, + &chip->vbat_monitor_params); + if (rc) { + pr_err("tm disable failed: %d\n", rc); + return rc; + } + + if (bms_wake_active(&chip->vbms_lv_wake_source)) + bms_relax(&chip->vbms_lv_wake_source); + + return 0; +} + +static int setup_vbat_monitoring(struct qpnp_bms_chip *chip) +{ + int rc; + + if 
(is_debug_batt_id(chip)) { + pr_debug("skip configuring vbat monitoring for debug_board\n"); + return 0; + } + chip->vbat_monitor_params.low_thr = + chip->dt.cfg_low_voltage_threshold; + chip->vbat_monitor_params.high_thr = + chip->dt.cfg_low_voltage_threshold + + VBATT_ERROR_MARGIN; + chip->vbat_monitor_params.state_request = ADC_TM_LOW_THR_ENABLE; + chip->vbat_monitor_params.channel = VBAT_SNS; + chip->vbat_monitor_params.btm_ctx = chip; + chip->vbat_monitor_params.timer_interval = ADC_MEAS1_INTERVAL_1S; + chip->vbat_monitor_params.threshold_notification = &btm_notify_vbat; + pr_debug("set low thr to %d and high to %d\n", + chip->vbat_monitor_params.low_thr, + chip->vbat_monitor_params.high_thr); + + rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev, + &chip->vbat_monitor_params); + if (rc) { + pr_err("adc-tm setup failed: %d\n", rc); + return rc; + } + + pr_debug("vbat monitoring setup complete\n"); + return 0; +} + +static void very_low_voltage_check(struct qpnp_bms_chip *chip, int vbat_uv) +{ + if (!bms_wake_active(&chip->vbms_lv_wake_source) + && (vbat_uv <= chip->dt.cfg_low_voltage_threshold)) { + pr_debug("voltage=%d holding low voltage ws\n", vbat_uv); + bms_stay_awake(&chip->vbms_lv_wake_source); + } else if (bms_wake_active(&chip->vbms_lv_wake_source) + && (vbat_uv > chip->dt.cfg_low_voltage_threshold)) { + pr_debug("voltage=%d releasing low voltage ws\n", vbat_uv); + bms_relax(&chip->vbms_lv_wake_source); + } +} + +static void cv_voltage_check(struct qpnp_bms_chip *chip, int vbat_uv) +{ + if (bms_wake_active(&chip->vbms_cv_wake_source)) { + if ((vbat_uv < (chip->dt.cfg_max_voltage_uv - + VBATT_ERROR_MARGIN + CV_DROP_MARGIN)) + && !is_battery_taper_charging(chip)) { + pr_debug("Fell below CV, releasing cv ws\n"); + chip->in_cv_state = false; + bms_relax(&chip->vbms_cv_wake_source); + } else if (!is_battery_charging(chip)) { + pr_debug("charging stopped, releasing cv ws\n"); + chip->in_cv_state = false; + bms_relax(&chip->vbms_cv_wake_source); + } + } else if (!bms_wake_active(&chip->vbms_cv_wake_source) + && is_battery_charging(chip) + && ((vbat_uv > (chip->dt.cfg_max_voltage_uv - + VBATT_ERROR_MARGIN)) + || is_battery_taper_charging(chip))) { + pr_debug("CC_TO_CV voltage=%d holding cv ws\n", vbat_uv); + chip->in_cv_state = true; + bms_stay_awake(&chip->vbms_cv_wake_source); + } +} + +static void low_soc_check(struct qpnp_bms_chip *chip) +{ + int rc; + + if (chip->dt.cfg_low_soc_fifo_length < 1) + return; + + mutex_lock(&chip->state_change_mutex); + + if (chip->calculated_soc <= chip->dt.cfg_low_soc_calc_threshold) { + if (!chip->low_soc_fifo_set) { + pr_debug("soc=%d (low-soc) setting fifo_length to %d\n", + chip->calculated_soc, + chip->dt.cfg_low_soc_fifo_length); + rc = get_fifo_length(chip, S2_STATE, + &chip->s2_fifo_length); + if (rc) { + pr_err("Unable to get_fifo_length rc=%d\n", rc); + goto low_soc_exit; + } + rc = set_fifo_length(chip, S2_STATE, + chip->dt.cfg_low_soc_fifo_length); + if (rc) { + pr_err("Unable to set_fifo_length rc=%d\n", rc); + goto low_soc_exit; + } + chip->low_soc_fifo_set = true; + } + } else { + if (chip->low_soc_fifo_set) { + pr_debug("soc=%d setting back fifo_length to %d\n", + chip->calculated_soc, + chip->s2_fifo_length); + rc = set_fifo_length(chip, S2_STATE, + chip->s2_fifo_length); + if (rc) { + pr_err("Unable to set_fifo_length rc=%d\n", rc); + goto low_soc_exit; + } + chip->low_soc_fifo_set = false; + } + } + +low_soc_exit: + mutex_unlock(&chip->state_change_mutex); +} + +static int calculate_soc_from_voltage(struct qpnp_bms_chip *chip) 
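+/* Maps vbat linearly between cfg_v_cutoff_uv (0%) and cfg_max_voltage_uv (100%). Purely illustrative values: with a 3,400,000 uV cutoff and a 4,350,000 uV max, a 3,875,000 uV reading gives (475,000 * 100) / 950,000 = 50. */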
+{ + int voltage_range_uv, voltage_remaining_uv, voltage_based_soc; + int rc, vbat_uv; + + /* check if we have the averaged fifo data */ + if (chip->voltage_soc_uv) { + vbat_uv = chip->voltage_soc_uv; + } else { + rc = get_battery_voltage(chip, &vbat_uv); + if (rc < 0) { + pr_err("adc vbat failed err = %d\n", rc); + return rc; + } + pr_debug("instant-voltage based voltage-soc\n"); + } + + voltage_range_uv = chip->dt.cfg_max_voltage_uv - + chip->dt.cfg_v_cutoff_uv; + voltage_remaining_uv = vbat_uv - chip->dt.cfg_v_cutoff_uv; + voltage_based_soc = voltage_remaining_uv * 100 / voltage_range_uv; + + voltage_based_soc = clamp(voltage_based_soc, 0, 100); + + if (chip->prev_voltage_based_soc != voltage_based_soc + && chip->bms_psy_registered) { + pr_debug("update bms_psy\n"); + power_supply_changed(chip->bms_psy); + } + chip->prev_voltage_based_soc = voltage_based_soc; + + pr_debug("vbat used = %duv\n", vbat_uv); + pr_debug("Calculated voltage based soc=%d\n", voltage_based_soc); + + if (voltage_based_soc == 100) + if (chip->dt.cfg_report_charger_eoc) + report_eoc(chip); + + return 0; +} + +static void calculate_reported_soc(struct qpnp_bms_chip *chip) +{ + union power_supply_propval ret = {0,}; + + if (chip->last_soc < 0) { + pr_debug("last_soc is not ready, return\n"); + return; + } + + if (chip->reported_soc > chip->last_soc) { + /*send DISCHARGING status if the reported_soc drops from 100 */ + if (chip->reported_soc == 100) { + ret.intval = POWER_SUPPLY_STATUS_DISCHARGING; + power_supply_set_property(chip->batt_psy, + POWER_SUPPLY_PROP_STATUS, &ret); + pr_debug("Report discharging status, reported_soc=%d, last_soc=%d\n", + chip->reported_soc, chip->last_soc); + } + /* + * reported_soc_delta is used to prevent + * the big change in last_soc, + * this is not used in high current mode + */ + if (chip->reported_soc_delta > 0) + chip->reported_soc_delta--; + + if (chip->reported_soc_high_current) + chip->reported_soc--; + else + chip->reported_soc = chip->last_soc + + chip->reported_soc_delta; + + pr_debug("New reported_soc=%d, last_soc is=%d\n", + chip->reported_soc, chip->last_soc); + } else { + chip->reported_soc_in_use = false; + chip->reported_soc_high_current = false; + pr_debug("reported_soc equals last_soc,stop reported_soc process\n"); + } + pr_debug("bms power_supply_changed\n"); + power_supply_changed(chip->bms_psy); +} + +static int clamp_soc_based_on_voltage(struct qpnp_bms_chip *chip, int soc) +{ + int rc, vbat_uv; + + rc = get_battery_voltage(chip, &vbat_uv); + if (rc < 0) { + pr_err("adc vbat failed err = %d\n", rc); + return soc; + } + + /* only clamp when discharging */ + if (is_battery_charging(chip)) + return soc; + + if (soc <= 0 && vbat_uv > chip->dt.cfg_v_cutoff_uv) { + pr_debug("clamping soc to 1, vbat (%d) > cutoff (%d)\n", + vbat_uv, chip->dt.cfg_v_cutoff_uv); + return 1; + } + pr_debug("not clamping, using soc = %d, vbat = %d and cutoff = %d\n", + soc, vbat_uv, chip->dt.cfg_v_cutoff_uv); + return soc; +} + +static void battery_voltage_check(struct qpnp_bms_chip *chip) +{ + int rc, vbat_uv = 0; + + rc = get_battery_voltage(chip, &vbat_uv); + if (rc < 0) { + pr_err("Failed to read battery-voltage rc=%d\n", rc); + } else { + very_low_voltage_check(chip, vbat_uv); + cv_voltage_check(chip, vbat_uv); + } +} + +#define UI_SOC_CATCHUP_TIME (60) +static void monitor_soc_work(struct work_struct *work) +{ + struct qpnp_bms_chip *chip = container_of(work, + struct qpnp_bms_chip, + monitor_soc_work.work); + int rc, new_soc = 0, batt_temp; + + /*skip if its a debug-board */ + if 
(is_debug_batt_id(chip)) + return; + + bms_stay_awake(&chip->vbms_soc_wake_source); + + calculate_delta_time(&chip->tm_sec, &chip->delta_time_s); + pr_debug("elapsed_time=%d\n", chip->delta_time_s); + + mutex_lock(&chip->last_soc_mutex); + + if (!is_battery_present(chip)) { + /* if battery is not preset report 100% SOC */ + pr_debug("battery gone, reporting 100\n"); + chip->last_soc_invalid = true; + chip->last_soc = -EINVAL; + new_soc = 100; + } else { + battery_voltage_check(chip); + + if (chip->dt.cfg_use_voltage_soc) { + calculate_soc_from_voltage(chip); + } else { + rc = get_batt_therm(chip, &batt_temp); + if (rc < 0) { + pr_err("Unable to read batt temp rc=%d, using default=%d\n", + rc, BMS_DEFAULT_TEMP); + batt_temp = BMS_DEFAULT_TEMP; + } + + if (chip->last_soc_invalid) { + chip->last_soc_invalid = false; + chip->last_soc = -EINVAL; + } + new_soc = lookup_soc_ocv(chip, chip->last_ocv_uv, + batt_temp); + /* clamp soc due to BMS hw/sw immaturities */ + new_soc = clamp_soc_based_on_voltage(chip, new_soc); + + if (chip->calculated_soc != new_soc) { + pr_debug("SOC changed! new_soc=%d prev_soc=%d\n", + new_soc, chip->calculated_soc); + chip->calculated_soc = new_soc; + /* + * To recalculate the catch-up time, clear it + * when SOC changes. + */ + chip->catch_up_time_sec = 0; + + if (chip->calculated_soc == 100) + /* update last_soc immediately */ + report_vm_bms_soc(chip); + + pr_debug("update bms_psy\n"); + power_supply_changed(chip->bms_psy); + } else if (chip->last_soc != chip->calculated_soc) { + pr_debug("update bms_psy\n"); + power_supply_changed(chip->bms_psy); + } else { + report_vm_bms_soc(chip); + } + } + /* low SOC configuration */ + low_soc_check(chip); + } + /* + * schedule the work only if last_soc has not caught up with + * the calculated soc or if we are using voltage based soc + */ + if ((chip->last_soc != chip->calculated_soc) || + chip->dt.cfg_use_voltage_soc) + schedule_delayed_work(&chip->monitor_soc_work, + msecs_to_jiffies(get_calculation_delay_ms(chip))); + + if (chip->reported_soc_in_use && chip->charger_removed_since_full + && !chip->charger_reinserted) { + /* record the elapsed time after last reported_soc change */ + chip->reported_soc_change_sec += chip->delta_time_s; + pr_debug("reported_soc_change_sec=%d\n", + chip->reported_soc_change_sec); + + /* above the catch up time, calculate new reported_soc */ + if (chip->reported_soc_change_sec > UI_SOC_CATCHUP_TIME) { + calculate_reported_soc(chip); + chip->reported_soc_change_sec = 0; + } + } + + mutex_unlock(&chip->last_soc_mutex); + + bms_relax(&chip->vbms_soc_wake_source); +} + +#define DEBUG_BOARD_SOC 67 +#define BATT_MISSING_SOC 50 +static int get_prop_bms_capacity(struct qpnp_bms_chip *chip) +{ + if (is_debug_batt_id(chip)) + return DEBUG_BOARD_SOC; + + if (!chip->battery_present) + return BATT_MISSING_SOC; + + return report_state_of_charge(chip); +} + +static bool is_hi_power_state_requested(struct qpnp_bms_chip *chip) +{ + + pr_debug("hi_power_state=0x%x\n", chip->hi_power_state); + + if (chip->hi_power_state & VMBMS_IGNORE_ALL_BIT) + return false; + else + return !!chip->hi_power_state; + +} + +static int qpnp_vm_bms_config_power_state(struct qpnp_bms_chip *chip, + int usecase, bool hi_power_enable) +{ + if (usecase < 0) { + pr_err("Invalid power-usecase %x\n", usecase); + return -EINVAL; + } + + if (hi_power_enable) + chip->hi_power_state |= usecase; + else + chip->hi_power_state &= ~usecase; + + pr_debug("hi_power_state=%x usecase=%x hi_power_enable=%d\n", + chip->hi_power_state, usecase, 
hi_power_enable); + + return 0; +} + +static int get_prop_bms_current_now(struct qpnp_bms_chip *chip) +{ + return chip->current_now; +} + +static int get_current_cc(struct qpnp_bms_chip *chip) +{ + int soc, cc_full; + int64_t current_charge; + + if (chip->batt_data == NULL) + return -EINVAL; + + cc_full = chip->batt_data->fcc; + if (chip->dt.cfg_use_voltage_soc) + soc = chip->prev_voltage_based_soc; + else + soc = chip->last_soc; + + /* + * Full charge capacity is in mAh and soc is in % + * current_charge capacity is defined in uAh + * Hence conversion ((mAh * pct * 1000) / 100) => (mAh * pct * 10) + */ + current_charge = cc_full * soc * 10; + + return current_charge; +} + +static int get_charge_full(struct qpnp_bms_chip *chip) +{ + + if (chip->batt_data) + return chip->batt_data->fcc * 1000; + + return -EINVAL; +} + +static enum power_supply_property bms_power_props[] = { + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_RESISTANCE, + POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE, + POWER_SUPPLY_PROP_RESISTANCE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_VOLTAGE_OCV, + POWER_SUPPLY_PROP_HI_POWER, + POWER_SUPPLY_PROP_LOW_POWER, + POWER_SUPPLY_PROP_BATTERY_TYPE, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_CHARGE_COUNTER, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_RESISTANCE_ID, +}; + +static int +qpnp_vm_bms_property_is_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + switch (psp) { + case POWER_SUPPLY_PROP_CURRENT_NOW: + case POWER_SUPPLY_PROP_VOLTAGE_OCV: + case POWER_SUPPLY_PROP_HI_POWER: + case POWER_SUPPLY_PROP_LOW_POWER: + return 1; + default: + break; + } + + return 0; +} + +static int qpnp_vm_bms_power_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct qpnp_bms_chip *chip = power_supply_get_drvdata(psy); + int value = 0, rc; + + val->intval = 0; + + switch (psp) { + case POWER_SUPPLY_PROP_CAPACITY: + val->intval = get_prop_bms_capacity(chip); + break; + case POWER_SUPPLY_PROP_STATUS: + val->intval = chip->battery_status; + break; + case POWER_SUPPLY_PROP_RESISTANCE: + val->intval = get_prop_bms_rbatt(chip); + break; + case POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE: + if (chip->batt_data->rbatt_capacitive_mohm > 0) + val->intval = chip->batt_data->rbatt_capacitive_mohm; + if (chip->dt.cfg_r_conn_mohm > 0) + val->intval += chip->dt.cfg_r_conn_mohm; + break; + case POWER_SUPPLY_PROP_RESISTANCE_NOW: + rc = get_batt_therm(chip, &value); + if (rc < 0) + value = BMS_DEFAULT_TEMP; + val->intval = get_rbatt(chip, chip->calculated_soc, value); + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: + val->intval = get_prop_bms_current_now(chip); + break; + case POWER_SUPPLY_PROP_BATTERY_TYPE: + val->strval = chip->batt_data->battery_type; + break; + case POWER_SUPPLY_PROP_VOLTAGE_OCV: + val->intval = chip->last_ocv_uv; + break; + case POWER_SUPPLY_PROP_TEMP: + rc = get_batt_therm(chip, &value); + if (rc < 0) + value = BMS_DEFAULT_TEMP; + val->intval = value; + break; + case POWER_SUPPLY_PROP_HI_POWER: + val->intval = is_hi_power_state_requested(chip); + break; + case POWER_SUPPLY_PROP_LOW_POWER: + val->intval = !is_hi_power_state_requested(chip); + break; + case POWER_SUPPLY_PROP_CYCLE_COUNT: + if (chip->dt.cfg_battery_aging_comp) + val->intval = chip->charge_cycles; + else + val->intval = -EINVAL; + break; + case POWER_SUPPLY_PROP_CHARGE_COUNTER: + val->intval = get_current_cc(chip); + break; + case POWER_SUPPLY_PROP_CHARGE_FULL: 
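+ /* get_charge_full() converts the profile fcc from mAh to uAh (fcc * 1000). */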
+ val->intval = get_charge_full(chip); + break; + case POWER_SUPPLY_PROP_RESISTANCE_ID: + val->intval = chip->batt_id_ohm; + break; + default: + return -EINVAL; + } + return 0; +} + +static int qpnp_vm_bms_power_set_property(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val) +{ + int rc = 0; + struct qpnp_bms_chip *chip = power_supply_get_drvdata(psy); + + switch (psp) { + case POWER_SUPPLY_PROP_CURRENT_NOW: + chip->current_now = val->intval; + pr_debug("IBATT = %d\n", val->intval); + break; + case POWER_SUPPLY_PROP_VOLTAGE_OCV: + pm_relax(chip->dev); + cancel_delayed_work_sync(&chip->monitor_soc_work); + chip->last_ocv_uv = val->intval; + pr_debug("OCV = %d\n", val->intval); + schedule_delayed_work(&chip->monitor_soc_work, 0); + break; + case POWER_SUPPLY_PROP_HI_POWER: + rc = qpnp_vm_bms_config_power_state(chip, val->intval, true); + if (rc) + pr_err("Unable to set power-state rc=%d\n", rc); + break; + case POWER_SUPPLY_PROP_LOW_POWER: + rc = qpnp_vm_bms_config_power_state(chip, val->intval, false); + if (rc) + pr_err("Unable to set power-state rc=%d\n", rc); + break; + default: + return -EINVAL; + } + return rc; +} + +static void bms_new_battery_setup(struct qpnp_bms_chip *chip) +{ + int rc; + + mutex_lock(&chip->bms_data_mutex); + + chip->last_soc_invalid = true; + /* + * disable and re-enable the BMS hardware to reset + * the realtime-FIFO data and restart accumulation + */ + rc = qpnp_masked_write_base(chip, chip->base + EN_CTL_REG, + BMS_EN_BIT, 0); + /* delay for the BMS hardware to reset its state */ + msleep(200); + rc |= qpnp_masked_write_base(chip, chip->base + EN_CTL_REG, + BMS_EN_BIT, BMS_EN_BIT); + /* delay for the BMS hardware to re-start */ + msleep(200); + if (rc) + pr_err("Unable to reset BMS rc=%d\n", rc); + + chip->last_ocv_uv = estimate_ocv(chip); + + memset(&chip->bms_data, 0, sizeof(chip->bms_data)); + + /* update the sequence number */ + chip->bms_data.seq_num = chip->seq_num++; + + /* signal the read thread */ + chip->data_ready = true; + wake_up_interruptible(&chip->bms_wait_q); + + /* hold a wake lock until the read thread is scheduled */ + if (chip->bms_dev_open) + pm_stay_awake(chip->dev); + + mutex_unlock(&chip->bms_data_mutex); + + /* reset aging variables */ + if (chip->dt.cfg_battery_aging_comp) { + chip->charge_cycles = 0; + chip->charge_increase = 0; + rc = backup_charge_cycle(chip); + if (rc) + pr_err("Unable to reset aging data rc=%d\n", rc); + } +} + +static void battery_insertion_check(struct qpnp_bms_chip *chip) +{ + int present = (int)is_battery_present(chip); + + if (chip->battery_present != present) { + pr_debug("shadow_sts=%d status=%d\n", + chip->battery_present, present); + if (chip->battery_present != -EINVAL) { + if (present) { + /* new battery inserted */ + bms_new_battery_setup(chip); + setup_vbat_monitoring(chip); + pr_debug("New battery inserted!\n"); + } else { + /* battery removed */ + reset_vbat_monitoring(chip); + pr_debug("Battery removed\n"); + } + } + chip->battery_present = present; + } +} + +static void battery_status_check(struct qpnp_bms_chip *chip) +{ + int status = get_battery_status(chip); + + if (chip->battery_status != status) { + if (status == POWER_SUPPLY_STATUS_CHARGING) { + pr_debug("charging started\n"); + charging_began(chip); + } else if (chip->battery_status == + POWER_SUPPLY_STATUS_CHARGING) { + pr_debug("charging stopped\n"); + charging_ended(chip); + } + + if (status == POWER_SUPPLY_STATUS_FULL) { + pr_debug("battery full\n"); + chip->battery_full = true; + } 
else if (chip->battery_status == POWER_SUPPLY_STATUS_FULL) { + pr_debug("battery not-full anymore\n"); + chip->battery_full = false; + } + chip->battery_status = status; + } +} + +#define HIGH_CURRENT_TH 2 +static void reported_soc_check_status(struct qpnp_bms_chip *chip) +{ + u8 present; + + present = is_charger_present(chip); + pr_debug("usb_present=%d\n", present); + + if (!present && !chip->charger_removed_since_full) { + chip->charger_removed_since_full = true; + pr_debug("reported_soc: charger removed since full\n"); + return; + } + if (chip->reported_soc_high_current) { + pr_debug("reported_soc in high current mode, return\n"); + return; + } + if ((chip->reported_soc - chip->last_soc) > + (100 - chip->dt.cfg_soc_resume_limit + + HIGH_CURRENT_TH)) { + chip->reported_soc_high_current = true; + chip->charger_removed_since_full = true; + chip->charger_reinserted = false; + pr_debug("reported_soc enters high current mode\n"); + return; + } + if (present && chip->charger_removed_since_full) { + chip->charger_reinserted = true; + pr_debug("reported_soc: charger reinserted\n"); + } + if (!present && chip->charger_removed_since_full) { + chip->charger_reinserted = false; + pr_debug("reported_soc: charger removed again\n"); + } +} + +static void qpnp_vm_bms_ext_power_changed(struct power_supply *psy) +{ + struct qpnp_bms_chip *chip = power_supply_get_drvdata(psy); + + pr_debug("Triggered!\n"); + battery_status_check(chip); + battery_insertion_check(chip); + + mutex_lock(&chip->last_soc_mutex); + battery_voltage_check(chip); + mutex_unlock(&chip->last_soc_mutex); + + if (chip->reported_soc_in_use) + reported_soc_check_status(chip); +} + + +static void dump_bms_data(const char *func, struct qpnp_bms_chip *chip) +{ + int i; + + pr_debug("%s: fifo_count=%d acc_count=%d seq_num=%d\n", + func, chip->bms_data.num_fifo, + chip->bms_data.acc_count, + chip->bms_data.seq_num); + + for (i = 0; i < chip->bms_data.num_fifo; i++) + pr_debug("fifo=%d fifo_uv=%d sample_interval=%d sample_count=%d\n", + i, chip->bms_data.fifo_uv[i], + chip->bms_data.sample_interval_ms, + chip->bms_data.sample_count); + pr_debug("avg_acc_data=%d\n", chip->bms_data.acc_uv); +} + +static int read_and_populate_fifo_data(struct qpnp_bms_chip *chip) +{ + u8 fifo_count = 0, val = 0; + u8 fifo_data_raw[MAX_FIFO_REGS * 2]; + u16 fifo_data; + int rc, i, j; + int64_t voltage_soc_avg = 0; + + /* read the completed FIFO count */ + rc = qpnp_read_wrapper(chip, &val, chip->base + STATUS2_REG, 1); + if (rc) { + pr_err("Unable to read STATUS2 register rc=%d\n", rc); + return rc; + } + fifo_count = (val & FIFO_CNT_SD_MASK) >> FIFO_CNT_SD_SHIFT; + pr_debug("fifo_count=%d\n", fifo_count); + if (!fifo_count) { + pr_debug("No data in FIFO\n"); + return 0; + } else if (fifo_count > MAX_FIFO_REGS) { + pr_err("Invalid fifo-length %d rejecting data\n", fifo_count); + chip->bms_data.num_fifo = 0; + return 0; + } + + /* read the FIFO data */ + for (i = 0; i < fifo_count * 2; i++) { + rc = qpnp_read_wrapper(chip, &fifo_data_raw[i], + chip->base + FIFO_0_LSB_REG + i, 1); + if (rc) { + pr_err("Unable to read FIFO register(%d) rc=%d\n", + i, rc); + return rc; + } + } + + /* populate the structure */ + chip->bms_data.num_fifo = fifo_count; + + rc = get_sample_interval(chip, chip->current_fsm_state, + &chip->bms_data.sample_interval_ms); + if (rc) { + pr_err("Unable to read state=%d sample_interval rc=%d\n", + chip->current_fsm_state, rc); + return rc; + } + + rc = get_sample_count(chip, chip->current_fsm_state, + &chip->bms_data.sample_count); + if (rc) { + 
pr_err("Unable to read state=%d sample_count rc=%d\n", + chip->current_fsm_state, rc); + return rc; + } + + for (i = 0, j = 0; i < fifo_count * 2; i = i + 2, j++) { + fifo_data = fifo_data_raw[i] | (fifo_data_raw[i + 1] << 8); + chip->bms_data.fifo_uv[j] = convert_vbatt_raw_to_uv(chip, + fifo_data, 0); + voltage_soc_avg += chip->bms_data.fifo_uv[j]; + } + /* store the fifo average for voltage-based-soc */ + chip->voltage_soc_uv = div_u64(voltage_soc_avg, fifo_count); + + return 0; +} + +static int read_and_populate_acc_data(struct qpnp_bms_chip *chip) +{ + int rc; + u32 acc_data_sd = 0, acc_count_sd = 0, avg_acc_data = 0; + + /* read ACC SD count */ + rc = qpnp_read_wrapper(chip, (u8 *)&acc_count_sd, + chip->base + ACC_CNT_SD_REG, 1); + if (rc) { + pr_err("Unable to read ACC_CNT_SD_REG rc=%d\n", rc); + return rc; + } + if (!acc_count_sd) { + pr_debug("No data in accumulator\n"); + return 0; + } + /* read ACC SD data */ + rc = qpnp_read_wrapper(chip, (u8 *)&acc_data_sd, + chip->base + ACC_DATA0_SD_REG, 3); + if (rc) { + pr_err("Unable to read ACC_DATA0_SD_REG rc=%d\n", rc); + return rc; + } + avg_acc_data = div_u64(acc_data_sd, acc_count_sd); + + chip->bms_data.acc_uv = convert_vbatt_raw_to_uv(chip, + avg_acc_data, 0); + chip->bms_data.acc_count = acc_count_sd; + + rc = get_sample_interval(chip, chip->current_fsm_state, + &chip->bms_data.sample_interval_ms); + if (rc) { + pr_err("Unable to read state=%d sample_interval rc=%d\n", + chip->current_fsm_state, rc); + return rc; + } + + rc = get_sample_count(chip, chip->current_fsm_state, + &chip->bms_data.sample_count); + if (rc) { + pr_err("Unable to read state=%d sample_count rc=%d\n", + chip->current_fsm_state, rc); + return rc; + } + + return 0; +} + +static int clear_fifo_acc_data(struct qpnp_bms_chip *chip) +{ + int rc; + u8 reg = 0; + + reg = FIFO_CNT_SD_CLR_BIT | ACC_DATA_SD_CLR_BIT | ACC_CNT_SD_CLR_BIT; + rc = qpnp_masked_write_base(chip, chip->base + DATA_CTL2_REG, reg, reg); + if (rc) + pr_err("Unable to write DATA_CTL2_REG rc=%d\n", rc); + + return rc; +} + +static irqreturn_t bms_fifo_update_done_irq_handler(int irq, void *_chip) +{ + int rc; + struct qpnp_bms_chip *chip = _chip; + + pr_debug("fifo_update_done triggered\n"); + + mutex_lock(&chip->bms_data_mutex); + + if (chip->suspend_data_valid) { + pr_debug("Suspend data not processed yet\n"); + goto fail_fifo; + } + + rc = calib_vadc(chip); + if (rc) + pr_err("Unable to calibrate vadc rc=%d\n", rc); + + /* clear old data */ + memset(&chip->bms_data, 0, sizeof(chip->bms_data)); + /* + * 1. Read FIFO and populate the bms_data + * 2. Clear FIFO data + * 3. 
Notify userspace + */ + rc = update_fsm_state(chip); + if (rc) { + pr_err("Unable to read FSM state rc=%d\n", rc); + goto fail_fifo; + } + pr_debug("fsm_state=%d\n", chip->current_fsm_state); + + rc = read_and_populate_fifo_data(chip); + if (rc) { + pr_err("Unable to read FIFO data rc=%d\n", rc); + goto fail_fifo; + } + + rc = clear_fifo_acc_data(chip); + if (rc) + pr_err("Unable to clear FIFO/ACC data rc=%d\n", rc); + + /* update the sequence number */ + chip->bms_data.seq_num = chip->seq_num++; + + dump_bms_data(__func__, chip); + + /* signal the read thread */ + chip->data_ready = true; + wake_up_interruptible(&chip->bms_wait_q); + + /* hold a wake lock until the read thread is scheduled */ + if (chip->bms_dev_open) + pm_stay_awake(chip->dev); +fail_fifo: + mutex_unlock(&chip->bms_data_mutex); + return IRQ_HANDLED; +} + +static irqreturn_t bms_fsm_state_change_irq_handler(int irq, void *_chip) +{ + int rc; + struct qpnp_bms_chip *chip = _chip; + + pr_debug("fsm_state_changed triggered\n"); + + mutex_lock(&chip->bms_data_mutex); + + if (chip->suspend_data_valid) { + pr_debug("Suspend data not processed yet\n"); + goto fail_state; + } + + rc = calib_vadc(chip); + if (rc) + pr_err("Unable to calibrate vadc rc=%d\n", rc); + + /* clear old data */ + memset(&chip->bms_data, 0, sizeof(chip->bms_data)); + /* + * 1. Read FIFO and ACC_DATA and populate the bms_data + * 2. Clear FIFO & ACC data + * 3. Notify userspace + */ + pr_debug("prev_fsm_state=%d\n", chip->current_fsm_state); + + rc = read_and_populate_fifo_data(chip); + if (rc) { + pr_err("Unable to read FIFO data rc=%d\n", rc); + goto fail_state; + } + + /* read accumulator data */ + rc = read_and_populate_acc_data(chip); + if (rc) { + pr_err("Unable to read ACC_SD data rc=%d\n", rc); + goto fail_state; + } + + rc = update_fsm_state(chip); + if (rc) { + pr_err("Unable to read FSM state rc=%d\n", rc); + goto fail_state; + } + + rc = clear_fifo_acc_data(chip); + if (rc) + pr_err("Unable to clear FIFO/ACC data rc=%d\n", rc); + + /* update the sequence number */ + chip->bms_data.seq_num = chip->seq_num++; + + dump_bms_data(__func__, chip); + + /* signal the read thread */ + chip->data_ready = true; + wake_up_interruptible(&chip->bms_wait_q); + + /* hold a wake lock until the read thread is scheduled */ + if (chip->bms_dev_open) + pm_stay_awake(chip->dev); +fail_state: + mutex_unlock(&chip->bms_data_mutex); + return IRQ_HANDLED; +} + +static int read_shutdown_ocv_soc(struct qpnp_bms_chip *chip) +{ + u8 stored_soc = 0; + u16 stored_ocv = 0; + int rc; + + rc = qpnp_read_wrapper(chip, (u8 *)&stored_ocv, + chip->base + BMS_OCV_REG, 2); + if (rc) { + pr_err("failed to read addr = %d %d\n", + chip->base + BMS_OCV_REG, rc); + return -EINVAL; + } + + /* if shutdwon ocv is invalid, reject shutdown soc too */ + if (!stored_ocv || (stored_ocv == OCV_INVALID)) { + pr_debug("shutdown OCV %d - invalid\n", stored_ocv); + chip->shutdown_ocv = OCV_INVALID; + chip->shutdown_soc = SOC_INVALID; + return -EINVAL; + } + chip->shutdown_ocv = stored_ocv * 1000; + + /* + * The previous SOC is stored in the first 7 bits of the register as + * (Shutdown SOC + 1). This allows for register reset values of both + * 0x00 and 0xFF. 
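+ * For example, a stored value of 0x32 decodes below as (0x32 >> 1) - 1 = 24.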
+ */ + rc = qpnp_read_wrapper(chip, &stored_soc, chip->base + BMS_SOC_REG, 1); + if (rc) { + pr_err("failed to read addr = %d %d\n", + chip->base + BMS_SOC_REG, rc); + return -EINVAL; + } + + if (!stored_soc || stored_soc == SOC_INVALID) { + chip->shutdown_soc = SOC_INVALID; + chip->shutdown_ocv = OCV_INVALID; + return -EINVAL; + } + chip->shutdown_soc = (stored_soc >> 1) - 1; + + pr_debug("shutdown_ocv=%d shutdown_soc=%d\n", + chip->shutdown_ocv, chip->shutdown_soc); + + return 0; +} + +static int interpolate_current_comp(int die_temp) +{ + int i; + int num_rows = ARRAY_SIZE(temp_curr_comp_lut); + + if (die_temp <= (temp_curr_comp_lut[0].temp_decideg)) + return temp_curr_comp_lut[0].current_ma; + + if (die_temp >= (temp_curr_comp_lut[num_rows - 1].temp_decideg)) + return temp_curr_comp_lut[num_rows - 1].current_ma; + + for (i = 0; i < num_rows - 1; i++) + if (die_temp <= (temp_curr_comp_lut[i].temp_decideg)) + break; + + if (die_temp == (temp_curr_comp_lut[i].temp_decideg)) + return temp_curr_comp_lut[i].current_ma; + + return linear_interpolate( + temp_curr_comp_lut[i - 1].current_ma, + temp_curr_comp_lut[i - 1].temp_decideg, + temp_curr_comp_lut[i].current_ma, + temp_curr_comp_lut[i].temp_decideg, + die_temp); +} + +static void adjust_pon_ocv(struct qpnp_bms_chip *chip, int batt_temp) +{ + int rc, current_ma, rbatt_mohm, die_temp, delta_uv, pc, result; + + rc = iio_read_channel_processed(chip->die_temp, &result); + if (rc < 0) { + pr_err("error reading die_temp channel rc=%d\n", rc); + } else { + pc = interpolate_pc(chip->batt_data->pc_temp_ocv_lut, + batt_temp, chip->last_ocv_uv / 1000); + /* + * For pc < 2, use the rbatt of pc = 2. This is to avoid + * the huge rbatt values at pc < 2 which can disrupt the pon_ocv + * calculations. + */ + if (pc < 2) + pc = 2; + rbatt_mohm = get_rbatt(chip, pc, batt_temp); + /* convert die_temp to DECIDEGC */ + die_temp = result / 100; + current_ma = interpolate_current_comp(die_temp); + delta_uv = rbatt_mohm * current_ma; + pr_debug("PON OCV changed from %d to %d pc=%d rbatt=%d current_ma=%d die_temp=%d batt_temp=%d delta_uv=%d\n", + chip->last_ocv_uv, chip->last_ocv_uv + delta_uv, pc, + rbatt_mohm, current_ma, die_temp, batt_temp, delta_uv); + + chip->last_ocv_uv += delta_uv; + } +} + +static int calculate_initial_soc(struct qpnp_bms_chip *chip) +{ + int rc, batt_temp = 0, est_ocv = 0; + + rc = get_batt_therm(chip, &batt_temp); + if (rc < 0) { + pr_err("Unable to read batt temp, using default=%d\n", + BMS_DEFAULT_TEMP); + batt_temp = BMS_DEFAULT_TEMP; + } + + rc = read_and_update_ocv(chip, batt_temp, true); + if (rc) { + pr_err("Unable to read PON OCV rc=%d\n", rc); + return rc; + } + + rc = read_shutdown_ocv_soc(chip); + if (rc < 0 || chip->dt.cfg_ignore_shutdown_soc) + chip->shutdown_soc_invalid = true; + + if (chip->warm_reset) { + /* + * if we have powered on from warm reset - + * Always use shutdown SOC. 
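As a unit check on the adjust_pon_ocv() math above: milliohms times milliamps lands directly in microvolts, so delta_uv needs no extra scaling. With illustrative numbers:

/*
 * Illustrative numbers only:
 *   rbatt_mohm = 150, current_ma = 120 (from the die-temp LUT)
 *   delta_uv   = 150 * 120 = 18000 uV = 18 mV
 * so last_ocv_uv is raised by 18 mV to compensate for the load current
 * that was flowing while the PON OCV was sampled.
 */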
If shudown SOC is invalid then + * estimate OCV + */ + if (chip->shutdown_soc_invalid) { + pr_debug("Estimate OCV\n"); + est_ocv = estimate_ocv(chip); + if (est_ocv <= 0) { + pr_err("Unable to estimate OCV rc=%d\n", + est_ocv); + return -EINVAL; + } + chip->last_ocv_uv = est_ocv; + chip->calculated_soc = lookup_soc_ocv(chip, est_ocv, + batt_temp); + } else { + chip->last_ocv_uv = chip->shutdown_ocv; + chip->last_soc = chip->shutdown_soc; + chip->calculated_soc = lookup_soc_ocv(chip, + chip->shutdown_ocv, batt_temp); + pr_debug("Using shutdown SOC\n"); + } + } else { + /* + * In PM8916 2.0 PON OCV calculation is delayed due to + * change in the ordering of power-on sequence of LDO6. + * Adjust PON OCV to include current during PON. + */ + if (chip->workaround_flag & WRKARND_PON_OCV_COMP) + adjust_pon_ocv(chip, batt_temp); + + /* !warm_reset use PON OCV only if shutdown SOC is invalid */ + chip->calculated_soc = lookup_soc_ocv(chip, + chip->last_ocv_uv, batt_temp); + if (!chip->shutdown_soc_invalid && + (abs(chip->shutdown_soc - chip->calculated_soc) < + chip->dt.cfg_shutdown_soc_valid_limit)) { + chip->last_ocv_uv = chip->shutdown_ocv; + chip->last_soc = chip->shutdown_soc; + chip->calculated_soc = lookup_soc_ocv(chip, + chip->shutdown_ocv, batt_temp); + pr_debug("Using shutdown SOC\n"); + } else { + chip->shutdown_soc_invalid = true; + pr_debug("Using PON SOC\n"); + } + } + /* store the start-up OCV for voltage-based-soc */ + chip->voltage_soc_uv = chip->last_ocv_uv; + + pr_info("warm_reset=%d est_ocv=%d shutdown_soc_invalid=%d shutdown_ocv=%d shutdown_soc=%d last_soc=%d calculated_soc=%d last_ocv_uv=%d\n", + chip->warm_reset, est_ocv, chip->shutdown_soc_invalid, + chip->shutdown_ocv, chip->shutdown_soc, chip->last_soc, + chip->calculated_soc, chip->last_ocv_uv); + + return 0; +} + +static int calculate_initial_aging_comp(struct qpnp_bms_chip *chip) +{ + int rc; + bool battery_removed = is_battery_replaced_in_offmode(chip); + + if (battery_removed || chip->shutdown_soc_invalid) { + pr_info("Clearing aging data battery_removed=%d shutdown_soc_invalid=%d\n", + battery_removed, chip->shutdown_soc_invalid); + chip->charge_cycles = 0; + chip->charge_increase = 0; + rc = backup_charge_cycle(chip); + if (rc) + pr_err("Unable to reset aging data rc=%d\n", rc); + } else { + rc = read_chgcycle_data_from_backup(chip); + if (rc) + pr_err("Unable to read aging data rc=%d\n", rc); + } + + pr_debug("Initial aging data charge_cycles=%u charge_increase=%u\n", + chip->charge_cycles, chip->charge_increase); + return rc; +} + +static int bms_load_hw_defaults(struct qpnp_bms_chip *chip) +{ + u8 val, bms_en = 0; + u32 interval[2], count[2], fifo[2]; + int rc; + + /* S3 OCV tolerence threshold */ + if (chip->dt.cfg_s3_ocv_tol_uv >= 0 && + chip->dt.cfg_s3_ocv_tol_uv <= MAX_OCV_TOL_THRESHOLD) { + val = chip->dt.cfg_s3_ocv_tol_uv / OCV_TOL_LSB_UV; + rc = qpnp_masked_write_base(chip, + chip->base + S3_OCV_TOL_CTL_REG, 0xFF, val); + if (rc) { + pr_err("Unable to write s3_ocv_tol_threshold rc=%d\n", + rc); + return rc; + } + } + + /* S1 accumulator threshold */ + if (chip->dt.cfg_s1_sample_count >= 1 && + chip->dt.cfg_s1_sample_count <= MAX_SAMPLE_COUNT) { + val = (chip->dt.cfg_s1_sample_count > 1) ? 
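For reference, the accumulator-count encoding used here for S1 (and for S2 below) stores log2 of the configured sample count, minus one; a few illustrative values, assuming power-of-two counts from DT:

/*
 * cfg_sX_sample_count    value written (ilog2(count) - 1)
 *          4                        1
 *         16                        3
 *         64                        5
 * A count of 1 is written as 0; non-power-of-two counts are
 * effectively rounded down, since ilog2() returns floor(log2(count)).
 */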
+ (ilog2(chip->dt.cfg_s1_sample_count) - 1) : 0; + rc = qpnp_masked_write_base(chip, + chip->base + S1_ACC_CNT_REG, + ACC_CNT_MASK, val); + if (rc) { + pr_err("Unable to write s1 sample count rc=%d\n", rc); + return rc; + } + } + + /* S2 accumulator threshold */ + if (chip->dt.cfg_s2_sample_count >= 1 && + chip->dt.cfg_s2_sample_count <= MAX_SAMPLE_COUNT) { + val = (chip->dt.cfg_s2_sample_count > 1) ? + (ilog2(chip->dt.cfg_s2_sample_count) - 1) : 0; + rc = qpnp_masked_write_base(chip, + chip->base + S2_ACC_CNT_REG, + ACC_CNT_MASK, val); + if (rc) { + pr_err("Unable to write s2 sample count rc=%d\n", rc); + return rc; + } + } + + if (chip->dt.cfg_s1_sample_interval_ms >= 0 && + chip->dt.cfg_s1_sample_interval_ms <= MAX_SAMPLE_INTERVAL) { + val = chip->dt.cfg_s1_sample_interval_ms / 10; + rc = qpnp_write_wrapper(chip, &val, + chip->base + S1_SAMPLE_INTVL_REG, 1); + if (rc) { + pr_err("Unable to write s1 sample inteval rc=%d\n", rc); + return rc; + } + } + + if (chip->dt.cfg_s2_sample_interval_ms >= 0 && + chip->dt.cfg_s2_sample_interval_ms <= MAX_SAMPLE_INTERVAL) { + val = chip->dt.cfg_s2_sample_interval_ms / 10; + rc = qpnp_write_wrapper(chip, &val, + chip->base + S2_SAMPLE_INTVL_REG, 1); + if (rc) { + pr_err("Unable to write s2 sample inteval rc=%d\n", rc); + return rc; + } + } + + if (chip->dt.cfg_s1_fifo_length >= 0 && + chip->dt.cfg_s1_fifo_length <= MAX_FIFO_REGS) { + rc = qpnp_masked_write_base(chip, chip->base + FIFO_LENGTH_REG, + S1_FIFO_LENGTH_MASK, + chip->dt.cfg_s1_fifo_length); + if (rc) { + pr_err("Unable to write s1 fifo length rc=%d\n", rc); + return rc; + } + } + + if (chip->dt.cfg_s2_fifo_length >= 0 && + chip->dt.cfg_s2_fifo_length <= MAX_FIFO_REGS) { + rc = qpnp_masked_write_base(chip, chip->base + + FIFO_LENGTH_REG, S2_FIFO_LENGTH_MASK, + chip->dt.cfg_s2_fifo_length + << S2_FIFO_LENGTH_SHIFT); + if (rc) { + pr_err("Unable to write s2 fifo length rc=%d\n", rc); + return rc; + } + } + + get_sample_interval(chip, S1_STATE, &interval[0]); + get_sample_interval(chip, S2_STATE, &interval[1]); + get_sample_count(chip, S1_STATE, &count[0]); + get_sample_count(chip, S2_STATE, &count[1]); + get_fifo_length(chip, S1_STATE, &fifo[0]); + get_fifo_length(chip, S2_STATE, &fifo[1]); + + /* Force the BMS state to S2 at boot-up */ + rc = force_fsm_state(chip, S2_STATE); + if (rc) { + pr_err("Unable to force S2 state rc=%d\n", rc); + return rc; + } + + rc = qpnp_read_wrapper(chip, &bms_en, chip->base + EN_CTL_REG, 1); + if (rc) { + pr_err("Unable to read BMS_EN state rc=%d\n", rc); + return rc; + } + + rc = update_fsm_state(chip); + if (rc) { + pr_err("Unable to read FSM state rc=%d\n", rc); + return rc; + } + + pr_info("BMS_EN=%d Sample_Interval-S1=[%d]S2=[%d] Sample_Count-S1=[%d]S2=[%d] Fifo_Length-S1=[%d]S2=[%d] FSM_state=%d\n", + !!bms_en, interval[0], interval[1], count[0], + count[1], fifo[0], fifo[1], + chip->current_fsm_state); + + return 0; +} + +static ssize_t vm_bms_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + int rc; + struct qpnp_bms_chip *chip = file->private_data; + + if (!chip->data_ready && (file->f_flags & O_NONBLOCK)) { + rc = -EAGAIN; + goto fail_read; + } + + rc = wait_event_interruptible(chip->bms_wait_q, chip->data_ready); + if (rc) { + pr_debug("wait failed! 
rc=%d\n", rc); + goto fail_read; + } + + if (!chip->data_ready) { + pr_debug("No Data, false wakeup\n"); + rc = -EFAULT; + goto fail_read; + } + + mutex_lock(&chip->bms_data_mutex); + + if (copy_to_user(buf, &chip->bms_data, sizeof(chip->bms_data))) { + pr_err("Failed in copy_to_user\n"); + mutex_unlock(&chip->bms_data_mutex); + rc = -EFAULT; + goto fail_read; + } + pr_debug("Data copied!!\n"); + chip->data_ready = false; + + mutex_unlock(&chip->bms_data_mutex); + /* wakelock-timeout for userspace to pick up */ + pm_wakeup_event(chip->dev, BMS_READ_TIMEOUT); + + return sizeof(chip->bms_data); + +fail_read: + pm_relax(chip->dev); + return rc; +} + +static int vm_bms_open(struct inode *inode, struct file *file) +{ + struct qpnp_bms_chip *chip = container_of(inode->i_cdev, + struct qpnp_bms_chip, bms_cdev); + + mutex_lock(&chip->bms_device_mutex); + + if (chip->bms_dev_open) { + pr_debug("BMS device already open\n"); + mutex_unlock(&chip->bms_device_mutex); + return -EBUSY; + } + + chip->bms_dev_open = true; + file->private_data = chip; + pr_debug("BMS device opened\n"); + + mutex_unlock(&chip->bms_device_mutex); + + return 0; +} + +static int vm_bms_release(struct inode *inode, struct file *file) +{ + struct qpnp_bms_chip *chip = container_of(inode->i_cdev, + struct qpnp_bms_chip, bms_cdev); + + mutex_lock(&chip->bms_device_mutex); + + chip->bms_dev_open = false; + pm_relax(chip->dev); + pr_debug("BMS device closed\n"); + + mutex_unlock(&chip->bms_device_mutex); + + return 0; +} + +static const struct file_operations bms_fops = { + .owner = THIS_MODULE, + .open = vm_bms_open, + .read = vm_bms_read, + .release = vm_bms_release, +}; + +static void bms_init_defaults(struct qpnp_bms_chip *chip) +{ + chip->data_ready = false; + chip->last_ocv_raw = OCV_UNINITIALIZED; + chip->battery_status = POWER_SUPPLY_STATUS_UNKNOWN; + chip->battery_present = -EINVAL; + chip->calculated_soc = -EINVAL; + chip->last_soc = -EINVAL; + chip->vbms_lv_wake_source.disabled = 1; + chip->vbms_cv_wake_source.disabled = 1; + chip->vbms_soc_wake_source.disabled = 1; + chip->ocv_at_100 = -EINVAL; + chip->prev_soc_uuc = -EINVAL; + chip->charge_cycles = 0; + chip->start_soc = 0; + chip->end_soc = 0; + chip->charge_increase = 0; +} + +#define REQUEST_IRQ(chip, rc, irq_name) \ +do { \ + rc = devm_request_threaded_irq(chip->dev, \ + chip->irq_name##_irq.irq, NULL, \ + bms_##irq_name##_irq_handler, \ + IRQF_TRIGGER_RISING | IRQF_ONESHOT, \ + #irq_name, chip); \ + if (rc < 0) \ + pr_err("Unable to request " #irq_name " irq: %d\n", rc);\ +} while (0) + +#define FIND_IRQ(chip, pdev, irq_name, rc) \ +do { \ + chip->irq_name##_irq.irq = of_irq_get_byname(child, \ + #irq_name); \ + if (chip->irq_name##_irq.irq < 0) { \ + rc = chip->irq_name##_irq.irq; \ + pr_err("Unable to get " #irq_name " irq rc=%d\n", rc); \ + } \ +} while (0) + +static int bms_request_irqs(struct qpnp_bms_chip *chip) +{ + int rc; + + REQUEST_IRQ(chip, rc, fifo_update_done); + if (rc < 0) + return rc; + + REQUEST_IRQ(chip, rc, fsm_state_change); + if (rc < 0) + return rc; + + /* Disable the state change IRQ */ + disable_bms_irq(&chip->fsm_state_change_irq); + enable_irq_wake(chip->fifo_update_done_irq.irq); + + return 0; +} + +static int bms_find_irqs(struct qpnp_bms_chip *chip, struct device_node *child) +{ + int rc = 0; + + FIND_IRQ(chip, child, fifo_update_done, rc); + if (rc < 0) + return rc; + FIND_IRQ(chip, child, fsm_state_change, rc); + if (rc < 0) + return rc; + + return 0; +} + + +static int64_t read_battery_id_uv(struct qpnp_bms_chip *chip) +{ + int rc, 
result; + + rc = iio_read_channel_processed(chip->lr_mux2_batt_id, &result); + if (rc < 0) { + pr_err("error reading batt id channel rc = %d\n", rc); + return rc; + } + + return result; +} + +static int show_bms_config(struct seq_file *m, void *data) +{ + struct qpnp_bms_chip *chip = m->private; + int s1_sample_interval, s2_sample_interval; + int s1_sample_count, s2_sample_count; + int s1_fifo_length, s2_fifo_length; + + get_sample_interval(chip, S1_STATE, &s1_sample_interval); + get_sample_interval(chip, S2_STATE, &s2_sample_interval); + get_sample_count(chip, S1_STATE, &s1_sample_count); + get_sample_count(chip, S2_STATE, &s2_sample_count); + get_fifo_length(chip, S1_STATE, &s1_fifo_length); + get_fifo_length(chip, S2_STATE, &s2_fifo_length); + + seq_printf(m, "r_conn_mohm\t=\t%d\n" + "v_cutoff_uv\t=\t%d\n" + "max_voltage_uv\t=\t%d\n" + "use_voltage_soc\t=\t%d\n" + "low_soc_calc_threshold\t=\t%d\n" + "low_soc_calculate_soc_ms\t=\t%d\n" + "low_voltage_threshold\t=\t%d\n" + "low_voltage_calculate_soc_ms\t=\t%d\n" + "calculate_soc_ms\t=\t%d\n" + "ignore_shutdown_soc\t=\t%d\n" + "shutdown_soc_valid_limit\t=\t%d\n" + "force_s3_on_suspend\t=\t%d\n" + "report_charger_eoc\t=\t%d\n" + "aging_compensation\t=\t%d\n" + "use_reported_soc\t=\t%d\n" + "s1_sample_interval_ms\t=\t%d\n" + "s2_sample_interval_ms\t=\t%d\n" + "s1_sample_count\t=\t%d\n" + "s2_sample_count\t=\t%d\n" + "s1_fifo_length\t=\t%d\n" + "s2_fifo_length\t=\t%d\n", + chip->dt.cfg_r_conn_mohm, + chip->dt.cfg_v_cutoff_uv, + chip->dt.cfg_max_voltage_uv, + chip->dt.cfg_use_voltage_soc, + chip->dt.cfg_low_soc_calc_threshold, + chip->dt.cfg_low_soc_calculate_soc_ms, + chip->dt.cfg_low_voltage_threshold, + chip->dt.cfg_low_voltage_calculate_soc_ms, + chip->dt.cfg_calculate_soc_ms, + chip->dt.cfg_ignore_shutdown_soc, + chip->dt.cfg_shutdown_soc_valid_limit, + chip->dt.cfg_force_s3_on_suspend, + chip->dt.cfg_report_charger_eoc, + chip->dt.cfg_battery_aging_comp, + chip->dt.cfg_use_reported_soc, + s1_sample_interval, + s2_sample_interval, + s1_sample_count, + s2_sample_count, + s1_fifo_length, + s2_fifo_length); + + return 0; +} + +static int bms_config_open(struct inode *inode, struct file *file) +{ + struct qpnp_bms_chip *chip = inode->i_private; + + return single_open(file, show_bms_config, chip); +} + +static const struct file_operations bms_config_debugfs_ops = { + .owner = THIS_MODULE, + .open = bms_config_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int show_bms_status(struct seq_file *m, void *data) +{ + struct qpnp_bms_chip *chip = m->private; + + seq_printf(m, "bms_psy_registered\t=\t%d\n" + "bms_dev_open\t=\t%d\n" + "warm_reset\t=\t%d\n" + "battery_status\t=\t%d\n" + "battery_present\t=\t%d\n" + "in_cv_state\t=\t%d\n" + "calculated_soc\t=\t%d\n" + "last_soc\t=\t%d\n" + "last_ocv_uv\t=\t%d\n" + "last_ocv_raw\t=\t%d\n" + "last_soc_unbound\t=\t%d\n" + "current_fsm_state\t=\t%d\n" + "current_now\t=\t%d\n" + "ocv_at_100\t=\t%d\n" + "low_voltage_ws_active\t=\t%d\n" + "cv_ws_active\t=\t%d\n", + chip->bms_psy_registered, + chip->bms_dev_open, + chip->warm_reset, + chip->battery_status, + chip->battery_present, + chip->in_cv_state, + chip->calculated_soc, + chip->last_soc, + chip->last_ocv_uv, + chip->last_ocv_raw, + chip->last_soc_unbound, + chip->current_fsm_state, + chip->current_now, + chip->ocv_at_100, + bms_wake_active(&chip->vbms_lv_wake_source), + bms_wake_active(&chip->vbms_cv_wake_source)); + return 0; +} + +static int bms_status_open(struct inode *inode, struct file *file) +{ + struct 
qpnp_bms_chip *chip = inode->i_private; + + return single_open(file, show_bms_status, chip); +} + +static const struct file_operations bms_status_debugfs_ops = { + .owner = THIS_MODULE, + .open = bms_status_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int show_bms_data(struct seq_file *m, void *data) +{ + struct qpnp_bms_chip *chip = m->private; + int i; + + mutex_lock(&chip->bms_data_mutex); + + seq_printf(m, "seq_num=%d\n", chip->bms_data.seq_num); + for (i = 0; i < chip->bms_data.num_fifo; i++) + seq_printf(m, "fifo_uv[%d]=%d sample_count=%d interval_ms=%d\n", + i, chip->bms_data.fifo_uv[i], + chip->bms_data.sample_count, + chip->bms_data.sample_interval_ms); + seq_printf(m, "acc_uv=%d sample_count=%d sample_interval=%d\n", + chip->bms_data.acc_uv, chip->bms_data.acc_count, + chip->bms_data.sample_interval_ms); + + mutex_unlock(&chip->bms_data_mutex); + + return 0; +} + +static int bms_data_open(struct inode *inode, struct file *file) +{ + struct qpnp_bms_chip *chip = inode->i_private; + + return single_open(file, show_bms_data, chip); +} + +static const struct file_operations bms_data_debugfs_ops = { + .owner = THIS_MODULE, + .open = bms_data_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#define BID_RPULL_OHM 100000 +#define VREF_BAT_THERM 1800 +static int get_battery_id(struct qpnp_bms_chip *chip, + int64_t battery_id_uv) +{ + int batt_id_mv; + int64_t denom; + + batt_id_mv = div_s64(battery_id_uv, 1000); + if (batt_id_mv == 0) { + pr_debug("batt_id_mv = 0 from ADC\n"); + return 0; + } + + denom = div64_s64(VREF_BAT_THERM * 1000, batt_id_mv) - 1000; + if (denom <= 0) { + /* batt id connector might be open, return 0 kohms */ + return 0; + } + + chip->batt_id_ohm = div64_u64(BID_RPULL_OHM * 1000 + denom / 2, denom); + + return 0; +} + +static int set_battery_data(struct qpnp_bms_chip *chip) +{ + int64_t battery_id_uv; + int rc = 0; + struct bms_battery_data *batt_data; + struct device_node *node; + + battery_id_uv = read_battery_id_uv(chip); + if (battery_id_uv < 0) { + pr_err("cannot read battery id_uv err = %lld\n", battery_id_uv); + return battery_id_uv; + } + + rc = get_battery_id(chip, battery_id_uv); + if (rc < 0) { + pr_err("Failed to calcualte battery-id rc=%d\n", rc); + return rc; + } + + node = of_parse_phandle(chip->pdev->dev.of_node, + "qcom,battery-data", 0); + pr_debug(" battery-id-uV=%lld batt_id=%d ohm\n", + battery_id_uv, chip->batt_id_ohm); + + if (!node) { + pr_err("No available batterydata\n"); + return -EINVAL; + } + + batt_data = devm_kzalloc(chip->dev, + sizeof(struct bms_battery_data), GFP_KERNEL); + if (!batt_data) + return -EINVAL; + + batt_data->fcc_temp_lut = devm_kzalloc(chip->dev, + sizeof(struct single_row_lut), GFP_KERNEL); + batt_data->pc_temp_ocv_lut = devm_kzalloc(chip->dev, + sizeof(struct pc_temp_ocv_lut), GFP_KERNEL); + batt_data->rbatt_sf_lut = devm_kzalloc(chip->dev, + sizeof(struct sf_lut), GFP_KERNEL); + batt_data->ibat_acc_lut = devm_kzalloc(chip->dev, + sizeof(struct ibat_temp_acc_lut), GFP_KERNEL); + + batt_data->max_voltage_uv = -1; + batt_data->cutoff_uv = -1; + batt_data->iterm_ua = -1; + + /* + * if the alloced luts are 0s, of_batterydata_read_data ignores + * them. 
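To make the fixed-point arithmetic in get_battery_id() above concrete (the ADC reading is made up), the pull-up divider solves to R_id = R_pull * V_id / (VREF - V_id):

/*
 * battery_id_uv = 600000  ->  batt_id_mv = 600
 * denom         = 1800 * 1000 / 600 - 1000 = 2000
 * batt_id_ohm   = (100000 * 1000 + 2000 / 2) / 2000 = 50000 ohm
 *
 * i.e. 600 mV across a 50 kohm battery-id resistor against the
 * 100 kohm pull-up to the 1.8 V reference, as expected.
 */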
+ */ + rc = of_batterydata_read_data(node, batt_data, battery_id_uv); + if (rc || !batt_data->pc_temp_ocv_lut + || !batt_data->fcc_temp_lut + || !batt_data->rbatt_sf_lut + || !batt_data->ibat_acc_lut) { + pr_err("battery data load failed\n"); + return rc; + } + + if (batt_data->pc_temp_ocv_lut == NULL) { + pr_err("temp ocv lut table has not been loaded\n"); + + return -EINVAL; + } + + /* check if ibat_acc_lut is valid */ + if (!batt_data->ibat_acc_lut->rows) { + pr_info("ibat_acc_lut not present\n"); + batt_data->ibat_acc_lut = NULL; + } + + /* Override battery properties if specified in the battery profile */ + if (batt_data->max_voltage_uv >= 0) + chip->dt.cfg_max_voltage_uv = batt_data->max_voltage_uv; + if (batt_data->cutoff_uv >= 0) + chip->dt.cfg_v_cutoff_uv = batt_data->cutoff_uv; + + chip->batt_data = batt_data; + + return 0; +} + +static int parse_pdev_dt_properties(struct qpnp_bms_chip *chip, + struct platform_device *pdev) +{ + struct device_node *child; + int rc; + unsigned int base; + + chip->dev = &(pdev->dev); + chip->pdev = pdev; + + if (of_get_available_child_count(pdev->dev.of_node) == 0) { + pr_err("no child nodes found\n"); + return -ENXIO; + } + + for_each_available_child_of_node(pdev->dev.of_node, child) { + rc = of_property_read_u32(child, "reg", &base); + if (rc < 0) { + dev_err(&pdev->dev, + "Couldn't find reg in node = %s rc = %d\n", + child->full_name, rc); + return -ENXIO; + } + + pr_debug("Node name = %s\n", child->name); + + if (strcmp("qcom,batt-pres-status", + child->name) == 0) { + chip->batt_pres_addr = base; + continue; + } + + if (strcmp("qcom,qpnp-chg-pres", + child->name) == 0) { + chip->chg_pres_addr = base; + continue; + } + + chip->base = base; + rc = bms_find_irqs(chip, child); + if (rc) { + pr_err("Could not find irqs rc=%d\n", rc); + return rc; + } + } + + if (chip->base == 0) { + dev_err(&pdev->dev, "BMS peripheral was not registered\n"); + return -EINVAL; + } + + pr_debug("bms-base=0x%04x bat-pres-reg=0x%04x qpnp-chg-pres=0x%04x\n", + chip->base, chip->batt_pres_addr, chip->chg_pres_addr); + + return 0; +} + +#define PROP_READ(chip_prop, qpnp_pdev_property, retval) \ +do { \ + if (retval) \ + break; \ + retval = of_property_read_u32(chip->pdev->dev.of_node, \ + "qcom," qpnp_pdev_property, \ + &chip->dt.chip_prop); \ + if (retval) { \ + pr_err("Error reading " #qpnp_pdev_property \ + " property %d\n", retval); \ + } \ +} while (0) + +#define PROP_READ_OPTIONAL(chip_prop, qpnp_pdev_property, retval) \ +do { \ + retval = of_property_read_u32(chip->pdev->dev.of_node, \ + "qcom," qpnp_pdev_property, \ + &chip->dt.chip_prop); \ + if (retval) \ + chip->dt.chip_prop = -EINVAL; \ +} while (0) + +static int parse_bms_dt_properties(struct qpnp_bms_chip *chip) +{ + int rc = 0; + + PROP_READ(cfg_v_cutoff_uv, "v-cutoff-uv", rc); + PROP_READ(cfg_max_voltage_uv, "max-voltage-uv", rc); + PROP_READ(cfg_r_conn_mohm, "r-conn-mohm", rc); + PROP_READ(cfg_shutdown_soc_valid_limit, + "shutdown-soc-valid-limit", rc); + PROP_READ(cfg_low_soc_calc_threshold, + "low-soc-calculate-soc-threshold", rc); + PROP_READ(cfg_low_soc_calculate_soc_ms, + "low-soc-calculate-soc-ms", rc); + PROP_READ(cfg_low_voltage_calculate_soc_ms, + "low-voltage-calculate-soc-ms", rc); + PROP_READ(cfg_calculate_soc_ms, "calculate-soc-ms", rc); + PROP_READ(cfg_low_voltage_threshold, "low-voltage-threshold", rc); + + if (rc) { + pr_err("Missing required properties rc=%d\n", rc); + return rc; + } + + PROP_READ_OPTIONAL(cfg_s1_sample_interval_ms, + "s1-sample-interval-ms", rc); + 
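For clarity, this is roughly what one required-property read expands to, e.g. PROP_READ(cfg_v_cutoff_uv, "v-cutoff-uv", rc); once rc is non-zero, the remaining required reads are skipped and the single check above reports the failure:

do {
	if (rc)				/* an earlier required read already failed */
		break;
	rc = of_property_read_u32(chip->pdev->dev.of_node,
				  "qcom,v-cutoff-uv",
				  &chip->dt.cfg_v_cutoff_uv);
	if (rc)
		pr_err("Error reading \"v-cutoff-uv\" property %d\n", rc);
} while (0);

PROP_READ_OPTIONAL, by contrast, reads unconditionally and simply parks -EINVAL in the field when the property is absent.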
PROP_READ_OPTIONAL(cfg_s2_sample_interval_ms, + "s2-sample-interval-ms", rc); + PROP_READ_OPTIONAL(cfg_s1_sample_count, "s1-sample-count", rc); + PROP_READ_OPTIONAL(cfg_s2_sample_count, "s2-sample-count", rc); + PROP_READ_OPTIONAL(cfg_s1_fifo_length, "s1-fifo-length", rc); + PROP_READ_OPTIONAL(cfg_s2_fifo_length, "s2-fifo-length", rc); + PROP_READ_OPTIONAL(cfg_s3_ocv_tol_uv, "s3-ocv-tolerence-uv", rc); + PROP_READ_OPTIONAL(cfg_low_soc_fifo_length, + "low-soc-fifo-length", rc); + PROP_READ_OPTIONAL(cfg_soc_resume_limit, "resume-soc", rc); + PROP_READ_OPTIONAL(cfg_low_temp_threshold, + "low-temp-threshold", rc); + if (rc) + chip->dt.cfg_low_temp_threshold = 0; + + PROP_READ_OPTIONAL(cfg_ibat_avg_samples, + "ibat-avg-samples", rc); + if (rc || (chip->dt.cfg_ibat_avg_samples <= 0) || + (chip->dt.cfg_ibat_avg_samples > IAVG_SAMPLES)) + chip->dt.cfg_ibat_avg_samples = IAVG_SAMPLES; + + chip->dt.cfg_ignore_shutdown_soc = of_property_read_bool( + chip->pdev->dev.of_node, "qcom,ignore-shutdown-soc"); + chip->dt.cfg_use_voltage_soc = of_property_read_bool( + chip->pdev->dev.of_node, "qcom,use-voltage-soc"); + chip->dt.cfg_force_s3_on_suspend = of_property_read_bool( + chip->pdev->dev.of_node, "qcom,force-s3-on-suspend"); + chip->dt.cfg_report_charger_eoc = of_property_read_bool( + chip->pdev->dev.of_node, "qcom,report-charger-eoc"); + chip->dt.cfg_disable_bms = of_property_read_bool( + chip->pdev->dev.of_node, "qcom,disable-bms"); + chip->dt.cfg_force_bms_active_on_charger = of_property_read_bool( + chip->pdev->dev.of_node, + "qcom,force-bms-active-on-charger"); + chip->dt.cfg_battery_aging_comp = of_property_read_bool( + chip->pdev->dev.of_node, "qcom,batt-aging-comp"); + chip->dt.cfg_use_reported_soc = of_property_read_bool( + chip->pdev->dev.of_node, "qcom,use-reported-soc"); + pr_debug("v_cutoff_uv=%d, max_v=%d\n", chip->dt.cfg_v_cutoff_uv, + chip->dt.cfg_max_voltage_uv); + pr_debug("r_conn=%d shutdown_soc_valid_limit=%d low_temp_threshold=%d ibat_avg_samples=%d\n", + chip->dt.cfg_r_conn_mohm, + chip->dt.cfg_shutdown_soc_valid_limit, + chip->dt.cfg_low_temp_threshold, + chip->dt.cfg_ibat_avg_samples); + pr_debug("ignore_shutdown_soc=%d, use_voltage_soc=%d low_soc_fifo_length=%d\n", + chip->dt.cfg_ignore_shutdown_soc, + chip->dt.cfg_use_voltage_soc, + chip->dt.cfg_low_soc_fifo_length); + pr_debug("force-s3-on-suspend=%d report-charger-eoc=%d disable-bms=%d disable-suspend-on-usb=%d aging_compensation=%d\n", + chip->dt.cfg_force_s3_on_suspend, + chip->dt.cfg_report_charger_eoc, + chip->dt.cfg_disable_bms, + chip->dt.cfg_force_bms_active_on_charger, + chip->dt.cfg_battery_aging_comp); + pr_debug("use-reported-soc is %d\n", + chip->dt.cfg_use_reported_soc); + + return 0; +} + +static int bms_get_adc(struct qpnp_bms_chip *chip, + struct platform_device *pdev) +{ + int rc = 0; + + chip->ref_625mv = iio_channel_get(&pdev->dev, "ref_625mv"); + if (IS_ERR(chip->ref_625mv)) { + if (PTR_ERR(chip->ref_625mv) != -EPROBE_DEFER) + pr_err("ref_625mv unavailable %ld\n", + PTR_ERR(chip->ref_625mv)); + rc = PTR_ERR(chip->ref_625mv); + chip->ref_625mv = NULL; + return rc; + } + + chip->ref_125v = iio_channel_get(&pdev->dev, "ref_1250v"); + if (IS_ERR(chip->ref_125v)) { + if (PTR_ERR(chip->ref_125v) != -EPROBE_DEFER) + pr_err("ref_125v unavailable %ld\n", + PTR_ERR(chip->ref_125v)); + rc = PTR_ERR(chip->ref_125v); + chip->ref_125v = NULL; + return rc; + } + + chip->vbat_sns = iio_channel_get(&pdev->dev, "vbat_sns"); + if (IS_ERR(chip->vbat_sns)) { + if (PTR_ERR(chip->vbat_sns) != -EPROBE_DEFER) + pr_err("vbat_sns 
unavailable %ld\n", + PTR_ERR(chip->vbat_sns)); + rc = PTR_ERR(chip->vbat_sns); + chip->vbat_sns = NULL; + return rc; + } + + chip->lr_mux1_batt_therm = iio_channel_get(&pdev->dev, "batt_therm"); + if (IS_ERR(chip->lr_mux1_batt_therm)) { + if (PTR_ERR(chip->lr_mux1_batt_therm) != -EPROBE_DEFER) + pr_err("lr_mux1_batt_therm unavailable %ld\n", + PTR_ERR(chip->lr_mux1_batt_therm)); + rc = PTR_ERR(chip->lr_mux1_batt_therm); + chip->lr_mux1_batt_therm = NULL; + return rc; + } + + chip->die_temp = iio_channel_get(&pdev->dev, "die_temp"); + if (IS_ERR(chip->die_temp)) { + if (PTR_ERR(chip->die_temp) != -EPROBE_DEFER) + pr_err("die_temp unavailable %ld\n", + PTR_ERR(chip->die_temp)); + rc = PTR_ERR(chip->die_temp); + chip->die_temp = NULL; + return rc; + } + + chip->lr_mux2_batt_id = iio_channel_get(&pdev->dev, "batt_id"); + if (IS_ERR(chip->lr_mux2_batt_id)) { + if (PTR_ERR(chip->lr_mux2_batt_id) != -EPROBE_DEFER) + pr_err("lr_mux2_batt_id unavailable %ld\n", + PTR_ERR(chip->lr_mux2_batt_id)); + rc = PTR_ERR(chip->lr_mux2_batt_id); + chip->lr_mux2_batt_id = NULL; + return rc; + } + + chip->adc_tm_dev = qpnp_get_adc_tm(&pdev->dev, "bms"); + if (IS_ERR(chip->adc_tm_dev)) { + rc = PTR_ERR(chip->adc_tm_dev); + if (rc == -EPROBE_DEFER) + pr_err("adc-tm not found - defer probe rc=%d\n", rc); + else + pr_err("adc-tm property missing, rc=%d\n", rc); + } + + return rc; +} + +static int register_bms_char_device(struct qpnp_bms_chip *chip) +{ + int rc; + + rc = alloc_chrdev_region(&chip->dev_no, 0, 1, "vm_bms"); + if (rc) { + pr_err("Unable to allocate chrdev rc=%d\n", rc); + return rc; + } + cdev_init(&chip->bms_cdev, &bms_fops); + rc = cdev_add(&chip->bms_cdev, chip->dev_no, 1); + if (rc) { + pr_err("Unable to add bms_cdev rc=%d\n", rc); + goto unregister_chrdev; + } + + chip->bms_class = class_create(THIS_MODULE, "vm_bms"); + if (IS_ERR_OR_NULL(chip->bms_class)) { + pr_err("Fail to create bms class\n"); + rc = -EINVAL; + goto delete_cdev; + } + chip->bms_device = device_create(chip->bms_class, + NULL, chip->dev_no, + NULL, "vm_bms"); + if (IS_ERR(chip->bms_device)) { + pr_err("Fail to create bms_device device\n"); + rc = -EINVAL; + goto delete_cdev; + } + + return 0; + +delete_cdev: + cdev_del(&chip->bms_cdev); +unregister_chrdev: + unregister_chrdev_region(chip->dev_no, 1); + return rc; +} + +static int qpnp_vm_bms_probe(struct platform_device *pdev) +{ + struct qpnp_bms_chip *chip; + struct device_node *revid_dev_node; + struct power_supply_config bms_psy_cfg; + int rc, vbatt = 0; + + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!chip->regmap) { + dev_err(&pdev->dev, "Couldn't get parent's regmap\n"); + return -EINVAL; + } + + rc = regmap_read(chip->regmap, + VADC1_LC_USR_BASE + INT_TEST_VAL_OFFSET, &chip->fab_id); + if (rc) { + pr_err("read failed rc=%d\n", rc); + return rc; + } + + rc = bms_get_adc(chip, pdev); + if (rc < 0) { + pr_err("Failed to get adc rc=%d\n", rc); + return rc; + } + + revid_dev_node = of_parse_phandle(pdev->dev.of_node, + "qcom,pmic-revid", 0); + if (!revid_dev_node) { + pr_err("Missing qcom,pmic-revid property\n"); + return -EINVAL; + } + + chip->revid_data = get_revid_data(revid_dev_node); + if (IS_ERR(chip->revid_data)) { + pr_err("revid error rc = %ld\n", PTR_ERR(chip->revid_data)); + return -EINVAL; + } + if ((chip->revid_data->pmic_subtype == PM8916_SUBTYPE) && + chip->revid_data->rev4 == PM8916_V2P0_REV4) + chip->workaround_flag |= WRKARND_PON_OCV_COMP; + + rc = 
qpnp_pon_is_warm_reset(); + if (rc < 0) { + pr_err("Error reading warm reset status rc=%d\n", rc); + return rc; + } + chip->warm_reset = !!rc; + + rc = parse_pdev_dt_properties(chip, pdev); + if (rc) { + pr_err("Error registering pdev resource rc=%d\n", rc); + return rc; + } + + rc = parse_bms_dt_properties(chip); + if (rc) { + pr_err("Unable to read all bms properties, rc = %d\n", rc); + return rc; + } + + if (chip->dt.cfg_disable_bms) { + pr_info("VMBMS disabled (disable-bms = 1)\n"); + rc = qpnp_masked_write_base(chip, chip->base + EN_CTL_REG, + BMS_EN_BIT, 0); + if (rc) + pr_err("Unable to disable VMBMS rc=%d\n", rc); + return -ENODEV; + } + + rc = qpnp_read_wrapper(chip, chip->revision, + chip->base + REVISION1_REG, 2); + if (rc) { + pr_err("Error reading version register rc=%d\n", rc); + return rc; + } + + pr_debug("BMS version: %hhu.%hhu\n", + chip->revision[1], chip->revision[0]); + + dev_set_drvdata(&pdev->dev, chip); + device_init_wakeup(&pdev->dev, 1); + mutex_init(&chip->bms_data_mutex); + mutex_init(&chip->bms_device_mutex); + mutex_init(&chip->last_soc_mutex); + mutex_init(&chip->state_change_mutex); + init_waitqueue_head(&chip->bms_wait_q); + + /* read battery-id and select the battery profile */ + rc = set_battery_data(chip); + if (rc) { + pr_err("Unable to read battery data %d\n", rc); + goto fail_init; + } + + /* set the battery profile */ + rc = config_battery_data(chip->batt_data); + if (rc) { + pr_err("Unable to config battery data %d\n", rc); + goto fail_init; + } + + chip->vbms_lv_wake_source.source = wakeup_source_register(NULL, + "vbms_lv_wake"); + chip->vbms_cv_wake_source.source = wakeup_source_register(NULL, + "vbms_cv_wake"); + chip->vbms_soc_wake_source.source = wakeup_source_register(NULL, + "vbms_soc_wake"); + INIT_DELAYED_WORK(&chip->monitor_soc_work, monitor_soc_work); + + bms_init_defaults(chip); + bms_load_hw_defaults(chip); + + if (is_battery_present(chip)) { + rc = setup_vbat_monitoring(chip); + if (rc) { + pr_err("fail to configure vbat monitoring rc=%d\n", + rc); + goto fail_setup; + } + } + + rc = bms_request_irqs(chip); + if (rc) { + pr_err("error requesting bms irqs, rc = %d\n", rc); + goto fail_irq; + } + + battery_insertion_check(chip); + battery_status_check(chip); + + /* character device to pass data to the userspace */ + rc = register_bms_char_device(chip); + if (rc) { + pr_err("Unable to regiter '/dev/vm_bms' rc=%d\n", rc); + goto fail_bms_device; + } + + the_chip = chip; + calculate_initial_soc(chip); + if (chip->dt.cfg_battery_aging_comp) { + rc = calculate_initial_aging_comp(chip); + if (rc) + pr_err("Unable to calculate initial aging data rc=%d\n", + rc); + } + + /* setup & register the battery power supply */ + chip->bms_psy_d.name = "bms"; + chip->bms_psy_d.type = POWER_SUPPLY_TYPE_BMS; + chip->bms_psy_d.properties = bms_power_props; + chip->bms_psy_d.num_properties = ARRAY_SIZE(bms_power_props); + chip->bms_psy_d.get_property = qpnp_vm_bms_power_get_property; + chip->bms_psy_d.set_property = qpnp_vm_bms_power_set_property; + chip->bms_psy_d.external_power_changed = qpnp_vm_bms_ext_power_changed; + chip->bms_psy_d.property_is_writeable = + qpnp_vm_bms_property_is_writeable; + + bms_psy_cfg.supplied_to = qpnp_vm_bms_supplicants; + bms_psy_cfg.num_supplicants = ARRAY_SIZE(qpnp_vm_bms_supplicants); + bms_psy_cfg.drv_data = chip; + bms_psy_cfg.of_node = NULL; + bms_psy_cfg.fwnode = NULL; + + chip->bms_psy = devm_power_supply_register(chip->dev, + &chip->bms_psy_d, + &bms_psy_cfg); + if (IS_ERR(chip->bms_psy)) { + 
pr_err("power_supply_register bms failed rc = %ld\n", + PTR_ERR(chip->bms_psy)); + goto fail_psy; + } + chip->bms_psy_registered = true; + + chip->nb.notifier_call = bms_notifier_cb; + rc = power_supply_reg_notifier(&chip->nb); + if (rc < 0) + pr_err("Failed register psy notifier rc = %d\n", rc); + + rc = get_battery_voltage(chip, &vbatt); + if (rc) { + pr_err("error reading vbat_sns adc channel, rc=%d\n", rc); + goto fail_get_vtg; + } + + chip->debug_root = debugfs_create_dir("qpnp_vmbms", NULL); + if (!chip->debug_root) + pr_err("Couldn't create debug dir\n"); + + if (chip->debug_root) { + struct dentry *ent; + + ent = debugfs_create_file("bms_data", S_IFREG | 0444, + chip->debug_root, chip, + &bms_data_debugfs_ops); + if (!ent) + pr_err("Couldn't create bms_data debug file\n"); + + ent = debugfs_create_file("bms_config", S_IFREG | 0444, + chip->debug_root, chip, + &bms_config_debugfs_ops); + if (!ent) + pr_err("Couldn't create bms_config debug file\n"); + + ent = debugfs_create_file("bms_status", S_IFREG | 0444, + chip->debug_root, chip, + &bms_status_debugfs_ops); + if (!ent) + pr_err("Couldn't create bms_status debug file\n"); + } + + schedule_delayed_work(&chip->monitor_soc_work, 0); + pr_info("probe success: soc=%d vbatt=%d ocv=%d warm_reset=%d\n", + get_prop_bms_capacity(chip), vbatt, + chip->last_ocv_uv, chip->warm_reset); + + return rc; + +fail_get_vtg: + power_supply_unregister(chip->bms_psy); +fail_psy: + device_destroy(chip->bms_class, chip->dev_no); + cdev_del(&chip->bms_cdev); + unregister_chrdev_region(chip->dev_no, 1); +fail_bms_device: + chip->bms_psy_registered = false; +fail_irq: + reset_vbat_monitoring(chip); +fail_setup: + wakeup_source_unregister(chip->vbms_lv_wake_source.source); + wakeup_source_unregister(chip->vbms_cv_wake_source.source); + wakeup_source_unregister(chip->vbms_soc_wake_source.source); +fail_init: + mutex_destroy(&chip->bms_data_mutex); + mutex_destroy(&chip->last_soc_mutex); + mutex_destroy(&chip->state_change_mutex); + mutex_destroy(&chip->bms_device_mutex); + the_chip = NULL; + + return rc; +} + +static int qpnp_vm_bms_remove(struct platform_device *pdev) +{ + struct qpnp_bms_chip *chip = dev_get_drvdata(&pdev->dev); + + cancel_delayed_work_sync(&chip->monitor_soc_work); + debugfs_remove_recursive(chip->debug_root); + device_destroy(chip->bms_class, chip->dev_no); + cdev_del(&chip->bms_cdev); + unregister_chrdev_region(chip->dev_no, 1); + reset_vbat_monitoring(chip); + wakeup_source_unregister(chip->vbms_lv_wake_source.source); + wakeup_source_unregister(chip->vbms_cv_wake_source.source); + wakeup_source_unregister(chip->vbms_soc_wake_source.source); + mutex_destroy(&chip->bms_data_mutex); + mutex_destroy(&chip->last_soc_mutex); + mutex_destroy(&chip->state_change_mutex); + mutex_destroy(&chip->bms_device_mutex); + power_supply_unreg_notifier(&chip->nb); + power_supply_unregister(chip->bms_psy); + dev_set_drvdata(&pdev->dev, NULL); + the_chip = NULL; + + return 0; +} + +static void process_suspend_data(struct qpnp_bms_chip *chip) +{ + int rc; + + mutex_lock(&chip->bms_data_mutex); + + chip->suspend_data_valid = false; + + memset(&chip->bms_data, 0, sizeof(chip->bms_data)); + + rc = read_and_populate_fifo_data(chip); + if (rc) + pr_err("Unable to read FIFO data rc=%d\n", rc); + + rc = read_and_populate_acc_data(chip); + if (rc) + pr_err("Unable to read ACC_SD data rc=%d\n", rc); + + rc = clear_fifo_acc_data(chip); + if (rc) + pr_err("Unable to clear FIFO/ACC data rc=%d\n", rc); + + if (chip->bms_data.num_fifo || chip->bms_data.acc_count) { + 
pr_debug("suspend data valid\n"); + chip->suspend_data_valid = true; + } + + mutex_unlock(&chip->bms_data_mutex); +} + +static void process_resume_data(struct qpnp_bms_chip *chip) +{ + int rc, batt_temp = 0; + int old_ocv = 0; + bool ocv_updated = false; + + rc = get_batt_therm(chip, &batt_temp); + if (rc < 0) { + pr_err("Unable to read batt temp, using default=%d\n", + BMS_DEFAULT_TEMP); + batt_temp = BMS_DEFAULT_TEMP; + } + + mutex_lock(&chip->bms_data_mutex); + /* + * We can get a h/w OCV update when the sleep_b + * is low, which is possible when APPS is suspended. + * So check for an OCV update only in bms_resume + */ + old_ocv = chip->last_ocv_uv; + rc = read_and_update_ocv(chip, batt_temp, false); + if (rc) + pr_err("Unable to read/upadate OCV rc=%d\n", rc); + + if (old_ocv != chip->last_ocv_uv) { + ocv_updated = true; + /* new OCV, clear suspended data */ + chip->suspend_data_valid = false; + memset(&chip->bms_data, 0, sizeof(chip->bms_data)); + chip->calculated_soc = lookup_soc_ocv(chip, + chip->last_ocv_uv, batt_temp); + pr_debug("OCV in sleep SOC=%d\n", chip->calculated_soc); + chip->last_soc_unbound = true; + chip->voltage_soc_uv = chip->last_ocv_uv; + pr_debug("update bms_psy\n"); + power_supply_changed(chip->bms_psy); + } + + if (ocv_updated || chip->suspend_data_valid) { + /* there is data to be sent */ + pr_debug("ocv_updated=%d suspend_data_valid=%d\n", + ocv_updated, chip->suspend_data_valid); + chip->bms_data.seq_num = chip->seq_num++; + dump_bms_data(__func__, chip); + + chip->data_ready = true; + wake_up_interruptible(&chip->bms_wait_q); + if (chip->bms_dev_open) + pm_stay_awake(chip->dev); + + } + chip->suspend_data_valid = false; + mutex_unlock(&chip->bms_data_mutex); +} + +static int bms_suspend(struct device *dev) +{ + struct qpnp_bms_chip *chip = dev_get_drvdata(dev); + bool battery_charging = is_battery_charging(chip); + bool hi_power_state = is_hi_power_state_requested(chip); + bool charger_present = is_charger_present(chip); + bool bms_suspend_config; + + /* + * Keep BMS FSM active if 'cfg_force_bms_active_on_charger' property + * is present and charger inserted. This ensures that recharge + * starts once battery SOC falls below resume_soc. + */ + bms_suspend_config = chip->dt.cfg_force_bms_active_on_charger + && charger_present; + + chip->apply_suspend_config = false; + if (!battery_charging && !hi_power_state && !bms_suspend_config) + chip->apply_suspend_config = true; + + pr_debug("battery_charging=%d power_state=%s hi_power_state=0x%x apply_suspend_config=%d bms_suspend_config=%d usb_present=%d\n", + battery_charging, hi_power_state ? "hi" : "low", + chip->hi_power_state, + chip->apply_suspend_config, bms_suspend_config, + charger_present); + + if (chip->apply_suspend_config) { + if (chip->dt.cfg_force_s3_on_suspend) { + disable_bms_irq(&chip->fifo_update_done_irq); + pr_debug("Forcing S3 state\n"); + mutex_lock(&chip->state_change_mutex); + force_fsm_state(chip, S3_STATE); + mutex_unlock(&chip->state_change_mutex); + /* Store accumulated data if any */ + process_suspend_data(chip); + } + } + + cancel_delayed_work_sync(&chip->monitor_soc_work); + + return 0; +} + +static int bms_resume(struct device *dev) +{ + u8 state = 0; + int rc, monitor_soc_delay = 0; + unsigned long tm_now_sec; + struct qpnp_bms_chip *chip = dev_get_drvdata(dev); + + if (chip->apply_suspend_config) { + if (chip->dt.cfg_force_s3_on_suspend) { + /* + * Update the state to S2 only if we are in S3. 
There is + * a possibility of being in S2 if we resumed on + * a charger insertion + */ + mutex_lock(&chip->state_change_mutex); + rc = get_fsm_state(chip, &state); + if (rc) + pr_err("Unable to get FSM state rc=%d\n", rc); + if (rc || (state == S3_STATE)) { + pr_debug("Unforcing S3 state, setting S2 state\n"); + force_fsm_state(chip, S2_STATE); + } + mutex_unlock(&chip->state_change_mutex); + enable_bms_irq(&chip->fifo_update_done_irq); + /* + * if we were charging while suspended, we will + * be woken up by the fifo done interrupt and no + * additional processing is needed. + */ + process_resume_data(chip); + } + } + + /* Start monitor_soc_work based on when it last executed */ + rc = get_current_time(&tm_now_sec); + if (rc) { + pr_err("Could not read current time: %d\n", rc); + } else { + monitor_soc_delay = get_calculation_delay_ms(chip) - + ((tm_now_sec - chip->tm_sec) * 1000); + monitor_soc_delay = max(0, monitor_soc_delay); + } + pr_debug("monitor_soc_delay_sec=%d tm_now_sec=%ld chip->tm_sec=%ld\n", + monitor_soc_delay / 1000, tm_now_sec, chip->tm_sec); + schedule_delayed_work(&chip->monitor_soc_work, + msecs_to_jiffies(monitor_soc_delay)); + + return 0; +} + +static const struct dev_pm_ops qpnp_vm_bms_pm_ops = { + .suspend = bms_suspend, + .resume = bms_resume, +}; + +static const struct of_device_id qpnp_vm_bms_match_table[] = { + { .compatible = QPNP_VM_BMS_DEV_NAME }, + {} +}; + +static struct platform_driver qpnp_vm_bms_driver = { + .probe = qpnp_vm_bms_probe, + .remove = qpnp_vm_bms_remove, + .driver = { + .name = QPNP_VM_BMS_DEV_NAME, + .of_match_table = qpnp_vm_bms_match_table, + .pm = &qpnp_vm_bms_pm_ops, + }, +}; +module_platform_driver(qpnp_vm_bms_driver); + +MODULE_DESCRIPTION("QPNP VM-BMS Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" QPNP_VM_BMS_DEV_NAME); diff --git a/drivers/power/supply/qcom/smb1360-charger-fg.c b/drivers/power/supply/qcom/smb1360-charger-fg.c new file mode 100644 index 000000000000..afbe825003ce --- /dev/null +++ b/drivers/power/supply/qcom/smb1360-charger-fg.c @@ -0,0 +1,5444 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2013-2015, 2018-2021, The Linux Foundation. All rights reserved. 
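The resume path above reschedules monitor_soc_work relative to when it last ran; with an assumed calculation period (get_calculation_delay_ms() depends on configuration not shown here):

/*
 * Assume get_calculation_delay_ms() == 20000 ms:
 *   last run 15 s before resume -> delay = 20000 - 15000 = 5000 ms
 *   suspended for 60 s          -> 20000 - 60000 < 0, clamped to 0,
 *                                  so the work runs immediately
 */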
+ */ + +#define pr_fmt(fmt) "SMB:%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define _SMB1360_MASK(BITS, POS) \ + ((unsigned char)(((1 << (BITS)) - 1) << (POS))) +#define SMB1360_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \ + _SMB1360_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, \ + (RIGHT_BIT_POS)) + +/* Charger Registers */ +#define CFG_BATT_CHG_REG 0x00 +#define CHG_ITERM_MASK SMB1360_MASK(2, 0) +#define CHG_ITERM_25MA 0x0 +#define CHG_ITERM_200MA 0x7 +#define RECHG_MV_MASK SMB1360_MASK(6, 5) +#define RECHG_MV_SHIFT 5 +#define OTG_CURRENT_MASK SMB1360_MASK(4, 3) +#define OTG_CURRENT_SHIFT 3 + +#define CFG_BATT_CHG_ICL_REG 0x05 +#define AC_INPUT_ICL_PIN_BIT BIT(7) +#define AC_INPUT_PIN_HIGH_BIT BIT(6) +#define RESET_STATE_USB_500 BIT(5) +#define INPUT_CURR_LIM_MASK SMB1360_MASK(3, 0) +#define INPUT_CURR_LIM_300MA 0x0 + +#define CFG_GLITCH_FLT_REG 0x06 +#define AICL_ENABLED_BIT BIT(0) +#define INPUT_UV_GLITCH_FLT_20MS_BIT BIT(7) + +#define CFG_CHG_MISC_REG 0x7 +#define CHG_EN_BY_PIN_BIT BIT(7) +#define CHG_EN_ACTIVE_LOW_BIT BIT(6) +#define PRE_TO_FAST_REQ_CMD_BIT BIT(5) +#define CFG_BAT_OV_ENDS_CHG_CYC BIT(4) +#define CHG_CURR_TERM_DIS_BIT BIT(3) +#define CFG_AUTO_RECHG_DIS_BIT BIT(2) +#define CFG_CHG_INHIBIT_EN_BIT BIT(0) + +#define CFG_CHG_FUNC_CTRL_REG 0x08 +#define CHG_RECHG_THRESH_FG_SRC_BIT BIT(1) + +#define CFG_STAT_CTRL_REG 0x09 +#define CHG_STAT_IRQ_ONLY_BIT BIT(4) +#define CHG_TEMP_CHG_ERR_BLINK_BIT BIT(3) +#define CHG_STAT_ACTIVE_HIGH_BIT BIT(1) +#define CHG_STAT_DISABLE_BIT BIT(0) + +#define CFG_SFY_TIMER_CTRL_REG 0x0A +#define SAFETY_TIME_DISABLE_BIT BIT(5) +#define SAFETY_TIME_MINUTES_SHIFT 2 +#define SAFETY_TIME_MINUTES_MASK SMB1360_MASK(3, 2) + +#define CFG_BATT_MISSING_REG 0x0D +#define BATT_MISSING_SRC_THERM_BIT BIT(1) + +#define CFG_FG_BATT_CTRL_REG 0x0E +#define CFG_FG_OTP_BACK_UP_ENABLE BIT(7) +#define BATT_ID_ENABLED_BIT BIT(5) +#define CHG_BATT_ID_FAIL BIT(4) +#define BATT_ID_FAIL_SELECT_PROFILE BIT(3) +#define BATT_PROFILE_SELECT_MASK SMB1360_MASK(3, 0) +#define BATT_PROFILEA_MASK 0x0 +#define BATT_PROFILEB_MASK 0xF + +#define IRQ_CFG_REG 0x0F +#define IRQ_BAT_HOT_COLD_HARD_BIT BIT(7) +#define IRQ_BAT_HOT_COLD_SOFT_BIT BIT(6) +#define IRQ_DCIN_UV_BIT BIT(2) +#define IRQ_AICL_DONE_BIT BIT(1) +#define IRQ_INTERNAL_TEMPERATURE_BIT BIT(0) + +#define IRQ2_CFG_REG 0x10 +#define IRQ2_SAFETY_TIMER_BIT BIT(7) +#define IRQ2_CHG_ERR_BIT BIT(6) +#define IRQ2_CHG_PHASE_CHANGE_BIT BIT(4) +#define IRQ2_POWER_OK_BIT BIT(2) +#define IRQ2_BATT_MISSING_BIT BIT(1) +#define IRQ2_VBAT_LOW_BIT BIT(0) + +#define IRQ3_CFG_REG 0x11 +#define IRQ3_FG_ACCESS_OK_BIT BIT(6) +#define IRQ3_SOC_CHANGE_BIT BIT(4) +#define IRQ3_SOC_MIN_BIT BIT(3) +#define IRQ3_SOC_MAX_BIT BIT(2) +#define IRQ3_SOC_EMPTY_BIT BIT(1) +#define IRQ3_SOC_FULL_BIT BIT(0) + +#define CHG_CURRENT_REG 0x13 +#define FASTCHG_CURR_MASK SMB1360_MASK(4, 2) +#define FASTCHG_CURR_SHIFT 2 + +#define CHG_CMP_CFG 0x14 +#define JEITA_COMP_CURR_MASK SMB1360_MASK(3, 0) +#define JEITA_COMP_EN_MASK SMB1360_MASK(7, 4) +#define JEITA_COMP_EN_SHIFT 4 +#define JEITA_COMP_EN_BIT SMB1360_MASK(7, 4) +#define BATT_CHG_FLT_VTG_REG 0x15 +#define VFLOAT_MASK SMB1360_MASK(6, 0) +#define CFG_FVC_REG 0x16 +#define FLT_VTG_COMP_MASK SMB1360_MASK(6, 0) + +#define SHDN_CTRL_REG 0x1A +#define SHDN_CMD_USE_BIT BIT(1) +#define SHDN_CMD_POLARITY_BIT BIT(2) + +#define 
CURRENT_GAIN_LSB_REG 0x1D +#define CURRENT_GAIN_MSB_REG 0x1E + +/* Command Registers */ +#define CMD_I2C_REG 0x40 +#define ALLOW_VOLATILE_BIT BIT(6) +#define FG_ACCESS_ENABLED_BIT BIT(5) +#define FG_RESET_BIT BIT(4) +#define CYCLE_STRETCH_CLEAR_BIT BIT(3) + +#define CMD_IL_REG 0x41 +#define USB_CTRL_MASK SMB1360_MASK(1, 0) +#define USB_100_BIT 0x01 +#define USB_500_BIT 0x00 +#define USB_AC_BIT 0x02 +#define SHDN_CMD_BIT BIT(7) + +#define CMD_CHG_REG 0x42 +#define CMD_CHG_EN BIT(1) +#define CMD_OTG_EN_BIT BIT(0) + +/* Status Registers */ +#define STATUS_1_REG 0x48 +#define AICL_CURRENT_STATUS_MASK SMB1360_MASK(6, 0) +#define AICL_LIMIT_1500MA 0xF + +#define STATUS_3_REG 0x4B +#define CHG_HOLD_OFF_BIT BIT(3) +#define CHG_TYPE_MASK SMB1360_MASK(2, 1) +#define CHG_TYPE_SHIFT 1 +#define BATT_NOT_CHG_VAL 0x0 +#define BATT_PRE_CHG_VAL 0x1 +#define BATT_FAST_CHG_VAL 0x2 +#define BATT_TAPER_CHG_VAL 0x3 +#define CHG_EN_BIT BIT(0) + +#define STATUS_4_REG 0x4C +#define CYCLE_STRETCH_ACTIVE_BIT BIT(5) + +#define REVISION_CTRL_REG 0x4F +#define DEVICE_REV_MASK SMB1360_MASK(3, 0) + +/* IRQ Status Registers */ +#define IRQ_A_REG 0x50 +#define IRQ_A_HOT_HARD_BIT BIT(6) +#define IRQ_A_COLD_HARD_BIT BIT(4) +#define IRQ_A_HOT_SOFT_BIT BIT(2) +#define IRQ_A_COLD_SOFT_BIT BIT(0) + +#define IRQ_B_REG 0x51 +#define IRQ_B_BATT_TERMINAL_BIT BIT(6) +#define IRQ_B_BATT_MISSING_BIT BIT(4) + +#define IRQ_C_REG 0x52 +#define IRQ_C_CHG_TERM BIT(0) + +#define IRQ_D_REG 0x53 +#define IRQ_E_REG 0x54 +#define IRQ_E_USBIN_UV_BIT BIT(0) + +#define IRQ_F_REG 0x55 + +#define IRQ_G_REG 0x56 + +#define IRQ_H_REG 0x57 +#define IRQ_I_REG 0x58 +#define FG_ACCESS_ALLOWED_BIT BIT(0) +#define BATT_ID_RESULT_BIT SMB1360_MASK(6, 4) +#define BATT_ID_SHIFT 4 + +/* FG registers - IRQ config register */ +#define SOC_MAX_REG 0x24 +#define SOC_MIN_REG 0x25 +#define VTG_EMPTY_REG 0x26 +#define SOC_DELTA_REG 0x28 +#define JEITA_SOFT_COLD_REG 0x29 +#define JEITA_SOFT_HOT_REG 0x2A +#define VTG_MIN_REG 0x2B + +/* FG SHADOW registers */ +#define SHDW_FG_ESR_ACTUAL 0x20 +#define SHDW_FG_BATT_STATUS 0x60 +#define BATTERY_PROFILE_BIT BIT(0) + +#define SHDW_FG_MSYS_SOC 0x61 +#define SHDW_FG_CAPACITY 0x62 +#define SHDW_FG_VTG_NOW 0x69 +#define SHDW_FG_CURR_NOW 0x6B +#define SHDW_FG_BATT_TEMP 0x6D + +#define VOLTAGE_PREDICTED_REG 0x80 +#define CC_TO_SOC_COEFF 0xBA +#define NOMINAL_CAPACITY_REG 0xBC +#define ACTUAL_CAPACITY_REG 0xBE +#define FG_AUTO_RECHARGE_SOC 0xD2 +#define FG_SYS_CUTOFF_V_REG 0xD3 +#define FG_CC_TO_CV_V_REG 0xD5 +#define FG_ITERM_REG 0xD9 +#define FG_THERM_C1_COEFF_REG 0xDB +#define FG_IBATT_STANDBY_REG 0xCF + +#define FG_I2C_CFG_MASK SMB1360_MASK(2, 1) +#define FG_CFG_I2C_ADDR 0x2 +#define FG_PROFILE_A_ADDR 0x4 +#define FG_PROFILE_B_ADDR 0x6 + +/* Constants */ +#define CURRENT_100_MA 100 +#define CURRENT_500_MA 500 +#define MAX_8_BITS 255 +#define JEITA_WORK_MS 3000 + +#define FG_RESET_THRESHOLD_MV 15 +#define SMB1360_REV_1 0x01 + +#define SMB1360_POWERON_DELAY_MS 2000 +#define SMB1360_FG_RESET_DELAY_MS 1500 + +enum { + WRKRND_FG_CONFIG_FAIL = BIT(0), + WRKRND_BATT_DET_FAIL = BIT(1), + WRKRND_USB100_FAIL = BIT(2), + WRKRND_HARD_JEITA = BIT(3), +}; + +enum { + USER = BIT(0), +}; + +enum { + PARALLEL_USER = BIT(0), + PARALLEL_CURRENT = BIT(1), + PARALLEL_JEITA_SOFT = BIT(2), + PARALLEL_JEITA_HARD = BIT(3), + PARALLEL_EOC = BIT(4), +}; + +enum fg_i2c_access_type { + FG_ACCESS_CFG = 0x1, + FG_ACCESS_PROFILE_A = 0x2, + FG_ACCESS_PROFILE_B = 0x3 +}; + +enum { + BATTERY_PROFILE_A, + BATTERY_PROFILE_B, + BATTERY_PROFILE_MAX, +}; + +static int 
otg_curr_ma[] = {350, 550, 950, 1500}; + +struct otp_backup_pool { + u8 reg_start; + u8 reg_end; + u8 start_now; + u16 alg_bitmap; + bool initialized; + struct mutex lock; +}; + +enum otp_backup_alg { + OTP_BACKUP_NOT_USE = 0, + OTP_BACKUP_FG_USE, + OTP_BACKUP_PROF_A_USE, + OTP_BACKUP_PROF_B_USE, +}; + +struct smb1360_otg_regulator { + struct regulator_desc rdesc; + struct regulator_dev *rdev; +}; + +enum wakeup_src { + WAKEUP_SRC_FG_ACCESS = 0, + WAKEUP_SRC_JEITA_SOFT, + WAKEUP_SRC_PARALLEL, + WAKEUP_SRC_MIN_SOC, + WAKEUP_SRC_EMPTY_SOC, + WAKEUP_SRC_JEITA_HYSTERSIS, + WAKEUP_SRC_MAX, +}; +#define WAKEUP_SRC_MASK (~(~0 << WAKEUP_SRC_MAX)) + +struct smb1360_wakeup_source { + struct wakeup_source *source; + unsigned long enabled_bitmap; + spinlock_t ws_lock; +}; + +static const unsigned int smb1360_extcon_cable[] = { + EXTCON_USB, + EXTCON_USB_HOST, + EXTCON_NONE, +}; + +struct smb1360_chip { + struct i2c_client *client; + struct device *dev; + u8 revision; + u8 soft_hot_rt_stat; + u8 soft_cold_rt_stat; + struct delayed_work jeita_work; + struct delayed_work delayed_init_work; + unsigned short default_i2c_addr; + unsigned short fg_i2c_addr; + bool pulsed_irq; + struct completion fg_mem_access_granted; + + /* wakeup source */ + struct smb1360_wakeup_source smb1360_ws; + + /* configuration data - charger */ + int fake_battery_soc; + bool batt_id_disabled; + bool charging_disabled; + bool recharge_disabled; + bool chg_inhibit_disabled; + bool iterm_disabled; + bool shdn_after_pwroff; + bool config_hard_thresholds; + bool soft_jeita_supported; + bool ov_ends_chg_cycle_disabled; + int iterm_ma; + int vfloat_mv; + int safety_time; + int resume_delta_mv; + u32 default_batt_profile; + unsigned int thermal_levels; + unsigned int therm_lvl_sel; + unsigned int *thermal_mitigation; + int otg_batt_curr_limit; + bool min_icl_usb100; + int cold_bat_decidegc; + int hot_bat_decidegc; + int cool_bat_decidegc; + int warm_bat_decidegc; + int cool_bat_mv; + int warm_bat_mv; + int cool_bat_ma; + int warm_bat_ma; + int soft_cold_thresh; + int soft_hot_thresh; + + /* parallel-chg params */ + int fastchg_current; + int parallel_chg_disable_status; + int max_parallel_chg_current; + bool parallel_charging; + + /* configuration data - fg */ + int soc_max; + int soc_min; + int delta_soc; + int voltage_min_mv; + int voltage_empty_mv; + int batt_capacity_mah; + int cc_soc_coeff; + int v_cutoff_mv; + int fg_iterm_ma; + int fg_ibatt_standby_ma; + int fg_thermistor_c1_coeff; + int fg_cc_to_cv_mv; + int fg_auto_recharge_soc; + bool empty_soc_disabled; + int fg_reset_threshold_mv; + bool fg_reset_at_pon; + bool rsense_10mohm; + bool otg_fet_present; + bool fet_gain_enabled; + int otg_fet_enable_gpio; + int usb_id_gpio; + + /* status tracking */ + int voltage_now; + int current_now; + int resistance_now; + int temp_now; + int soc_now; + int fcc_mah; + bool usb_present; + bool batt_present; + bool batt_hot; + bool batt_cold; + bool batt_warm; + bool batt_cool; + bool batt_full; + bool resume_completed; + bool irq_waiting; + bool irq_disabled; + bool empty_soc; + bool awake_min_soc; + int workaround_flags; + u8 irq_cfg_mask[3]; + int usb_psy_ma; + int charging_disabled_status; + u32 connected_rid; + u32 profile_rid[BATTERY_PROFILE_MAX]; + + u32 peek_poke_address; + u32 fg_access_type; + u32 fg_peek_poke_address; + int skip_writes; + int skip_reads; + enum power_supply_type usb_supply_type; + struct dentry *debug_root; + + struct iio_channel *lr_mux2_batt_id; + struct power_supply *parallel_psy; + struct power_supply_desc 
parallel_psy_d; + struct power_supply *usb_psy; + struct power_supply_desc usb_psy_d; + struct power_supply *batt_psy; + struct power_supply_desc batt_psy_d; + struct smb1360_otg_regulator otg_vreg; + struct mutex irq_complete; + struct mutex charging_disable_lock; + struct mutex current_change_lock; + struct mutex read_write_lock; + struct mutex parallel_chg_lock; + struct work_struct parallel_work; + struct mutex otp_gain_lock; + struct mutex fg_access_request_lock; + struct otp_backup_pool otp_backup; + u8 current_gain_otp_reg; + bool otp_hard_jeita_config; + int otp_cold_bat_decidegc; + int otp_hot_bat_decidegc; + u8 hard_jeita_otp_reg; + struct work_struct jeita_hysteresis_work; + int cold_hysteresis; + int hot_hysteresis; + struct extcon_dev *extcon; + int usb_id_irq; +}; + +static int chg_time[] = { + 192, + 384, + 768, + 1536, +}; + +static int input_current_limit[] = { + 300, 400, 450, 500, 600, 700, 800, 850, 900, + 950, 1000, 1100, 1200, 1300, 1400, 1500, +}; + +static int fastchg_current[] = { + 450, 600, 750, 900, 1050, 1200, 1350, 1500, +}; + +static void smb1360_stay_awake(struct smb1360_wakeup_source *source, + enum wakeup_src wk_src) +{ + unsigned long flags; + + spin_lock_irqsave(&source->ws_lock, flags); + + if (!__test_and_set_bit(wk_src, &source->enabled_bitmap)) { + __pm_stay_awake(source->source); + pr_debug("enabled source %s, wakeup_src %d\n", + source->source->name, wk_src); + } + spin_unlock_irqrestore(&source->ws_lock, flags); +} + +static void smb1360_relax(struct smb1360_wakeup_source *source, + enum wakeup_src wk_src) +{ + unsigned long flags; + + spin_lock_irqsave(&source->ws_lock, flags); + if (__test_and_clear_bit(wk_src, &source->enabled_bitmap) && + !(source->enabled_bitmap & WAKEUP_SRC_MASK)) { + __pm_relax(source->source); + pr_debug("disabled source %s\n", source->source->name); + } + spin_unlock_irqrestore(&source->ws_lock, flags); + + pr_debug("relax source %s, wakeup_src %d\n", + source->source->name, wk_src); +} + +static void smb1360_wakeup_src_init(struct smb1360_chip *chip) +{ + spin_lock_init(&chip->smb1360_ws.ws_lock); + chip->smb1360_ws.source = wakeup_source_register(NULL, "smb1360"); +} + +static int is_between(int value, int left, int right) +{ + if (left >= right && left >= value && value >= right) + return 1; + if (left <= right && left <= value && value <= right) + return 1; + + return 0; +} + +static int bound(int val, int min, int max) +{ + if (val < min) + return min; + if (val > max) + return max; + + return val; +} + +static int __smb1360_read(struct smb1360_chip *chip, int reg, + u8 *val) +{ + s32 ret; + + ret = i2c_smbus_read_byte_data(chip->client, reg); + if (ret < 0) { + dev_err(chip->dev, + "i2c read fail: can't read from %02x: %d\n", reg, ret); + return ret; + } + *val = ret; + pr_debug("Reading 0x%02x=0x%02x\n", reg, *val); + + return 0; +} + +static int __smb1360_write(struct smb1360_chip *chip, int reg, + u8 val) +{ + s32 ret; + + ret = i2c_smbus_write_byte_data(chip->client, reg, val); + if (ret < 0) { + dev_err(chip->dev, + "i2c write fail: can't write %02x to %02x: %d\n", + val, reg, ret); + return ret; + } + pr_debug("Writing 0x%02x=0x%02x\n", reg, val); + return 0; +} + +static int smb1360_read(struct smb1360_chip *chip, int reg, + u8 *val) +{ + int rc; + + if (chip->skip_reads) { + *val = 0; + return 0; + } + mutex_lock(&chip->read_write_lock); + rc = __smb1360_read(chip, reg, val); + mutex_unlock(&chip->read_write_lock); + + return rc; +} + +static int smb1360_write(struct smb1360_chip *chip, int reg, + u8 val) +{ 
+ int rc; + + if (chip->skip_writes) + return 0; + + mutex_lock(&chip->read_write_lock); + rc = __smb1360_write(chip, reg, val); + mutex_unlock(&chip->read_write_lock); + + return rc; +} + +static int smb1360_fg_read(struct smb1360_chip *chip, int reg, + u8 *val) +{ + int rc; + + if (chip->skip_reads) { + *val = 0; + return 0; + } + + mutex_lock(&chip->read_write_lock); + chip->client->addr = chip->fg_i2c_addr; + rc = __smb1360_read(chip, reg, val); + chip->client->addr = chip->default_i2c_addr; + mutex_unlock(&chip->read_write_lock); + + return rc; +} + +static int smb1360_fg_write(struct smb1360_chip *chip, int reg, + u8 val) +{ + int rc; + + if (chip->skip_writes) + return 0; + + mutex_lock(&chip->read_write_lock); + chip->client->addr = chip->fg_i2c_addr; + rc = __smb1360_write(chip, reg, val); + chip->client->addr = chip->default_i2c_addr; + mutex_unlock(&chip->read_write_lock); + + return rc; +} + +static int smb1360_read_bytes(struct smb1360_chip *chip, int reg, + u8 *val, u8 bytes) +{ + s32 rc; + + if (chip->skip_reads) { + *val = 0; + return 0; + } + + mutex_lock(&chip->read_write_lock); + rc = i2c_smbus_read_i2c_block_data(chip->client, reg, bytes, val); + if (rc < 0) + dev_err(chip->dev, + "i2c read fail: can't read %d bytes from %02x: %d\n", + bytes, reg, rc); + mutex_unlock(&chip->read_write_lock); + + return (rc < 0) ? rc : 0; +} + +static int smb1360_write_bytes(struct smb1360_chip *chip, int reg, + u8 *val, u8 bytes) +{ + s32 rc; + + if (chip->skip_writes) { + *val = 0; + return 0; + } + + mutex_lock(&chip->read_write_lock); + rc = i2c_smbus_write_i2c_block_data(chip->client, reg, bytes, val); + if (rc < 0) + dev_err(chip->dev, + "i2c write fail: can't read %d bytes from %02x: %d\n", + bytes, reg, rc); + mutex_unlock(&chip->read_write_lock); + + return (rc < 0) ? 
rc : 0; +} + +static int smb1360_masked_write(struct smb1360_chip *chip, int reg, + u8 mask, u8 val) +{ + s32 rc; + u8 temp; + + if (chip->skip_writes || chip->skip_reads) + return 0; + + mutex_lock(&chip->read_write_lock); + rc = __smb1360_read(chip, reg, &temp); + if (rc < 0) { + dev_err(chip->dev, "read failed: reg=%03X, rc=%d\n", reg, rc); + goto out; + } + temp &= ~mask; + temp |= val & mask; + rc = __smb1360_write(chip, reg, temp); + if (rc < 0) { + dev_err(chip->dev, + "write failed: reg=%03X, rc=%d\n", reg, rc); + } +out: + mutex_unlock(&chip->read_write_lock); + return rc; +} + +static int smb1360_select_fg_i2c_address(struct smb1360_chip *chip) +{ + unsigned short addr = chip->default_i2c_addr << 0x1; + + switch (chip->fg_access_type) { + case FG_ACCESS_CFG: + addr = (addr & ~FG_I2C_CFG_MASK) | FG_CFG_I2C_ADDR; + break; + case FG_ACCESS_PROFILE_A: + addr = (addr & ~FG_I2C_CFG_MASK) | FG_PROFILE_A_ADDR; + break; + case FG_ACCESS_PROFILE_B: + addr = (addr & ~FG_I2C_CFG_MASK) | FG_PROFILE_B_ADDR; + break; + default: + pr_err("Invalid FG access type=%d\n", chip->fg_access_type); + return -EINVAL; + } + + chip->fg_i2c_addr = addr >> 0x1; + pr_debug("FG_access_type=%d fg_i2c_addr=%x\n", chip->fg_access_type, + chip->fg_i2c_addr); + + return 0; +} + +#define EXPONENT_MASK 0xF800 +#define MANTISSA_MASK 0x3FF +#define SIGN_MASK 0x400 +#define EXPONENT_SHIFT 11 +#define SIGN_SHIFT 10 +#define MICRO_UNIT 1000000ULL +static int64_t float_decode(u16 reg) +{ + int64_t final_val, exponent_val, mantissa_val; + int exponent, mantissa, n; + bool sign; + + exponent = (reg & EXPONENT_MASK) >> EXPONENT_SHIFT; + mantissa = (reg & MANTISSA_MASK); + sign = !!(reg & SIGN_MASK); + + pr_debug("exponent=%d mantissa=%d sign=%d\n", exponent, mantissa, sign); + + mantissa_val = mantissa * MICRO_UNIT; + + n = exponent - 15; + if (n < 0) + exponent_val = MICRO_UNIT >> -n; + else + exponent_val = MICRO_UNIT << n; + + n = n - 10; + if (n < 0) + mantissa_val >>= -n; + else + mantissa_val <<= n; + + final_val = exponent_val + mantissa_val; + + if (sign) + final_val *= -1; + + return final_val; +} + +#define MAX_MANTISSA (1023 * 1000000ULL) +static unsigned int float_encode(int64_t float_val) +{ + int exponent = 0, sign = 0; + unsigned int final_val = 0; + + if (float_val == 0) + return 0; + + if (float_val < 0) { + sign = 1; + float_val = -float_val; + } + + /* Reduce large mantissa until it fits into 10 bit */ + while (float_val >= MAX_MANTISSA) { + exponent++; + float_val >>= 1; + } + + /* Increase small mantissa to improve precision */ + while (float_val < MAX_MANTISSA && exponent > -25) { + exponent--; + float_val <<= 1; + } + + exponent = exponent + 25; + + /* Convert mantissa from micro-units to units */ + float_val = div_s64((float_val + MICRO_UNIT), (int)MICRO_UNIT); + + if (float_val == 1024) { + exponent--; + float_val <<= 1; + } + + float_val -= 1024; + + /* Ensure that resulting number is within range */ + if (float_val > MANTISSA_MASK) + float_val = MANTISSA_MASK; + + /* Convert to 5 bit exponent, 11 bit mantissa */ + final_val = (float_val & MANTISSA_MASK) | (sign << SIGN_SHIFT) | + ((exponent << EXPONENT_SHIFT) & EXPONENT_MASK); + + return final_val; +} + +/* FG reset could only be done after FG access being granted */ +static int smb1360_force_fg_reset(struct smb1360_chip *chip) +{ + int rc; + + rc = smb1360_masked_write(chip, CMD_I2C_REG, FG_RESET_BIT, + FG_RESET_BIT); + if (rc) { + pr_err("Couldn't reset FG rc=%d\n", rc); + return rc; + } + + msleep(SMB1360_FG_RESET_DELAY_MS); + + rc = 
smb1360_masked_write(chip, CMD_I2C_REG, FG_RESET_BIT, 0); + if (rc) + pr_err("Couldn't un-reset FG rc=%d\n", rc); + + return rc; +} + +/* + * Requesting FG access relies on the FG_ACCESS_ALLOWED IRQ. + * This function can only be called after the interrupt handler + * has been installed successfully. + */ +#define SMB1360_FG_ACCESS_TIMEOUT_MS 5000 +#define SMB1360_FG_ACCESS_RETRY_COUNT 3 +static int smb1360_enable_fg_access(struct smb1360_chip *chip) +{ + int rc = 0; + u8 reg, retry = SMB1360_FG_ACCESS_RETRY_COUNT; + + pr_debug("request FG memory access\n"); + /* + * read the ACCESS_ALLOW status bit first to + * check if the access was granted before + */ + mutex_lock(&chip->fg_access_request_lock); + smb1360_stay_awake(&chip->smb1360_ws, WAKEUP_SRC_FG_ACCESS); + rc = smb1360_read(chip, IRQ_I_REG, &reg); + if (rc) { + pr_err("Couldn't read IRQ_I_REG, rc=%d\n", rc); + goto bail_i2c; + } else if (reg & FG_ACCESS_ALLOWED_BIT) { + pr_debug("FG access was granted\n"); + goto bail_i2c; + } + + /* request FG access */ + rc = smb1360_masked_write(chip, CMD_I2C_REG, FG_ACCESS_ENABLED_BIT, + FG_ACCESS_ENABLED_BIT); + if (rc) { + pr_err("Couldn't enable FG access rc=%d\n", rc); + goto bail_i2c; + } + + while (retry--) { + rc = wait_for_completion_interruptible_timeout( + &chip->fg_mem_access_granted, + msecs_to_jiffies(SMB1360_FG_ACCESS_TIMEOUT_MS)); + if (rc <= 0) + pr_debug("FG access timeout, retry: %d\n", retry); + else + break; + } + if (rc == 0) /* timed out */ + rc = -ETIMEDOUT; + else if (rc > 0) /* completed */ + rc = 0; + + /* Clear the FG access bit if request failed */ + if (rc < 0) { + rc = smb1360_masked_write(chip, CMD_I2C_REG, + FG_ACCESS_ENABLED_BIT, 0); + if (rc) + pr_err("Couldn't disable FG access rc=%d\n", rc); + } + +bail_i2c: + smb1360_relax(&chip->smb1360_ws, WAKEUP_SRC_FG_ACCESS); + mutex_unlock(&chip->fg_access_request_lock); + return rc; +} + +static inline bool is_device_suspended(struct smb1360_chip *chip) +{ + return !chip->resume_completed; +} + +static int smb1360_disable_fg_access(struct smb1360_chip *chip) +{ + int rc; + + rc = smb1360_masked_write(chip, CMD_I2C_REG, FG_ACCESS_ENABLED_BIT, 0); + if (rc) + pr_err("Couldn't disable FG access rc=%d\n", rc); + + init_completion(&chip->fg_mem_access_granted); + + return rc; +} + +static int smb1360_enable_volatile_writes(struct smb1360_chip *chip) +{ + int rc; + + rc = smb1360_masked_write(chip, CMD_I2C_REG, + ALLOW_VOLATILE_BIT, ALLOW_VOLATILE_BIT); + if (rc < 0) + dev_err(chip->dev, + "Couldn't set VOLATILE_W_PERM_BIT rc=%d\n", rc); + + return rc; +} + +static void smb1360_otp_backup_pool_init(struct smb1360_chip *chip) +{ + struct otp_backup_pool *pool = &chip->otp_backup; + + pool->reg_start = 0xE0; + pool->reg_end = 0xEF; + pool->start_now = pool->reg_start; + mutex_init(&pool->lock); +} + +static int smb1360_alloc_otp_backup_register(struct smb1360_chip *chip, + u8 size, int usage) +{ + int rc = 0, i; + u8 inv_pos; + struct otp_backup_pool *pool = &chip->otp_backup; + + if (size % 2) { + pr_err("Must be allocated with pairs\n"); + return -EINVAL; + } + + mutex_lock(&pool->lock); + if (pool->start_now + size > pool->reg_end) { + pr_err("Allocation fail: start = 0x%x, size = %d\n", + pool->start_now, size); + mutex_unlock(&pool->lock); + return -EBUSY; + } + rc = pool->start_now; + inv_pos = pool->reg_end - pool->start_now + 1; + for (i = 0; i < size; i = i + 2) { + inv_pos -= (i ?
2 : 0); + pool->alg_bitmap |= usage << (inv_pos - 2); + } + pr_debug("Allocation success, start = 0x%x, size = %d, alg_bitmap = 0x%x\n", + rc, size, pool->alg_bitmap); + pool->start_now += size; + mutex_unlock(&pool->lock); + + return rc; +} + +#define OTP_BACKUP_WA_ALG_1 0xF0 +#define OTP_BACKUP_WA_ALG_2 0xF1 +static int smb1360_otp_backup_alg_update(struct smb1360_chip *chip) +{ + int rc = 0; + struct otp_backup_pool *pool = &chip->otp_backup; + + mutex_lock(&pool->lock); + rc = smb1360_fg_write(chip, OTP_BACKUP_WA_ALG_1, + (u8)(pool->alg_bitmap >> 8)); + rc |= smb1360_fg_write(chip, OTP_BACKUP_WA_ALG_2, + (u8)(pool->alg_bitmap)); + if (rc) + pr_err("Write FG address F0/F1 failed, rc = %d\n", rc); + mutex_unlock(&pool->lock); + + return rc; +} + +#define TRIM_1C_REG 0x1C +#define CHECK_USB100_GOOD_BIT BIT(6) +static bool is_usb100_broken(struct smb1360_chip *chip) +{ + int rc; + u8 reg; + + rc = smb1360_read(chip, TRIM_1C_REG, &reg); + if (rc < 0) { + dev_err(chip->dev, "Couldn't read trim 1C reg rc = %d\n", rc); + return rc; + } + return !!(reg & CHECK_USB100_GOOD_BIT); +} + +static int read_revision(struct smb1360_chip *chip, u8 *revision) +{ + int rc; + + *revision = 0; + rc = smb1360_read(chip, REVISION_CTRL_REG, revision); + if (rc) + dev_err(chip->dev, "Couldn't read REVISION_CTRL_REG rc=%d\n", + rc); + + *revision &= DEVICE_REV_MASK; + + return rc; +} + +#define MIN_FLOAT_MV 3460 +#define MAX_FLOAT_MV 4730 +#define VFLOAT_STEP_MV 10 +static int smb1360_float_voltage_set(struct smb1360_chip *chip, int vfloat_mv) +{ + u8 temp; + + if ((vfloat_mv < MIN_FLOAT_MV) || (vfloat_mv > MAX_FLOAT_MV)) { + dev_err(chip->dev, "bad float voltage mv =%d asked to set\n", + vfloat_mv); + return -EINVAL; + } + + temp = (vfloat_mv - MIN_FLOAT_MV) / VFLOAT_STEP_MV; + + return smb1360_masked_write(chip, BATT_CHG_FLT_VTG_REG, + VFLOAT_MASK, temp); +} + +#define MIN_RECHG_MV 50 +#define MAX_RECHG_MV 300 +static int smb1360_recharge_threshold_set(struct smb1360_chip *chip, + int resume_mv) +{ + u8 temp; + + if ((resume_mv < MIN_RECHG_MV) || (resume_mv > MAX_RECHG_MV)) { + dev_err(chip->dev, "bad rechg_thrsh =%d asked to set\n", + resume_mv); + return -EINVAL; + } + + temp = resume_mv / 100; + + return smb1360_masked_write(chip, CFG_BATT_CHG_REG, + RECHG_MV_MASK, temp << RECHG_MV_SHIFT); +} + +static int __smb1360_charging_disable(struct smb1360_chip *chip, bool disable) +{ + int rc; + + rc = smb1360_masked_write(chip, CMD_CHG_REG, + CMD_CHG_EN, disable ?
0 : CMD_CHG_EN); + if (rc < 0) + pr_err("Couldn't set CHG_ENABLE_BIT disable=%d rc = %d\n", + disable, rc); + else + pr_debug("CHG_EN status=%d\n", !disable); + + return rc; +} + +static int smb1360_charging_disable(struct smb1360_chip *chip, int reason, + int disable) +{ + int rc = 0; + int disabled; + + mutex_lock(&chip->charging_disable_lock); + + disabled = chip->charging_disabled_status; + + pr_debug("reason=%d requested_disable=%d disabled_status=%d\n", + reason, disable, disabled); + + if (disable == true) + disabled |= reason; + else + disabled &= ~reason; + + if (disabled) + rc = __smb1360_charging_disable(chip, true); + else + rc = __smb1360_charging_disable(chip, false); + + if (rc) + pr_err("Couldn't disable charging for reason=%d rc=%d\n", + reason, rc); + else + chip->charging_disabled_status = disabled; + + mutex_unlock(&chip->charging_disable_lock); + + return rc; +} + +static int smb1360_soft_jeita_comp_enable(struct smb1360_chip *chip, + bool enable) +{ + int rc = 0; + + rc = smb1360_masked_write(chip, CHG_CMP_CFG, JEITA_COMP_EN_MASK, + enable ? JEITA_COMP_EN_BIT : 0); + if (rc) + pr_err("Couldn't %s JEITA compensation\n", enable ? + "enable" : "disable"); + + return rc; +} + +static enum power_supply_property smb1360_battery_properties[] = { + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_CHARGING_ENABLED, + POWER_SUPPLY_PROP_CHARGE_TYPE, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_RESISTANCE, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL, +}; + +static int smb1360_get_prop_batt_present(struct smb1360_chip *chip) +{ + return chip->batt_present; +} + +static int smb1360_get_prop_batt_status(struct smb1360_chip *chip) +{ + int rc; + u8 reg = 0, chg_type; + + if (is_device_suspended(chip)) + return POWER_SUPPLY_STATUS_UNKNOWN; + + if (chip->batt_full) + return POWER_SUPPLY_STATUS_FULL; + + rc = smb1360_read(chip, STATUS_3_REG, &reg); + if (rc) { + pr_err("Couldn't read STATUS_3_REG rc=%d\n", rc); + return POWER_SUPPLY_STATUS_UNKNOWN; + } + + pr_debug("STATUS_3_REG = %x\n", reg); + + if (reg & CHG_HOLD_OFF_BIT) + return POWER_SUPPLY_STATUS_NOT_CHARGING; + + chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT; + + if (chg_type == BATT_NOT_CHG_VAL) + return POWER_SUPPLY_STATUS_DISCHARGING; + else + return POWER_SUPPLY_STATUS_CHARGING; +} + +static int smb1360_get_prop_charge_type(struct smb1360_chip *chip) +{ + int rc; + u8 reg = 0; + u8 chg_type; + + if (is_device_suspended(chip)) + return POWER_SUPPLY_CHARGE_TYPE_UNKNOWN; + + rc = smb1360_read(chip, STATUS_3_REG, &reg); + if (rc) { + pr_err("Couldn't read STATUS_3_REG rc=%d\n", rc); + return POWER_SUPPLY_CHARGE_TYPE_UNKNOWN; + } + + chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT; + if (chg_type == BATT_NOT_CHG_VAL) + return POWER_SUPPLY_CHARGE_TYPE_NONE; + else if ((chg_type == BATT_FAST_CHG_VAL) || + (chg_type == BATT_TAPER_CHG_VAL)) + return POWER_SUPPLY_CHARGE_TYPE_FAST; + else if (chg_type == BATT_PRE_CHG_VAL) + return POWER_SUPPLY_CHARGE_TYPE_TRICKLE; + + return POWER_SUPPLY_CHARGE_TYPE_NONE; +} + +static int smb1360_get_prop_batt_health(struct smb1360_chip *chip) +{ + union power_supply_propval ret = {0, }; + + if (chip->batt_hot) + ret.intval = POWER_SUPPLY_HEALTH_OVERHEAT; + else if (chip->batt_cold) + ret.intval = POWER_SUPPLY_HEALTH_COLD; + else if (chip->batt_warm) + ret.intval = POWER_SUPPLY_HEALTH_WARM; + else if (chip->batt_cool)
+ ret.intval = POWER_SUPPLY_HEALTH_COOL; + else + ret.intval = POWER_SUPPLY_HEALTH_GOOD; + + return ret.intval; +} + +static int smb1360_get_prop_batt_capacity(struct smb1360_chip *chip) +{ + u8 reg; + u32 temp = 0; + int rc, soc = 0; + + if (chip->fake_battery_soc >= 0) + return chip->fake_battery_soc; + + if (chip->empty_soc) { + pr_debug("empty_soc\n"); + return 0; + } + + if (is_device_suspended(chip)) + return chip->soc_now; + + rc = smb1360_read(chip, SHDW_FG_MSYS_SOC, &reg); + if (rc) { + pr_err("Failed to read FG_MSYS_SOC rc=%d\n", rc); + return rc; + } + soc = (100 * reg) / MAX_8_BITS; + + temp = (100 * reg) % MAX_8_BITS; + if (temp > (MAX_8_BITS / 2)) + soc += 1; + + pr_debug("msys_soc_reg=0x%02x, fg_soc=%d batt_full = %d\n", reg, + soc, chip->batt_full); + + chip->soc_now = (chip->batt_full ? 100 : bound(soc, 0, 100)); + + return chip->soc_now; +} + +static int smb1360_get_prop_chg_full_design(struct smb1360_chip *chip) +{ + u8 reg[2]; + int rc, fcc_mah = 0; + + if (is_device_suspended(chip)) + return chip->fcc_mah; + + rc = smb1360_read_bytes(chip, SHDW_FG_CAPACITY, reg, 2); + if (rc) { + pr_err("Failed to read SHDW_FG_CAPACITY rc=%d\n", rc); + return rc; + } + fcc_mah = (reg[1] << 8) | reg[0]; + + pr_debug("reg[0]=0x%02x reg[1]=0x%02x fcc_mah=%d\n", + reg[0], reg[1], fcc_mah); + + chip->fcc_mah = fcc_mah * 1000; + + return chip->fcc_mah; +} + +static int smb1360_get_prop_batt_temp(struct smb1360_chip *chip) +{ + u8 reg[2]; + int rc, temp = 0; + + if (is_device_suspended(chip)) + return chip->temp_now; + + rc = smb1360_read_bytes(chip, SHDW_FG_BATT_TEMP, reg, 2); + if (rc) { + pr_err("Failed to read SHDW_FG_BATT_TEMP rc=%d\n", rc); + return rc; + } + + temp = (reg[1] << 8) | reg[0]; + temp = div_u64(temp * 625, 10000UL); /* temperature in kelvin */ + temp = (temp - 273) * 10; /* temperature in decideg */ + + pr_debug("reg[0]=0x%02x reg[1]=0x%02x temperature=%d\n", + reg[0], reg[1], temp); + + chip->temp_now = temp; + + return chip->temp_now; +} + +static int smb1360_get_prop_voltage_now(struct smb1360_chip *chip) +{ + u8 reg[2]; + int rc, temp = 0; + + if (is_device_suspended(chip)) + return chip->voltage_now; + + rc = smb1360_read_bytes(chip, SHDW_FG_VTG_NOW, reg, 2); + if (rc) { + pr_err("Failed to read SHDW_FG_VTG_NOW rc=%d\n", rc); + return rc; + } + + temp = (reg[1] << 8) | reg[0]; + temp = div_u64(temp * 5000, 0x7FFF); + + pr_debug("reg[0]=0x%02x reg[1]=0x%02x voltage=%d\n", + reg[0], reg[1], temp * 1000); + + chip->voltage_now = temp * 1000; + + return chip->voltage_now; +} + +static int smb1360_get_prop_batt_resistance(struct smb1360_chip *chip) +{ + u8 reg[2]; + u16 temp; + int rc; + int64_t resistance; + + if (is_device_suspended(chip)) + return chip->resistance_now; + + rc = smb1360_read_bytes(chip, SHDW_FG_ESR_ACTUAL, reg, 2); + if (rc) { + pr_err("Failed to read FG_ESR_ACTUAL rc=%d\n", rc); + return rc; + } + temp = (reg[1] << 8) | reg[0]; + + resistance = float_decode(temp) * 2; + + pr_debug("reg=0x%02x resistance=%lld\n", temp, resistance); + + /* resistance in uohms */ + chip->resistance_now = resistance; + + return chip->resistance_now; +} + +static int smb1360_get_prop_current_now(struct smb1360_chip *chip) +{ + u8 reg[2]; + int rc, temp = 0; + + if (is_device_suspended(chip)) + return chip->current_now; + + rc = smb1360_read_bytes(chip, SHDW_FG_CURR_NOW, reg, 2); + if (rc) { + pr_err("Failed to read SHDW_FG_CURR_NOW rc=%d\n", rc); + return rc; + } + + temp = ((s8)reg[1] << 8) | reg[0]; + temp = div_s64(temp * 2500, 0x7FFF); + + pr_debug("reg[0]=0x%02x
reg[1]=0x%02x current=%d\n", + reg[0], reg[1], temp * 1000); + + chip->current_now = temp * 1000; + + return chip->current_now; +} + +static int smb1360_set_minimum_usb_current(struct smb1360_chip *chip) +{ + int rc = 0; + + if (chip->min_icl_usb100) { + pr_debug("USB min current set to 100mA\n"); + /* set input current limit to minimum (300mA) */ + rc = smb1360_masked_write(chip, CFG_BATT_CHG_ICL_REG, + INPUT_CURR_LIM_MASK, + INPUT_CURR_LIM_300MA); + if (rc) + pr_err("Couldn't set ICL mA rc=%d\n", rc); + + if (!(chip->workaround_flags & WRKRND_USB100_FAIL)) { + rc = smb1360_masked_write(chip, CMD_IL_REG, + USB_CTRL_MASK, USB_100_BIT); + if (rc) + pr_err("Couldn't configure for USB100 rc=%d\n", + rc); + } + } else { + pr_debug("USB min current set to 500mA\n"); + rc = smb1360_masked_write(chip, CMD_IL_REG, + USB_CTRL_MASK, USB_500_BIT); + if (rc) + pr_err("Couldn't configure for USB100 rc=%d\n", + rc); + } + + return rc; +} + +static struct power_supply *get_parallel_psy(struct smb1360_chip *chip) +{ + if (chip->parallel_psy) + return chip->parallel_psy; + chip->parallel_psy = power_supply_get_by_name("usb-parallel"); + if (!chip->parallel_psy) + pr_debug("parallel charger not found\n"); + return chip->parallel_psy; +} + +static int __smb1360_parallel_charger_enable(struct smb1360_chip *chip, + bool enable) +{ + struct power_supply *parallel_psy = get_parallel_psy(chip); + union power_supply_propval pval = {0, }; + + if (!parallel_psy) + return 0; + + pval.intval = (enable ? (chip->max_parallel_chg_current * 1000) : 0); + chip->parallel_psy_d.set_property(parallel_psy, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval); + pval.intval = (enable ? 1 : 0); + chip->parallel_psy_d.set_property(parallel_psy, + POWER_SUPPLY_PROP_CHARGING_ENABLED, &pval); + + pr_debug("Parallel-charger %s max_chg_current=%d\n", + enable ? "enabled" : "disabled", + enable ? (chip->max_parallel_chg_current * 1000) : 0); + + return 0; +} + +static int smb1360_parallel_charger_enable(struct smb1360_chip *chip, + int reason, bool enable) +{ + int disabled, *disabled_status; + + mutex_lock(&chip->parallel_chg_lock); + + disabled = chip->parallel_chg_disable_status; + disabled_status = &chip->parallel_chg_disable_status; + + pr_debug("reason=0x%x requested=%s disabled_status=0x%x\n", + reason, enable ? 
"enable" : "disable", disabled); + + if (enable) + disabled &= ~reason; + else + disabled |= reason; + + if (*disabled_status && !disabled) + __smb1360_parallel_charger_enable(chip, true); + + if (!(*disabled_status) && disabled) + __smb1360_parallel_charger_enable(chip, false); + + *disabled_status = disabled; + + pr_debug("disabled_status = %x\n", *disabled_status); + + mutex_unlock(&chip->parallel_chg_lock); + + return 0; +} + +static void smb1360_parallel_work(struct work_struct *work) +{ + u8 reg; + int rc, i; + struct smb1360_chip *chip = container_of(work, + struct smb1360_chip, parallel_work); + + /* check the AICL settled value */ + rc = smb1360_read(chip, STATUS_1_REG, ®); + if (rc) { + pr_debug("Unable to read AICL status rc=%d\n", rc); + goto exit_work; + } + pr_debug("STATUS_1 (aicl status)=0x%x\n", reg); + if ((reg & AICL_CURRENT_STATUS_MASK) == AICL_LIMIT_1500MA) { + /* Strong Charger - Enable parallel path */ + /* find the new fastchg current */ + chip->fastchg_current += (chip->max_parallel_chg_current / 2); + for (i = 0; i < ARRAY_SIZE(fastchg_current) - 1; i++) { + if (fastchg_current[i] >= chip->fastchg_current) + break; + } + if (i == ARRAY_SIZE(fastchg_current)) + i--; + + rc = smb1360_masked_write(chip, CHG_CURRENT_REG, + FASTCHG_CURR_MASK, i << FASTCHG_CURR_SHIFT); + if (rc) + pr_err("Couldn't set fastchg mA rc=%d\n", rc); + + pr_debug("fast-chg (parallel-mode) current set to = %d\n", + fastchg_current[i]); + + smb1360_parallel_charger_enable(chip, PARALLEL_CURRENT, true); + } else { + /* Weak-charger - Disable parallel path */ + smb1360_parallel_charger_enable(chip, PARALLEL_CURRENT, false); + } + +exit_work: + smb1360_relax(&chip->smb1360_ws, WAKEUP_SRC_PARALLEL); +} + +static int smb1360_set_appropriate_usb_current(struct smb1360_chip *chip) +{ + int rc = 0, i, therm_ma, current_ma; + int path_current = chip->usb_psy_ma; + + /* + * If battery is absent do not modify the current at all, these + * would be some appropriate values set by the bootloader or default + * configuration and since it is the only source of power we should + * not change it + */ + if (!chip->batt_present) { + pr_debug("ignoring current request since battery is absent\n"); + return 0; + } + + if (chip->therm_lvl_sel > 0 + && chip->therm_lvl_sel < (chip->thermal_levels - 1)) + /* + * consider thermal limit only when it is active and not at + * the highest level + */ + therm_ma = chip->thermal_mitigation[chip->therm_lvl_sel]; + else + therm_ma = path_current; + + current_ma = min(therm_ma, path_current); + + if (chip->workaround_flags & WRKRND_HARD_JEITA) { + if (chip->batt_warm) + current_ma = min(current_ma, chip->warm_bat_ma); + else if (chip->batt_cool) + current_ma = min(current_ma, chip->cool_bat_ma); + } + + if (current_ma <= 2) { + /* + * SMB1360 does not support USB suspend - + * so set the current-limit to minimum in suspend. 
+ */ + pr_debug("current_ma=%d <= 2 set USB current to minimum\n", + current_ma); + rc = smb1360_set_minimum_usb_current(chip); + if (rc < 0) + pr_err("Couldn't to set minimum USB current rc = %d\n", + rc); + /* disable parallel charger */ + if (chip->parallel_charging) + smb1360_parallel_charger_enable(chip, + PARALLEL_CURRENT, false); + + return rc; + } + + for (i = ARRAY_SIZE(input_current_limit) - 1; i >= 0; i--) { + if (input_current_limit[i] <= current_ma) + break; + } + if (i < 0) { + pr_debug("Couldn't find ICL mA rc=%d\n", rc); + i = 0; + } + /* set input current limit */ + rc = smb1360_masked_write(chip, CFG_BATT_CHG_ICL_REG, + INPUT_CURR_LIM_MASK, i); + if (rc) + pr_err("Couldn't set ICL mA rc=%d\n", rc); + + pr_debug("ICL set to = %d\n", input_current_limit[i]); + + if ((current_ma <= CURRENT_100_MA) && + ((chip->workaround_flags & WRKRND_USB100_FAIL) || + !chip->min_icl_usb100)) { + pr_debug("usb100 not supported: usb100_wrkrnd=%d min_icl_100=%d\n", + !!(chip->workaround_flags & WRKRND_USB100_FAIL), + chip->min_icl_usb100); + current_ma = CURRENT_500_MA; + } + + if (current_ma <= CURRENT_100_MA) { + /* USB 100 */ + rc = smb1360_masked_write(chip, CMD_IL_REG, + USB_CTRL_MASK, USB_100_BIT); + if (rc) + pr_err("Couldn't configure for USB100 rc=%d\n", rc); + pr_debug("Setting USB 100\n"); + } else if (current_ma <= CURRENT_500_MA) { + /* USB 500 */ + rc = smb1360_masked_write(chip, CMD_IL_REG, + USB_CTRL_MASK, USB_500_BIT); + if (rc) + pr_err("Couldn't configure for USB500 rc=%d\n", rc); + pr_debug("Setting USB 500\n"); + } else { + /* USB AC */ + if (chip->rsense_10mohm) + current_ma /= 2; + + for (i = ARRAY_SIZE(fastchg_current) - 1; i >= 0; i--) { + if (fastchg_current[i] <= current_ma) + break; + } + if (i < 0) { + pr_debug("Couldn't find fastchg mA rc=%d\n", rc); + i = 0; + } + + chip->fastchg_current = fastchg_current[i]; + + /* set fastchg limit */ + rc = smb1360_masked_write(chip, CHG_CURRENT_REG, + FASTCHG_CURR_MASK, i << FASTCHG_CURR_SHIFT); + if (rc) + pr_err("Couldn't set fastchg mA rc=%d\n", rc); + + /* + * To move to a new (higher) input-current setting, + * first set USB500 and then USBAC. This makes sure + * that the new ICL setting takes affect. 
+ */ + rc = smb1360_masked_write(chip, CMD_IL_REG, + USB_CTRL_MASK, USB_500_BIT); + if (rc) + pr_err("Couldn't configure for USB500 rc=%d\n", rc); + + rc = smb1360_masked_write(chip, CMD_IL_REG, + USB_CTRL_MASK, USB_AC_BIT); + if (rc) + pr_err("Couldn't configure for USB AC rc=%d\n", rc); + + pr_debug("fast-chg current set to = %d\n", fastchg_current[i]); + } + + return rc; +} + +static int smb1360_set_jeita_comp_curr(struct smb1360_chip *chip, + int current_ma) +{ + int i; + int rc = 0; + + for (i = ARRAY_SIZE(fastchg_current) - 1; i >= 0; i--) { + if (fastchg_current[i] <= current_ma) + break; + } + if (i < 0) { + pr_debug("Couldn't find fastchg_current %dmA\n", current_ma); + i = 0; + } + + rc = smb1360_masked_write(chip, CHG_CMP_CFG, + JEITA_COMP_CURR_MASK, i); + if (rc) + pr_err("Couldn't configure for Icomp, rc = %d\n", rc); + + return rc; +} + +#define TEMP_THRE_SET(x) ((x + 300) / 10) +#define TEMP_THRE_GET(x) ((x * 10) - 300) +static int smb1360_set_soft_jeita_threshold(struct smb1360_chip *chip, + int cold_threshold, int hot_threshold) +{ + int rc = 0; + + rc = smb1360_write(chip, JEITA_SOFT_COLD_REG, + TEMP_THRE_SET(cold_threshold)); + if (rc) { + pr_err("Couldn't set soft cold threshold, rc = %d\n", rc); + return rc; + } + chip->soft_cold_thresh = cold_threshold; + + rc = smb1360_write(chip, JEITA_SOFT_HOT_REG, + TEMP_THRE_SET(hot_threshold)); + if (rc) { + pr_err("Couldn't set soft hot threshold, rc = %d\n", rc); + return rc; + } + chip->soft_hot_thresh = hot_threshold; + + return rc; +} + +static int smb1360_get_soft_jeita_threshold(struct smb1360_chip *chip, + int *cold_threshold, int *hot_threshold) +{ + int rc = 0; + u8 value; + + rc = smb1360_read(chip, JEITA_SOFT_COLD_REG, &value); + if (rc) { + pr_err("Couldn't get soft cold threshold, rc = %d\n", rc); + return rc; + } + *cold_threshold = TEMP_THRE_GET(value); + + rc = smb1360_read(chip, JEITA_SOFT_HOT_REG, &value); + if (rc) { + pr_err("Couldn't get soft hot threshold, rc = %d\n", rc); + return rc; + } + *hot_threshold = TEMP_THRE_GET(value); + + return rc; +} + +#define OTP_HARD_COLD_REG_ADDR 0x12 +#define OTP_HARD_HOT_REG_ADDR 0x13 +static int smb1360_set_otp_hard_jeita_threshold(struct smb1360_chip *chip, + int cold_threshold, int hot_threshold) +{ + int rc = 0, i; + u8 reg[4] = { 0 }; + int otp_reg = 0; + int temp_code; + + if (cold_threshold > chip->cool_bat_decidegc || + chip->cool_bat_decidegc >= chip->warm_bat_decidegc || + chip->warm_bat_decidegc > hot_threshold) { + pr_err("cold:%d, cool:%d, warm:%d, hot:%d should be ordered in size\n", + cold_threshold, chip->cool_bat_decidegc, + chip->warm_bat_decidegc, hot_threshold); + return -EINVAL; + } + pr_debug("cold:%d, cool:%d, warm:%d, hot:%d\n", + cold_threshold, chip->cool_bat_decidegc, + chip->warm_bat_decidegc, hot_threshold); + if (!chip->hard_jeita_otp_reg) { + otp_reg = smb1360_alloc_otp_backup_register(chip, + ARRAY_SIZE(reg), OTP_BACKUP_FG_USE); + if (otp_reg <= 0) { + pr_err("OTP reg allocation failed for hard JEITA\n"); + return otp_reg; + } + + chip->hard_jeita_otp_reg = otp_reg; + } else { + otp_reg = chip->hard_jeita_otp_reg; + } + pr_debug("hard_jeita_otp_reg = 0x%x\n", chip->hard_jeita_otp_reg); + + reg[0] = (u8)OTP_HARD_HOT_REG_ADDR; + temp_code = TEMP_THRE_SET(hot_threshold); + if (temp_code < 0) { + pr_err("hard hot temp encode failed\n"); + return temp_code; + } + reg[1] = (u8)temp_code; + reg[2] = (u8)OTP_HARD_COLD_REG_ADDR; + temp_code = TEMP_THRE_SET(cold_threshold); + if (temp_code < 0) { + pr_err("hard cold temp encode failed\n"); + return 
temp_code; + } + reg[3] = (u8)temp_code; + + rc = smb1360_enable_fg_access(chip); + if (rc) { + pr_err("Couldn't request FG access rc = %d\n", rc); + return rc; + } + chip->fg_access_type = FG_ACCESS_CFG; + + rc = smb1360_select_fg_i2c_address(chip); + if (rc) { + pr_err("Unable to set FG access I2C address\n"); + goto restore_fg; + } + + for (i = 0; i < ARRAY_SIZE(reg); i++) { + rc = smb1360_fg_write(chip, (otp_reg + i), reg[i]); + if (rc) { + pr_err("Write FG address 0x%x: 0x%x failed, rc = %d\n", + otp_reg + i, reg[i], rc); + goto restore_fg; + } + pr_debug("Write FG addr=0x%x, value=0x%x\n", + otp_reg + i, reg[i]); + } + rc = smb1360_otp_backup_alg_update(chip); + if (rc) { + pr_err("Update OTP backup algorithm failed\n"); + goto restore_fg; + } + + rc = smb1360_masked_write(chip, CFG_FG_BATT_CTRL_REG, + CFG_FG_OTP_BACK_UP_ENABLE, CFG_FG_OTP_BACK_UP_ENABLE); + if (rc) { + pr_err("Write reg 0x0E failed, rc = %d\n", rc); + goto restore_fg; + } + +restore_fg: + rc = smb1360_disable_fg_access(chip); + if (rc) { + pr_err("Couldn't disable FG access rc = %d\n", rc); + return rc; + } + + return rc; +} + +static int smb1360_hard_jeita_otp_init(struct smb1360_chip *chip) +{ + int rc = 0; + + if (!chip->otp_hard_jeita_config) + return rc; + + rc = smb1360_set_otp_hard_jeita_threshold(chip, + chip->otp_cold_bat_decidegc, chip->otp_hot_bat_decidegc); + if (rc) { + dev_err(chip->dev, + "Couldn't set OTP hard jeita threshold,rc = %d\n", rc); + return rc; + } + + return rc; +} + +static int smb1360_system_temp_level_set(struct smb1360_chip *chip, + int lvl_sel) +{ + int rc = 0; + int prev_therm_lvl; + + if (!chip->thermal_mitigation) { + pr_err("Thermal mitigation not supported\n"); + return -EINVAL; + } + + if (lvl_sel < 0) { + pr_err("Unsupported level selected %d\n", lvl_sel); + return -EINVAL; + } + + if (lvl_sel >= chip->thermal_levels) { + pr_err("Unsupported level selected %d forcing %d\n", lvl_sel, + chip->thermal_levels - 1); + lvl_sel = chip->thermal_levels - 1; + } + + if (lvl_sel == chip->therm_lvl_sel) + return 0; + + mutex_lock(&chip->current_change_lock); + prev_therm_lvl = chip->therm_lvl_sel; + chip->therm_lvl_sel = lvl_sel; + + if (chip->therm_lvl_sel == (chip->thermal_levels - 1)) { + rc = smb1360_set_minimum_usb_current(chip); + if (rc) + pr_err("Couldn't set USB current to minimum rc = %d\n", + rc); + } else { + rc = smb1360_set_appropriate_usb_current(chip); + if (rc) + pr_err("Couldn't set USB current rc = %d\n", rc); + } + + mutex_unlock(&chip->current_change_lock); + return rc; +} + +static enum power_supply_property smb1360_usb_properties[] = { + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_CURRENT_MAX, + POWER_SUPPLY_PROP_TYPE, + POWER_SUPPLY_PROP_REAL_TYPE, + POWER_SUPPLY_PROP_SDP_CURRENT_MAX, +}; + +static int smb1360_usb_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + int is_battery_charging = 0; + struct smb1360_chip *chip = power_supply_get_drvdata(psy); + + switch (psp) { + case POWER_SUPPLY_PROP_SDP_CURRENT_MAX: + case POWER_SUPPLY_PROP_CURRENT_MAX: + val->intval = chip->usb_psy_ma * 1000; + break; + case POWER_SUPPLY_PROP_PRESENT: + val->intval = chip->usb_present; + break; + case POWER_SUPPLY_PROP_ONLINE: + is_battery_charging = smb1360_get_prop_batt_status(chip); + val->intval = chip->usb_present && + (is_battery_charging == POWER_SUPPLY_STATUS_CHARGING); + break; + case POWER_SUPPLY_PROP_REAL_TYPE: + val->intval = POWER_SUPPLY_TYPE_UNKNOWN; + if (chip->usb_present && + 
(chip->usb_supply_type != POWER_SUPPLY_TYPE_UNKNOWN)) + val->intval = chip->usb_supply_type; + break; + case POWER_SUPPLY_PROP_TYPE: + val->intval = POWER_SUPPLY_TYPE_USB; + if (chip->usb_present && + (chip->usb_supply_type != POWER_SUPPLY_TYPE_UNKNOWN)) + val->intval = chip->usb_supply_type; + break; + default: + return -EINVAL; + } + return 0; +} + +static int smb1360_usb_set_property(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val) +{ + struct smb1360_chip *chip = power_supply_get_drvdata(psy); + int rc = 0; + + switch (psp) { + case POWER_SUPPLY_PROP_SDP_CURRENT_MAX: + case POWER_SUPPLY_PROP_CURRENT_MAX: + chip->usb_psy_ma = val->intval / 1000; + rc = smb1360_set_appropriate_usb_current(chip); + break; + case POWER_SUPPLY_PROP_TYPE: + case POWER_SUPPLY_PROP_REAL_TYPE: + chip->usb_supply_type = val->intval; + break; + default: + return -EINVAL; + } + + power_supply_changed(psy); + return 0; +} + +static int smb1360_usb_is_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + switch (psp) { + case POWER_SUPPLY_PROP_CURRENT_MAX: + return 1; + default: + break; + } + return 0; +} + + +static int smb1360_battery_set_property(struct power_supply *psy, + enum power_supply_property prop, + const union power_supply_propval *val) +{ + struct smb1360_chip *chip = power_supply_get_drvdata(psy); + + switch (prop) { + case POWER_SUPPLY_PROP_CHARGING_ENABLED: + smb1360_charging_disable(chip, USER, !val->intval); + if (chip->parallel_charging) + smb1360_parallel_charger_enable(chip, + PARALLEL_USER, val->intval); + power_supply_changed(chip->batt_psy); + power_supply_changed(chip->usb_psy); + break; + case POWER_SUPPLY_PROP_CAPACITY: + chip->fake_battery_soc = val->intval; + pr_info("fake_soc set to %d\n", chip->fake_battery_soc); + power_supply_changed(chip->batt_psy); + break; + case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL: + smb1360_system_temp_level_set(chip, val->intval); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int smb1360_battery_is_writeable(struct power_supply *psy, + enum power_supply_property prop) +{ + int rc; + + switch (prop) { + case POWER_SUPPLY_PROP_CHARGING_ENABLED: + case POWER_SUPPLY_PROP_CAPACITY: + case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL: + rc = 1; + break; + default: + rc = 0; + break; + } + return rc; +} + +static int smb1360_battery_get_property(struct power_supply *psy, + enum power_supply_property prop, + union power_supply_propval *val) +{ + struct smb1360_chip *chip = power_supply_get_drvdata(psy); + + switch (prop) { + case POWER_SUPPLY_PROP_HEALTH: + val->intval = smb1360_get_prop_batt_health(chip); + break; + case POWER_SUPPLY_PROP_PRESENT: + val->intval = smb1360_get_prop_batt_present(chip); + break; + case POWER_SUPPLY_PROP_STATUS: + val->intval = smb1360_get_prop_batt_status(chip); + break; + case POWER_SUPPLY_PROP_CHARGING_ENABLED: + val->intval = !chip->charging_disabled_status; + break; + case POWER_SUPPLY_PROP_CHARGE_TYPE: + val->intval = smb1360_get_prop_charge_type(chip); + break; + case POWER_SUPPLY_PROP_CAPACITY: + val->intval = smb1360_get_prop_batt_capacity(chip); + break; + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: + val->intval = smb1360_get_prop_chg_full_design(chip); + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + val->intval = smb1360_get_prop_voltage_now(chip); + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: + val->intval = smb1360_get_prop_current_now(chip); + break; + case POWER_SUPPLY_PROP_RESISTANCE: + val->intval = 
smb1360_get_prop_batt_resistance(chip); + break; + case POWER_SUPPLY_PROP_TEMP: + val->intval = smb1360_get_prop_batt_temp(chip); + break; + case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL: + val->intval = chip->therm_lvl_sel; + break; + default: + return -EINVAL; + } + return 0; +} + +static int hot_hard_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + pr_debug("rt_stat = 0x%02x\n", rt_stat); + chip->batt_hot = !!rt_stat; + + if (chip->parallel_charging) { + pr_debug("%s parallel-charging\n", chip->batt_hot ? + "Disable" : "Enable"); + smb1360_parallel_charger_enable(chip, + PARALLEL_JEITA_HARD, !chip->batt_hot); + } + if (chip->hot_hysteresis) { + smb1360_stay_awake(&chip->smb1360_ws, + WAKEUP_SRC_JEITA_HYSTERSIS); + schedule_work(&chip->jeita_hysteresis_work); + } + + return 0; +} + +static int cold_hard_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + pr_debug("rt_stat = 0x%02x\n", rt_stat); + chip->batt_cold = !!rt_stat; + + if (chip->parallel_charging) { + pr_debug("%s parallel-charging\n", chip->batt_cold ? + "Disable" : "Enable"); + smb1360_parallel_charger_enable(chip, + PARALLEL_JEITA_HARD, !chip->batt_cold); + } + if (chip->cold_hysteresis) { + smb1360_stay_awake(&chip->smb1360_ws, + WAKEUP_SRC_JEITA_HYSTERSIS); + schedule_work(&chip->jeita_hysteresis_work); + } + + return 0; +} + +static void smb1360_jeita_hysteresis_work(struct work_struct *work) +{ + int rc = 0; + int hard_hot, hard_cold; + struct smb1360_chip *chip = container_of(work, + struct smb1360_chip, jeita_hysteresis_work); + + /* disable hard JEITA IRQ first */ + rc = smb1360_masked_write(chip, IRQ_CFG_REG, + IRQ_BAT_HOT_COLD_HARD_BIT, 0); + if (rc) { + pr_err("disable hard JEITA IRQ failed, rc = %d\n", rc); + goto exit_worker; + } + hard_hot = chip->otp_hot_bat_decidegc; + hard_cold = chip->otp_cold_bat_decidegc; + if (chip->batt_hot) + hard_hot -= chip->hot_hysteresis; + else if (chip->batt_cold) + hard_cold += chip->cold_hysteresis; + + rc = smb1360_set_otp_hard_jeita_threshold(chip, hard_cold, hard_hot); + if (rc) { + pr_err("set hard JEITA threshold failed\n"); + goto exit_worker; + } + pr_debug("hard cold: %d, hard hot: %d reprogramed\n", + hard_cold, hard_hot); + /* enable hard JEITA IRQ at the end */ + rc = smb1360_masked_write(chip, IRQ_CFG_REG, + IRQ_BAT_HOT_COLD_HARD_BIT, IRQ_BAT_HOT_COLD_HARD_BIT); + if (rc) + pr_err("enable hard JEITA IRQ failed\n"); +exit_worker: + smb1360_relax(&chip->smb1360_ws, WAKEUP_SRC_JEITA_HYSTERSIS); +} + +/* + * This worker thread should only be called when WRKRND_HARD_JEITA + * is set. + * It is needed to re-program JEITA soft thresholds, compensate + * target voltage and charging current manually. + * The function is required as JEITA hard thresholds can't be programmed. 
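 + * Hard-threshold hysteresis is reprogrammed separately in smb1360_jeita_hysteresis_work().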
+ */ +static void smb1360_jeita_work_fn(struct work_struct *work) +{ + int temp; + int rc = 0; + struct smb1360_chip *chip = container_of(work, + struct smb1360_chip, jeita_work.work); + + temp = smb1360_get_prop_batt_temp(chip); + + if (temp > chip->hot_bat_decidegc) { + /* battery status is hot, only config thresholds */ + rc = smb1360_set_soft_jeita_threshold(chip, + chip->warm_bat_decidegc, chip->hot_bat_decidegc); + if (rc) { + dev_err(chip->dev, "Couldn't set jeita threshold\n"); + goto end; + } + } else if (temp > chip->warm_bat_decidegc || + (temp == chip->warm_bat_decidegc && !!chip->soft_hot_rt_stat)) { + /* battery status is warm, do compensation manually */ + chip->batt_warm = true; + chip->batt_cool = false; + rc = smb1360_float_voltage_set(chip, chip->warm_bat_mv); + if (rc) { + dev_err(chip->dev, "Couldn't set float voltage\n"); + goto end; + } + rc = smb1360_set_appropriate_usb_current(chip); + if (rc) + pr_err("Couldn't set USB current\n"); + rc = smb1360_set_soft_jeita_threshold(chip, + chip->warm_bat_decidegc, chip->hot_bat_decidegc); + if (rc) { + dev_err(chip->dev, "Couldn't set jeita threshold\n"); + goto end; + } + } else if (temp > chip->cool_bat_decidegc || + (temp == chip->cool_bat_decidegc && !chip->soft_cold_rt_stat)) { + /* battery status is good, do the normal charging */ + chip->batt_warm = false; + chip->batt_cool = false; + rc = smb1360_float_voltage_set(chip, chip->vfloat_mv); + if (rc) { + dev_err(chip->dev, "Couldn't set float voltage\n"); + goto end; + } + rc = smb1360_set_appropriate_usb_current(chip); + if (rc) + pr_err("Couldn't set USB current\n"); + rc = smb1360_set_soft_jeita_threshold(chip, + chip->cool_bat_decidegc, chip->warm_bat_decidegc); + if (rc) { + dev_err(chip->dev, "Couldn't set jeita threshold\n"); + goto end; + } + } else if (temp > chip->cold_bat_decidegc) { + /* battery status is cool, do compensation manually */ + chip->batt_cool = true; + chip->batt_warm = false; + rc = smb1360_float_voltage_set(chip, chip->cool_bat_mv); + if (rc) { + dev_err(chip->dev, "Couldn't set float voltage\n"); + goto end; + } + rc = smb1360_set_soft_jeita_threshold(chip, + chip->cold_bat_decidegc, chip->cool_bat_decidegc); + if (rc) { + dev_err(chip->dev, "Couldn't set jeita threshold\n"); + goto end; + } + } else { + /* battery status is cold, only config thresholds */ + rc = smb1360_set_soft_jeita_threshold(chip, + chip->cold_bat_decidegc, chip->cool_bat_decidegc); + if (rc) { + dev_err(chip->dev, "Couldn't set jeita threshold\n"); + goto end; + } + } + + pr_debug("warm %d, cool %d, soft_cold_rt_sts %d, soft_hot_rt_sts %d, jeita supported %d, threshold_now %d %d\n", + chip->batt_warm, chip->batt_cool, !!chip->soft_cold_rt_stat, + !!chip->soft_hot_rt_stat, chip->soft_jeita_supported, + chip->soft_cold_thresh, chip->soft_hot_thresh); +end: + smb1360_relax(&chip->smb1360_ws, WAKEUP_SRC_JEITA_SOFT); +} + +static int hot_soft_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + chip->soft_hot_rt_stat = rt_stat; + pr_debug("rt_stat = 0x%02x\n", rt_stat); + if (!chip->config_hard_thresholds) + chip->batt_warm = !!rt_stat; + + if (chip->workaround_flags & WRKRND_HARD_JEITA) { + cancel_delayed_work_sync(&chip->jeita_work); + schedule_delayed_work(&chip->jeita_work, + msecs_to_jiffies(JEITA_WORK_MS)); + smb1360_stay_awake(&chip->smb1360_ws, + WAKEUP_SRC_JEITA_SOFT); + } + + if (chip->parallel_charging) { + pr_debug("%s parallel-charging\n", chip->batt_warm ? 
+ "Disable" : "Enable"); + smb1360_parallel_charger_enable(chip, + PARALLEL_JEITA_SOFT, !chip->batt_warm); + } + return 0; +} + +static int cold_soft_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + chip->soft_cold_rt_stat = rt_stat; + pr_debug("rt_stat = 0x%02x\n", rt_stat); + if (!chip->config_hard_thresholds) + chip->batt_cool = !!rt_stat; + + if (chip->workaround_flags & WRKRND_HARD_JEITA) { + cancel_delayed_work_sync(&chip->jeita_work); + schedule_delayed_work(&chip->jeita_work, + msecs_to_jiffies(JEITA_WORK_MS)); + smb1360_stay_awake(&chip->smb1360_ws, + WAKEUP_SRC_JEITA_SOFT); + } + + if (chip->parallel_charging) { + pr_debug("%s parallel-charging\n", chip->batt_cool ? + "Disable" : "Enable"); + smb1360_parallel_charger_enable(chip, + PARALLEL_JEITA_SOFT, !chip->batt_cool); + } + + return 0; +} + +static int battery_missing_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + pr_debug("rt_stat = 0x%02x\n", rt_stat); + chip->batt_present = !rt_stat; + return 0; +} + +static int vbat_low_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + pr_debug("vbat low\n"); + + return 0; +} + +static int chg_hot_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + pr_warn_ratelimited("chg hot\n"); + return 0; +} + +static int chg_term_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + pr_debug("rt_stat = 0x%02x\n", rt_stat); + chip->batt_full = !!rt_stat; + + if (chip->parallel_charging) { + pr_debug("%s parallel-charging\n", chip->batt_full ? + "Disable" : "Enable"); + smb1360_parallel_charger_enable(chip, + PARALLEL_EOC, !chip->batt_full); + } + + return 0; +} + +static int chg_fastchg_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + pr_debug("rt_stat = 0x%02x\n", rt_stat); + + return 0; +} + +static int usbin_uv_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + bool usb_present = !rt_stat; + + pr_debug("chip->usb_present = %d usb_present = %d\n", + chip->usb_present, usb_present); + if (chip->usb_present && !usb_present) { + /* USB removed */ + chip->usb_present = usb_present; + extcon_set_state_sync(chip->extcon, EXTCON_USB, false); + chip->usb_supply_type = POWER_SUPPLY_TYPE_UNKNOWN; + } + + if (!chip->usb_present && usb_present) { + /* USB inserted */ + chip->usb_present = usb_present; + extcon_set_state_sync(chip->extcon, EXTCON_USB, true); + } + power_supply_changed(chip->usb_psy); + + return 0; +} + +static int aicl_done_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + bool aicl_done = !!rt_stat; + + pr_debug("AICL done=%d\n", aicl_done); + + if (chip->parallel_charging && aicl_done) { + cancel_work_sync(&chip->parallel_work); + smb1360_stay_awake(&chip->smb1360_ws, WAKEUP_SRC_PARALLEL); + schedule_work(&chip->parallel_work); + } + + return 0; +} + +static int chg_inhibit_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + /* + * charger is inserted when the battery voltage is high + * so h/w won't start charging just yet. Treat this as + * battery full + */ + pr_debug("rt_stat = 0x%02x\n", rt_stat); + chip->batt_full = !!rt_stat; + return 0; +} + +static int delta_soc_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + pr_debug("SOC changed! - rt_stat = 0x%02x\n", rt_stat); + + return 0; +} + +static int min_soc_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + pr_debug("SOC dropped below min SOC, rt_stat = 0x%02x\n", rt_stat); + + if (chip->awake_min_soc) + rt_stat ? 
smb1360_stay_awake(&chip->smb1360_ws, + WAKEUP_SRC_MIN_SOC) : + smb1360_relax(&chip->smb1360_ws, + WAKEUP_SRC_MIN_SOC); + + return 0; +} + +static int empty_soc_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + pr_debug("SOC empty! rt_stat = 0x%02x\n", rt_stat); + + if (!chip->empty_soc_disabled) { + if (rt_stat) { + chip->empty_soc = true; + smb1360_stay_awake(&chip->smb1360_ws, + WAKEUP_SRC_EMPTY_SOC); + pr_warn_ratelimited("SOC is 0\n"); + } else { + chip->empty_soc = false; + smb1360_relax(&chip->smb1360_ws, + WAKEUP_SRC_EMPTY_SOC); + } + } + + return 0; +} + +static int full_soc_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + if (rt_stat) + pr_debug("SOC is 100\n"); + + return 0; +} + +static int fg_access_allowed_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + pr_debug("stat=%d\n", !!rt_stat); + + if (rt_stat & FG_ACCESS_ALLOWED_BIT) { + pr_debug("FG access granted\n"); + complete_all(&chip->fg_mem_access_granted); + } + + return 0; +} + +static int batt_id_complete_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + pr_debug("batt_id = %x\n", (rt_stat & BATT_ID_RESULT_BIT) + >> BATT_ID_SHIFT); + + return 0; +} + +static int smb1360_adjust_current_gain(struct smb1360_chip *chip, + int gain_factor) +{ + int i, rc; + int64_t current_gain, new_current_gain; + u16 reg_value1 = 0, reg_value2 = 0; + u8 reg[4] = {0x1D, 0x00, 0x1E, 0x00}; + int otp_reg = 0; + + if (!chip->current_gain_otp_reg) { + otp_reg = smb1360_alloc_otp_backup_register(chip, + ARRAY_SIZE(reg), OTP_BACKUP_FG_USE); + if (otp_reg <= 0) { + pr_err("OTP reg allocation fail for adjusting current gain\n"); + return otp_reg; + } + chip->current_gain_otp_reg = otp_reg; + } else { + otp_reg = chip->current_gain_otp_reg; + } + pr_debug("current_gain_otp_reg = 0x%x\n", chip->current_gain_otp_reg); + + if (gain_factor) { + rc = smb1360_fg_read(chip, CURRENT_GAIN_LSB_REG, &reg[1]); + if (rc) { + pr_err("Unable to read CURRENT_GAIN_LSB_REG rc=%d\n", + rc); + return rc; + } + + rc = smb1360_fg_read(chip, CURRENT_GAIN_MSB_REG, &reg[3]); + if (rc) { + pr_err("Unable to read CURRENT_GAIN_MSB_REG rc=%d\n", + rc); + return rc; + } + + reg_value1 = (reg[3] << 8) | reg[1]; + current_gain = float_decode(reg_value1); + new_current_gain = MICRO_UNIT + (gain_factor * current_gain); + reg_value2 = float_encode(new_current_gain); + reg[1] = reg_value2 & 0xFF; + reg[3] = (reg_value2 & 0xFF00) >> 8; + pr_debug("current_gain_reg=0x%x current_gain_decoded=%lld new_current_gain_decoded=%lld new_current_gain_reg=0x%x\n", + reg_value1, current_gain, new_current_gain, reg_value2); + + for (i = 0; i < ARRAY_SIZE(reg); i++) { + pr_debug("Writing reg_add=%x value=%x\n", + otp_reg + i, reg[i]); + + rc = smb1360_fg_write(chip, (otp_reg + i), reg[i]); + if (rc) { + pr_err("Write FG address 0x%x failed, rc = %d\n", + otp_reg + i, rc); + return rc; + } + } + rc = smb1360_otp_backup_alg_update(chip); + if (rc) { + pr_err("Update OTP backup algorithm failed\n"); + return rc; + } + } else { + pr_debug("Disabling gain correction\n"); + rc = smb1360_fg_write(chip, 0xF0, 0x00); + if (rc) { + pr_err("Write fg address 0x%x failed, rc = %d\n", + 0xF0, rc); + return rc; + } + } + + return 0; +} + +static int smb1360_otp_gain_config(struct smb1360_chip *chip, int gain_factor) +{ + int rc = 0; + + rc = smb1360_enable_fg_access(chip); + if (rc) { + pr_err("Couldn't request FG access rc = %d\n", rc); + return rc; + } + chip->fg_access_type = FG_ACCESS_CFG; + + rc = smb1360_select_fg_i2c_address(chip); + if (rc) { + pr_err("Unable to set FG access I2C address\n"); + goto
restore_fg; + } + + rc = smb1360_adjust_current_gain(chip, gain_factor); + if (rc) { + pr_err("Unable to modify current gain rc=%d\n", rc); + goto restore_fg; + } + + rc = smb1360_masked_write(chip, CFG_FG_BATT_CTRL_REG, + CFG_FG_OTP_BACK_UP_ENABLE, CFG_FG_OTP_BACK_UP_ENABLE); + if (rc) { + pr_err("Write reg 0x0E failed, rc = %d\n", rc); + goto restore_fg; + } + +restore_fg: + rc = smb1360_disable_fg_access(chip); + if (rc) { + pr_err("Couldn't disable FG access rc = %d\n", rc); + return rc; + } + + return rc; +} + +static int smb1360_otg_disable(struct smb1360_chip *chip) +{ + int rc; + + rc = smb1360_masked_write(chip, CMD_CHG_REG, CMD_OTG_EN_BIT, 0); + if (rc) { + pr_err("Couldn't disable OTG mode rc=%d\n", rc); + return rc; + } + + mutex_lock(&chip->otp_gain_lock); + /* Disable current gain configuration */ + if (chip->otg_fet_present && chip->fet_gain_enabled) { + /* Disable FET */ + gpio_set_value(chip->otg_fet_enable_gpio, 1); + rc = smb1360_otp_gain_config(chip, 0); + if (rc < 0) + pr_err("Couldn't config OTP gain config rc=%d\n", rc); + else + chip->fet_gain_enabled = false; + } + mutex_unlock(&chip->otp_gain_lock); + + return rc; +} + +static int otg_fail_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + int rc; + + pr_debug("OTG Failed stat=%d\n", rt_stat); + rc = smb1360_otg_disable(chip); + if (rc) + pr_err("Couldn't disable OTG mode rc=%d\n", rc); + + return 0; +} + +static int otg_oc_handler(struct smb1360_chip *chip, u8 rt_stat) +{ + int rc; + + pr_debug("OTG over-current stat=%d\n", rt_stat); + rc = smb1360_otg_disable(chip); + if (rc) + pr_err("Couldn't disable OTG mode rc=%d\n", rc); + + return 0; +} + +struct smb_irq_info { + const char *name; + int (*smb_irq)(struct smb1360_chip *chip, + u8 rt_stat); + int high; + int low; +}; + +struct irq_handler_info { + u8 stat_reg; + u8 val; + u8 prev_val; + struct smb_irq_info irq_info[4]; +}; + +static struct irq_handler_info handlers[] = { + {IRQ_A_REG, 0, 0, + { + { + .name = "cold_soft", + .smb_irq = cold_soft_handler, + }, + { + .name = "hot_soft", + .smb_irq = hot_soft_handler, + }, + { + .name = "cold_hard", + .smb_irq = cold_hard_handler, + }, + { + .name = "hot_hard", + .smb_irq = hot_hard_handler, + }, + }, + }, + {IRQ_B_REG, 0, 0, + { + { + .name = "chg_hot", + .smb_irq = chg_hot_handler, + }, + { + .name = "vbat_low", + .smb_irq = vbat_low_handler, + }, + { + .name = "battery_missing", + .smb_irq = battery_missing_handler, + }, + { + .name = "battery_missing", + .smb_irq = battery_missing_handler, + }, + }, + }, + {IRQ_C_REG, 0, 0, + { + { + .name = "chg_term", + .smb_irq = chg_term_handler, + }, + { + .name = "taper", + }, + { + .name = "recharge", + }, + { + .name = "fast_chg", + .smb_irq = chg_fastchg_handler, + }, + }, + }, + {IRQ_D_REG, 0, 0, + { + { + .name = "prechg_timeout", + }, + { + .name = "safety_timeout", + }, + { + .name = "aicl_done", + .smb_irq = aicl_done_handler, + }, + { + .name = "battery_ov", + }, + }, + }, + {IRQ_E_REG, 0, 0, + { + { + .name = "usbin_uv", + .smb_irq = usbin_uv_handler, + }, + { + .name = "usbin_ov", + }, + { + .name = "unused", + }, + { + .name = "chg_inhibit", + .smb_irq = chg_inhibit_handler, + }, + }, + }, + {IRQ_F_REG, 0, 0, + { + { + .name = "power_ok", + }, + { + .name = "unused", + }, + { + .name = "otg_fail", + .smb_irq = otg_fail_handler, + }, + { + .name = "otg_oc", + .smb_irq = otg_oc_handler, + }, + }, + }, + {IRQ_G_REG, 0, 0, + { + { + .name = "delta_soc", + .smb_irq = delta_soc_handler, + }, + { + .name = "chg_error", + }, + { + .name = "wd_timeout", + }, + { + 
.name = "unused", + }, + }, + }, + {IRQ_H_REG, 0, 0, + { + { + .name = "min_soc", + .smb_irq = min_soc_handler, + }, + { + .name = "max_soc", + }, + { + .name = "empty_soc", + .smb_irq = empty_soc_handler, + }, + { + .name = "full_soc", + .smb_irq = full_soc_handler, + }, + }, + }, + {IRQ_I_REG, 0, 0, + { + { + .name = "fg_access_allowed", + .smb_irq = fg_access_allowed_handler, + }, + { + .name = "fg_data_recovery", + }, + { + .name = "batt_id_complete", + .smb_irq = batt_id_complete_handler, + }, + }, + }, +}; + +#define IRQ_LATCHED_MASK 0x02 +#define IRQ_STATUS_MASK 0x01 +#define BATT_ID_LATCHED_MASK 0x08 +#define BATT_ID_STATUS_MASK 0x07 +#define BITS_PER_IRQ 2 +static irqreturn_t smb1360_stat_handler(int irq, void *dev_id) +{ + struct smb1360_chip *chip = dev_id; + int i, j; + u8 triggered; + u8 changed; + u8 rt_stat, prev_rt_stat, irq_latched_mask, irq_status_mask; + int rc; + int handler_count = 0; + + mutex_lock(&chip->irq_complete); + chip->irq_waiting = true; + if (!chip->resume_completed) { + dev_dbg(chip->dev, "IRQ triggered before device-resume\n"); + if (!chip->irq_disabled) { + disable_irq_nosync(irq); + chip->irq_disabled = true; + } + mutex_unlock(&chip->irq_complete); + return IRQ_HANDLED; + } + chip->irq_waiting = false; + + for (i = 0; i < ARRAY_SIZE(handlers); i++) { + rc = smb1360_read(chip, handlers[i].stat_reg, + &handlers[i].val); + if (rc < 0) { + dev_err(chip->dev, "Couldn't read %d rc = %d\n", + handlers[i].stat_reg, rc); + continue; + } + + for (j = 0; j < ARRAY_SIZE(handlers[i].irq_info); j++) { + if (handlers[i].stat_reg == IRQ_I_REG && j == 2) { + irq_latched_mask = BATT_ID_LATCHED_MASK; + irq_status_mask = BATT_ID_STATUS_MASK; + } else { + irq_latched_mask = IRQ_LATCHED_MASK; + irq_status_mask = IRQ_STATUS_MASK; + } + triggered = handlers[i].val + & (irq_latched_mask << (j * BITS_PER_IRQ)); + rt_stat = handlers[i].val + & (irq_status_mask << (j * BITS_PER_IRQ)); + prev_rt_stat = handlers[i].prev_val + & (irq_status_mask << (j * BITS_PER_IRQ)); + changed = prev_rt_stat ^ rt_stat; + + if (triggered || changed) + rt_stat ? handlers[i].irq_info[j].high++ : + handlers[i].irq_info[j].low++; + + if ((triggered || changed) + && handlers[i].irq_info[j].smb_irq != NULL) { + handler_count++; + rc = handlers[i].irq_info[j].smb_irq(chip, + rt_stat); + if (rc < 0) + dev_err(chip->dev, + "Couldn't handle %d irq for reg 0x%02x rc = %d\n", + j, handlers[i].stat_reg, rc); + } + } + handlers[i].prev_val = handlers[i].val; + } + + pr_debug("handler count = %d\n", handler_count); + if (handler_count) + power_supply_changed(chip->batt_psy); + + mutex_unlock(&chip->irq_complete); + + return IRQ_HANDLED; +} + +static irqreturn_t smb1360_usb_id_irq_handler(int irq, void *dev_id) +{ + struct smb1360_chip *chip = dev_id; + int rc = 0; + bool id_state; + + id_state = gpio_get_value(chip->usb_id_gpio); + + rc = smb1360_masked_write(chip, CMD_CHG_REG, CMD_OTG_EN_BIT, + !id_state ? CMD_OTG_EN_BIT : 0); + if (rc) { + pr_err("Couldn't enable OTG mode rc=%d\n", rc); + return IRQ_HANDLED; + } + extcon_set_state_sync(chip->extcon, EXTCON_USB_HOST, + !id_state ? 
true : false); + + pr_debug("usb_id_irq triggered, id_state = %d\n", id_state); + + return IRQ_HANDLED; +} + +static int show_irq_count(struct seq_file *m, void *data) +{ + int i, j, total = 0; + + for (i = 0; i < ARRAY_SIZE(handlers); i++) + for (j = 0; j < 4; j++) { + if (!handlers[i].irq_info[j].name) + continue; + seq_printf(m, "%s=%d\t(high=%d low=%d)\n", + handlers[i].irq_info[j].name, + handlers[i].irq_info[j].high + + handlers[i].irq_info[j].low, + handlers[i].irq_info[j].high, + handlers[i].irq_info[j].low); + total += (handlers[i].irq_info[j].high + + handlers[i].irq_info[j].low); + } + + seq_printf(m, "\n\tTotal = %d\n", total); + + return 0; +} + +static int irq_count_debugfs_open(struct inode *inode, struct file *file) +{ + struct smb1360_chip *chip = inode->i_private; + + return single_open(file, show_irq_count, chip); +} + +static const struct file_operations irq_count_debugfs_ops = { + .owner = THIS_MODULE, + .open = irq_count_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int get_reg(void *data, u64 *val) +{ + struct smb1360_chip *chip = data; + int rc; + u8 temp; + + rc = smb1360_read(chip, chip->peek_poke_address, &temp); + if (rc < 0) { + dev_err(chip->dev, + "Couldn't read reg %x rc = %d\n", + chip->peek_poke_address, rc); + return -EAGAIN; + } + *val = temp; + return 0; +} + +static int set_reg(void *data, u64 val) +{ + struct smb1360_chip *chip = data; + int rc; + u8 temp; + + temp = (u8) val; + rc = smb1360_write(chip, chip->peek_poke_address, temp); + if (rc < 0) { + dev_err(chip->dev, + "Couldn't write 0x%02x to 0x%02x rc= %d\n", + temp, chip->peek_poke_address, rc); + return -EAGAIN; + } + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(poke_poke_debug_ops, get_reg, set_reg, "0x%02llx\n"); + +static int fg_get_reg(void *data, u64 *val) +{ + struct smb1360_chip *chip = data; + int rc; + u8 temp; + + rc = smb1360_select_fg_i2c_address(chip); + if (rc) { + pr_err("Unable to set FG access I2C address\n"); + return -EINVAL; + } + + rc = smb1360_fg_read(chip, chip->fg_peek_poke_address, &temp); + if (rc < 0) { + dev_err(chip->dev, + "Couldn't read reg %x rc = %d\n", + chip->fg_peek_poke_address, rc); + return -EAGAIN; + } + *val = temp; + return 0; +} + +static int fg_set_reg(void *data, u64 val) +{ + struct smb1360_chip *chip = data; + int rc; + u8 temp; + + rc = smb1360_select_fg_i2c_address(chip); + if (rc) { + pr_err("Unable to set FG access I2C address\n"); + return -EINVAL; + } + + temp = (u8) val; + rc = smb1360_fg_write(chip, chip->fg_peek_poke_address, temp); + if (rc < 0) { + dev_err(chip->dev, + "Couldn't write 0x%02x to 0x%02x rc= %d\n", + temp, chip->fg_peek_poke_address, rc); + return -EAGAIN; + } + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fg_poke_poke_debug_ops, fg_get_reg, + fg_set_reg, "0x%02llx\n"); + +#define LAST_CNFG_REG 0x17 +static int show_cnfg_regs(struct seq_file *m, void *data) +{ + struct smb1360_chip *chip = m->private; + int rc; + u8 reg; + u8 addr; + + for (addr = 0; addr <= LAST_CNFG_REG; addr++) { + rc = smb1360_read(chip, addr, &reg); + if (!rc) + seq_printf(m, "0x%02x = 0x%02x\n", addr, reg); + } + + return 0; +} + +static int cnfg_debugfs_open(struct inode *inode, struct file *file) +{ + struct smb1360_chip *chip = inode->i_private; + + return single_open(file, show_cnfg_regs, chip); +} + +static const struct file_operations cnfg_debugfs_ops = { + .owner = THIS_MODULE, + .open = cnfg_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#define
FIRST_CMD_REG 0x40 +#define LAST_CMD_REG 0x42 +static int show_cmd_regs(struct seq_file *m, void *data) +{ + struct smb1360_chip *chip = m->private; + int rc; + u8 reg; + u8 addr; + + for (addr = FIRST_CMD_REG; addr <= LAST_CMD_REG; addr++) { + rc = smb1360_read(chip, addr, ®); + if (!rc) + seq_printf(m, "0x%02x = 0x%02x\n", addr, reg); + } + + return 0; +} + +static int cmd_debugfs_open(struct inode *inode, struct file *file) +{ + struct smb1360_chip *chip = inode->i_private; + + return single_open(file, show_cmd_regs, chip); +} + +static const struct file_operations cmd_debugfs_ops = { + .owner = THIS_MODULE, + .open = cmd_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#define FIRST_STATUS_REG 0x48 +#define LAST_STATUS_REG 0x4B +static int show_status_regs(struct seq_file *m, void *data) +{ + struct smb1360_chip *chip = m->private; + int rc; + u8 reg; + u8 addr; + + for (addr = FIRST_STATUS_REG; addr <= LAST_STATUS_REG; addr++) { + rc = smb1360_read(chip, addr, ®); + if (!rc) + seq_printf(m, "0x%02x = 0x%02x\n", addr, reg); + } + + return 0; +} + +static int status_debugfs_open(struct inode *inode, struct file *file) +{ + struct smb1360_chip *chip = inode->i_private; + + return single_open(file, show_status_regs, chip); +} + +static const struct file_operations status_debugfs_ops = { + .owner = THIS_MODULE, + .open = status_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#define FIRST_IRQ_REG 0x50 +#define LAST_IRQ_REG 0x58 +static int show_irq_stat_regs(struct seq_file *m, void *data) +{ + struct smb1360_chip *chip = m->private; + int rc; + u8 reg; + u8 addr; + + for (addr = FIRST_IRQ_REG; addr <= LAST_IRQ_REG; addr++) { + rc = smb1360_read(chip, addr, ®); + if (!rc) + seq_printf(m, "0x%02x = 0x%02x\n", addr, reg); + } + + return 0; +} + +static int irq_stat_debugfs_open(struct inode *inode, struct file *file) +{ + struct smb1360_chip *chip = inode->i_private; + + return single_open(file, show_irq_stat_regs, chip); +} + +static const struct file_operations irq_stat_debugfs_ops = { + .owner = THIS_MODULE, + .open = irq_stat_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int data_8(u8 *reg) +{ + return reg[0]; +} +static int data_16(u8 *reg) +{ + return (reg[1] << 8) | reg[0]; +} +static int data_24(u8 *reg) +{ + return (reg[2] << 16) | (reg[1] << 8) | reg[0]; +} +static int data_28(u8 *reg) +{ + return ((reg[3] & 0xF) << 24) | (reg[2] << 16) | + (reg[1] << 8) | reg[0]; +} +static int data_32(u8 *reg) +{ + return (reg[3] << 24) | (reg[2] << 16) | + (reg[1] << 8) | reg[0]; +} + +struct fg_regs { + int index; + int length; + char *param_name; + int (*calc_func)(u8 *index); +}; + +static struct fg_regs fg_scratch_pad[] = { + {0, 2, "v_current_predicted", data_16}, + {2, 2, "v_cutoff_predicted", data_16}, + {4, 2, "v_full_predicted", data_16}, + {6, 2, "ocv_estimate", data_16}, + {8, 2, "rslow_drop", data_16}, + {10, 2, "voltage_old", data_16}, + {12, 2, "current_old", data_16}, + {14, 4, "current_average_full", data_32}, + {18, 2, "temperature", data_16}, + {20, 2, "temp_last_track", data_16}, + {22, 2, "ESR_nominal", data_16}, + {26, 2, "Rslow", data_16}, + {28, 2, "counter_imptr", data_16}, + {30, 2, "counter_pulse", data_16}, + {32, 1, "IRQ_delta_prev", data_8}, + {33, 1, "cap_learning_counter", data_8}, + {34, 4, "Vact_int_error", data_32}, + {38, 3, "SOC_cutoff", data_24}, + {41, 3, "SOC_full", data_24}, + {44, 3, "SOC_auto_rechrge_temp", 
data_24}, + {47, 3, "Battery_SOC", data_24}, + {50, 4, "CC_SOC", data_28}, + {54, 2, "SOC_filtered", data_16}, + {56, 2, "SOC_Monotonic", data_16}, + {58, 2, "CC_SOC_coeff", data_16}, + {60, 2, "nominal_capacity", data_16}, + {62, 2, "actual_capacity", data_16}, + {68, 1, "temperature_counter", data_8}, + {69, 3, "Vbatt_filtered", data_24}, + {72, 3, "Ibatt_filtered", data_24}, + {75, 2, "Current_CC_shadow", data_16}, + {79, 2, "Ibatt_standby", data_16}, + {82, 1, "Auto_recharge_SOC_threshold", data_8}, + {83, 2, "System_cutoff_voltage", data_16}, + {85, 2, "System_CC_to_CV_voltage", data_16}, + {87, 2, "System_term_current", data_16}, + {89, 2, "System_fake_term_current", data_16}, + {91, 2, "thermistor_c1_coeff", data_16}, +}; + +static struct fg_regs fg_cfg[] = { + {0, 2, "ESR_actual", data_16}, + {4, 1, "IRQ_SOC_max", data_8}, + {5, 1, "IRQ_SOC_min", data_8}, + {6, 1, "IRQ_volt_empty", data_8}, + {7, 1, "Temp_external", data_8}, + {8, 1, "IRQ_delta_threshold", data_8}, + {9, 1, "JIETA_soft_cold", data_8}, + {10, 1, "JIETA_soft_hot", data_8}, + {11, 1, "IRQ_volt_min", data_8}, + {14, 2, "ESR_sys_replace", data_16}, +}; + +static struct fg_regs fg_shdw[] = { + {0, 1, "Latest_battery_info", data_8}, + {1, 1, "Latest_Msys_SOC", data_8}, + {2, 2, "Battery_capacity", data_16}, + {4, 2, "Rslow_drop", data_16}, + {6, 1, "Latest_SOC", data_8}, + {7, 1, "Latest_Cutoff_SOC", data_8}, + {8, 1, "Latest_full_SOC", data_8}, + {9, 2, "Voltage_shadow", data_16}, + {11, 2, "Current_shadow", data_16}, + {13, 2, "Latest_temperature", data_16}, + {15, 1, "Latest_system_sbits", data_8}, +}; + +#define FIRST_FG_CFG_REG 0x20 +#define LAST_FG_CFG_REG 0x2F +#define FIRST_FG_SHDW_REG 0x60 +#define LAST_FG_SHDW_REG 0x6F +#define FG_SCRATCH_PAD_MAX 93 +#define FG_SCRATCH_PAD_BASE_REG 0x80 +#define SMB1360_I2C_READ_LENGTH 32 + +static int smb1360_check_cycle_stretch(struct smb1360_chip *chip) +{ + int rc = 0; + u8 reg; + + rc = smb1360_read(chip, STATUS_4_REG, ®); + if (rc) { + pr_err("Unable to read status regiseter\n"); + } else if (reg & CYCLE_STRETCH_ACTIVE_BIT) { + /* clear cycle stretch */ + rc = smb1360_masked_write(chip, CMD_I2C_REG, + CYCLE_STRETCH_CLEAR_BIT, CYCLE_STRETCH_CLEAR_BIT); + if (rc) + pr_err("Unable to clear cycle stretch\n"); + } + + return rc; +} + +static int show_fg_regs(struct seq_file *m, void *data) +{ + struct smb1360_chip *chip = m->private; + int rc, i, j, rem_length; + u8 reg[FG_SCRATCH_PAD_MAX]; + + rc = smb1360_check_cycle_stretch(chip); + if (rc) + pr_err("Unable to check cycle-stretch\n"); + + rc = smb1360_enable_fg_access(chip); + if (rc) { + pr_err("Couldn't request FG access rc=%d\n", rc); + return rc; + } + + for (i = 0; i < (FG_SCRATCH_PAD_MAX / SMB1360_I2C_READ_LENGTH); i++) { + j = i * SMB1360_I2C_READ_LENGTH; + rc = smb1360_read_bytes(chip, FG_SCRATCH_PAD_BASE_REG + j, + ®[j], SMB1360_I2C_READ_LENGTH); + if (rc) { + pr_err("Couldn't read scratch registers rc=%d\n", rc); + break; + } + } + + j = i * SMB1360_I2C_READ_LENGTH; + rem_length = (FG_SCRATCH_PAD_MAX % SMB1360_I2C_READ_LENGTH); + if (rem_length) { + rc = smb1360_read_bytes(chip, FG_SCRATCH_PAD_BASE_REG + j, + ®[j], rem_length); + if (rc) + pr_err("Couldn't read scratch registers rc=%d\n", rc); + } + + rc = smb1360_disable_fg_access(chip); + if (rc) { + pr_err("Couldn't disable FG access rc=%d\n", rc); + return rc; + } + + rc = smb1360_check_cycle_stretch(chip); + if (rc) + pr_err("Unable to check cycle-stretch\n"); + + + seq_puts(m, "FG scratch-pad registers\n"); + for (i = 0; i < ARRAY_SIZE(fg_scratch_pad); i++) + 
seq_printf(m, "\t%s = %x\n", fg_scratch_pad[i].param_name, + fg_scratch_pad[i].calc_func(®[fg_scratch_pad[i].index])); + + rem_length = LAST_FG_CFG_REG - FIRST_FG_CFG_REG + 1; + rc = smb1360_read_bytes(chip, FIRST_FG_CFG_REG, + ®[0], rem_length); + if (rc) + pr_err("Couldn't read config registers rc=%d\n", rc); + + seq_puts(m, "FG config registers\n"); + for (i = 0; i < ARRAY_SIZE(fg_cfg); i++) + seq_printf(m, "\t%s = %x\n", fg_cfg[i].param_name, + fg_cfg[i].calc_func(®[fg_cfg[i].index])); + + rem_length = LAST_FG_SHDW_REG - FIRST_FG_SHDW_REG + 1; + rc = smb1360_read_bytes(chip, FIRST_FG_SHDW_REG, + ®[0], rem_length); + if (rc) + pr_err("Couldn't read shadow registers rc=%d\n", rc); + + seq_puts(m, "FG shadow registers\n"); + for (i = 0; i < ARRAY_SIZE(fg_shdw); i++) + seq_printf(m, "\t%s = %x\n", fg_shdw[i].param_name, + fg_shdw[i].calc_func(®[fg_shdw[i].index])); + + return rc; +} + +static int fg_regs_open(struct inode *inode, struct file *file) +{ + struct smb1360_chip *chip = inode->i_private; + + return single_open(file, show_fg_regs, chip); +} + +static const struct file_operations fg_regs_debugfs_ops = { + .owner = THIS_MODULE, + .open = fg_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int smb1360_otg_regulator_enable(struct regulator_dev *rdev) +{ + int rc = 0; + struct smb1360_chip *chip = rdev_get_drvdata(rdev); + + rc = smb1360_masked_write(chip, CMD_CHG_REG, CMD_OTG_EN_BIT, + CMD_OTG_EN_BIT); + if (rc) { + pr_err("Couldn't enable OTG mode rc=%d\n", rc); + return rc; + } + + pr_debug("OTG mode enabled\n"); + /* Enable current gain configuration */ + mutex_lock(&chip->otp_gain_lock); + if (chip->otg_fet_present) { + /* Enable FET */ + gpio_set_value(chip->otg_fet_enable_gpio, 0); + rc = smb1360_otp_gain_config(chip, 3); + if (rc < 0) + pr_err("Couldn't config OTP gain config rc=%d\n", rc); + else + chip->fet_gain_enabled = true; + } + mutex_unlock(&chip->otp_gain_lock); + + return rc; +} + +static int smb1360_otg_regulator_disable(struct regulator_dev *rdev) +{ + int rc = 0; + struct smb1360_chip *chip = rdev_get_drvdata(rdev); + + rc = smb1360_otg_disable(chip); + if (rc) + pr_err("Couldn't disable OTG regulator rc=%d\n", rc); + + pr_debug("OTG mode disabled\n"); + return rc; +} + +static int smb1360_otg_regulator_is_enable(struct regulator_dev *rdev) +{ + u8 reg = 0; + int rc = 0; + struct smb1360_chip *chip = rdev_get_drvdata(rdev); + + rc = smb1360_read(chip, CMD_CHG_REG, ®); + if (rc) { + pr_err("Couldn't read OTG enable bit rc=%d\n", rc); + return rc; + } + + return (reg & CMD_OTG_EN_BIT) ? 
1 : 0; +} + +static struct regulator_ops smb1360_otg_reg_ops = { + .enable = smb1360_otg_regulator_enable, + .disable = smb1360_otg_regulator_disable, + .is_enabled = smb1360_otg_regulator_is_enable, +}; + +static int smb1360_regulator_init(struct smb1360_chip *chip) +{ + int rc = 0; + struct regulator_config cfg = {}; + + /* OTG is enabled by SMB1360 if usb-id config is defined */ + if (chip->usb_id_gpio > 0 && chip->usb_id_irq > 0) + return 0; + + chip->otg_vreg.rdesc.owner = THIS_MODULE; + chip->otg_vreg.rdesc.type = REGULATOR_VOLTAGE; + chip->otg_vreg.rdesc.ops = &smb1360_otg_reg_ops; + chip->otg_vreg.rdesc.of_match = "qcom,smb1360-vbus"; + chip->otg_vreg.rdesc.name = "qcom,smb1360-vbus"; + + cfg.dev = chip->dev; + cfg.driver_data = chip; + cfg.of_node = chip->dev->of_node; + + chip->otg_vreg.rdev = regulator_register( + &chip->otg_vreg.rdesc, &cfg); + if (IS_ERR(chip->otg_vreg.rdev)) { + rc = PTR_ERR(chip->otg_vreg.rdev); + chip->otg_vreg.rdev = NULL; + if (rc != -EPROBE_DEFER) + dev_err(chip->dev, + "OTG reg failed, rc=%d\n", rc); + } + + return rc; +} + +static int smb1360_check_batt_profile(struct smb1360_chip *chip) +{ + int rc, i, timeout = 50; + u8 reg = 0, loaded_profile, new_profile = 0, bid_mask; + + if (!chip->connected_rid) { + pr_debug("Skip batt-profile loading connected_rid=%d\n", + chip->connected_rid); + return 0; + } + + rc = smb1360_read(chip, SHDW_FG_BATT_STATUS, ®); + if (rc) { + pr_err("Couldn't read FG_BATT_STATUS rc=%d\n", rc); + return rc; + } + + loaded_profile = !!(reg & BATTERY_PROFILE_BIT) ? + BATTERY_PROFILE_B : BATTERY_PROFILE_A; + + pr_debug("fg_batt_status=%x loaded_profile=%d\n", reg, loaded_profile); + + for (i = 0; i < BATTERY_PROFILE_MAX; i++) { + pr_debug("profile=%d profile_rid=%d connected_rid=%d\n", i, + chip->profile_rid[i], + chip->connected_rid); + if (abs(chip->profile_rid[i] - chip->connected_rid) < + (div_u64(chip->connected_rid, 10))) + break; + } + + if (i == BATTERY_PROFILE_MAX) { + pr_err("None of the battery-profiles match the connected-RID\n"); + return 0; + } + + if (i == loaded_profile) { + pr_debug("Loaded Profile-RID == connected-RID\n"); + return 0; + } + + new_profile = (loaded_profile == BATTERY_PROFILE_A) ? + BATTERY_PROFILE_B : BATTERY_PROFILE_A; + bid_mask = (new_profile == BATTERY_PROFILE_A) ? 
+ BATT_PROFILEA_MASK : BATT_PROFILEB_MASK; + pr_info("Loaded Profile-RID != connected-RID, switch-profile old_profile=%d new_profile=%d\n", + loaded_profile, new_profile); + + /* set the BID mask */ + rc = smb1360_masked_write(chip, CFG_FG_BATT_CTRL_REG, + BATT_PROFILE_SELECT_MASK, bid_mask); + if (rc) { + pr_err("Couldn't reset battery-profile rc=%d\n", rc); + return rc; + } + + rc = smb1360_enable_fg_access(chip); + if (rc) { + pr_err("FG access timed-out, rc = %d\n", rc); + return rc; + } + /* delay after handshaking for profile-switch to continue */ + msleep(1500); + + rc = smb1360_force_fg_reset(chip); + if (rc) { + pr_err("Couldn't reset FG rc=%d\n", rc); + goto restore_fg; + } + + rc = smb1360_disable_fg_access(chip); + if (rc) { + pr_err("disable FG access failed, rc = %d\n", rc); + return rc; + } + + timeout = 10; + while (timeout) { + /* delay for profile to change */ + msleep(500); + rc = smb1360_read(chip, SHDW_FG_BATT_STATUS, ®); + if (rc) { + pr_err("Could't read FG_BATT_STATUS rc=%d\n", rc); + return rc; + } + + reg = !!(reg & BATTERY_PROFILE_BIT); + if (reg == new_profile) { + pr_info("New profile=%d loaded\n", new_profile); + break; + } + timeout--; + } + + if (!timeout) { + pr_err("New profile could not be loaded\n"); + return -EBUSY; + } + + return 0; + +restore_fg: + smb1360_disable_fg_access(chip); + return rc; +} + +#define UPDATE_IRQ_STAT(irq_reg, value) \ + handlers[irq_reg - IRQ_A_REG].prev_val = value + +static int determine_initial_status(struct smb1360_chip *chip) +{ + int rc; + u8 reg = 0; + bool id_state; + + /* + * It is okay to read the IRQ status as the irq's are + * not registered yet. + */ + chip->batt_present = true; + rc = smb1360_read(chip, IRQ_B_REG, ®); + if (rc < 0) { + dev_err(chip->dev, "Couldn't read IRQ_B_REG rc = %d\n", rc); + return rc; + } + UPDATE_IRQ_STAT(IRQ_B_REG, reg); + + if (reg & IRQ_B_BATT_TERMINAL_BIT || reg & IRQ_B_BATT_MISSING_BIT) + chip->batt_present = false; + + rc = smb1360_read(chip, IRQ_C_REG, ®); + if (rc) { + dev_err(chip->dev, "Couldn't read IRQ_C_REG rc = %d\n", rc); + return rc; + } + UPDATE_IRQ_STAT(IRQ_C_REG, reg); + + if (reg & IRQ_C_CHG_TERM) + chip->batt_full = true; + + rc = smb1360_read(chip, IRQ_A_REG, ®); + if (rc < 0) { + dev_err(chip->dev, "Couldn't read irq A rc = %d\n", rc); + return rc; + } + UPDATE_IRQ_STAT(IRQ_A_REG, reg); + + if (chip->workaround_flags & WRKRND_HARD_JEITA) { + schedule_delayed_work(&chip->jeita_work, 0); + } else { + if (reg & IRQ_A_HOT_HARD_BIT) + chip->batt_hot = true; + if (reg & IRQ_A_COLD_HARD_BIT) + chip->batt_cold = true; + if (!chip->config_hard_thresholds) { + if (reg & IRQ_A_HOT_SOFT_BIT) + chip->batt_warm = true; + if (reg & IRQ_A_COLD_SOFT_BIT) + chip->batt_cool = true; + } + } + + rc = smb1360_read(chip, IRQ_E_REG, ®); + if (rc < 0) { + dev_err(chip->dev, "Couldn't read irq E rc = %d\n", rc); + return rc; + } + UPDATE_IRQ_STAT(IRQ_E_REG, reg); + + /* Check usb charger presence and notify */ + chip->usb_present = (reg & IRQ_E_USBIN_UV_BIT) ? false : true; + /* USB removed */ + if (!chip->usb_present) + extcon_set_state_sync(chip->extcon, EXTCON_USB, false); + /* USB inserted */ + else + extcon_set_state_sync(chip->extcon, EXTCON_USB, true); + + pr_debug("usb %s at boot\n", chip->usb_present ? 
"present" : "absent"); + + /*check otg presence and notify*/ + if (chip->usb_id_gpio != -EINVAL) { + id_state = gpio_get_value(chip->usb_id_gpio); + /* usb-id is low, enable OTG */ + if (!id_state) { + rc = smb1360_masked_write(chip, CMD_CHG_REG, + CMD_OTG_EN_BIT, CMD_OTG_EN_BIT); + if (rc) { + pr_err("Couldn't enable OTG mode rc=%d\n", rc); + return rc; + } + extcon_set_state_sync(chip->extcon, EXTCON_USB_HOST, + true); + pr_debug("OTG enabled at boot\n"); + } + } + + power_supply_changed(chip->usb_psy); + return 0; +} + +static int smb1360_fg_config(struct smb1360_chip *chip) +{ + int rc = 0, temp, fcc_mah; + u8 reg = 0, reg2[2]; + + if (chip->fg_reset_at_pon) { + int v_predicted, v_now; + + rc = smb1360_enable_fg_access(chip); + if (rc) { + pr_err("Couldn't enable FG access rc=%d\n", rc); + return rc; + } + + rc = smb1360_read_bytes(chip, VOLTAGE_PREDICTED_REG, reg2, 2); + if (rc) { + pr_err("Failed to read VOLTAGE_PREDICTED rc=%d\n", rc); + goto disable_fg_reset; + } + v_predicted = (reg2[1] << 8) | reg2[0]; + v_predicted = div_u64(v_predicted * 5000, 0x7FFF); + + rc = smb1360_read_bytes(chip, SHDW_FG_VTG_NOW, reg2, 2); + if (rc) { + pr_err("Failed to read SHDW_FG_VTG_NOW rc=%d\n", rc); + goto disable_fg_reset; + } + v_now = (reg2[1] << 8) | reg2[0]; + v_now = div_u64(v_now * 5000, 0x7FFF); + + pr_debug("v_predicted=%d v_now=%d reset_threshold=%d\n", + v_predicted, v_now, chip->fg_reset_threshold_mv); + + /* + * Reset FG if the predicted voltage is off wrt + * the real-time voltage. + */ + temp = abs(v_predicted - v_now); + if (temp >= chip->fg_reset_threshold_mv) { + pr_info("Resetting FG - v_delta=%d threshold=%d\n", + temp, chip->fg_reset_threshold_mv); + /* delay for the FG access to settle */ + msleep(1500); + rc = smb1360_force_fg_reset(chip); + if (rc) { + pr_err("Couldn't reset FG rc=%d\n", rc); + goto disable_fg_reset; + } + } +disable_fg_reset: + smb1360_disable_fg_access(chip); + } + + /* + * The below IRQ thresholds are not accessible in REV_1 + * of SMB1360. 
+ */ + if (!(chip->workaround_flags & WRKRND_FG_CONFIG_FAIL)) { + if (chip->delta_soc != -EINVAL) { + reg = abs(((chip->delta_soc * MAX_8_BITS) / 100) - 1); + pr_debug("delta_soc=%d reg=%x\n", chip->delta_soc, reg); + rc = smb1360_write(chip, SOC_DELTA_REG, reg); + if (rc) { + dev_err(chip->dev, "Couldn't write to SOC_DELTA_REG rc=%d\n", + rc); + return rc; + } + } + + if (chip->soc_min != -EINVAL) { + if (is_between(chip->soc_min, 0, 100)) { + reg = DIV_ROUND_UP(chip->soc_min * MAX_8_BITS, + 100); + pr_debug("soc_min=%d reg=%x\n", + chip->soc_min, reg); + rc = smb1360_write(chip, SOC_MIN_REG, reg); + if (rc) { + dev_err(chip->dev, "Couldn't write to SOC_MIN_REG rc=%d\n", + rc); + return rc; + } + } + } + + if (chip->soc_max != -EINVAL) { + if (is_between(chip->soc_max, 0, 100)) { + reg = DIV_ROUND_UP(chip->soc_max * MAX_8_BITS, + 100); + pr_debug("soc_max=%d reg=%x\n", + chip->soc_max, reg); + rc = smb1360_write(chip, SOC_MAX_REG, reg); + if (rc) { + dev_err(chip->dev, "Couldn't write to SOC_MAX_REG rc=%d\n", + rc); + return rc; + } + } + } + + if (chip->voltage_min_mv != -EINVAL) { + temp = (chip->voltage_min_mv - 2500) * MAX_8_BITS; + reg = DIV_ROUND_UP(temp, 2500); + pr_debug("voltage_min=%d reg=%x\n", + chip->voltage_min_mv, reg); + rc = smb1360_write(chip, VTG_MIN_REG, reg); + if (rc) { + dev_err(chip->dev, "Couldn't write to VTG_MIN_REG rc=%d\n", + rc); + return rc; + } + } + + if (chip->voltage_empty_mv != -EINVAL) { + temp = (chip->voltage_empty_mv - 2500) * MAX_8_BITS; + reg = DIV_ROUND_UP(temp, 2500); + pr_debug("voltage_empty=%d reg=%x\n", + chip->voltage_empty_mv, reg); + rc = smb1360_write(chip, VTG_EMPTY_REG, reg); + if (rc) { + dev_err(chip->dev, "Couldn't write to VTG_EMPTY_REG rc=%d\n", + rc); + return rc; + } + } + } + + /* scratch-pad register config */ + if (chip->batt_capacity_mah != -EINVAL + || chip->v_cutoff_mv != -EINVAL + || chip->fg_iterm_ma != -EINVAL + || chip->fg_ibatt_standby_ma != -EINVAL + || chip->fg_thermistor_c1_coeff != -EINVAL + || chip->fg_cc_to_cv_mv != -EINVAL + || chip->fg_auto_recharge_soc != -EINVAL) { + + rc = smb1360_enable_fg_access(chip); + if (rc) { + pr_err("Couldn't enable FG access rc=%d\n", rc); + return rc; + } + + /* Update battery capacity */ + if (chip->batt_capacity_mah != -EINVAL) { + rc = smb1360_read_bytes(chip, ACTUAL_CAPACITY_REG, + reg2, 2); + if (rc) { + pr_err("Failed to read ACTUAL CAPACITY rc=%d\n", + rc); + goto disable_fg; + } + fcc_mah = (reg2[1] << 8) | reg2[0]; + if (fcc_mah == chip->batt_capacity_mah) { + pr_debug("battery capacity correct\n"); + } else { + /* Update the battery capacity */ + reg2[1] = + (chip->batt_capacity_mah & 0xFF00) >> 8; + reg2[0] = (chip->batt_capacity_mah & 0xFF); + rc = smb1360_write_bytes(chip, + ACTUAL_CAPACITY_REG, reg2, 2); + if (rc) { + pr_err("Couldn't write batt-capacity rc=%d\n", + rc); + goto disable_fg; + } + rc = smb1360_write_bytes(chip, + NOMINAL_CAPACITY_REG, reg2, 2); + if (rc) { + pr_err("Couldn't write batt-capacity rc=%d\n", + rc); + goto disable_fg; + } + + /* Update CC to SOC COEFF */ + if (chip->cc_soc_coeff != -EINVAL) { + reg2[1] = + (chip->cc_soc_coeff & 0xFF00) >> 8; + reg2[0] = (chip->cc_soc_coeff & 0xFF); + rc = smb1360_write_bytes(chip, + CC_TO_SOC_COEFF, reg2, 2); + if (rc) { + pr_err("Couldn't write cc_soc_coeff rc=%d\n", + rc); + goto disable_fg; + } + } + } + } + + /* Update cutoff voltage for SOC = 0 */ + if (chip->v_cutoff_mv != -EINVAL) { + temp = (u16) div_u64(chip->v_cutoff_mv * 0x7FFF, 5000); + reg2[1] = (temp & 0xFF00) >> 8; + reg2[0] = temp & 0xFF; + 
rc = smb1360_write_bytes(chip, FG_SYS_CUTOFF_V_REG, + reg2, 2); + if (rc) { + pr_err("Couldn't write cutoff_mv rc=%d\n", rc); + goto disable_fg; + } + } + + /* + * Update FG iterm for SOC = 100, this value is always assumed + * to be -ve + */ + if (chip->fg_iterm_ma != -EINVAL) { + int iterm = chip->fg_iterm_ma * -1; + + temp = (s16) div_s64(iterm * 0x7FFF, 2500); + reg2[1] = (temp & 0xFF00) >> 8; + reg2[0] = temp & 0xFF; + rc = smb1360_write_bytes(chip, FG_ITERM_REG, + reg2, 2); + if (rc) { + pr_err("Couldn't write fg_iterm rc=%d\n", rc); + goto disable_fg; + } + } + + /* + * Update FG iterm standby for SOC = 0, this value is always + * assumed to be +ve + */ + if (chip->fg_ibatt_standby_ma != -EINVAL) { + int iterm = chip->fg_ibatt_standby_ma; + + temp = (u16) div_u64(iterm * 0x7FFF, 2500); + reg2[1] = (temp & 0xFF00) >> 8; + reg2[0] = temp & 0xFF; + rc = smb1360_write_bytes(chip, FG_IBATT_STANDBY_REG, + reg2, 2); + if (rc) { + pr_err("Couldn't write fg_iterm rc=%d\n", rc); + goto disable_fg; + } + } + + /* Update CC_to_CV voltage threshold */ + if (chip->fg_cc_to_cv_mv != -EINVAL) { + temp = (u16) div_u64(chip->fg_cc_to_cv_mv * 0x7FFF, + 5000); + reg2[1] = (temp & 0xFF00) >> 8; + reg2[0] = temp & 0xFF; + rc = smb1360_write_bytes(chip, FG_CC_TO_CV_V_REG, + reg2, 2); + if (rc) { + pr_err("Couldn't write cc_to_cv_mv rc=%d\n", + rc); + goto disable_fg; + } + } + + /* Update the thermistor c1 coefficient */ + if (chip->fg_thermistor_c1_coeff != -EINVAL) { + reg2[1] = (chip->fg_thermistor_c1_coeff & 0xFF00) >> 8; + reg2[0] = (chip->fg_thermistor_c1_coeff & 0xFF); + rc = smb1360_write_bytes(chip, FG_THERM_C1_COEFF_REG, + reg2, 2); + if (rc) { + pr_err("Couldn't write thermistor_c1_coeff rc=%d\n", + rc); + goto disable_fg; + } + } + + /* Update SoC based resume charging threshold */ + if (chip->fg_auto_recharge_soc != -EINVAL) { + rc = smb1360_masked_write(chip, CFG_CHG_FUNC_CTRL_REG, + CHG_RECHG_THRESH_FG_SRC_BIT, + CHG_RECHG_THRESH_FG_SRC_BIT); + if (rc) { + dev_err(chip->dev, "Couldn't write to CFG_CHG_FUNC_CTRL_REG rc=%d\n", + rc); + goto disable_fg; + } + + reg = DIV_ROUND_UP(chip->fg_auto_recharge_soc * + MAX_8_BITS, 100); + pr_debug("fg_auto_recharge_soc=%d reg=%x\n", + chip->fg_auto_recharge_soc, reg); + rc = smb1360_write(chip, FG_AUTO_RECHARGE_SOC, reg); + if (rc) { + dev_err(chip->dev, "Couldn't write to FG_AUTO_RECHARGE_SOC rc=%d\n", + rc); + goto disable_fg; + } + } + +disable_fg: + /* disable FG access */ + smb1360_disable_fg_access(chip); + } + + return rc; +} + +static void smb1360_check_feature_support(struct smb1360_chip *chip) +{ + + if (is_usb100_broken(chip)) { + pr_debug("USB100 is not supported\n"); + chip->workaround_flags |= WRKRND_USB100_FAIL; + } + + /* + * FG Configuration + * + * The REV_1 of the chip does not allow access to + * FG config registers (20-2FH). Set the workaround flag. + * Also, the battery detection does not work when the DCIN is absent, + * add a workaround flag for it. 
+ */ + if (chip->revision == SMB1360_REV_1) { + pr_debug("FG config and Battery detection is not supported\n"); + chip->workaround_flags |= + WRKRND_FG_CONFIG_FAIL | WRKRND_BATT_DET_FAIL; + } +} + +static int smb1360_enable(struct smb1360_chip *chip, bool enable) +{ + int rc = 0; + u8 val = 0, shdn_cmd_polar; + + rc = smb1360_read(chip, SHDN_CTRL_REG, &val); + if (rc < 0) { + dev_err(chip->dev, "Couldn't read 0x1A reg rc = %d\n", rc); + return rc; + } + + /* Ignore if a CMD based shutdown is not enabled */ + if (!(val & SHDN_CMD_USE_BIT)) { + pr_debug("SMB not configured for CMD based shutdown\n"); + return 0; + } + + shdn_cmd_polar = !!(val & SHDN_CMD_POLARITY_BIT); + val = (shdn_cmd_polar ^ enable) ? SHDN_CMD_BIT : 0; + + pr_debug("enable=%d shdn_polarity=%d value=%d\n", enable, + shdn_cmd_polar, val); + + rc = smb1360_masked_write(chip, CMD_IL_REG, SHDN_CMD_BIT, val); + if (rc < 0) + pr_err("Couldn't shutdown smb1360 rc = %d\n", rc); + + return rc; +} + +static inline int smb1360_poweroff(struct smb1360_chip *chip) +{ + pr_debug("power off smb1360\n"); + return smb1360_enable(chip, false); +} + +static inline int smb1360_poweron(struct smb1360_chip *chip) +{ + pr_debug("power on smb1360\n"); + return smb1360_enable(chip, true); +} + +static int smb1360_jeita_init(struct smb1360_chip *chip) +{ + int rc = 0; + int temp; + + if (chip->config_hard_thresholds) { + if (chip->soft_jeita_supported) { + chip->workaround_flags |= WRKRND_HARD_JEITA; + rc = smb1360_set_soft_jeita_threshold(chip, + chip->cool_bat_decidegc, chip->warm_bat_decidegc); + if (rc) { + dev_err(chip->dev, + "Couldn't set jeita threshold\n"); + return rc; + } + } else { + rc = smb1360_set_soft_jeita_threshold(chip, + chip->cold_bat_decidegc, chip->hot_bat_decidegc); + if (rc) { + dev_err(chip->dev, + "Couldn't set jeita threshold\n"); + return rc; + } + } + } else { + if (chip->soft_jeita_supported) { + temp = min(chip->warm_bat_ma, chip->cool_bat_ma); + rc = smb1360_set_jeita_comp_curr(chip, temp); + if (rc) { + dev_err(chip->dev, "Couldn't set comp current\n"); + return rc; + } + + temp = (chip->vfloat_mv - chip->warm_bat_mv) / 10; + rc = smb1360_masked_write(chip, CFG_FVC_REG, + FLT_VTG_COMP_MASK, temp); + if (rc < 0) { + dev_err(chip->dev, + "Couldn't set VFLT compensation = %d\n", rc); + return rc; + } + + rc = smb1360_set_soft_jeita_threshold(chip, + chip->cool_bat_decidegc, chip->warm_bat_decidegc); + if (rc) { + dev_err(chip->dev, + "Couldn't set jeita threshold\n"); + return rc; + } + + rc = smb1360_soft_jeita_comp_enable(chip, true); + if (rc) { + dev_err(chip->dev, "Couldn't enable jeita\n"); + return rc; + } + } + } + + return rc; +} + +static int smb1360_otp_gain_init(struct smb1360_chip *chip) +{ + int rc = 0, gain_factor; + bool otp_gain_config = false; + + if (chip->rsense_10mohm) { + gain_factor = 2; + otp_gain_config = true; + } + + mutex_lock(&chip->otp_gain_lock); + if (chip->otg_fet_present) { + /* + * Reset current gain to the default value if OTG + * is not enabled + */ + if (!chip->fet_gain_enabled) { + otp_gain_config = true; + gain_factor = 0; + } + } + + if (otp_gain_config) { + rc = smb1360_otp_gain_config(chip, gain_factor); + if (rc < 0) + pr_err("Couldn't config OTP gain rc=%d\n", rc); + } + mutex_unlock(&chip->otp_gain_lock); + + return rc; +} + +static int smb1360_hw_init(struct smb1360_chip *chip) +{ + int rc; + int i; + u8 reg, mask; + + smb1360_check_feature_support(chip); + + rc = smb1360_enable_volatile_writes(chip); + if (rc < 0) { + dev_err(chip->dev, "Couldn't configure for volatile 
rc = %d\n", + rc); + return rc; + } + + /* Bring SMB1360 out of shutdown, if it was enabled by default */ + rc = smb1360_poweron(chip); + if (rc < 0) { + pr_err("smb1360 power on failed\n"); + return rc; + } + + /* + * A 2 seconds delay is mandatory after bringing the chip out + * of shutdown. This guarantees that FG is in a proper state. + */ + schedule_delayed_work(&chip->delayed_init_work, + msecs_to_jiffies(SMB1360_POWERON_DELAY_MS)); + + /* + * set chg en by cmd register, set chg en by writing bit 1, + * enable auto pre to fast + */ + rc = smb1360_masked_write(chip, CFG_CHG_MISC_REG, + CHG_EN_BY_PIN_BIT + | CHG_EN_ACTIVE_LOW_BIT + | PRE_TO_FAST_REQ_CMD_BIT, + 0); + if (rc < 0) { + dev_err(chip->dev, "Couldn't set CFG_CHG_MISC_REG rc=%d\n", rc); + return rc; + } + + /* USB/AC pin settings */ + rc = smb1360_masked_write(chip, CFG_BATT_CHG_ICL_REG, + AC_INPUT_ICL_PIN_BIT + | AC_INPUT_PIN_HIGH_BIT + | RESET_STATE_USB_500, + AC_INPUT_PIN_HIGH_BIT + | RESET_STATE_USB_500); + if (rc < 0) { + dev_err(chip->dev, "Couldn't set CFG_BATT_CHG_ICL_REG rc=%d\n", + rc); + return rc; + } + + /* AICL enable and set input-uv glitch flt to 20ms*/ + reg = AICL_ENABLED_BIT | INPUT_UV_GLITCH_FLT_20MS_BIT; + rc = smb1360_masked_write(chip, CFG_GLITCH_FLT_REG, reg, reg); + if (rc < 0) { + dev_err(chip->dev, "Couldn't set CFG_GLITCH_FLT_REG rc=%d\n", + rc); + return rc; + } + + /* set the float voltage */ + if (chip->vfloat_mv != -EINVAL) { + rc = smb1360_float_voltage_set(chip, chip->vfloat_mv); + if (rc < 0) { + dev_err(chip->dev, + "Couldn't set float voltage rc = %d\n", rc); + return rc; + } + } + + /* set iterm */ + if (chip->iterm_ma != -EINVAL) { + if (chip->iterm_disabled) { + dev_err(chip->dev, "Error: Both iterm_disabled and iterm_ma set\n"); + return -EINVAL; + } + + if (chip->rsense_10mohm) + chip->iterm_ma /= 2; + + if (chip->iterm_ma < 25) + reg = CHG_ITERM_25MA; + else if (chip->iterm_ma > 200) + reg = CHG_ITERM_200MA; + else + reg = DIV_ROUND_UP(chip->iterm_ma, 25) - 1; + + rc = smb1360_masked_write(chip, CFG_BATT_CHG_REG, + CHG_ITERM_MASK, reg); + if (rc) { + dev_err(chip->dev, "Couldn't set iterm rc = %d\n", rc); + return rc; + } + + rc = smb1360_masked_write(chip, CFG_CHG_MISC_REG, + CHG_CURR_TERM_DIS_BIT, 0); + if (rc) { + dev_err(chip->dev, + "Couldn't enable iterm rc = %d\n", rc); + return rc; + } + } else if (chip->iterm_disabled) { + rc = smb1360_masked_write(chip, CFG_CHG_MISC_REG, + CHG_CURR_TERM_DIS_BIT, + CHG_CURR_TERM_DIS_BIT); + if (rc) { + dev_err(chip->dev, "Couldn't set iterm rc = %d\n", + rc); + return rc; + } + } + + /* set the safety time voltage */ + if (chip->safety_time != -EINVAL) { + if (chip->safety_time == 0) { + /* safety timer disabled */ + rc = smb1360_masked_write(chip, CFG_SFY_TIMER_CTRL_REG, + SAFETY_TIME_DISABLE_BIT, SAFETY_TIME_DISABLE_BIT); + if (rc < 0) { + dev_err(chip->dev, + "Couldn't disable safety timer rc = %d\n", + rc); + return rc; + } + } else { + for (i = 0; i < ARRAY_SIZE(chg_time); i++) { + if (chip->safety_time <= chg_time[i]) { + reg = i << SAFETY_TIME_MINUTES_SHIFT; + break; + } + } + rc = smb1360_masked_write(chip, CFG_SFY_TIMER_CTRL_REG, + SAFETY_TIME_DISABLE_BIT | SAFETY_TIME_MINUTES_MASK, + reg); + if (rc < 0) { + dev_err(chip->dev, + "Couldn't set safety timer rc = %d\n", rc); + return rc; + } + } + } + + /* configure resume threshold, auto recharge and charge inhibit */ + if (chip->resume_delta_mv != -EINVAL) { + if (chip->recharge_disabled && chip->chg_inhibit_disabled) { + dev_err(chip->dev, + "Error: Both recharge_disabled and 
recharge_mv set\n"); + return -EINVAL; + } + rc = smb1360_recharge_threshold_set(chip, + chip->resume_delta_mv); + if (rc) { + dev_err(chip->dev, + "Couldn't set rechg thresh rc = %d\n", rc); + return rc; + } + } + + rc = smb1360_masked_write(chip, CFG_CHG_MISC_REG, + CFG_AUTO_RECHG_DIS_BIT, + chip->recharge_disabled ? + CFG_AUTO_RECHG_DIS_BIT : 0); + if (rc) { + dev_err(chip->dev, "Couldn't set rechg-cfg rc = %d\n", rc); + return rc; + } + rc = smb1360_masked_write(chip, CFG_CHG_MISC_REG, + CFG_CHG_INHIBIT_EN_BIT, + chip->chg_inhibit_disabled ? + 0 : CFG_CHG_INHIBIT_EN_BIT); + if (rc) { + dev_err(chip->dev, "Couldn't set chg_inhibit rc = %d\n", rc); + return rc; + } + + rc = smb1360_masked_write(chip, CFG_CHG_MISC_REG, + CFG_BAT_OV_ENDS_CHG_CYC, + chip->ov_ends_chg_cycle_disabled ? + 0 : CFG_BAT_OV_ENDS_CHG_CYC); + if (rc) { + dev_err(chip->dev, "Couldn't set bat_ov_ends_charge rc = %d\n" + , rc); + return rc; + } + + /* battery missing detection */ + rc = smb1360_masked_write(chip, CFG_BATT_MISSING_REG, + BATT_MISSING_SRC_THERM_BIT, + BATT_MISSING_SRC_THERM_BIT); + if (rc < 0) { + dev_err(chip->dev, "Couldn't set batt_missing config = %d\n", + rc); + return rc; + } + + rc = smb1360_jeita_init(chip); + if (rc < 0) { + dev_err(chip->dev, "Couldn't init jeita, rc = %d\n", rc); + return rc; + } + + /* interrupt enabling - active low */ + if (chip->client->irq) { + mask = CHG_STAT_IRQ_ONLY_BIT + | CHG_STAT_ACTIVE_HIGH_BIT + | CHG_STAT_DISABLE_BIT + | CHG_TEMP_CHG_ERR_BLINK_BIT; + + if (!chip->pulsed_irq) + reg = CHG_STAT_IRQ_ONLY_BIT; + else + reg = CHG_TEMP_CHG_ERR_BLINK_BIT; + rc = smb1360_masked_write(chip, CFG_STAT_CTRL_REG, mask, reg); + if (rc < 0) { + dev_err(chip->dev, "Couldn't set irq config rc = %d\n", + rc); + return rc; + } + + /* enabling only interesting interrupts */ + rc = smb1360_write(chip, IRQ_CFG_REG, + IRQ_BAT_HOT_COLD_HARD_BIT + | IRQ_BAT_HOT_COLD_SOFT_BIT + | IRQ_INTERNAL_TEMPERATURE_BIT + | IRQ_DCIN_UV_BIT + | IRQ_AICL_DONE_BIT); + if (rc) { + dev_err(chip->dev, "Couldn't set irq1 config rc = %d\n", + rc); + return rc; + } + + rc = smb1360_write(chip, IRQ2_CFG_REG, + IRQ2_SAFETY_TIMER_BIT + | IRQ2_CHG_ERR_BIT + | IRQ2_CHG_PHASE_CHANGE_BIT + | IRQ2_POWER_OK_BIT + | IRQ2_BATT_MISSING_BIT + | IRQ2_VBAT_LOW_BIT); + if (rc) { + dev_err(chip->dev, "Couldn't set irq2 config rc = %d\n", + rc); + return rc; + } + + rc = smb1360_write(chip, IRQ3_CFG_REG, + IRQ3_FG_ACCESS_OK_BIT + | IRQ3_SOC_CHANGE_BIT + | IRQ3_SOC_MIN_BIT + | IRQ3_SOC_MAX_BIT + | IRQ3_SOC_EMPTY_BIT + | IRQ3_SOC_FULL_BIT); + if (rc < 0) { + dev_err(chip->dev, "Couldn't set irq3 enable rc = %d\n", + rc); + return rc; + } + } + + /* batt-id configuration */ + if (chip->batt_id_disabled) { + mask = BATT_ID_ENABLED_BIT | CHG_BATT_ID_FAIL; + reg = CHG_BATT_ID_FAIL; + rc = smb1360_masked_write(chip, CFG_FG_BATT_CTRL_REG, + mask, reg); + if (rc < 0) { + dev_err(chip->dev, "Couldn't set batt_id_reg rc = %d\n", + rc); + return rc; + } + } + + /* USB OTG current limit configuration */ + if (chip->otg_batt_curr_limit != -EINVAL) { + for (i = 0; i < ARRAY_SIZE(otg_curr_ma); i++) { + if (otg_curr_ma[i] >= chip->otg_batt_curr_limit) + break; + } + + if (i == ARRAY_SIZE(otg_curr_ma)) + i = i - 1; + + rc = smb1360_masked_write(chip, CFG_BATT_CHG_REG, + OTG_CURRENT_MASK, + i << OTG_CURRENT_SHIFT); + if (rc) + pr_err("Couldn't set OTG current limit, rc = %d\n", rc); + } + + rc = smb1360_charging_disable(chip, USER, !!chip->charging_disabled); + if (rc) + dev_err(chip->dev, "Couldn't '%s' charging rc = %d\n", + 
chip->charging_disabled ? "disable" : "enable", rc); + + if (chip->parallel_charging) { + rc = smb1360_parallel_charger_enable(chip, PARALLEL_USER, + !chip->charging_disabled); + if (rc) + dev_err(chip->dev, "Couldn't '%s' parallel-charging rc = %d\n", + chip->charging_disabled ? "disable" : "enable", rc); + } + + return rc; +} + +static int smb1360_delayed_hw_init(struct smb1360_chip *chip) +{ + int rc; + + pr_debug("delayed hw init start!\n"); + + if (chip->otp_hard_jeita_config) { + rc = smb1360_hard_jeita_otp_init(chip); + if (rc) { + pr_err("Unable to change the OTP hard jeita, rc=%d\n", + rc); + return rc; + } + } + rc = smb1360_check_batt_profile(chip); + if (rc) { + pr_err("Unable to modify battery profile, rc=%d\n", rc); + return rc; + } + + rc = smb1360_otp_gain_init(chip); + if (rc) { + pr_err("Unable to config otp gain, rc=%d\n", rc); + return rc; + } + + rc = smb1360_fg_config(chip); + if (rc) { + pr_err("Couldn't configure FG rc=%d\n", rc); + return rc; + } + + rc = smb1360_check_cycle_stretch(chip); + if (rc) { + pr_err("Unable to check cycle-stretch\n"); + return rc; + } + + pr_debug("delayed hw init complete!\n"); + return rc; +} + +static void smb1360_delayed_init_work_fn(struct work_struct *work) +{ + int rc = 0; + struct smb1360_chip *chip = container_of(work, struct smb1360_chip, + delayed_init_work.work); + + rc = smb1360_delayed_hw_init(chip); + + if (!rc) { + /* + * If the delayed hw init successfully, update battery + * power_supply to make sure the correct SoC reported + * timely. + */ + power_supply_changed(chip->batt_psy); + } else if (rc == -ETIMEDOUT) { + /* + * If the delayed hw init failed causing by waiting for + * FG access timed-out, force a FG reset and queue the + * worker again to retry the initialization. + */ + pr_debug("delayed hw init timed-out, retry!\n"); + rc = smb1360_force_fg_reset(chip); + if (rc) { + pr_err("couldn't reset FG, rc = %d\n", rc); + return; + } + schedule_delayed_work(&chip->delayed_init_work, 0); + } else { + pr_err("delayed hw init failed, rc=%d\n", rc); + } +} + +static int smb_parse_batt_id(struct smb1360_chip *chip) +{ + int rc = 0, rpull = 0, vref = 0, batt_id_uv; + int64_t denom; + struct device_node *node = chip->dev->of_node; + + chip->lr_mux2_batt_id = iio_channel_get(chip->dev, "batt_id"); + if (IS_ERR(chip->lr_mux2_batt_id)) { + if (PTR_ERR(chip->lr_mux2_batt_id) != -EPROBE_DEFER) + pr_err("batt_id unavailable %ld\n", + PTR_ERR(chip->lr_mux2_batt_id)); + rc = PTR_ERR(chip->lr_mux2_batt_id); + chip->lr_mux2_batt_id = NULL; + return rc; + } + + rc = of_property_read_u32(node, "qcom,profile-a-rid-kohm", + &chip->profile_rid[0]); + if (rc < 0) { + pr_err("Couldn't read profile-a-rid-kohm rc=%d\n", rc); + return rc; + } + + rc = of_property_read_u32(node, "qcom,profile-b-rid-kohm", + &chip->profile_rid[1]); + if (rc < 0) { + pr_err("Couldn't read profile-b-rid-kohm rc=%d\n", rc); + return rc; + } + + rc = of_property_read_u32(node, "qcom,batt-id-vref-uv", &vref); + if (rc < 0) { + pr_err("Couldn't read batt-id-vref-uv rc=%d\n", rc); + return rc; + } + + rc = of_property_read_u32(node, "qcom,batt-id-rpullup-kohm", &rpull); + if (rc < 0) { + pr_err("Couldn't read batt-id-rpullup-kohm rc=%d\n", rc); + return rc; + } + + /* read battery ID */ + rc = iio_read_channel_processed(chip->lr_mux2_batt_id, &batt_id_uv); + if (rc < 0) { + pr_err("error reading batt id channel : rc = %d\n", rc); + return rc; + } + + if (batt_id_uv == 0) { + /* vadc not correct or batt id line grounded, report 0 kohms */ + pr_err("batt_id_uv = 0, 
batt-id grounded using same profile\n"); + return 0; + } + + denom = div64_s64(vref * 1000000LL, batt_id_uv) - 1000000LL; + if (denom == 0) { + /* batt id connector might be open, return 0 kohms */ + return 0; + } + chip->connected_rid = div64_s64(rpull * 1000000LL + denom/2, denom); + + pr_debug("batt_id_voltage = %lld, connected_rid = %d\n", + batt_id_uv, chip->connected_rid); + + return 0; +} + +/* + * Note the below: + * 1. if both qcom,soft-jeita-supported and qcom,config-hard-thresholds + * are not defined, SMB continues with default OTP configuration. + * 2. if both are enabled, the hard thresholds are modified. + * 3. if only qcom,config-hard-thresholds is defined, the soft JEITA is disabled + * 4. if only qcom,soft-jeita-supported is defined, the soft JEITA thresholds + * are modified. + */ +static int smb1360_parse_jeita_params(struct smb1360_chip *chip) +{ + int rc = 0; + struct device_node *node = chip->dev->of_node; + int temp[2]; + + if (of_property_read_bool(node, "qcom,config-hard-thresholds")) { + rc = of_property_read_u32(node, + "qcom,cold-bat-decidegc", &chip->cold_bat_decidegc); + if (rc) { + pr_err("cold_bat_decidegc property error, rc = %d\n", + rc); + return -EINVAL; + } + + rc = of_property_read_u32(node, + "qcom,hot-bat-decidegc", &chip->hot_bat_decidegc); + if (rc) { + pr_err("hot_bat_decidegc property error, rc = %d\n", + rc); + return -EINVAL; + } + + chip->config_hard_thresholds = true; + pr_debug("config_hard_thresholds = %d, cold_bat_decidegc = %d, hot_bat_decidegc = %d\n", + chip->config_hard_thresholds, chip->cold_bat_decidegc, + chip->hot_bat_decidegc); + } else if (of_property_read_bool(node, "qcom,otp-hard-jeita-config")) { + rc = of_property_read_u32(node, "qcom,otp-cold-bat-decidegc", + &chip->otp_cold_bat_decidegc); + if (rc) { + pr_err("otp-cold-bat-decidegc property error, rc = %d\n", + rc); + return -EINVAL; + } + + rc = of_property_read_u32(node, "qcom,otp-hot-bat-decidegc", + &chip->otp_hot_bat_decidegc); + + if (rc) { + pr_err("otp-hot-bat-decidegc property error, rc = %d\n", + rc); + return -EINVAL; + } + + chip->otp_hard_jeita_config = true; + rc = of_property_read_u32_array(node, + "qcom,otp-hard-jeita-hysteresis", temp, 2); + if (rc) { + if (rc != -EINVAL) { + pr_err("read otp-hard-jeita-hysteresis failed, rc = %d\n", + rc); + return rc; + } + } else { + chip->cold_hysteresis = temp[0]; + chip->hot_hysteresis = temp[1]; + } + + pr_debug("otp_hard_jeita_config = %d, otp_cold_bat_decidegc = %d\n" + "otp_hot_bat_decidegc = %d, cold_hysteresis = %d\n" + "hot_hysteresis = %d\n", + chip->otp_hard_jeita_config, + chip->otp_cold_bat_decidegc, + chip->otp_hot_bat_decidegc, chip->cold_hysteresis, + chip->hot_hysteresis); + } + + if (of_property_read_bool(node, "qcom,soft-jeita-supported")) { + rc = of_property_read_u32(node, "qcom,warm-bat-decidegc", + &chip->warm_bat_decidegc); + if (rc) { + pr_err("warm_bat_decidegc property error, rc = %d\n", + rc); + return -EINVAL; + } + + rc = of_property_read_u32(node, "qcom,cool-bat-decidegc", + &chip->cool_bat_decidegc); + if (rc) { + pr_err("cool_bat_decidegc property error, rc = %d\n", + rc); + return -EINVAL; + } + rc = of_property_read_u32(node, "qcom,cool-bat-mv", + &chip->cool_bat_mv); + if (rc) { + pr_err("cool_bat_mv property error, rc = %d\n", rc); + return -EINVAL; + } + + rc = of_property_read_u32(node, "qcom,warm-bat-mv", + &chip->warm_bat_mv); + if (rc) { + pr_err("warm_bat_mv property error, rc = %d\n", rc); + return -EINVAL; + } + + rc = of_property_read_u32(node, "qcom,cool-bat-ma", + 
&chip->cool_bat_ma); + if (rc) { + pr_err("cool_bat_ma property error, rc = %d\n", rc); + return -EINVAL; + } + + rc = of_property_read_u32(node, "qcom,warm-bat-ma", + &chip->warm_bat_ma); + + if (rc) { + pr_err("warm_bat_ma property error, rc = %d\n", rc); + return -EINVAL; + } + + chip->soft_jeita_supported = true; + } else { + /* + * If no soft JEITA configuration required from devicetree, + * read the default soft JEITA setting for hard JEITA + * configuration sanity check. + */ + rc = smb1360_get_soft_jeita_threshold(chip, + &chip->cool_bat_decidegc, + &chip->warm_bat_decidegc); + if (rc) { + pr_err("get default soft JEITA threshold failed, rc=%d\n", + rc); + return rc; + } + } + + pr_debug("soft-jeita-enabled = %d, warm-bat-decidegc = %d, cool-bat-decidegc = %d, cool-bat-mv = %d, warm-bat-mv = %d, cool-bat-ma = %d, warm-bat-ma = %d\n", + chip->soft_jeita_supported, chip->warm_bat_decidegc, + chip->cool_bat_decidegc, chip->cool_bat_mv, chip->warm_bat_mv, + chip->cool_bat_ma, chip->warm_bat_ma); + + return rc; +} + +#define MAX_PARALLEL_CURRENT 540 +static int smb1360_parse_parallel_charging_params(struct smb1360_chip *chip) +{ + struct device_node *node = chip->dev->of_node; + + if (of_property_read_bool(node, "qcom,parallel-charging-enabled")) { + + if (!chip->rsense_10mohm) { + pr_err("10mohm-rsense configuration not enabled - parallel-charging disabled\n"); + return 0; + } + chip->parallel_charging = true; + chip->max_parallel_chg_current = MAX_PARALLEL_CURRENT; + of_property_read_u32(node, "qcom,max-parallel-current-ma", + &chip->max_parallel_chg_current); + + pr_debug("Max parallel charger current = %dma\n", + chip->max_parallel_chg_current); + + /* mark the parallel-charger as disabled */ + chip->parallel_chg_disable_status |= PARALLEL_CURRENT; + } + + return 0; +} + +static int smb_parse_dt(struct smb1360_chip *chip) +{ + int rc; + struct device_node *node = chip->dev->of_node; + + if (!node) { + dev_err(chip->dev, "device tree info. 
missing\n"); + return -EINVAL; + } + + chip->rsense_10mohm = of_property_read_bool(node, "qcom,rsense-10mhom"); + + if (of_property_read_bool(node, "qcom,batt-profile-select")) { + rc = smb_parse_batt_id(chip); + if (rc < 0) { + if (rc != -EPROBE_DEFER) + pr_err("Unable to parse batt-id rc=%d\n", rc); + return rc; + } + } + + chip->otg_fet_present = of_property_read_bool(node, + "qcom,otg-fet-present"); + if (chip->otg_fet_present) { + chip->otg_fet_enable_gpio = of_get_named_gpio(node, + "qcom,otg-fet-enable-gpio", 0); + if (!gpio_is_valid(chip->otg_fet_enable_gpio)) { + if (chip->otg_fet_enable_gpio != -EPROBE_DEFER) + pr_err("Unable to get OTG FET enable gpio=%d\n", + chip->otg_fet_enable_gpio); + return chip->otg_fet_enable_gpio; + } + /* Configure OTG FET control gpio */ + rc = devm_gpio_request_one(chip->dev, + chip->otg_fet_enable_gpio, + GPIOF_OPEN_DRAIN | GPIOF_INIT_HIGH, + "smb1360_otg_fet_gpio"); + if (rc) { + pr_err("Unable to request gpio rc=%d\n", rc); + return rc; + } + } + chip->usb_id_gpio = -EINVAL; + if (of_find_property(node, "qcom,usb-id-gpio", NULL)) { + chip->usb_id_gpio = of_get_named_gpio(node, + "qcom,usb-id-gpio", 0); + } + + chip->pulsed_irq = of_property_read_bool(node, "qcom,stat-pulsed-irq"); + + rc = of_property_read_u32(node, "qcom,float-voltage-mv", + &chip->vfloat_mv); + if (rc < 0) + chip->vfloat_mv = -EINVAL; + + rc = of_property_read_u32(node, "qcom,charging-timeout", + &chip->safety_time); + if (rc < 0) + chip->safety_time = -EINVAL; + + if (!rc && (chip->safety_time > chg_time[ARRAY_SIZE(chg_time) - 1])) { + dev_err(chip->dev, "Bad charging-timeout %d\n", + chip->safety_time); + return -EINVAL; + } + + rc = of_property_read_u32(node, "qcom,recharge-thresh-mv", + &chip->resume_delta_mv); + if (rc < 0) + chip->resume_delta_mv = -EINVAL; + + chip->recharge_disabled = of_property_read_bool(node, + "qcom,recharge-disabled"); + + rc = of_property_read_u32(node, "qcom,iterm-ma", &chip->iterm_ma); + if (rc < 0) + chip->iterm_ma = -EINVAL; + + chip->iterm_disabled = of_property_read_bool(node, + "qcom,iterm-disabled"); + + chip->chg_inhibit_disabled = of_property_read_bool(node, + "qcom,chg-inhibit-disabled"); + + chip->charging_disabled = of_property_read_bool(node, + "qcom,charging-disabled"); + + chip->batt_id_disabled = of_property_read_bool(node, + "qcom,batt-id-disabled"); + + chip->shdn_after_pwroff = of_property_read_bool(node, + "qcom,shdn-after-pwroff"); + + chip->min_icl_usb100 = of_property_read_bool(node, + "qcom,min-icl-100ma"); + + chip->ov_ends_chg_cycle_disabled = of_property_read_bool(node, + "qcom,disable-ov-ends-chg-cycle"); + + rc = smb1360_parse_parallel_charging_params(chip); + if (rc) { + pr_err("Couldn't parse parallel charginng params rc=%d\n", rc); + return rc; + } + + if (of_find_property(node, "qcom,thermal-mitigation", + &chip->thermal_levels)) { + chip->thermal_mitigation = devm_kzalloc(chip->dev, + chip->thermal_levels, + GFP_KERNEL); + + if (chip->thermal_mitigation == NULL) { + pr_err("thermal mitigation kzalloc() failed.\n"); + return -ENOMEM; + } + + chip->thermal_levels /= sizeof(int); + rc = of_property_read_u32_array(node, + "qcom,thermal-mitigation", + chip->thermal_mitigation, chip->thermal_levels); + if (rc) { + pr_err("Couldn't read threm limits rc = %d\n", rc); + return rc; + } + } + + rc = smb1360_parse_jeita_params(chip); + if (rc < 0) { + pr_err("Couldn't parse jeita params, rc = %d\n", rc); + return rc; + } + + /* fg params */ + chip->empty_soc_disabled = of_property_read_bool(node, + "qcom,empty-soc-disabled"); 
+ + rc = of_property_read_u32(node, "qcom,fg-delta-soc", &chip->delta_soc); + if (rc < 0) + chip->delta_soc = -EINVAL; + + rc = of_property_read_u32(node, "qcom,fg-soc-max", &chip->soc_max); + if (rc < 0) + chip->soc_max = -EINVAL; + + rc = of_property_read_u32(node, "qcom,fg-soc-min", &chip->soc_min); + if (rc < 0) + chip->soc_min = -EINVAL; + + chip->awake_min_soc = of_property_read_bool(node, + "qcom,awake-min-soc"); + + rc = of_property_read_u32(node, "qcom,fg-voltage-min-mv", + &chip->voltage_min_mv); + if (rc < 0) + chip->voltage_min_mv = -EINVAL; + + rc = of_property_read_u32(node, "qcom,fg-voltage-empty-mv", + &chip->voltage_empty_mv); + if (rc < 0) + chip->voltage_empty_mv = -EINVAL; + + rc = of_property_read_u32(node, "qcom,fg-batt-capacity-mah", + &chip->batt_capacity_mah); + if (rc < 0) + chip->batt_capacity_mah = -EINVAL; + + rc = of_property_read_u32(node, "qcom,fg-cc-soc-coeff", + &chip->cc_soc_coeff); + if (rc < 0) + chip->cc_soc_coeff = -EINVAL; + + rc = of_property_read_u32(node, "qcom,fg-cutoff-voltage-mv", + &chip->v_cutoff_mv); + if (rc < 0) + chip->v_cutoff_mv = -EINVAL; + + rc = of_property_read_u32(node, "qcom,fg-iterm-ma", + &chip->fg_iterm_ma); + if (rc < 0) + chip->fg_iterm_ma = -EINVAL; + + rc = of_property_read_u32(node, "qcom,fg-ibatt-standby-ma", + &chip->fg_ibatt_standby_ma); + if (rc < 0) + chip->fg_ibatt_standby_ma = -EINVAL; + + rc = of_property_read_u32(node, "qcom,thermistor-c1-coeff", + &chip->fg_thermistor_c1_coeff); + if (rc < 0) + chip->fg_thermistor_c1_coeff = -EINVAL; + + rc = of_property_read_u32(node, "qcom,fg-cc-to-cv-mv", + &chip->fg_cc_to_cv_mv); + if (rc < 0) + chip->fg_cc_to_cv_mv = -EINVAL; + + rc = of_property_read_u32(node, "qcom,otg-batt-curr-limit", + &chip->otg_batt_curr_limit); + if (rc < 0) + chip->otg_batt_curr_limit = -EINVAL; + + rc = of_property_read_u32(node, "qcom,fg-auto-recharge-soc", + &chip->fg_auto_recharge_soc); + if (rc < 0) + chip->fg_auto_recharge_soc = -EINVAL; + + if (of_property_read_bool(node, "qcom,fg-reset-at-pon")) { + chip->fg_reset_at_pon = true; + rc = of_property_read_u32(node, "qcom,fg-reset-threshold-mv", + &chip->fg_reset_threshold_mv); + if (rc) { + pr_debug("FG reset voltage threshold not specified using 50mV\n"); + chip->fg_reset_threshold_mv = FG_RESET_THRESHOLD_MV; + } + } + + return 0; +} + +static int smb1360_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + u8 reg; + int rc; + struct smb1360_chip *chip; + struct power_supply_config batt_psy_cfg = {}; + struct power_supply_config usb_psy_cfg = {}; + + chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->resume_completed = true; + chip->client = client; + chip->dev = &client->dev; + chip->fake_battery_soc = -EINVAL; + chip->usb_supply_type = POWER_SUPPLY_TYPE_UNKNOWN; + + chip->extcon = devm_extcon_dev_allocate(chip->dev, + smb1360_extcon_cable); + if (IS_ERR(chip->extcon)) { + pr_err("failed to allocate extcon device\n"); + rc = PTR_ERR(chip->extcon); + return rc; + } + + rc = devm_extcon_dev_register(chip->dev, chip->extcon); + if (rc) { + pr_err("failed to register extcon device\n"); + return rc; + } + + mutex_init(&chip->read_write_lock); + mutex_init(&chip->parallel_chg_lock); + mutex_init(&chip->otp_gain_lock); + mutex_init(&chip->fg_access_request_lock); + mutex_init(&chip->irq_complete); + mutex_init(&chip->charging_disable_lock); + mutex_init(&chip->current_change_lock); + + INIT_DELAYED_WORK(&chip->jeita_work, smb1360_jeita_work_fn); + 
INIT_DELAYED_WORK(&chip->delayed_init_work, + smb1360_delayed_init_work_fn); + init_completion(&chip->fg_mem_access_granted); + smb1360_wakeup_src_init(chip); + + chip->usb_psy_d.name = "usb"; + chip->usb_psy_d.type = POWER_SUPPLY_TYPE_USB; + chip->usb_psy_d.get_property = smb1360_usb_get_property; + chip->usb_psy_d.set_property = smb1360_usb_set_property; + chip->usb_psy_d.properties = smb1360_usb_properties; + chip->usb_psy_d.num_properties = ARRAY_SIZE(smb1360_usb_properties); + chip->usb_psy_d.property_is_writeable = smb1360_usb_is_writeable; + + usb_psy_cfg.drv_data = chip; + usb_psy_cfg.num_supplicants = 0; + + chip->usb_psy = devm_power_supply_register(chip->dev, + &chip->usb_psy_d, &usb_psy_cfg); + if (IS_ERR(chip->usb_psy)) { + dev_err(chip->dev, "Unable to register usb_psy rc = %ld\n", + PTR_ERR(chip->usb_psy)); + rc = PTR_ERR(chip->usb_psy); + return rc; + } + + /* probe the device to check if its actually connected */ + rc = smb1360_read(chip, CFG_BATT_CHG_REG, ®); + if (rc) { + pr_err("Failed to detect SMB1360, device may be absent\n"); + goto destroy_mutex; + } + + rc = read_revision(chip, &chip->revision); + if (rc) + dev_err(chip->dev, "Couldn't read revision rc = %d\n", rc); + + rc = smb_parse_dt(chip); + if (rc < 0) { + dev_err(&client->dev, "Unable to parse DT nodes\n"); + goto destroy_mutex; + } + + device_init_wakeup(chip->dev, 1); + i2c_set_clientdata(client, chip); + chip->default_i2c_addr = client->addr; + INIT_WORK(&chip->parallel_work, smb1360_parallel_work); + if (chip->cold_hysteresis || chip->hot_hysteresis) + INIT_WORK(&chip->jeita_hysteresis_work, + smb1360_jeita_hysteresis_work); + + pr_debug("default_i2c_addr=%x\n", chip->default_i2c_addr); + smb1360_otp_backup_pool_init(chip); + rc = smb1360_hw_init(chip); + if (rc < 0) { + dev_err(&client->dev, + "Unable to initialize hardware rc = %d\n", rc); + goto destroy_mutex; + } + + rc = smb1360_regulator_init(chip); + if (rc) { + dev_err(&client->dev, + "Couldn't initialize smb1360 ragulator rc=%d\n", rc); + goto fail_hw_init; + } + + rc = determine_initial_status(chip); + if (rc < 0) { + dev_err(&client->dev, + "Unable to determine init status rc = %d\n", rc); + goto fail_hw_init; + } + + chip->batt_psy_d.name = "battery"; + chip->batt_psy_d.type = POWER_SUPPLY_TYPE_BATTERY; + chip->batt_psy_d.get_property = smb1360_battery_get_property; + chip->batt_psy_d.set_property = smb1360_battery_set_property; + chip->batt_psy_d.properties = smb1360_battery_properties; + chip->batt_psy_d.num_properties = + ARRAY_SIZE(smb1360_battery_properties); + chip->batt_psy_d.property_is_writeable = smb1360_battery_is_writeable; + + batt_psy_cfg.drv_data = chip; + batt_psy_cfg.num_supplicants = 0; + + chip->batt_psy = devm_power_supply_register(chip->dev, + &chip->batt_psy_d, &batt_psy_cfg); + if (IS_ERR(chip->batt_psy)) { + dev_err(&client->dev, "Unable to register batt_psy rc = %ld\n", + PTR_ERR(chip->batt_psy)); + goto unregister_batt_psy; + } + + /* STAT irq configuration */ + if (client->irq) { + rc = devm_request_threaded_irq(&client->dev, client->irq, NULL, + smb1360_stat_handler, IRQF_ONESHOT, + "smb1360_stat_irq", chip); + if (rc < 0) { + dev_err(&client->dev, + "request_irq for irq=%d failed rc = %d\n", + client->irq, rc); + goto unregister_batt_psy; + } + enable_irq_wake(client->irq); + } + + chip->usb_id_irq = of_irq_get_byname(chip->dev->of_node, + "smb1360_usb_id_irq"); + if (chip->usb_id_irq > 0) { + if (chip->usb_id_gpio == -EINVAL) { + pr_err("usb-id gpio not defined\n"); + } else { + rc = 
devm_request_threaded_irq(&client->dev, + chip->usb_id_irq, NULL, + smb1360_usb_id_irq_handler, + IRQF_ONESHOT + | IRQF_TRIGGER_FALLING + | IRQF_TRIGGER_RISING, + "smb1360_usb_id_irq", chip); + if (rc < 0) { + dev_err(&client->dev, + "usb-id request_irq for irq=%d failed rc = %d\n", + chip->usb_id_irq, rc); + goto unregister_batt_psy; + } + enable_irq_wake(chip->usb_id_irq); + } + } + chip->debug_root = debugfs_create_dir("smb1360", NULL); + if (!chip->debug_root) + dev_err(chip->dev, "Couldn't create debug dir\n"); + + if (chip->debug_root) { + struct dentry *ent; + + ent = debugfs_create_file("config_registers", S_IFREG | 0444, + chip->debug_root, chip, + &cnfg_debugfs_ops); + if (!ent) + dev_err(chip->dev, + "Couldn't create cnfg debug file rc = %d\n", + rc); + + ent = debugfs_create_file("status_registers", S_IFREG | 0444, + chip->debug_root, chip, + &status_debugfs_ops); + if (!ent) + dev_err(chip->dev, + "Couldn't create status debug file rc = %d\n", + rc); + + ent = debugfs_create_file("irq_status", S_IFREG | 0444, + chip->debug_root, chip, + &irq_stat_debugfs_ops); + if (!ent) + dev_err(chip->dev, + "Couldn't create irq_stat debug file rc = %d\n", + rc); + + ent = debugfs_create_file("cmd_registers", S_IFREG | 0444, + chip->debug_root, chip, + &cmd_debugfs_ops); + if (!ent) + dev_err(chip->dev, + "Couldn't create cmd debug file rc = %d\n", + rc); + + ent = debugfs_create_file("fg_regs", + S_IFREG | 0444, chip->debug_root, chip, + &fg_regs_debugfs_ops); + if (!ent) + dev_err(chip->dev, + "Couldn't create fg_scratch_pad debug file rc = %d\n", + rc); + + ent = debugfs_create_x32("address", S_IFREG | 0644, + chip->debug_root, + &(chip->peek_poke_address)); + if (!ent) + dev_err(chip->dev, + "Couldn't create address debug file rc = %d\n", + rc); + + ent = debugfs_create_file("data", S_IFREG | 0644, + chip->debug_root, chip, + &poke_poke_debug_ops); + if (!ent) + dev_err(chip->dev, + "Couldn't create data debug file rc = %d\n", + rc); + + ent = debugfs_create_x32("fg_address", + S_IFREG | 0644, + chip->debug_root, + &(chip->fg_peek_poke_address)); + if (!ent) + dev_err(chip->dev, + "Couldn't create address debug file rc = %d\n", + rc); + + ent = debugfs_create_file("fg_data", + S_IFREG | 0644, + chip->debug_root, chip, + &fg_poke_poke_debug_ops); + if (!ent) + dev_err(chip->dev, + "Couldn't create data debug file rc = %d\n", + rc); + + ent = debugfs_create_x32("fg_access_type", + S_IFREG | 0644, + chip->debug_root, + &(chip->fg_access_type)); + if (!ent) + dev_err(chip->dev, + "Couldn't create data debug file rc = %d\n", + rc); + + ent = debugfs_create_x32("skip_writes", + S_IFREG | 0644, + chip->debug_root, + &(chip->skip_writes)); + if (!ent) + dev_err(chip->dev, + "Couldn't create data debug file rc = %d\n", + rc); + + ent = debugfs_create_x32("skip_reads", + S_IFREG | 0644, + chip->debug_root, + &(chip->skip_reads)); + if (!ent) + dev_err(chip->dev, + "Couldn't create data debug file rc = %d\n", + rc); + + ent = debugfs_create_file("irq_count", S_IFREG | 0444, + chip->debug_root, chip, + &irq_count_debugfs_ops); + if (!ent) + dev_err(chip->dev, + "Couldn't create count debug file rc = %d\n", + rc); + } + + dev_info(chip->dev, "SMB1360 revision=0x%x probe success! 
batt=%d usb=%d soc=%d\n", + chip->revision, + smb1360_get_prop_batt_present(chip), + chip->usb_present, + smb1360_get_prop_batt_capacity(chip)); + + return 0; + +unregister_batt_psy: + power_supply_unregister(chip->batt_psy); +fail_hw_init: + if (chip->otg_vreg.rdev) + regulator_unregister(chip->otg_vreg.rdev); +destroy_mutex: + power_supply_unregister(chip->usb_psy); + wakeup_source_unregister(chip->smb1360_ws.source); + mutex_destroy(&chip->read_write_lock); + mutex_destroy(&chip->parallel_chg_lock); + mutex_destroy(&chip->otp_gain_lock); + mutex_destroy(&chip->fg_access_request_lock); + mutex_destroy(&chip->irq_complete); + mutex_destroy(&chip->charging_disable_lock); + mutex_destroy(&chip->current_change_lock); + return rc; +} + +static int smb1360_remove(struct i2c_client *client) +{ + struct smb1360_chip *chip = i2c_get_clientdata(client); + + if (chip->otg_vreg.rdev) + regulator_unregister(chip->otg_vreg.rdev); + + power_supply_unregister(chip->usb_psy); + power_supply_unregister(chip->batt_psy); + wakeup_source_unregister(chip->smb1360_ws.source); + mutex_destroy(&chip->charging_disable_lock); + mutex_destroy(&chip->current_change_lock); + mutex_destroy(&chip->read_write_lock); + mutex_destroy(&chip->parallel_chg_lock); + mutex_destroy(&chip->irq_complete); + mutex_destroy(&chip->otp_gain_lock); + mutex_destroy(&chip->fg_access_request_lock); + debugfs_remove_recursive(chip->debug_root); + + return 0; +} + +static int smb1360_suspend(struct device *dev) +{ + int i, rc; + struct i2c_client *client = to_i2c_client(dev); + struct smb1360_chip *chip = i2c_get_clientdata(client); + + /* Save the current IRQ config */ + for (i = 0; i < 3; i++) { + rc = smb1360_read(chip, IRQ_CFG_REG + i, + &chip->irq_cfg_mask[i]); + if (rc) + pr_err("Couldn't save irq cfg regs rc=%d\n", rc); + } + + /* enable only important IRQs */ + rc = smb1360_write(chip, IRQ_CFG_REG, IRQ_DCIN_UV_BIT + | IRQ_AICL_DONE_BIT + | IRQ_BAT_HOT_COLD_SOFT_BIT + | IRQ_BAT_HOT_COLD_HARD_BIT); + if (rc < 0) + pr_err("Couldn't set irq_cfg rc=%d\n", rc); + + rc = smb1360_write(chip, IRQ2_CFG_REG, IRQ2_BATT_MISSING_BIT + | IRQ2_VBAT_LOW_BIT + | IRQ2_POWER_OK_BIT); + if (rc < 0) + pr_err("Couldn't set irq2_cfg rc=%d\n", rc); + + rc = smb1360_write(chip, IRQ3_CFG_REG, IRQ3_SOC_FULL_BIT + | IRQ3_SOC_MIN_BIT + | IRQ3_SOC_EMPTY_BIT); + if (rc < 0) + pr_err("Couldn't set irq3_cfg rc=%d\n", rc); + + mutex_lock(&chip->irq_complete); + chip->resume_completed = false; + mutex_unlock(&chip->irq_complete); + + return 0; +} + +static int smb1360_suspend_noirq(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct smb1360_chip *chip = i2c_get_clientdata(client); + + if (chip->irq_waiting) { + pr_err_ratelimited("Aborting suspend, an interrupt was detected while suspending\n"); + return -EBUSY; + } + return 0; +} + +static int smb1360_resume(struct device *dev) +{ + int i, rc; + struct i2c_client *client = to_i2c_client(dev); + struct smb1360_chip *chip = i2c_get_clientdata(client); + + /* Restore the IRQ config */ + for (i = 0; i < 3; i++) { + rc = smb1360_write(chip, IRQ_CFG_REG + i, + chip->irq_cfg_mask[i]); + if (rc) + pr_err("Couldn't restore irq cfg regs rc=%d\n", rc); + } + + mutex_lock(&chip->irq_complete); + chip->resume_completed = true; + if (chip->irq_waiting) { + chip->irq_disabled = false; + enable_irq(client->irq); + mutex_unlock(&chip->irq_complete); + smb1360_stat_handler(client->irq, chip); + } else { + mutex_unlock(&chip->irq_complete); + } + + power_supply_changed(chip->batt_psy); + + return 0; +} + 
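
The suspend/resume pair above cooperates with the STAT interrupt handler through the resume_completed/irq_waiting flags guarded by the irq_complete mutex: suspend() clears resume_completed, suspend_noirq() aborts the suspend if an interrupt is already pending, and resume() re-enables the IRQ and replays the handler if one arrived while the I2C bus was down. Below is a minimal sketch of that defer-and-replay pattern; the context struct and function names are illustrative simplifications, not the driver's exact symbols, and the handler body is reduced to the handshake itself.

#include <linux/interrupt.h>
#include <linux/mutex.h>

struct chg_ctx {
	struct mutex irq_complete;	/* guards the flags below */
	bool resume_completed;		/* false between suspend() and resume() */
	bool irq_waiting;		/* an IRQ fired while suspended */
	bool irq_disabled;
	int irq;
};

/* Threaded IRQ handler: defer the work if the bus may still be suspended. */
static irqreturn_t chg_stat_handler(int irq, void *data)
{
	struct chg_ctx *ctx = data;

	mutex_lock(&ctx->irq_complete);
	ctx->irq_waiting = true;
	if (!ctx->resume_completed) {
		/* keep the line quiet until resume() replays us */
		disable_irq_nosync(irq);
		ctx->irq_disabled = true;
		mutex_unlock(&ctx->irq_complete);
		return IRQ_HANDLED;
	}
	/* ... read and clear the chip's IRQ status registers here ... */
	ctx->irq_waiting = false;
	mutex_unlock(&ctx->irq_complete);
	return IRQ_HANDLED;
}

/* Called from the resume path once the bus is usable again. */
static void chg_resume_replay(struct chg_ctx *ctx)
{
	mutex_lock(&ctx->irq_complete);
	ctx->resume_completed = true;
	if (ctx->irq_waiting) {
		ctx->irq_disabled = false;
		enable_irq(ctx->irq);
		mutex_unlock(&ctx->irq_complete);
		chg_stat_handler(ctx->irq, ctx);	/* replay the missed event */
	} else {
		mutex_unlock(&ctx->irq_complete);
	}
}

Taking the same mutex in both paths is what makes the replay safe: the handler can never observe a half-updated resume state, and at most one deferred event needs to be serviced on resume.
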
+static void smb1360_shutdown(struct i2c_client *client) +{ + int rc; + struct smb1360_chip *chip = i2c_get_clientdata(client); + + rc = smb1360_otg_disable(chip); + if (rc) + pr_err("Couldn't disable OTG mode rc=%d\n", rc); + + if (chip->shdn_after_pwroff) { + rc = smb1360_poweroff(chip); + if (rc) + pr_err("Couldn't shutdown smb1360, rc = %d\n", rc); + pr_info("smb1360 power off\n"); + } +} + +static const struct dev_pm_ops smb1360_pm_ops = { + .resume = smb1360_resume, + .suspend_noirq = smb1360_suspend_noirq, + .suspend = smb1360_suspend, +}; + +static const struct of_device_id smb1360_match_table[] = { + { .compatible = "qcom,smb1360-chg-fg",}, + { }, +}; + +static const struct i2c_device_id smb1360_id[] = { + {"smb1360-chg-fg", 0}, + {}, +}; +MODULE_DEVICE_TABLE(i2c, smb1360_id); + +static struct i2c_driver smb1360_driver = { + .driver = { + .name = "smb1360-chg-fg", + .of_match_table = smb1360_match_table, + .pm = &smb1360_pm_ops, + }, + .probe = smb1360_probe, + .remove = smb1360_remove, + .shutdown = smb1360_shutdown, + .id_table = smb1360_id, +}; + +module_i2c_driver(smb1360_driver); + +MODULE_DESCRIPTION("SMB1360 Charger and Fuel Gauge"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("i2c:smb1360-chg-fg"); diff --git a/drivers/power/supply/qcom/smb1398-charger.c b/drivers/power/supply/qcom/smb1398-charger.c index c600ecad1411..c0be685a1925 100644 --- a/drivers/power/supply/qcom/smb1398-charger.c +++ b/drivers/power/supply/qcom/smb1398-charger.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved. */ #define pr_fmt(fmt) "SMB1398: %s: " fmt, __func__ @@ -128,6 +128,7 @@ #define SMB_EN_POS_TRIGGER BIT(0) #define PERPH0_DIV2_SLAVE 0x2652 +#define CFG_EN_SLAVE_OWN_FREQ BIT(1) #define CFG_DIV2_SYNC_CLK_PHASE_90 BIT(0) #define DIV2_LCM_CFG_REG 0x2653 @@ -147,6 +148,10 @@ #define WIN_OV_400_MV 2 #define WIN_OV_500_MV 3 +#define PERPH0_OVLO_REF_REG 0x265B +#define SMB1394_INPUT_OVLO_CONF_MASK GENMASK(2, 0) +#define SMB1394_INPUT_OVLO_13P04V 0x5 + #define DIV2_MODE_CFG_REG 0x265C #define LCM_EXIT_CTRL_REG 0x265D @@ -176,13 +181,17 @@ #define PERPH0_CFG_SDCDC_REG 0x267A #define EN_WIN_UV_BIT BIT(7) +#define EN_WIN_OV_RISE_DEB_BIT BIT(6) #define PERPH0_SOVP_CFG0_REG 0x2680 +#define CFG_OVP_VSNS_THRESHOLD BIT(4) #define CFG_OVP_IGNORE_UVLO BIT(5) #define PERPH0_SSUPPLY_CFG0_REG 0x2682 #define EN_HV_OV_OPTION2_BIT BIT(7) #define EN_MV_OV_OPTION2_BIT BIT(5) +#define CFG_CMP_VOUT_VS_4V_REF_MASK GENMASK(2, 1) +#define CMP_VOUT_VS_4V_REF_3P2V 0x3 /* Value for SMB1394 only */ #define SSUPLY_TEMP_CTRL_REG 0x2683 #define SEL_OUT_TEMP_MAX_MASK GENMASK(7, 5) @@ -236,6 +245,11 @@ #define DIV2_CP_MASTER 0 #define DIV2_CP_SLAVE 1 #define COMBO_PRE_REGULATOR 2 +#define SMB1394_DIV2_CP_PRY 3 +#define SMB1394_DIV2_CP_SECY 4 + +#define IS_SMB1394(role) \ + (role == SMB1394_DIV2_CP_PRY || role == SMB1394_DIV2_CP_SECY) enum isns_mode { ISNS_MODE_OFF = 0, @@ -975,6 +989,19 @@ static int smb1398_div2_cp_get_min_icl(struct smb1398_chip *chip) return chip->div2_cp_min_ilim_ua; } +static char *div2_cp_get_model_name(struct smb1398_chip *chip) +{ + if (IS_SMB1394(chip->div2_cp_role)) + return "SMB1394"; + + if (chip->pmic_rev_id->rev4 > 2) + return "SMB1398_V3"; + else if (chip->pmic_rev_id->rev4 == 2) + return "SMB1398_V2"; + else + return "SMB1398_V1"; +} + static int div2_cp_master_get_prop(struct power_supply *psy, enum power_supply_property prop, union power_supply_propval 
*val) @@ -1065,8 +1092,7 @@ static int div2_cp_master_get_prop(struct power_supply *psy, val->intval = chip->pmic_rev_id->rev4; break; case POWER_SUPPLY_PROP_MODEL_NAME: - val->strval = (chip->pmic_rev_id->rev4 > 1) ? "SMB1398_V2" : - "SMB1398_V1"; + val->strval = div2_cp_get_model_name(chip); break; case POWER_SUPPLY_PROP_PARALLEL_MODE: val->intval = chip->pl_input_mode; @@ -1689,6 +1715,10 @@ static void smb1398_status_change_work(struct work_struct *work) chip->usb_present = !!pval.intval; if (!chip->usb_present) /* USB has been removed */ smb1398_toggle_uvlo(chip); + pval.intval = 1; + if (is_cps_available(chip)) + power_supply_set_property(chip->div2_cp_slave_psy, + POWER_SUPPLY_PROP_CP_TOGGLE_SWITCHER, &pval); } rc = power_supply_get_property(chip->usb_psy, @@ -1926,7 +1956,7 @@ static void smb1398_taper_work(struct work_struct *work) chip->taper_work_running = false; } -static int smb1398_update_ovp(struct smb1398_chip *chip) +static int _smb1398_update_ovp(struct smb1398_chip *chip) { int rc = 0; u8 reg = 0; @@ -1960,7 +1990,43 @@ static int smb1398_update_ovp(struct smb1398_chip *chip) return rc; } - return 0; + return rc; +} + +static int _smb1394_update_ovp(struct smb1398_chip *chip) +{ + int rc = 0; + + rc = smb1398_masked_write(chip, PERPH0_SOVP_CFG0_REG, + CFG_OVP_VSNS_THRESHOLD, CFG_OVP_VSNS_THRESHOLD); + if (rc < 0) { + dev_err(chip->dev, "Couldn't set PERPH0_SOVP_CFG0_REG rc=%d\n", + rc); + return rc; + } + + rc = smb1398_masked_write(chip, PERPH0_OVLO_REF_REG, + SMB1394_INPUT_OVLO_CONF_MASK, + SMB1394_INPUT_OVLO_13P04V); + if (rc < 0) { + dev_err(chip->dev, "Couldn't set PERPH0_OVLO_REF rc=%d\n", rc); + return rc; + } + + rc = smb1398_masked_write(chip, PERPH0_CFG_SDCDC_REG, + EN_WIN_OV_RISE_DEB_BIT, 0); + if (rc < 0) + dev_err(chip->dev, "Couldn't set PERPH0_CFG_SDCDC_REG rc=%d\n", + rc); + return rc; +} + +static int smb1398_update_ovp(struct smb1398_chip *chip) +{ + if (IS_SMB1394(chip->div2_cp_role)) + return _smb1394_update_ovp(chip); + + return _smb1398_update_ovp(chip); } static int smb1398_div2_cp_hw_init(struct smb1398_chip *chip) @@ -2046,6 +2112,17 @@ static int smb1398_div2_cp_hw_init(struct smb1398_chip *chip) return rc; } + if (IS_SMB1394(chip->div2_cp_role)) { + rc = smb1398_masked_write(chip, PERPH0_SSUPPLY_CFG0_REG, + CFG_CMP_VOUT_VS_4V_REF_MASK, + CMP_VOUT_VS_4V_REF_3P2V); + if (rc < 0) { + dev_err(chip->dev, "Couldn't set PERPH0_SSUPPLY_CFG0_REG, rc=%d\n", + rc); + return rc; + } + } + return rc; } @@ -2201,6 +2278,7 @@ static enum power_supply_property div2_cp_slave_props[] = { POWER_SUPPLY_PROP_CP_ENABLE, POWER_SUPPLY_PROP_INPUT_CURRENT_MAX, POWER_SUPPLY_PROP_CURRENT_CAPABILITY, + POWER_SUPPLY_PROP_CP_TOGGLE_SWITCHER, }; static int div2_cp_slave_get_prop(struct power_supply *psy, @@ -2224,6 +2302,9 @@ static int div2_cp_slave_get_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_CURRENT_CAPABILITY: pval->intval = (int)chip->current_capability; break; + case POWER_SUPPLY_PROP_CP_TOGGLE_SWITCHER: + pval->intval = 0; + break; default: dev_err(chip->dev, "read div2_cp_slave property %d is not supported\n", prop); @@ -2256,6 +2337,10 @@ static int div2_cp_slave_set_prop(struct power_supply *psy, return rc; chip->current_capability = mode; break; + case POWER_SUPPLY_PROP_CP_TOGGLE_SWITCHER: + /* use this case to toggle UVLO */ + rc = smb1398_toggle_uvlo(chip); + break; default: dev_err(chip->dev, "write div2_cp_slave property %d is not supported\n", prop); @@ -2374,13 +2459,32 @@ static int smb1398_div2_cp_slave_probe(struct smb1398_chip *chip) return 
rc; } - /* Enable slave clock on its own */ - rc = smb1398_masked_write(chip, NOLOCK_SPARE_REG, - EN_SLAVE_OWN_FREQ_BIT, EN_SLAVE_OWN_FREQ_BIT); - if (rc < 0) { - dev_err(chip->dev, "Couldn't enable slave clock, rc=%d\n", - rc); - return rc; + if (IS_SMB1394(chip->div2_cp_role)) { + rc = smb1398_masked_write(chip, PERPH0_SSUPPLY_CFG0_REG, + CFG_CMP_VOUT_VS_4V_REF_MASK, + CMP_VOUT_VS_4V_REF_3P2V); + if (rc < 0) { + dev_err(chip->dev, "Couldn't set PERPH0_SSUPPLY_CFG0_REG, rc=%d\n", + rc); + return rc; + } + + rc = smb1398_masked_write(chip, PERPH0_DIV2_SLAVE, + CFG_EN_SLAVE_OWN_FREQ, CFG_EN_SLAVE_OWN_FREQ); + if (rc < 0) { + dev_err(chip->dev, "Couldn't set PERPH0_DIV2_SLAVE, rc=%d\n", + rc); + return rc; + } + } else { + /* Enable slave clock on its own */ + rc = smb1398_masked_write(chip, NOLOCK_SPARE_REG, + EN_SLAVE_OWN_FREQ_BIT, EN_SLAVE_OWN_FREQ_BIT); + if (rc < 0) { + dev_err(chip->dev, "Couldn't enable slave clock, rc=%d\n", + rc); + return rc; + } } rc = smb1398_init_div2_cp_slave_psy(chip); @@ -2626,8 +2730,10 @@ static int smb1398_probe(struct platform_device *pdev) chip->div2_cp_role = (int)of_device_get_match_data(chip->dev); switch (chip->div2_cp_role) { case DIV2_CP_MASTER: + case SMB1394_DIV2_CP_PRY: rc = smb1398_div2_cp_master_probe(chip); break; + case SMB1394_DIV2_CP_SECY: case DIV2_CP_SLAVE: rc = smb1398_div2_cp_slave_probe(chip); break; @@ -2658,7 +2764,8 @@ static int smb1398_remove(struct platform_device *pdev) { struct smb1398_chip *chip = platform_get_drvdata(pdev); - if (chip->div2_cp_role == DIV2_CP_MASTER) { + if (chip->div2_cp_role == DIV2_CP_MASTER || + chip->div2_cp_role == SMB1394_DIV2_CP_PRY) { vote(chip->awake_votable, SHUTDOWN_VOTER, false, 0); vote(chip->div2_cp_disable_votable, SHUTDOWN_VOTER, true, 0); vote(chip->div2_cp_ilim_votable, SHUTDOWN_VOTER, true, 0); @@ -2725,6 +2832,12 @@ static const struct of_device_id match_table[] = { { .compatible = "qcom,smb1398-pre-regulator", .data = (void *)COMBO_PRE_REGULATOR, }, + { .compatible = "qcom,smb1394-div2-cp-primary", + .data = (void *)SMB1394_DIV2_CP_PRY, + }, + { .compatible = "qcom,smb1394-div2-cp-secondary", + .data = (void *)SMB1394_DIV2_CP_SECY, + }, { }, }; diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c index 555a12824d46..494cab5fb721 100644 --- a/drivers/power/supply/qcom/smb5-lib.c +++ b/drivers/power/supply/qcom/smb5-lib.c @@ -1218,6 +1218,7 @@ static void smblib_uusb_removal(struct smb_charger *chg) is_flash_active(chg) ? 
SDP_CURRENT_UA : SDP_100_MA); vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0); vote(chg->usb_icl_votable, HVDCP2_ICL_VOTER, false, 0); + vote(chg->usb_icl_votable, HVDCP2_12V_ICL_VOTER, false, 0); vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, false, 0); vote(chg->usb_icl_votable, THERMAL_THROTTLE_VOTER, false, 0); vote(chg->limited_irq_disable_votable, CHARGER_TYPE_VOTER, @@ -2666,6 +2667,8 @@ int smblib_dp_dm(struct smb_charger *chg, int val) rc = smblib_force_vbus_voltage(chg, FORCE_5V_BIT); if (rc < 0) pr_err("Failed to force 5V\n"); + + vote(chg->usb_icl_votable, HVDCP2_12V_ICL_VOTER, false, 0); break; case POWER_SUPPLY_DP_DM_FORCE_9V: if (chg->qc2_unsupported_voltage == QC2_NON_COMPLIANT_9V) { @@ -2691,6 +2694,8 @@ int smblib_dp_dm(struct smb_charger *chg, int val) rc = smblib_force_vbus_voltage(chg, FORCE_9V_BIT); if (rc < 0) pr_err("Failed to force 9V\n"); + + vote(chg->usb_icl_votable, HVDCP2_12V_ICL_VOTER, false, 0); break; case POWER_SUPPLY_DP_DM_FORCE_12V: if (chg->qc2_unsupported_voltage == QC2_NON_COMPLIANT_12V) { @@ -2707,6 +2712,8 @@ int smblib_dp_dm(struct smb_charger *chg, int val) } if ((stat & QC_9V_BIT) || (stat & QC_5V_BIT)) { + vote(chg->usb_icl_votable, HVDCP2_12V_ICL_VOTER, true, + chg->chg_param.hvdcp2_12v_max_icl_ua); /* Force 1A ICL before requesting higher voltage */ vote(chg->usb_icl_votable, HVDCP2_ICL_VOTER, true, 1000000); @@ -5717,7 +5724,6 @@ static void smblib_handle_hvdcp_check_timeout(struct smb_charger *chg, u32 hvdcp_ua = 0; if (rising) { - if (qc_charger) { hvdcp_ua = (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP) ? @@ -6262,6 +6268,7 @@ static void typec_src_removal(struct smb_charger *chg) vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0); vote(chg->usb_icl_votable, CTM_VOTER, false, 0); vote(chg->usb_icl_votable, HVDCP2_ICL_VOTER, false, 0); + vote(chg->usb_icl_votable, HVDCP2_12V_ICL_VOTER, false, 0); vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, false, 0); vote(chg->usb_icl_votable, THERMAL_THROTTLE_VOTER, false, 0); vote(chg->usb_icl_votable, LPD_VOTER, false, 0); diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h index 02f1b46d0da9..6294edca9efa 100644 --- a/drivers/power/supply/qcom/smb5-lib.h +++ b/drivers/power/supply/qcom/smb5-lib.h @@ -65,6 +65,7 @@ enum print_reason { #define JEITA_ARB_VOTER "JEITA_ARB_VOTER" #define MOISTURE_VOTER "MOISTURE_VOTER" #define HVDCP2_ICL_VOTER "HVDCP2_ICL_VOTER" +#define HVDCP2_12V_ICL_VOTER "HVDCP2_12V_ICL_VOTER" #define AICL_THRESHOLD_VOTER "AICL_THRESHOLD_VOTER" #define USBOV_DBC_VOTER "USBOV_DBC_VOTER" #define CHG_TERMINATION_VOTER "CHG_TERMINATION_VOTER" diff --git a/drivers/power/supply/qcom/smblite-lib.h b/drivers/power/supply/qcom/smblite-lib.h index 374c8ffba8bc..8c33d17753e9 100644 --- a/drivers/power/supply/qcom/smblite-lib.h +++ b/drivers/power/supply/qcom/smblite-lib.h @@ -55,7 +55,7 @@ enum print_reason { #define VBAT_TO_VRAW_ADC(v) div_u64((u64)v * 1000000UL, 194637UL) -#define ITERM_LIMITS_MA 10000 +#define ITERM_LIMITS_MA 5000 #define ADC_CHG_ITERM_MASK 32767 #define USBIN_25UA 25000 diff --git a/drivers/regulator/qcom_pm8008-regulator.c b/drivers/regulator/qcom_pm8008-regulator.c index 3c14d23f024a..61c67004397f 100644 --- a/drivers/regulator/qcom_pm8008-regulator.c +++ b/drivers/regulator/qcom_pm8008-regulator.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. 
*/ #define pr_fmt(fmt) "PM8008: %s: " fmt, __func__ @@ -60,6 +60,8 @@ #define LDO_STEPPER_CTL_REG(base) (base + 0x3b) #define STEP_RATE_MASK GENMASK(1, 0) +/* Step rate in uV/us */ +#define PM8010_STEP_RATE 4800 #define LDO_PD_CTL_REG(base) (base + 0xA0) #define STRONG_PD_EN_BIT BIT(7) @@ -67,6 +69,11 @@ #define MAX_REG_NAME 20 #define PM8008_MAX_LDO 7 +enum pmic_subtype { + PM8008_SUBTYPE, + PM8010_SUBTYPE, +}; + struct pm8008_chip { struct device *dev; struct regmap *regmap; @@ -75,11 +82,20 @@ struct pm8008_chip { int ocp_irq; }; +struct reg_init_data { + u8 offset; + u8 data; +}; + struct regulator_data { - char *name; - char *supply_name; - int hpm_min_load_ua; - int min_dropout_uv; + char *name; + char *supply_name; + int min_uv; + int max_uv; + int hpm_min_load_ua; + int min_dropout_uv; + const struct reg_init_data *reg_init; + unsigned int reg_init_size; }; struct pm8008_regulator { @@ -96,17 +112,52 @@ struct pm8008_regulator { int min_dropout_uv; int step_rate; bool enable_ocp_broadcast; + enum pmic_subtype pmic_subtype; }; -static struct regulator_data reg_data[] = { - /* name, parent, min load, headroom */ - {"pm8008_l1", "vdd_l1_l2", 10000, 225000}, - {"pm8008_l2", "vdd_l1_l2", 10000, 225000}, - {"pm8008_l3", "vdd_l3_l4", 10000, 200000}, - {"pm8008_l4", "vdd_l3_l4", 10000, 200000}, - {"pm8008_l5", "vdd_l5", 10000, 300000}, - {"pm8008_l6", "vdd_l6", 10000, 300000}, - {"pm8008_l7", "vdd_l7", 10000, 300000}, +static const struct regulator_data pm8008_reg_data[PM8008_MAX_LDO] = { + /* name parent min_uv max_uv hpm_load headroom_uv */ + {"l1", "vdd_l1_l2", 528000, 1504000, 30000, 225000}, + {"l2", "vdd_l1_l2", 528000, 1504000, 30000, 225000}, + {"l3", "vdd_l3_l4", 1504000, 3400000, 10000, 200000}, + {"l4", "vdd_l3_l4", 1504000, 3400000, 10000, 200000}, + {"l5", "vdd_l5", 1504000, 3400000, 10000, 300000}, + {"l6", "vdd_l6", 1504000, 3400000, 10000, 300000}, + {"l7", "vdd_l7", 1504000, 3400000, 10000, 300000}, +}; + +static const struct reg_init_data pm8010_p300_reg_init_data[] = { + {0x55, 0x8A}, + {0x77, 0x03}, +}; + +static const struct reg_init_data pm8010_p600_reg_init_data[] = { + {0x76, 0x07}, + {0x77, 0x03}, +}; + +/* + * PM8010 LDOs 3, 4, and 6 can physically output a minimum of 1808 mV. However, + * 1504 mV is specified here to match PM8008 and to avoid the parent supply of + * these regulators being stuck at an unnecessarily high voltage as a result of + * the framework maintaining a minimum vote of 1808 mV + headroom at all times + * (even when the LDOs are OFF). This would waste power. The LDO hardware + * automatically rounds up programmed voltages to supported set points. 
+ */ +static const struct regulator_data pm8010_reg_data[PM8008_MAX_LDO] = { + /* name parent min_uv max_uv hpm_load headroom_uv */ + {"l1", "vdd_l1_l2", 528000, 1544000, 30000, 100000}, + {"l2", "vdd_l1_l2", 528000, 1544000, 30000, 100000}, + {"l3", "vdd_l3_l4", 1504000, 3312000, 10000, 300000, + pm8010_p300_reg_init_data, ARRAY_SIZE(pm8010_p300_reg_init_data)}, + {"l4", "vdd_l3_l4", 1504000, 3312000, 10000, 300000, + pm8010_p300_reg_init_data, ARRAY_SIZE(pm8010_p300_reg_init_data)}, + {"l5", "vdd_l5", 1504000, 3544000, 10000, 300000, + pm8010_p600_reg_init_data, ARRAY_SIZE(pm8010_p600_reg_init_data)}, + {"l6", "vdd_l6", 1504000, 3312000, 10000, 300000, + pm8010_p300_reg_init_data, ARRAY_SIZE(pm8010_p300_reg_init_data)}, + {"l7", "vdd_l7", 1504000, 3544000, 10000, 300000, + pm8010_p600_reg_init_data, ARRAY_SIZE(pm8010_p600_reg_init_data)}, }; /* common functions */ @@ -121,7 +172,8 @@ static int pm8008_read(struct regmap *regmap, u16 reg, u8 *val, int count) return rc; } -static int pm8008_write(struct regmap *regmap, u16 reg, u8 *val, int count) +static int pm8008_write(struct regmap *regmap, u16 reg, const u8 *val, + int count) { int rc; @@ -559,6 +611,25 @@ static int pm8008_ldo_cb(struct notifier_block *nb, ulong event, void *data) return NOTIFY_OK; } +static int pm8008_regulator_register_init(struct pm8008_regulator *pm8008_reg, + const struct regulator_data *reg_data) +{ + int i, rc; + + if (!reg_data->reg_init) + return 0; + + for (i = 0; i < reg_data->reg_init_size; i++) { + rc = pm8008_write(pm8008_reg->regmap, + pm8008_reg->base + reg_data->reg_init[i].offset, + ®_data->reg_init[i].data, 1); + if (rc < 0) + return rc; + } + + return 0; +} + static int pm8008_register_ldo(struct pm8008_regulator *pm8008_reg, const char *name) { @@ -567,13 +638,17 @@ static int pm8008_register_ldo(struct pm8008_regulator *pm8008_reg, struct device *dev = pm8008_reg->dev; struct device_node *reg_node = pm8008_reg->of_node; char buff[MAX_REG_NAME]; + const struct regulator_data *reg_data; int rc, i, init_voltage; u32 base = 0; u8 reg; + reg_data = pm8008_reg->pmic_subtype == PM8008_SUBTYPE ? 
pm8008_reg_data + : pm8010_reg_data; + /* get regulator data */ for (i = 0; i < PM8008_MAX_LDO; i++) - if (!strcmp(reg_data[i].name, name)) + if (strstr(name, reg_data[i].name)) break; if (i == PM8008_MAX_LDO) { @@ -588,9 +663,13 @@ static int pm8008_register_ldo(struct pm8008_regulator *pm8008_reg, } pm8008_reg->base = base; + rc = pm8008_regulator_register_init(pm8008_reg, ®_data[i]); + if (rc) + return rc; + pm8008_reg->min_dropout_uv = reg_data[i].min_dropout_uv; of_property_read_u32(reg_node, "qcom,min-dropout-voltage", - &pm8008_reg->min_dropout_uv); + &pm8008_reg->min_dropout_uv); pm8008_reg->hpm_min_load_ua = reg_data[i].hpm_min_load_ua; of_property_read_u32(reg_node, "qcom,hpm-min-load", @@ -622,14 +701,18 @@ static int pm8008_register_ldo(struct pm8008_regulator *pm8008_reg, } /* get slew rate */ - rc = pm8008_read(pm8008_reg->regmap, - LDO_STEPPER_CTL_REG(pm8008_reg->base), ®, 1); - if (rc < 0) { - pr_err("%s: failed to read step rate configuration rc=%d\n", - name, rc); - return rc; + if (pm8008_reg->pmic_subtype == PM8008_SUBTYPE) { + rc = pm8008_read(pm8008_reg->regmap, + LDO_STEPPER_CTL_REG(pm8008_reg->base), ®, 1); + if (rc < 0) { + pr_err("%s: failed to read step rate configuration rc=%d\n", + name, rc); + return rc; + } + pm8008_reg->step_rate = 38400 >> (reg & STEP_RATE_MASK); + } else { + pm8008_reg->step_rate = PM8010_STEP_RATE; } - pm8008_reg->step_rate = 38400 >> (reg & STEP_RATE_MASK); scnprintf(buff, MAX_REG_NAME, "%s-supply", reg_data[i].supply_name); if (of_find_property(dev->of_node, buff, NULL)) { @@ -713,6 +796,18 @@ static int pm8008_register_ldo(struct pm8008_regulator *pm8008_reg, return 0; } +static const struct of_device_id pm8008_regulator_match_table[] = { + { + .compatible = "qcom,pm8008-regulator", + .data = (void *)(uintptr_t)PM8008_SUBTYPE, + }, + { + .compatible = "qcom,pm8010-regulator", + .data = (void *)(uintptr_t)PM8010_SUBTYPE, + }, + { }, +}; + /* PMIC probe and helper function */ static int pm8008_parse_regulator(struct regmap *regmap, struct device *dev) { @@ -720,8 +815,18 @@ static int pm8008_parse_regulator(struct regmap *regmap, struct device *dev) const char *name; struct device_node *child; struct pm8008_regulator *pm8008_reg; + const struct of_device_id *match; + enum pmic_subtype pmic_subtype; bool ocp; + match = of_match_node(pm8008_regulator_match_table, dev->of_node); + if (match) { + pmic_subtype = (uintptr_t)match->data; + } else { + dev_err(dev, "could not find compatible string match\n"); + return -ENODEV; + } + ocp = of_property_read_bool(dev->of_node, "qcom,enable-ocp-broadcast"); /* parse each subnode and register regulator for regulator child */ @@ -734,6 +839,7 @@ static int pm8008_parse_regulator(struct regmap *regmap, struct device *dev) pm8008_reg->of_node = child; pm8008_reg->dev = dev; pm8008_reg->enable_ocp_broadcast = ocp; + pm8008_reg->pmic_subtype = pmic_subtype; rc = of_property_read_string(child, "regulator-name", &name); if (rc) @@ -923,13 +1029,6 @@ static int pm8008_chip_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id pm8008_regulator_match_table[] = { - { - .compatible = "qcom,pm8008-regulator", - }, - { }, -}; - static struct platform_driver pm8008_regulator_driver = { .driver = { .name = "qcom,pm8008-regulator", diff --git a/drivers/regulator/qpnp-lcdb-regulator.c b/drivers/regulator/qpnp-lcdb-regulator.c index 62cd2520f0c0..972437a2f3cf 100644 --- a/drivers/regulator/qpnp-lcdb-regulator.c +++ b/drivers/regulator/qpnp-lcdb-regulator.c @@ -1509,7 +1509,7 @@ static struct 
regulator_ops qpnp_lcdb_ncp_ops = { static int qpnp_lcdb_regulator_register(struct qpnp_lcdb *lcdb, u8 type) { - int rc = 0, off_on_delay = 0; + int rc = 0, off_on_delay = 0, voltage_step = VOLTAGE_STEP_50_MV; struct regulator_init_data *init_data; struct regulator_config cfg = {}; struct regulator_desc *rdesc; @@ -1524,12 +1524,16 @@ static int qpnp_lcdb_regulator_register(struct qpnp_lcdb *lcdb, u8 type) rdesc = &lcdb->ldo.rdesc; rdesc->ops = &qpnp_lcdb_ldo_ops; rdesc->off_on_delay = off_on_delay; + rdesc->n_voltages = ((MAX_VOLTAGE_MV - MIN_VOLTAGE_MV) + / voltage_step) + 1; rdev = lcdb->ldo.rdev; } else if (type == NCP) { node = lcdb->ncp.node; rdesc = &lcdb->ncp.rdesc; rdesc->ops = &qpnp_lcdb_ncp_ops; rdesc->off_on_delay = off_on_delay; + rdesc->n_voltages = ((MAX_VOLTAGE_MV - MIN_VOLTAGE_MV) + / voltage_step) + 1; rdev = lcdb->ncp.rdev; } else { pr_err("Invalid regulator type %d\n", type); diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 26844dae39e5..b5ba99ae8be6 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2016-2017, Linaro Ltd - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. */ #include @@ -41,12 +41,20 @@ do { \ ch->lcid, ch->rcid, __func__, ##__VA_ARGS__); \ } while (0) - #define GLINK_ERR(ctxt, x, ...) \ do { \ pr_err_ratelimited("[%s]: "x, __func__, ##__VA_ARGS__); \ if (ctxt) \ ipc_log_string(ctxt, "[%s]: "x, __func__, ##__VA_ARGS__); \ + +#define CH_ERR(ch, x, ...) \ +do { \ + if (ch->glink) { \ + ipc_log_string(ch->glink->ilc, "%s[%d:%d] %s: "x, ch->name, \ + ch->lcid, ch->rcid, __func__, ##__VA_ARGS__); \ + dev_err_ratelimited(ch->glink->dev, "[%s]: "x, __func__, \ + ##__VA_ARGS__); \ + } \ } while (0) #define GLINK_NAME_SIZE 32 @@ -1054,12 +1062,14 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail) intent->offset, channel->ept.priv, RPMSG_ADDR_ANY); - if (ret < 0) - CH_INFO(channel, - "glink:callback error ret = %d\n", ret); + + if (ret < 0 && ret != -ENODEV) { + CH_ERR(channel, + "callback error ret = %d\n", ret); + ret = 0; + } } else { - CH_INFO(channel, "callback not present\n"); - dev_err(glink->dev, "glink:callback not present\n"); + CH_ERR(channel, "callback not present\n"); } spin_unlock(&channel->recv_lock); diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index 0bf9e026bae9..c99e4c23aad8 100644 --- a/drivers/rpmsg/qcom_smd.c +++ b/drivers/rpmsg/qcom_smd.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2015, Sony Mobile Communications AB. - * Copyright (c) 2012-2013, 2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2013, 2021 The Linux Foundation. All rights reserved. 
*/ #include @@ -662,12 +662,19 @@ static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel) len = channel->pkt_size; } - ret = ept->cb(ept->rpdev, ptr, len, ept->priv, RPMSG_ADDR_ANY); - if (ret < 0) { - smd_ipc(channel->edge->ipc, false, NULL, - "%s: ret %d len %d ch %s\n", __func__, ret, len, + if (ept->cb) { + ret = ept->cb(ept->rpdev, ptr, len, ept->priv, RPMSG_ADDR_ANY); + if (ret < 0) { + smd_ipc(channel->edge->ipc, false, NULL, + "%s: ret %d len %d ch %s\n", __func__, ret, len, channel->name); - return ret; + return ret; + } + } else { + smd_ipc(channel->edge->ipc, false, NULL, + "%s: Callback not available on channel: %s\n", __func__, + channel->name); + return -EAGAIN; } /* Only forward the tail if the client consumed the data */ @@ -855,6 +862,9 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel, word_aligned); } + /* Ensure ordering of channel info updates */ + wmb(); + head += count; head &= (channel->fifo_size - 1); SET_TX_CHANNEL_INFO(channel, head, head); @@ -897,7 +907,8 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data, spin_lock_irqsave(&channel->tx_lock, flags); while (qcom_smd_get_tx_avail(channel) < tlen && - channel->state == SMD_CHANNEL_OPENED) { + channel->state == SMD_CHANNEL_OPENED && + channel->remote_state == SMD_CHANNEL_OPENED) { if (!wait) { ret = -EAGAIN; goto out_unlock; @@ -910,7 +921,8 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data, ret = wait_event_interruptible(channel->fblockread_event, qcom_smd_get_tx_avail(channel) >= tlen || - channel->state != SMD_CHANNEL_OPENED); + channel->state != SMD_CHANNEL_OPENED || + channel->remote_state != SMD_CHANNEL_OPENED); if (ret) return ret; @@ -1486,6 +1498,9 @@ static void qcom_channel_state_worker(struct work_struct *work) chinfo.dst = RPMSG_ADDR_ANY; smd_ipc(channel->edge->ipc, false, NULL, "%s: unregistering ch %s\n", __func__, channel->name); + + wake_up_interruptible_all(&channel->fblockread_event); + rpmsg_unregister_device(&edge->dev, &chinfo); channel->registered = false; spin_lock_irqsave(&edge->channels_lock, flags); diff --git a/drivers/rpmsg/rpm-smd.c b/drivers/rpmsg/rpm-smd.c index 9977289c083b..9af8a51518c3 100644 --- a/drivers/rpmsg/rpm-smd.c +++ b/drivers/rpmsg/rpm-smd.c @@ -702,6 +702,24 @@ static struct msm_rpm_driver_data msm_rpm_data = { .smd_open = COMPLETION_INITIALIZER(msm_rpm_data.smd_open), }; +static int trysend_count = 20; +module_param(trysend_count, int, 0664); +static int msm_rpm_trysend_smd_buffer(char *buf, uint32_t size) +{ + int ret; + int count = 0; + + do { + ret = rpmsg_trysend(rpm->rpm_channel, buf, size); + if (!ret) + break; + udelay(10); + count++; + } while (count < trysend_count); + + return ret; +} + static int msm_rpm_flush_requests(bool print) { struct rb_node *t; @@ -719,7 +737,7 @@ static int msm_rpm_flush_requests(bool print) set_msg_id(s->buf, msm_rpm_get_next_msg_id()); - ret = rpmsg_send(rpm->rpm_channel, s->buf, get_buf_len(s->buf)); + ret = msm_rpm_trysend_smd_buffer(s->buf, get_buf_len(s->buf)); WARN_ON(ret != 0); trace_rpm_smd_send_sleep_set(get_msg_id(s->buf), type, id); diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 21902e027ee8..31e3c2fa1f92 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -65,6 +65,7 @@ static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, u32 clk_1us_cycles, u32 clk_40ns_cycles); static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host); +static int 
ufs_qcom_init_sysfs(struct ufs_hba *hba); static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len, char *prefix) @@ -1545,13 +1546,16 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, if (on && (status == POST_CHANGE)) { if (!host->is_phy_pwr_on) { - phy_power_on(host->generic_phy); + err = phy_power_on(host->generic_phy); host->is_phy_pwr_on = true; } /* enable the device ref clock for HS mode*/ if (ufshcd_is_hs_mode(&hba->pwr_info)) ufs_qcom_dev_ref_clk_ctrl(host, true); + if (!err) + atomic_set(&host->clks_on, on); + } else if (!on && (status == PRE_CHANGE)) { /* * If auto hibern8 is enabled then the link will already @@ -1569,6 +1573,8 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, } } + atomic_set(&host->clks_on, on); + if (list_empty(head)) goto out; /* @@ -2253,6 +2259,8 @@ static int ufs_qcom_init(struct ufs_hba *hba) err = 0; } + ufs_qcom_init_sysfs(hba); + ufs_qcom_save_host_ptr(hba); goto out; @@ -2429,6 +2437,8 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, break; } + if (!err) + atomic_set(&host->scale_up, scale_up); return err; } @@ -2704,6 +2714,8 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep) struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct phy *phy = host->generic_phy; + host->err_occurred = true; + ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16, "HCI Vendor Specific Registers "); ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); @@ -2768,6 +2780,139 @@ static struct ufs_hba_variant ufs_hba_qcom_variant = { .pm_qos_vops = &ufs_hba_pm_qos_variant_ops, }; +/** + * QCOM specific sysfs group and nodes + */ +static ssize_t err_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return scnprintf(buf, PAGE_SIZE, "%d\n", !!host->err_occurred); +} + +static DEVICE_ATTR_RO(err_state); + +static ssize_t power_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + static const char * const names[] = { + "INVALID MODE", + "FAST MODE", + "SLOW MODE", + "INVALID MODE", + "FASTAUTO MODE", + "SLOWAUTO MODE", + "INVALID MODE", + }; + + /* Print current power info */ + return scnprintf(buf, PAGE_SIZE, + "[Rx,Tx]: Gear[%d,%d], Lane[%d,%d], PWR[%s,%s], Rate-%c\n", + hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, + hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, + names[hba->pwr_info.pwr_rx], + names[hba->pwr_info.pwr_tx], + hba->pwr_info.hs_rate == PA_HS_MODE_B ? 
'B' : 'A'); +} + +static DEVICE_ATTR_RO(power_mode); + +static ssize_t bus_speed_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return scnprintf(buf, PAGE_SIZE, "%d\n", + !!atomic_read(&host->scale_up)); +} + +static DEVICE_ATTR_RO(bus_speed_mode); + +static ssize_t clk_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return scnprintf(buf, PAGE_SIZE, "%d\n", + !!atomic_read(&host->clks_on)); +} + +static DEVICE_ATTR_RO(clk_status); + +static unsigned int ufs_qcom_gec(struct ufs_hba *hba, + struct ufs_uic_err_reg_hist *err_hist, + char *err_name) +{ + unsigned long flags; + int i, cnt_err = 0; + + spin_lock_irqsave(hba->host->host_lock, flags); + for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) { + int p = (i + err_hist->pos) % UIC_ERR_REG_HIST_LENGTH; + + if (err_hist->tstamp[p] == 0) + continue; + dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p, + err_hist->reg[p], ktime_to_us(err_hist->tstamp[p])); + + ++cnt_err; + } + + spin_unlock_irqrestore(hba->host->host_lock, flags); + return cnt_err; +} + +static ssize_t err_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, + "%s: %d\n%s: %d\n%s: %d\n", + "pa_err_cnt_total", + ufs_qcom_gec(hba, &hba->ufs_stats.pa_err, + "pa_err_cnt_total"), + "dl_err_cnt_total", + ufs_qcom_gec(hba, &hba->ufs_stats.dl_err, + "dl_err_cnt_total"), + "dme_err_cnt", + ufs_qcom_gec(hba, &hba->ufs_stats.dme_err, + "dme_err_cnt")); +} + +static DEVICE_ATTR_RO(err_count); + +static struct attribute *ufs_qcom_sysfs_attrs[] = { + &dev_attr_err_state.attr, + &dev_attr_power_mode.attr, + &dev_attr_bus_speed_mode.attr, + &dev_attr_clk_status.attr, + &dev_attr_err_count.attr, + NULL +}; + +static const struct attribute_group ufs_qcom_sysfs_group = { + .name = "qcom", + .attrs = ufs_qcom_sysfs_attrs, +}; + +static int ufs_qcom_init_sysfs(struct ufs_hba *hba) +{ + int ret; + + ret = sysfs_create_group(&hba->dev->kobj, &ufs_qcom_sysfs_group); + if (ret) + dev_err(hba->dev, "%s: Failed to create qcom sysfs group (err = %d)\n", + __func__, ret); + + return ret; +} + /** * ufs_qcom_probe - probe routine of the driver * @pdev: pointer to Platform device handle diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h index 6538637b1c43..778309f5eb21 100644 --- a/drivers/scsi/ufs/ufs-qcom.h +++ b/drivers/scsi/ufs/ufs-qcom.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -365,6 +365,10 @@ struct ufs_qcom_host { struct ufs_vreg *vccq_parent; bool work_pending; bool is_phy_pwr_on; + bool err_occurred; + /* FlashPVL entries */ + atomic_t scale_up; + atomic_t clks_on; }; static inline u32 diff --git a/drivers/scsi/ufs/ufshcd-crypto-qti.c b/drivers/scsi/ufs/ufshcd-crypto-qti.c index 937b0ef0b4fc..a552fd37136e 100644 --- a/drivers/scsi/ufs/ufshcd-crypto-qti.c +++ b/drivers/scsi/ufs/ufshcd-crypto-qti.c @@ -271,7 +271,9 @@ int ufshcd_crypto_qti_init_crypto(struct ufs_hba *hba, mmio_base = devm_ioremap_resource(hba->dev, mem_res); if (IS_ERR(mmio_base)) { pr_err("%s: Unable to get ufs_crypto mmio base\n", __func__); - return PTR_ERR(mmio_base); + hba->caps &= ~UFSHCD_CAP_CRYPTO; + hba->quirks |= UFSHCD_QUIRK_BROKEN_CRYPTO; + return err; } err = ufshcd_hba_init_crypto_qti_spec(hba, &ufshcd_crypto_qti_ksm_ops); diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index b6067f7f227b..19079745c6cc 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -315,6 +315,15 @@ config QCOM_MEMORY_DUMP_V2 of deadlocks or cpu hangs these dump regions are captured to give a snapshot of the system at the time of the crash. +config MSM_DEBUG_LAR_UNLOCK + bool "MSM Debug LAR Unlock Support" + depends on QCOM_MEMORY_DUMP_V2 + help + This allows unlocking Core Debug lock to allow capture + of upper 32 bits of program counter at the time of + system crash. This is useful in getting correct crash + location. + config QCOM_SMEM_STATE bool @@ -407,7 +416,7 @@ config MSM_REMOTEQDSS /sys/class/remoteqdss. config MSM_TZ_SMMU - depends on ARCH_MSM8953 || ARCH_QCS405 || ARCH_MSM8917 + depends on ARCH_MSM8953 || ARCH_QCS405 || ARCH_MSM8917 || ARCH_MSM8937 bool "Helper functions for SMMU configuration through TZ" help Say 'Y' here for targets that need to call into TZ to configure @@ -806,6 +815,13 @@ config QTI_L2_REUSE to let the power collapsed cluster's L2 cache usage by the active cluster cpu. Use sysfs interface to control enabling this feature. +config QCOM_DCC + bool "QCOM Data Capture and Compare engine support" + help + This option enables driver for Data Capture and Compare engine. DCC + driver provides interface to configure DCC block and read back + captured data from DCC's internal SRAM. 
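
The QCOM_DCC help text above refers to an interface for configuring the DCC block and reading back its SRAM; concretely, the dcc driver added later in this patch exposes sysfs device attributes such as config, func_type ("cap"/"crc"), data_sink ("sram"/"atb"), enable and trigger. The sketch below is a minimal user-space illustration that arms DCC for SRAM capture and fires one software trigger; the sysfs directory path is a placeholder (it depends on the platform-device/DT node name), and the register addresses are assumed to have already been added through the config node.

#include <stdio.h>

/* Placeholder path: the real directory depends on the DCC DT node name. */
#define DCC_SYSFS "/sys/bus/platform/devices/PLACEHOLDER.dcc"

static int dcc_write_attr(const char *attr, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", DCC_SYSFS, attr);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	if (fputs(val, f) == EOF) {
		fclose(f);
		return -1;
	}
	return fclose(f);
}

int main(void)
{
	/* Capture mode, data stored in DCC's internal SRAM.
	 * Addresses must already have been added via the "config" node.
	 */
	if (dcc_write_attr("func_type", "cap") ||
	    dcc_write_attr("data_sink", "sram") ||
	    dcc_write_attr("enable", "1") ||	/* programs the SRAM linked list */
	    dcc_write_attr("trigger", "1"))	/* one software trigger */
		return 1;
	return 0;
}
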
+ config QTI_RPM_STATS_LOG bool "Qualcomm Technologies RPM Stats Driver" depends on QCOM_RPMH || MSM_RPM_SMD diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index f24825b7d6f3..3fe90e5ab8e5 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -86,6 +86,7 @@ ifdef CONFIG_QCOM_RPMH else obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpm_master_stat.o endif +obj-$(CONFIG_QCOM_DCC) += dcc.o obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpm_stats.o obj-$(CONFIG_QCOM_MEM_OFFLINE) += mem-offline.o obj-$(CONFIG_QTI_DDR_STATS_LOG) += ddr_stats.o diff --git a/drivers/soc/qcom/bam_dmux.c b/drivers/soc/qcom/bam_dmux.c index 6e6669a13059..c6ec3156c17e 100644 --- a/drivers/soc/qcom/bam_dmux.c +++ b/drivers/soc/qcom/bam_dmux.c @@ -299,6 +299,8 @@ static struct notifier_block restart_notifier = { static int in_global_reset; /* end subsystem restart */ +static int ssr_skipped_reconnect; + #define bam_ch_is_open(x) \ (bam_ch[(x)].status == (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN)) @@ -1122,6 +1124,12 @@ int msm_bam_dmux_open(uint32_t id, void *priv, kfree(hdr); return -ENODEV; } + if (in_global_reset) { + BAM_DMUX_LOG("%s: In SSR... ch_id[%d]\n", __func__, id); + spin_unlock_irqrestore(&bam_ch[id].lock, flags); + kfree(hdr); + return -ENODEV; + } bam_ch[id].notify = notify; bam_ch[id].priv = priv; @@ -1200,6 +1208,12 @@ int msm_bam_dmux_close(uint32_t id) return 0; } + if (in_global_reset) { + BAM_DMUX_LOG("%s: In SSR... ch_id[%d]\n", __func__, id); + read_unlock(&ul_wakeup_lock); + return 0; + } + hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC); if (hdr == NULL) { read_unlock(&ul_wakeup_lock); @@ -1995,6 +2009,7 @@ static void reconnect_to_bam(void) int i; if (in_global_reset) { + ssr_skipped_reconnect = 1; BAM_DMUX_LOG("%s: skipping due to SSR\n", __func__); return; } @@ -2048,7 +2063,7 @@ static void disconnect_to_bam(void) unsigned long flags; unsigned long time_remaining; - if (!in_global_reset) { + if (!in_global_reset && !ssr_skipped_reconnect) { time_remaining = wait_for_completion_timeout( &shutdown_completion, msecs_to_jiffies(SHUTDOWN_TIMEOUT_MS)); @@ -2103,6 +2118,7 @@ static void disconnect_to_bam(void) mutex_unlock(&bam_rx_pool_mutexlock); toggle_apps_ack(); verify_tx_queue_is_empty(__func__); + ssr_skipped_reconnect = 0; } static void vote_dfab(void) @@ -2240,6 +2256,8 @@ static int restart_notifier_cb(struct notifier_block *this, process_dynamic_mtu(false); set_ul_mtu(0, true); dynamic_mtu_enabled = false; + if (bam_connection_is_active) + ssr_skipped_reconnect = 1; /* Cleanup Channel States */ mutex_lock(&bam_pdev_mutexlock); @@ -2465,7 +2483,7 @@ static void toggle_apps_ack(void) { static unsigned int clear_bit; /* 0 = set the bit, else clear bit */ - if (in_global_reset) { + if (in_global_reset || ssr_skipped_reconnect) { BAM_DMUX_LOG("%s: skipped due to SSR\n", __func__); return; } diff --git a/drivers/soc/qcom/cdsprm.c b/drivers/soc/qcom/cdsprm.c index 3766a14a4fcd..1f38ce72869d 100644 --- a/drivers/soc/qcom/cdsprm.c +++ b/drivers/soc/qcom/cdsprm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. 
*/ /* @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -179,13 +180,17 @@ struct cdsprm { bool b_rpmsg_register; bool b_qosinitdone; bool b_applyingNpuLimit; - int latency_request; + bool b_silver_en; + int latency_request; struct dentry *debugfs_dir; struct dentry *debugfs_file; int (*set_l3_freq)(unsigned int freq_khz); int (*set_l3_freq_cached)(unsigned int freq_khz); int (*set_corner_limit)(enum cdsprm_npu_corner); int (*set_corner_limit_cached)(enum cdsprm_npu_corner); + u32 *coreno; + u32 corecount; + struct dev_pm_qos_request *dev_pm_qos_req; }; static struct cdsprm gcdsprm; @@ -454,15 +459,92 @@ void cdsprm_unregister_cdspl3gov(void) } EXPORT_SYMBOL(cdsprm_unregister_cdspl3gov); +static void qos_cores_init(struct device *dev) +{ + int i, err = 0; + u32 *cpucores = NULL; + + of_find_property(dev->of_node, + "qcom,qos-cores", &gcdsprm.corecount); + + if (gcdsprm.corecount) { + gcdsprm.corecount /= sizeof(u32); + + cpucores = kcalloc(gcdsprm.corecount, + sizeof(u32), GFP_KERNEL); + + if (cpucores == NULL) { + dev_err(dev, + "kcalloc failed for cpucores\n"); + gcdsprm.b_silver_en = false; + } else { + for (i = 0; i < gcdsprm.corecount; i++) { + err = of_property_read_u32_index(dev->of_node, + "qcom,qos-cores", i, &cpucores[i]); + if (err) { + dev_err(dev, + "%s: failed to read QOS coree for core:%d\n", + __func__, i); + gcdsprm.b_silver_en = false; + } + } + + gcdsprm.coreno = cpucores; + + gcdsprm.dev_pm_qos_req = kcalloc(gcdsprm.corecount, + sizeof(struct dev_pm_qos_request), GFP_KERNEL); + + if (gcdsprm.dev_pm_qos_req == NULL) { + dev_err(dev, + "kcalloc failed for dev_pm_qos_req\n"); + gcdsprm.b_silver_en = false; + } + } + } +} + static void set_qos_latency(int latency) { - if (!gcdsprm.qos_request) { - pm_qos_add_request(&gcdsprm.pm_qos_req, - PM_QOS_CPU_DMA_LATENCY, latency); - gcdsprm.qos_request = true; + int err = 0; + u32 ii = 0; + int cpu; + + if (gcdsprm.b_silver_en) { + + for (ii = 0; ii < gcdsprm.corecount; ii++) { + cpu = gcdsprm.coreno[ii]; + + if (!gcdsprm.qos_request) { + err = dev_pm_qos_add_request( + get_cpu_device(cpu), + &gcdsprm.dev_pm_qos_req[ii], + DEV_PM_QOS_RESUME_LATENCY, + latency); + } else { + err = dev_pm_qos_update_request( + &gcdsprm.dev_pm_qos_req[ii], + latency); + } + + if (err < 0) { + pr_err("%s: %s: PM voting cpu:%d fail,err %d,QoS update %d\n", + current->comm, __func__, cpu, + err, gcdsprm.qos_request); + break; + } + } + + if (err >= 0) + gcdsprm.qos_request = true; } else { - pm_qos_update_request(&gcdsprm.pm_qos_req, + if (!gcdsprm.qos_request) { + pm_qos_add_request(&gcdsprm.pm_qos_req, + PM_QOS_CPU_DMA_LATENCY, latency); + gcdsprm.qos_request = true; + } else { + pm_qos_update_request(&gcdsprm.pm_qos_req, latency); + } } } @@ -506,9 +588,10 @@ static void process_rm_request(struct sysmon_msg *msg) } else if ((rm_msg->b_qos_flag == SYSMON_CDSP_QOS_FLAG_DISABLE) && (gcdsprm.latency_request != - QOS_LATENCY_DISABLE_VALUE)) { - set_qos_latency(QOS_LATENCY_DISABLE_VALUE); - gcdsprm.latency_request = QOS_LATENCY_DISABLE_VALUE; + PM_QOS_RESUME_LATENCY_DEFAULT_VALUE)) { + set_qos_latency(PM_QOS_RESUME_LATENCY_DEFAULT_VALUE); + gcdsprm.latency_request = + PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; pr_debug("Set qos latency to %d\n", gcdsprm.latency_request); } @@ -549,8 +632,8 @@ static void process_delayed_rm_request(struct work_struct *work) curr_timestamp = arch_counter_get_cntvct(); } - set_qos_latency(QOS_LATENCY_DISABLE_VALUE); - gcdsprm.latency_request = QOS_LATENCY_DISABLE_VALUE; + 
set_qos_latency(PM_QOS_RESUME_LATENCY_DEFAULT_VALUE); + gcdsprm.latency_request = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; pr_debug("Set qos latency to %d\n", gcdsprm.latency_request); gcdsprm.dt_state = CDSP_DELAY_THREAD_EXITING; @@ -640,16 +723,22 @@ static int process_cdsp_request_thread(void *data) result = wait_event_interruptible(cdsprm_wq, (req = get_next_request())); + if (kthread_should_stop()) + break; + if (result) continue; + if (!req) + break; + msg = &req->msg; - if ((msg->feature_id == SYSMON_CDSP_FEATURE_RM_RX) && + if (msg && (msg->feature_id == SYSMON_CDSP_FEATURE_RM_RX) && gcdsprm.b_qosinitdone) { process_rm_request(msg); - } else if (msg->feature_id == - SYSMON_CDSP_FEATURE_L3_RX) { + } else if (msg && (msg->feature_id == + SYSMON_CDSP_FEATURE_L3_RX)) { l3_clock_khz = msg->fs.l3_struct.l3_clock_khz; spin_lock_irqsave(&gcdsprm.l3_lock, flags); @@ -661,8 +750,8 @@ static int process_cdsp_request_thread(void *data) pr_debug("Set L3 clock %d done\n", l3_clock_khz); } - } else if (msg->feature_id == - SYSMON_CDSP_FEATURE_NPU_LIMIT_RX) { + } else if (msg && (msg->feature_id == + SYSMON_CDSP_FEATURE_NPU_LIMIT_RX)) { mutex_lock(&gcdsprm.npu_activity_lock); gcdsprm.set_corner_limit_cached = @@ -706,8 +795,8 @@ static int process_cdsp_request_thread(void *data) pr_err("rpmsg send failed %d\n", result); else pr_debug("NPU limit ack sent\n"); - } else if (msg->feature_id == - SYSMON_CDSP_FEATURE_VERSION_RX) { + } else if (msg && (msg->feature_id == + SYSMON_CDSP_FEATURE_VERSION_RX)) { cdsprm_rpmsg_send_details(); pr_debug("Sent preserved data to DSP\n"); } @@ -928,6 +1017,12 @@ static int cdsp_rm_driver_probe(struct platform_device *pdev) struct thermal_cooling_device *tcdev = 0; unsigned int cooling_cells = 0; + gcdsprm.b_silver_en = of_property_read_bool(dev->of_node, + "qcom,qos-cores"); + + if (gcdsprm.b_silver_en) + qos_cores_init(dev); + if (of_property_read_u32(dev->of_node, "qcom,qos-latency-us", &gcdsprm.qos_latency_us)) { return -EINVAL; @@ -1101,6 +1196,7 @@ static int __init cdsprm_init(void) goto bail; } + err = platform_driver_register(&hvx_rm); if (err) { diff --git a/drivers/soc/qcom/crypto-qti-common.c b/drivers/soc/qcom/crypto-qti-common.c index 969956489cef..51a4a9beb10c 100644 --- a/drivers/soc/qcom/crypto-qti-common.c +++ b/drivers/soc/qcom/crypto-qti-common.c @@ -398,7 +398,7 @@ int crypto_qti_keyslot_program(void *priv_data, unsigned int slot, u8 data_unit_mask, int capid) { - int err = 0; + int err1 = 0, err2 = 0; struct crypto_vops_qti_entry *ice_entry; ice_entry = (struct crypto_vops_qti_entry *) priv_data; @@ -407,19 +407,19 @@ int crypto_qti_keyslot_program(void *priv_data, return -EINVAL; } - err = crypto_qti_program_key(ice_entry, key, slot, + err1 = crypto_qti_program_key(ice_entry, key, slot, data_unit_mask, capid); - if (err) { - pr_err("%s: program key failed with error %d\n", __func__, err); - err = crypto_qti_invalidate_key(ice_entry, slot); - if (err) { + if (err1) { + pr_err("%s: program key failed with error %d\n", + __func__, err1); + err2 = crypto_qti_invalidate_key(ice_entry, slot); + if (err2) { pr_err("%s: invalidate key failed with error %d\n", - __func__, err); - return err; + __func__, err2); } } - return err; + return err1; } int crypto_qti_keyslot_evict(void *priv_data, unsigned int slot) diff --git a/drivers/soc/qcom/dcc.c b/drivers/soc/qcom/dcc.c new file mode 100644 index 000000000000..836375c3b18a --- /dev/null +++ b/drivers/soc/qcom/dcc.c @@ -0,0 +1,1365 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015-2021, 
The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RPM_MISC_REQ_TYPE 0x6373696d +#define RPM_MISC_DDR_DCC_ENABLE 0x32726464 + +#define TIMEOUT_US (100) + +#define BM(lsb, msb) ((BIT(msb) - BIT(lsb)) + BIT(msb)) +#define BMVAL(val, lsb, msb) ((val & BM(lsb, msb)) >> lsb) +#define BVAL(val, n) ((val & BIT(n)) >> n) + +#define dcc_writel(drvdata, val, off) \ + __raw_writel((val), drvdata->base + off) +#define dcc_readl(drvdata, off) \ + __raw_readl(drvdata->base + off) + +#define dcc_sram_readl(drvdata, off) \ + __raw_readl(drvdata->ram_base + off) + +/* DCC registers */ +#define DCC_HW_VERSION (0x00) +#define DCC_HW_INFO (0x04) +#define DCC_CGC_CFG (0x10) +#define DCC_LL (0x14) +#define DCC_RAM_CFG (0x18) +#define DCC_CFG (0x1C) +#define DCC_SW_CTL (0x20) +#define DCC_STATUS (0x24) +#define DCC_FETCH_ADDR (0x28) +#define DCC_SRAM_ADDR (0x2C) +#define DCC_INT_ENABLE (0x30) +#define DCC_INT_STATUS (0x34) +#define DCC_QSB_CFG (0x38) + +#define DCC_REG_DUMP_MAGIC_V2 (0x42445953) +#define DCC_REG_DUMP_VER (1) + +#define MAX_DCC_OFFSET (0xFF * 4) +#define MAX_DCC_LEN 0x7F + +#define SCM_SVC_DISABLE_XPU 0x23 + +enum dcc_func_type { + DCC_FUNC_TYPE_CAPTURE, + DCC_FUNC_TYPE_CRC, +}; + +static const char * const str_dcc_func_type[] = { + [DCC_FUNC_TYPE_CAPTURE] = "cap", + [DCC_FUNC_TYPE_CRC] = "crc", +}; + +enum dcc_data_sink { + DCC_DATA_SINK_ATB, + DCC_DATA_SINK_SRAM +}; + +static const char * const str_dcc_data_sink[] = { + [DCC_DATA_SINK_ATB] = "atb", + [DCC_DATA_SINK_SRAM] = "sram", +}; + +struct rpm_trig_req { + uint32_t enable; + uint32_t reserved; +}; + +struct dcc_config_entry { + uint32_t base; + uint32_t offset; + uint32_t len; + uint32_t index; + struct list_head list; +}; + +struct dcc_drvdata { + void __iomem *base; + uint32_t reg_size; + struct device *dev; + struct mutex mutex; + void __iomem *ram_base; + uint32_t ram_size; + struct clk *clk; + enum dcc_data_sink data_sink; + enum dcc_func_type func_type; + uint32_t ram_cfg; + bool enable; + bool interrupt_disable; + char *sram_node; + struct cdev sram_dev; + struct class *sram_class; + struct list_head config_head; + uint32_t nr_config; + void *reg_buf; + struct msm_dump_data reg_data; + bool save_reg; + void *sram_buf; + struct msm_dump_data sram_data; + struct rpm_trig_req rpm_trig_req; + struct msm_rpm_kvp rpm_kvp; + bool xpu_scm_avail; + uint64_t xpu_addr; + uint32_t xpu_unlock_count; +}; +static int dcc_sram_writel(struct dcc_drvdata *drvdata, + uint32_t val, uint32_t off) +{ + if (unlikely(off > (drvdata->ram_size - 4))) + return -EINVAL; + + __raw_writel((val), drvdata->ram_base + off); + + return 0; +} + +static int dcc_cfg_xpu(struct dcc_drvdata *drvdata, bool enable) +{ + struct scm_desc desc = {0}; + + desc.args[0] = drvdata->xpu_addr; + desc.args[1] = enable; + desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL); + + return scm_call2(SCM_SIP_FNID(SCM_SVC_MP, SCM_SVC_DISABLE_XPU), &desc); +} + +static int dcc_xpu_lock(struct dcc_drvdata *drvdata) +{ + int ret = 0; + + mutex_lock(&drvdata->mutex); + if (!drvdata->xpu_scm_avail) + goto err; + + if (drvdata->xpu_unlock_count == 0) + goto err; + + if (drvdata->xpu_unlock_count == 1) { + ret = clk_prepare_enable(drvdata->clk); + if (ret) + goto err; + + /* make sure all access to DCC are completed */ + mb(); + + ret = dcc_cfg_xpu(drvdata, 1); + if (ret) + dev_err(drvdata->dev, "Failed to lock DCC XPU.\n"); + + 
		clk_disable_unprepare(drvdata->clk);
+	}
+
+	if (!ret)
+		drvdata->xpu_unlock_count--;
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+
+static int dcc_xpu_unlock(struct dcc_drvdata *drvdata)
+{
+	int ret = 0;
+
+	mutex_lock(&drvdata->mutex);
+	if (!drvdata->xpu_scm_avail)
+		goto err;
+
+	if (drvdata->xpu_unlock_count == 0) {
+		ret = clk_prepare_enable(drvdata->clk);
+		if (ret)
+			goto err;
+
+		ret = dcc_cfg_xpu(drvdata, 0);
+		if (ret)
+			dev_err(drvdata->dev, "Failed to unlock DCC XPU.\n");
+
+		clk_disable_unprepare(drvdata->clk);
+	}
+
+	if (!ret)
+		drvdata->xpu_unlock_count++;
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+
+static bool dcc_ready(struct dcc_drvdata *drvdata)
+{
+	uint32_t val;
+
+	/* poll until DCC ready */
+	if (!readl_poll_timeout((drvdata->base + DCC_STATUS), val,
+				(BVAL(val, 4) == 1), 1, TIMEOUT_US))
+		return true;
+
+	return false;
+}
+
+static int dcc_sw_trigger(struct dcc_drvdata *drvdata)
+{
+	int ret;
+
+	ret = 0;
+	mutex_lock(&drvdata->mutex);
+
+	if (!drvdata->enable) {
+		dev_err(drvdata->dev,
+			"DCC is disabled. Can't send sw trigger.\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (!dcc_ready(drvdata)) {
+		dev_err(drvdata->dev, "DCC is not ready!\n");
+		ret = -EBUSY;
+		goto err;
+	}
+
+	dcc_writel(drvdata, 1, DCC_SW_CTL);
+
+	if (!dcc_ready(drvdata)) {
+		dev_err(drvdata->dev,
+			"DCC is busy after receiving sw trigger.\n");
+		ret = -EBUSY;
+		goto err;
+	}
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+
+static int __dcc_ll_cfg(struct dcc_drvdata *drvdata)
+{
+	int ret = 0;
+	uint32_t sram_offset = 0;
+	uint32_t prev_addr, addr;
+	uint32_t prev_off = 0, off;
+	uint32_t link;
+	uint32_t pos, total_len = 0;
+	struct dcc_config_entry *entry;
+
+	if (list_empty(&drvdata->config_head)) {
+		dev_err(drvdata->dev,
+			"No configuration is available to program in DCC SRAM!\n");
+		return -EINVAL;
+	}
+
+	memset_io(drvdata->ram_base, 0, drvdata->ram_size);
+
+	prev_addr = 0;
+	link = 0;
+
+	list_for_each_entry(entry, &drvdata->config_head, list) {
+		/* Address type */
+		addr = (entry->base >> 4) & BM(0, 27);
+		addr |= BIT(31);
+		off = entry->offset/4;
+		total_len += entry->len * 4;
+
+		if (!prev_addr || prev_addr != addr || prev_off > off) {
+			/* Check if we need to write link of prev entry */
+			if (link) {
+				ret = dcc_sram_writel(drvdata,
+						link, sram_offset);
+				if (ret)
+					goto overstep;
+				sram_offset += 4;
+			}
+
+			/* Write address */
+			ret = dcc_sram_writel(drvdata, addr, sram_offset);
+			if (ret)
+				goto overstep;
+			sram_offset += 4;
+
+			/* Reset link and prev_off */
+			link = 0;
+			prev_off = 0;
+		}
+
+		if ((off - prev_off) > 0xFF || entry->len > MAX_DCC_LEN) {
+			dev_err(drvdata->dev,
+				"DCC: Programming error! 
Base: 0x%x, offset 0x%x.\n", + entry->base, entry->offset); + ret = -EINVAL; + goto err; + } + + if (link) { + /* + * link already has one offset-length so new + * offset-length needs to be placed at bits [31:16] + */ + pos = 16; + + /* Clear bits [31:16] */ + link &= BM(0, 15); + + } else { + /* + * link is empty, so new offset-length needs to be + * placed at bits [15:0] + */ + pos = 0; + link = 1 << 16; + } + + /* write new offset-length pair to correct position */ + link |= (((off-prev_off) & BM(0, 7)) | + ((entry->len << 8) & BM(8, 14))) << pos; + + if (pos) { + ret = dcc_sram_writel(drvdata, link, sram_offset); + if (ret) + goto overstep; + sram_offset += 4; + link = 0; + } + + prev_off = off; + prev_addr = addr; + } + + if (link) { + ret = dcc_sram_writel(drvdata, link, sram_offset); + if (ret) + goto overstep; + sram_offset += 4; + } + + /* Setting zero to indicate end of the list */ + ret = dcc_sram_writel(drvdata, 0, sram_offset); + if (ret) + goto overstep; + sram_offset += 4; + + /* check if the data will overstep */ + if (drvdata->data_sink == DCC_DATA_SINK_SRAM + && drvdata->func_type == DCC_FUNC_TYPE_CAPTURE) { + if (sram_offset + total_len > drvdata->ram_size) { + sram_offset += total_len; + goto overstep; + } + } else { + if (sram_offset > drvdata->ram_size) + goto overstep; + } + + drvdata->ram_cfg = (sram_offset / 4); + return 0; +overstep: + ret = -EINVAL; + memset_io(drvdata->ram_base, 0, drvdata->ram_size); + dev_err(drvdata->dev, "DCC SRAM oversteps, 0x%x (0x%x)\n", + sram_offset, drvdata->ram_size); +err: + return ret; +} + +static void __dcc_reg_dump(struct dcc_drvdata *drvdata) +{ + uint32_t *reg_buf; + + if (!drvdata->reg_buf) + return; + + drvdata->reg_data.version = DCC_REG_DUMP_VER; + + reg_buf = drvdata->reg_buf; + + reg_buf[0] = dcc_readl(drvdata, DCC_HW_VERSION); + reg_buf[1] = dcc_readl(drvdata, DCC_HW_INFO); + reg_buf[2] = dcc_readl(drvdata, DCC_CGC_CFG); + reg_buf[3] = dcc_readl(drvdata, DCC_LL); + reg_buf[4] = dcc_readl(drvdata, DCC_RAM_CFG); + reg_buf[5] = dcc_readl(drvdata, DCC_CFG); + reg_buf[6] = dcc_readl(drvdata, DCC_SW_CTL); + reg_buf[7] = dcc_readl(drvdata, DCC_STATUS); + reg_buf[8] = dcc_readl(drvdata, DCC_FETCH_ADDR); + reg_buf[9] = dcc_readl(drvdata, DCC_SRAM_ADDR); + reg_buf[10] = dcc_readl(drvdata, DCC_INT_ENABLE); + reg_buf[11] = dcc_readl(drvdata, DCC_INT_STATUS); + reg_buf[12] = dcc_readl(drvdata, DCC_QSB_CFG); + + drvdata->reg_data.magic = DCC_REG_DUMP_MAGIC_V2; +} + +static void __dcc_first_crc(struct dcc_drvdata *drvdata) +{ + int i; + + /* + * Need to send 2 triggers to DCC. First trigger sets CRC error status + * bit. So need second trigger to reset this bit. + */ + for (i = 0; i < 2; i++) { + if (!dcc_ready(drvdata)) + dev_err(drvdata->dev, "DCC is not ready!\n"); + + dcc_writel(drvdata, 1, DCC_SW_CTL); + } + + /* Clear CRC error interrupt */ + dcc_writel(drvdata, BIT(0), DCC_INT_STATUS); +} + +static int dcc_enable(struct dcc_drvdata *drvdata) +{ + int ret = 0; + + mutex_lock(&drvdata->mutex); + + if (drvdata->enable) { + dev_err(drvdata->dev, "DCC is already enabled!\n"); + mutex_unlock(&drvdata->mutex); + return 0; + } + + /* 1. Prepare and enable DCC clock */ + ret = clk_prepare_enable(drvdata->clk); + if (ret) + goto err; + + dcc_writel(drvdata, 0, DCC_LL); + + /* 2. Program linked-list in the SRAM */ + ret = __dcc_ll_cfg(drvdata); + if (ret) + goto err_prog_ll; + + /* 3. 
If in capture mode program DCC_RAM_CFG reg */ + if (drvdata->func_type == DCC_FUNC_TYPE_CAPTURE) + dcc_writel(drvdata, drvdata->ram_cfg, DCC_RAM_CFG); + + /* 4. Configure data sink and function type */ + dcc_writel(drvdata, ((drvdata->data_sink << 4) | (drvdata->func_type)), + DCC_CFG); + + /* 5. Clears interrupt status register */ + dcc_writel(drvdata, 0, DCC_INT_ENABLE); + dcc_writel(drvdata, (BIT(4) | BIT(0)), DCC_INT_STATUS); + + /* Make sure all config is written in sram */ + mb(); + + /* 6. Set LL bit */ + dcc_writel(drvdata, 1, DCC_LL); + drvdata->enable = true; + + if (drvdata->func_type == DCC_FUNC_TYPE_CRC) { + __dcc_first_crc(drvdata); + + /* Enable CRC error interrupt */ + if (!drvdata->interrupt_disable) + dcc_writel(drvdata, BIT(0), DCC_INT_ENABLE); + } + + /* Save DCC registers */ + if (drvdata->save_reg) + __dcc_reg_dump(drvdata); + +err_prog_ll: + if (!drvdata->enable) + clk_disable_unprepare(drvdata->clk); +err: + mutex_unlock(&drvdata->mutex); + return ret; +} + +static int __dcc_rpm_sw_trigger(struct dcc_drvdata *drvdata, bool enable) +{ + int ret = 0; + struct msm_rpm_kvp *rpm_kvp = &drvdata->rpm_kvp; + + if (enable == drvdata->rpm_trig_req.enable) + return 0; + + if (enable && (!drvdata->enable || drvdata->func_type != + DCC_FUNC_TYPE_CRC)) { + dev_err(drvdata->dev, + "DCC: invalid state! Can't send sw trigger req to rpm\n"); + return -EINVAL; + } + + drvdata->rpm_trig_req.enable = enable; + rpm_kvp->key = RPM_MISC_DDR_DCC_ENABLE; + rpm_kvp->length = sizeof(struct rpm_trig_req); + rpm_kvp->data = (void *)(&drvdata->rpm_trig_req); + + ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET, + RPM_MISC_REQ_TYPE, 0, rpm_kvp, 1); + if (ret) { + dev_err(drvdata->dev, + "DCC: SW trigger %s req to rpm failed %d\n", + (enable ? "enable" : "disable"), ret); + drvdata->rpm_trig_req.enable = !enable; + } + + return ret; +} + +static void dcc_disable(struct dcc_drvdata *drvdata) +{ + mutex_lock(&drvdata->mutex); + if (!drvdata->enable) { + mutex_unlock(&drvdata->mutex); + return; + } + + /* Send request to RPM to disable DCC SW trigger */ + + if (__dcc_rpm_sw_trigger(drvdata, 0)) + dev_err(drvdata->dev, + "DCC: Request to RPM to disable SW trigger failed.\n"); + + if (!dcc_ready(drvdata)) + dev_err(drvdata->dev, "DCC is not ready! 
Disabling DCC...\n"); + + dcc_writel(drvdata, 0, DCC_LL); + drvdata->enable = false; + + /* Save DCC registers */ + if (drvdata->save_reg) + __dcc_reg_dump(drvdata); + + clk_disable_unprepare(drvdata->clk); + + mutex_unlock(&drvdata->mutex); +} + +static ssize_t func_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%s\n", + str_dcc_func_type[drvdata->func_type]); +} + +static ssize_t func_type_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + char str[10] = ""; + int ret; + + if (strlen(buf) >= 10) + return -EINVAL; + + if (sscanf(buf, "%9s", str) != 1) + return -EINVAL; + + mutex_lock(&drvdata->mutex); + if (drvdata->enable) { + ret = -EBUSY; + goto out; + } + + if (!strcmp(str, str_dcc_func_type[DCC_FUNC_TYPE_CAPTURE])) + drvdata->func_type = DCC_FUNC_TYPE_CAPTURE; + else if (!strcmp(str, str_dcc_func_type[DCC_FUNC_TYPE_CRC])) + drvdata->func_type = DCC_FUNC_TYPE_CRC; + else { + ret = -EINVAL; + goto out; + } + + ret = size; +out: + mutex_unlock(&drvdata->mutex); + return ret; +} +static DEVICE_ATTR_RW(func_type); + +static ssize_t data_sink_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%s\n", + str_dcc_data_sink[drvdata->data_sink]); +} + +static ssize_t data_sink_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + char str[10] = ""; + int ret; + + if (strlen(buf) >= 10) + return -EINVAL; + + if (sscanf(buf, "%9s", str) != 1) + return -EINVAL; + + mutex_lock(&drvdata->mutex); + if (drvdata->enable) { + ret = -EBUSY; + goto out; + } + + if (!strcmp(str, str_dcc_data_sink[DCC_DATA_SINK_SRAM])) + drvdata->data_sink = DCC_DATA_SINK_SRAM; + else if (!strcmp(str, str_dcc_data_sink[DCC_DATA_SINK_ATB])) + drvdata->data_sink = DCC_DATA_SINK_ATB; + else { + ret = -EINVAL; + goto out; + } + + ret = size; +out: + mutex_unlock(&drvdata->mutex); + return ret; +} +static DEVICE_ATTR_RW(data_sink); + +static ssize_t trigger_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret = 0; + unsigned long val; + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + if (val != 1) + return -EINVAL; + + ret = dcc_xpu_unlock(drvdata); + if (ret) + return ret; + + ret = dcc_sw_trigger(drvdata); + if (!ret) + ret = size; + + dcc_xpu_lock(drvdata); + return ret; +} +static DEVICE_ATTR_WO(trigger); + +static ssize_t enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%u\n", + (unsigned int)drvdata->enable); +} + +static ssize_t enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret = 0; + unsigned long val; + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + ret = dcc_xpu_unlock(drvdata); + if (ret) + return ret; + + if (val) + ret = dcc_enable(drvdata); + else + dcc_disable(drvdata); + + if (!ret) + ret = size; + + dcc_xpu_lock(drvdata); + return ret; + +} +static DEVICE_ATTR_RW(enable); + +static ssize_t config_show(struct device *dev, + struct device_attribute 
*attr, char *buf) +{ + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + struct dcc_config_entry *entry; + char local_buf[64]; + int len = 0, count = 0; + + buf[0] = '\0'; + + mutex_lock(&drvdata->mutex); + list_for_each_entry(entry, &drvdata->config_head, list) { + len = scnprintf(local_buf, 64, + "Index: 0x%x, Base: 0x%x, Offset: 0x%x, len: 0x%x\n", + entry->index, entry->base, + entry->offset, entry->len); + + if ((count + len) > PAGE_SIZE) { + dev_err(dev, "DCC: Couldn't write complete config!\n"); + break; + } + + strlcat(buf, local_buf, PAGE_SIZE); + count += len; + } + + mutex_unlock(&drvdata->mutex); + + return count; +} + +static int dcc_config_add(struct dcc_drvdata *drvdata, unsigned int addr, + unsigned int len) +{ + int ret; + struct dcc_config_entry *entry, *pentry; + unsigned int base, offset; + + mutex_lock(&drvdata->mutex); + + /* Check the len to avoid allocate huge memory */ + if (!len || len > (drvdata->ram_size / 8)) { + dev_err(drvdata->dev, "DCC: Invalid length!\n"); + ret = -EINVAL; + goto err; + } + + base = addr & BM(4, 31); + + if (!list_empty(&drvdata->config_head)) { + pentry = list_last_entry(&drvdata->config_head, + struct dcc_config_entry, list); + + if (addr >= (pentry->base + pentry->offset) && + addr <= (pentry->base + pentry->offset + MAX_DCC_OFFSET)) { + + /* Re-use base address from last entry */ + base = pentry->base; + + /* + * Check if new address is contiguous to last entry's + * addresses. If yes then we can re-use last entry and + * just need to update its length. + */ + if ((pentry->len * 4 + pentry->base + pentry->offset) + == addr) { + len += pentry->len; + + /* + * Check if last entry can hold additional new + * length. If yes then we don't need to create + * a new entry else we need to add a new entry + * with same base but updated offset. + */ + if (len > MAX_DCC_LEN) + pentry->len = MAX_DCC_LEN; + else + pentry->len = len; + + /* + * Update start addr and len for remaining + * addresses, which will be part of new + * entry. 
+ */ + addr = pentry->base + pentry->offset + + pentry->len * 4; + len -= pentry->len; + } + } + } + + offset = addr - base; + + while (len) { + entry = devm_kzalloc(drvdata->dev, sizeof(*entry), GFP_KERNEL); + if (!entry) { + ret = -ENOMEM; + goto err; + } + + entry->base = base; + entry->offset = offset; + entry->len = min_t(uint32_t, len, MAX_DCC_LEN); + entry->index = drvdata->nr_config++; + INIT_LIST_HEAD(&entry->list); + list_add_tail(&entry->list, &drvdata->config_head); + + len -= entry->len; + offset += MAX_DCC_LEN * 4; + } + + mutex_unlock(&drvdata->mutex); + return 0; +err: + mutex_unlock(&drvdata->mutex); + return ret; +} + +static ssize_t config_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned int base, len; + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + int nval; + + nval = sscanf(buf, "%x %u", &base, &len); + if (nval <= 0 || nval > 2) + return -EINVAL; + + if (nval == 1) + len = 1; + + ret = dcc_config_add(drvdata, base, len); + if (ret) + return ret; + + return size; + +} +static DEVICE_ATTR_RW(config); + +static void dcc_config_reset(struct dcc_drvdata *drvdata) +{ + struct dcc_config_entry *entry, *temp; + + mutex_lock(&drvdata->mutex); + + list_for_each_entry_safe(entry, temp, &drvdata->config_head, list) { + list_del(&entry->list); + drvdata->nr_config--; + } + + mutex_unlock(&drvdata->mutex); +} + +static ssize_t config_reset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + if (val) + dcc_config_reset(drvdata); + + return size; +} +static DEVICE_ATTR_WO(config_reset); + +static ssize_t crc_error_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret; + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + + ret = dcc_xpu_unlock(drvdata); + if (ret) + return ret; + + mutex_lock(&drvdata->mutex); + if (!drvdata->enable) { + ret = -EINVAL; + goto err; + } + + ret = scnprintf(buf, PAGE_SIZE, "%u\n", + (unsigned int)BVAL(dcc_readl(drvdata, DCC_STATUS), 0)); +err: + mutex_unlock(&drvdata->mutex); + dcc_xpu_lock(drvdata); + return ret; +} +static DEVICE_ATTR_RO(crc_error); + +static ssize_t ready_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret; + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + + ret = dcc_xpu_unlock(drvdata); + if (ret) + return ret; + + mutex_lock(&drvdata->mutex); + if (!drvdata->enable) { + ret = -EINVAL; + goto err; + } + + ret = scnprintf(buf, PAGE_SIZE, "%u\n", + (unsigned int)BVAL(dcc_readl(drvdata, DCC_STATUS), 4)); +err: + mutex_unlock(&drvdata->mutex); + dcc_xpu_lock(drvdata); + return ret; +} +static DEVICE_ATTR_RO(ready); + +static ssize_t interrupt_disable_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%u\n", + (unsigned int)drvdata->interrupt_disable); +} + +static ssize_t interrupt_disable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + mutex_lock(&drvdata->mutex); + drvdata->interrupt_disable = (val ? 
1:0); + mutex_unlock(&drvdata->mutex); + return size; +} +static DEVICE_ATTR_RW(interrupt_disable); + +static ssize_t rpm_sw_trigger_on_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + + return scnprintf(buf, PAGE_SIZE, "%u\n", + (unsigned int)drvdata->rpm_trig_req.enable); +} + +static ssize_t rpm_sw_trigger_on_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + unsigned long val; + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + + if (kstrtoul(buf, 16, &val)) + return -EINVAL; + + mutex_lock(&drvdata->mutex); + __dcc_rpm_sw_trigger(drvdata, !!val); + mutex_unlock(&drvdata->mutex); + return size; +} +static DEVICE_ATTR_RW(rpm_sw_trigger_on); + +static ssize_t xpu_unlock_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + unsigned long val; + struct dcc_drvdata *drvdata = dev_get_drvdata(dev); + + if (kstrtoul(buf, 10, &val)) + return -EINVAL; + + ret = val ? dcc_xpu_unlock(drvdata) : dcc_xpu_lock(drvdata); + if (!ret) + ret = size; + + return ret; +} +static DEVICE_ATTR_WO(xpu_unlock); + +static const struct device_attribute *dcc_attrs[] = { + &dev_attr_func_type, + &dev_attr_data_sink, + &dev_attr_trigger, + &dev_attr_enable, + &dev_attr_config, + &dev_attr_config_reset, + &dev_attr_ready, + &dev_attr_crc_error, + &dev_attr_interrupt_disable, + &dev_attr_rpm_sw_trigger_on, + &dev_attr_xpu_unlock, + NULL, +}; + +static int dcc_create_files(struct device *dev, + const struct device_attribute **attrs) +{ + int ret = 0, i; + + for (i = 0; attrs[i] != NULL; i++) { + ret = device_create_file(dev, attrs[i]); + if (ret) { + dev_err(dev, "DCC: Couldn't create sysfs attribute: %s!\n", + attrs[i]->attr.name); + break; + } + } + return ret; +} + +static int dcc_sram_open(struct inode *inode, struct file *file) +{ + struct dcc_drvdata *drvdata = container_of(inode->i_cdev, + struct dcc_drvdata, + sram_dev); + file->private_data = drvdata; + + return dcc_xpu_unlock(drvdata); +} + +static ssize_t dcc_sram_read(struct file *file, char __user *data, + size_t len, loff_t *ppos) +{ + int ret; + unsigned char *buf; + struct dcc_drvdata *drvdata = file->private_data; + + /* EOF check */ + if (drvdata->ram_size <= *ppos) + return 0; + + if ((*ppos + len) > drvdata->ram_size) + len = (drvdata->ram_size - *ppos); + + buf = kzalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = clk_prepare_enable(drvdata->clk); + if (ret) { + kfree(buf); + return ret; + } + + memcpy_fromio(buf, (drvdata->ram_base + *ppos), len); + + clk_disable_unprepare(drvdata->clk); + + if (copy_to_user(data, buf, len)) { + dev_err(drvdata->dev, + "DCC: Couldn't copy all data to user!\n"); + kfree(buf); + return -EFAULT; + } + + *ppos += len; + + kfree(buf); + + return len; +} + +static int dcc_sram_release(struct inode *inode, struct file *file) +{ + struct dcc_drvdata *drvdata = file->private_data; + + return dcc_xpu_lock(drvdata); +} + +static const struct file_operations dcc_sram_fops = { + .owner = THIS_MODULE, + .open = dcc_sram_open, + .read = dcc_sram_read, + .release = dcc_sram_release, + .llseek = no_llseek, +}; + +static int dcc_sram_dev_register(struct dcc_drvdata *drvdata) +{ + int ret; + struct device *device; + dev_t dev; + + ret = alloc_chrdev_region(&dev, 0, 1, drvdata->sram_node); + if (ret) + goto err_alloc; + + cdev_init(&drvdata->sram_dev, &dcc_sram_fops); + + drvdata->sram_dev.owner = THIS_MODULE; + ret = 
cdev_add(&drvdata->sram_dev, dev, 1); + if (ret) + goto err_cdev_add; + + drvdata->sram_class = class_create(THIS_MODULE, + drvdata->sram_node); + if (IS_ERR(drvdata->sram_class)) { + ret = PTR_ERR(drvdata->sram_class); + goto err_class_create; + } + + device = device_create(drvdata->sram_class, NULL, + drvdata->sram_dev.dev, drvdata, + drvdata->sram_node); + if (IS_ERR(device)) { + ret = PTR_ERR(device); + goto err_dev_create; + } + + return 0; +err_dev_create: + class_destroy(drvdata->sram_class); +err_class_create: + cdev_del(&drvdata->sram_dev); +err_cdev_add: + unregister_chrdev_region(drvdata->sram_dev.dev, 1); +err_alloc: + return ret; +} + +static void dcc_sram_dev_deregister(struct dcc_drvdata *drvdata) +{ + device_destroy(drvdata->sram_class, drvdata->sram_dev.dev); + class_destroy(drvdata->sram_class); + cdev_del(&drvdata->sram_dev); + unregister_chrdev_region(drvdata->sram_dev.dev, 1); +} + +static int dcc_sram_dev_init(struct dcc_drvdata *drvdata) +{ + int ret = 0; + size_t node_size; + char *node_name = "dcc_sram"; + struct device *dev = drvdata->dev; + + node_size = strlen(node_name) + 1; + + drvdata->sram_node = devm_kzalloc(dev, node_size, GFP_KERNEL); + if (!drvdata->sram_node) + return -ENOMEM; + + strlcpy(drvdata->sram_node, node_name, node_size); + ret = dcc_sram_dev_register(drvdata); + if (ret) + dev_err(drvdata->dev, "DCC: sram node not registered.\n"); + + return ret; +} + +static void dcc_sram_dev_exit(struct dcc_drvdata *drvdata) +{ + dcc_sram_dev_deregister(drvdata); +} + +static void dcc_allocate_dump_mem(struct dcc_drvdata *drvdata) +{ + int ret; + struct device *dev = drvdata->dev; + struct msm_dump_entry reg_dump_entry, sram_dump_entry; + + /* Allocate memory for dcc reg dump */ + drvdata->reg_buf = devm_kzalloc(dev, drvdata->reg_size, GFP_KERNEL); + if (drvdata->reg_buf) { + strlcpy(drvdata->reg_data.name, "KDCC_REG", + sizeof(drvdata->reg_data.name)); + drvdata->reg_data.addr = virt_to_phys(drvdata->reg_buf); + drvdata->reg_data.len = drvdata->reg_size; + reg_dump_entry.id = MSM_DUMP_DATA_DCC_REG; + reg_dump_entry.addr = virt_to_phys(&drvdata->reg_data); + ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, + ®_dump_entry); + if (ret) + dev_err(dev, "DCC REG dump setup failed\n"); + } else { + dev_err(dev, "DCC REG dump allocation failed\n"); + } + + /* Allocate memory for dcc sram dump */ + drvdata->sram_buf = devm_kzalloc(dev, drvdata->ram_size, GFP_KERNEL); + if (drvdata->sram_buf) { + strlcpy(drvdata->sram_data.name, "KDCC_SRAM", + sizeof(drvdata->sram_data.name)); + drvdata->sram_data.addr = virt_to_phys(drvdata->sram_buf); + drvdata->sram_data.len = drvdata->ram_size; + sram_dump_entry.id = MSM_DUMP_DATA_DCC_SRAM; + sram_dump_entry.addr = virt_to_phys(&drvdata->sram_data); + ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, + &sram_dump_entry); + if (ret) + dev_err(dev, "DCC SRAM dump setup failed\n"); + } else { + dev_err(dev, "DCC SRAM dump allocation failed\n"); + } +} + +static int dcc_probe(struct platform_device *pdev) +{ + int ret, i; + struct device *dev = &pdev->dev; + struct dcc_drvdata *drvdata; + struct resource *res; + const char *data_sink; + + drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + + drvdata->dev = &pdev->dev; + platform_set_drvdata(pdev, drvdata); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dcc-base"); + if (!res) + return -EINVAL; + + drvdata->reg_size = resource_size(res); + drvdata->base = devm_ioremap(dev, res->start, resource_size(res)); + if (!drvdata->base) 
+ return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "dcc-ram-base"); + if (!res) + return -EINVAL; + + drvdata->ram_size = resource_size(res); + drvdata->ram_base = devm_ioremap(dev, res->start, resource_size(res)); + if (!drvdata->ram_base) + return -ENOMEM; + + drvdata->clk = devm_clk_get(dev, "apb_pclk"); + if (IS_ERR(drvdata->clk)) { + ret = PTR_ERR(drvdata->clk); + goto err; + } + + drvdata->save_reg = of_property_read_bool(pdev->dev.of_node, + "qcom,save-reg"); + + mutex_init(&drvdata->mutex); + + INIT_LIST_HEAD(&drvdata->config_head); + drvdata->nr_config = 0; + drvdata->xpu_scm_avail = false; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "dcc-xpu-base"); + if (res) { + if (scm_is_call_available(SCM_SVC_MP, + SCM_SVC_DISABLE_XPU) > 0) { + drvdata->xpu_scm_avail = true; + drvdata->xpu_addr = res->start; + } else { + dev_err(dev, "scm call is not available\n"); + return -EINVAL; + } + } else { + dev_info(dev, "DCC XPU is not specified\n"); + } + + ret = dcc_xpu_unlock(drvdata); + if (ret) + goto err; + + ret = clk_prepare_enable(drvdata->clk); + if (ret) { + dcc_xpu_lock(drvdata); + goto err; + } + + memset_io(drvdata->ram_base, 0, drvdata->ram_size); + + dcc_xpu_lock(drvdata); + + clk_disable_unprepare(drvdata->clk); + + drvdata->data_sink = DCC_DATA_SINK_SRAM; + ret = of_property_read_string(pdev->dev.of_node, "qcom,data-sink", + &data_sink); + if (!ret) { + for (i = 0; i < ARRAY_SIZE(str_dcc_data_sink); i++) + if (!strcmp(data_sink, str_dcc_data_sink[i])) { + drvdata->data_sink = i; + break; + } + + if (i == ARRAY_SIZE(str_dcc_data_sink)) { + dev_err(dev, "Unknown sink type for DCC! Using '%s' as data sink\n", + str_dcc_data_sink[drvdata->data_sink]); + } + } + + ret = dcc_sram_dev_init(drvdata); + if (ret) + goto err; + + ret = dcc_create_files(dev, dcc_attrs); + if (ret) + goto err; + + dcc_allocate_dump_mem(drvdata); + return 0; +err: + return ret; +} + +static int dcc_remove(struct platform_device *pdev) +{ + struct dcc_drvdata *drvdata = platform_get_drvdata(pdev); + + dcc_sram_dev_exit(drvdata); + + dcc_config_reset(drvdata); + + return 0; +} + +static const struct of_device_id msm_dcc_match[] = { + { .compatible = "qcom,dcc"}, + {} +}; + +static struct platform_driver dcc_driver = { + .probe = dcc_probe, + .remove = dcc_remove, + .driver = { + .name = "msm-dcc", + .of_match_table = msm_dcc_match, + }, +}; + +static int __init dcc_init(void) +{ + return platform_driver_register(&dcc_driver); +} +module_init(dcc_init); + +static void __exit dcc_exit(void) +{ + platform_driver_unregister(&dcc_driver); +} +module_exit(dcc_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MSM data capture and compare engine"); diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c index 9a99362ee27e..4ddf7ef5edc9 100644 --- a/drivers/soc/qcom/dcc_v2.c +++ b/drivers/soc/qcom/dcc_v2.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */ #include @@ -157,7 +157,7 @@ struct dcc_drvdata { uint32_t *nr_config; uint32_t nr_link_list; uint8_t curr_list; - uint8_t cti_trig; + uint8_t *cti_trig; uint8_t loopoff; }; @@ -776,7 +776,7 @@ static int dcc_enable(struct dcc_drvdata *drvdata) } /* 5. 
Configure trigger */ - dcc_writel(drvdata, BIT(9) | ((drvdata->cti_trig << 8) | + dcc_writel(drvdata, BIT(9) | ((drvdata->cti_trig[list] << 8) | (drvdata->data_sink[list] << 4) | (drvdata->func_type[list])), DCC_LL_CFG(list)); } @@ -1485,7 +1485,8 @@ static ssize_t cti_trig_show(struct device *dev, { struct dcc_drvdata *drvdata = dev_get_drvdata(dev); - return scnprintf(buf, PAGE_SIZE, "%d\n", drvdata->cti_trig); + return scnprintf(buf, PAGE_SIZE, "%d\n", + drvdata->cti_trig[drvdata->curr_list]); } static ssize_t cti_trig_store(struct device *dev, @@ -1513,9 +1514,11 @@ static ssize_t cti_trig_store(struct device *dev, } if (val) - drvdata->cti_trig = 1; + drvdata->cti_trig[drvdata->curr_list] = 1; else - drvdata->cti_trig = 0; + drvdata->cti_trig[drvdata->curr_list] = 0; + + ret = size; out: mutex_unlock(&drvdata->mutex); return ret; @@ -1864,6 +1867,10 @@ static int dcc_probe(struct platform_device *pdev) sizeof(uint32_t), GFP_KERNEL); if (!drvdata->nr_config) return -ENOMEM; + drvdata->cti_trig = devm_kzalloc(dev, drvdata->nr_link_list * + sizeof(uint8_t), GFP_KERNEL); + if (!drvdata->cti_trig) + return -ENOMEM; drvdata->cfg_head = devm_kzalloc(dev, drvdata->nr_link_list * sizeof(struct list_head), GFP_KERNEL); if (!drvdata->cfg_head) diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index 439df9208385..abceee642d63 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */ #define pr_fmt(fmt) "icnss: " fmt @@ -1081,26 +1081,26 @@ static int icnss_driver_event_server_arrive(void *data) goto qmi_registered; } ignore_assert = true; - goto clear_server; + goto fail; } if (!penv->msa_va) { icnss_pr_err("Invalid MSA address\n"); ret = -EINVAL; - goto clear_server; + goto fail; } ret = wlfw_msa_mem_info_send_sync_msg(penv); if (ret < 0) { ignore_assert = true; - goto clear_server; + goto fail; } if (!test_bit(ICNSS_MSA0_ASSIGNED, &penv->state)) { ret = icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_WLAN_HW_RW); if (ret < 0) - goto clear_server; + goto fail; set_bit(ICNSS_MSA0_ASSIGNED, &penv->state); } @@ -1140,8 +1140,6 @@ static int icnss_driver_event_server_arrive(void *data) err_setup_msa: icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_HLOS_ALL); clear_bit(ICNSS_MSA0_ASSIGNED, &penv->state); -clear_server: - icnss_clear_server(penv); fail: ICNSS_ASSERT(ignore_assert); qmi_registered: diff --git a/drivers/soc/qcom/icnss2/debug.c b/drivers/soc/qcom/icnss2/debug.c index b3dfbd53cd76..588892f5be46 100644 --- a/drivers/soc/qcom/icnss2/debug.c +++ b/drivers/soc/qcom/icnss2/debug.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. 
*/ #include #include @@ -14,6 +14,7 @@ void *icnss_ipc_log_context; void *icnss_ipc_log_long_context; +void *icnss_ipc_log_long1_context; static ssize_t icnss_regwrite_write(struct file *fp, const char __user *user_buf, @@ -770,6 +771,12 @@ void icnss_debug_init(void) "icnss_long", 0); if (!icnss_ipc_log_long_context) icnss_pr_err("Unable to create log long context\n"); + + icnss_ipc_log_long1_context = ipc_log_context_create(NUM_LOG_LONG_PAGES, + "icnss_long1", 0); + if (!icnss_ipc_log_long1_context) + icnss_pr_err("Unable to create log long context\n"); + } void icnss_debug_deinit(void) @@ -783,4 +790,9 @@ void icnss_debug_deinit(void) ipc_log_context_destroy(icnss_ipc_log_long_context); icnss_ipc_log_long_context = NULL; } + if (icnss_ipc_log_long1_context) { + ipc_log_context_destroy(icnss_ipc_log_long1_context); + icnss_ipc_log_long1_context = NULL; + } + } diff --git a/drivers/soc/qcom/icnss2/debug.h b/drivers/soc/qcom/icnss2/debug.h index ada100e79cc0..ae4b258800c4 100644 --- a/drivers/soc/qcom/icnss2/debug.h +++ b/drivers/soc/qcom/icnss2/debug.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. */ #ifndef _ICNSS_DEBUG_H @@ -14,6 +14,7 @@ extern void *icnss_ipc_log_context; extern void *icnss_ipc_log_long_context; +extern void *icnss_ipc_log_long1_context; #define icnss_ipc_log_string(_x...) \ ipc_log_string(icnss_ipc_log_context, _x) @@ -21,6 +22,9 @@ extern void *icnss_ipc_log_long_context; #define icnss_ipc_log_long_string(_x...) \ ipc_log_string(icnss_ipc_log_long_context, _x) +#define icnss_ipc_log_long1_string(_x...) \ + ipc_log_string(icnss_ipc_log_long1_context, _x) + #define icnss_pr_err(_fmt, ...) do { \ printk("%s" pr_fmt(_fmt), KERN_ERR, ##__VA_ARGS__); \ icnss_ipc_log_string("%s" pr_fmt(_fmt), "", \ @@ -49,6 +53,12 @@ extern void *icnss_ipc_log_long_context; pr_debug(_fmt, ##__VA_ARGS__); \ icnss_ipc_log_long_string(pr_fmt(_fmt), ##__VA_ARGS__); \ } while (0) + +#define icnss_pr_vdbg1(_fmt, ...) do { \ + pr_debug(_fmt, ##__VA_ARGS__); \ + icnss_ipc_log_long1_string(pr_fmt(_fmt), ##__VA_ARGS__); \ + } while (0) + #elif defined(DEBUG) #define icnss_pr_dbg(_fmt, ...) do { \ printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__); \ @@ -61,6 +71,13 @@ extern void *icnss_ipc_log_long_context; icnss_ipc_log_long_string("%s" pr_fmt(_fmt), "", \ ##__VA_ARGS__); \ } while (0) + +#define icnss_pr_vdbg1(_fmt, ...) do { \ + pr_debug(_fmt, ##__VA_ARGS__); \ + icnss_ipc_log_long1_string("%s" pr_fmt(_fmt), "", \ + ##__VA_ARGS__); \ + } while (0) + #else #define icnss_pr_dbg(_fmt, ...) do { \ no_printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__); \ @@ -73,6 +90,13 @@ extern void *icnss_ipc_log_long_context; icnss_ipc_log_long_string("%s" pr_fmt(_fmt), "", \ ##__VA_ARGS__); \ } while (0) + +#define icnss_pr_vdbg1(_fmt, ...) 
do { \ + no_printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__); \ + icnss_ipc_log_long1_string("%s" pr_fmt(_fmt), "", \ + ##__VA_ARGS__); \ + } while (0) + #endif #ifdef CONFIG_ICNSS2_DEBUG diff --git a/drivers/soc/qcom/icnss2/main.c b/drivers/soc/qcom/icnss2/main.c index ca0703b26949..e82beac06f71 100644 --- a/drivers/soc/qcom/icnss2/main.c +++ b/drivers/soc/qcom/icnss2/main.c @@ -566,7 +566,7 @@ static int icnss_driver_event_server_arrive(struct icnss_priv *priv, ret = icnss_hw_power_on(priv); if (ret) - goto clear_server; + goto fail; ret = wlfw_ind_register_send_sync_msg(priv); if (ret < 0) { @@ -618,8 +618,8 @@ static int icnss_driver_event_server_arrive(struct icnss_priv *priv, } priv->mem_base_va = devm_ioremap(&priv->pdev->dev, - priv->mem_base_pa, - priv->mem_base_size); + priv->mem_base_pa, + priv->mem_base_size); if (!priv->mem_base_va) { icnss_pr_err("Ioremap failed for bar address\n"); goto err_power_on; @@ -629,6 +629,17 @@ static int icnss_driver_event_server_arrive(struct icnss_priv *priv, &priv->mem_base_pa, priv->mem_base_va); + if (priv->mhi_state_info_pa) + priv->mhi_state_info_va = devm_ioremap(&priv->pdev->dev, + priv->mhi_state_info_pa, + PAGE_SIZE); + if (!priv->mhi_state_info_va) + icnss_pr_err("Ioremap failed for MHI info address\n"); + + icnss_pr_dbg("MHI state info Address pa: %pa, va: 0x%pK\n", + &priv->mhi_state_info_pa, + priv->mhi_state_info_va); + icnss_wlfw_bdf_dnld_send_sync(priv, ICNSS_BDF_REGDB); ret = icnss_wlfw_bdf_dnld_send_sync(priv, @@ -654,8 +665,6 @@ static int icnss_driver_event_server_arrive(struct icnss_priv *priv, err_power_on: icnss_hw_power_off(priv); -clear_server: - icnss_clear_server(priv); fail: ICNSS_ASSERT(ignore_assert); qmi_registered: @@ -1455,6 +1464,7 @@ static void icnss_driver_event_work(struct work_struct *work) break; case ICNSS_DRIVER_EVENT_M3_DUMP_UPLOAD_REQ: ret = icnss_m3_dump_upload_req_hdlr(priv, event->data); + break; case ICNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA: ret = icnss_qdss_trace_req_data_hdlr(priv, event->data); @@ -1979,7 +1989,8 @@ static int icnss_enable_recovery(struct icnss_priv *priv) return 0; } -static int icnss_trigger_ssr_smp2p(struct icnss_priv *priv) +static int icnss_send_smp2p(struct icnss_priv *priv, + enum icnss_smp2p_msg_id msg_id) { unsigned int value = 0; int ret; @@ -1987,19 +1998,22 @@ static int icnss_trigger_ssr_smp2p(struct icnss_priv *priv) if (IS_ERR(priv->smp2p_info.smem_state)) return -EINVAL; + if (test_bit(ICNSS_FW_DOWN, &priv->state)) + return -ENODEV; + value |= priv->smp2p_info.seq++; value <<= ICNSS_SMEM_SEQ_NO_POS; - value |= ICNSS_TRIGGER_SSR; + value |= msg_id; + + icnss_pr_vdbg1("Sending SMP2P value: 0x%X\n", value); + ret = qcom_smem_state_update_bits( priv->smp2p_info.smem_state, ICNSS_SMEM_VALUE_MASK, value); if (ret) - icnss_pr_dbg("Error in SMP2P sent ret: %d\n", ret); + icnss_pr_vdbg1("Error in SMP2P send ret: %d\n", ret); - icnss_pr_dbg("Initiate Root PD restart. 
SMP2P sent value: 0x%X\n", - value); - set_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state); return ret; } @@ -2472,6 +2486,22 @@ int icnss_get_soc_info(struct device *dev, struct icnss_soc_info *info) } EXPORT_SYMBOL(icnss_get_soc_info); +int icnss_get_mhi_state(struct device *dev) +{ + struct icnss_priv *priv = dev_get_drvdata(dev); + + if (!priv) { + icnss_pr_err("Platform driver not initialized\n"); + return -EINVAL; + } + + if (!priv->mhi_state_info_va) + return -ENOMEM; + + return ioread32(priv->mhi_state_info_va); +} +EXPORT_SYMBOL(icnss_get_mhi_state); + int icnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode) { int ret; @@ -2555,9 +2585,6 @@ int icnss_is_device_awake(struct device *dev) { struct icnss_priv *priv = dev_get_drvdata(dev); - if (!dev) - return -ENODEV; - if (!priv) { icnss_pr_err("Platform driver not initialized\n"); return -EINVAL; @@ -2567,6 +2594,22 @@ int icnss_is_device_awake(struct device *dev) } EXPORT_SYMBOL(icnss_is_device_awake); +int icnss_is_pci_ep_awake(struct device *dev) +{ + struct icnss_priv *priv = dev_get_drvdata(dev); + + if (!priv) { + icnss_pr_err("Platform driver not initialized\n"); + return -EINVAL; + } + + if (!priv->mhi_state_info_va) + return -ENOMEM; + + return ioread32(priv->mhi_state_info_va + ICNSS_PCI_EP_WAKE_OFFSET); +} +EXPORT_SYMBOL(icnss_is_pci_ep_awake); + int icnss_athdiag_read(struct device *dev, uint32_t offset, uint32_t mem_type, uint32_t data_len, uint8_t *output) @@ -2848,8 +2891,13 @@ int icnss_trigger_recovery(struct device *dev) goto out; } - if (priv->device_id == WCN6750_DEVICE_ID) - return icnss_trigger_ssr_smp2p(priv); + if (priv->device_id == WCN6750_DEVICE_ID) { + icnss_pr_vdbg1("Initiate Root PD restart"); + ret = icnss_send_smp2p(priv, ICNSS_TRIGGER_SSR); + if (!ret) + set_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state); + return ret; + } if (!test_bit(ICNSS_PDR_REGISTERED, &priv->state)) { icnss_pr_err("PD restart not enabled to trigger recovery: state: 0x%lx\n", @@ -2928,30 +2976,41 @@ EXPORT_SYMBOL(icnss_idle_restart); int icnss_exit_power_save(struct device *dev) { struct icnss_priv *priv = dev_get_drvdata(dev); - unsigned int value = 0; - int ret; - icnss_pr_dbg("Calling Exit Power Save\n"); + icnss_pr_vdbg1("Calling Exit Power Save\n"); if (test_bit(ICNSS_PD_RESTART, &priv->state) || !test_bit(ICNSS_MODE_ON, &priv->state)) return 0; - value |= priv->smp2p_info.seq++; - value <<= ICNSS_SMEM_SEQ_NO_POS; - value |= ICNSS_POWER_SAVE_EXIT; - ret = qcom_smem_state_update_bits( - priv->smp2p_info.smem_state, - ICNSS_SMEM_VALUE_MASK, - value); - if (ret) - icnss_pr_dbg("Error in SMP2P sent ret: %d\n", ret); - - icnss_pr_dbg("SMP2P sent value: 0x%X\n", value); - return ret; + return icnss_send_smp2p(priv, ICNSS_POWER_SAVE_EXIT); } EXPORT_SYMBOL(icnss_exit_power_save); +int icnss_prevent_l1(struct device *dev) +{ + struct icnss_priv *priv = dev_get_drvdata(dev); + + if (test_bit(ICNSS_PD_RESTART, &priv->state) || + !test_bit(ICNSS_MODE_ON, &priv->state)) + return 0; + + return icnss_send_smp2p(priv, ICNSS_PCI_EP_POWER_SAVE_EXIT); +} +EXPORT_SYMBOL(icnss_prevent_l1); + +void icnss_allow_l1(struct device *dev) +{ + struct icnss_priv *priv = dev_get_drvdata(dev); + + if (test_bit(ICNSS_PD_RESTART, &priv->state) || + !test_bit(ICNSS_MODE_ON, &priv->state)) + return; + + icnss_send_smp2p(priv, ICNSS_PCI_EP_POWER_SAVE_ENTER); +} +EXPORT_SYMBOL(icnss_allow_l1); + void icnss_allow_recursive_recovery(struct device *dev) { struct icnss_priv *priv = dev_get_drvdata(dev); @@ -3498,7 +3557,7 @@ static inline void 
icnss_get_smp2p_info(struct icnss_priv *priv) "wlan-smp2p-out", &priv->smp2p_info.smem_bit); if (IS_ERR(priv->smp2p_info.smem_state)) { - icnss_pr_dbg("Failed to get smem state %d", + icnss_pr_vdbg1("Failed to get smem state %d", PTR_ERR(priv->smp2p_info.smem_state)); } @@ -3701,7 +3760,6 @@ static int icnss_remove(struct platform_device *pdev) static int icnss_pm_suspend(struct device *dev) { struct icnss_priv *priv = dev_get_drvdata(dev); - unsigned int value = 0; int ret = 0; if (priv->magic != ICNSS_MAGIC) { @@ -3725,19 +3783,7 @@ static int icnss_pm_suspend(struct device *dev) !test_bit(ICNSS_MODE_ON, &priv->state)) return 0; - value |= priv->smp2p_info.seq++; - value <<= ICNSS_SMEM_SEQ_NO_POS; - value |= ICNSS_POWER_SAVE_ENTER; - - ret = qcom_smem_state_update_bits( - priv->smp2p_info.smem_state, - ICNSS_SMEM_VALUE_MASK, - value); - if (ret) - icnss_pr_dbg("Error in SMP2P sent ret: %d\n", - ret); - - icnss_pr_dbg("SMP2P sent value: 0x%X\n", value); + ret = icnss_send_smp2p(priv, ICNSS_POWER_SAVE_ENTER); } priv->stats.pm_suspend++; set_bit(ICNSS_PM_SUSPEND, &priv->state); @@ -3838,7 +3884,6 @@ static int icnss_pm_resume_noirq(struct device *dev) static int icnss_pm_runtime_suspend(struct device *dev) { struct icnss_priv *priv = dev_get_drvdata(dev); - unsigned int value = 0; int ret = 0; if (priv->magic != ICNSS_MAGIC) { @@ -3858,18 +3903,7 @@ static int icnss_pm_runtime_suspend(struct device *dev) !test_bit(ICNSS_MODE_ON, &priv->state)) return 0; - value |= priv->smp2p_info.seq++; - value <<= ICNSS_SMEM_SEQ_NO_POS; - value |= ICNSS_POWER_SAVE_ENTER; - - ret = qcom_smem_state_update_bits( - priv->smp2p_info.smem_state, - ICNSS_SMEM_VALUE_MASK, - value); - if (ret) - icnss_pr_dbg("Error in SMP2P sent ret: %d\n", ret); - - icnss_pr_dbg("SMP2P sent value: 0x%X\n", value); + ret = icnss_send_smp2p(priv, ICNSS_POWER_SAVE_ENTER); } out: return ret; diff --git a/drivers/soc/qcom/icnss2/main.h b/drivers/soc/qcom/icnss2/main.h index c4901a688963..1354ca8fd295 100644 --- a/drivers/soc/qcom/icnss2/main.h +++ b/drivers/soc/qcom/icnss2/main.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. */ #ifndef __MAIN_H__ @@ -25,6 +25,7 @@ #define THERMAL_NAME_LENGTH 20 #define ICNSS_SMEM_VALUE_MASK 0xFFFFFFFF #define ICNSS_SMEM_SEQ_NO_POS 16 +#define ICNSS_PCI_EP_WAKE_OFFSET 4 extern uint64_t dynamic_feature_mask; @@ -178,6 +179,8 @@ enum icnss_smp2p_msg_id { ICNSS_POWER_SAVE_ENTER = 1, ICNSS_POWER_SAVE_EXIT, ICNSS_TRIGGER_SSR, + ICNSS_PCI_EP_POWER_SAVE_ENTER = 6, + ICNSS_PCI_EP_POWER_SAVE_EXIT, }; struct icnss_stats { @@ -342,6 +345,9 @@ struct icnss_priv { phys_addr_t mem_base_pa; void __iomem *mem_base_va; u32 mem_base_size; + phys_addr_t mhi_state_info_pa; + void __iomem *mhi_state_info_va; + u32 mhi_state_info_size; struct iommu_domain *iommu_domain; dma_addr_t smmu_iova_start; size_t smmu_iova_len; diff --git a/drivers/soc/qcom/icnss2/qmi.c b/drivers/soc/qcom/icnss2/qmi.c index 934e54c0b6a0..fad86c7bc76a 100644 --- a/drivers/soc/qcom/icnss2/qmi.c +++ b/drivers/soc/qcom/icnss2/qmi.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 
*/ #define pr_fmt(fmt) "icnss2_qmi: " fmt @@ -334,6 +334,15 @@ int wlfw_device_info_send_msg(struct icnss_priv *priv) goto out; } + if (resp->mhi_state_info_addr_valid) + priv->mhi_state_info_pa = resp->mhi_state_info_addr; + + if (resp->mhi_state_info_size_valid) + priv->mhi_state_info_size = resp->mhi_state_info_size; + + if (!priv->mhi_state_info_pa) + icnss_pr_err("Failed to get MHI info address\n"); + kfree(resp); kfree(req); return 0; @@ -2328,8 +2337,6 @@ static void icnss_wlfw_respond_get_info_ind_cb(struct qmi_handle *qmi, struct icnss_priv *priv = container_of(qmi, struct icnss_priv, qmi); const struct wlfw_respond_get_info_ind_msg_v01 *ind_msg = data; - icnss_pr_vdbg("Received QMI WLFW respond get info indication\n"); - if (!txn) { icnss_pr_err("Spurious indication\n"); return; } @@ -2928,9 +2935,6 @@ int icnss_wlfw_get_info_send_sync(struct icnss_priv *plat_priv, int type, struct qmi_txn txn; int ret = 0; - icnss_pr_dbg("Sending get info message, type: %d, cmd length: %d, state: 0x%lx\n", - type, cmd_len, plat_priv->state); - if (cmd_len > QMI_WLFW_MAX_DATA_SIZE_V01) return -EINVAL; diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c index a82fba07a8c2..7db2816e9434 100644 --- a/drivers/soc/qcom/memory_dump_v2.c +++ b/drivers/soc/qcom/memory_dump_v2.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2014-2017, 2019-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2017, 2019-2021, The Linux Foundation. All rights reserved. */ #include @@ -82,6 +82,7 @@ struct msm_memory_dump { }; static struct msm_memory_dump memdump; +static struct msm_mem_dump_vaddr_tbl vaddr_tbl; /** * update_reg_dump_table - update the register dump table @@ -699,6 +700,28 @@ int msm_dump_data_register_nominidump(enum msm_dump_table_ids id, } EXPORT_SYMBOL(msm_dump_data_register_nominidump); +struct dump_vaddr_entry *get_msm_dump_ptr(enum msm_dump_data_ids id) +{ + int i; + + if (!vaddr_tbl.entries) + return NULL; + + if (id > MSM_DUMP_DATA_MAX) + return NULL; + + for (i = 0; i < vaddr_tbl.num_node; i++) { + if (vaddr_tbl.entries[i].id == id) + break; + } + + if (i == vaddr_tbl.num_node) + return NULL; + + return &vaddr_tbl.entries[i]; +} +EXPORT_SYMBOL(get_msm_dump_ptr); + #define MSM_DUMP_TOTAL_SIZE_OFFSET 0x724 static int init_memory_dump(void *dump_vaddr, phys_addr_t phys_addr, size_t size) @@ -755,14 +778,9 @@ static int init_memory_dump(void *dump_vaddr, phys_addr_t phys_addr, static int __init init_debug_lar_unlock(void) { int ret; - uint32_t argument = 0; struct scm_desc desc = {0}; - if (!is_scm_armv8()) - ret = scm_call(SCM_SVC_TZ, SCM_CMD_DEBUG_LAR_UNLOCK, &argument, - sizeof(argument), NULL, 0); - else - ret = scm_call2(SCM_SIP_FNID(SCM_SVC_TZ, + ret = scm_call2(SCM_SIP_FNID(SCM_SVC_TZ, SCM_CMD_DEBUG_LAR_UNLOCK), &desc); if (ret) pr_err("Core Debug Lock unlock failed, ret: %d\n", ret); @@ -792,6 +810,14 @@ static int mem_dump_alloc(struct platform_device *pdev) uint32_t ns_vmids[] = {VMID_HLOS}; uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE}; u64 shm_bridge_handle; + int i = 0; + + vaddr_tbl.num_node = of_get_child_count(node); + vaddr_tbl.entries = devm_kcalloc(&pdev->dev, vaddr_tbl.num_node, + sizeof(struct dump_vaddr_entry), + GFP_KERNEL); + if (!vaddr_tbl.entries) + dev_err(&pdev->dev, "Unable to allocate mem for ptr addr\n"); total_size = size = ret = no_of_nodes = 0; /* For dump table registration with IMEM */ @@ -867,9 +893,16 @@ static int mem_dump_alloc(struct platform_device *pdev) dump_entry.addr = 
phys_addr; ret = msm_dump_data_register_nominidump(MSM_DUMP_TABLE_APPS, &dump_entry); - if (ret) + if (ret) { dev_err(&pdev->dev, "Data dump setup failed, id = %d\n", id); + } else if (vaddr_tbl.entries) { + vaddr_tbl.entries[i].id = id; + vaddr_tbl.entries[i].dump_vaddr = + dump_vaddr + MSM_DUMP_DATA_SIZE; + vaddr_tbl.entries[i].dump_data_vaddr = dump_data; + i++; + } md_entry.phys_addr = dump_data->addr; md_entry.virt_addr = (uintptr_t)dump_vaddr + MSM_DUMP_DATA_SIZE; diff --git a/drivers/soc/qcom/msm_minidump.c b/drivers/soc/qcom/msm_minidump.c index 4e37b306dd76..a2de6fc57d71 100644 --- a/drivers/soc/qcom/msm_minidump.c +++ b/drivers/soc/qcom/msm_minidump.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2018,2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2018,2020-2021, The Linux Foundation. All rights reserved. */ #define pr_fmt(fmt) "Minidump: " fmt @@ -232,6 +232,7 @@ EXPORT_SYMBOL(msm_minidump_update_region); int msm_minidump_add_region(const struct md_region *entry) { u32 entries; + u32 toc_init; struct md_region *mdr; if (validate_region(entry)) @@ -251,6 +252,19 @@ int msm_minidump_add_region(const struct md_region *entry) return -ENOMEM; } + toc_init = 0; + if (minidump_table.md_ss_toc && + (minidump_table.md_ss_toc->md_ss_enable_status == + MD_SS_ENABLED)) { + toc_init = 1; + if (minidump_table.md_ss_toc->ss_region_count >= + MAX_NUM_ENTRIES) { + spin_unlock(&mdt_lock); + pr_err("Maximum regions in minidump table reached.\n"); + return -ENOMEM; + } + } + mdr = &minidump_table.entry[entries]; strlcpy(mdr->name, entry->name, sizeof(mdr->name)); mdr->virt_addr = entry->virt_addr; @@ -260,9 +274,7 @@ int msm_minidump_add_region(const struct md_region *entry) minidump_table.num_regions = entries + 1; - if (minidump_table.md_ss_toc && - (minidump_table.md_ss_toc->md_ss_enable_status == - MD_SS_ENABLED)) + if (toc_init) md_update_ss_toc(entry); else pendings++; diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c index 5c55fb318904..9a3c53721a42 100644 --- a/drivers/soc/qcom/peripheral-loader.c +++ b/drivers/soc/qcom/peripheral-loader.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2010-2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -776,11 +776,12 @@ static int pil_init_entry_addr(struct pil_priv *priv, const struct pil_mdt *mdt) } static int pil_alloc_region(struct pil_priv *priv, phys_addr_t min_addr, - phys_addr_t max_addr, size_t align) + phys_addr_t max_addr, size_t align, + size_t mdt_size) { void *region; size_t size = max_addr - min_addr; - size_t aligned_size; + size_t aligned_size = max(size, mdt_size); /* Don't reallocate due to fragmentation concerns, just sanity check */ if (priv->region) { @@ -820,7 +821,8 @@ static int pil_alloc_region(struct pil_priv *priv, phys_addr_t min_addr, return 0; } -static int pil_setup_region(struct pil_priv *priv, const struct pil_mdt *mdt) +static int pil_setup_region(struct pil_priv *priv, const struct pil_mdt *mdt, + size_t mdt_size) { const struct elf32_phdr *phdr; phys_addr_t min_addr_r, min_addr_n, max_addr_r, max_addr_n, start, end; @@ -865,7 +867,8 @@ static int pil_setup_region(struct pil_priv *priv, const struct pil_mdt *mdt) max_addr_r = ALIGN(max_addr_r, SZ_4K); if (relocatable) { - ret = pil_alloc_region(priv, min_addr_r, max_addr_r, align); + ret = pil_alloc_region(priv, min_addr_r, max_addr_r, align, + mdt_size); } else { priv->region_start = min_addr_n; priv->region_end = max_addr_n; @@ -896,14 +899,15 @@ static int pil_cmp_seg(void *priv, struct list_head *a, struct list_head *b) return ret; } -static int pil_init_mmap(struct pil_desc *desc, const struct pil_mdt *mdt) +static int pil_init_mmap(struct pil_desc *desc, const struct pil_mdt *mdt, + size_t mdt_size) { struct pil_priv *priv = desc->priv; const struct elf32_phdr *phdr; struct pil_seg *seg; int i, ret; - ret = pil_setup_region(priv, mdt); + ret = pil_setup_region(priv, mdt, mdt_size); if (ret) return ret; @@ -1275,7 +1279,7 @@ int pil_boot(struct pil_desc *desc) goto release_fw; } - ret = pil_init_mmap(desc, mdt); + ret = pil_init_mmap(desc, mdt, fw->size); if (ret) goto release_fw; @@ -1288,7 +1292,8 @@ int pil_boot(struct pil_desc *desc) pil_log("before_init_image", desc); if (desc->ops->init_image) - ret = desc->ops->init_image(desc, fw->data, fw->size); + ret = desc->ops->init_image(desc, fw->data, fw->size, + priv->region_start, priv->region); if (ret) { pil_err(desc, "Initializing image failed(rc:%d)\n", ret); goto err_boot; diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h index 29fa4b612667..d5e6cf2b6e2a 100644 --- a/drivers/soc/qcom/peripheral-loader.h +++ b/drivers/soc/qcom/peripheral-loader.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2010-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2010-2019,2021, The Linux Foundation. All rights reserved. */ #ifndef __MSM_PERIPHERAL_LOADER_H #define __MSM_PERIPHERAL_LOADER_H @@ -103,7 +103,7 @@ struct pil_image_info { */ struct pil_reset_ops { int (*init_image)(struct pil_desc *pil, const u8 *metadata, - size_t size); + size_t size, phys_addr_t mdata_phys, void *region); int (*mem_setup)(struct pil_desc *pil, phys_addr_t addr, size_t size); int (*verify_blob)(struct pil_desc *pil, phys_addr_t phy_addr, size_t size); diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c index 9cff905d3e43..91ae185fa129 100644 --- a/drivers/soc/qcom/pil-msa.c +++ b/drivers/soc/qcom/pil-msa.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -850,7 +850,8 @@ int pil_mss_debug_reset(struct pil_desc *pil) } static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata, - size_t size) + size_t size, phys_addr_t region_start, + void *region) { struct modem_data *drv = dev_get_drvdata(pil->dev); void *mdata_virt; @@ -934,7 +935,8 @@ static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata, } static int pil_msa_mss_reset_mba_load_auth_mdt(struct pil_desc *pil, - const u8 *metadata, size_t size) + const u8 *metadata, size_t size, + phys_addr_t region_start, void *region) { int ret; @@ -942,7 +944,8 @@ static int pil_msa_mss_reset_mba_load_auth_mdt(struct pil_desc *pil, if (ret) return ret; - return pil_msa_auth_modem_mdt(pil, metadata, size); + return pil_msa_auth_modem_mdt(pil, metadata, size, region_start, + region); } static int pil_msa_mba_verify_blob(struct pil_desc *pil, phys_addr_t phy_addr, diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c index d48500b84ea4..7cb6fcb55d95 100644 --- a/drivers/soc/qcom/qdss_bridge.c +++ b/drivers/soc/qcom/qdss_bridge.c @@ -151,7 +151,8 @@ static int qdss_check_entry(struct qdss_bridge_drvdata *drvdata) int ret = 0; list_for_each_entry(entry, &drvdata->buf_tbl, link) { - if (atomic_read(&entry->available) == 0) { + if (atomic_read(&entry->available) == 0 + && atomic_read(&entry->used) == 1) { ret = 1; return ret; } @@ -199,6 +200,7 @@ static void qdss_buf_tbl_remove(struct qdss_bridge_drvdata *drvdata, if (entry->buf != buf) continue; atomic_set(&entry->available, 1); + atomic_set(&entry->used, 0); spin_unlock_bh(&drvdata->lock); return; } @@ -382,6 +384,7 @@ static int usb_write(struct qdss_bridge_drvdata *drvdata, entry->usb_req->buf = buf; entry->usb_req->length = len; + atomic_set(&entry->used, 1); ret = usb_qdss_write(drvdata->usb_ch, entry->usb_req); return ret; @@ -465,8 +468,7 @@ static void usb_notifier(void *priv, unsigned int event, { struct qdss_bridge_drvdata *drvdata = priv; - if (!drvdata || drvdata->mode != MHI_TRANSFER_TYPE_USB - || drvdata->opened != ENABLE) { + if (!drvdata || drvdata->mode != MHI_TRANSFER_TYPE_USB) { pr_err_ratelimited("%s can't be called in invalid status.\n", __func__); return; @@ -474,8 +476,10 @@ static void usb_notifier(void *priv, unsigned int event, switch (event) { case USB_QDSS_CONNECT: - usb_qdss_alloc_req(ch, drvdata->nr_trbs); - mhi_queue_read(drvdata); + if (drvdata->opened == ENABLE) { + usb_qdss_alloc_req(ch, drvdata->nr_trbs); + mhi_queue_read(drvdata); + } break; case USB_QDSS_DISCONNECT: diff --git a/drivers/soc/qcom/qdss_bridge.h b/drivers/soc/qcom/qdss_bridge.h index 3bf63a6c2d67..8125ccc38f6c 100644 --- a/drivers/soc/qcom/qdss_bridge.h +++ b/drivers/soc/qcom/qdss_bridge.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. */ #ifndef _QDSS_BRIDGE_H @@ -11,6 +11,7 @@ struct qdss_buf_tbl_lst { unsigned char *buf; struct qdss_request *usb_req; atomic_t available; + atomic_t used; }; struct qdss_mhi_buf_tbl_t { diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c index bda109faa0c7..141ecd121440 100644 --- a/drivers/soc/qcom/rpmh.c +++ b/drivers/soc/qcom/rpmh.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2016-2020, The Linux Foundation. 
All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. */ #include @@ -478,12 +478,10 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state, * we've returned from this function. */ rpmh_rsc_debug(ctrlr_to_drv(ctrlr), &compls[i]); - ret = -ETIMEDOUT; - goto exit; + BUG_ON(1); } } -exit: kfree(ptr); return ret; diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c index 299cae9808e9..acb5b2d7a092 100644 --- a/drivers/soc/qcom/secure_buffer.c +++ b/drivers/soc/qcom/secure_buffer.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2011 Google, Inc - * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2019, 2021 The Linux Foundation. All rights reserved. */ #include @@ -119,7 +119,8 @@ static int secure_buffer_change_table(struct sg_table *table, int lock) * secure environment to ensure the data is actually present * in RAM */ - dmac_flush_range(chunk_list, chunk_list + chunk_list_len); + dmac_flush_range(chunk_list, + (void *)chunk_list + chunk_list_len); ret = secure_buffer_change_chunk(chunk_list_phys, nchunks, V2_CHUNK_SIZE, lock); diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index b5638df21344..ee005e1a8552 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c @@ -1061,15 +1061,27 @@ static int qcom_smem_map_toc(struct qcom_smem *smem, struct device *dev, return 0; } -static int qcom_smem_mamp_legacy(struct qcom_smem *smem) +static int qcom_smem_map_legacy(struct qcom_smem *smem) { struct smem_header *header; u32 phys_addr; u32 p_size; + unsigned long flags; + int ret; phys_addr = smem->regions[0].aux_base; - header = smem->regions[0].virt_base; - p_size = header->available; + header = (struct smem_header __iomem *)smem->regions[0].virt_base; + + ret = hwspin_lock_timeout_irqsave(smem->hwlock, + HWSPINLOCK_TIMEOUT, + &flags); + if (ret) + return ret; + + p_size = readl_relaxed(&header->available) + + readl_relaxed(&header->free_offset); + + hwspin_unlock_irqrestore(smem->hwlock, &flags); /* unmap previously mapped starting 4k for smem header */ devm_iounmap(smem->dev, smem->regions[0].virt_base); @@ -1121,27 +1133,6 @@ static int qcom_smem_probe(struct platform_device *pdev) return -EINVAL; } - version = qcom_smem_get_sbl_version(smem); - switch (version >> 16) { - case SMEM_GLOBAL_PART_VERSION: - ret = qcom_smem_set_global_partition(smem); - if (ret < 0) - return ret; - smem->item_count = qcom_smem_get_item_count(smem); - break; - case SMEM_GLOBAL_HEAP_VERSION: - qcom_smem_mamp_legacy(smem); - smem->item_count = SMEM_ITEM_COUNT; - break; - default: - dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version); - return -EINVAL; - } - - ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS); - if (ret < 0 && ret != -ENOENT) - return ret; - hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0); if (hwlock_id < 0) { if (hwlock_id != -EPROBE_DEFER) @@ -1153,6 +1144,27 @@ static int qcom_smem_probe(struct platform_device *pdev) if (!smem->hwlock) return -ENXIO; + version = qcom_smem_get_sbl_version(smem); + switch (version >> 16) { + case SMEM_GLOBAL_PART_VERSION: + ret = qcom_smem_set_global_partition(smem); + if (ret < 0) + return ret; + smem->item_count = qcom_smem_get_item_count(smem); + break; + case SMEM_GLOBAL_HEAP_VERSION: + qcom_smem_map_legacy(smem); + smem->item_count = SMEM_ITEM_COUNT; + break; + default: + dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version); + return -EINVAL; + } + 
+ ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS); + if (ret < 0 && ret != -ENOENT) + return ret; + __smem = smem; return 0; diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c index d0cb6288d4d1..08b3cd9f69b6 100644 --- a/drivers/soc/qcom/smsm.c +++ b/drivers/soc/qcom/smsm.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2015, Sony Mobile Communications Inc. - * Copyright (c) 2012-2013,2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2013,2019,2021 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -404,7 +404,7 @@ static int smsm_inbound_entry(struct qcom_smsm *smsm, ret = devm_request_threaded_irq(smsm->dev, smsm->irq, NULL, smsm_intr, - IRQF_ONESHOT, + IRQF_NO_SUSPEND | IRQF_ONESHOT, "smsm", (void *)entry); if (ret) { dev_err(smsm->dev, "failed to request interrupt\n"); diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index 22931f5f67f9..2a378148e6cd 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -322,6 +322,7 @@ static struct msm_soc_info cpu_of_id[] = { /* kona ID */ [356] = {MSM_CPU_KONA, "KONA"}, [455] = {MSM_CPU_KONA, "KONA"}, + [496] = {MSM_CPU_KONA, "KONA"}, /* Lito ID */ [400] = {MSM_CPU_LITO, "LITO"}, @@ -334,6 +335,9 @@ static struct msm_soc_info cpu_of_id[] = { [417] = {MSM_CPU_BENGAL, "BENGAL"}, [444] = {MSM_CPU_BENGAL, "BENGAL"}, + /* Khaje ID */ + [518] = {MSM_CPU_KHAJE, "KHAJE"}, + /* Lagoon ID */ [434] = {MSM_CPU_LAGOON, "LAGOON"}, [459] = {MSM_CPU_LAGOON, "LAGOON"}, @@ -1260,6 +1264,10 @@ static void * __init setup_dummy_socinfo(void) dummy_socinfo.id = 417; strlcpy(dummy_socinfo.build_id, "bengal - ", sizeof(dummy_socinfo.build_id)); + } else if (early_machine_is_khaje()) { + dummy_socinfo.id = 518; + strlcpy(dummy_socinfo.build_id, "khaje - ", + sizeof(dummy_socinfo.build_id)); } else if (early_machine_is_bengalp()) { dummy_socinfo.id = 445; strlcpy(dummy_socinfo.build_id, "bengalp - ", diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c index e979abd14ee3..68ea839c6355 100644 --- a/drivers/soc/qcom/subsys-pil-tz.c +++ b/drivers/soc/qcom/subsys-pil-tz.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. */ #include @@ -39,6 +39,13 @@ #define desc_to_data(d) container_of(d, struct pil_tz_data, desc) #define subsys_to_data(d) container_of(d, struct pil_tz_data, subsys_desc) +struct pil_map_fw_info { + void *region; + unsigned long attrs; + phys_addr_t base_addr; + struct device *dev; +}; + /** * struct reg_info - regulator info * @reg: regulator handle @@ -599,16 +606,21 @@ static void pil_remove_proxy_vote(struct pil_desc *pil) } static int pil_init_image_trusted(struct pil_desc *pil, - const u8 *metadata, size_t size) + const u8 *metadata, size_t size, phys_addr_t mdata_phys, + void *region) { struct pil_tz_data *d = desc_to_data(pil); u32 scm_ret = 0; void *mdata_buf; - dma_addr_t mdata_phys; int ret; - unsigned long attrs = 0; - struct device dev = {0}; struct scm_desc desc = {0}; + struct pil_map_fw_info map_fw_info = { + .attrs = pil->attrs, + .region = region, + .base_addr = mdata_phys, + .dev = pil->dev, + }; + void *map_data = pil->map_data ? 
pil->map_data : &map_fw_info; if (d->subsys_desc.no_auth) return 0; @@ -616,15 +628,10 @@ static int pil_init_image_trusted(struct pil_desc *pil, ret = scm_pas_enable_bw(); if (ret) return ret; - arch_setup_dma_ops(&dev, 0, 0, NULL, 0); - dev.coherent_dma_mask = - DMA_BIT_MASK(sizeof(dma_addr_t) * 8); - attrs |= DMA_ATTR_STRONGLY_ORDERED; - mdata_buf = dma_alloc_attrs(&dev, size, &mdata_phys, GFP_KERNEL, - attrs); + mdata_buf = pil->map_fw_mem(mdata_phys, size, map_data); if (!mdata_buf) { - pr_err("scm-pas: Allocation for metadata failed.\n"); + dev_err(pil->dev, "Failed to map memory for metadata.\n"); scm_pas_disable_bw(); return -ENOMEM; } @@ -638,7 +645,7 @@ static int pil_init_image_trusted(struct pil_desc *pil, &desc); scm_ret = desc.ret[0]; - dma_free_attrs(&dev, size, mdata_buf, mdata_phys, attrs); + pil->unmap_fw_mem(mdata_buf, size, map_data); scm_pas_disable_bw(); if (ret) return ret; diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c index 69f7aa599ee0..2479632e103b 100644 --- a/drivers/soc/qcom/subsystem_restart.c +++ b/drivers/soc/qcom/subsystem_restart.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2011-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2021, The Linux Foundation. All rights reserved. */ #define pr_fmt(fmt) "subsys-restart: %s(): " fmt, __func__ @@ -778,6 +778,7 @@ static int subsystem_powerup(struct subsys_device *dev, void *data) pr_info("[%s:%d]: Powering up %s\n", current->comm, current->pid, name); reinit_completion(&dev->err_ready); + enable_all_irqs(dev); ret = dev->desc->powerup(dev->desc); if (ret < 0) { notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE, @@ -793,7 +794,6 @@ static int subsystem_powerup(struct subsys_device *dev, void *data) pr_err("Powerup failure on %s\n", name); return ret; } - enable_all_irqs(dev); ret = wait_for_err_ready(dev); if (ret) { diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index 4d382c6a083a..f97b9f334673 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. */ @@ -427,7 +427,7 @@ static struct msm_gpi_tre *setup_go_tre(int cmd, int cs, int rx_len, int flags, if (cmd == SPI_RX_ONLY) { eot = 0; chain = 0; - eob = 0; + eob = 1; /* For non Shared SPI case */ } else { eot = 0; chain = 1; diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 4b40dfcfcd0f..aca84ffdc2dc 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -478,6 +478,17 @@ config GENERIC_ADC_THERMAL to this driver. This driver reports the temperature by reading ADC channel and converts it to temperature based on lookup table. +config THERMAL_QPNP_ADC_TM + tristate "Qualcomm Technologies Inc. Thermal Monitor ADC Driver" + depends on THERMAL + depends on SPMI + help + This enables the thermal Sysfs driver for the ADC thermal monitoring + device. It shows up in Sysfs as a thermal zone with multiple trip points. + Disabling the thermal zone device via the mode file results in disabling + the sensor. Also able to set threshold temperature for both hot and cold + and update when a threshold is reached. + config THERMAL_TSENS tristate "Qualcomm Technologies Inc. 
TSENS Temperature driver" depends on THERMAL diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 2162f70e829c..929a216c501c 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -60,6 +60,7 @@ obj-y += tegra/ obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o +obj-$(CONFIG_THERMAL_QPNP_ADC_TM) += qpnp-adc-tm.o qpnp-adc-common.o obj-$(CONFIG_ZX2967_THERMAL) += zx2967_thermal.o obj-$(CONFIG_UNIPHIER_THERMAL) += uniphier_thermal.o obj-$(CONFIG_THERMAL_TSENS) += msm-tsens.o tsens2xxx.o tsens-dbg.o tsens-mtc.o tsens1xxx.o tsens-calib.o diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig index cc78d3711bfe..2f1317090d02 100644 --- a/drivers/thermal/qcom/Kconfig +++ b/drivers/thermal/qcom/Kconfig @@ -102,6 +102,17 @@ config REGULATOR_COOLING_DEVICE If you want this support, you should say Y here. +config QTI_RPM_SMD_COOLING_DEVICE + bool "Qualcomm Technologies Inc. RPM SMD cooling device driver" + depends on MSM_RPM_SMD && THERMAL_OF + help + This implements a mitigation device to send temperature band + level to RPM hardware via SMD protocol. This mitigation device + will be used by temperature reliability rules to restrict a + railway at predefined voltage corner using RPM hardware. + + If you want this support, you should say Y here. + config MSM_BCL_PERIPHERAL_CTL bool "BCL driver to control the PMIC BCL peripheral" depends on SPMI && THERMAL_OF @@ -151,3 +162,14 @@ config QTI_CX_IPEAK_COOLING_DEVICE on the CX rail. If you want this support, you should say Y here. + +config QTI_THERMAL_QFPROM + tristate "Qualcomm Technologies Inc. thermal QFPROM driver" + depends on THERMAL + depends on QCOM_QFPROM + help + This driver enables or disables pre-configured thermal zones + selectively at runtime based on QFPROM nvmem cell bit value is + set or not. It supports to check multiple nvmem cell value for + multiple condtion. In that case, if any of the nvmem-cell condition + fails, driver just exits with default enabled thermal zones. diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile index 8a0b6190ec9f..677bf0335ce5 100644 --- a/drivers/thermal/qcom/Makefile +++ b/drivers/thermal/qcom/Makefile @@ -9,8 +9,10 @@ obj-$(CONFIG_QTI_QMI_COOLING_DEVICE) += thermal_mitigation_device_service_v01.o obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o lmh_dbg.o obj-$(CONFIG_QTI_AOP_REG_COOLING_DEVICE) += regulator_aop_cdev.o obj-$(CONFIG_REGULATOR_COOLING_DEVICE) += regulator_cdev.o +obj-$(CONFIG_QTI_RPM_SMD_COOLING_DEVICE) += rpm_smd_cooling_device.o obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o obj-$(CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE) += cpu_isolate.o obj-$(CONFIG_QTI_LMH_CPU_VDD_COOLING_DEVICE) += lmh_cpu_vdd_cdev.o obj-$(CONFIG_QTI_LIMITS_ISENSE_CDSP) += msm_isense_cdsp.o obj-$(CONFIG_QTI_CX_IPEAK_COOLING_DEVICE) += cx_ipeak_cdev.o +obj-$(CONFIG_QTI_THERMAL_QFPROM) += qti_thermal_qfprom.o diff --git a/drivers/thermal/qcom/qmi_sensors.c b/drivers/thermal/qcom/qmi_sensors.c index 97c1467c65fa..a145040ff5cc 100644 --- a/drivers/thermal/qcom/qmi_sensors.c +++ b/drivers/thermal/qcom/qmi_sensors.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2020,2021 The Linux Foundation. All rights reserved. 
*/ #define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__ @@ -124,7 +124,7 @@ static char sensor_clients[QMI_TS_MAX_NR][QMI_CLIENT_NAME_LENGTH] = { static int32_t encode_qmi(int32_t val) { uint32_t shift = 0, local_val = 0; - int32_t temp_val = 0; + unsigned long temp_val = 0; if (val == INT_MAX || val == INT_MIN) return 0; @@ -134,8 +134,7 @@ static int32_t encode_qmi(int32_t val) temp_val *= -1; local_val |= 1 << QMI_FL_SIGN_BIT; } - shift = find_last_bit((const unsigned long *)&temp_val, - sizeof(temp_val) * 8); + shift = find_last_bit(&temp_val, sizeof(temp_val) * 8); local_val |= ((shift + 127) << QMI_MANTISSA_MSB); temp_val &= ~(1 << shift); @@ -279,6 +278,13 @@ static int qmi_ts_request(struct qmi_sensor *qmi_sens, qmi_sens->low_thresh != INT_MIN; req.temp_threshold_low = encode_qmi(qmi_sens->low_thresh); + + pr_debug("Sensor:%s set high_trip:%d, low_trip:%d, high_valid:%d, low_valid:%d\n", + qmi_sens->qmi_name, + qmi_sens->high_thresh, + qmi_sens->low_thresh, + req.temp_threshold_high_valid, + req.temp_threshold_low_valid); } mutex_lock(&ts->mutex); diff --git a/drivers/thermal/qcom/qti_thermal_qfprom.c b/drivers/thermal/qcom/qti_thermal_qfprom.c new file mode 100644 index 000000000000..47b9b2cd6e8b --- /dev/null +++ b/drivers/thermal/qcom/qti_thermal_qfprom.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__ + +#include +#include +#include +#include +#include +#include +#include + +#include "../thermal_core.h" + +static int thermal_qfprom_read(struct platform_device *pdev, + const char *cname, unsigned int *efuse_val) +{ + struct nvmem_cell *cell; + size_t len; + char *buf; + + cell = nvmem_cell_get(&pdev->dev, cname); + if (IS_ERR(cell)) { + dev_err(&pdev->dev, "failed to get nvmem cell %s\n", cname); + return -EINVAL; + } + + buf = nvmem_cell_read(cell, &len); + nvmem_cell_put(cell); + if (IS_ERR_OR_NULL(buf)) { + dev_err(&pdev->dev, "failed to read nvmem cell %s\n", cname); + return -EINVAL; + } + + if (len <= 0 || len > sizeof(u32)) { + dev_err(&pdev->dev, "nvmem cell length out of range:%d\n", len); + kfree(buf); + return -EINVAL; + } + memcpy(efuse_val, buf, min(len, sizeof(*efuse_val))); + kfree(buf); + + return 0; +} + +static int thermal_zone_set_mode(struct platform_device *pdev, + enum thermal_device_mode mode) +{ + const char *name; + struct property *prop = NULL; + + of_property_for_each_string(pdev->dev.of_node, + mode == THERMAL_DEVICE_ENABLED ? 
+ "qcom,thermal-zone-enable-list" : + "qcom,thermal-zone-disable-list", prop, name) { + struct thermal_zone_device *zone; + struct thermal_instance *pos; + + zone = thermal_zone_get_zone_by_name(name); + if (IS_ERR(zone)) { + dev_err(&pdev->dev, + "could not find %s thermal zone\n", name); + continue; + } + + if (!(zone->ops && zone->ops->set_mode)) { + dev_err(&pdev->dev, + "thermal zone ops is not supported for %s\n", + name); + continue; + } + + zone->ops->set_mode(zone, mode); + if (mode == THERMAL_DEVICE_DISABLED) { + /* Clear thermal zone device */ + mutex_lock(&zone->lock); + zone->temperature = THERMAL_TEMP_INVALID; + zone->passive = 0; + list_for_each_entry(pos, &zone->thermal_instances, + tz_node) { + pos->initialized = false; + pos->target = THERMAL_NO_TARGET; + mutex_lock(&pos->cdev->lock); + pos->cdev->updated = false; + mutex_unlock(&pos->cdev->lock); + thermal_cdev_update(pos->cdev); + } + mutex_unlock(&zone->lock); + } + dev_dbg(&pdev->dev, "thermal zone %s is %s\n", name, + mode == THERMAL_DEVICE_ENABLED ? + "enabled" : "disabled"); + } + + return 0; +} + +static void update_thermal_zones(struct platform_device *pdev) +{ + thermal_zone_set_mode(pdev, THERMAL_DEVICE_ENABLED); + thermal_zone_set_mode(pdev, THERMAL_DEVICE_DISABLED); +} + +static int thermal_qfprom_probe(struct platform_device *pdev) +{ + int err = 0; + const char *name; + struct property *prop = NULL; + u8 efuse_pass_cnt = 0; + + of_property_for_each_string(pdev->dev.of_node, + "nvmem-cell-names", prop, name) { + u32 efuse_val = 0, efuse_match_val = 0; + + err = thermal_qfprom_read(pdev, name, &efuse_val); + if (err) + return err; + + err = of_property_read_u32_index(pdev->dev.of_node, + "qcom,thermal-qfprom-bit-values", efuse_pass_cnt, + &efuse_match_val); + if (err) { + dev_err(&pdev->dev, + "Invalid qfprom bit value for index %d\n", + efuse_pass_cnt); + return err; + } + + dev_dbg(&pdev->dev, "efuse[%s] val:0x%x match val[%d]:0x%x\n", + name, efuse_val, efuse_pass_cnt, + efuse_match_val); + + /* if any of efuse condition fails, just exit */ + if (efuse_val != efuse_match_val) + return 0; + + efuse_pass_cnt++; + } + + if (efuse_pass_cnt) + update_thermal_zones(pdev); + + return err; +} + +static const struct of_device_id thermal_qfprom_match[] = { + { .compatible = "qcom,thermal-qfprom-device", }, + {}, +}; + +static struct platform_driver thermal_qfprom_driver = { + .probe = thermal_qfprom_probe, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = thermal_qfprom_match, + }, +}; + +int __init thermal_qfprom_init(void) +{ + int err; + + err = platform_driver_register(&thermal_qfprom_driver); + if (err) + pr_err("Failed to register thermal qfprom platform driver:%d\n", + err); + return err; +} + +late_initcall(thermal_qfprom_init); diff --git a/drivers/thermal/qcom/qti_virtual_sensor.c b/drivers/thermal/qcom/qti_virtual_sensor.c index e11797e45b8b..6cd275ad193d 100644 --- a/drivers/thermal/qcom/qti_virtual_sensor.c +++ b/drivers/thermal/qcom/qti_virtual_sensor.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2019,2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -173,6 +173,16 @@ static const struct virtual_sensor_data qti_virtual_sensors[] = { .coefficients = {30, 70}, .avg_denominator = 100, }, + { + .virt_zone_name = "penta-cpu-max-step", + .num_sensors = 5, + .sensor_names = {"apc1-cpu0-usr", + "apc1-cpu1-usr", + "apc1-cpu2-usr", + "apc1-cpu3-usr", + "cpuss-usr"}, + .logic = VIRT_MAXIMUM, + }, }; int qti_virtual_sensor_register(struct device *dev) diff --git a/drivers/thermal/qcom/rpm_smd_cooling_device.c b/drivers/thermal/qcom/rpm_smd_cooling_device.c new file mode 100644 index 000000000000..79d1a94ea615 --- /dev/null +++ b/drivers/thermal/qcom/rpm_smd_cooling_device.c @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2018, 2021, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__ + +#include +#include +#include +#include +#include + +#define RPM_SMD_CDEV_DRIVER "rpm-smd-cooling-device" +#define RPM_SMD_RES_TYPE 0x6d726874 +#define RPM_SMD_RES_ID 0 +#define RPM_SMD_KEY 1 + +enum rpm_smd_temp_band { + RPM_SMD_COLD_CRITICAL = 1, + RPM_SMD_COLD, + RPM_SMD_COOL, + RPM_SMD_NORMAL, + RPM_SMD_WARM, + RPM_SMD_HOT, + RPM_SMD_HOT_CRITICAL, + RPM_SMD_TEMP_MAX_NR, +}; + +struct rpm_smd_cdev { + struct thermal_cooling_device *cool_dev; + char dev_name[THERMAL_NAME_LENGTH]; + unsigned int state; + struct msm_rpm_request *rpm_handle; +}; + +static int rpm_smd_send_request_to_rpm(struct rpm_smd_cdev *rpm_smd_dev, + unsigned int state) +{ + unsigned int band; + int msg_id, ret; + + if (!rpm_smd_dev || !rpm_smd_dev->rpm_handle) { + pr_err("Invalid RPM SMD handle\n"); + return -EINVAL; + } + + if (rpm_smd_dev->state == state) + return 0; + + /* if state is zero, then send RPM_SMD_NORMAL band */ + if (!state) + band = RPM_SMD_NORMAL; + else + band = state; + + ret = msm_rpm_add_kvp_data(rpm_smd_dev->rpm_handle, RPM_SMD_KEY, + (const uint8_t *)&band, (int)sizeof(band)); + if (ret) { + pr_err("Adding KVP data failed. err:%d\n", ret); + return ret; + } + + msg_id = msm_rpm_send_request(rpm_smd_dev->rpm_handle); + if (!msg_id) { + pr_err("RPM SMD send request failed\n"); + return -ENXIO; + } + + ret = msm_rpm_wait_for_ack(msg_id); + if (ret) { + pr_err("RPM SMD wait for ACK failed. 
err:%d\n", ret); + return ret; + } + rpm_smd_dev->state = state; + + pr_debug("Requested RPM SMD band:%d for %s\n", band, + rpm_smd_dev->dev_name); + + return ret; +} + +static int rpm_smd_get_max_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + *state = RPM_SMD_TEMP_MAX_NR - 1; + + return 0; +} + +static int rpm_smd_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state) +{ + struct rpm_smd_cdev *rpm_smd_dev = cdev->devdata; + int ret = 0; + + if (state > (RPM_SMD_TEMP_MAX_NR - 1)) + state = RPM_SMD_TEMP_MAX_NR - 1; + + ret = rpm_smd_send_request_to_rpm(rpm_smd_dev, (unsigned int)state); + if (ret) + return ret; + + return ret; +} + +static int rpm_smd_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct rpm_smd_cdev *rpm_smd_dev = cdev->devdata; + + *state = rpm_smd_dev->state; + + return 0; +} + +static struct thermal_cooling_device_ops rpm_smd_device_ops = { + .get_max_state = rpm_smd_get_max_state, + .get_cur_state = rpm_smd_get_cur_state, + .set_cur_state = rpm_smd_set_cur_state, +}; + +static int rpm_smd_cdev_remove(struct platform_device *pdev) +{ + struct rpm_smd_cdev *rpm_smd_dev = + (struct rpm_smd_cdev *)dev_get_drvdata(&pdev->dev); + + if (rpm_smd_dev) { + if (rpm_smd_dev->cool_dev) + thermal_cooling_device_unregister( + rpm_smd_dev->cool_dev); + + rpm_smd_send_request_to_rpm(rpm_smd_dev, RPM_SMD_NORMAL); + msm_rpm_free_request(rpm_smd_dev->rpm_handle); + } + + return 0; +} + +static int rpm_smd_cdev_probe(struct platform_device *pdev) +{ + struct rpm_smd_cdev *rpm_smd_dev; + int ret = 0; + struct device_node *np; + + np = dev_of_node(&pdev->dev); + if (!np) { + dev_err(&pdev->dev, + "of node not available for rpm smd cooling device\n"); + return -EINVAL; + } + + rpm_smd_dev = devm_kzalloc(&pdev->dev, sizeof(*rpm_smd_dev), + GFP_KERNEL); + if (!rpm_smd_dev) + return -ENOMEM; + + rpm_smd_dev->rpm_handle = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET, + RPM_SMD_RES_TYPE, RPM_SMD_RES_ID, 1); + if (!rpm_smd_dev->rpm_handle) { + dev_err(&pdev->dev, "Creating RPM SMD request handle failed\n"); + return -ENXIO; + } + + strlcpy(rpm_smd_dev->dev_name, np->name, THERMAL_NAME_LENGTH); + + /* Be pro-active and mitigate till we get first vote from TF */ + rpm_smd_send_request_to_rpm(rpm_smd_dev, RPM_SMD_COLD); + + rpm_smd_dev->cool_dev = thermal_of_cooling_device_register( + np, rpm_smd_dev->dev_name, rpm_smd_dev, + &rpm_smd_device_ops); + if (IS_ERR(rpm_smd_dev->cool_dev)) { + ret = PTR_ERR(rpm_smd_dev->cool_dev); + dev_err(&pdev->dev, "rpm_smd cdev register err:%d\n", ret); + rpm_smd_dev->cool_dev = NULL; + return ret; + } + + dev_set_drvdata(&pdev->dev, rpm_smd_dev); + + return ret; +} + +static const struct of_device_id rpm_smd_cdev_of_match[] = { + {.compatible = "qcom,rpm-smd-cooling-device", }, + {}, +}; + +static struct platform_driver rpm_smd_cdev_driver = { + .driver = { + .name = RPM_SMD_CDEV_DRIVER, + .of_match_table = rpm_smd_cdev_of_match, + }, + .probe = rpm_smd_cdev_probe, + .remove = rpm_smd_cdev_remove, +}; + +builtin_platform_driver(rpm_smd_cdev_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/qpnp-adc-common.c b/drivers/thermal/qpnp-adc-common.c new file mode 100644 index 000000000000..0b7116dfeec2 --- /dev/null +++ b/drivers/thermal/qpnp-adc-common.c @@ -0,0 +1,906 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + */ +/* + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include + +#define KELVINMIL_DEGMIL 273160 +/* + * Units for temperature below (on x axis) is in 0.1DegC as + * required by the battery driver. Note the resolution used + * here to compute the table was done for DegC to milli-volts. + * In consideration to limit the size of the table for the given + * temperature range below, the result is linearly interpolated + * and provided to the battery driver in the units desired for + * their framework which is 0.1DegC. True resolution of 0.1DegC + * will result in the below table size to increase by 10 times. + */ +static const struct qpnp_vadc_map_pt adcmap_btm_threshold[] = { + {-300, 1642}, + {-200, 1544}, + {-100, 1414}, + {0, 1260}, + {10, 1244}, + {20, 1228}, + {30, 1212}, + {40, 1195}, + {50, 1179}, + {60, 1162}, + {70, 1146}, + {80, 1129}, + {90, 1113}, + {100, 1097}, + {110, 1080}, + {120, 1064}, + {130, 1048}, + {140, 1032}, + {150, 1016}, + {160, 1000}, + {170, 985}, + {180, 969}, + {190, 954}, + {200, 939}, + {210, 924}, + {220, 909}, + {230, 894}, + {240, 880}, + {250, 866}, + {260, 852}, + {270, 838}, + {280, 824}, + {290, 811}, + {300, 798}, + {310, 785}, + {320, 773}, + {330, 760}, + {340, 748}, + {350, 736}, + {360, 725}, + {370, 713}, + {380, 702}, + {390, 691}, + {400, 681}, + {410, 670}, + {420, 660}, + {430, 650}, + {440, 640}, + {450, 631}, + {460, 622}, + {470, 613}, + {480, 604}, + {490, 595}, + {500, 587}, + {510, 579}, + {520, 571}, + {530, 563}, + {540, 556}, + {550, 548}, + {560, 541}, + {570, 534}, + {580, 527}, + {590, 521}, + {600, 514}, + {610, 508}, + {620, 502}, + {630, 496}, + {640, 490}, + {650, 485}, + {660, 281}, + {670, 274}, + {680, 267}, + {690, 260}, + {700, 254}, + {710, 247}, + {720, 241}, + {730, 235}, + {740, 229}, + {750, 224}, + {760, 218}, + {770, 213}, + {780, 208}, + {790, 203} +}; + +/* Voltage to temperature */ +static const struct qpnp_vadc_map_pt adcmap_100k_104ef_104fb[] = { + {1758, -40000}, + {1742, -35000}, + {1719, -30000}, + {1691, -25000}, + {1654, -20000}, + {1608, -15000}, + {1551, -10000}, + {1483, -5000}, + {1404, 0}, + {1315, 5000}, + {1218, 10000}, + {1114, 15000}, + {1007, 20000}, + {900, 25000}, + {795, 30000}, + {696, 35000}, + {605, 40000}, + {522, 45000}, + {448, 50000}, + {383, 55000}, + {327, 60000}, + {278, 65000}, + {237, 70000}, + {202, 75000}, + {172, 80000}, + {146, 85000}, + {125, 90000}, + {107, 95000}, + {92, 100000}, + {79, 105000}, + {68, 110000}, + {59, 115000}, + {51, 120000}, + {44, 125000} +}; + +static const struct qpnp_vadc_map_pt adcmap_smb_batt_therm[] = { + {-300, 1625}, + {-200, 1515}, + {-100, 1368}, + {0, 1192}, + {10, 1173}, + {20, 1154}, + {30, 1135}, + {40, 1116}, + {50, 1097}, + {60, 1078}, + {70, 1059}, + {80, 1040}, + {90, 1020}, + {100, 1001}, + {110, 982}, + {120, 963}, + {130, 944}, + {140, 925}, + {150, 907}, + {160, 888}, + {170, 870}, + {180, 851}, + {190, 833}, + {200, 815}, + {210, 797}, + {220, 780}, + {230, 762}, + {240, 745}, + {250, 728}, + {260, 711}, + {270, 695}, + {280, 679}, + {290, 663}, + {300, 647}, + {310, 632}, + {320, 616}, + {330, 602}, + {340, 587}, + {350, 573}, + {360, 559}, + {370, 545}, + {380, 531}, + {390, 518}, + {400, 505}, + {410, 492}, + {420, 480}, + {430, 465}, + {440, 456}, + {450, 445}, + {460, 433}, + {470, 422}, + {480, 412}, + {490, 401}, + {500, 391}, + {510, 381}, + {520, 371}, + {530, 362}, + {540, 352}, + {550, 343}, + {560, 335}, + {570, 326}, + {580, 318}, + {590, 309}, + {600, 302}, + {610, 294}, + {620, 
286}, + {630, 279}, + {640, 272}, + {650, 265}, + {660, 258}, + {670, 252}, + {680, 245}, + {690, 239}, + {700, 233}, + {710, 227}, + {720, 221}, + {730, 216}, + {740, 211}, + {750, 205}, + {760, 200}, + {770, 195}, + {780, 190}, + {790, 186} +}; + +/* Voltage to temperature */ +static const struct qpnp_vadc_map_pt adcmap_batt_therm_qrd_215[] = { + {1575, -200}, + {1549, -180}, + {1522, -160}, + {1493, -140}, + {1463, -120}, + {1431, -100}, + {1398, -80}, + {1364, -60}, + {1329, -40}, + {1294, -20}, + {1258, 0}, + {1222, 20}, + {1187, 40}, + {1151, 60}, + {1116, 80}, + {1082, 100}, + {1049, 120}, + {1016, 140}, + {985, 160}, + {955, 180}, + {926, 200}, + {899, 220}, + {873, 240}, + {849, 260}, + {825, 280}, + {804, 300}, + {783, 320}, + {764, 340}, + {746, 360}, + {729, 380}, + {714, 400}, + {699, 420}, + {686, 440}, + {673, 460}, + {662, 480}, + {651, 500}, + {641, 520}, + {632, 540}, + {623, 560}, + {615, 580}, + {608, 600}, + {601, 620}, + {595, 640}, + {589, 660}, + {583, 680}, + {578, 700}, + {574, 720}, + {569, 740}, + {565, 760}, + {562, 780}, + {558, 800} +}; + +static int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_adc_drv *adc, + struct qpnp_vadc_linear_graph *param, + enum qpnp_adc_calib_type calib_type) +{ + int rc = 0; + + switch (calib_type) { + case CALIB_RATIOMETRIC: + param->dy = + adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dy; + param->dx = + adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dx; + param->adc_vref = adc->adc_prop->adc_vdd_reference; + param->adc_gnd = + adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_gnd; + break; + case CALIB_ABSOLUTE: + param->dy = + adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].dy; + param->dx = + adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].dx; + param->adc_vref = adc->adc_prop->adc_vdd_reference; + param->adc_gnd = + adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].adc_gnd; + break; + default: + rc = -EINVAL; + } + return rc; +} + +static int32_t qpnp_adc_map_voltage_temp(const struct qpnp_vadc_map_pt *pts, + uint32_t tablesize, int32_t input, int64_t *output) +{ + unsigned int descending = 1; + uint32_t i = 0; + + if (pts == NULL) + return -EINVAL; + + /* Check if table is descending or ascending */ + if (tablesize > 1) { + if (pts[0].x < pts[1].x) + descending = 0; + } + + while (i < tablesize) { + if ((descending == 1) && (pts[i].x < input)) { + /* + * table entry is less than measured + * value and table is descending, stop. + */ + break; + } else if ((descending == 0) && + (pts[i].x > input)) { + /* + * table entry is greater than measured + * value and table is ascending, stop. + */ + break; + } + i++; + } + + if (i == 0) + *output = pts[0].y; + else if (i == tablesize) + *output = pts[tablesize-1].y; + else { + /* result is between search_index and search_index-1 */ + /* interpolate linearly */ + *output = (((int32_t) ((pts[i].y - pts[i-1].y)* + (input - pts[i-1].x))/ + (pts[i].x - pts[i-1].x))+ + pts[i-1].y); + } + + return 0; +} + +static int32_t qpnp_adc_map_temp_voltage(const struct qpnp_vadc_map_pt *pts, + uint32_t tablesize, int32_t input, int64_t *output) +{ + unsigned int descending = 1; + uint32_t i = 0; + + if (pts == NULL) + return -EINVAL; + + /* Check if table is descending or ascending */ + if (tablesize > 1) { + if (pts[0].y < pts[1].y) + descending = 0; + } + + while (i < tablesize) { + if ((descending == 1) && (pts[i].y < input)) { + /* Table entry is less than measured value. */ + /* Table is descending, stop. 
*/ + break; + } else if ((descending == 0) && (pts[i].y > input)) { + /* Table entry is greater than measured value. */ + /* Table is ascending, stop. */ + break; + } + i++; + } + + if (i == 0) { + *output = pts[0].x; + } else if (i == tablesize) { + *output = pts[tablesize-1].x; + } else { + /* result is between search_index and search_index-1 */ + /* interpolate linearly */ + *output = (((int32_t) ((pts[i].x - pts[i-1].x)* + (input - pts[i-1].y))/ + (pts[i].y - pts[i-1].y))+ + pts[i-1].x); + } + + return 0; +} + +int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_adc_drv *adc, + const struct qpnp_adc_properties *adc_properties, + struct qpnp_adc_tm_config *param) +{ + struct qpnp_vadc_linear_graph param1; + int rc = 0; + + rc = qpnp_get_vadc_gain_and_offset(adc, ¶m1, CALIB_RATIOMETRIC); + if (rc < 0) + return rc; + + rc = qpnp_adc_map_temp_voltage(adcmap_100k_104ef_104fb, + ARRAY_SIZE(adcmap_100k_104ef_104fb), + param->low_thr_temp, ¶m->low_thr_voltage); + if (rc) + return rc; + + param->low_thr_voltage *= param1.dy; + param->low_thr_voltage = div64_s64(param->low_thr_voltage, + param1.adc_vref); + param->low_thr_voltage += param1.adc_gnd; + + rc = qpnp_adc_map_temp_voltage(adcmap_100k_104ef_104fb, + ARRAY_SIZE(adcmap_100k_104ef_104fb), + param->high_thr_temp, ¶m->high_thr_voltage); + if (rc) + return rc; + + param->high_thr_voltage *= param1.dy; + param->high_thr_voltage = div64_s64(param->high_thr_voltage, + param1.adc_vref); + param->high_thr_voltage += param1.adc_gnd; + + return 0; +} +EXPORT_SYMBOL(qpnp_adc_tm_scale_therm_voltage_pu2); + +int32_t qpnp_adc_usb_scaler(struct qpnp_adc_drv *adc, + struct qpnp_adc_tm_btm_param *param, + uint32_t *low_threshold, uint32_t *high_threshold) +{ + struct qpnp_vadc_linear_graph usb_param; + int rc = 0; + + rc = qpnp_get_vadc_gain_and_offset(adc, &usb_param, CALIB_RATIOMETRIC); + if (rc < 0) + return rc; + + *low_threshold = param->low_thr * usb_param.dy; + *low_threshold = div64_s64(*low_threshold, usb_param.adc_vref); + *low_threshold += usb_param.adc_gnd; + + *high_threshold = param->high_thr * usb_param.dy; + *high_threshold = div64_s64(*high_threshold, usb_param.adc_vref); + *high_threshold += usb_param.adc_gnd; + + pr_debug("high_volt:%d, low_volt:%d\n", param->high_thr, + param->low_thr); + return 0; +} +EXPORT_SYMBOL(qpnp_adc_usb_scaler); + +int32_t qpnp_adc_absolute_rthr(struct qpnp_adc_drv *adc, + struct qpnp_adc_tm_btm_param *param, + uint32_t *low_threshold, uint32_t *high_threshold) +{ + struct qpnp_vadc_linear_graph vbatt_param; + int rc = 0, sign = 0; + int64_t low_thr = 0, high_thr = 0; + + rc = qpnp_get_vadc_gain_and_offset(adc, &vbatt_param, + CALIB_ABSOLUTE); + if (rc < 0) + return rc; + + low_thr = (((param->low_thr/param->gain_den) - + QPNP_ADC_625_UV) * vbatt_param.dy); + if (low_thr < 0) { + sign = 1; + low_thr = -low_thr; + } + low_thr = low_thr * param->gain_num; + low_thr = div64_s64(low_thr, QPNP_ADC_625_UV); + if (sign) + low_thr = -low_thr; + *low_threshold = low_thr + vbatt_param.adc_gnd; + + sign = 0; + high_thr = (((param->high_thr/param->gain_den) - + QPNP_ADC_625_UV) * vbatt_param.dy); + if (high_thr < 0) { + sign = 1; + high_thr = -high_thr; + } + high_thr = high_thr * param->gain_num; + high_thr = div64_s64(high_thr, QPNP_ADC_625_UV); + if (sign) + high_thr = -high_thr; + *high_threshold = high_thr + vbatt_param.adc_gnd; + + pr_debug("high_volt:%d, low_volt:%d\n", param->high_thr, + param->low_thr); + pr_debug("adc_code_high:%x, adc_code_low:%x\n", *high_threshold, + *low_threshold); + return 0; +} 
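/*
 * Worked example for qpnp_adc_map_voltage_temp() above, using the
 * adcmap_100k_104ef_104fb table: an input of 1000 falls between
 * {1007, 20000} and {900, 25000}, so the linear interpolation step gives
 *
 *   output = (25000 - 20000) * (1000 - 1007) / (900 - 1007) + 20000
 *          = 35000 / 107 + 20000
 *          = 20327, i.e. roughly 20.3 degC in the table's millidegree units.
 */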
+EXPORT_SYMBOL(qpnp_adc_absolute_rthr); + +int32_t qpnp_adc_vbatt_rscaler(struct qpnp_adc_drv *adc, + struct qpnp_adc_tm_btm_param *param, + uint32_t *low_threshold, uint32_t *high_threshold) +{ + return qpnp_adc_absolute_rthr(adc, param, low_threshold, + high_threshold); +} +EXPORT_SYMBOL(qpnp_adc_vbatt_rscaler); + +int32_t qpnp_adc_qrd_215_btm_scaler(struct qpnp_adc_drv *adc, + struct qpnp_adc_tm_btm_param *param, + uint32_t *low_threshold, uint32_t *high_threshold) +{ + struct qpnp_vadc_linear_graph btm_param; + int64_t low_output = 0, high_output = 0; + int rc = 0; + + rc = qpnp_get_vadc_gain_and_offset(adc, &btm_param, CALIB_RATIOMETRIC); + if (rc < 0) + return rc; + + pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp, + param->low_temp); + rc = qpnp_adc_map_temp_voltage( + adcmap_batt_therm_qrd_215, + ARRAY_SIZE(adcmap_batt_therm_qrd_215), + (param->low_temp), + &low_output); + if (rc) { + pr_debug("low_temp mapping failed with %d\n", rc); + return rc; + } + + pr_debug("low_output:%lld\n", low_output); + low_output *= btm_param.dy; + low_output = div64_s64(low_output, btm_param.adc_vref); + low_output += btm_param.adc_gnd; + + rc = qpnp_adc_map_temp_voltage( + adcmap_batt_therm_qrd_215, + ARRAY_SIZE(adcmap_batt_therm_qrd_215), + (param->high_temp), + &high_output); + if (rc) { + pr_debug("high temp mapping failed with %d\n", rc); + return rc; + } + + pr_debug("high_output:%lld\n", high_output); + high_output *= btm_param.dy; + high_output = div64_s64(high_output, btm_param.adc_vref); + high_output += btm_param.adc_gnd; + + /* btm low temperature correspondes to high voltage threshold */ + *low_threshold = high_output; + /* btm high temperature correspondes to low voltage threshold */ + *high_threshold = low_output; + + pr_debug("high_volt:%d, low_volt:%d\n", *high_threshold, + *low_threshold); + return 0; +} +EXPORT_SYMBOL(qpnp_adc_qrd_215_btm_scaler); + +int32_t qpnp_adc_smb_btm_rscaler(struct qpnp_adc_drv *adc, + struct qpnp_adc_tm_btm_param *param, + uint32_t *low_threshold, uint32_t *high_threshold) +{ + struct qpnp_vadc_linear_graph btm_param; + int64_t low_output = 0, high_output = 0; + int rc = 0; + + rc = qpnp_get_vadc_gain_and_offset(adc, &btm_param, CALIB_RATIOMETRIC); + if (rc < 0) + return rc; + + pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp, + param->low_temp); + rc = qpnp_adc_map_voltage_temp( + adcmap_smb_batt_therm, + ARRAY_SIZE(adcmap_smb_batt_therm), + (param->low_temp), + &low_output); + if (rc) { + pr_debug("low_temp mapping failed with %d\n", rc); + return rc; + } + + pr_debug("low_output:%lld\n", low_output); + low_output *= btm_param.dy; + low_output = div64_s64(low_output, btm_param.adc_vref); + low_output += btm_param.adc_gnd; + + rc = qpnp_adc_map_voltage_temp( + adcmap_smb_batt_therm, + ARRAY_SIZE(adcmap_smb_batt_therm), + (param->high_temp), + &high_output); + if (rc) { + pr_debug("high temp mapping failed with %d\n", rc); + return rc; + } + + pr_debug("high_output:%lld\n", high_output); + high_output *= btm_param.dy; + high_output = div64_s64(high_output, btm_param.adc_vref); + high_output += btm_param.adc_gnd; + + /* btm low temperature correspondes to high voltage threshold */ + *low_threshold = high_output; + /* btm high temperature correspondes to low voltage threshold */ + *high_threshold = low_output; + + pr_debug("high_volt:%d, low_volt:%d\n", *high_threshold, + *low_threshold); + return 0; +} +EXPORT_SYMBOL(qpnp_adc_smb_btm_rscaler); + +int qpnp_adc_get_revid_version(struct device *dev) +{ + struct pmic_revid_data 
*revid_data; + struct device_node *revid_dev_node; + + revid_dev_node = of_parse_phandle(dev->of_node, + "qcom,pmic-revid", 0); + if (!revid_dev_node) { + pr_debug("Missing qcom,pmic-revid property\n"); + return -EINVAL; + } + + revid_data = get_revid_data(revid_dev_node); + if (IS_ERR(revid_data)) { + pr_debug("revid error rc = %ld\n", PTR_ERR(revid_data)); + return -EINVAL; + } + + if (!revid_data) + return -EINVAL; + + if ((revid_data->rev1 == PM8941_V3P1_REV1) && + (revid_data->rev2 == PM8941_V3P1_REV2) && + (revid_data->rev3 == PM8941_V3P1_REV3) && + (revid_data->rev4 == PM8941_V3P1_REV4) && + (revid_data->pmic_subtype == PM8941_SUBTYPE)) + return QPNP_REV_ID_8941_3_1; + else if ((revid_data->rev1 == PM8941_V3P0_REV1) && + (revid_data->rev2 == PM8941_V3P0_REV2) && + (revid_data->rev3 == PM8941_V3P0_REV3) && + (revid_data->rev4 == PM8941_V3P0_REV4) && + (revid_data->pmic_subtype == PM8941_SUBTYPE)) + return QPNP_REV_ID_8941_3_0; + else if ((revid_data->rev1 == PM8941_V2P0_REV1) && + (revid_data->rev2 == PM8941_V2P0_REV2) && + (revid_data->rev3 == PM8941_V2P0_REV3) && + (revid_data->rev4 == PM8941_V2P0_REV4) && + (revid_data->pmic_subtype == PM8941_SUBTYPE)) + return QPNP_REV_ID_8941_2_0; + else if ((revid_data->rev1 == PM8226_V2P2_REV1) && + (revid_data->rev2 == PM8226_V2P2_REV2) && + (revid_data->rev3 == PM8226_V2P2_REV3) && + (revid_data->rev4 == PM8226_V2P2_REV4) && + (revid_data->pmic_subtype == PM8226_SUBTYPE)) + return QPNP_REV_ID_8026_2_2; + else if ((revid_data->rev1 == PM8226_V2P1_REV1) && + (revid_data->rev2 == PM8226_V2P1_REV2) && + (revid_data->rev3 == PM8226_V2P1_REV3) && + (revid_data->rev4 == PM8226_V2P1_REV4) && + (revid_data->pmic_subtype == PM8226_SUBTYPE)) + return QPNP_REV_ID_8026_2_1; + else if ((revid_data->rev1 == PM8226_V2P0_REV1) && + (revid_data->rev2 == PM8226_V2P0_REV2) && + (revid_data->rev3 == PM8226_V2P0_REV3) && + (revid_data->rev4 == PM8226_V2P0_REV4) && + (revid_data->pmic_subtype == PM8226_SUBTYPE)) + return QPNP_REV_ID_8026_2_0; + else if ((revid_data->rev1 == PM8226_V1P0_REV1) && + (revid_data->rev2 == PM8226_V1P0_REV2) && + (revid_data->rev3 == PM8226_V1P0_REV3) && + (revid_data->rev4 == PM8226_V1P0_REV4) && + (revid_data->pmic_subtype == PM8226_SUBTYPE)) + return QPNP_REV_ID_8026_1_0; + else if ((revid_data->rev1 == PM8110_V1P0_REV1) && + (revid_data->rev2 == PM8110_V1P0_REV2) && + (revid_data->rev3 == PM8110_V1P0_REV3) && + (revid_data->rev4 == PM8110_V1P0_REV4) && + (revid_data->pmic_subtype == PM8110_SUBTYPE)) + return QPNP_REV_ID_8110_1_0; + else if ((revid_data->rev1 == PM8110_V2P0_REV1) && + (revid_data->rev2 == PM8110_V2P0_REV2) && + (revid_data->rev3 == PM8110_V2P0_REV3) && + (revid_data->rev4 == PM8110_V2P0_REV4) && + (revid_data->pmic_subtype == PM8110_SUBTYPE)) + return QPNP_REV_ID_8110_2_0; + else if ((revid_data->rev1 == PM8916_V1P0_REV1) && + (revid_data->rev2 == PM8916_V1P0_REV2) && + (revid_data->rev3 == PM8916_V1P0_REV3) && + (revid_data->rev4 == PM8916_V1P0_REV4) && + (revid_data->pmic_subtype == PM8916_SUBTYPE)) + return QPNP_REV_ID_8916_1_0; + else if ((revid_data->rev1 == PM8916_V1P1_REV1) && + (revid_data->rev2 == PM8916_V1P1_REV2) && + (revid_data->rev3 == PM8916_V1P1_REV3) && + (revid_data->rev4 == PM8916_V1P1_REV4) && + (revid_data->pmic_subtype == PM8916_SUBTYPE)) + return QPNP_REV_ID_8916_1_1; + else if ((revid_data->rev1 == PM8916_V2P0_REV1) && + (revid_data->rev2 == PM8916_V2P0_REV2) && + (revid_data->rev3 == PM8916_V2P0_REV3) && + (revid_data->rev4 == PM8916_V2P0_REV4) && + (revid_data->pmic_subtype == 
PM8916_SUBTYPE)) + return QPNP_REV_ID_8916_2_0; + else if ((revid_data->rev1 == PM8909_V1P0_REV1) && + (revid_data->rev2 == PM8909_V1P0_REV2) && + (revid_data->rev3 == PM8909_V1P0_REV3) && + (revid_data->rev4 == PM8909_V1P0_REV4) && + (revid_data->pmic_subtype == PM8909_SUBTYPE)) + return QPNP_REV_ID_8909_1_0; + else if ((revid_data->rev1 == PM8909_V1P1_REV1) && + (revid_data->rev2 == PM8909_V1P1_REV2) && + (revid_data->rev3 == PM8909_V1P1_REV3) && + (revid_data->rev4 == PM8909_V1P1_REV4) && + (revid_data->pmic_subtype == PM8909_SUBTYPE)) + return QPNP_REV_ID_8909_1_1; + else if ((revid_data->rev4 == PM8950_V1P0_REV4) && + (revid_data->pmic_subtype == PM8950_SUBTYPE)) + return QPNP_REV_ID_PM8950_1_0; + else + return -EINVAL; +} +EXPORT_SYMBOL(qpnp_adc_get_revid_version); + +int32_t qpnp_adc_get_devicetree_data(struct platform_device *pdev, + struct qpnp_adc_drv *adc_qpnp) +{ + struct device_node *node = pdev->dev.of_node; + unsigned int base; + struct device_node *child; + struct qpnp_adc_amux *adc_channel_list; + struct qpnp_adc_properties *adc_prop; + struct qpnp_adc_amux_properties *amux_prop; + int count_adc_channel_list = 0, decimation = 0, rc = 0, i = 0; + + if (!node) + return -EINVAL; + + for_each_child_of_node(node, child) + count_adc_channel_list++; + + if (!count_adc_channel_list) { + pr_err("No channel listing\n"); + return -EINVAL; + } + + adc_qpnp->pdev = pdev; + + adc_prop = devm_kzalloc(&pdev->dev, + sizeof(struct qpnp_adc_properties), + GFP_KERNEL); + if (!adc_prop) + return -ENOMEM; + + adc_channel_list = devm_kzalloc(&pdev->dev, + ((sizeof(struct qpnp_adc_amux)) * count_adc_channel_list), + GFP_KERNEL); + if (!adc_channel_list) + return -ENOMEM; + + amux_prop = devm_kzalloc(&pdev->dev, + sizeof(struct qpnp_adc_amux_properties) + + sizeof(struct qpnp_vadc_chan_properties), GFP_KERNEL); + if (!amux_prop) { + dev_err(&pdev->dev, "Unable to allocate memory\n"); + return -ENOMEM; + } + + adc_qpnp->adc_channels = adc_channel_list; + adc_qpnp->amux_prop = amux_prop; + + for_each_child_of_node(node, child) { + int channel_num, scaling = 0, post_scaling = 0; + int fast_avg_setup = 0, calib_type = 0, rc, hw_settle_time = 0; + const char *channel_name; + + channel_name = of_get_property(child, + "label", NULL) ? 
: child->name; + + if (!channel_name) { + pr_err("Invalid channel name\n"); + return -EINVAL; + } + + rc = of_property_read_u32(child, "reg", &channel_num); + if (rc) { + pr_err("Invalid channel num\n"); + return -EINVAL; + } + + rc = of_property_read_u32(child, + "qcom,hw-settle-time", &hw_settle_time); + if (rc) { + pr_err("Invalid channel hw settle time property\n"); + return -EINVAL; + } + + rc = of_property_read_u32(child, + "qcom,pre-div-channel-scaling", &scaling); + if (rc) { + pr_err("Invalid channel scaling property\n"); + return -EINVAL; + } + + rc = of_property_read_u32(child, + "qcom,scale-fn-type", &post_scaling); + if (rc) { + pr_err("Invalid channel post scaling property\n"); + return -EINVAL; + } + + if (of_property_read_bool(child, "qcom,ratiometric")) + calib_type = CALIB_RATIOMETRIC; + else + calib_type = CALIB_ABSOLUTE; + + rc = of_property_read_u32(child, + "qcom,decimation", &decimation); + if (rc) { + pr_err("Invalid decimation\n"); + return -EINVAL; + } + + rc = of_property_read_u32(child, + "qcom,fast-avg-setup", &fast_avg_setup); + if (rc) { + pr_err("Invalid channel fast average setup\n"); + return -EINVAL; + } + + /* Individual channel properties */ + adc_channel_list[i].name = (char *)channel_name; + adc_channel_list[i].channel_num = channel_num; + adc_channel_list[i].adc_decimation = decimation; + adc_channel_list[i].fast_avg_setup = fast_avg_setup; + + adc_channel_list[i].chan_path_prescaling = scaling; + adc_channel_list[i].adc_scale_fn = post_scaling; + adc_channel_list[i].hw_settle_time = hw_settle_time; + adc_channel_list[i].calib_type = calib_type; + + i++; + } + + /* Get the ADC VDD reference voltage and ADC bit resolution */ + rc = of_property_read_u32(node, "qcom,adc-vdd-reference", + &adc_prop->adc_vdd_reference); + if (rc) { + pr_err("Invalid adc vdd reference property\n"); + return -EINVAL; + } + adc_qpnp->adc_prop = adc_prop; + + /* Get the peripheral address */ + rc = of_property_read_u32(pdev->dev.of_node, "reg", &base); + if (rc < 0) { + dev_err(&pdev->dev, + "Couldn't find reg in node = %s rc = %d\n", + pdev->dev.of_node->full_name, rc); + return rc; + } + + adc_qpnp->offset = base; + + /* Register the ADC peripheral interrupt */ + adc_qpnp->adc_irq_eoc = platform_get_irq_byname(pdev, + "eoc-int-en-set"); + if (adc_qpnp->adc_irq_eoc < 0) { + pr_err("Invalid irq\n"); + return -ENXIO; + } + + return 0; +} +EXPORT_SYMBOL(qpnp_adc_get_devicetree_data); diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c new file mode 100644 index 000000000000..0cbf0083a5d8 --- /dev/null +++ b/drivers/thermal/qpnp-adc-tm.c @@ -0,0 +1,2779 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2019,2021, The Linux Foundation. All rights reserved. + */ +/* + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. 
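/*
 * Illustrative node only, not a binding document: the per-channel devicetree
 * properties consumed by qpnp_adc_get_devicetree_data() above. The property
 * names come from the parser; the node name and values are placeholders.
 *
 *   batt_therm@48 {
 *           label = "batt_therm";
 *           reg = <0x48>;
 *           qcom,ratiometric;
 *           qcom,decimation = <0>;
 *           qcom,pre-div-channel-scaling = <0>;
 *           qcom,scale-fn-type = <2>;
 *           qcom,hw-settle-time = <2>;
 *           qcom,fast-avg-setup = <0>;
 *   };
 */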
+ */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "thermal_core.h" + +/* QPNP VADC TM register definition */ +#define QPNP_REVISION3 0x2 +#define QPNP_PERPH_SUBTYPE 0x5 +#define QPNP_PERPH_TYPE2 0x2 +#define QPNP_REVISION_EIGHT_CHANNEL_SUPPORT 2 +#define QPNP_PERPH_SUBTYPE_TWO_CHANNEL_SUPPORT 0x22 +#define QPNP_STATUS1 0x8 +#define QPNP_STATUS1_OP_MODE 4 +#define QPNP_STATUS1_MEAS_INTERVAL_EN_STS BIT(2) +#define QPNP_STATUS1_REQ_STS BIT(1) +#define QPNP_STATUS1_EOC BIT(0) +#define QPNP_STATUS2 0x9 +#define QPNP_STATUS2_CONV_SEQ_STATE 6 +#define QPNP_STATUS2_FIFO_NOT_EMPTY_FLAG BIT(1) +#define QPNP_STATUS2_CONV_SEQ_TIMEOUT_STS BIT(0) +#define QPNP_CONV_TIMEOUT_ERR 2 + +#define QPNP_MODE_CTL 0x40 +#define QPNP_OP_MODE_SHIFT 3 +#define QPNP_VREF_XO_THM_FORCE BIT(2) +#define QPNP_AMUX_TRIM_EN BIT(1) +#define QPNP_ADC_TRIM_EN BIT(0) +#define QPNP_EN_CTL1 0x46 +#define QPNP_ADC_TM_EN BIT(7) +#define QPNP_BTM_CONV_REQ 0x47 +#define QPNP_ADC_CONV_REQ_EN BIT(7) + +#define QPNP_ADC_DIG_PARAM 0x50 +#define QPNP_ADC_DIG_DEC_RATIO_SEL_SHIFT 3 +#define QPNP_HW_SETTLE_DELAY 0x51 +#define QPNP_CONV_SEQ_CTL 0x54 +#define QPNP_CONV_SEQ_HOLDOFF_SHIFT 4 +#define QPNP_CONV_SEQ_TRIG_CTL 0x55 +#define QPNP_ADC_TM_MEAS_INTERVAL_CTL 0x57 +#define QPNP_ADC_TM_MEAS_INTERVAL_TIME_SHIFT 0x3 +#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2 0x58 +#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2_SHIFT 0x4 +#define QPNP_ADC_TM_MEAS_INTERVAL_CTL2_MASK 0xf0 +#define QPNP_ADC_TM_MEAS_INTERVAL_CTL3_MASK 0xf + +#define QPNP_ADC_MEAS_INTERVAL_OP_CTL 0x59 +#define QPNP_ADC_MEAS_INTERVAL_OP BIT(7) + +#define QPNP_OP_MODE_SHIFT 3 +#define QPNP_CONV_REQ 0x52 +#define QPNP_CONV_REQ_SET BIT(7) + +#define QPNP_FAST_AVG_CTL 0x5a +#define QPNP_FAST_AVG_EN 0x5b +#define QPNP_FAST_AVG_ENABLED BIT(7) + +#define QPNP_M0_LOW_THR_LSB 0x5c +#define QPNP_M0_LOW_THR_MSB 0x5d +#define QPNP_M0_HIGH_THR_LSB 0x5e +#define QPNP_M0_HIGH_THR_MSB 0x5f +#define QPNP_M1_ADC_CH_SEL_CTL 0x68 +#define QPNP_M1_LOW_THR_LSB 0x69 +#define QPNP_M1_LOW_THR_MSB 0x6a +#define QPNP_M1_HIGH_THR_LSB 0x6b +#define QPNP_M1_HIGH_THR_MSB 0x6c +#define QPNP_M2_ADC_CH_SEL_CTL 0x70 +#define QPNP_M2_LOW_THR_LSB 0x71 +#define QPNP_M2_LOW_THR_MSB 0x72 +#define QPNP_M2_HIGH_THR_LSB 0x73 +#define QPNP_M2_HIGH_THR_MSB 0x74 +#define QPNP_M3_ADC_CH_SEL_CTL 0x78 +#define QPNP_M3_LOW_THR_LSB 0x79 +#define QPNP_M3_LOW_THR_MSB 0x7a +#define QPNP_M3_HIGH_THR_LSB 0x7b +#define QPNP_M3_HIGH_THR_MSB 0x7c +#define QPNP_M4_ADC_CH_SEL_CTL 0x80 +#define QPNP_M4_LOW_THR_LSB 0x81 +#define QPNP_M4_LOW_THR_MSB 0x82 +#define QPNP_M4_HIGH_THR_LSB 0x83 +#define QPNP_M4_HIGH_THR_MSB 0x84 +#define QPNP_M5_ADC_CH_SEL_CTL 0x88 +#define QPNP_M5_LOW_THR_LSB 0x89 +#define QPNP_M5_LOW_THR_MSB 0x8a +#define QPNP_M5_HIGH_THR_LSB 0x8b +#define QPNP_M5_HIGH_THR_MSB 0x8c +#define QPNP_M6_ADC_CH_SEL_CTL 0x90 +#define QPNP_M6_LOW_THR_LSB 0x91 +#define QPNP_M6_LOW_THR_MSB 0x92 +#define QPNP_M6_HIGH_THR_LSB 0x93 +#define QPNP_M6_HIGH_THR_MSB 0x94 +#define QPNP_M7_ADC_CH_SEL_CTL 0x98 +#define QPNP_M7_LOW_THR_LSB 0x99 +#define QPNP_M7_LOW_THR_MSB 0x9a +#define QPNP_M7_HIGH_THR_LSB 0x9b +#define QPNP_M7_HIGH_THR_MSB 0x9c + +#define QPNP_ADC_TM_MULTI_MEAS_EN 0x41 +#define QPNP_ADC_TM_MULTI_MEAS_EN_M0 BIT(0) +#define QPNP_ADC_TM_MULTI_MEAS_EN_M1 BIT(1) +#define QPNP_ADC_TM_MULTI_MEAS_EN_M2 BIT(2) +#define 
QPNP_ADC_TM_MULTI_MEAS_EN_M3 BIT(3) +#define QPNP_ADC_TM_MULTI_MEAS_EN_M4 BIT(4) +#define QPNP_ADC_TM_MULTI_MEAS_EN_M5 BIT(5) +#define QPNP_ADC_TM_MULTI_MEAS_EN_M6 BIT(6) +#define QPNP_ADC_TM_MULTI_MEAS_EN_M7 BIT(7) +#define QPNP_ADC_TM_LOW_THR_INT_EN 0x42 +#define QPNP_ADC_TM_LOW_THR_INT_EN_M0 BIT(0) +#define QPNP_ADC_TM_LOW_THR_INT_EN_M1 BIT(1) +#define QPNP_ADC_TM_LOW_THR_INT_EN_M2 BIT(2) +#define QPNP_ADC_TM_LOW_THR_INT_EN_M3 BIT(3) +#define QPNP_ADC_TM_LOW_THR_INT_EN_M4 BIT(4) +#define QPNP_ADC_TM_LOW_THR_INT_EN_M5 BIT(5) +#define QPNP_ADC_TM_LOW_THR_INT_EN_M6 BIT(6) +#define QPNP_ADC_TM_LOW_THR_INT_EN_M7 BIT(7) +#define QPNP_ADC_TM_HIGH_THR_INT_EN 0x43 +#define QPNP_ADC_TM_HIGH_THR_INT_EN_M0 BIT(0) +#define QPNP_ADC_TM_HIGH_THR_INT_EN_M1 BIT(1) +#define QPNP_ADC_TM_HIGH_THR_INT_EN_M2 BIT(2) +#define QPNP_ADC_TM_HIGH_THR_INT_EN_M3 BIT(3) +#define QPNP_ADC_TM_HIGH_THR_INT_EN_M4 BIT(4) +#define QPNP_ADC_TM_HIGH_THR_INT_EN_M5 BIT(5) +#define QPNP_ADC_TM_HIGH_THR_INT_EN_M6 BIT(6) +#define QPNP_ADC_TM_HIGH_THR_INT_EN_M7 BIT(7) + +#define QPNP_ADC_TM_M0_MEAS_INTERVAL_CTL 0x59 +#define QPNP_ADC_TM_M1_MEAS_INTERVAL_CTL 0x6d +#define QPNP_ADC_TM_M2_MEAS_INTERVAL_CTL 0x75 +#define QPNP_ADC_TM_M3_MEAS_INTERVAL_CTL 0x7d +#define QPNP_ADC_TM_M4_MEAS_INTERVAL_CTL 0x85 +#define QPNP_ADC_TM_M5_MEAS_INTERVAL_CTL 0x8d +#define QPNP_ADC_TM_M6_MEAS_INTERVAL_CTL 0x95 +#define QPNP_ADC_TM_M7_MEAS_INTERVAL_CTL 0x9d + +#define QPNP_ADC_TM_STATUS1 0x8 +#define QPNP_ADC_TM_STATUS_LOW 0xa +#define QPNP_ADC_TM_STATUS_HIGH 0xb + +#define QPNP_ADC_TM_M0_LOW_THR 0x5d5c +#define QPNP_ADC_TM_M0_HIGH_THR 0x5f5e +#define QPNP_ADC_TM_MEAS_INTERVAL 0x0 + +#define QPNP_ADC_TM_THR_LSB_MASK(val) (val & 0xff) +#define QPNP_ADC_TM_THR_MSB_MASK(val) ((val & 0xff00) >> 8) + +#define QPNP_MIN_TIME 2000 +#define QPNP_MAX_TIME 2100 +#define QPNP_RETRY 1000 + +enum thermal_trip_activation_mode { + THERMAL_TRIP_ACTIVATION_DISABLED = 0, + THERMAL_TRIP_ACTIVATION_ENABLED, +}; + +struct qpnp_adc_thr_info { + u8 status_low; + u8 status_high; + u8 qpnp_adc_tm_meas_en; + u8 adc_tm_low_enable; + u8 adc_tm_high_enable; + u8 adc_tm_low_thr_set; + u8 adc_tm_high_thr_set; + spinlock_t adc_tm_low_lock; + spinlock_t adc_tm_high_lock; +}; + +struct qpnp_adc_thr_client_info { + struct list_head list; + struct qpnp_adc_tm_btm_param *btm_param; + int32_t low_thr_requested; + int32_t high_thr_requested; + enum qpnp_state_request state_requested; + enum qpnp_state_request state_req_copy; + bool low_thr_set; + bool high_thr_set; + bool notify_low_thr; + bool notify_high_thr; +}; + +struct qpnp_adc_tm_sensor { + struct thermal_zone_device *tz_dev; + struct qpnp_adc_tm_chip *chip; + enum thermal_device_mode mode; + uint32_t sensor_num; + enum qpnp_adc_meas_timer_select timer_select; + uint32_t meas_interval; + uint32_t low_thr; + uint32_t high_thr; + uint32_t btm_channel_num; + uint32_t vadc_channel_num; + struct iio_channel *sen_adc; + struct workqueue_struct *req_wq; + struct work_struct work; + bool thermal_node; + uint32_t scale_type; + struct list_head thr_list; + bool high_thr_triggered; + bool low_thr_triggered; +}; + +struct qpnp_adc_tm_chip { + struct device *dev; + struct qpnp_adc_drv *adc; + struct list_head list; + bool adc_tm_initialized; + bool adc_tm_recalib_check; + int max_channels_available; + struct iio_channel *ref_1250v; + struct iio_channel *ref_625mv; + struct iio_channel *ref_vdd; + struct iio_channel *ref_gnd; + struct workqueue_struct *high_thr_wq; + struct workqueue_struct *low_thr_wq; + struct workqueue_struct *thr_wq; + 
struct work_struct trigger_high_thr_work; + struct work_struct trigger_low_thr_work; + struct work_struct trigger_thr_work; + bool adc_vote_enable; + struct qpnp_adc_thr_info th_info; + struct qpnp_adc_tm_sensor sensor[0]; +}; + +LIST_HEAD(qpnp_adc_tm_device_list); + +struct qpnp_adc_tm_trip_reg_type { + enum qpnp_adc_tm_channel_select btm_amux_chan; + uint16_t low_thr_lsb_addr; + uint16_t low_thr_msb_addr; + uint16_t high_thr_lsb_addr; + uint16_t high_thr_msb_addr; + u8 multi_meas_en; + u8 low_thr_int_chan_en; + u8 high_thr_int_chan_en; + u8 meas_interval_ctl; +}; + +static struct qpnp_adc_tm_trip_reg_type adc_tm_data[] = { + [QPNP_ADC_TM_CHAN0] = {QPNP_ADC_TM_M0_ADC_CH_SEL_CTL, + QPNP_M0_LOW_THR_LSB, + QPNP_M0_LOW_THR_MSB, QPNP_M0_HIGH_THR_LSB, + QPNP_M0_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M0, + QPNP_ADC_TM_LOW_THR_INT_EN_M0, QPNP_ADC_TM_HIGH_THR_INT_EN_M0, + QPNP_ADC_TM_M0_MEAS_INTERVAL_CTL}, + [QPNP_ADC_TM_CHAN1] = {QPNP_ADC_TM_M1_ADC_CH_SEL_CTL, + QPNP_M1_LOW_THR_LSB, + QPNP_M1_LOW_THR_MSB, QPNP_M1_HIGH_THR_LSB, + QPNP_M1_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M1, + QPNP_ADC_TM_LOW_THR_INT_EN_M1, QPNP_ADC_TM_HIGH_THR_INT_EN_M1, + QPNP_ADC_TM_M1_MEAS_INTERVAL_CTL}, + [QPNP_ADC_TM_CHAN2] = {QPNP_ADC_TM_M2_ADC_CH_SEL_CTL, + QPNP_M2_LOW_THR_LSB, + QPNP_M2_LOW_THR_MSB, QPNP_M2_HIGH_THR_LSB, + QPNP_M2_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M2, + QPNP_ADC_TM_LOW_THR_INT_EN_M2, QPNP_ADC_TM_HIGH_THR_INT_EN_M2, + QPNP_ADC_TM_M2_MEAS_INTERVAL_CTL}, + [QPNP_ADC_TM_CHAN3] = {QPNP_ADC_TM_M3_ADC_CH_SEL_CTL, + QPNP_M3_LOW_THR_LSB, + QPNP_M3_LOW_THR_MSB, QPNP_M3_HIGH_THR_LSB, + QPNP_M3_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M3, + QPNP_ADC_TM_LOW_THR_INT_EN_M3, QPNP_ADC_TM_HIGH_THR_INT_EN_M3, + QPNP_ADC_TM_M3_MEAS_INTERVAL_CTL}, + [QPNP_ADC_TM_CHAN4] = {QPNP_ADC_TM_M4_ADC_CH_SEL_CTL, + QPNP_M4_LOW_THR_LSB, + QPNP_M4_LOW_THR_MSB, QPNP_M4_HIGH_THR_LSB, + QPNP_M4_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M4, + QPNP_ADC_TM_LOW_THR_INT_EN_M4, QPNP_ADC_TM_HIGH_THR_INT_EN_M4, + QPNP_ADC_TM_M4_MEAS_INTERVAL_CTL}, + [QPNP_ADC_TM_CHAN5] = {QPNP_ADC_TM_M5_ADC_CH_SEL_CTL, + QPNP_M5_LOW_THR_LSB, + QPNP_M5_LOW_THR_MSB, QPNP_M5_HIGH_THR_LSB, + QPNP_M5_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M5, + QPNP_ADC_TM_LOW_THR_INT_EN_M5, QPNP_ADC_TM_HIGH_THR_INT_EN_M5, + QPNP_ADC_TM_M5_MEAS_INTERVAL_CTL}, + [QPNP_ADC_TM_CHAN6] = {QPNP_ADC_TM_M6_ADC_CH_SEL_CTL, + QPNP_M6_LOW_THR_LSB, + QPNP_M6_LOW_THR_MSB, QPNP_M6_HIGH_THR_LSB, + QPNP_M6_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M6, + QPNP_ADC_TM_LOW_THR_INT_EN_M6, QPNP_ADC_TM_HIGH_THR_INT_EN_M6, + QPNP_ADC_TM_M6_MEAS_INTERVAL_CTL}, + [QPNP_ADC_TM_CHAN7] = {QPNP_ADC_TM_M7_ADC_CH_SEL_CTL, + QPNP_M7_LOW_THR_LSB, + QPNP_M7_LOW_THR_MSB, QPNP_M7_HIGH_THR_LSB, + QPNP_M7_HIGH_THR_MSB, QPNP_ADC_TM_MULTI_MEAS_EN_M7, + QPNP_ADC_TM_LOW_THR_INT_EN_M7, QPNP_ADC_TM_HIGH_THR_INT_EN_M7, + QPNP_ADC_TM_M7_MEAS_INTERVAL_CTL}, +}; + +static struct qpnp_adc_tm_reverse_scale_fn adc_tm_rscale_fn[] = { + [SCALE_R_VBATT] = {qpnp_adc_vbatt_rscaler}, + [SCALE_R_USB_ID] = {qpnp_adc_usb_scaler}, + [SCALE_R_ABSOLUTE] = {qpnp_adc_absolute_rthr}, + [SCALE_R_SMB_BATT_THERM] = {qpnp_adc_smb_btm_rscaler}, + [SCALE_QRD_215_RBATT_THERM] = {qpnp_adc_qrd_215_btm_scaler}, +}; + +static int32_t qpnp_adc_tm_read_reg(struct qpnp_adc_tm_chip *chip, + int16_t reg, u8 *data, int len) +{ + int rc = 0; + + rc = regmap_bulk_read(chip->adc->regmap, (chip->adc->offset + reg), + data, len); + if (rc < 0) + pr_err("adc-tm read reg %d failed with %d\n", reg, rc); + + return rc; +} + +static int32_t qpnp_adc_tm_write_reg(struct 
qpnp_adc_tm_chip *chip, + int16_t reg, u8 data, int len) +{ + int rc = 0; + u8 *buf; + + buf = &data; + + rc = regmap_bulk_write(chip->adc->regmap, (chip->adc->offset + reg), + buf, len); + if (rc < 0) + pr_err("adc-tm write reg %d failed with %d\n", reg, rc); + + return rc; +} + +static int32_t qpnp_adc_tm_fast_avg_en(struct qpnp_adc_tm_chip *chip, + uint32_t *fast_avg_sample) +{ + int rc = 0, version = 0; + u8 fast_avg_en = 0; + + version = qpnp_adc_get_revid_version(chip->dev); + if (!((version == QPNP_REV_ID_8916_1_0) || + (version == QPNP_REV_ID_8916_1_1) || + (version == QPNP_REV_ID_8916_2_0))) { + pr_debug("fast-avg-en not required for this version\n"); + return rc; + } + + fast_avg_en = QPNP_FAST_AVG_ENABLED; + rc = qpnp_adc_tm_write_reg(chip, QPNP_FAST_AVG_EN, fast_avg_en, 1); + if (rc < 0) { + pr_err("adc-tm fast-avg enable err\n"); + return rc; + } + + if (*fast_avg_sample >= 3) + *fast_avg_sample = 2; + + return rc; +} + +static int32_t qpnp_adc_tm_enable(struct qpnp_adc_tm_chip *chip) +{ + int rc = 0; + u8 data = 0; + + data = QPNP_ADC_TM_EN; + rc = qpnp_adc_tm_write_reg(chip, QPNP_EN_CTL1, data, 1); + if (rc < 0) { + pr_err("adc-tm enable failed\n"); + return rc; + } + + return rc; +} + +static int32_t qpnp_adc_tm_disable(struct qpnp_adc_tm_chip *chip) +{ + u8 data = 0; + int rc = 0; + + rc = qpnp_adc_tm_write_reg(chip, QPNP_EN_CTL1, data, 1); + if (rc < 0) { + pr_err("adc-tm disable failed\n"); + return rc; + } + + return rc; +} + +static int qpnp_adc_tm_is_valid(struct qpnp_adc_tm_chip *chip) +{ + struct qpnp_adc_tm_chip *adc_tm_chip = NULL; + + list_for_each_entry(adc_tm_chip, &qpnp_adc_tm_device_list, list) + if (chip == adc_tm_chip) + return 0; + + return -EINVAL; +} + +static int32_t qpnp_adc_tm_enable_if_channel_meas( + struct qpnp_adc_tm_chip *chip) +{ + u8 adc_tm_meas_en = 0, status_low = 0, status_high = 0; + int rc = 0; + + /* Check if a measurement request is still required */ + rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN, + &adc_tm_meas_en, 1); + if (rc) { + pr_err("read status high failed with %d\n", rc); + return rc; + } + + rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN, + &status_low, 1); + if (rc) { + pr_err("read status low failed with %d\n", rc); + return rc; + } + + rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN, + &status_high, 1); + if (rc) { + pr_err("read status high failed with %d\n", rc); + return rc; + } + + /* Enable only if there are pending measurement requests */ + if ((adc_tm_meas_en && status_high) || + (adc_tm_meas_en && status_low)) { + qpnp_adc_tm_enable(chip); + /* Request conversion */ + rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ, + QPNP_CONV_REQ_SET, 1); + if (rc < 0) { + pr_err("adc-tm request conversion failed\n"); + return rc; + } + } + + return rc; +} + +static int32_t qpnp_adc_tm_mode_select(struct qpnp_adc_tm_chip *chip, + u8 mode_ctl) +{ + int rc; + + mode_ctl |= (QPNP_ADC_TRIM_EN | QPNP_AMUX_TRIM_EN); + + /* VADC_BTM current sets mode to recurring measurements */ + rc = qpnp_adc_tm_write_reg(chip, QPNP_MODE_CTL, mode_ctl, 1); + if (rc < 0) + pr_err("adc-tm write mode selection err\n"); + + return rc; +} + +static int32_t qpnp_adc_tm_req_sts_check(struct qpnp_adc_tm_chip *chip) +{ + u8 status1 = 0, mode_ctl = 0; + int rc, count = 0; + + /* Re-enable the peripheral */ + rc = qpnp_adc_tm_enable(chip); + if (rc) { + pr_err("adc-tm re-enable peripheral failed\n"); + return rc; + } + + /* The VADC_TM bank needs to be disabled for new conversion request */ + rc = qpnp_adc_tm_read_reg(chip, 
QPNP_ADC_TM_STATUS1, &status1, 1); + if (rc) { + pr_err("adc-tm read status1 failed\n"); + return rc; + } + + /* Disable the bank if a conversion is occurring */ + while (status1 & QPNP_STATUS1_REQ_STS) { + if (count > QPNP_RETRY) { + pr_err("retry error=%d with 0x%x\n", count, status1); + break; + } + /* + * Wait time is based on the optimum sampling rate + * and adding enough time buffer to account for ADC conversions + * occurring on different peripheral banks + */ + usleep_range(QPNP_MIN_TIME, QPNP_MAX_TIME); + rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, + &status1, 1); + if (rc < 0) { + pr_err("adc-tm disable failed\n"); + return rc; + } + count++; + } + + /* Change the mode back to recurring measurement mode */ + mode_ctl = ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT; + rc = qpnp_adc_tm_mode_select(chip, mode_ctl); + if (rc < 0) { + pr_err("adc-tm mode change to recurring failed\n"); + return rc; + } + + /* Disable the peripheral */ + rc = qpnp_adc_tm_disable(chip); + if (rc < 0) { + pr_err("adc-tm peripheral disable failed\n"); + return rc; + } + + return rc; +} + +static int32_t qpnp_adc_tm_get_btm_idx(struct qpnp_adc_tm_chip *chip, + uint32_t btm_chan, uint32_t *btm_chan_idx) +{ + int rc = 0, i; + bool chan_found = false; + + for (i = 0; i < QPNP_ADC_TM_CHAN_NONE; i++) { + if (adc_tm_data[i].btm_amux_chan == btm_chan) { + *btm_chan_idx = i; + chan_found = true; + } + } + if (!chan_found) + return -EINVAL; + return rc; +} + +static int32_t qpnp_adc_tm_check_revision(struct qpnp_adc_tm_chip *chip, + uint32_t btm_chan_num) +{ + u8 rev, perph_subtype; + int rc = 0; + + rc = qpnp_adc_tm_read_reg(chip, QPNP_REVISION3, &rev, 1); + if (rc) { + pr_err("adc-tm revision read failed\n"); + return rc; + } + + rc = qpnp_adc_tm_read_reg(chip, QPNP_PERPH_SUBTYPE, &perph_subtype, 1); + if (rc) { + pr_err("adc-tm perph_subtype read failed\n"); + return rc; + } + + if (perph_subtype == QPNP_PERPH_TYPE2) { + if ((rev < QPNP_REVISION_EIGHT_CHANNEL_SUPPORT) && + (btm_chan_num > QPNP_ADC_TM_M4_ADC_CH_SEL_CTL)) { + pr_debug("Version does not support more than 5 channels\n"); + return -EINVAL; + } + } + + if (perph_subtype == QPNP_PERPH_SUBTYPE_TWO_CHANNEL_SUPPORT) { + if (btm_chan_num > QPNP_ADC_TM_M1_ADC_CH_SEL_CTL) { + pr_debug("Version does not support more than 2 channels\n"); + return -EINVAL; + } + } + + return rc; +} + +static int32_t qpnp_adc_tm_timer_interval_select( + struct qpnp_adc_tm_chip *chip, uint32_t btm_chan, + struct qpnp_vadc_chan_properties *chan_prop) +{ + int rc, chan_idx = 0, i = 0; + bool chan_found = false; + u8 meas_interval_timer2 = 0, timer_interval_store = 0; + uint32_t btm_chan_idx = 0; + + while (i < chip->max_channels_available) { + if (chip->sensor[i].btm_channel_num == btm_chan) { + chan_idx = i; + chan_found = true; + i++; + } else + i++; + } + + if (!chan_found) { + pr_err("Channel not found\n"); + return -EINVAL; + } + + switch (chip->sensor[chan_idx].timer_select) { + case ADC_MEAS_TIMER_SELECT1: + rc = qpnp_adc_tm_write_reg(chip, + QPNP_ADC_TM_MEAS_INTERVAL_CTL, + chip->sensor[chan_idx].meas_interval, 1); + if (rc < 0) { + pr_err("timer1 configure failed\n"); + return rc; + } + break; + case ADC_MEAS_TIMER_SELECT2: + /* Thermal channels uses timer2, default to 1 second */ + rc = qpnp_adc_tm_read_reg(chip, + QPNP_ADC_TM_MEAS_INTERVAL_CTL2, + &meas_interval_timer2, 1); + if (rc < 0) { + pr_err("timer2 configure read failed\n"); + return rc; + } + timer_interval_store = chip->sensor[chan_idx].meas_interval; + timer_interval_store <<= 
QPNP_ADC_TM_MEAS_INTERVAL_CTL2_SHIFT; + timer_interval_store &= QPNP_ADC_TM_MEAS_INTERVAL_CTL2_MASK; + meas_interval_timer2 |= timer_interval_store; + rc = qpnp_adc_tm_write_reg(chip, + QPNP_ADC_TM_MEAS_INTERVAL_CTL2, + meas_interval_timer2, 1); + if (rc < 0) { + pr_err("timer2 configure failed\n"); + return rc; + } + break; + case ADC_MEAS_TIMER_SELECT3: + rc = qpnp_adc_tm_read_reg(chip, + QPNP_ADC_TM_MEAS_INTERVAL_CTL2, + &meas_interval_timer2, 1); + if (rc < 0) { + pr_err("timer3 read failed\n"); + return rc; + } + timer_interval_store = chip->sensor[chan_idx].meas_interval; + timer_interval_store &= QPNP_ADC_TM_MEAS_INTERVAL_CTL3_MASK; + meas_interval_timer2 |= timer_interval_store; + rc = qpnp_adc_tm_write_reg(chip, + QPNP_ADC_TM_MEAS_INTERVAL_CTL2, + meas_interval_timer2, 1); + if (rc < 0) { + pr_err("timer3 configure failed\n"); + return rc; + } + break; + default: + pr_err("Invalid timer selection\n"); + return -EINVAL; + } + + /* Select the timer to use for the corresponding channel */ + rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx); + if (rc < 0) { + pr_err("Invalid btm channel idx\n"); + return rc; + } + rc = qpnp_adc_tm_write_reg(chip, + adc_tm_data[btm_chan_idx].meas_interval_ctl, + chip->sensor[chan_idx].timer_select, 1); + if (rc < 0) { + pr_err("TM channel timer configure failed\n"); + return rc; + } + + pr_debug("timer select:%d, timer_value_within_select:%d, channel:%x\n", + chip->sensor[chan_idx].timer_select, + chip->sensor[chan_idx].meas_interval, + btm_chan); + + return rc; +} + +static int32_t qpnp_adc_tm_add_to_list(struct qpnp_adc_tm_chip *chip, + uint32_t dt_index, + struct qpnp_adc_tm_btm_param *param, + struct qpnp_vadc_chan_properties *chan_prop) +{ + struct qpnp_adc_thr_client_info *client_info = NULL; + bool client_info_exists = false; + + list_for_each_entry(client_info, + &chip->sensor[dt_index].thr_list, list) { + if (client_info->btm_param == param) { + client_info->low_thr_requested = chan_prop->low_thr; + client_info->high_thr_requested = chan_prop->high_thr; + client_info->state_requested = param->state_request; + client_info->state_req_copy = param->state_request; + client_info->notify_low_thr = false; + client_info->notify_high_thr = false; + client_info_exists = true; + pr_debug("client found\n"); + } + } + + if (!client_info_exists) { + client_info = devm_kzalloc(chip->dev, + sizeof(struct qpnp_adc_thr_client_info), GFP_KERNEL); + if (!client_info) + return -ENOMEM; + + pr_debug("new client\n"); + client_info->btm_param = param; + client_info->low_thr_requested = chan_prop->low_thr; + client_info->high_thr_requested = chan_prop->high_thr; + client_info->state_requested = param->state_request; + client_info->state_req_copy = param->state_request; + + list_add_tail(&client_info->list, + &chip->sensor[dt_index].thr_list); + } + + return 0; +} + +static int32_t qpnp_adc_tm_reg_update(struct qpnp_adc_tm_chip *chip, + uint16_t addr, u8 mask, bool state) +{ + u8 reg_value = 0; + int rc = 0; + + rc = qpnp_adc_tm_read_reg(chip, addr, &reg_value, 1); + if (rc < 0) { + pr_err("read failed for addr:0x%x\n", addr); + return rc; + } + + reg_value = reg_value & ~mask; + if (state) + reg_value |= mask; + + pr_debug("state:%d, reg:0x%x with bits:0x%x and mask:0x%x\n", + state, addr, reg_value, ~mask); + rc = qpnp_adc_tm_write_reg(chip, addr, reg_value, 1); + if (rc < 0) { + pr_err("write failed for addr:%x\n", addr); + return rc; + } + + return rc; +} + +static int32_t qpnp_adc_tm_read_thr_value(struct qpnp_adc_tm_chip *chip, + uint32_t btm_chan) +{ + 
int rc = 0; + u8 data_lsb = 0, data_msb = 0; + uint32_t btm_chan_idx = 0; + int32_t low_thr = 0, high_thr = 0; + + rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx); + if (rc < 0) { + pr_err("Invalid btm channel idx\n"); + return rc; + } + + rc = qpnp_adc_tm_read_reg(chip, + adc_tm_data[btm_chan_idx].low_thr_lsb_addr, + &data_lsb, 1); + if (rc < 0) { + pr_err("low threshold lsb setting failed\n"); + return rc; + } + + rc = qpnp_adc_tm_read_reg(chip, + adc_tm_data[btm_chan_idx].low_thr_msb_addr, + &data_msb, 1); + if (rc < 0) { + pr_err("low threshold msb setting failed\n"); + return rc; + } + + low_thr = (data_msb << 8) | data_lsb; + + rc = qpnp_adc_tm_read_reg(chip, + adc_tm_data[btm_chan_idx].high_thr_lsb_addr, + &data_lsb, 1); + if (rc < 0) { + pr_err("high threshold lsb setting failed\n"); + return rc; + } + + rc = qpnp_adc_tm_read_reg(chip, + adc_tm_data[btm_chan_idx].high_thr_msb_addr, + &data_msb, 1); + if (rc < 0) { + pr_err("high threshold msb setting failed\n"); + return rc; + } + + high_thr = (data_msb << 8) | data_lsb; + + pr_debug("configured thresholds high:0x%x and low:0x%x\n", + high_thr, low_thr); + + return rc; +} + +static int32_t qpnp_adc_tm_thr_update(struct qpnp_adc_tm_chip *chip, + uint32_t btm_chan, int32_t high_thr, int32_t low_thr) +{ + int rc = 0; + uint32_t btm_chan_idx = 0; + + rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx); + if (rc < 0) { + pr_err("Invalid btm channel idx\n"); + return rc; + } + + rc = qpnp_adc_tm_write_reg(chip, + adc_tm_data[btm_chan_idx].low_thr_lsb_addr, + QPNP_ADC_TM_THR_LSB_MASK(low_thr), 1); + if (rc < 0) { + pr_err("low threshold lsb setting failed\n"); + return rc; + } + + rc = qpnp_adc_tm_write_reg(chip, + adc_tm_data[btm_chan_idx].low_thr_msb_addr, + QPNP_ADC_TM_THR_MSB_MASK(low_thr), 1); + if (rc < 0) { + pr_err("low threshold msb setting failed\n"); + return rc; + } + + rc = qpnp_adc_tm_write_reg(chip, + adc_tm_data[btm_chan_idx].high_thr_lsb_addr, + QPNP_ADC_TM_THR_LSB_MASK(high_thr), 1); + if (rc < 0) { + pr_err("high threshold lsb setting failed\n"); + return rc; + } + + rc = qpnp_adc_tm_write_reg(chip, + adc_tm_data[btm_chan_idx].high_thr_msb_addr, + QPNP_ADC_TM_THR_MSB_MASK(high_thr), 1); + if (rc < 0) + pr_err("high threshold msb setting failed\n"); + + pr_debug("client requested high:%d and low:%d\n", + high_thr, low_thr); + + return rc; +} + +static int32_t qpnp_adc_tm_manage_thresholds(struct qpnp_adc_tm_chip *chip, + uint32_t dt_index, uint32_t btm_chan) +{ + struct qpnp_adc_thr_client_info *client_info = NULL; + struct list_head *thr_list; + int high_thr = 0, low_thr = 0, rc = 0; + + + /* + * high_thr/low_thr starting point and reset the high_thr_set and + * low_thr_set back to reset since the thresholds will be + * recomputed. 
+ */ + list_for_each(thr_list, + &chip->sensor[dt_index].thr_list) { + client_info = list_entry(thr_list, + struct qpnp_adc_thr_client_info, list); + high_thr = client_info->high_thr_requested; + low_thr = client_info->low_thr_requested; + client_info->high_thr_set = false; + client_info->low_thr_set = false; + } + + pr_debug("init threshold is high:%d and low:%d\n", high_thr, low_thr); + + /* Find the min of high_thr and max of low_thr */ + list_for_each(thr_list, + &chip->sensor[dt_index].thr_list) { + client_info = list_entry(thr_list, + struct qpnp_adc_thr_client_info, list); + if ((client_info->state_req_copy == ADC_TM_HIGH_THR_ENABLE) || + (client_info->state_req_copy == + ADC_TM_HIGH_LOW_THR_ENABLE)) + if (client_info->high_thr_requested < high_thr) + high_thr = client_info->high_thr_requested; + + if ((client_info->state_req_copy == ADC_TM_LOW_THR_ENABLE) || + (client_info->state_req_copy == + ADC_TM_HIGH_LOW_THR_ENABLE)) + if (client_info->low_thr_requested > low_thr) + low_thr = client_info->low_thr_requested; + + pr_debug("threshold compared is high:%d and low:%d\n", + client_info->high_thr_requested, + client_info->low_thr_requested); + pr_debug("current threshold is high:%d and low:%d\n", + high_thr, low_thr); + } + + /* Check which of the high_thr and low_thr got set */ + list_for_each(thr_list, + &chip->sensor[dt_index].thr_list) { + client_info = list_entry(thr_list, + struct qpnp_adc_thr_client_info, list); + if ((client_info->state_req_copy == ADC_TM_HIGH_THR_ENABLE) || + (client_info->state_req_copy == + ADC_TM_HIGH_LOW_THR_ENABLE)) + if (high_thr == client_info->high_thr_requested) + client_info->high_thr_set = true; + + if ((client_info->state_req_copy == ADC_TM_LOW_THR_ENABLE) || + (client_info->state_req_copy == + ADC_TM_HIGH_LOW_THR_ENABLE)) + if (low_thr == client_info->low_thr_requested) + client_info->low_thr_set = true; + } + + rc = qpnp_adc_tm_thr_update(chip, btm_chan, high_thr, low_thr); + if (rc < 0) + pr_err("setting chan:%d threshold failed\n", btm_chan); + + pr_debug("threshold written is high:%d and low:%d\n", + high_thr, low_thr); + + return 0; +} + +static int32_t qpnp_adc_tm_channel_configure(struct qpnp_adc_tm_chip *chip, + uint32_t btm_chan, + struct qpnp_vadc_chan_properties *chan_prop, + uint32_t amux_channel) +{ + int rc = 0, i = 0, chan_idx = 0; + bool chan_found = false, high_thr_set = false, low_thr_set = false; + u8 sensor_mask = 0; + struct qpnp_adc_thr_client_info *client_info = NULL; + uint32_t btm_chan_idx = 0; + + while (i < chip->max_channels_available) { + if (chip->sensor[i].btm_channel_num == btm_chan) { + chan_idx = i; + chan_found = true; + i++; + } else + i++; + } + + if (!chan_found) { + pr_err("Channel not found\n"); + return -EINVAL; + } + + rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx); + if (rc < 0) { + pr_err("Invalid btm channel idx\n"); + return rc; + } + + sensor_mask = 1 << chan_idx; + if (!chip->sensor[chan_idx].thermal_node) { + /* Update low and high notification thresholds */ + rc = qpnp_adc_tm_manage_thresholds(chip, chan_idx, + btm_chan); + if (rc < 0) { + pr_err("setting chan:%d threshold failed\n", btm_chan); + return rc; + } + + list_for_each_entry(client_info, + &chip->sensor[chan_idx].thr_list, list) { + if (client_info->high_thr_set) + high_thr_set = true; + if (client_info->low_thr_set) + low_thr_set = true; + } + + if (low_thr_set) { + pr_debug("low sensor mask:%x with state:%d\n", + sensor_mask, chan_prop->state_request); + /* Enable low threshold's interrupt */ + rc = 
qpnp_adc_tm_reg_update(chip, + QPNP_ADC_TM_LOW_THR_INT_EN, + sensor_mask, true); + if (rc < 0) { + pr_err("low thr enable err:%d\n", btm_chan); + return rc; + } + } + + if (high_thr_set) { + /* Enable high threshold's interrupt */ + pr_debug("high sensor mask:%x\n", sensor_mask); + rc = qpnp_adc_tm_reg_update(chip, + QPNP_ADC_TM_HIGH_THR_INT_EN, + sensor_mask, true); + if (rc < 0) { + pr_err("high thr enable err:%d\n", btm_chan); + return rc; + } + } + } + + /* Enable corresponding BTM channel measurement */ + rc = qpnp_adc_tm_reg_update(chip, + QPNP_ADC_TM_MULTI_MEAS_EN, sensor_mask, true); + if (rc < 0) { + pr_err("multi measurement en failed\n"); + return rc; + } + return rc; +} + +static int32_t qpnp_adc_tm_configure(struct qpnp_adc_tm_chip *chip, + struct qpnp_adc_amux_properties *chan_prop) +{ + u8 decimation = 0, op_cntrl = 0, mode_ctl = 0; + int rc = 0; + uint32_t btm_chan = 0; + + /* Set measurement in single measurement mode */ + mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT; + rc = qpnp_adc_tm_mode_select(chip, mode_ctl); + if (rc < 0) { + pr_err("adc-tm single mode select failed\n"); + return rc; + } + + /* Disable bank */ + rc = qpnp_adc_tm_disable(chip); + if (rc) + return rc; + + /* Check if a conversion is in progress */ + rc = qpnp_adc_tm_req_sts_check(chip); + if (rc < 0) { + pr_err("adc-tm req_sts check failed\n"); + return rc; + } + + /* Configure AMUX channel select for the corresponding BTM channel*/ + btm_chan = chan_prop->chan_prop->tm_channel_select; + rc = qpnp_adc_tm_write_reg(chip, btm_chan, chan_prop->amux_channel, 1); + if (rc < 0) { + pr_err("adc-tm channel selection err\n"); + return rc; + } + + /* Digital parameter setup */ + decimation |= chan_prop->decimation << + QPNP_ADC_DIG_DEC_RATIO_SEL_SHIFT; + rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_DIG_PARAM, decimation, 1); + if (rc < 0) { + pr_err("adc-tm digital parameter setup err\n"); + return rc; + } + + /* Hardware setting time */ + rc = qpnp_adc_tm_write_reg(chip, QPNP_HW_SETTLE_DELAY, + chan_prop->hw_settle_time, 1); + if (rc < 0) { + pr_err("adc-tm hw settling time setup err\n"); + return rc; + } + + /* Fast averaging setup/enable */ + rc = qpnp_adc_tm_fast_avg_en(chip, &chan_prop->fast_avg_setup); + if (rc < 0) { + pr_err("adc-tm fast-avg enable err\n"); + return rc; + } + + rc = qpnp_adc_tm_write_reg(chip, QPNP_FAST_AVG_CTL, + chan_prop->fast_avg_setup, 1); + if (rc < 0) { + pr_err("adc-tm fast-avg setup err\n"); + return rc; + } + + /* Measurement interval setup */ + rc = qpnp_adc_tm_timer_interval_select(chip, btm_chan, + chan_prop->chan_prop); + if (rc < 0) { + pr_err("adc-tm timer select failed\n"); + return rc; + } + + /* Channel configuration setup */ + rc = qpnp_adc_tm_channel_configure(chip, btm_chan, + chan_prop->chan_prop, chan_prop->amux_channel); + if (rc < 0) { + pr_err("adc-tm channel configure failed\n"); + return rc; + } + + /* Recurring interval measurement enable */ + rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_MEAS_INTERVAL_OP_CTL, + &op_cntrl, 1); + op_cntrl |= QPNP_ADC_MEAS_INTERVAL_OP; + rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_MEAS_INTERVAL_OP_CTL, + op_cntrl, true); + if (rc < 0) { + pr_err("adc-tm meas interval op configure failed\n"); + return rc; + } + + /* Enable bank */ + rc = qpnp_adc_tm_enable(chip); + if (rc) + return rc; + + /* Request conversion */ + rc = qpnp_adc_tm_write_reg(chip, QPNP_CONV_REQ, QPNP_CONV_REQ_SET, 1); + if (rc < 0) { + pr_err("adc-tm request conversion failed\n"); + return rc; + } + + return 0; +} + +static int qpnp_adc_tm_set_mode(struct 
qpnp_adc_tm_sensor *adc_tm, + enum thermal_device_mode mode) +{ + struct qpnp_adc_tm_chip *chip = adc_tm->chip; + int rc = 0, channel; + u8 sensor_mask = 0, mode_ctl = 0; + uint32_t btm_chan_idx = 0, btm_chan = 0; + + if (qpnp_adc_tm_is_valid(chip)) { + pr_err("invalid device\n"); + return -ENODEV; + } + + if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num)) + return -EINVAL; + + mutex_lock(&chip->adc->adc_lock); + + btm_chan = adc_tm->btm_channel_num; + rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx); + if (rc < 0) { + pr_err("Invalid btm channel idx\n"); + goto fail; + } + + if (mode == THERMAL_DEVICE_ENABLED) { + chip->adc->amux_prop->amux_channel = + adc_tm->vadc_channel_num; + channel = adc_tm->sensor_num; + chip->adc->amux_prop->decimation = + chip->adc->adc_channels[channel].adc_decimation; + chip->adc->amux_prop->hw_settle_time = + chip->adc->adc_channels[channel].hw_settle_time; + chip->adc->amux_prop->fast_avg_setup = + chip->adc->adc_channels[channel].fast_avg_setup; + chip->adc->amux_prop->mode_sel = + ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT; + chip->adc->amux_prop->chan_prop->low_thr = adc_tm->low_thr; + chip->adc->amux_prop->chan_prop->high_thr = adc_tm->high_thr; + chip->adc->amux_prop->chan_prop->tm_channel_select = + adc_tm->btm_channel_num; + chip->adc->amux_prop->calib_type = + chip->adc->adc_channels[channel].calib_type; + + rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop); + if (rc) { + pr_err("adc-tm configure failed with %d\n", rc); + goto fail; + } + } else if (mode == THERMAL_DEVICE_DISABLED) { + sensor_mask = 1 << adc_tm->sensor_num; + mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT; + rc = qpnp_adc_tm_mode_select(chip, mode_ctl); + if (rc < 0) { + pr_err("adc-tm single mode select failed\n"); + goto fail; + } + + /* Disable bank */ + rc = qpnp_adc_tm_disable(chip); + if (rc < 0) { + pr_err("adc-tm disable failed\n"); + goto fail; + } + + /* Check if a conversion is in progress */ + rc = qpnp_adc_tm_req_sts_check(chip); + if (rc < 0) { + pr_err("adc-tm req_sts check failed\n"); + goto fail; + } + rc = qpnp_adc_tm_reg_update(chip, + QPNP_ADC_TM_MULTI_MEAS_EN, + sensor_mask, false); + if (rc < 0) { + pr_err("multi measurement update failed\n"); + goto fail; + } + + rc = qpnp_adc_tm_enable_if_channel_meas(chip); + if (rc < 0) { + pr_err("re-enabling measurement failed\n"); + goto fail; + } + } + + adc_tm->mode = mode; + +fail: + mutex_unlock(&chip->adc->adc_lock); + + return 0; +} + +static int qpnp_adc_tm_activate_trip_type(struct qpnp_adc_tm_sensor *adc_tm, + int trip, enum thermal_trip_activation_mode mode) +{ + struct qpnp_adc_tm_chip *chip = adc_tm->chip; + int rc = 0, sensor_mask = 0; + u8 thr_int_en = 0; + bool state = false; + uint32_t btm_chan_idx = 0, btm_chan = 0; + + if (qpnp_adc_tm_is_valid(chip)) + return -ENODEV; + + if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num)) + return -EINVAL; + + if (mode == THERMAL_TRIP_ACTIVATION_ENABLED) + state = true; + + sensor_mask = 1 << adc_tm->sensor_num; + + pr_debug("Sensor number:%x with state:%d\n", + adc_tm->sensor_num, state); + + btm_chan = adc_tm->btm_channel_num; + rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx); + if (rc < 0) { + pr_err("Invalid btm channel idx\n"); + return rc; + } + + switch (trip) { + case ADC_TM_TRIP_HIGH_WARM: + /* low_thr (lower voltage) for higher temp */ + thr_int_en = adc_tm_data[btm_chan_idx].low_thr_int_chan_en; + rc = qpnp_adc_tm_reg_update(chip, + QPNP_ADC_TM_LOW_THR_INT_EN, + sensor_mask, state); + if (rc) + 
pr_err("channel:%x failed\n", btm_chan); + break; + case ADC_TM_TRIP_LOW_COOL: + /* high_thr (higher voltage) for cooler temp */ + thr_int_en = adc_tm_data[btm_chan_idx].high_thr_int_chan_en; + rc = qpnp_adc_tm_reg_update(chip, + QPNP_ADC_TM_HIGH_THR_INT_EN, + sensor_mask, state); + if (rc) + pr_err("channel:%x failed\n", btm_chan); + break; + default: + return -EINVAL; + } + + return rc; +} + +static int qpnp_adc_tm_set_trip_temp(void *data, int low_temp, int high_temp) +{ + struct qpnp_adc_tm_sensor *adc_tm = data; + struct qpnp_adc_tm_chip *chip = adc_tm->chip; + struct qpnp_adc_tm_config tm_config; + u8 trip_cool_thr0, trip_cool_thr1, trip_warm_thr0, trip_warm_thr1; + uint16_t reg_low_thr_lsb, reg_low_thr_msb; + uint16_t reg_high_thr_lsb, reg_high_thr_msb; + int rc = 0; + uint32_t btm_chan = 0, btm_chan_idx = 0; + + if (qpnp_adc_tm_is_valid(chip)) + return -ENODEV; + + if (qpnp_adc_tm_check_revision(chip, adc_tm->btm_channel_num)) + return -EINVAL; + + tm_config.channel = adc_tm->vadc_channel_num; + tm_config.high_thr_temp = tm_config.low_thr_temp = 0; + if (high_temp != INT_MAX) + tm_config.high_thr_temp = high_temp; + if (low_temp != INT_MIN) + tm_config.low_thr_temp = low_temp; + + if ((high_temp == INT_MAX) && (low_temp == INT_MIN)) { + pr_err("No trips to set\n"); + return -EINVAL; + } + + pr_debug("requested a high - %d and low - %d\n", + tm_config.high_thr_temp, tm_config.low_thr_temp); + rc = qpnp_adc_tm_scale_therm_voltage_pu2(chip->adc, + chip->adc->adc_prop, &tm_config); + if (rc < 0) { + pr_err("Failed to lookup the adc-tm thresholds\n"); + return rc; + } + + trip_warm_thr0 = ((tm_config.low_thr_voltage << 24) >> 24); + trip_warm_thr1 = ((tm_config.low_thr_voltage << 16) >> 24); + trip_cool_thr0 = ((tm_config.high_thr_voltage << 24) >> 24); + trip_cool_thr1 = ((tm_config.high_thr_voltage << 16) >> 24); + + pr_debug("low_thr:0x%llx, high_thr:0x%llx\n", tm_config.low_thr_voltage, + tm_config.high_thr_voltage); + + btm_chan = adc_tm->btm_channel_num; + rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan, &btm_chan_idx); + if (rc < 0) { + pr_err("Invalid btm channel idx\n"); + return rc; + } + + reg_low_thr_lsb = adc_tm_data[btm_chan_idx].low_thr_lsb_addr; + reg_low_thr_msb = adc_tm_data[btm_chan_idx].low_thr_msb_addr; + reg_high_thr_lsb = adc_tm_data[btm_chan_idx].high_thr_lsb_addr; + reg_high_thr_msb = adc_tm_data[btm_chan_idx].high_thr_msb_addr; + + if (high_temp != INT_MAX) { + rc = qpnp_adc_tm_write_reg(chip, reg_low_thr_lsb, + trip_cool_thr0, 1); + if (rc) { + pr_err("adc-tm_tm read threshold err\n"); + return rc; + } + + rc = qpnp_adc_tm_write_reg(chip, reg_low_thr_msb, + trip_cool_thr1, 1); + if (rc) { + pr_err("adc-tm_tm read threshold err\n"); + return rc; + } + adc_tm->low_thr = tm_config.high_thr_voltage; + + rc = qpnp_adc_tm_activate_trip_type(adc_tm, + ADC_TM_TRIP_HIGH_WARM, + THERMAL_TRIP_ACTIVATION_ENABLED); + if (rc) { + pr_err("adc-tm warm activation failed\n"); + return rc; + } + } else { + rc = qpnp_adc_tm_activate_trip_type(adc_tm, + ADC_TM_TRIP_HIGH_WARM, + THERMAL_TRIP_ACTIVATION_DISABLED); + if (rc) { + pr_err("adc-tm warm deactivation failed\n"); + return rc; + } + } + + if (low_temp != INT_MIN) { + rc = qpnp_adc_tm_write_reg(chip, reg_high_thr_lsb, + trip_warm_thr0, 1); + if (rc) { + pr_err("adc-tm_tm read threshold err\n"); + return rc; + } + + rc = qpnp_adc_tm_write_reg(chip, reg_high_thr_msb, + trip_warm_thr1, 1); + if (rc) { + pr_err("adc-tm_tm read threshold err\n"); + return rc; + } + adc_tm->high_thr = tm_config.low_thr_voltage; + + rc = 
qpnp_adc_tm_activate_trip_type(adc_tm, + ADC_TM_TRIP_LOW_COOL, + THERMAL_TRIP_ACTIVATION_ENABLED); + if (rc) { + pr_err("adc-tm cool activation failed\n"); + return rc; + } + } else { + rc = qpnp_adc_tm_activate_trip_type(adc_tm, + ADC_TM_TRIP_LOW_COOL, + THERMAL_TRIP_ACTIVATION_DISABLED); + if (rc) { + pr_err("adc-tm cool deactivation failed\n"); + return rc; + } + } + + if ((high_temp != INT_MAX) || (low_temp != INT_MIN)) { + rc = qpnp_adc_tm_set_mode(adc_tm, THERMAL_DEVICE_ENABLED); + if (rc) { + pr_err("sensor enabled failed\n"); + return rc; + } + } else { + rc = qpnp_adc_tm_set_mode(adc_tm, THERMAL_DEVICE_DISABLED); + if (rc) { + pr_err("sensor disable failed\n"); + return rc; + } + } + + return 0; +} + +static void notify_battery_therm(struct qpnp_adc_tm_sensor *adc_tm) +{ + struct qpnp_adc_thr_client_info *client_info = NULL; + + list_for_each_entry(client_info, + &adc_tm->thr_list, list) { + /* Batt therm's warm temperature translates to low voltage */ + if (client_info->notify_low_thr) { + /* HIGH_STATE = WARM_TEMP for battery client */ + client_info->btm_param->threshold_notification( + ADC_TM_WARM_STATE, client_info->btm_param->btm_ctx); + client_info->notify_low_thr = false; + } + + /* Batt therm's cool temperature translates to high voltage */ + if (client_info->notify_high_thr) { + /* LOW_STATE = COOL_TEMP for battery client */ + client_info->btm_param->threshold_notification( + ADC_TM_COOL_STATE, client_info->btm_param->btm_ctx); + client_info->notify_high_thr = false; + } + } +} + +static void notify_clients(struct qpnp_adc_tm_sensor *adc_tm) +{ + struct qpnp_adc_thr_client_info *client_info = NULL; + + list_for_each_entry(client_info, + &adc_tm->thr_list, list) { + /* For non batt therm clients */ + if (client_info->notify_low_thr) { + if (client_info->btm_param->threshold_notification + != NULL) { + pr_debug("notify kernel with low state\n"); + client_info->btm_param->threshold_notification( + ADC_TM_LOW_STATE, + client_info->btm_param->btm_ctx); + client_info->notify_low_thr = false; + } + } + + if (client_info->notify_high_thr) { + if (client_info->btm_param->threshold_notification + != NULL) { + pr_debug("notify kernel with high state\n"); + client_info->btm_param->threshold_notification( + ADC_TM_HIGH_STATE, + client_info->btm_param->btm_ctx); + client_info->notify_high_thr = false; + } + } + } +} + +static void notify_adc_tm_fn(struct work_struct *work) +{ + struct qpnp_adc_tm_sensor *adc_tm = container_of(work, + struct qpnp_adc_tm_sensor, work); + + if (adc_tm->thermal_node) { + pr_debug("notifying uspace client\n"); + of_thermal_handle_trip(adc_tm->tz_dev); + } else { + if (adc_tm->scale_type == SCALE_RBATT_THERM) + notify_battery_therm(adc_tm); + else + notify_clients(adc_tm); + } +} + +static int qpnp_adc_tm_recalib_request_check(struct qpnp_adc_tm_chip *chip, + int sensor_num, u8 status_high, u8 *notify_check) +{ + int rc = 0; + u8 sensor_mask = 0, mode_ctl = 0; + int32_t old_thr = 0, new_thr = 0; + uint32_t channel, btm_chan_num, scale_type; + struct qpnp_adc_thr_client_info *client_info = NULL; + struct list_head *thr_list; + struct iio_channel *chan_adc; + bool status = false; + + if (!chip->adc_tm_recalib_check) { + *notify_check = 1; + return rc; + } + + list_for_each(thr_list, &chip->sensor[sensor_num].thr_list) { + client_info = list_entry(thr_list, + struct qpnp_adc_thr_client_info, list); + channel = client_info->btm_param->channel; + btm_chan_num = chip->sensor[sensor_num].btm_channel_num; + sensor_mask = 1 << sensor_num; + chan_adc = 
chip->sensor[sensor_num].sen_adc; + + rc = iio_read_channel_processed(chan_adc, &new_thr); + if (rc < 0) { + pr_err("failure to read IIO channel=%d\n", + client_info->btm_param->channel); + goto fail; + } + + if (status_high) + old_thr = client_info->btm_param->high_thr; + else + old_thr = client_info->btm_param->low_thr; + + if (new_thr > old_thr) + status = (status_high) ? true : false; + else + status = (status_high) ? false : true; + + pr_debug( + "recalib:sen=%d, new_thr=%d, old_thr=%d status=%d valid_status=%d\n", + sensor_num, new_thr, + old_thr, status_high, status); + + rc = qpnp_adc_tm_read_thr_value(chip, btm_chan_num); + if (rc < 0) { + pr_err("adc-tm thresholds read failed\n"); + goto fail; + } + + if (status) { + *notify_check = 1; + pr_debug("Client can be notify\n"); + return rc; + } + + pr_debug("Client can not be notify, restart measurement\n"); + /* Set measurement in single measurement mode */ + mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT; + rc = qpnp_adc_tm_mode_select(chip, mode_ctl); + if (rc < 0) { + pr_err("adc-tm single mode select failed\n"); + goto fail; + } + + /* Disable bank */ + rc = qpnp_adc_tm_disable(chip); + if (rc < 0) { + pr_err("adc-tm disable failed\n"); + goto fail; + } + + /* Check if a conversion is in progress */ + rc = qpnp_adc_tm_req_sts_check(chip); + if (rc < 0) { + pr_err("adc-tm req_sts check failed\n"); + goto fail; + } + + rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_LOW_THR_INT_EN, + sensor_mask, false); + if (rc < 0) { + pr_err("low threshold int write failed\n"); + goto fail; + } + + rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_HIGH_THR_INT_EN, + sensor_mask, false); + if (rc < 0) { + pr_err("high threshold int enable failed\n"); + goto fail; + } + + rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_MULTI_MEAS_EN, + sensor_mask, false); + if (rc < 0) { + pr_err("multi measurement en failed\n"); + goto fail; + } + + /* restart measurement */ + scale_type = chip->sensor[sensor_num].scale_type; + chip->adc->amux_prop->amux_channel = channel; + chip->adc->amux_prop->decimation = + chip->adc->adc_channels[sensor_num].adc_decimation; + chip->adc->amux_prop->hw_settle_time = + chip->adc->adc_channels[sensor_num].hw_settle_time; + chip->adc->amux_prop->fast_avg_setup = + chip->adc->adc_channels[sensor_num].fast_avg_setup; + chip->adc->amux_prop->mode_sel = + ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT; + adc_tm_rscale_fn[scale_type].chan(chip->adc, + client_info->btm_param, + &chip->adc->amux_prop->chan_prop->low_thr, + &chip->adc->amux_prop->chan_prop->high_thr); + qpnp_adc_tm_add_to_list(chip, sensor_num, + client_info->btm_param, + chip->adc->amux_prop->chan_prop); + chip->adc->amux_prop->chan_prop->tm_channel_select = + chip->sensor[sensor_num].btm_channel_num; + chip->adc->amux_prop->chan_prop->state_request = + client_info->btm_param->state_request; + + rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop); + if (rc) { + pr_err("adc-tm configure failed with %d\n", rc); + goto fail; + } + *notify_check = 0; + pr_debug("BTM channel reconfigured for measuremnt\n"); + } +fail: + return rc; +} + +static int qpnp_adc_tm_disable_rearm_high_thresholds( + struct qpnp_adc_tm_chip *chip, int sensor_num) +{ + + struct qpnp_adc_thr_client_info *client_info = NULL; + struct list_head *thr_list; + uint32_t btm_chan_num = 0, btm_chan_idx = 0; + u8 sensor_mask = 0, notify_check = 0; + int rc = 0; + + btm_chan_num = chip->sensor[sensor_num].btm_channel_num; + rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan_num, &btm_chan_idx); + if (rc < 0) { + 
pr_err("Invalid btm channel idx\n"); + return rc; + } + + pr_debug("high:sen:%d, hs:0x%x, ls:0x%x, meas_en:0x%x\n", + sensor_num, chip->th_info.adc_tm_high_enable, + chip->th_info.adc_tm_low_enable, + chip->th_info.qpnp_adc_tm_meas_en); + if (!chip->sensor[sensor_num].thermal_node) { + /* + * For non thermal registered clients such as usb_id, + * vbatt, pmic_therm + */ + sensor_mask = 1 << sensor_num; + pr_debug("non thermal node - mask:%x\n", sensor_mask); + rc = qpnp_adc_tm_recalib_request_check(chip, + sensor_num, true, &notify_check); + if (rc < 0 || !notify_check) { + pr_debug("Calib recheck re-armed rc=%d\n", rc); + chip->th_info.adc_tm_high_enable = 0; + return rc; + } + } else { + /* + * Uses the thermal sysfs registered device to + * disable the corresponding high voltage threshold + * which is triggered by low temp + */ + sensor_mask = 1 << sensor_num; + pr_debug("thermal node with mask:%x\n", sensor_mask); + rc = qpnp_adc_tm_activate_trip_type( + &chip->sensor[sensor_num], + ADC_TM_TRIP_LOW_COOL, + THERMAL_TRIP_ACTIVATION_DISABLED); + if (rc < 0) { + pr_err("notify error:%d\n", sensor_num); + return rc; + } + } + list_for_each(thr_list, &chip->sensor[sensor_num].thr_list) { + client_info = list_entry(thr_list, + struct qpnp_adc_thr_client_info, list); + if (client_info->high_thr_set) { + client_info->high_thr_set = false; + client_info->notify_high_thr = true; + if (client_info->state_req_copy == + ADC_TM_HIGH_LOW_THR_ENABLE) + client_info->state_req_copy = + ADC_TM_LOW_THR_ENABLE; + else + client_info->state_req_copy = + ADC_TM_HIGH_THR_DISABLE; + } + } + qpnp_adc_tm_manage_thresholds(chip, sensor_num, btm_chan_num); + + rc = qpnp_adc_tm_reg_update(chip, + QPNP_ADC_TM_MULTI_MEAS_EN, + sensor_mask, false); + if (rc < 0) { + pr_err("multi meas disable failed\n"); + return rc; + } + + rc = qpnp_adc_tm_enable_if_channel_meas(chip); + if (rc < 0) { + pr_err("re-enabling measurement failed\n"); + return rc; + } + + queue_work(chip->sensor[sensor_num].req_wq, + &chip->sensor[sensor_num].work); + + return rc; +} + +static int qpnp_adc_tm_disable_rearm_low_thresholds( + struct qpnp_adc_tm_chip *chip, int sensor_num) +{ + struct qpnp_adc_thr_client_info *client_info = NULL; + struct list_head *thr_list; + uint32_t btm_chan_num = 0, btm_chan_idx = 0; + u8 sensor_mask = 0, notify_check = 0; + int rc = 0; + + btm_chan_num = chip->sensor[sensor_num].btm_channel_num; + rc = qpnp_adc_tm_get_btm_idx(chip, btm_chan_num, &btm_chan_idx); + if (rc < 0) { + pr_err("Invalid btm channel idx\n"); + return rc; + } + + pr_debug("low:sen:%d, hs:0x%x, ls:0x%x, meas_en:0x%x\n", + sensor_num, chip->th_info.adc_tm_high_enable, + chip->th_info.adc_tm_low_enable, + chip->th_info.qpnp_adc_tm_meas_en); + if (!chip->sensor[sensor_num].thermal_node) { + /* + * For non thermal registered clients such as usb_id, + * vbatt, pmic_therm + */ + sensor_mask = 1 << sensor_num; + pr_debug("non thermal node - mask:%x\n", sensor_mask); + rc = qpnp_adc_tm_recalib_request_check(chip, + sensor_num, false, &notify_check); + if (rc < 0 || !notify_check) { + pr_debug("Calib recheck re-armed rc=%d\n", rc); + chip->th_info.adc_tm_low_enable = 0; + return rc; + } + } else { + /* + * Uses the thermal sysfs registered device to disable + * the corresponding high voltage threshold which + * is triggered by low temp + */ + sensor_mask = 1 << sensor_num; + pr_debug("thermal node with mask:%x\n", sensor_mask); + rc = qpnp_adc_tm_activate_trip_type( + &chip->sensor[sensor_num], + ADC_TM_TRIP_HIGH_WARM, + THERMAL_TRIP_ACTIVATION_DISABLED); + if (rc 
< 0) { + pr_err("notify error:%d\n", sensor_num); + return rc; + } + } + list_for_each(thr_list, &chip->sensor[sensor_num].thr_list) { + client_info = list_entry(thr_list, + struct qpnp_adc_thr_client_info, list); + if (client_info->low_thr_set) { + client_info->low_thr_set = false; + client_info->notify_low_thr = true; + if (client_info->state_req_copy == + ADC_TM_HIGH_LOW_THR_ENABLE) + client_info->state_req_copy = + ADC_TM_HIGH_THR_ENABLE; + else + client_info->state_req_copy = + ADC_TM_LOW_THR_DISABLE; + } + } + qpnp_adc_tm_manage_thresholds(chip, sensor_num, btm_chan_num); + + rc = qpnp_adc_tm_reg_update(chip, + QPNP_ADC_TM_MULTI_MEAS_EN, + sensor_mask, false); + if (rc < 0) { + pr_err("multi meas disable failed\n"); + return rc; + } + + rc = qpnp_adc_tm_enable_if_channel_meas(chip); + if (rc < 0) { + pr_err("re-enabling measurement failed\n"); + return rc; + } + + queue_work(chip->sensor[sensor_num].req_wq, + &chip->sensor[sensor_num].work); + + return rc; +} + +static int qpnp_adc_tm_read_status(struct qpnp_adc_tm_chip *chip) +{ + int rc = 0, sensor_notify_num = 0, i = 0, sensor_num = 0; + unsigned long flags; + + if (qpnp_adc_tm_is_valid(chip)) + return -ENODEV; + + mutex_lock(&chip->adc->adc_lock); + + rc = qpnp_adc_tm_req_sts_check(chip); + if (rc) { + pr_err("adc-tm-tm req sts check failed with %d\n", rc); + goto fail; + } + + if (chip->th_info.adc_tm_high_enable) { + spin_lock_irqsave(&chip->th_info.adc_tm_high_lock, flags); + sensor_notify_num = chip->th_info.adc_tm_high_enable; + chip->th_info.adc_tm_high_enable = 0; + spin_unlock_irqrestore(&chip->th_info.adc_tm_high_lock, flags); + while (i < chip->max_channels_available) { + if ((sensor_notify_num & 0x1) == 1) { + sensor_num = i; + rc = qpnp_adc_tm_disable_rearm_high_thresholds( + chip, sensor_num); + if (rc < 0) { + pr_err("rearm threshold failed\n"); + goto fail; + } + } + sensor_notify_num >>= 1; + i++; + } + } + + if (chip->th_info.adc_tm_low_enable) { + spin_lock_irqsave(&chip->th_info.adc_tm_low_lock, flags); + sensor_notify_num = chip->th_info.adc_tm_low_enable; + chip->th_info.adc_tm_low_enable = 0; + spin_unlock_irqrestore(&chip->th_info.adc_tm_low_lock, flags); + i = 0; + while (i < chip->max_channels_available) { + if ((sensor_notify_num & 0x1) == 1) { + sensor_num = i; + rc = qpnp_adc_tm_disable_rearm_low_thresholds( + chip, sensor_num); + if (rc < 0) { + pr_err("rearm threshold failed\n"); + goto fail; + } + } + sensor_notify_num >>= 1; + i++; + } + } + +fail: + mutex_unlock(&chip->adc->adc_lock); + + return rc; +} + +static void qpnp_adc_tm_high_thr_work(struct work_struct *work) +{ + struct qpnp_adc_tm_chip *chip = container_of(work, + struct qpnp_adc_tm_chip, trigger_high_thr_work); + int rc; + + pr_debug("thr:0x%x\n", chip->th_info.adc_tm_high_enable); + + rc = qpnp_adc_tm_read_status(chip); + if (rc < 0) + pr_err("adc-tm high thr work failed\n"); +} + +static irqreturn_t qpnp_adc_tm_high_thr_isr(int irq, void *data) +{ + struct qpnp_adc_tm_chip *chip = data; + u8 mode_ctl = 0, status1 = 0, sensor_mask = 0; + int rc = 0, sensor_notify_num = 0, i = 0, sensor_num = 0; + + mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT; + /* Set measurement in single measurement mode */ + qpnp_adc_tm_mode_select(chip, mode_ctl); + + qpnp_adc_tm_disable(chip); + + rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1); + if (rc) { + pr_err("adc-tm read status1 failed\n"); + return IRQ_HANDLED; + } + + rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_HIGH, + &chip->th_info.status_high, 1); + if (rc) { + 
pr_err("adc-tm-tm read status high failed with %d\n", rc); + return IRQ_HANDLED; + } + + rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN, + &chip->th_info.adc_tm_high_thr_set, 1); + if (rc) { + pr_err("adc-tm-tm read high thr failed with %d\n", rc); + return IRQ_HANDLED; + } + + /* Check which interrupt threshold is lower and measure against the + * enabled channel + */ + rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN, + &chip->th_info.qpnp_adc_tm_meas_en, 1); + if (rc) { + pr_err("adc-tm-tm read status high failed with %d\n", rc); + return IRQ_HANDLED; + } + + chip->th_info.adc_tm_high_enable = chip->th_info.qpnp_adc_tm_meas_en & + chip->th_info.status_high; + chip->th_info.adc_tm_high_enable &= chip->th_info.adc_tm_high_thr_set; + + sensor_notify_num = chip->th_info.adc_tm_high_enable; + while (i < chip->max_channels_available) { + if ((sensor_notify_num & 0x1) == 1) + sensor_num = i; + sensor_notify_num >>= 1; + i++; + } + + if (!chip->sensor[sensor_num].thermal_node) { + sensor_mask = 1 << sensor_num; + rc = qpnp_adc_tm_reg_update(chip, + QPNP_ADC_TM_HIGH_THR_INT_EN, + sensor_mask, false); + if (rc < 0) { + pr_err("high threshold int read failed\n"); + return IRQ_HANDLED; + } + } else { + /* + * Uses the thermal sysfs registered device to disable + * the corresponding high voltage threshold which + * is triggered by low temp + */ + pr_debug("thermal node with mask:%x\n", sensor_mask); + rc = qpnp_adc_tm_activate_trip_type( + &chip->sensor[sensor_num], + ADC_TM_TRIP_LOW_COOL, + THERMAL_TRIP_ACTIVATION_DISABLED); + if (rc < 0) { + pr_err("notify error:%d\n", sensor_num); + return IRQ_HANDLED; + } + } + + queue_work(chip->high_thr_wq, &chip->trigger_high_thr_work); + + return IRQ_HANDLED; +} + +static void qpnp_adc_tm_low_thr_work(struct work_struct *work) +{ + struct qpnp_adc_tm_chip *chip = container_of(work, + struct qpnp_adc_tm_chip, trigger_low_thr_work); + int rc; + + pr_debug("thr:0x%x\n", chip->th_info.adc_tm_low_enable); + + rc = qpnp_adc_tm_read_status(chip); + if (rc < 0) + pr_err("adc-tm low thr work failed\n"); +} + +static irqreturn_t qpnp_adc_tm_low_thr_isr(int irq, void *data) +{ + struct qpnp_adc_tm_chip *chip = data; + u8 mode_ctl = 0, status1 = 0, sensor_mask = 0; + int rc = 0, sensor_notify_num = 0, i = 0, sensor_num = 0; + + mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT; + /* Set measurement in single measurement mode */ + qpnp_adc_tm_mode_select(chip, mode_ctl); + + qpnp_adc_tm_disable(chip); + + rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS1, &status1, 1); + if (rc) { + pr_err("adc-tm read status1 failed\n"); + return IRQ_HANDLED; + } + + rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_STATUS_LOW, + &chip->th_info.status_low, 1); + if (rc) { + pr_err("adc-tm-tm read status low failed with %d\n", rc); + return IRQ_HANDLED; + } + + rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN, + &chip->th_info.adc_tm_low_thr_set, 1); + if (rc) { + pr_err("adc-tm-tm read low thr failed with %d\n", rc); + return IRQ_HANDLED; + } + + rc = qpnp_adc_tm_read_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN, + &chip->th_info.qpnp_adc_tm_meas_en, 1); + if (rc) { + pr_err("adc-tm-tm read status high failed with %d\n", rc); + return IRQ_HANDLED; + } + + chip->th_info.adc_tm_low_enable = chip->th_info.qpnp_adc_tm_meas_en & + chip->th_info.status_low; + chip->th_info.adc_tm_low_enable &= chip->th_info.adc_tm_low_thr_set; + + sensor_notify_num = chip->th_info.adc_tm_low_enable; + while (i < chip->max_channels_available) { + if ((sensor_notify_num & 0x1) == 1) + 
sensor_num = i; + sensor_notify_num >>= 1; + i++; + } + + if (!chip->sensor[sensor_num].thermal_node) { + sensor_mask = 1 << sensor_num; + rc = qpnp_adc_tm_reg_update(chip, + QPNP_ADC_TM_LOW_THR_INT_EN, + sensor_mask, false); + if (rc < 0) { + pr_err("low threshold int read failed\n"); + return IRQ_HANDLED; + } + } else { + /* + * Uses the thermal sysfs registered device to disable + * the corresponding low voltage threshold which + * is triggered by high temp + */ + pr_debug("thermal node with mask:%x\n", sensor_mask); + rc = qpnp_adc_tm_activate_trip_type( + &chip->sensor[sensor_num], + ADC_TM_TRIP_HIGH_WARM, + THERMAL_TRIP_ACTIVATION_DISABLED); + if (rc < 0) { + pr_err("notify error:%d\n", sensor_num); + return IRQ_HANDLED; + } + } + + queue_work(chip->low_thr_wq, &chip->trigger_low_thr_work); + + return IRQ_HANDLED; +} + +static int qpnp_adc_read_temp(void *data, int *temp) +{ + struct qpnp_adc_tm_sensor *adc_tm_sensor = data; + int rc = 0, degcel = 0; + + rc = iio_read_channel_processed(adc_tm_sensor->sen_adc, &degcel); + if (rc < 0) { + pr_err("IIO channel read failed with %d\n", rc); + return rc; + } + + *temp = degcel; + return 0; +} + +static struct thermal_zone_of_device_ops qpnp_adc_tm_thermal_ops = { + .get_temp = qpnp_adc_read_temp, + .set_trips = qpnp_adc_tm_set_trip_temp, +}; + +int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_chip *chip, + struct qpnp_adc_tm_btm_param *param) +{ + uint32_t channel, amux_prescaling, dt_index = 0, scale_type = 0; + int rc = 0, i = 0, version = 0; + bool chan_found = false; + + if (qpnp_adc_tm_is_valid(chip)) { + pr_err("chip not valid\n"); + return -ENODEV; + } + + if (param->threshold_notification == NULL) { + pr_debug("No notification for high/low temp??\n"); + return -EINVAL; + } + + mutex_lock(&chip->adc->adc_lock); + + channel = param->channel; + + if (channel == VSYS) { + version = qpnp_adc_get_revid_version(chip->dev); + if (version == QPNP_REV_ID_PM8950_1_0) { + pr_debug("Channel not supported\n"); + rc = -EINVAL; + goto fail_unlock; + } + } + + while (i < chip->max_channels_available) { + if (chip->adc->adc_channels[i].channel_num == + channel) { + dt_index = i; + chan_found = true; + i++; + } else + i++; + } + + if (!chan_found) { + pr_err("not a valid ADC_TM channel\n"); + rc = -EINVAL; + goto fail_unlock; + } + + rc = qpnp_adc_tm_check_revision(chip, + chip->sensor[dt_index].btm_channel_num); + if (rc < 0) + goto fail_unlock; + + scale_type = chip->adc->adc_channels[dt_index].adc_scale_fn; + if (scale_type >= SCALE_RSCALE_NONE) { + rc = -EBADF; + goto fail_unlock; + } + + amux_prescaling = + chip->adc->adc_channels[dt_index].chan_path_prescaling; + + if (amux_prescaling >= PATH_SCALING_NONE) { + rc = -EINVAL; + goto fail_unlock; + } + + pr_debug("channel:%d, scale_type:%d, dt_idx:%d\n", + channel, scale_type, dt_index); + param->gain_num = qpnp_vadc_amux_scaling_ratio[amux_prescaling].num; + param->gain_den = qpnp_vadc_amux_scaling_ratio[amux_prescaling].den; + chip->adc->amux_prop->amux_channel = channel; + chip->adc->amux_prop->decimation = + chip->adc->adc_channels[dt_index].adc_decimation; + chip->adc->amux_prop->hw_settle_time = + chip->adc->adc_channels[dt_index].hw_settle_time; + chip->adc->amux_prop->fast_avg_setup = + chip->adc->adc_channels[dt_index].fast_avg_setup; + chip->adc->amux_prop->mode_sel = + ADC_OP_MEASUREMENT_INTERVAL << QPNP_OP_MODE_SHIFT; + adc_tm_rscale_fn[scale_type].chan(chip->adc, param, + &chip->adc->amux_prop->chan_prop->low_thr, + &chip->adc->amux_prop->chan_prop->high_thr); + 
qpnp_adc_tm_add_to_list(chip, dt_index, param, + chip->adc->amux_prop->chan_prop); + chip->adc->amux_prop->chan_prop->tm_channel_select = + chip->sensor[dt_index].btm_channel_num; + chip->adc->amux_prop->chan_prop->state_request = + param->state_request; + chip->adc->amux_prop->calib_type = + chip->adc->adc_channels[dt_index].calib_type; + rc = qpnp_adc_tm_configure(chip, chip->adc->amux_prop); + if (rc) { + pr_err("adc-tm configure failed with %d\n", rc); + goto fail_unlock; + } + + chip->sensor[dt_index].scale_type = scale_type; + +fail_unlock: + mutex_unlock(&chip->adc->adc_lock); + + return rc; +} +EXPORT_SYMBOL(qpnp_adc_tm_channel_measure); + +int32_t qpnp_adc_tm_disable_chan_meas(struct qpnp_adc_tm_chip *chip, + struct qpnp_adc_tm_btm_param *param) +{ + uint32_t channel, dt_index = 0, btm_chan_num; + u8 sensor_mask = 0, mode_ctl = 0; + int rc = 0; + + if (qpnp_adc_tm_is_valid(chip)) + return -ENODEV; + + mutex_lock(&chip->adc->adc_lock); + + /* Set measurement in single measurement mode */ + mode_ctl = ADC_OP_NORMAL_MODE << QPNP_OP_MODE_SHIFT; + rc = qpnp_adc_tm_mode_select(chip, mode_ctl); + if (rc < 0) { + pr_err("adc-tm single mode select failed\n"); + goto fail; + } + + /* Disable bank */ + rc = qpnp_adc_tm_disable(chip); + if (rc < 0) { + pr_err("adc-tm disable failed\n"); + goto fail; + } + + /* Check if a conversion is in progress */ + rc = qpnp_adc_tm_req_sts_check(chip); + if (rc < 0) { + pr_err("adc-tm req_sts check failed\n"); + goto fail; + } + + channel = param->channel; + while ((chip->adc->adc_channels[dt_index].channel_num + != channel) && (dt_index < chip->max_channels_available)) + dt_index++; + + if (dt_index >= chip->max_channels_available) { + pr_err("not a valid ADC_TMN channel\n"); + rc = -EINVAL; + goto fail; + } + + btm_chan_num = chip->sensor[dt_index].btm_channel_num; + + sensor_mask = 1 << chip->sensor[dt_index].sensor_num; + + rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_LOW_THR_INT_EN, + sensor_mask, false); + if (rc < 0) { + pr_err("high threshold int enable failed\n"); + goto fail; + } + + rc = qpnp_adc_tm_reg_update(chip, QPNP_ADC_TM_MULTI_MEAS_EN, + sensor_mask, false); + if (rc < 0) { + pr_err("multi measurement en failed\n"); + goto fail; + } + + rc = qpnp_adc_tm_enable_if_channel_meas(chip); + if (rc < 0) + pr_err("re-enabling measurement failed\n"); + +fail: + mutex_unlock(&chip->adc->adc_lock); + + return rc; +} +EXPORT_SYMBOL(qpnp_adc_tm_disable_chan_meas); + +struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev, const char *name) +{ + struct qpnp_adc_tm_chip *chip; + struct device_node *node = NULL; + char prop_name[QPNP_MAX_PROP_NAME_LEN]; + + snprintf(prop_name, QPNP_MAX_PROP_NAME_LEN, "qcom,%s-adc_tm", name); + + node = of_parse_phandle(dev->of_node, prop_name, 0); + if (node == NULL) + return ERR_PTR(-ENODEV); + + list_for_each_entry(chip, &qpnp_adc_tm_device_list, list) + if (chip->adc->pdev->dev.of_node == node) + return chip; + + return ERR_PTR(-EPROBE_DEFER); +} +EXPORT_SYMBOL(qpnp_get_adc_tm); + +static int qpnp_adc_tm_initial_setup(struct qpnp_adc_tm_chip *chip) +{ + u8 thr_init = 0; + int rc = 0; + + rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_HIGH_THR_INT_EN, + thr_init, 1); + if (rc < 0) { + pr_err("high thr init failed\n"); + return rc; + } + + rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_LOW_THR_INT_EN, + thr_init, 1); + if (rc < 0) { + pr_err("low thr init failed\n"); + return rc; + } + + rc = qpnp_adc_tm_write_reg(chip, QPNP_ADC_TM_MULTI_MEAS_EN, + thr_init, 1); + if (rc < 0) { + pr_err("multi meas en failed\n"); + 
return rc; + } + + return rc; +} + +static const struct of_device_id qpnp_adc_tm_match_table[] = { + { .compatible = "qcom,qpnp-adc-tm" }, + {} +}; + + +static int qpnp_adc_tm_measure_ref_points(struct qpnp_adc_tm_chip *chip) +{ + int read_1 = 0, read_2 = 0; + int ret; + struct qpnp_adc_drv *adc = chip->adc; + + ret = iio_read_channel_raw(chip->ref_1250v, &read_1); + if (ret < 0) + goto err; + + ret = iio_read_channel_raw(chip->ref_625mv, &read_2); + if (ret < 0) + goto err; + + if (read_1 == read_2) { + ret = -EINVAL; + goto err; + } + + adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].dy = + read_1 - read_2; + + adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].dx = + QPNP_ADC_625_UV; + + adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].adc_vref = read_1; + adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].adc_gnd = read_2; + + /* For Ratiometric calibration */ + read_1 = 0; + read_2 = 0; + + ret = iio_read_channel_raw(chip->ref_vdd, &read_1); + if (ret < 0) + goto err; + + ret = iio_read_channel_raw(chip->ref_gnd, &read_2); + if (ret < 0) + goto err; + + if (read_1 == read_2) { + ret = -EINVAL; + goto err; + } + + adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dy = + read_1 - read_2; + + adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dx = + adc->adc_prop->adc_vdd_reference; + + adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_vref = + read_1; + + adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_gnd = + read_2; + + pr_debug("Measuring reference points okay\n"); + return 0; + +err: + pr_err("Measuring reference points failed\n"); + return ret; +} + +static int qpnp_adc_tm_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node, *child; + struct device *dev = &pdev->dev; + struct qpnp_adc_tm_chip *chip; + struct qpnp_adc_drv *adc_qpnp; + struct iio_channel *channels; + int32_t count_adc_channel_list = 0, rc, sen_idx = 0, i = 0; + int indio_chan_count = 0; + bool thermal_node = false; + const struct of_device_id *id; + + for_each_child_of_node(node, child) + count_adc_channel_list++; + + if (!count_adc_channel_list) { + pr_err("No channel listing\n"); + return -EINVAL; + } + + channels = iio_channel_get_all(dev); + if (IS_ERR(channels)) + return PTR_ERR(channels); + + while (channels[indio_chan_count].indio_dev) + indio_chan_count++; + + if (indio_chan_count < 4) { + dev_err(dev, "Calibration IIO channels missing in main node\n"); + return -EINVAL; + } + + id = of_match_node(qpnp_adc_tm_match_table, node); + if (id == NULL) { + pr_err("qpnp_adc_tm_match of_node prop not present\n"); + return -ENODEV; + } + + chip = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_adc_tm_chip) + + (count_adc_channel_list * + sizeof(struct qpnp_adc_tm_sensor)), + GFP_KERNEL); + if (!chip) + return -ENOMEM; + + list_add(&chip->list, &qpnp_adc_tm_device_list); + chip->max_channels_available = count_adc_channel_list; + + /* Get all calibration channels */ + chip->ref_625mv = iio_channel_get(&pdev->dev, "ref_625mv"); + if (IS_ERR(chip->ref_625mv)) { + pr_err("Calib channel ref_625mv unavailable %ld\n", + PTR_ERR(chip->ref_625mv)); + return PTR_ERR(chip->ref_625mv); + } + + chip->ref_1250v = iio_channel_get(&pdev->dev, "ref_1250v"); + if (IS_ERR(chip->ref_1250v)) { + pr_err("Calib channel ref_1250v unavailable %ld\n", + PTR_ERR(chip->ref_1250v)); + return PTR_ERR(chip->ref_1250v); + } + + chip->ref_vdd = iio_channel_get(&pdev->dev, "ref_vdd"); + if (IS_ERR(chip->ref_vdd)) { + pr_err("Calib channel ref_vdd unavailable %ld\n", + 
PTR_ERR(chip->ref_vdd)); + return PTR_ERR(chip->ref_vdd); + } + + chip->ref_gnd = iio_channel_get(&pdev->dev, "ref_gnd"); + if (IS_ERR(chip->ref_gnd)) { + pr_err("Calib channel ref_gnd unavailable %ld\n", + PTR_ERR(chip->ref_gnd)); + return PTR_ERR(chip->ref_gnd); + } + + adc_qpnp = devm_kzalloc(&pdev->dev, sizeof(struct qpnp_adc_drv), + GFP_KERNEL); + if (!adc_qpnp) { + rc = -ENOMEM; + goto fail; + } + + chip->dev = &(pdev->dev); + chip->adc = adc_qpnp; + chip->adc->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!chip->adc->regmap) { + dev_err(&pdev->dev, "Couldn't get parent's regmap\n"); + rc = -EINVAL; + goto fail; + } + + rc = qpnp_adc_get_devicetree_data(pdev, chip->adc); + if (rc) { + dev_err(&pdev->dev, "Failed to read device tree\n"); + goto fail; + } + + mutex_init(&chip->adc->adc_lock); + + /* Register the ADC peripheral interrupt */ + chip->adc->adc_high_thr_irq = platform_get_irq_byname(pdev, + "high-thr-en-set"); + if (chip->adc->adc_high_thr_irq < 0) { + pr_err("Invalid irq\n"); + rc = -ENXIO; + goto fail; + } + + chip->adc->adc_low_thr_irq = platform_get_irq_byname(pdev, + "low-thr-en-set"); + if (chip->adc->adc_low_thr_irq < 0) { + pr_err("Invalid irq\n"); + rc = -ENXIO; + goto fail; + } + + chip->adc_tm_recalib_check = of_property_read_bool(node, + "qcom,adc-tm-recalib-check"); + + for_each_child_of_node(node, child) { + char name[25]; + int btm_channel_num, timer_select = 0; + struct iio_channel *chan_child; + const char *name_channel; + + rc = of_property_read_u32(child, + "qcom,btm-channel-number", &btm_channel_num); + if (rc) { + pr_err("Invalid btm channel number\n"); + goto fail; + } + rc = of_property_read_u32(child, + "qcom,meas-interval-timer-idx", &timer_select); + if (rc) { + pr_debug("Default to timer2 with interval of 1 sec\n"); + chip->sensor[sen_idx].timer_select = + ADC_MEAS_TIMER_SELECT2; + chip->sensor[sen_idx].meas_interval = + ADC_MEAS2_INTERVAL_1S; + } else { + if (timer_select >= ADC_MEAS_TIMER_NUM) { + pr_err("Invalid timer selection number\n"); + goto fail; + } + chip->sensor[sen_idx].timer_select = timer_select; + if (timer_select == ADC_MEAS_TIMER_SELECT1) + chip->sensor[sen_idx].meas_interval = + ADC_MEAS1_INTERVAL_3P9MS; + else if (timer_select == ADC_MEAS_TIMER_SELECT3) + chip->sensor[sen_idx].meas_interval = + ADC_MEAS3_INTERVAL_4S; + else if (timer_select == ADC_MEAS_TIMER_SELECT2) + chip->sensor[sen_idx].meas_interval = + ADC_MEAS2_INTERVAL_1S; + } + + name_channel = of_get_property(child, "label", NULL); + + rc = of_property_match_string(child, "io-channel-names", + name_channel); + if (rc < 0) { + pr_err("IIO channel mismatch with ADC channel name\n"); + goto fail; + } + + /* Get IIO channel for ADC channel node */ + pdev->dev.of_node = child; + chan_child = iio_channel_get(&pdev->dev, name_channel); + pdev->dev.of_node = node; + + if (IS_ERR(chan_child)) { + pr_err("IIO channel for child unavailable %ld\n", + PTR_ERR(chan_child)); + return PTR_ERR(chan_child); + } + + chip->sensor[sen_idx].btm_channel_num = btm_channel_num; + chip->sensor[sen_idx].vadc_channel_num = + chip->adc->adc_channels[sen_idx].channel_num; + chip->sensor[sen_idx].sensor_num = sen_idx; + chip->sensor[sen_idx].chip = chip; + pr_debug("btm_chan:%x, vadc_chan:%x\n", btm_channel_num, + chip->adc->adc_channels[sen_idx].channel_num); + + /* Assign IIO channel to respective sensor */ + if (chan_child->channel->channel == + chip->adc->adc_channels[sen_idx].channel_num) { + chip->sensor[sen_idx].sen_adc = chan_child; + } else { + pr_err("%s:ADC channel number:%x, 
IIO channel number:%x, IIO channel doesn't match with ADC sensor\n", + __func__, + chip->adc->adc_channels[sen_idx].channel_num, + chan_child->channel->channel); + return -EINVAL; + } + + thermal_node = of_property_read_bool(child, + "qcom,thermal-node"); + if (thermal_node) { + /* Register with the thermal zone */ + pr_debug("thermal node%x\n", btm_channel_num); + chip->sensor[sen_idx].mode = THERMAL_DEVICE_DISABLED; + chip->sensor[sen_idx].thermal_node = true; + snprintf(name, sizeof(name), "%s", + chip->adc->adc_channels[sen_idx].name); + chip->sensor[sen_idx].low_thr = + QPNP_ADC_TM_M0_LOW_THR; + chip->sensor[sen_idx].high_thr = + QPNP_ADC_TM_M0_HIGH_THR; + chip->sensor[sen_idx].tz_dev = + devm_thermal_zone_of_sensor_register( + chip->dev, + chip->sensor[sen_idx].vadc_channel_num, + &chip->sensor[sen_idx], + &qpnp_adc_tm_thermal_ops); + if (IS_ERR(chip->sensor[sen_idx].tz_dev)) + pr_err("thermal device register failed.\n"); + } + chip->sensor[sen_idx].req_wq = alloc_workqueue( + "qpnp_adc_notify_wq", WQ_HIGHPRI, 0); + if (!chip->sensor[sen_idx].req_wq) { + pr_err("Requesting priority wq failed\n"); + goto fail; + } + INIT_WORK(&chip->sensor[sen_idx].work, notify_adc_tm_fn); + INIT_LIST_HEAD(&chip->sensor[sen_idx].thr_list); + sen_idx++; + } + + chip->high_thr_wq = alloc_workqueue("qpnp_adc_tm_high_thr_wq", + WQ_HIGHPRI, 0); + if (!chip->high_thr_wq) { + pr_err("Requesting high thr priority wq failed\n"); + goto fail; + } + + chip->low_thr_wq = alloc_workqueue("qpnp_adc_tm_low_thr_wq", + WQ_HIGHPRI, 0); + if (!chip->low_thr_wq) { + pr_err("Requesting low thr priority wq failed\n"); + goto fail; + } + + chip->thr_wq = alloc_workqueue("qpnp_adc_tm_thr_wq", + WQ_HIGHPRI, 0); + if (!chip->thr_wq) { + pr_err("Requesting thr priority wq failed\n"); + goto fail; + } + + INIT_WORK(&chip->trigger_high_thr_work, qpnp_adc_tm_high_thr_work); + INIT_WORK(&chip->trigger_low_thr_work, qpnp_adc_tm_low_thr_work); + + rc = qpnp_adc_tm_initial_setup(chip); + if (rc) + goto fail; + rc = devm_request_irq(&pdev->dev, chip->adc->adc_high_thr_irq, + qpnp_adc_tm_high_thr_isr, + IRQF_TRIGGER_RISING, "qpnp_adc_tm_high_interrupt", chip); + if (rc) { + dev_err(&pdev->dev, "failed to request adc irq\n"); + goto fail; + } else { + enable_irq_wake(chip->adc->adc_high_thr_irq); + } + + rc = devm_request_irq(&pdev->dev, chip->adc->adc_low_thr_irq, + qpnp_adc_tm_low_thr_isr, + IRQF_TRIGGER_RISING, + "qpnp_adc_tm_low_interrupt", chip); + if (rc) { + dev_err(&pdev->dev, "failed to request adc irq\n"); + goto fail; + } else { + enable_irq_wake(chip->adc->adc_low_thr_irq); + } + + chip->adc_vote_enable = false; + dev_set_drvdata(&pdev->dev, chip); + + /* Read all calibration channels and ref points */ + rc = qpnp_adc_tm_measure_ref_points(chip); + if (rc < 0) { + pr_err("Error measuring ref points\n"); + goto fail; + } + + spin_lock_init(&chip->th_info.adc_tm_low_lock); + spin_lock_init(&chip->th_info.adc_tm_high_lock); + + pr_debug("QPNP ADC TM driver probe OK\n"); + return 0; +fail: + for_each_child_of_node(node, child) { + thermal_node = of_property_read_bool(child, + "qcom,thermal-node"); + if (thermal_node) { + thermal_zone_device_unregister(chip->sensor[i].tz_dev); + if (chip->sensor[i].req_wq) + destroy_workqueue(chip->sensor[sen_idx].req_wq); + } + } + if (chip->high_thr_wq) + destroy_workqueue(chip->high_thr_wq); + if (chip->low_thr_wq) + destroy_workqueue(chip->low_thr_wq); + list_del(&chip->list); + dev_set_drvdata(&pdev->dev, NULL); + return rc; +} + +static int qpnp_adc_tm_remove(struct platform_device *pdev) +{ 
+ struct qpnp_adc_tm_chip *chip = dev_get_drvdata(&pdev->dev); + struct device_node *node = pdev->dev.of_node, *child; + int i = 0; + + for_each_child_of_node(node, child) { + if (chip->sensor[i].req_wq) + destroy_workqueue(chip->sensor[i].req_wq); + i++; + } + + if (chip->high_thr_wq) + destroy_workqueue(chip->high_thr_wq); + if (chip->low_thr_wq) + destroy_workqueue(chip->low_thr_wq); + + dev_set_drvdata(&pdev->dev, NULL); + + return 0; +} + +static void qpnp_adc_tm_shutdown(struct platform_device *pdev) +{ + struct qpnp_adc_tm_chip *chip = dev_get_drvdata(&pdev->dev); + int rc = 0; + + /* Disable bank */ + rc = qpnp_adc_tm_disable(chip); + if (rc < 0) + pr_err("adc-tm disable failed\n"); + +} + +static int qpnp_adc_tm_suspend_noirq(struct device *dev) +{ + struct qpnp_adc_tm_chip *chip = dev_get_drvdata(dev); + struct device_node *node = dev->of_node, *child; + int i = 0; + + flush_workqueue(chip->high_thr_wq); + flush_workqueue(chip->low_thr_wq); + + for_each_child_of_node(node, child) { + if (chip->sensor[i].req_wq) { + pr_debug("flushing queue for sensor %d\n", i); + flush_workqueue(chip->sensor[i].req_wq); + } + i++; + } + return 0; +} + +static const struct dev_pm_ops qpnp_adc_tm_pm_ops = { + .suspend_noirq = qpnp_adc_tm_suspend_noirq, +}; + +static struct platform_driver qpnp_adc_tm_driver = { + .driver = { + .name = "qcom,qpnp-adc-tm", + .of_match_table = qpnp_adc_tm_match_table, + .pm = &qpnp_adc_tm_pm_ops, + }, + .probe = qpnp_adc_tm_probe, + .remove = qpnp_adc_tm_remove, + .shutdown = qpnp_adc_tm_shutdown, +}; + +static int __init qpnp_adc_tm_init(void) +{ + return platform_driver_register(&qpnp_adc_tm_driver); +} +module_init(qpnp_adc_tm_init); + +static void __exit qpnp_adc_tm_exit(void) +{ + platform_driver_unregister(&qpnp_adc_tm_driver); +} +module_exit(qpnp_adc_tm_exit); + +MODULE_DESCRIPTION("QPNP PMIC ADC Threshold Monitoring driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c index 7ed40ca19637..f18ad3ce24e8 100644 --- a/drivers/tty/serial/msm_geni_serial.c +++ b/drivers/tty/serial/msm_geni_serial.c @@ -25,6 +25,7 @@ #include #include #include +#include /* UART specific GENI registers */ #define SE_UART_LOOPBACK_CFG (0x22C) @@ -103,7 +104,7 @@ #define STALE_TIMEOUT (16) #define STALE_COUNT (DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT) #define SEC_TO_USEC (1000000) -#define SYSTEM_DELAY (500) +#define STALE_DELAY (1000) //10msec #define DEFAULT_BITS_PER_CHAR (10) #define GENI_UART_NR_PORTS (15) #define GENI_UART_CONS_PORTS (1) @@ -115,8 +116,8 @@ #define WAKEBYTE_TIMEOUT_MSEC (2000) #define WAIT_XFER_MAX_ITER (2) -#define WAIT_XFER_MAX_TIMEOUT_US (10000) -#define WAIT_XFER_MIN_TIMEOUT_US (9000) +#define WAIT_XFER_MAX_TIMEOUT_US (150) +#define WAIT_XFER_MIN_TIMEOUT_US (100) #define IPC_LOG_PWR_PAGES (10) #define IPC_LOG_MISC_PAGES (30) #define IPC_LOG_TX_RX_PAGES (30) @@ -152,6 +153,30 @@ #define DMA_RX_BUF_SIZE (2048) #define UART_CONSOLE_RX_WM (2) +enum uart_error_code { + UART_ERROR_DEFAULT, + UART_ERROR_INVALID_FW_LOADED, + UART_ERROR_CLK_GET_FAIL, + UART_ERROR_SE_CLK_RATE_FIND_FAIL, + UART_ERROR_SE_RESOURCES_INIT_FAIL, + UART_ERROR_SE_RESOURCES_ON_FAIL, + UART_ERROR_SE_RESOURCES_OFF_FAIL, + UART_ERROR_TX_DMA_MAP_FAIL, + UART_ERROR_TX_CANCEL_FAIL, + UART_ERROR_TX_ABORT_FAIL, + UART_ERROR_TX_FSM_RESET_FAIL, + UART_ERROR_RX_CANCEL_FAIL, + UART_ERROR_RX_ABORT_FAIL, + UART_ERROR_RX_FSM_RESET_FAIL, + UART_ERROR_RX_TTY_INSERT_FAIL, + UART_ERROR_ILLEGAL_INTERRUPT, + UART_ERROR_BUFFER_OVERRUN, + 
UART_ERROR_RX_PARITY_ERROR, + UART_ERROR_RX_BREAK_ERROR, + UART_ERROR_RX_SBE_ERROR, + SOC_ERROR_START_TX_IOS_SOC_RFR_HIGH +}; + struct msm_geni_serial_ver_info { int hw_major_ver; int hw_minor_ver; @@ -211,8 +236,13 @@ struct msm_geni_serial_port { struct completion m_cmd_timeout; struct completion s_cmd_timeout; spinlock_t rx_lock; + bool bypass_flow_control; + enum uart_error_code uart_error; + struct work_struct work; + struct workqueue_struct *qwork; }; +static void msm_geni_serial_worker(struct work_struct *work); static const struct uart_ops msm_geni_serial_pops; static struct uart_driver msm_geni_console_driver; static struct uart_driver msm_geni_serial_hs_driver; @@ -240,6 +270,8 @@ static void msm_geni_serial_set_manual_flow(bool enable, static bool handle_rx_dma_xfer(u32 s_irq_status, struct uart_port *uport); static int uart_line_id; +static bool is_earlycon; + #define GET_DEV_PORT(uport) \ container_of(uport, struct msm_geni_serial_port, uport) @@ -247,6 +279,19 @@ static struct msm_geni_serial_port msm_geni_console_port; static struct msm_geni_serial_port msm_geni_serial_ports[GENI_UART_NR_PORTS]; static void msm_geni_serial_handle_isr(struct uart_port *uport, unsigned long *flags, bool is_irq_masked); +/* + * The below API is required to pass UART error code to BT HOST. + */ +static void msm_geni_update_uart_error_code(struct msm_geni_serial_port *port, + enum uart_error_code uart_error_code) +{ + if (!port->is_console && !port->uart_error) { + port->uart_error = uart_error_code; + IPC_LOG_MSG(port->ipc_log_misc, + "%s: uart_error_code:%d\n", __func__, port->uart_error); + } +} + /* * The below API is required to check if uport->lock (spinlock) @@ -276,7 +321,14 @@ static int msm_geni_serial_spinlocked(struct uart_port *uport) static void msm_geni_serial_enable_interrupts(struct uart_port *uport) { unsigned int geni_m_irq_en, geni_s_irq_en; - struct msm_geni_serial_port *port = GET_DEV_PORT(uport); + struct msm_geni_serial_port *port = NULL; + + /* + * Earlyconsole also uses this API and finds port is NULL, + * hence add a protective check. + */ + if (!is_earlycon) + port = GET_DEV_PORT(uport); geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN); @@ -288,7 +340,7 @@ static void msm_geni_serial_enable_interrupts(struct uart_port *uport) geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN); geni_write_reg_nolog(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN); - if (port->xfer_mode == SE_DMA) { + if (port && port->xfer_mode == SE_DMA) { geni_write_reg_nolog(DMA_TX_IRQ_BITS, uport->membase, SE_DMA_TX_IRQ_EN_SET); geni_write_reg_nolog(DMA_RX_IRQ_BITS, uport->membase, @@ -470,7 +522,6 @@ static void wait_for_transfers_inflight(struct uart_port *uport) int iter = 0; struct msm_geni_serial_port *port = GET_DEV_PORT(uport); unsigned int geni_status; - bool CTS, RX; geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS); /* Possible stop rx is called before this. 
*/ @@ -487,19 +538,8 @@ static void wait_for_transfers_inflight(struct uart_port *uport) } } if (check_transfers_inflight(uport)) { - u32 geni_status = geni_read_reg_nolog(uport->membase, - SE_GENI_STATUS); - u32 geni_ios = geni_read_reg_nolog(uport->membase, SE_GENI_IOS); - u32 rx_fifo_status = geni_read_reg_nolog(uport->membase, - SE_GENI_RX_FIFO_STATUS); - u32 rx_dma = - geni_read_reg_nolog(uport->membase, SE_DMA_RX_LEN_IN); - CTS = geni_ios & BIT(1); // b[1] = UART CTS <- Peer RFR - RX = geni_ios & BIT(0); // b[0] = UART RX <- Peer TX - - IPC_LOG_MSG(port->ipc_log_misc, - "%s: geni=0x%x rx_fifo=0x%x rx_dma=0x%x, CTS_IO=%d, RX_IO=%d\n", - __func__, geni_status, rx_fifo_status, rx_dma, CTS, RX); + geni_se_dump_dbg_regs(&port->serial_rsc, + uport->membase, port->ipc_log_misc); } } @@ -551,11 +591,27 @@ static int vote_clock_off(struct uart_port *uport) return 0; }; + +static void msm_geni_serial_worker(struct work_struct *work) +{ + struct msm_geni_serial_port *port; + + port = container_of(work, struct msm_geni_serial_port, work); + + geni_se_dump_dbg_regs(&port->serial_rsc, + port->uport.membase, port->ipc_log_misc); + port->ipc_log_rx = port->ipc_log_single; + port->ipc_log_tx = port->ipc_log_single; + port->ipc_log_misc = port->ipc_log_single; + port->ipc_log_pwr = port->ipc_log_single; +} + static int msm_geni_serial_ioctl(struct uart_port *uport, unsigned int cmd, unsigned long arg) { int ret = -ENOIOCTLCMD; struct msm_geni_serial_port *port = GET_DEV_PORT(uport); + enum uart_error_code uart_error; switch (cmd) { case TIOCPMGET: { @@ -571,13 +627,14 @@ static int msm_geni_serial_ioctl(struct uart_port *uport, unsigned int cmd, break; } case TIOCFAULT: { - geni_se_dump_dbg_regs(&port->serial_rsc, - uport->membase, port->ipc_log_misc); - port->ipc_log_rx = port->ipc_log_single; - port->ipc_log_tx = port->ipc_log_single; - port->ipc_log_misc = port->ipc_log_single; - port->ipc_log_pwr = port->ipc_log_single; - ret = 0; + uart_error = port->uart_error; + port->uart_error = UART_ERROR_DEFAULT; + IPC_LOG_MSG(port->ipc_log_misc, + "%s:TIOCFAULT - uart_error_set:%d new_uart_error:%d\n", + __func__, uart_error, port->uart_error); + if (port->qwork) + queue_work(port->qwork, &port->work); + ret = uart_error; break; } default: @@ -628,6 +685,10 @@ static unsigned int msm_geni_serial_get_mctrl(struct uart_port *uport) geni_ios = geni_read_reg_nolog(uport->membase, SE_GENI_IOS); if (!(geni_ios & IO2_DATA_IN)) mctrl |= TIOCM_CTS; + else + msm_geni_update_uart_error_code(port, + SOC_ERROR_START_TX_IOS_SOC_RFR_HIGH); + IPC_LOG_MSG(port->ipc_log_misc, "%s: geni_ios:0x%x, mctrl:0x%x\n", __func__, geni_ios, mctrl); return mctrl; @@ -1110,6 +1171,8 @@ static int msm_geni_serial_prep_dma_tx(struct uart_port *uport) } else { IPC_LOG_MSG(msm_port->ipc_log_misc, "%s: TX DMA map Fail %d\n", __func__, ret); + msm_geni_update_uart_error_code(msm_port, + UART_ERROR_TX_DMA_MAP_FAIL); geni_write_reg_nolog(0, uport->membase, SE_UART_TX_TRANS_LEN); msm_port->m_cmd_done = false; @@ -1132,6 +1195,8 @@ static int msm_geni_serial_prep_dma_tx(struct uart_port *uport) IPC_LOG_MSG(msm_port->ipc_log_misc, "%s: tx_cancel failed 0x%x\n", __func__, geni_read_reg_nolog(uport->membase, SE_GENI_STATUS)); + msm_geni_update_uart_error_code(msm_port, + UART_ERROR_TX_CANCEL_FAIL); msm_port->m_cmd_done = false; reinit_completion(&msm_port->m_cmd_timeout); @@ -1149,6 +1214,8 @@ static int msm_geni_serial_prep_dma_tx(struct uart_port *uport) "%s: tx abort failed 0x%x\n", __func__, geni_read_reg_nolog(uport->membase, SE_GENI_STATUS)); 
+ msm_geni_update_uart_error_code(msm_port, + UART_ERROR_TX_ABORT_FAIL); } } @@ -1163,9 +1230,12 @@ static int msm_geni_serial_prep_dma_tx(struct uart_port *uport) timeout = geni_wait_for_cmd_done(uport, is_irq_masked); - if (timeout) + if (timeout) { IPC_LOG_MSG(msm_port->ipc_log_misc, "%s: tx fsm reset failed\n", __func__); + msm_geni_update_uart_error_code( + msm_port, UART_ERROR_TX_FSM_RESET_FAIL); + } } if (msm_port->tx_dma) { @@ -1274,6 +1344,8 @@ static void stop_tx_sequencer(struct uart_port *uport) __func__, geni_read_reg_nolog(uport->membase, SE_GENI_STATUS)); IPC_LOG_MSG(port->ipc_log_misc, "%s: tx_cancel failed 0x%x\n", __func__, geni_read_reg_nolog(uport->membase, SE_GENI_STATUS)); + msm_geni_update_uart_error_code(port, + UART_ERROR_TX_CANCEL_FAIL); port->m_cmd_done = false; reinit_completion(&port->m_cmd_timeout); @@ -1287,6 +1359,8 @@ static void stop_tx_sequencer(struct uart_port *uport) IPC_LOG_MSG(port->ipc_log_misc, "%s: tx abort failed 0x%x\n", __func__, geni_read_reg_nolog(uport->membase, SE_GENI_STATUS)); + msm_geni_update_uart_error_code(port, + UART_ERROR_TX_ABORT_FAIL); } } @@ -1300,9 +1374,12 @@ static void stop_tx_sequencer(struct uart_port *uport) timeout = geni_wait_for_cmd_done(uport, is_irq_masked); - if (timeout) + if (timeout) { IPC_LOG_MSG(port->ipc_log_misc, "%s: tx fsm reset failed\n", __func__); + msm_geni_update_uart_error_code(port, + UART_ERROR_TX_FSM_RESET_FAIL); + } } if (port->tx_dma) { @@ -1362,16 +1439,24 @@ static void start_rx_sequencer(struct uart_port *uport) if (geni_status & S_GENI_CMD_ACTIVE) { if (port->xfer_mode == SE_DMA) { IPC_LOG_MSG(port->ipc_log_misc, - "%s: GENI: 0x%x\n", __func__, geni_status); + "%s: mapping rx dma GENI: 0x%x\n", + __func__, geni_status); geni_se_rx_dma_start(uport->membase, DMA_RX_BUF_SIZE, &port->rx_dma); } msm_geni_serial_stop_rx(uport); } - if (port->xfer_mode == SE_DMA) + if (port->xfer_mode == SE_DMA) { + IPC_LOG_MSG(port->ipc_log_misc, + "%s. mapping rx dma\n", __func__); geni_se_rx_dma_start(uport->membase, DMA_RX_BUF_SIZE, &port->rx_dma); + } + + /* Start RX with the RFR_OPEN to keep RFR in always ready state */ + geni_setup_s_cmd(uport->membase, UART_START_READ, geni_se_param); + msm_geni_serial_enable_interrupts(uport); /* Start RX with the RFR_OPEN to keep RFR in always ready state */ geni_setup_s_cmd(uport->membase, UART_START_READ, geni_se_param); @@ -1439,8 +1524,9 @@ static int stop_rx_sequencer(struct uart_port *uport) struct msm_geni_serial_port *port = GET_DEV_PORT(uport); unsigned long flags = 0; bool is_rx_active; - unsigned int stale_delay; u32 dma_rx_status, s_irq_status; + int usage_count; + int iter = 0; IPC_LOG_MSG(port->ipc_log_misc, "%s\n", __func__); @@ -1454,17 +1540,19 @@ static int stop_rx_sequencer(struct uart_port *uport) } if (!uart_console(uport)) { - msm_geni_serial_set_manual_flow(false, port); + if (!port->bypass_flow_control) + msm_geni_serial_set_manual_flow(false, port); /* - * Wait for the stale timeout to happen if there - * is any data pending in the rx fifo. - * Have a safety factor of 2 to include the interrupt - * and system latencies, add 500usec delay for interrupt - * latency or system delay. + * Wait for the stale timeout around 10msec to happen + * if there is any data pending in the rx fifo. + * This will help to handle incoming rx data in + * stop_rx_sequencer for interrupt latency or + * system delay cases. 
*/ - stale_delay = (STALE_COUNT * SEC_TO_USEC) / port->cur_baud; - stale_delay = (2 * stale_delay) + SYSTEM_DELAY; - udelay(stale_delay); + while (iter < STALE_DELAY) { + iter++; + udelay(10); + } dma_rx_status = geni_read_reg_nolog(uport->membase, SE_DMA_RX_IRQ_STAT); @@ -1483,6 +1571,15 @@ static int stop_rx_sequencer(struct uart_port *uport) IPC_LOG_MSG(port->ipc_log_misc, "%s: Interrupt delay\n", __func__); handle_rx_dma_xfer(s_irq_status, uport); + if (!port->ioctl_count) { + usage_count = atomic_read( + &uport->dev->power.usage_count); + IPC_LOG_MSG(port->ipc_log_misc, + "%s: Abort Stop Rx, extend the PM timer, usage_count:%d\n", + __func__, usage_count); + pm_runtime_mark_last_busy(uport->dev); + return -EBUSY; + } } } @@ -1520,6 +1617,9 @@ static int stop_rx_sequencer(struct uart_port *uport) __func__, timeout, is_rx_active, geni_status); geni_se_dump_dbg_regs(&port->serial_rsc, uport->membase, port->ipc_log_misc); + msm_geni_update_uart_error_code(port, + UART_ERROR_RX_CANCEL_FAIL); + /* * Possible that stop_rx is called from system resume context * for console usecase. In early resume, irq remains disabled @@ -1549,6 +1649,8 @@ static int stop_rx_sequencer(struct uart_port *uport) IPC_LOG_MSG(port->console_log, "%s abort fail timeout:%d is_rx_active:%d 0x%x\n", __func__, timeout, is_rx_active, geni_status); + msm_geni_update_uart_error_code(port, + UART_ERROR_RX_ABORT_FAIL); geni_se_dump_dbg_regs(&port->serial_rsc, uport->membase, port->ipc_log_misc); } @@ -1561,9 +1663,12 @@ static int stop_rx_sequencer(struct uart_port *uport) timeout = geni_wait_for_cmd_done(uport, is_irq_masked); - if (timeout) + if (timeout) { IPC_LOG_MSG(port->ipc_log_misc, "%s: rx fsm reset failed\n", __func__); + msm_geni_update_uart_error_code(port, + UART_ERROR_RX_FSM_RESET_FAIL); + } } } /* Enable the interrupts once the cancel operation is done. */ @@ -1571,7 +1676,7 @@ static int stop_rx_sequencer(struct uart_port *uport) port->s_cmd = false; exit_rx_seq: - if (!uart_console(uport)) + if (!uart_console(uport) && !port->bypass_flow_control) msm_geni_serial_set_manual_flow(true, port); geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS); @@ -1629,6 +1734,8 @@ static int handle_rx_hs(struct uart_port *uport, if (ret != rx_bytes) { dev_err(uport->dev, "%s: ret %d rx_bytes %d\n", __func__, ret, rx_bytes); + msm_geni_update_uart_error_code(msm_port, + UART_ERROR_RX_TTY_INSERT_FAIL); WARN_ON(1); } uport->icount.rx += ret; @@ -1914,6 +2021,8 @@ static bool handle_rx_dma_xfer(u32 s_irq_status, struct uart_port *uport) "%s.Rx Errors. 0x%x parity:%d\n", __func__, dma_rx_status, uport->icount.parity); + msm_geni_update_uart_error_code(msm_port, + UART_ERROR_RX_PARITY_ERROR); drop_rx = true; } else if (dma_rx_status & UART_DMA_RX_BREAK) { uport->icount.brk++; @@ -1921,6 +2030,8 @@ static bool handle_rx_dma_xfer(u32 s_irq_status, struct uart_port *uport) "%s.Rx Errors. 0x%x break:%d\n", __func__, dma_rx_status, uport->icount.brk); + msm_geni_update_uart_error_code(msm_port, + UART_ERROR_RX_BREAK_ERROR); } if (dma_rx_status & RX_EOT || @@ -1928,6 +2039,8 @@ static bool handle_rx_dma_xfer(u32 s_irq_status, struct uart_port *uport) msm_geni_serial_handle_dma_rx(uport, drop_rx); if (!(dma_rx_status & RX_GENI_CANCEL_IRQ)) { + IPC_LOG_MSG(msm_port->ipc_log_misc, + "%s. 
mapping rx dma\n", __func__); geni_se_rx_dma_start(uport->membase, DMA_RX_BUF_SIZE, &msm_port->rx_dma); } else { @@ -1940,6 +2053,8 @@ static bool handle_rx_dma_xfer(u32 s_irq_status, struct uart_port *uport) IPC_LOG_MSG(msm_port->ipc_log_misc, "%s.Rx Errors. 0x%x\n", __func__, dma_rx_status); + msm_geni_update_uart_error_code(msm_port, + UART_ERROR_RX_SBE_ERROR); WARN_ON(1); } @@ -1998,17 +2113,16 @@ static void msm_geni_serial_handle_isr(struct uart_port *uport, IPC_LOG_MSG(msm_port->console_log, "%s.Illegal interrupt. sirq 0x%x mirq:0x%x\n", __func__, s_irq_status, m_irq_status); - else + else { + msm_geni_update_uart_error_code(msm_port, + UART_ERROR_ILLEGAL_INTERRUPT); WARN_ON(1); + } goto exit_geni_serial_isr; } - if (m_irq_status & (M_IO_DATA_ASSERT_EN | M_IO_DATA_DEASSERT_EN)) { + if (m_irq_status & (M_IO_DATA_ASSERT_EN | M_IO_DATA_DEASSERT_EN)) uport->icount.cts++; - IPC_LOG_MSG(msm_port->ipc_log_misc, - "%s. cts counter:%d\n", __func__, - uport->icount.cts); - } if (s_irq_status & S_RX_FIFO_WR_ERR_EN) { uport->icount.overrun++; @@ -2016,6 +2130,8 @@ static void msm_geni_serial_handle_isr(struct uart_port *uport, IPC_LOG_MSG(msm_port->ipc_log_misc, "%s.sirq 0x%x buf_overrun:%d\n", __func__, s_irq_status, uport->icount.buf_overrun); + msm_geni_update_uart_error_code(msm_port, + UART_ERROR_BUFFER_OVERRUN); } dma = geni_read_reg_nolog(uport->membase, SE_GENI_DMA_MODE_EN); @@ -2227,6 +2343,7 @@ static void msm_geni_serial_shutdown(struct uart_port *uport) msm_port->ioctl_count = 0; } + flush_workqueue(msm_port->qwork); ret = pm_runtime_put_sync_suspend(uport->dev); if (ret) { IPC_LOG_MSG(msm_port->ipc_log_pwr, @@ -2238,6 +2355,9 @@ static void msm_geni_serial_shutdown(struct uart_port *uport) disable_irq(msm_port->wakeup_irq); free_irq(msm_port->wakeup_irq, uport); } + + /* Reset UART error to default during port_close() */ + msm_port->uart_error = UART_ERROR_DEFAULT; } IPC_LOG_MSG(msm_port->ipc_log_misc, "%s: End\n", __func__); } @@ -2507,7 +2627,12 @@ static void msm_geni_serial_set_termios(struct uart_port *uport, return; } } + + //Client must control Flow, don't touch RFR during baud change. 
+	port->bypass_flow_control = true;
 	msm_geni_serial_stop_rx(uport);
+	port->bypass_flow_control = false;
+
 	/* baud rate */
 	baud = uart_get_baud_rate(uport, termios, old, 300, 4000000);
 	port->cur_baud = baud;
@@ -2524,6 +2649,8 @@ static void msm_geni_serial_set_termios(struct uart_port *uport,
 	if (ret) {
 		dev_err(uport->dev, "%s: Failed(%d) to find src clk for 0x%x\n",
 			__func__, ret, baud);
+		msm_geni_update_uart_error_code(port,
+				UART_ERROR_SE_CLK_RATE_FIND_FAIL);
 		goto exit_set_termios;
 	}
@@ -2844,6 +2971,7 @@ msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
 	unsigned long clk_rate;
 	unsigned long cfg0, cfg1;
+	is_earlycon = true;
 	if (!uport->membase) {
 		ret = -ENOMEM;
 		goto exit_geni_serial_earlyconsetup;
@@ -3245,8 +3373,11 @@ static int msm_geni_serial_probe(struct platform_device *pdev)
 			UART_CORE2X_VOTE,
 			(DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
-	if (ret)
+	if (ret) {
+		msm_geni_update_uart_error_code(dev_port,
+			UART_ERROR_SE_RESOURCES_INIT_FAIL);
 		goto exit_geni_serial_probe;
+	}
 	dev_port->serial_rsc.ctrl_dev = &pdev->dev;
@@ -3267,6 +3398,8 @@ static int msm_geni_serial_probe(struct platform_device *pdev)
 	if (IS_ERR(dev_port->serial_rsc.se_clk)) {
 		ret = PTR_ERR(dev_port->serial_rsc.se_clk);
 		dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
+		msm_geni_update_uart_error_code(dev_port,
+			UART_ERROR_CLK_GET_FAIL);
 		goto exit_geni_serial_probe;
 	}
@@ -3274,6 +3407,8 @@
 	if (IS_ERR(dev_port->serial_rsc.m_ahb_clk)) {
 		ret = PTR_ERR(dev_port->serial_rsc.m_ahb_clk);
 		dev_err(&pdev->dev, "Err getting M AHB clk %d\n", ret);
+		msm_geni_update_uart_error_code(dev_port,
+			UART_ERROR_CLK_GET_FAIL);
 		goto exit_geni_serial_probe;
 	}
@@ -3281,6 +3416,8 @@
 	if (IS_ERR(dev_port->serial_rsc.s_ahb_clk)) {
 		ret = PTR_ERR(dev_port->serial_rsc.s_ahb_clk);
 		dev_err(&pdev->dev, "Err getting S AHB clk %d\n", ret);
+		msm_geni_update_uart_error_code(dev_port,
+			UART_ERROR_CLK_GET_FAIL);
 		goto exit_geni_serial_probe;
 	}
@@ -3420,13 +3557,34 @@ static int msm_geni_serial_probe(struct platform_device *pdev)
 	device_create_file(uport->dev, &dev_attr_ver_info);
 	msm_geni_serial_debug_init(uport, is_console);
 	dev_port->port_setup = false;
+	dev_port->uart_error = UART_ERROR_DEFAULT;
 	ret = msm_geni_serial_get_ver_info(uport);
 	if (ret)
 		goto exit_wakeup_unregister;
+	if (!dev_port->is_console) {
+		dev_port->qwork = create_singlethread_workqueue(
+					"geni_serial_dump_wq");
+		if (!dev_port->qwork) {
+			dev_err(&pdev->dev, "cannot create workqueue\n");
+			goto exit_wakeup_unregister;
+		}
+		INIT_WORK(&dev_port->work, msm_geni_serial_worker);
+	}
+
 	ret = uart_add_one_port(drv, uport);
 	if (ret)
-		goto exit_wakeup_unregister;
+		goto exit_workqueue_destroy;
+
+	if (!uart_console(uport))
+		spin_lock_init(&dev_port->rx_lock);
+
+	/*
+	 * The switch from earlyconsole to the kernel console happens after
+	 * uart_add_one_port(). Hence set is_earlycon to false here.
+ */ + if (is_console) + is_earlycon = false; if (!uart_console(uport)) spin_lock_init(&dev_port->rx_lock); @@ -3436,6 +3594,9 @@ static int msm_geni_serial_probe(struct platform_device *pdev) return 0; +exit_workqueue_destroy: + if (dev_port->qwork) + destroy_workqueue(dev_port->qwork); exit_wakeup_unregister: if (!is_console) wakeup_source_unregister(dev_port->geni_wake); @@ -3450,8 +3611,11 @@ static int msm_geni_serial_remove(struct platform_device *pdev) struct uart_driver *drv = (struct uart_driver *)port->uport.private_data; - if (!uart_console(&port->uport)) + if (!uart_console(&port->uport)) { wakeup_source_unregister(port->geni_wake); + flush_workqueue(port->qwork); + destroy_workqueue(port->qwork); + } uart_remove_one_port(drv, &port->uport); if (port->rx_dma) { geni_se_iommu_free_buf(port->wrapper_dev, &port->rx_dma, @@ -3488,6 +3652,7 @@ static int msm_geni_serial_runtime_suspend(struct device *dev) u32 geni_status = geni_read_reg_nolog(port->uport.membase, SE_GENI_STATUS); + IPC_LOG_MSG(port->ipc_log_pwr, "%s: Start\n", __func__); wait_for_transfers_inflight(&port->uport); /* * Manual RFR On. @@ -3499,10 +3664,12 @@ static int msm_geni_serial_runtime_suspend(struct device *dev) if (ret) { IPC_LOG_MSG(port->ipc_log_pwr, "%s: stop rx failed %d\n", __func__, ret); + /* Flow on from UART */ + msm_geni_serial_allow_rx(port); return -EBUSY; } - geni_status = geni_read_reg_nolog(port->uport.membase, SE_GENI_STATUS); + geni_status = geni_read_reg_nolog(port->uport.membase, SE_GENI_STATUS); if ((geni_status & M_GENI_CMD_ACTIVE)) stop_tx_sequencer(&port->uport); @@ -3517,6 +3684,8 @@ static int msm_geni_serial_runtime_suspend(struct device *dev) ret = se_geni_resources_off(&port->serial_rsc); if (ret) { dev_err(dev, "%s: Error ret %d\n", __func__, ret); + msm_geni_update_uart_error_code(port, + UART_ERROR_SE_RESOURCES_OFF_FAIL); goto exit_runtime_suspend; } @@ -3524,7 +3693,7 @@ static int msm_geni_serial_runtime_suspend(struct device *dev) port->edge_count = 0; enable_irq(port->wakeup_irq); } - IPC_LOG_MSG(port->ipc_log_pwr, "%s:\n", __func__); + IPC_LOG_MSG(port->ipc_log_pwr, "%s: End\n", __func__); __pm_relax(port->geni_wake); exit_runtime_suspend: return ret; @@ -3553,6 +3722,8 @@ static int msm_geni_serial_runtime_resume(struct device *dev) ret = se_geni_resources_on(&port->serial_rsc); if (ret) { dev_err(dev, "%s: Error ret %d\n", __func__, ret); + msm_geni_update_uart_error_code(port, + UART_ERROR_SE_RESOURCES_ON_FAIL); __pm_relax(port->geni_wake); goto exit_runtime_resume; } diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 4caeca67fd14..7906427283e4 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -1160,15 +1160,6 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud, return baud; } -static void msm_init_clock(struct uart_port *port) -{ - struct msm_port *msm_port = UART_TO_MSM(port); - - clk_prepare_enable(msm_port->clk); - clk_prepare_enable(msm_port->pclk); - msm_serial_set_mnd_regs(port); -} - static int msm_startup(struct uart_port *port) { struct msm_port *msm_port = UART_TO_MSM(port); @@ -1178,7 +1169,19 @@ static int msm_startup(struct uart_port *port) snprintf(msm_port->name, sizeof(msm_port->name), "msm_serial%d", port->line); - msm_init_clock(port); + /* + * UART clk must be kept enabled to + * avoid losing received character + */ + ret = clk_prepare_enable(msm_port->clk); + if (ret) + return ret; + + ret = clk_prepare_enable(msm_port->pclk); + if (ret) + goto err_pclk; + + 
msm_serial_set_mnd_regs(port); if (likely(port->fifosize > 12)) rfr_level = port->fifosize - 12; @@ -1217,6 +1220,8 @@ static int msm_startup(struct uart_port *port) clk_disable_unprepare(msm_port->pclk); clk_disable_unprepare(msm_port->clk); +err_pclk: + clk_disable_unprepare(msm_port->clk); return ret; } @@ -1231,6 +1236,7 @@ static void msm_shutdown(struct uart_port *port) if (msm_port->is_uartdm) msm_release_dma(msm_port); + clk_disable_unprepare(msm_port->pclk); clk_disable_unprepare(msm_port->clk); free_irq(port->irq, port); @@ -1397,8 +1403,16 @@ static void msm_power(struct uart_port *port, unsigned int state, switch (state) { case 0: - clk_prepare_enable(msm_port->clk); - clk_prepare_enable(msm_port->pclk); + /* + * UART clk must be kept enabled to + * avoid losing received character + */ + if (clk_prepare_enable(msm_port->clk)) + return; + if (clk_prepare_enable(msm_port->pclk)) { + clk_disable_unprepare(msm_port->clk); + return; + } break; case 3: clk_disable_unprepare(msm_port->clk); @@ -1670,7 +1684,7 @@ static int __init msm_console_setup(struct console *co, char *options) if (unlikely(!port->membase)) return -ENXIO; - msm_init_clock(port); + msm_serial_set_mnd_regs(port); if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index b4838ab8ba95..0e8b5f5b19f3 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2751,10 +2751,14 @@ void __do_SAK(struct tty_struct *tty) struct task_struct *g, *p; struct pid *session; int i; + unsigned long flags; if (!tty) return; - session = tty->session; + + spin_lock_irqsave(&tty->ctrl_lock, flags); + session = get_pid(tty->session); + spin_unlock_irqrestore(&tty->ctrl_lock, flags); tty_ldisc_flush(tty); @@ -2786,6 +2790,7 @@ void __do_SAK(struct tty_struct *tty) task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); + put_pid(session); #endif } diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c index c4ecd66fafef..ffcab80ba77d 100644 --- a/drivers/tty/tty_jobctrl.c +++ b/drivers/tty/tty_jobctrl.c @@ -103,8 +103,8 @@ static void __proc_set_tty(struct tty_struct *tty) put_pid(tty->session); put_pid(tty->pgrp); tty->pgrp = get_pid(task_pgrp(current)); - spin_unlock_irqrestore(&tty->ctrl_lock, flags); tty->session = get_pid(task_session(current)); + spin_unlock_irqrestore(&tty->ctrl_lock, flags); if (current->signal->tty) { tty_debug(tty, "current tty %s not NULL!!\n", current->signal->tty->name); @@ -293,20 +293,23 @@ void disassociate_ctty(int on_exit) spin_lock_irq(¤t->sighand->siglock); put_pid(current->signal->tty_old_pgrp); current->signal->tty_old_pgrp = NULL; - tty = tty_kref_get(current->signal->tty); + spin_unlock_irq(¤t->sighand->siglock); + if (tty) { unsigned long flags; + + tty_lock(tty); spin_lock_irqsave(&tty->ctrl_lock, flags); put_pid(tty->session); put_pid(tty->pgrp); tty->session = NULL; tty->pgrp = NULL; spin_unlock_irqrestore(&tty->ctrl_lock, flags); + tty_unlock(tty); tty_kref_put(tty); } - spin_unlock_irq(¤t->sighand->siglock); /* Now clear signal->tty under the lock */ read_lock(&tasklist_lock); session_clear_tty(task_session(current)); @@ -477,14 +480,19 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t return -ENOTTY; if (retval) return retval; - if (!current->signal->tty || - (current->signal->tty != real_tty) || - (real_tty->session != task_session(current))) - return -ENOTTY; + if (get_user(pgrp_nr, p)) return -EFAULT; if (pgrp_nr < 0) return -EINVAL; + + 
spin_lock_irq(&real_tty->ctrl_lock); + if (!current->signal->tty || + (current->signal->tty != real_tty) || + (real_tty->session != task_session(current))) { + retval = -ENOTTY; + goto out_unlock_ctrl; + } rcu_read_lock(); pgrp = find_vpid(pgrp_nr); retval = -ESRCH; @@ -494,12 +502,12 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t if (session_of_pgrp(pgrp) != task_session(current)) goto out_unlock; retval = 0; - spin_lock_irq(&tty->ctrl_lock); put_pid(real_tty->pgrp); real_tty->pgrp = get_pid(pgrp); - spin_unlock_irq(&tty->ctrl_lock); out_unlock: rcu_read_unlock(); +out_unlock_ctrl: + spin_unlock_irq(&real_tty->ctrl_lock); return retval; } @@ -511,20 +519,30 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t * * Obtain the session id of the tty. If there is no session * return an error. - * - * Locking: none. Reference to current->signal->tty is safe. */ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) { + unsigned long flags; + pid_t sid; + /* * (tty == real_tty) is a cheap way of * testing if the tty is NOT a master pty. */ if (tty == real_tty && current->signal->tty != real_tty) return -ENOTTY; + + spin_lock_irqsave(&real_tty->ctrl_lock, flags); if (!real_tty->session) - return -ENOTTY; - return put_user(pid_vnr(real_tty->session), p); + goto err; + sid = pid_vnr(real_tty->session); + spin_unlock_irqrestore(&real_tty->ctrl_lock, flags); + + return put_user(sid, p); + +err: + spin_unlock_irqrestore(&real_tty->ctrl_lock, flags); + return -ENOTTY; } /* diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 9e12cc6c54c4..9422ea5896be 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -722,6 +722,7 @@ struct dwc3_ep_events { * @desc: usb_endpoint_descriptor pointer * @dwc: pointer to DWC controller * @saved_state: ep state saved during hibernation + * @missed_isoc_packets: counter for missed packets sent * @flags: endpoint flags (wedged, stalled, ...) * @number: endpoint number (1 - 15) * @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK @@ -752,6 +753,7 @@ struct dwc3_ep { struct dwc3 *dwc; u32 saved_state; + u32 missed_isoc_packets; unsigned flags; #define DWC3_EP_ENABLED BIT(0) #define DWC3_EP_STALL BIT(1) diff --git a/drivers/usb/dwc3/debug_ipc.c b/drivers/usb/dwc3/debug_ipc.c index 1873b222dc8c..0b73285f1dc0 100644 --- a/drivers/usb/dwc3/debug_ipc.c +++ b/drivers/usb/dwc3/debug_ipc.c @@ -149,10 +149,11 @@ void dwc3_dbg_dma_unmap(struct dwc3 *dwc, u8 ep_num, struct dwc3_request *req) req->trb->ctrl & DWC3_TRB_CTRL_HWO); } else { ipc_log_string(dwc->dwc_dma_ipc_log_ctxt, - "%02X-%-3.3s %-25.25s 0x%pK 0x%lx %u 0x%lx %d", + "%02X-%-3.3s %-25.25s 0x%pK 0x%lx %u 0x%lx %d %u", ep_num >> 1, ep_num & 1 ? "IN":"OUT", "UNMAP", &req->request, req->request.dma, req->request.length, - req->trb_dma, req->trb->ctrl & DWC3_TRB_CTRL_HWO); + req->trb_dma, req->trb->ctrl & DWC3_TRB_CTRL_HWO, + req->request.actual); } } diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c index 7c4bc127b566..9dfe4b32fb32 100644 --- a/drivers/usb/dwc3/dwc3-msm.c +++ b/drivers/usb/dwc3/dwc3-msm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -318,11 +318,15 @@ struct dwc3_msm { bool suspend; bool use_pdc_interrupts; enum dwc3_id_state id_state; - bool use_pwr_event_for_wakeup; + unsigned long use_pwr_event_for_wakeup; +#define PWR_EVENT_SS_WAKEUP BIT(0) +#define PWR_EVENT_HS_WAKEUP BIT(1) + unsigned long lpm_flags; #define MDWC3_SS_PHY_SUSPEND BIT(0) #define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY BIT(1) #define MDWC3_POWER_COLLAPSE BIT(2) +#define MDWC3_USE_PWR_EVENT_IRQ_FOR_WAKEUP BIT(3) struct notifier_block usbdev_nb; bool hc_died; @@ -337,6 +341,7 @@ struct dwc3_msm { u32 num_gsi_event_buffers; struct dwc3_event_buffer **gsi_ev_buff; int pm_qos_latency; + bool perf_mode; struct pm_qos_request pm_qos_req_dma; struct delayed_work perf_vote_work; struct delayed_work sdp_check; @@ -2008,8 +2013,7 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event, switch (event) { case DWC3_CONTROLLER_ERROR_EVENT: dev_info(mdwc->dev, - "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n", - dwc->irq_cnt); + "DWC3_CONTROLLER_ERROR_EVENT received\n"); dwc3_gadget_disable_irq(dwc); @@ -2373,7 +2377,7 @@ static void configure_usb_wakeup_interrupt(struct dwc3_msm *mdwc, } } -static void enable_usb_pdc_interrupt(struct dwc3_msm *mdwc, bool enable) +static void configure_usb_wakeup_interrupts(struct dwc3_msm *mdwc, bool enable) { if (!enable) goto disable_usb_irq; @@ -2429,7 +2433,7 @@ static void configure_nonpdc_usb_interrupt(struct dwc3_msm *mdwc, } } -static void dwc3_msm_set_ss_pwr_events(struct dwc3_msm *mdwc, bool on) +static void dwc3_msm_set_pwr_events(struct dwc3_msm *mdwc, bool on) { u32 irq_mask, irq_stat; @@ -2440,12 +2444,28 @@ static void dwc3_msm_set_ss_pwr_events(struct dwc3_msm *mdwc, bool on) irq_mask = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_MASK_REG); - if (on) - irq_mask |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK | - PWR_EVNT_LPM_OUT_RX_ELECIDLE_IRQ_MASK); - else - irq_mask &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK | - PWR_EVNT_LPM_OUT_RX_ELECIDLE_IRQ_MASK); + if (on) { + /* + * In case of platforms which use mpm interrupts, in case where + * suspend happens with a hs/fs/ls device connected in host mode + * DP/DM falling edge will be monitored, but gic doesn't have + * capability to detect falling edge. So program power event irq + * to notify exit from lpm in such case. 
+ */ + if (mdwc->use_pwr_event_for_wakeup & PWR_EVENT_HS_WAKEUP) + irq_mask |= PWR_EVNT_LPM_OUT_L2_MASK; + if ((mdwc->use_pwr_event_for_wakeup & PWR_EVENT_SS_WAKEUP) + && !(mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND)) + irq_mask |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK | + PWR_EVNT_LPM_OUT_RX_ELECIDLE_IRQ_MASK); + } else { + if (mdwc->use_pwr_event_for_wakeup & PWR_EVENT_HS_WAKEUP) + irq_mask &= ~PWR_EVNT_LPM_OUT_L2_MASK; + if ((mdwc->use_pwr_event_for_wakeup & PWR_EVENT_SS_WAKEUP) + && !(mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND)) + irq_mask &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK | + PWR_EVNT_LPM_OUT_RX_ELECIDLE_IRQ_MASK); + } dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_MASK_REG, irq_mask); } @@ -2559,7 +2579,7 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool enable_wakeup) dbg_event(0xFF, "pend evt", 0); /* disable power event irq, hs and ss phy irq is used as wake up src */ - disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq); + disable_irq_nosync(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq); dwc3_set_phy_speed_flags(mdwc); /* Suspend HS PHY */ @@ -2576,7 +2596,9 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool enable_wakeup) ((mdwc->hs_phy->flags & (PHY_HSFS_MODE | PHY_LS_MODE)) && !dwc3_msm_is_superspeed(mdwc))); can_suspend_ssphy = dwc->maximum_speed >= USB_SPEED_SUPER && - (!mdwc->use_pwr_event_for_wakeup || no_active_ss); + (!(mdwc->use_pwr_event_for_wakeup & + PWR_EVENT_SS_WAKEUP) || no_active_ss || + !enable_wakeup); /* Suspend SS PHY */ if (can_suspend_ssphy) { if (mdwc->in_host_mode) { @@ -2590,11 +2612,21 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool enable_wakeup) mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE; usb_phy_set_suspend(mdwc->ss_phy, 1); mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND; - } else if (mdwc->use_pwr_event_for_wakeup) { - dwc3_msm_set_ss_pwr_events(mdwc, true); - enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq); + } else if (mdwc->use_pwr_event_for_wakeup & PWR_EVENT_SS_WAKEUP) { + mdwc->lpm_flags |= MDWC3_USE_PWR_EVENT_IRQ_FOR_WAKEUP; } + /* + * When operating in HS host mode, check if pwr event IRQ is + * required for wakeup. + */ + if (mdwc->in_host_mode && (mdwc->use_pwr_event_for_wakeup + & PWR_EVENT_HS_WAKEUP)) + mdwc->lpm_flags |= MDWC3_USE_PWR_EVENT_IRQ_FOR_WAKEUP; + + if (mdwc->lpm_flags & MDWC3_USE_PWR_EVENT_IRQ_FOR_WAKEUP) + dwc3_msm_set_pwr_events(mdwc, true); + /* make sure above writes are completed before turning off clocks */ wmb(); @@ -2655,11 +2687,14 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool enable_wakeup) /* * with DCP or during cable disconnect, we dont require wakeup * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in - * case of host bus suspend and device bus suspend. + * case of host bus suspend and device bus suspend. Also in + * case of platforms with mpm interrupts and snps phy, enable + * dpse hsphy irq and dmse hsphy irq as done for pdc interrupts. 
*/ if (!(mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) && enable_wakeup) { - if (mdwc->use_pdc_interrupts) { - enable_usb_pdc_interrupt(mdwc, true); + if (mdwc->use_pdc_interrupts || + !mdwc->wakeup_irq[HS_PHY_IRQ].irq) { + configure_usb_wakeup_interrupts(mdwc, true); } else { uirq = &mdwc->wakeup_irq[HS_PHY_IRQ]; configure_nonpdc_usb_interrupt(mdwc, uirq, true); @@ -2669,6 +2704,9 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool enable_wakeup) mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY; } + if (mdwc->lpm_flags & MDWC3_USE_PWR_EVENT_IRQ_FOR_WAKEUP) + enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq); + dev_info(mdwc->dev, "DWC3 in low power mode\n"); dbg_event(0xFF, "Ctl Sus", atomic_read(&dwc->in_lpm)); @@ -2768,10 +2806,10 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc) * Disable any wakeup events that were enabled if pwr_event_irq * is used as wakeup interrupt. */ - if (mdwc->use_pwr_event_for_wakeup && - !(mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND)) { + if (mdwc->lpm_flags & MDWC3_USE_PWR_EVENT_IRQ_FOR_WAKEUP) { disable_irq_nosync(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq); - dwc3_msm_set_ss_pwr_events(mdwc, false); + dwc3_msm_set_pwr_events(mdwc, false); + mdwc->lpm_flags &= ~MDWC3_USE_PWR_EVENT_IRQ_FOR_WAKEUP; } /* Resume SS PHY */ @@ -2821,8 +2859,9 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc) /* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */ if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) { - if (mdwc->use_pdc_interrupts) { - enable_usb_pdc_interrupt(mdwc, false); + if (mdwc->use_pdc_interrupts || + !mdwc->wakeup_irq[HS_PHY_IRQ].irq) { + configure_usb_wakeup_interrupts(mdwc, false); } else { uirq = &mdwc->wakeup_irq[HS_PHY_IRQ]; configure_nonpdc_usb_interrupt(mdwc, uirq, false); @@ -2938,7 +2977,6 @@ static void dwc3_resume_work(struct work_struct *w) struct extcon_dev *edev = NULL; const char *edev_name; char *eud_str; - bool eud_connected = false; int ret = 0; dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__); @@ -2956,32 +2994,17 @@ static void dwc3_resume_work(struct work_struct *w) /* Skip querying speed and cc_state for EUD edev */ eud_str = strnstr(edev_name, "eud", strlen(edev_name)); if (eud_str) - eud_connected = true; + goto skip_update; } + dwc->maximum_speed = dwc->max_hw_supp_speed; /* Check speed and Type-C polarity values in order to configure PHY */ - if (!eud_connected && edev && extcon_get_state(edev, extcon_id)) { - dwc->maximum_speed = dwc->max_hw_supp_speed; - dwc->gadget.max_speed = dwc->maximum_speed; - + if (edev && extcon_get_state(edev, extcon_id)) { ret = extcon_get_property(edev, extcon_id, EXTCON_PROP_USB_SS, &val); - if (!ret && val.intval == 0) { + if (!ret && val.intval == 0) dwc->maximum_speed = USB_SPEED_HIGH; - dwc->gadget.max_speed = dwc->maximum_speed; - } - - if (mdwc->override_usb_speed && - mdwc->override_usb_speed <= dwc->maximum_speed) { - dwc->maximum_speed = mdwc->override_usb_speed; - dwc->gadget.max_speed = dwc->maximum_speed; - dbg_event(0xFF, "override_speed", - mdwc->override_usb_speed); - mdwc->override_usb_speed = 0; - } - - dbg_event(0xFF, "speed", dwc->maximum_speed); ret = extcon_get_property(edev, extcon_id, EXTCON_PROP_USB_TYPEC_POLARITY, &val); @@ -3001,6 +3024,18 @@ static void dwc3_resume_work(struct work_struct *w) dwc->gadget.is_selfpowered = 0; } +skip_update: + dbg_log_string("max_speed:%d hw_supp_speed:%d override_speed:%d", + dwc->maximum_speed, dwc->max_hw_supp_speed, + mdwc->override_usb_speed); + if (mdwc->override_usb_speed && + mdwc->override_usb_speed <= 
dwc->maximum_speed) { + dwc->maximum_speed = mdwc->override_usb_speed; + dwc->gadget.max_speed = dwc->maximum_speed; + } + + dbg_event(0xFF, "speed", dwc->maximum_speed); + /* * Skip scheduling sm work if no work is pending. When boot-up * with USB cable connected, usb state m/c is skipped to avoid @@ -3081,6 +3116,13 @@ static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc) irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK; } + /* Handle exit from L2 events */ + if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) { + dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L2_MASK\n", + __func__); + irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK; + irq_clear |= PWR_EVNT_LPM_OUT_L2_MASK; + } /* Unhandled events */ if (irq_stat) dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n", @@ -3910,8 +3952,17 @@ static int dwc3_msm_probe(struct platform_device *pdev) * On platforms with SS PHY that do not support ss_phy_irq for wakeup * events, use pwr_event_irq for wakeup events in superspeed mode. */ - mdwc->use_pwr_event_for_wakeup = dwc->maximum_speed >= USB_SPEED_SUPER - && !mdwc->wakeup_irq[SS_PHY_IRQ].irq; + if (dwc->maximum_speed >= USB_SPEED_SUPER + && !mdwc->wakeup_irq[SS_PHY_IRQ].irq) + mdwc->use_pwr_event_for_wakeup |= PWR_EVENT_SS_WAKEUP; + + /* + * On platforms with mpm interrupts and snps phy, when operating in + * HS host mode use power event irq for wakeup events as GIC is not + * capable to detect falling edge of dp/dm hsphy irq. + */ + if (!mdwc->use_pdc_interrupts && !mdwc->wakeup_irq[HS_PHY_IRQ].irq) + mdwc->use_pwr_event_for_wakeup |= PWR_EVENT_HS_WAKEUP; /* * Clocks and regulators will not be turned on until the first time @@ -4063,6 +4114,7 @@ static int dwc3_msm_remove(struct platform_device *pdev) } cancel_delayed_work_sync(&mdwc->perf_vote_work); + msm_dwc3_perf_vote_update(mdwc, false); cancel_delayed_work_sync(&mdwc->sm_work); if (mdwc->hs_phy) @@ -4157,10 +4209,12 @@ static int dwc3_msm_host_notifier(struct notifier_block *nb, static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode) { - static bool curr_perf_mode; int latency = mdwc->pm_qos_latency; + struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3); - if ((curr_perf_mode == perf_mode) || !latency) + dwc->irq_cnt = 0; + + if ((mdwc->perf_mode == perf_mode) || !latency) return; if (perf_mode) @@ -4169,7 +4223,7 @@ static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode) pm_qos_update_request(&mdwc->pm_qos_req_dma, PM_QOS_DEFAULT_VALUE); - curr_perf_mode = perf_mode; + mdwc->perf_mode = perf_mode; pr_debug("%s: latency updated to: %d\n", __func__, perf_mode ? 
latency : PM_QOS_DEFAULT_VALUE); } @@ -4179,16 +4233,14 @@ static void msm_dwc3_perf_vote_work(struct work_struct *w) struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, perf_vote_work.work); struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3); - static unsigned long last_irq_cnt; bool in_perf_mode = false; - if (dwc->irq_cnt - last_irq_cnt >= PM_QOS_THRESHOLD) + if (dwc->irq_cnt >= PM_QOS_THRESHOLD) in_perf_mode = true; pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%lu\n", - __func__, in_perf_mode, (dwc->irq_cnt - last_irq_cnt)); + __func__, in_perf_mode, dwc->irq_cnt); - last_irq_cnt = dwc->irq_cnt; msm_dwc3_perf_vote_update(mdwc, in_perf_mode); schedule_delayed_work(&mdwc->perf_vote_work, msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC)); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 8955fc90c9e2..12579d7fe365 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -230,6 +230,10 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc, struct dwc3_ep *dep) && dwc3_is_usb31(dwc)) mult = 6; + if ((dep->endpoint.maxburst > 6) && + usb_endpoint_xfer_isoc(dep->endpoint.desc)) + mult = 6; + tmp = ((max_packet + mdwidth) * mult) + mdwidth; fifo_size = DIV_ROUND_UP(tmp, mdwidth); dep->fifo_depth = fifo_size; @@ -315,6 +319,12 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, dwc3_gadget_del_and_unmap_request(dep, req, status); + if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && + (list_empty(&dep->started_list))) { + dep->flags |= DWC3_EP_PENDING_REQUEST; + dbg_event(dep->number, "STARTEDLISTEMPTY", 0); + } + spin_unlock(&dwc->lock); usb_gadget_giveback_request(&dep->endpoint, &req->request); spin_lock(&dwc->lock); @@ -1050,6 +1060,8 @@ static int dwc3_gadget_ep_disable(struct usb_ep *ep) spin_lock_irqsave(&dwc->lock, flags); ret = __dwc3_gadget_ep_disable(dep); dbg_event(dep->number, "DISABLE", ret); + dbg_event(dep->number, "MISSEDISOCPKTS", dep->missed_isoc_packets); + dep->missed_isoc_packets = 0; spin_unlock_irqrestore(&dwc->lock, flags); pm_runtime_mark_last_busy(dwc->sysdev); pm_runtime_put_sync_autosuspend(dwc->sysdev); @@ -1624,7 +1636,7 @@ static void __dwc3_gadget_start_isoc(struct dwc3_ep *dep) wraparound_bits += BIT(14); dep->frame_number = __dwc3_gadget_get_frame(dep->dwc) + - 2 * dep->interval; + max_t(u32, 16, 2 * dep->interval); /* align uf to ep interval */ dep->frame_number = (wraparound_bits | dep->frame_number) & @@ -1683,8 +1695,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) if ((dep->flags & DWC3_EP_PENDING_REQUEST)) { if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) { __dwc3_gadget_start_isoc(dep); - return 0; + dep->flags &= ~DWC3_EP_PENDING_REQUEST; } + return 0; } } @@ -3177,12 +3190,18 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep, if (event->status & DEPEVT_STATUS_MISSED_ISOC) { status = -EXDEV; - if (list_empty(&dep->started_list)) - stop = true; + dep->missed_isoc_packets++; + dbg_event(dep->number, "MISSEDISOC", 0); } dwc3_gadget_ep_cleanup_completed_requests(dep, event, status); + if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && + (list_empty(&dep->started_list))) { + stop = true; + dbg_event(dep->number, "STOPXFER", dep->frame_number); + } + if (stop) dwc3_stop_active_transfer(dwc, dep->number, true); /* diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 7c826ab0e5f0..fd902417d044 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -2288,7 +2288,8 @@ int 
composite_dev_prepare(struct usb_composite_driver *composite, if (!cdev->req) return -ENOMEM; - cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL); + cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ + + (gadget->extra_buf_alloc), GFP_KERNEL); if (!cdev->req->buf) goto fail; diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 64c86beee4a5..60652e81a627 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -1659,7 +1659,14 @@ static int android_setup(struct usb_gadget *gadget, static void android_disconnect(struct usb_gadget *gadget) { struct usb_composite_dev *cdev = get_gadget_data(gadget); - struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev); + struct gadget_info *gi; + + if (!cdev) { + pr_err("%s: gadget is not connected\n", __func__); + return; + } + + gi = container_of(cdev, struct gadget_info, cdev); /* FIXME: There's a race between usb_gadget_udc_stop() which is likely * to set the gadget driver to NULL in the udc driver and this drivers diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile index 2b07aae56d24..f27f707088f5 100644 --- a/drivers/usb/gadget/function/Makefile +++ b/drivers/usb/gadget/function/Makefile @@ -71,5 +71,5 @@ usb_f_ptp-y := f_ptp.o obj-$(CONFIG_USB_F_PTP) += usb_f_ptp.o usb_f_qcrndis-y := f_qc_rndis.o u_data_ipa.o obj-$(CONFIG_USB_F_QCRNDIS) += usb_f_qcrndis.o -usb_f_rmnet_bam-y := f_rmnet.o u_ctrl_qti.o +usb_f_rmnet_bam-y := f_rmnet.o u_ctrl_qti.o u_bam_dmux.o obj-$(CONFIG_USB_F_RMNET_BAM) += usb_f_rmnet_bam.o diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c index 57a8a522f45d..a8f1740712d8 100644 --- a/drivers/usb/gadget/function/f_acm.c +++ b/drivers/usb/gadget/function/f_acm.c @@ -664,7 +664,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) /* allocate notification */ acm->notify_req = gs_alloc_req(ep, sizeof(struct usb_cdc_notification) + 2, - GFP_KERNEL); + cdev->gadget->extra_buf_alloc, GFP_KERNEL); if (!acm->notify_req) goto fail; diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c index 9e8d06bdd348..d018ebb4c611 100644 --- a/drivers/usb/gadget/function/f_cdev.c +++ b/drivers/usb/gadget/function/f_cdev.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2011, 2013-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2011, 2013-2021, The Linux Foundation. All rights reserved. * Linux Foundation chooses to take subject only to the GPLv2 license terms, * and distributes only under these terms. * @@ -86,7 +86,7 @@ struct cserial { struct f_cdev { struct cdev fcdev_cdev; - struct device *dev; + struct device dev; unsigned int port_num; char name[sizeof(DEVICE_NAME) + 2]; int minor; @@ -128,6 +128,10 @@ struct f_cdev { unsigned long nbytes_to_port_bridge; unsigned long nbytes_from_port_bridge; + unsigned int single_packet_mode; + unsigned int rx_buf_size; + unsigned int rx_queue_size; + struct dentry *debugfs_root; /* To test remote wakeup using debugfs */ @@ -140,6 +144,7 @@ struct f_cdev_opts { char *func_name; u8 port_num; u8 proto; + int refcnt; }; static int major, minors; @@ -406,7 +411,14 @@ static void port_complete_set_line_coding(struct usb_ep *ep, static void usb_cser_free_func(struct usb_function *f) { - /* Do nothing as cser_alloc() doesn't alloc anything. 
*/ + unsigned long flags; + struct f_cdev_opts *opts = container_of(f->fi, struct f_cdev_opts, + func_inst); + struct f_cdev *port = opts->port; + + spin_lock_irqsave(&port->port_lock, flags); + opts->refcnt--; + spin_unlock_irqrestore(&port->port_lock, flags); } static int @@ -752,7 +764,8 @@ static void usb_cser_free_requests(struct usb_ep *ep, struct list_head *head) } static struct usb_request * -usb_cser_alloc_req(struct usb_ep *ep, unsigned int len, gfp_t flags) +usb_cser_alloc_req(struct usb_ep *ep, unsigned int len, size_t extra_sz, + gfp_t flags) { struct usb_request *req; @@ -763,7 +776,7 @@ usb_cser_alloc_req(struct usb_ep *ep, unsigned int len, gfp_t flags) } req->length = len; - req->buf = kmalloc(len, flags); + req->buf = kmalloc(len + extra_sz, flags); if (!req->buf) { pr_err("request buf allocation failed\n"); usb_ep_free_request(ep, req); @@ -816,7 +829,8 @@ static int usb_cser_bind(struct usb_configuration *c, struct usb_function *f) ep->driver_data = cdev; /* allocate notification */ port->port_usb.notify_req = usb_cser_alloc_req(ep, - sizeof(struct usb_cdc_notification) + 2, GFP_KERNEL); + sizeof(struct usb_cdc_notification) + 2, + cdev->gadget->extra_buf_alloc, GFP_KERNEL); if (!port->port_usb.notify_req) goto fail; @@ -873,13 +887,16 @@ static void cser_free_inst(struct usb_function_instance *fi) opts = container_of(fi, struct f_cdev_opts, func_inst); if (opts->port) { - device_destroy(fcdev_classp, MKDEV(major, opts->port->minor)); - cdev_del(&opts->port->fcdev_cdev); + cdev_device_del(&opts->port->fcdev_cdev, &opts->port->dev); + mutex_lock(&chardev_ida_lock); + ida_simple_remove(&chardev_ida, opts->port->minor); + mutex_unlock(&chardev_ida_lock); usb_cser_debugfs_exit(opts->port); + put_device(&opts->port->dev); } + usb_cser_chardev_deinit(); kfree(opts->func_name); - kfree(opts->port); kfree(opts); } @@ -895,7 +912,7 @@ static void usb_cser_unbind(struct usb_configuration *c, struct usb_function *f) } static int usb_cser_alloc_requests(struct usb_ep *ep, struct list_head *head, - int num, int size, + int num, int size, size_t extra_sz, void (*cb)(struct usb_ep *ep, struct usb_request *)) { int i; @@ -905,7 +922,7 @@ static int usb_cser_alloc_requests(struct usb_ep *ep, struct list_head *head, ep, head, num, size, cb); for (i = 0; i < num; i++) { - req = usb_cser_alloc_req(ep, size, GFP_ATOMIC); + req = usb_cser_alloc_req(ep, size, extra_sz, GFP_ATOMIC); if (!req) { pr_debug("req allocated:%d\n", i); return list_empty(head) ? 
-ENOMEM : 0; @@ -945,7 +962,7 @@ static void usb_cser_start_rx(struct f_cdev *port) req = list_entry(pool->next, struct usb_request, list); list_del_init(&req->list); - req->length = BRIDGE_RX_BUF_SIZE; + req->length = port->rx_buf_size; req->complete = usb_cser_read_complete; spin_unlock_irqrestore(&port->port_lock, flags); ret = usb_ep_queue(ep, req, GFP_KERNEL); @@ -1025,6 +1042,8 @@ static void usb_cser_write_complete(struct usb_ep *ep, struct usb_request *req) static void usb_cser_start_io(struct f_cdev *port) { + struct usb_function *f = &port->port_usb.func; + struct usb_composite_dev *cdev = f->config->cdev; int ret = -ENODEV; unsigned long flags; @@ -1040,7 +1059,9 @@ static void usb_cser_start_io(struct f_cdev *port) ret = usb_cser_alloc_requests(port->port_usb.out, &port->read_pool, - BRIDGE_RX_QUEUE_SIZE, BRIDGE_RX_BUF_SIZE, + port->rx_queue_size, + port->rx_buf_size, + 0, usb_cser_read_complete); if (ret) { pr_err("unable to allocate out requests\n"); @@ -1050,6 +1071,7 @@ static void usb_cser_start_io(struct f_cdev *port) ret = usb_cser_alloc_requests(port->port_usb.in, &port->write_pool, BRIDGE_TX_QUEUE_SIZE, BRIDGE_TX_BUF_SIZE, + cdev->gadget->extra_buf_alloc, usb_cser_write_complete); if (ret) { usb_cser_free_requests(port->port_usb.out, &port->read_pool); @@ -1103,13 +1125,10 @@ int f_cdev_open(struct inode *inode, struct file *file) struct f_cdev *port; port = container_of(inode->i_cdev, struct f_cdev, fcdev_cdev); - if (!port) { - pr_err("Port is NULL.\n"); - return -EINVAL; - } - - if (port && port->port_open) { + get_device(&port->dev); + if (port->port_open) { pr_err("port is already opened.\n"); + put_device(&port->dev); return -EBUSY; } @@ -1119,6 +1138,7 @@ int f_cdev_open(struct inode *inode, struct file *file) port->is_connected); if (ret) { pr_debug("open interrupted.\n"); + put_device(&port->dev); return ret; } @@ -1138,16 +1158,12 @@ int f_cdev_release(struct inode *inode, struct file *file) struct f_cdev *port; port = file->private_data; - if (!port) { - pr_err("port is NULL.\n"); - return -EINVAL; - } - spin_lock_irqsave(&port->port_lock, flags); port->port_open = false; port->cbits_updated = false; spin_unlock_irqrestore(&port->port_lock, flags); pr_debug("port(%s)(%pK) is closed.\n", port->name, port); + put_device(&port->dev); return 0; } @@ -1240,6 +1256,8 @@ ssize_t f_cdev_read(struct file *file, current_rx_req = NULL; current_rx_buf = NULL; } + if (port->single_packet_mode) + break; } port->pending_rx_bytes = pending_rx_bytes; @@ -1714,11 +1732,17 @@ static void usb_cser_debugfs_exit(struct f_cdev *port) debugfs_remove_recursive(port->debugfs_root); } +static void cdev_device_release(struct device *dev) +{ + struct f_cdev *port = container_of(dev, struct f_cdev, dev); + + pr_debug("Free cdev port(%d)\n", port->port_num); + kfree(port); +} + static struct f_cdev *f_cdev_alloc(char *func_name, int portno) { int ret; - dev_t dev; - struct device *device; struct f_cdev *port; port = kzalloc(sizeof(struct f_cdev), GFP_KERNEL); @@ -1758,6 +1782,11 @@ static struct f_cdev *f_cdev_alloc(char *func_name, int portno) INIT_LIST_HEAD(&port->read_queued); INIT_LIST_HEAD(&port->write_pool); + /* Fill rx bridge parameters */ + port->rx_buf_size = BRIDGE_RX_BUF_SIZE; + port->rx_queue_size = BRIDGE_RX_QUEUE_SIZE; + port->single_packet_mode = 0; + port->fcdev_wq = create_singlethread_workqueue(port->name); if (!port->fcdev_wq) { pr_err("Unable to create workqueue fcdev_wq for port:%s\n", @@ -1768,27 +1797,24 @@ static struct f_cdev *f_cdev_alloc(char *func_name, int 
portno) /* create char device */ cdev_init(&port->fcdev_cdev, &f_cdev_fops); - dev = MKDEV(major, port->minor); - ret = cdev_add(&port->fcdev_cdev, dev, 1); + device_initialize(&port->dev); + port->dev.class = fcdev_classp; + port->dev.parent = NULL; + port->dev.release = cdev_device_release; + port->dev.devt = MKDEV(major, port->minor); + dev_set_name(&port->dev, port->name); + ret = cdev_device_add(&port->fcdev_cdev, &port->dev); if (ret) { pr_err("Failed to add cdev for port(%s)\n", port->name); goto err_cdev_add; } - device = device_create(fcdev_classp, NULL, dev, NULL, port->name); - if (IS_ERR(device)) { - ret = PTR_ERR(device); - goto err_create_dev; - } - usb_cser_debugfs_init(port); pr_info("port_name:%s (%pK) portno:(%d)\n", port->name, port, port->port_num); return port; -err_create_dev: - cdev_del(&port->fcdev_cdev); err_cdev_add: destroy_workqueue(port->fcdev_wq); err_get_ida: @@ -1927,9 +1953,64 @@ static ssize_t usb_cser_status_store(struct config_item *item, return len; } +#define CDEV_BUF_ATTRIBUTE(name) \ +static ssize_t usb_cser_##name##_show(struct config_item *item, \ + char *page) \ +{ \ + struct f_cdev *port = to_f_cdev_opts(item)->port; \ + unsigned long flags; \ + int ret; \ + \ + spin_lock_irqsave(&port->port_lock, flags); \ + ret = scnprintf(page, PAGE_SIZE, "%u\n", \ + port->name); \ + spin_unlock_irqrestore(&port->port_lock, flags); \ + \ + return ret; \ +} \ + \ +static ssize_t usb_cser_##name##_store(struct config_item *item, \ + const char *page, size_t len) \ +{ \ + struct f_cdev_opts *opts = to_f_cdev_opts(item); \ + struct f_cdev *port = opts->port; \ + unsigned long flags; \ + int ret; \ + unsigned int val; \ + \ + spin_lock_irqsave(&port->port_lock, flags); \ + if (opts->refcnt) { \ + ret = -EBUSY; \ + goto end; \ + } \ + \ + ret = kstrtouint(page, 0, &val); \ + if (ret) \ + goto end; \ + \ + port->name = val; \ + ret = len; \ + \ +end: \ + spin_unlock_irqrestore(&port->port_lock, flags); \ + return ret; \ +} \ + \ + +CDEV_BUF_ATTRIBUTE(rx_buf_size); +CDEV_BUF_ATTRIBUTE(rx_queue_size); +CDEV_BUF_ATTRIBUTE(single_packet_mode); + +CONFIGFS_ATTR(usb_cser_, single_packet_mode); CONFIGFS_ATTR(usb_cser_, status); +CONFIGFS_ATTR(usb_cser_, rx_buf_size); +CONFIGFS_ATTR(usb_cser_, rx_queue_size); + static struct configfs_attribute *cserial_attrs[] = { + &usb_cser_attr_single_packet_mode, &usb_cser_attr_status, + &usb_cser_attr_rx_buf_size, + &usb_cser_attr_rx_queue_size, NULL, }; @@ -2040,6 +2121,11 @@ static struct usb_function *cser_alloc(struct usb_function_instance *fi) { struct f_cdev_opts *opts = to_fi_cdev_opts(fi); struct f_cdev *port = opts->port; + unsigned long flags; + + spin_lock_irqsave(&port->port_lock, flags); + opts->refcnt++; + spin_unlock_irqrestore(&port->port_lock, flags); port->port_usb.func.name = "cser"; port->port_usb.func.strings = usb_cser_strings; diff --git a/drivers/usb/gadget/function/f_diag.c b/drivers/usb/gadget/function/f_diag.c index f2eb6e4be3c5..c6366df9f627 100644 --- a/drivers/usb/gadget/function/f_diag.c +++ b/drivers/usb/gadget/function/f_diag.c @@ -462,6 +462,7 @@ int usb_diag_alloc_req(struct usb_diag_ch *ch, int n_write, int n_read) } EXPORT_SYMBOL(usb_diag_alloc_req); #define DWC3_MAX_REQUEST_SIZE (16 * 1024 * 1024) +#define CI_MAX_REQUEST_SIZE (16 * 1024) /** * usb_diag_request_size - Max request size for controller * @ch: Channel handler @@ -471,6 +472,16 @@ EXPORT_SYMBOL(usb_diag_alloc_req); */ int usb_diag_request_size(struct usb_diag_ch *ch) { + struct diag_context *ctxt = ch->priv_usb; + struct usb_composite_dev 
*cdev; + + if (!ctxt) + return 0; + + cdev = ctxt->cdev; + if (cdev->gadget->is_chipidea) + return CI_MAX_REQUEST_SIZE; + return DWC3_MAX_REQUEST_SIZE; } EXPORT_SYMBOL(usb_diag_request_size); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 511d46db6049..988bae22c1fd 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -265,7 +265,8 @@ static void ffs_closed(struct ffs_data *ffs); static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock) __attribute__((warn_unused_result, nonnull)); -static char *ffs_prepare_buffer(const char __user *buf, size_t len) +static char *ffs_prepare_buffer(const char __user *buf, size_t len, + size_t extra_buf_alloc) __attribute__((warn_unused_result, nonnull)); @@ -342,6 +343,7 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf, size_t len, loff_t *ptr) { struct ffs_data *ffs = file->private_data; + struct usb_gadget *gadget = ffs->gadget; ssize_t ret; char *data; @@ -369,7 +371,7 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf, break; } - data = ffs_prepare_buffer(buf, len); + data = ffs_prepare_buffer(buf, len, 0); if (IS_ERR(data)) { ret = PTR_ERR(data); break; @@ -441,7 +443,7 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf, spin_unlock_irq(&ffs->ev.waitq.lock); - data = ffs_prepare_buffer(buf, len); + data = ffs_prepare_buffer(buf, len, gadget->extra_buf_alloc); if (IS_ERR(data)) { ret = PTR_ERR(data); break; @@ -938,19 +940,21 @@ static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile, static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) { struct ffs_epfile *epfile = file->private_data; - struct ffs_data *ffs = epfile->ffs; + struct ffs_data *ffs; struct usb_request *req; struct ffs_ep *ep; char *data = NULL; ssize_t ret, data_len = -EINVAL; int halt; - - ffs_log("enter: %s", epfile->name); + size_t extra_buf_alloc = 0; /* Are we still active? 
*/ if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) return -ENODEV; + ffs = epfile->ffs; + ffs_log("enter: %s", epfile->name); + /* Wait for endpoint to be enabled */ ep = epfile->ep; if (!ep) { @@ -1022,7 +1026,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) data_len = usb_ep_align_maybe(gadget, ep->ep, data_len); spin_unlock_irq(&epfile->ffs->eps_lock); - data = kmalloc(data_len, GFP_KERNEL); + extra_buf_alloc = gadget->extra_buf_alloc; + if (!io_data->read) + data = kmalloc(data_len + extra_buf_alloc, + GFP_KERNEL); + else + data = kmalloc(data_len, GFP_KERNEL); if (unlikely(!data)) { ret = -ENOMEM; goto error_mutex; @@ -1675,8 +1684,13 @@ ffs_fs_mount(struct file_system_type *t, int flags, return ERR_PTR(ret); ffs = ffs_data_new(dev_name); - if (unlikely(!ffs)) - return ERR_PTR(-ENOMEM); + if (IS_ERR_OR_NULL(ffs)) { + if (!ffs) + return ERR_PTR(-ENOMEM); + else + return ERR_PTR((long) ffs); + } + ffs->file_perms = data.perms; ffs->no_disconnect = data.no_disconnect; @@ -3767,8 +3781,10 @@ static void ffs_func_unbind(struct usb_configuration *c, ffs->func = NULL; } - if (!--opts->refcnt) + if (!--opts->refcnt) { + ffs_event_add(ffs, FUNCTIONFS_UNBIND); functionfs_unbind(ffs); + } /* cleanup after autoconfig */ spin_lock_irqsave(&func->ffs->eps_lock, flags); @@ -3791,10 +3807,12 @@ static void ffs_func_unbind(struct usb_configuration *c, func->function.ssp_descriptors = NULL; func->interfaces_nums = NULL; - ffs_event_add(ffs, FUNCTIONFS_UNBIND); + if (opts->refcnt) { + ffs_event_add(ffs, FUNCTIONFS_UNBIND); - ffs_log("exit: state %d setup_state %d flag %lu", ffs->state, - ffs->setup_state, ffs->flags); + ffs_log("exit: state %d setup_state %d flag %lu", ffs->state, + ffs->setup_state, ffs->flags); + } } static struct usb_function *ffs_alloc(struct usb_function_instance *fi) @@ -4041,14 +4059,23 @@ static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock) : mutex_lock_interruptible(mutex); } -static char *ffs_prepare_buffer(const char __user *buf, size_t len) +/** + * ffs_prepare_buffer() - copy userspace buffer into kernel. + * @buf: userspace buffer + * @len: length of the buffer + * @extra_alloc_buf: Extra buffer allocation if required by UDC. 
+ * + * This function returns pointer to the copied buffer + */ +static char *ffs_prepare_buffer(const char __user *buf, size_t len, + size_t extra_buf_alloc) { char *data; if (unlikely(!len)) return NULL; - data = kmalloc(len, GFP_KERNEL); + data = kmalloc(len + extra_buf_alloc, GFP_KERNEL); if (unlikely(!data)) return ERR_PTR(-ENOMEM); diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c index 80ff811b6b1a..f78fa6d63f15 100644 --- a/drivers/usb/gadget/function/f_mtp.c +++ b/drivers/usb/gadget/function/f_mtp.c @@ -354,20 +354,6 @@ struct mtp_ext_config_desc { struct mtp_ext_config_desc_function function; }; -static struct mtp_ext_config_desc mtp_ext_config_desc = { - .header = { - .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)), - .bcdVersion = __constant_cpu_to_le16(0x0100), - .wIndex = __constant_cpu_to_le16(4), - .bCount = 1, - }, - .function = { - .bFirstInterfaceNumber = 0, - .bInterfaceCount = 1, - .compatibleID = { 'M', 'T', 'P' }, - }, -}; - struct mtp_device_status { __le16 wLength; __le16 wCode; @@ -1328,20 +1314,7 @@ static int mtp_ctrlrequest(struct usb_composite_dev *cdev, mtp_log("vendor request: %d index: %d value: %d length: %d\n", ctrl->bRequest, w_index, w_value, w_length); - if (ctrl->bRequest == 1 - && (ctrl->bRequestType & USB_DIR_IN) - && (w_index == 4 || w_index == 5)) { - value = (w_length < sizeof(mtp_ext_config_desc) ? - w_length : sizeof(mtp_ext_config_desc)); - memcpy(cdev->req->buf, &mtp_ext_config_desc, value); - - /* update compatibleID if PTP */ - if (dev->function.fs_descriptors == fs_ptp_descs) { - struct mtp_ext_config_desc *d = cdev->req->buf; - - d->function.compatibleID[0] = 'P'; - } - } + value = -EOPNOTSUPP; } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) { mtp_log("class request: %d index: %d value: %d length: %d\n", ctrl->bRequest, w_index, w_value, w_length); @@ -1411,6 +1384,12 @@ mtp_function_bind(struct usb_configuration *c, struct usb_function *f) dev->cdev = cdev; mtp_log("dev: %pK\n", dev); + /* ChipIdea controller supports 16K request length for IN endpoint */ + if (cdev->gadget->is_chipidea && mtp_tx_req_len > 16384) { + mtp_log("Truncating Tx Req length to 16K for ChipIdea\n"); + mtp_tx_req_len = 16384; + } + /* allocate interface ID(s) */ id = usb_interface_id(c, f); if (id < 0) @@ -1814,6 +1793,10 @@ struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config) return ERR_PTR(-ENOMEM); fi_mtp->func_inst.set_inst_name = mtp_set_inst_name; fi_mtp->func_inst.free_func_inst = mtp_free_inst; + if (mtp_config) + memcpy(fi_mtp->mtp_ext_compat_id, "MTP", 3); + else + memcpy(fi_mtp->mtp_ext_compat_id, "PTP", 3); fi_mtp->mtp_os_desc.ext_compat_id = fi_mtp->mtp_ext_compat_id; INIT_LIST_HEAD(&fi_mtp->mtp_os_desc.ext_prop); diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c index 2a4f3d411f07..98477306fda7 100644 --- a/drivers/usb/gadget/function/f_qdss.c +++ b/drivers/usb/gadget/function/f_qdss.c @@ -2,7 +2,7 @@ /* * f_qdss.c -- QDSS function Driver * - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
*/ #include @@ -909,15 +909,20 @@ void usb_qdss_close(struct usb_qdss_ch *ch) if (!qdss) goto close; qdss->qdss_close = true; + spin_lock(&qdss->lock); while (!list_empty(&qdss->queued_data_pool)) { qreq = list_first_entry(&qdss->queued_data_pool, struct qdss_req, list); + spin_unlock(&qdss->lock); spin_unlock_irqrestore(&channel_lock, flags); - usb_ep_dequeue(qdss->port.data, qreq->usb_req); - wait_for_completion(&qreq->write_done); + qdss_log("dequeue req:%pK\n", qreq->usb_req); + if (!usb_ep_dequeue(qdss->port.data, qreq->usb_req)) + wait_for_completion(&qreq->write_done); spin_lock_irqsave(&channel_lock, flags); + spin_lock(&qdss->lock); } + spin_unlock(&qdss->lock); spin_unlock_irqrestore(&channel_lock, flags); usb_qdss_free_req(ch); spin_lock_irqsave(&channel_lock, flags); @@ -933,6 +938,8 @@ void usb_qdss_close(struct usb_qdss_ch *ch) if (qdss->endless_req) { spin_unlock_irqrestore(&channel_lock, flags); + /* Flush connect work before proceeding with de-queue */ + flush_work(&qdss->connect_w); usb_ep_dequeue(qdss->port.data, qdss->endless_req); spin_lock_irqsave(&channel_lock, flags); } diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c index d5373bc70324..b7b06316ee27 100644 --- a/drivers/usb/gadget/function/f_rndis.c +++ b/drivers/usb/gadget/function/f_rndis.c @@ -742,6 +742,27 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) rndis_data_intf.bInterfaceNumber = status; rndis_union_desc.bSlaveInterface0 = status; + if (rndis_opts->wceis) { + /* "Wireless" RNDIS; auto-detected by Windows */ + rndis_iad_descriptor.bFunctionClass = + USB_CLASS_WIRELESS_CONTROLLER; + rndis_iad_descriptor.bFunctionSubClass = 0x01; + rndis_iad_descriptor.bFunctionProtocol = 0x03; + rndis_control_intf.bInterfaceClass = + USB_CLASS_WIRELESS_CONTROLLER; + rndis_control_intf.bInterfaceSubClass = 0x01; + rndis_control_intf.bInterfaceProtocol = 0x03; + } else { + rndis_iad_descriptor.bFunctionClass = USB_CLASS_COMM; + rndis_iad_descriptor.bFunctionSubClass = + USB_CDC_SUBCLASS_ETHERNET; + rndis_iad_descriptor.bFunctionProtocol = USB_CDC_PROTO_NONE; + rndis_control_intf.bInterfaceClass = USB_CLASS_COMM; + rndis_control_intf.bInterfaceSubClass = USB_CDC_SUBCLASS_ACM; + rndis_control_intf.bInterfaceProtocol = + USB_CDC_ACM_PROTO_VENDOR; + } + status = -ENODEV; /* allocate instance-specific endpoints */ @@ -887,6 +908,8 @@ USB_ETHER_CONFIGFS_ITEM_ATTR_U8_RW(rndis, protocol); /* f_rndis_opts_ul_max_pkt_per_xfer */ USB_ETHER_CONFIGFS_ITEM_ATTR_UL_MAX_PKT_PER_XFER(rndis); +/* f_rndis_opts_wceis */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_WCEIS(rndis); static struct configfs_attribute *rndis_attrs[] = { &rndis_opts_attr_dev_addr, @@ -897,6 +920,7 @@ static struct configfs_attribute *rndis_attrs[] = { &rndis_opts_attr_subclass, &rndis_opts_attr_protocol, &rndis_opts_attr_ul_max_pkt_per_xfer, + &rndis_opts_attr_wceis, NULL, }; @@ -961,6 +985,9 @@ static struct usb_function_instance *rndis_alloc_inst(void) } opts->rndis_interf_group = rndis_interf_group; + /* Enable "Wireless" RNDIS by default */ + opts->wceis = true; + return &opts->func_inst; } diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index 7661009c189c..0c739142d5d2 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c @@ -821,9 +821,9 @@ static struct usb_function_instance *uvc_alloc_inst(void) cd->wObjectiveFocalLengthMax = cpu_to_le16(0); cd->wOcularFocalLength = cpu_to_le16(0); cd->bControlSize = 3; - cd->bmControls[0] = 2; - 
cd->bmControls[1] = 0; - cd->bmControls[2] = 0; + cd->bmControls[0] = 62; + cd->bmControls[1] = 126; + cd->bmControls[2] = 10; pd = &opts->uvc_processing; pd->bLength = UVC_DT_PROCESSING_UNIT_SIZE(3); @@ -833,9 +833,9 @@ static struct usb_function_instance *uvc_alloc_inst(void) pd->bSourceID = 1; pd->wMaxMultiplier = cpu_to_le16(16*1024); pd->bControlSize = 3; - pd->bmControls[0] = 64; - pd->bmControls[1] = 16; - pd->bmControls[2] = 1; + pd->bmControls[0] = 91; + pd->bmControls[1] = 23; + pd->bmControls[2] = 4; pd->iProcessing = 0; pd->bmVideoStandards = 0; diff --git a/drivers/usb/gadget/function/u_bam_dmux.c b/drivers/usb/gadget/function/u_bam_dmux.c new file mode 100644 index 000000000000..9eab3ce83150 --- /dev/null +++ b/drivers/usb/gadget/function/u_bam_dmux.c @@ -0,0 +1,1723 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2011-2018, 2020-2021, Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include "u_rmnet.h" + +static struct workqueue_struct *gbam_wq; +static unsigned int n_tx_req_queued; + +static unsigned int bam_ch_ids[BAM_DMUX_NUM_FUNCS] = { + BAM_DMUX_USB_RMNET_0, + BAM_DMUX_USB_DPL +}; + +static char bam_ch_names[BAM_DMUX_NUM_FUNCS][BAM_DMUX_CH_NAME_MAX_LEN]; + +#define BAM_PENDING_PKTS_LIMIT 220 +#define BAM_MUX_TX_PKT_DROP_THRESHOLD 1000 +#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD 500 +#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD 300 +#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT 1 + +#define BAM_MUX_HDR 8 + +#define BAM_MUX_RX_Q_SIZE 128 +#define BAM_MUX_TX_Q_SIZE 200 +#define BAM_MUX_RX_REQ_SIZE 2048 /* Must be 1KB aligned */ + +#define DL_INTR_THRESHOLD 20 +#define BAM_PENDING_BYTES_LIMIT (50 * BAM_MUX_RX_REQ_SIZE) +#define BAM_PENDING_BYTES_FCTRL_EN_TSHOLD (BAM_PENDING_BYTES_LIMIT / 3) + +/* Extra buffer size to allocate for tx */ +#define EXTRA_ALLOCATION_SIZE_U_BAM 128 + +static unsigned int bam_pending_pkts_limit = BAM_PENDING_PKTS_LIMIT; +static ssize_t bam_pending_pkts_limit_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", bam_pending_pkts_limit); +} + +static ssize_t bam_pending_pkts_limit_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + + if (kstrtos32(buf, 0, &val)) + return -EINVAL; + + bam_pending_pkts_limit = val; + + return count; + +} +static DEVICE_ATTR_RW(bam_pending_pkts_limit); + +static unsigned int bam_pending_bytes_limit = BAM_PENDING_BYTES_LIMIT; +static ssize_t bam_pending_bytes_limit_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", bam_pending_bytes_limit); +} + +static ssize_t bam_pending_bytes_limit_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + + if (kstrtos32(buf, 0, &val)) + return -EINVAL; + + bam_pending_bytes_limit = val; + + return count; + +} +static 
DEVICE_ATTR_RW(bam_pending_bytes_limit); + +static unsigned int bam_pending_bytes_fctrl_en_thold = + BAM_PENDING_BYTES_FCTRL_EN_TSHOLD; +static ssize_t bam_pending_bytes_fctrl_en_thold_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", + bam_pending_bytes_fctrl_en_thold); +} + +static ssize_t bam_pending_bytes_fctrl_en_thold_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + + if (kstrtos32(buf, 0, &val)) + return -EINVAL; + + bam_pending_bytes_fctrl_en_thold = val; + + return count; + +} +static DEVICE_ATTR_RW(bam_pending_bytes_fctrl_en_thold); + +static unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD; +static ssize_t bam_mux_tx_pkt_drop_thld_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", bam_mux_tx_pkt_drop_thld); +} + +static ssize_t bam_mux_tx_pkt_drop_thld_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + + if (kstrtos32(buf, 0, &val)) + return -EINVAL; + + bam_mux_tx_pkt_drop_thld = val; + + return count; + +} +static DEVICE_ATTR_RW(bam_mux_tx_pkt_drop_thld); + +static unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD; +static ssize_t bam_mux_rx_fctrl_en_thld_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", bam_mux_rx_fctrl_en_thld); +} + +static ssize_t bam_mux_rx_fctrl_en_thld_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + + if (kstrtos32(buf, 0, &val)) + return -EINVAL; + + bam_mux_rx_fctrl_en_thld = val; + + return count; + +} +static DEVICE_ATTR_RW(bam_mux_rx_fctrl_en_thld); + +static unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT; +static ssize_t bam_mux_rx_fctrl_support_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", bam_mux_rx_fctrl_support); +} + +static ssize_t bam_mux_rx_fctrl_support_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + + if (kstrtos32(buf, 0, &val)) + return -EINVAL; + + bam_mux_rx_fctrl_support = val; + + return count; + +} +static DEVICE_ATTR_RW(bam_mux_rx_fctrl_support); + +static unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD; +static ssize_t bam_mux_rx_fctrl_dis_thld_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", bam_mux_rx_fctrl_dis_thld); +} + +static ssize_t bam_mux_rx_fctrl_dis_thld_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + + if (kstrtos32(buf, 0, &val)) + return -EINVAL; + + bam_mux_rx_fctrl_dis_thld = val; + + return count; + +} +static DEVICE_ATTR_RW(bam_mux_rx_fctrl_dis_thld); + +static unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE; +static ssize_t bam_mux_tx_q_size_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", bam_mux_tx_q_size); +} + +static ssize_t bam_mux_tx_q_size_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + + if (kstrtos32(buf, 0, &val)) + return -EINVAL; + + bam_mux_tx_q_size = val; + + return count; + +} +static DEVICE_ATTR_RW(bam_mux_tx_q_size); + +static unsigned int bam_mux_rx_q_size = 
BAM_MUX_RX_Q_SIZE; +static ssize_t bam_mux_rx_q_size_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", bam_mux_rx_q_size); +} + +static ssize_t bam_mux_rx_q_size_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + + if (kstrtos32(buf, 0, &val)) + return -EINVAL; + + bam_mux_rx_q_size = val; + + return count; + +} +static DEVICE_ATTR_RW(bam_mux_rx_q_size); + +static unsigned long bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE; +static ssize_t bam_mux_rx_req_size_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", bam_mux_rx_req_size); +} + +static ssize_t bam_mux_rx_req_size_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + + if (kstrtos32(buf, 0, &val)) + return -EINVAL; + + bam_mux_rx_req_size = val; + + return count; + +} +static DEVICE_ATTR_RW(bam_mux_rx_req_size); + +static unsigned int dl_intr_threshold = DL_INTR_THRESHOLD; +static ssize_t dl_intr_threshold_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", dl_intr_threshold); +} + +static ssize_t dl_intr_threshold_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + + if (kstrtos32(buf, 0, &val)) + return -EINVAL; + + dl_intr_threshold = val; + + return count; + +} +static DEVICE_ATTR_RW(dl_intr_threshold); + +#define BAM_CH_OPENED BIT(0) +#define BAM_CH_READY BIT(1) +#define BAM_CH_WRITE_INPROGRESS BIT(2) + +enum u_bam_event_type { + U_BAM_DISCONNECT_E = 0, + U_BAM_CONNECT_E, +}; + +struct bam_ch_info { + unsigned long flags; + unsigned int id; + + struct list_head tx_idle; + struct sk_buff_head tx_skb_q; + + struct list_head rx_idle; + struct sk_buff_head rx_skb_q; + struct sk_buff_head rx_skb_idle; + + struct gbam_port *port; + struct work_struct write_tobam_w; + struct work_struct write_tohost_w; + + /* stats */ + unsigned int pending_pkts_with_bam; + unsigned int pending_bytes_with_bam; + unsigned int tohost_drp_cnt; + unsigned int tomodem_drp_cnt; + unsigned int tx_len; + unsigned int rx_len; + unsigned long to_modem; + unsigned long to_host; + unsigned int rx_flow_control_disable; + unsigned int rx_flow_control_enable; + unsigned int rx_flow_control_triggered; + unsigned int max_num_pkts_pending_with_bam; + unsigned int max_bytes_pending_with_bam; + unsigned int delayed_bam_mux_write_done; + unsigned long skb_expand_cnt; +}; + +struct gbam_port { + enum u_bam_event_type last_event; + unsigned int port_num; + spinlock_t port_lock_ul; + spinlock_t port_lock_dl; + spinlock_t port_lock; + + struct data_port *port_usb; + struct usb_gadget *gadget; + + struct bam_ch_info data_ch; + + struct work_struct connect_w; + struct work_struct disconnect_w; +}; + +static struct bam_portmaster { + struct gbam_port *port; + struct platform_driver pdrv; +} bam_ports[BAM_DMUX_NUM_FUNCS]; + +static void gbam_start_rx(struct gbam_port *port); +static void gbam_notify(void *p, int event, unsigned long data); +static void gbam_data_write_tobam(struct work_struct *w); + +/*---------------misc functions---------------- */ +static void gbam_free_requests(struct usb_ep *ep, struct list_head *head) +{ + struct usb_request *req; + + while (!list_empty(head)) { + req = list_entry(head->next, struct usb_request, list); + list_del(&req->list); + usb_ep_free_request(ep, req); + } +} + +static int 
gbam_alloc_requests(struct usb_ep *ep, struct list_head *head, + int num, + void (*cb)(struct usb_ep *ep, struct usb_request *), + gfp_t flags) +{ + int i; + struct usb_request *req; + + pr_debug("%s: ep:%pK head:%pK num:%d cb:%pK\n", __func__, + ep, head, num, cb); + + for (i = 0; i < num; i++) { + req = usb_ep_alloc_request(ep, flags); + if (!req) { + pr_debug("%s: req allocated:%d\n", __func__, i); + return list_empty(head) ? -ENOMEM : 0; + } + req->complete = cb; + list_add(&req->list, head); + } + + return 0; +} + +static inline dma_addr_t gbam_get_dma_from_skb(struct sk_buff *skb) +{ + return *((dma_addr_t *)(skb->cb)); +} + +/* This function should be called with port_lock_ul lock held */ +static struct sk_buff *gbam_alloc_skb_from_pool(struct gbam_port *port) +{ + struct bam_ch_info *d; + struct sk_buff *skb; + dma_addr_t skb_buf_dma_addr; + + if (!port) + return NULL; + + d = &port->data_ch; + if (!d) + return NULL; + + if (d->rx_skb_idle.qlen == 0) { + /* + * In case skb idle pool is empty, we allow to allocate more + * skbs so we dynamically enlarge the pool size when needed. + * Therefore, in steady state this dynamic allocation will + * stop when the pool will arrive to its optimal size. + */ + pr_debug("%s: allocate skb\n", __func__); + skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC); + + if (!skb) + goto alloc_exit; + + skb_reserve(skb, BAM_MUX_HDR); + skb_buf_dma_addr = DMA_ERROR_CODE; + + memcpy(skb->cb, &skb_buf_dma_addr, + sizeof(skb_buf_dma_addr)); + + } else { + pr_debug("%s: pull skb from pool\n", __func__); + skb = __skb_dequeue(&d->rx_skb_idle); + if (!skb) + goto alloc_exit; + + if (skb_headroom(skb) < BAM_MUX_HDR) + skb_reserve(skb, BAM_MUX_HDR); + } + +alloc_exit: + return skb; +} + +/* This function should be called with port_lock_ul lock held */ +static void gbam_free_skb_to_pool(struct gbam_port *port, struct sk_buff *skb) +{ + struct bam_ch_info *d; + + if (!port) + return; + d = &port->data_ch; + + skb->len = 0; + skb_reset_tail_pointer(skb); + __skb_queue_tail(&d->rx_skb_idle, skb); +} + +static void gbam_free_rx_skb_idle_list(struct gbam_port *port) +{ + struct bam_ch_info *d; + struct sk_buff *skb; + dma_addr_t dma_addr; + struct usb_gadget *gadget = NULL; + + if (!port) + return; + d = &port->data_ch; + + gadget = port->port_usb->cdev->gadget; + + while (d->rx_skb_idle.qlen > 0) { + skb = __skb_dequeue(&d->rx_skb_idle); + if (!skb) + break; + + dma_addr = gbam_get_dma_from_skb(skb); + + if (gadget && dma_addr != DMA_ERROR_CODE) { + dma_unmap_single(&gadget->dev, dma_addr, + bam_mux_rx_req_size, DMA_BIDIRECTIONAL); + + dma_addr = DMA_ERROR_CODE; + memcpy(skb->cb, &dma_addr, + sizeof(dma_addr)); + } + dev_kfree_skb_any(skb); + } +} + +/*--------------------------------------------- */ + +/*------------data_path----------------------------*/ +static void gbam_write_data_tohost(struct gbam_port *port) +{ + unsigned long flags; + struct bam_ch_info *d = &port->data_ch; + struct sk_buff *skb; + struct sk_buff *new_skb; + int ret; + int tail_room = 0; + int extra_alloc = 0; + struct usb_request *req; + struct usb_ep *ep; + + spin_lock_irqsave(&port->port_lock_dl, flags); + if (!port->port_usb) { + spin_unlock_irqrestore(&port->port_lock_dl, flags); + return; + } + + ep = port->port_usb->in; + + while (!list_empty(&d->tx_idle)) { + skb = __skb_dequeue(&d->tx_skb_q); + if (!skb) + break; + + /* + * Some UDC requires allocation of some extra bytes for + * TX buffer due to hardware requirement. 
Check if extra + * bytes are already there, otherwise allocate new buffer + * with extra bytes and do memcpy. + */ + if (port->gadget->extra_buf_alloc) + extra_alloc = EXTRA_ALLOCATION_SIZE_U_BAM; + tail_room = skb_tailroom(skb); + if (tail_room < extra_alloc) { + pr_debug("%s: tail_room %d less than %d\n", __func__, + tail_room, extra_alloc); + new_skb = skb_copy_expand(skb, 0, extra_alloc - + tail_room, GFP_ATOMIC); + if (!new_skb) { + pr_err("skb_copy_expand failed\n"); + break; + } + dev_kfree_skb_any(skb); + skb = new_skb; + d->skb_expand_cnt++; + } + + req = list_first_entry(&d->tx_idle, + struct usb_request, + list); + req->context = skb; + req->buf = skb->data; + req->length = skb->len; + n_tx_req_queued++; + if (n_tx_req_queued == dl_intr_threshold) { + req->no_interrupt = 0; + n_tx_req_queued = 0; + } else { + req->no_interrupt = 1; + } + + /* Send ZLP in case packet length is multiple of maxpacksize */ + req->zero = 1; + + list_del(&req->list); + + spin_unlock(&port->port_lock_dl); + ret = usb_ep_queue(ep, req, GFP_ATOMIC); + spin_lock(&port->port_lock_dl); + if (ret) { + pr_err_ratelimited("%s: usb epIn failed with %d\n", + __func__, ret); + list_add(&req->list, &d->tx_idle); + dev_kfree_skb_any(skb); + break; + } + d->to_host++; + } + spin_unlock_irqrestore(&port->port_lock_dl, flags); +} + +static void gbam_write_data_tohost_w(struct work_struct *w) +{ + struct bam_ch_info *d; + struct gbam_port *port; + + d = container_of(w, struct bam_ch_info, write_tohost_w); + port = d->port; + + gbam_write_data_tohost(port); +} + +static void gbam_data_recv_cb(void *p, struct sk_buff *skb) +{ + struct gbam_port *port = p; + struct bam_ch_info *d = &port->data_ch; + unsigned long flags; + + if (!skb) + return; + + pr_debug("%s: p:%pK#%d d:%pK skb_len:%d\n", __func__, + port, port->port_num, d, skb->len); + + spin_lock_irqsave(&port->port_lock_dl, flags); + if (!port->port_usb) { + spin_unlock_irqrestore(&port->port_lock_dl, flags); + dev_kfree_skb_any(skb); + return; + } + + if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) { + d->tohost_drp_cnt++; + printk_ratelimited(KERN_ERR "%s: tx pkt dropped: tx_drop_cnt:%u\n", + __func__, d->tohost_drp_cnt); + spin_unlock_irqrestore(&port->port_lock_dl, flags); + dev_kfree_skb_any(skb); + return; + } + + __skb_queue_tail(&d->tx_skb_q, skb); + spin_unlock_irqrestore(&port->port_lock_dl, flags); + + gbam_write_data_tohost(port); +} + +static void gbam_data_write_done(void *p, struct sk_buff *skb) +{ + struct gbam_port *port = p; + struct bam_ch_info *d = &port->data_ch; + unsigned long flags; + + if (!skb) + return; + + spin_lock_irqsave(&port->port_lock_ul, flags); + + d->pending_pkts_with_bam--; + d->pending_bytes_with_bam -= skb->len; + gbam_free_skb_to_pool(port, skb); + + pr_debug("%s:port:%pK d:%pK tom:%lu ppkt:%u pbytes:%u pno:%d\n", + __func__, port, d, d->to_modem, d->pending_pkts_with_bam, + d->pending_bytes_with_bam, port->port_num); + + spin_unlock_irqrestore(&port->port_lock_ul, flags); + + /* + * If BAM doesn't have much pending data then push new data from here: + * write_complete notify only to avoid any underruns due to wq latency + */ + if (d->pending_bytes_with_bam <= bam_pending_bytes_fctrl_en_thold) { + gbam_data_write_tobam(&d->write_tobam_w); + } else { + d->delayed_bam_mux_write_done++; + queue_work(gbam_wq, &d->write_tobam_w); + } +} + +/* This function should be called with port_lock_ul spinlock acquired */ +static bool gbam_ul_bam_limit_reached(struct bam_ch_info *data_ch) +{ + unsigned int curr_pending_pkts = 
data_ch->pending_pkts_with_bam; + unsigned int curr_pending_bytes = data_ch->pending_bytes_with_bam; + struct sk_buff *skb; + + if (curr_pending_pkts >= bam_pending_pkts_limit) + return true; + + /* check if next skb length doesn't exceed pending_bytes_limit */ + skb = skb_peek(&data_ch->rx_skb_q); + if (!skb) + return false; + + if ((curr_pending_bytes + skb->len) > bam_pending_bytes_limit) + return true; + else + return false; +} + +static void gbam_data_write_tobam(struct work_struct *w) +{ + struct gbam_port *port; + struct bam_ch_info *d; + struct sk_buff *skb; + unsigned long flags; + int ret; + int qlen; + + d = container_of(w, struct bam_ch_info, write_tobam_w); + port = d->port; + + spin_lock_irqsave(&port->port_lock_ul, flags); + if (!port->port_usb) { + spin_unlock_irqrestore(&port->port_lock_ul, flags); + return; + } + /* Bail out if already in progress */ + if (test_bit(BAM_CH_WRITE_INPROGRESS, &d->flags)) { + spin_unlock_irqrestore(&port->port_lock_ul, flags); + return; + } + + set_bit(BAM_CH_WRITE_INPROGRESS, &d->flags); + + while (!gbam_ul_bam_limit_reached(d)) { + skb = __skb_dequeue(&d->rx_skb_q); + if (!skb) + break; + + d->pending_pkts_with_bam++; + d->pending_bytes_with_bam += skb->len; + d->to_modem++; + + pr_debug("%s: port:%pK d:%pK tom:%lu ppkts:%u pbytes:%u pno:%d\n", + __func__, port, d, + d->to_modem, d->pending_pkts_with_bam, + d->pending_bytes_with_bam, port->port_num); + + spin_unlock_irqrestore(&port->port_lock_ul, flags); + ret = msm_bam_dmux_write(d->id, skb); + spin_lock_irqsave(&port->port_lock_ul, flags); + if (ret) { + pr_debug("%s: write error:%d\n", __func__, ret); + d->pending_pkts_with_bam--; + d->pending_bytes_with_bam -= skb->len; + d->to_modem--; + d->tomodem_drp_cnt++; + gbam_free_skb_to_pool(port, skb); + break; + } + if (d->pending_pkts_with_bam > d->max_num_pkts_pending_with_bam) + d->max_num_pkts_pending_with_bam = + d->pending_pkts_with_bam; + if (d->pending_bytes_with_bam > d->max_bytes_pending_with_bam) + d->max_bytes_pending_with_bam = + d->pending_bytes_with_bam; + } + + qlen = d->rx_skb_q.qlen; + + clear_bit(BAM_CH_WRITE_INPROGRESS, &d->flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); + + if (qlen < bam_mux_rx_fctrl_dis_thld) { + if (d->rx_flow_control_triggered) { + d->rx_flow_control_disable++; + d->rx_flow_control_triggered = 0; + } + gbam_start_rx(port); + } +} +/*-------------------------------------------------------------*/ + +static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct gbam_port *port = ep->driver_data; + struct bam_ch_info *d; + struct sk_buff *skb = req->context; + int status = req->status; + + switch (status) { + case 0: + /* successful completion */ + break; + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + dev_kfree_skb_any(skb); + usb_ep_free_request(ep, req); + return; + default: + pr_err("%s: data tx ep error %d\n", + __func__, status); + break; + } + + dev_kfree_skb_any(skb); + + if (!port) + return; + + spin_lock(&port->port_lock_dl); + d = &port->data_ch; + list_add_tail(&req->list, &d->tx_idle); + spin_unlock(&port->port_lock_dl); + + queue_work(gbam_wq, &d->write_tohost_w); +} + +static void +gbam_epout_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct gbam_port *port = ep->driver_data; + struct bam_ch_info *d = &port->data_ch; + struct sk_buff *skb = req->context; + int status = req->status; + int queue = 0; + + switch (status) { + case 0: + skb_put(skb, req->actual); + queue = 1; + break; + case -ECONNRESET: + case -ESHUTDOWN: 
+ /* cable disconnection */ + spin_lock(&port->port_lock_ul); + gbam_free_skb_to_pool(port, skb); + spin_unlock(&port->port_lock_ul); + req->buf = NULL; + usb_ep_free_request(ep, req); + return; + default: + printk_ratelimited(KERN_ERR "%s: %s response error %d, %d/%d\n", + __func__, ep->name, status, req->actual, req->length); + spin_lock(&port->port_lock_ul); + gbam_free_skb_to_pool(port, skb); + spin_unlock(&port->port_lock_ul); + break; + } + + spin_lock(&port->port_lock_ul); + + if (queue) { + __skb_queue_tail(&d->rx_skb_q, skb); + queue_work(gbam_wq, &d->write_tobam_w); + } + + /* TODO: Handle flow control gracefully by having + * having call back mechanism from bam driver + */ + if (bam_mux_rx_fctrl_support && + d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) { + if (!d->rx_flow_control_triggered) { + d->rx_flow_control_triggered = 1; + d->rx_flow_control_enable++; + } + list_add_tail(&req->list, &d->rx_idle); + spin_unlock(&port->port_lock_ul); + return; + } + + skb = gbam_alloc_skb_from_pool(port); + if (!skb) { + list_add_tail(&req->list, &d->rx_idle); + spin_unlock(&port->port_lock_ul); + return; + } + spin_unlock(&port->port_lock_ul); + + req->buf = skb->data; + req->dma = gbam_get_dma_from_skb(skb); + req->length = bam_mux_rx_req_size; + + req->context = skb; + + status = usb_ep_queue(ep, req, GFP_ATOMIC); + if (status) { + spin_lock(&port->port_lock_ul); + gbam_free_skb_to_pool(port, skb); + spin_unlock(&port->port_lock_ul); + + printk_ratelimited(KERN_ERR "%s: data rx enqueue err %d\n", + __func__, status); + + spin_lock(&port->port_lock_ul); + list_add_tail(&req->list, &d->rx_idle); + spin_unlock(&port->port_lock_ul); + } +} + +static void gbam_start_rx(struct gbam_port *port) +{ + struct usb_request *req; + struct bam_ch_info *d; + struct usb_ep *ep; + unsigned long flags; + int ret; + struct sk_buff *skb; + + spin_lock_irqsave(&port->port_lock_ul, flags); + if (!port->port_usb || !port->port_usb->out) { + spin_unlock_irqrestore(&port->port_lock_ul, flags); + return; + } + + d = &port->data_ch; + ep = port->port_usb->out; + + while (port->port_usb && !list_empty(&d->rx_idle)) { + + if (bam_mux_rx_fctrl_support && + d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) + break; + + req = list_first_entry(&d->rx_idle, struct usb_request, list); + + skb = gbam_alloc_skb_from_pool(port); + if (!skb) + break; + + list_del(&req->list); + req->buf = skb->data; + req->dma = gbam_get_dma_from_skb(skb); + req->length = bam_mux_rx_req_size; + + req->context = skb; + + spin_unlock_irqrestore(&port->port_lock_ul, flags); + ret = usb_ep_queue(ep, req, GFP_ATOMIC); + spin_lock_irqsave(&port->port_lock_ul, flags); + if (ret) { + gbam_free_skb_to_pool(port, skb); + + printk_ratelimited(KERN_ERR "%s: rx queue failed %d\n", + __func__, ret); + + if (port->port_usb) + list_add(&req->list, &d->rx_idle); + else + usb_ep_free_request(ep, req); + break; + } + } + + spin_unlock_irqrestore(&port->port_lock_ul, flags); +} + +static int _gbam_start_io(struct gbam_port *port, bool in) +{ + unsigned long flags; + int ret = 0; + struct usb_ep *ep; + struct list_head *idle; + unsigned int queue_size; + spinlock_t *spinlock; + void (*ep_complete)(struct usb_ep *ep, + struct usb_request *req); + + if (in) + spinlock = &port->port_lock_dl; + else + spinlock = &port->port_lock_ul; + + spin_lock_irqsave(spinlock, flags); + if (!port->port_usb) { + spin_unlock_irqrestore(spinlock, flags); + return -EBUSY; + } + + if (in) { + ep = port->port_usb->in; + idle = &port->data_ch.tx_idle; + queue_size = bam_mux_tx_q_size; + 
ep_complete = gbam_epin_complete; + } else { + ep = port->port_usb->out; + if (!ep) + goto out; + idle = &port->data_ch.rx_idle; + queue_size = bam_mux_rx_q_size; + ep_complete = gbam_epout_complete; + } + + ret = gbam_alloc_requests(ep, idle, queue_size, ep_complete, + GFP_ATOMIC); +out: + spin_unlock_irqrestore(spinlock, flags); + if (ret) + pr_err("%s: allocation failed\n", __func__); + + return ret; +} + +static void gbam_start_io(struct gbam_port *port) +{ + unsigned long flags; + + pr_debug("%s: port:%pK\n", __func__, port); + + if (_gbam_start_io(port, true)) + return; + + if (_gbam_start_io(port, false)) { + spin_lock_irqsave(&port->port_lock_dl, flags); + if (port->port_usb) + gbam_free_requests(port->port_usb->in, + &port->data_ch.tx_idle); + spin_unlock_irqrestore(&port->port_lock_dl, flags); + return; + } + + /* queue out requests */ + gbam_start_rx(port); +} + +static void gbam_notify(void *p, int event, unsigned long data) +{ + struct gbam_port *port = p; + struct bam_ch_info *d; + struct sk_buff *skb; + + if (port == NULL) + pr_err("BAM DMUX notifying after channel close\n"); + + switch (event) { + case BAM_DMUX_RECEIVE: + skb = (struct sk_buff *)data; + if (port) + gbam_data_recv_cb(p, skb); + else + dev_kfree_skb_any(skb); + break; + case BAM_DMUX_WRITE_DONE: + skb = (struct sk_buff *)data; + if (port) + gbam_data_write_done(p, skb); + else + dev_kfree_skb_any(skb); + break; + case BAM_DMUX_TRANSMIT_SIZE: + d = &port->data_ch; + if (test_bit(BAM_CH_OPENED, &d->flags)) + pr_warn("%s, BAM channel opened already\n", __func__); + bam_mux_rx_req_size = data; + pr_debug("%s rx_req_size: %lu\n", __func__, + bam_mux_rx_req_size); + break; + } +} + +static void gbam_free_rx_buffers(struct gbam_port *port) +{ + struct sk_buff *skb; + unsigned long flags; + struct bam_ch_info *d; + + spin_lock_irqsave(&port->port_lock_ul, flags); + + if (!port->port_usb || !port->port_usb->out) + goto free_rx_buf_out; + + d = &port->data_ch; + gbam_free_requests(port->port_usb->out, &d->rx_idle); + + while ((skb = __skb_dequeue(&d->rx_skb_q))) + dev_kfree_skb_any(skb); + + gbam_free_rx_skb_idle_list(port); + +free_rx_buf_out: + spin_unlock_irqrestore(&port->port_lock_ul, flags); +} + +static void gbam_free_tx_buffers(struct gbam_port *port) +{ + struct sk_buff *skb; + unsigned long flags; + struct bam_ch_info *d; + + spin_lock_irqsave(&port->port_lock_dl, flags); + + if (!port->port_usb) + goto free_tx_buf_out; + + d = &port->data_ch; + gbam_free_requests(port->port_usb->in, &d->tx_idle); + + while ((skb = __skb_dequeue(&d->tx_skb_q))) + dev_kfree_skb_any(skb); + +free_tx_buf_out: + spin_unlock_irqrestore(&port->port_lock_dl, flags); +} + +static void gbam_free_buffers(struct gbam_port *port) +{ + gbam_free_rx_buffers(port); + gbam_free_tx_buffers(port); +} + +static void gbam_disconnect_work(struct work_struct *w) +{ + struct gbam_port *port = + container_of(w, struct gbam_port, disconnect_w); + struct bam_ch_info *d = &port->data_ch; + + if (!test_bit(BAM_CH_OPENED, &d->flags)) { + pr_err("%s: Bam channel is not opened\n", __func__); + goto exit; + } + + msm_bam_dmux_close(d->id); + clear_bit(BAM_CH_OPENED, &d->flags); +exit: + return; +} + +static void gbam_connect_work(struct work_struct *w) +{ + struct gbam_port *port = container_of(w, struct gbam_port, connect_w); + struct bam_ch_info *d = &port->data_ch; + int ret; + unsigned long flags; + + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); + if (!port->port_usb) { + spin_unlock(&port->port_lock_dl); + 
spin_unlock_irqrestore(&port->port_lock_ul, flags); + return; + } + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); + + if (!test_bit(BAM_CH_READY, &d->flags)) { + pr_err("%s: Bam channel is not ready\n", __func__); + return; + } + + ret = msm_bam_dmux_open(d->id, port, gbam_notify); + if (ret) { + pr_err("%s: unable open bam ch:%d err:%d\n", + __func__, d->id, ret); + return; + } + + set_bit(BAM_CH_OPENED, &d->flags); + + gbam_start_io(port); + + pr_debug("%s: done\n", __func__); +} + +static int gbam_sys_init(struct device *dev) +{ + int ret = 0; + + if (!dev) + return -EINVAL; + + device_create_file(dev, &dev_attr_bam_pending_pkts_limit); + device_create_file(dev, &dev_attr_bam_pending_bytes_limit); + device_create_file(dev, &dev_attr_bam_pending_bytes_fctrl_en_thold); + device_create_file(dev, &dev_attr_bam_mux_tx_pkt_drop_thld); + device_create_file(dev, &dev_attr_bam_mux_rx_fctrl_en_thld); + device_create_file(dev, &dev_attr_bam_mux_rx_fctrl_support); + device_create_file(dev, &dev_attr_bam_mux_rx_fctrl_dis_thld); + device_create_file(dev, &dev_attr_bam_mux_tx_q_size); + device_create_file(dev, &dev_attr_bam_mux_rx_q_size); + device_create_file(dev, &dev_attr_bam_mux_rx_req_size); + device_create_file(dev, &dev_attr_dl_intr_threshold); + + return ret; + +} + +static void gbam_sys_remove(struct device *dev) +{ + + device_remove_file(dev, &dev_attr_bam_pending_pkts_limit); + device_remove_file(dev, &dev_attr_bam_pending_bytes_limit); + device_remove_file(dev, &dev_attr_bam_pending_bytes_fctrl_en_thold); + device_remove_file(dev, &dev_attr_bam_mux_tx_pkt_drop_thld); + device_remove_file(dev, &dev_attr_bam_mux_rx_fctrl_en_thld); + device_remove_file(dev, &dev_attr_bam_mux_rx_fctrl_support); + device_remove_file(dev, &dev_attr_bam_mux_rx_fctrl_dis_thld); + device_remove_file(dev, &dev_attr_bam_mux_tx_q_size); + device_remove_file(dev, &dev_attr_bam_mux_rx_q_size); + device_remove_file(dev, &dev_attr_bam_mux_rx_req_size); + device_remove_file(dev, &dev_attr_dl_intr_threshold); + +} + +/* BAM data channel ready, allow attempt to open */ +static int gbam_data_ch_probe(struct platform_device *pdev) +{ + struct gbam_port *port; + struct bam_ch_info *d; + int i; + unsigned long flags; + bool do_work = false; + + pr_debug("%s: name:%s\n", __func__, pdev->name); + + for (i = 0; i < BAM_DMUX_NUM_FUNCS; i++) { + port = bam_ports[i].port; + if (!port) + continue; + + d = &port->data_ch; + + if (!strcmp(bam_ch_names[i], pdev->name)) { + set_bit(BAM_CH_READY, &d->flags); + + /* if usb is online, try opening bam_ch */ + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); + if (port->port_usb) + do_work = true; + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); + + if (do_work) + queue_work(gbam_wq, &port->connect_w); + break; + } + } + + gbam_sys_init(&pdev->dev); + + return 0; +} + +/* BAM data channel went inactive, so close it */ +static int gbam_data_ch_remove(struct platform_device *pdev) +{ + struct gbam_port *port; + struct bam_ch_info *d; + struct usb_ep *ep_in = NULL; + struct usb_ep *ep_out = NULL; + unsigned long flags; + int i; + + pr_debug("%s: name:%s\n", __func__, pdev->name); + + for (i = 0; i < BAM_DMUX_NUM_FUNCS; i++) { + if (!strcmp(bam_ch_names[i], pdev->name)) { + port = bam_ports[i].port; + if (!port) + continue; + + d = &port->data_ch; + + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); + if (port->port_usb) { + ep_in = port->port_usb->in; + 
ep_out = port->port_usb->out; + } + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); + + if (ep_in) + usb_ep_fifo_flush(ep_in); + if (ep_out) + usb_ep_fifo_flush(ep_out); + + gbam_free_buffers(port); + + msm_bam_dmux_close(d->id); + + /* bam dmux will free all pending skbs */ + d->pending_pkts_with_bam = 0; + d->pending_bytes_with_bam = 0; + + clear_bit(BAM_CH_READY, &d->flags); + clear_bit(BAM_CH_OPENED, &d->flags); + } + } + gbam_sys_remove(&pdev->dev); + + return 0; +} + +static void gbam_port_free(enum bam_dmux_func_type func) +{ + struct gbam_port *port = bam_ports[func].port; + struct platform_driver *pdrv = &bam_ports[func].pdrv; + + if (port) { + platform_driver_unregister(pdrv); + + kfree(port); + bam_ports[func].port = NULL; + } +} + +static int gbam_port_alloc(enum bam_dmux_func_type func) +{ + struct gbam_port *port; + struct bam_ch_info *d; + struct platform_driver *pdrv; + + port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + port->port_num = func; + + /* port initialization */ + spin_lock_init(&port->port_lock_ul); + spin_lock_init(&port->port_lock_dl); + spin_lock_init(&port->port_lock); + INIT_WORK(&port->connect_w, gbam_connect_work); + INIT_WORK(&port->disconnect_w, gbam_disconnect_work); + + /* data ch */ + d = &port->data_ch; + d->port = port; + INIT_LIST_HEAD(&d->tx_idle); + INIT_LIST_HEAD(&d->rx_idle); + INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam); + INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w); + skb_queue_head_init(&d->tx_skb_q); + skb_queue_head_init(&d->rx_skb_q); + skb_queue_head_init(&d->rx_skb_idle); + d->id = bam_ch_ids[func]; + + bam_ports[func].port = port; + + scnprintf(bam_ch_names[func], BAM_DMUX_CH_NAME_MAX_LEN, + "bam_dmux_ch_%d", bam_ch_ids[func]); + pdrv = &bam_ports[func].pdrv; + pdrv->probe = gbam_data_ch_probe; + pdrv->remove = gbam_data_ch_remove; + pdrv->driver.name = bam_ch_names[func]; + pdrv->driver.owner = THIS_MODULE; + + platform_driver_register(pdrv); + pr_debug("%s: port:%pK portno:%d\n", __func__, port, func); + + return 0; +} + +#if defined(CONFIG_DEBUG_FS) +#define DEBUG_BUF_SIZE 1024 +static ssize_t gbam_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct gbam_port *port; + struct bam_ch_info *d; + char *buf; + unsigned long flags; + int ret; + int i; + int temp = 0; + + buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < BAM_DMUX_NUM_FUNCS; i++) { + port = bam_ports[i].port; + if (!port) + continue; + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); + + d = &port->data_ch; + + temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp, + "#PORT:%d port:%pK data_ch:%pK#\n" + "dpkts_to_usbhost: %lu\n" + "dpkts_to_modem: %lu\n" + "dpkts_pwith_bam: %u\n" + "dbytes_pwith_bam: %u\n" + "to_usbhost_dcnt: %u\n" + "tomodem__dcnt: %u\n" + "rx_flow_control_disable_count: %u\n" + "rx_flow_control_enable_count: %u\n" + "rx_flow_control_triggered: %u\n" + "max_num_pkts_pending_with_bam: %u\n" + "max_bytes_pending_with_bam: %u\n" + "delayed_bam_mux_write_done: %u\n" + "tx_buf_len: %u\n" + "rx_buf_len: %u\n" + "data_ch_open: %d\n" + "data_ch_ready: %d\n" + "skb_expand_cnt: %lu\n", + i, port, &port->data_ch, + d->to_host, d->to_modem, + d->pending_pkts_with_bam, + d->pending_bytes_with_bam, + d->tohost_drp_cnt, d->tomodem_drp_cnt, + d->rx_flow_control_disable, + d->rx_flow_control_enable, + d->rx_flow_control_triggered, + 
d->max_num_pkts_pending_with_bam, + d->max_bytes_pending_with_bam, + d->delayed_bam_mux_write_done, + d->tx_skb_q.qlen, d->rx_skb_q.qlen, + test_bit(BAM_CH_OPENED, &d->flags), + test_bit(BAM_CH_READY, &d->flags), + d->skb_expand_cnt); + + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); + } + + ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp); + + kfree(buf); + + return ret; +} + +static ssize_t gbam_reset_stats(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct gbam_port *port; + struct bam_ch_info *d; + int i; + unsigned long flags; + + for (i = 0; i < BAM_DMUX_NUM_FUNCS; i++) { + port = bam_ports[i].port; + if (!port) + continue; + + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); + + d = &port->data_ch; + + d->to_host = 0; + d->to_modem = 0; + d->pending_pkts_with_bam = 0; + d->pending_bytes_with_bam = 0; + d->tohost_drp_cnt = 0; + d->tomodem_drp_cnt = 0; + d->rx_flow_control_disable = 0; + d->rx_flow_control_enable = 0; + d->rx_flow_control_triggered = 0; + d->max_num_pkts_pending_with_bam = 0; + d->max_bytes_pending_with_bam = 0; + d->delayed_bam_mux_write_done = 0; + d->skb_expand_cnt = 0; + + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); + } + return count; +} + +static const struct file_operations gbam_stats_ops = { + .read = gbam_read_stats, + .write = gbam_reset_stats, +}; + +static struct dentry *gbam_dent; +static void gbam_debugfs_init(void) +{ + struct dentry *dfile; + + if (gbam_dent) + return; + + gbam_dent = debugfs_create_dir("usb_rmnet", NULL); + if (!gbam_dent || IS_ERR(gbam_dent)) + return; + + dfile = debugfs_create_file("status", 0444, gbam_dent, NULL, + &gbam_stats_ops); + if (!dfile || IS_ERR(dfile)) { + debugfs_remove(gbam_dent); + gbam_dent = NULL; + return; + } +} +static void gbam_debugfs_remove(void) +{ + if (!gbam_dent) + return; + + debugfs_remove_recursive(gbam_dent); + debugfs_remove(gbam_dent); + gbam_dent = NULL; +} +#else +static inline void gbam_debugfs_init(void) {} +static inline void gbam_debugfs_remove(void) {} +#endif + +void gbam_disconnect(struct data_port *gr, enum bam_dmux_func_type func) +{ + struct gbam_port *port; + unsigned long flags, flags_ul; + struct bam_ch_info *d; + + pr_debug("%s: grmnet:%pK port#%d\n", __func__, gr, func); + + if (func >= BAM_DMUX_NUM_FUNCS) { + pr_err("%s: invalid bam portno#%d\n", __func__, func); + return; + } + + if (!gr) { + pr_err("%s: grmnet port is null\n", __func__); + return; + } + port = bam_ports[func].port; + + if (!port) { + pr_err("%s: NULL port\n", __func__); + return; + } + + spin_lock_irqsave(&port->port_lock, flags); + + d = &port->data_ch; + /* Already disconnected due to suspend with remote wake disabled */ + if (port->last_event == U_BAM_DISCONNECT_E) { + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + + port->port_usb = gr; + + gbam_free_buffers(port); + + spin_lock_irqsave(&port->port_lock_ul, flags_ul); + spin_lock(&port->port_lock_dl); + port->port_usb = NULL; + n_tx_req_queued = 0; + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags_ul); + + usb_ep_disable(gr->in); + /* disable endpoints */ + if (gr->out) + usb_ep_disable(gr->out); + + gr->in->driver_data = NULL; + if (gr->out) + gr->out->driver_data = NULL; + + port->last_event = U_BAM_DISCONNECT_E; + queue_work(gbam_wq, &port->disconnect_w); + + spin_unlock_irqrestore(&port->port_lock, flags); +} + +int gbam_connect(struct 
data_port *gr, enum bam_dmux_func_type func) +{ + struct gbam_port *port; + struct bam_ch_info *d; + int ret; + unsigned long flags, flags_ul; + + pr_debug("%s: grmnet:%pK port#%d\n", __func__, gr, func); + + if (!gr) { + pr_err("%s: grmnet port is null\n", __func__); + return -ENODEV; + } + + if (!gr->cdev->gadget) { + pr_err("%s: gadget handle not passed\n", __func__); + return -EINVAL; + } + + if (func >= BAM_DMUX_NUM_FUNCS) { + pr_err("%s: invalid portno#%d\n", __func__, func); + return -ENODEV; + } + + port = bam_ports[func].port; + + if (!port) { + pr_err("%s: NULL port\n", __func__); + return -ENODEV; + } + + spin_lock_irqsave(&port->port_lock, flags); + + d = &port->data_ch; + + spin_lock_irqsave(&port->port_lock_ul, flags_ul); + spin_lock(&port->port_lock_dl); + port->port_usb = gr; + port->gadget = port->port_usb->cdev->gadget; + + d->to_host = 0; + d->to_modem = 0; + d->pending_pkts_with_bam = 0; + d->pending_bytes_with_bam = 0; + d->tohost_drp_cnt = 0; + d->tomodem_drp_cnt = 0; + d->rx_flow_control_disable = 0; + d->rx_flow_control_enable = 0; + d->rx_flow_control_triggered = 0; + d->max_num_pkts_pending_with_bam = 0; + d->max_bytes_pending_with_bam = 0; + d->delayed_bam_mux_write_done = 0; + + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags_ul); + + ret = usb_ep_enable(gr->in); + if (ret) { + pr_err("%s: usb_ep_enable failed eptype:IN ep:%pK\n", + __func__, gr->in); + goto exit; + } + gr->in->driver_data = port; + + /* + * DPL traffic is routed through BAM-DMUX on some targets. + * DPL function has only 1 IN endpoint. Add out endpoint + * checks for BAM-DMUX transport. + */ + if (gr->out) { + ret = usb_ep_enable(gr->out); + if (ret) { + pr_err("%s: usb_ep_enable failed eptype:OUT ep:%pK\n", + __func__, gr->out); + gr->in->driver_data = NULL; + usb_ep_disable(gr->in); + goto exit; + } + gr->out->driver_data = port; + } + + port->last_event = U_BAM_CONNECT_E; + queue_work(gbam_wq, &port->connect_w); + + ret = 0; +exit: + spin_unlock_irqrestore(&port->port_lock, flags); + return ret; +} + +int gbam_setup(enum bam_dmux_func_type func) +{ + int ret; + + pr_debug("%s: requested BAM port:%d\n", __func__, func); + + if (func >= BAM_DMUX_NUM_FUNCS) { + pr_err("%s: Invalid num of ports count:%d\n", __func__, func); + return -EINVAL; + } + + if (!gbam_wq) { + gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | + WQ_MEM_RECLAIM, 1); + if (!gbam_wq) { + pr_err("%s: Unable to create workqueue gbam_wq\n", + __func__); + return -ENOMEM; + } + } + + ret = gbam_port_alloc(func); + if (ret) { + pr_err("%s: Unable to alloc port:%d\n", __func__, func); + goto destroy_wq; + } + + gbam_debugfs_init(); + + return 0; + +destroy_wq: + destroy_workqueue(gbam_wq); + + return ret; +} + +void gbam_cleanup(enum bam_dmux_func_type func) +{ + gbam_debugfs_remove(); + gbam_port_free(func); +} + +int gbam_mbim_connect(struct usb_gadget *g, struct usb_ep *in, + struct usb_ep *out) +{ + struct data_port *gr; + + gr = kzalloc(sizeof(*gr), GFP_ATOMIC); + if (!gr) + return -ENOMEM; + gr->in = in; + gr->out = out; + gr->cdev->gadget = g; + + return gbam_connect(gr, BAM_DMUX_FUNC_MBIM); +} + +void gbam_mbim_disconnect(void) +{ + struct gbam_port *port = bam_ports[BAM_DMUX_FUNC_MBIM].port; + struct data_port *gr = port->port_usb; + + if (!gr) { + pr_err("%s: port_usb is NULL\n", __func__); + return; + } + + gbam_disconnect(gr, BAM_DMUX_FUNC_MBIM); + kfree(gr); +} + +int gbam_mbim_setup(void) +{ + int ret = 0; + + if (!bam_ports[BAM_DMUX_FUNC_RMNET].port) + ret = 
gbam_setup(BAM_DMUX_FUNC_MBIM); + + return ret; +} diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h index 959f6665b40f..a9e7986c1364 100644 --- a/drivers/usb/gadget/function/u_ether_configfs.h +++ b/drivers/usb/gadget/function/u_ether_configfs.h @@ -261,4 +261,50 @@ out: \ \ CONFIGFS_ATTR(_f_##_opts_, ul_max_pkt_per_xfer) +#define USB_ETHERNET_CONFIGFS_ITEM_ATTR_WCEIS(_f_) \ + static ssize_t _f_##_opts_wceis_show(struct config_item *item, \ + char *page) \ + { \ + struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ + bool wceis; \ + \ + if (opts->bound == false) { \ + pr_err("Gadget function do not bind yet.\n"); \ + return -ENODEV; \ + } \ + \ + mutex_lock(&opts->lock); \ + wceis = opts->wceis; \ + mutex_unlock(&opts->lock); \ + return snprintf(page, PAGE_SIZE, "%d", wceis); \ + } \ + \ + static ssize_t _f_##_opts_wceis_store(struct config_item *item, \ + const char *page, size_t len)\ + { \ + struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ + bool wceis; \ + int ret; \ + \ + if (opts->bound == false) { \ + pr_err("Gadget function do not bind yet.\n"); \ + return -ENODEV; \ + } \ + \ + mutex_lock(&opts->lock); \ + \ + ret = kstrtobool(page, &wceis); \ + if (ret) \ + goto out; \ + \ + opts->wceis = wceis; \ + ret = len; \ +out: \ + mutex_unlock(&opts->lock); \ + \ + return ret; \ + } \ + \ + CONFIGFS_ATTR(_f_##_opts_, wceis) + #endif /* __U_ETHER_CONFIGFS_H */ diff --git a/drivers/usb/gadget/function/u_rmnet.h b/drivers/usb/gadget/function/u_rmnet.h index f2071103f8b8..66bef351f92f 100644 --- a/drivers/usb/gadget/function/u_rmnet.h +++ b/drivers/usb/gadget/function/u_rmnet.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2011-2017, 2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2017, 2020-2021, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -23,7 +23,7 @@ enum bam_dmux_func_type { BAM_DMUX_FUNC_RMNET, - BAM_DMUX_FUNC_MBIM, + BAM_DMUX_FUNC_MBIM = 0, BAM_DMUX_FUNC_DPL, BAM_DMUX_NUM_FUNCS, }; @@ -76,28 +76,15 @@ enum data_xport_type { NR_XPORT_TYPES }; -static inline int gbam_setup(enum bam_dmux_func_type func) -{ - return 0; -} - -static inline void gbam_cleanup(enum bam_dmux_func_type func) -{ -} - -static inline int gbam_connect(struct data_port *gr, - enum bam_dmux_func_type func) -{ - return 0; -} - -static inline void gbam_disconnect(struct data_port *gr, - enum bam_dmux_func_type func) -{ -} +int gbam_connect(struct data_port *gr, enum bam_dmux_func_type func); +void gbam_disconnect(struct data_port *gr, enum bam_dmux_func_type func); +void gbam_cleanup(enum bam_dmux_func_type func); +int gbam_setup(enum bam_dmux_func_type func); +int gbam_mbim_connect(struct usb_gadget *g, struct usb_ep *in, + struct usb_ep *out); int gbam_mbim_connect(struct usb_gadget *g, struct usb_ep *in, - struct usb_ep *out); + struct usb_ep *out); void gbam_mbim_disconnect(void); int gbam_mbim_setup(void); diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h index d65fb4ebac3c..190d3bd9919a 100644 --- a/drivers/usb/gadget/function/u_rndis.h +++ b/drivers/usb/gadget/function/u_rndis.h @@ -39,6 +39,9 @@ struct f_rndis_opts { */ struct mutex lock; int refcnt; + + /* "Wireless" RNDIS; auto-detected by Windows */ + bool wceis; }; void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net); diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 38afe96c5cd2..e9388ae8b079 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -156,7 +156,8 @@ static struct portmaster { * usb_request or NULL if there is an error. */ struct usb_request * -gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) +gs_alloc_req(struct usb_ep *ep, unsigned int len, size_t extra_sz, + gfp_t kmalloc_flags) { struct usb_request *req; @@ -164,7 +165,7 @@ gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) if (req != NULL) { req->length = len; - req->buf = kmalloc(len, kmalloc_flags); + req->buf = kmalloc(len + extra_sz, kmalloc_flags); if (req->buf == NULL) { usb_ep_free_request(ep, req); return NULL; @@ -506,6 +507,7 @@ static void gs_free_requests(struct usb_ep *ep, struct list_head *head, } static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, + size_t extra_sz, void (*fn)(struct usb_ep *, struct usb_request *), int *allocated) { @@ -518,7 +520,7 @@ static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, * be as speedy as we might otherwise be. */ for (i = 0; i < n; i++) { - req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC); + req = gs_alloc_req(ep, ep->maxpacket, extra_sz, GFP_ATOMIC); if (!req) return list_empty(head) ? 
-ENOMEM : 0; req->complete = fn; @@ -540,6 +542,8 @@ static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, */ static int gs_start_io(struct gs_port *port) { + struct usb_function *f = &port->port_usb->func; + struct usb_composite_dev *cdev = f->config->cdev; struct list_head *head = &port->read_pool; struct usb_ep *ep = port->port_usb->out; int status; @@ -551,12 +555,13 @@ static int gs_start_io(struct gs_port *port) * configurations may use different endpoints with a given port; * and high speed vs full speed changes packet sizes too. */ - status = gs_alloc_requests(ep, head, gs_read_complete, + status = gs_alloc_requests(ep, head, 0, gs_read_complete, &port->read_allocated); if (status) return status; status = gs_alloc_requests(port->port_usb->in, &port->write_pool, + cdev->gadget->extra_buf_alloc, gs_write_complete, &port->write_allocated); if (status) { gs_free_requests(ep, head, &port->read_allocated); diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h index 9acaac1cbb75..c6a73cce5dfa 100644 --- a/drivers/usb/gadget/function/u_serial.h +++ b/drivers/usb/gadget/function/u_serial.h @@ -50,7 +50,8 @@ struct gserial { }; /* utilities to allocate/free request and buffer */ -struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags); +struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned int len, + size_t extra_sz, gfp_t flags); void gs_free_req(struct usb_ep *, struct usb_request *req); /* management of individual TTY ports */ diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h index 39b73b777249..6a561cf39d98 100644 --- a/drivers/usb/gadget/function/u_uac2.h +++ b/drivers/usb/gadget/function/u_uac2.h @@ -21,7 +21,7 @@ #define UAC2_DEF_CCHMASK 0x3 #define UAC2_DEF_CSRATE 44100 #define UAC2_DEF_CSSIZE 2 -#define UAC2_DEF_REQ_NUM 2 +#define UAC2_DEF_REQ_NUM 32 struct f_uac2_opts { struct usb_function_instance func_inst; diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h index f63180f0effc..7180c92ae14c 100644 --- a/drivers/usb/gadget/function/uvc.h +++ b/drivers/usb/gadget/function/uvc.h @@ -64,7 +64,7 @@ extern unsigned int uvc_gadget_trace_param; * Driver specific constants */ -#define UVC_NUM_REQUESTS 16 +#define UVC_NUM_REQUESTS 64 #define UVC_MAX_REQUEST_SIZE 64 #define UVC_MAX_EVENTS 4 diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 4e944c04be41..538d56e89ec8 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1213,6 +1213,10 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, INIT_WORK(&gadget->work, usb_gadget_state_work); gadget->dev.parent = parent; + dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask); + gadget->dev.dma_parms = parent->dma_parms; + gadget->dev.dma_mask = parent->dma_mask; + if (release) gadget->dev.release = release; else diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c index 885c9305e8b4..7b85243b8644 100644 --- a/drivers/usb/pd/policy_engine.c +++ b/drivers/usb/pd/policy_engine.c @@ -1721,8 +1721,12 @@ static void handle_vdm_tx(struct usbpd *pd, enum pd_sop_type sop_type) mutex_unlock(&pd->svid_handler_lock); /* retry when hitting PE_SRC/SNK_Ready again */ - if (ret != -EBUSY && sop_type == SOP_MSG) + if (ret != -EBUSY && sop_type == SOP_MSG) { usbpd_set_state(pd, PE_SEND_SOFT_RESET); + } else if (sop_type != SOP_MSG) { + kfree(pd->vdm_tx); + pd->vdm_tx = NULL; + } return; } 
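Note on the recurring buffer change above: ffs_prepare_buffer(), gs_alloc_req() and gbam_write_data_tohost() all over-allocate IN-endpoint transmit buffers by gadget->extra_buf_alloc, the extra padding some UDCs require (zero on controllers without that requirement). A minimal sketch of that allocation pattern, for reference only — the helper name is hypothetical and not part of the patch:

/*
 * Illustrative sketch, not part of the patch: over-allocate an
 * IN-endpoint buffer by the UDC-required padding, as done in
 * ffs_prepare_buffer() and gs_alloc_req() above.
 */
#include <linux/slab.h>
#include <linux/usb/gadget.h>

static void *example_alloc_in_ep_buf(struct usb_gadget *gadget, size_t len)
{
	/* extra_buf_alloc is zero on UDCs with no padding requirement */
	return kmalloc(len + gadget->extra_buf_alloc, GFP_KERNEL);
}

u_bam_dmux.c handles the same requirement differently because it reuses skbs: when skb_tailroom() is smaller than the needed padding it calls skb_copy_expand() and counts the event in skb_expand_cnt, rather than over-allocating up front.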
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c index aaab20dfb419..e0b10654fe27 100644 --- a/drivers/usb/phy/phy-msm-qusb.c +++ b/drivers/usb/phy/phy-msm-qusb.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. */ #include @@ -505,10 +505,25 @@ static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt, } } +static void qusb_phy_reset(struct qusb_phy *qphy) +{ + int ret = 0; + + ret = reset_control_assert(qphy->phy_reset); + if (ret) + dev_err(qphy->phy.dev, "%s: phy_reset assert failed\n", + __func__); + usleep_range(100, 150); + ret = reset_control_deassert(qphy->phy_reset); + if (ret) + dev_err(qphy->phy.dev, "%s: phy_reset deassert failed\n", + __func__); +} + static int qusb_phy_init(struct usb_phy *phy) { struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); - int ret, reset_val = 0; + int reset_val = 0; u8 reg; bool pll_lock_fail = false; @@ -537,13 +552,7 @@ static int qusb_phy_init(struct usb_phy *phy) } /* Perform phy reset */ - ret = reset_control_assert(qphy->phy_reset); - if (ret) - dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__); - usleep_range(100, 150); - ret = reset_control_deassert(qphy->phy_reset); - if (ret) - dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__); + qusb_phy_reset(qphy); /* Disable the PHY */ if (qphy->major_rev < 2) @@ -907,15 +916,7 @@ static int qusb_phy_drive_dp_pulse(struct usb_phy *phy, } qusb_phy_gdsc(qphy, true); qusb_phy_enable_clocks(qphy, true); - - ret = reset_control_assert(qphy->phy_reset); - if (ret) - dev_err(qphy->phy.dev, "phyassert failed\n"); - usleep_range(100, 150); - ret = reset_control_deassert(qphy->phy_reset); - if (ret) - dev_err(qphy->phy.dev, "deassert failed\n"); - + qusb_phy_reset(qphy); /* Configure PHY to enable control on DP/DM lines */ writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN, qphy->base + QUSB2PHY_PORT_POWERDOWN); @@ -1002,13 +1003,7 @@ static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev) qusb_phy_enable_clocks(qphy, true); dev_dbg(qphy->phy.dev, "RESET QUSB PHY\n"); - ret = reset_control_assert(qphy->phy_reset); - if (ret) - dev_err(qphy->phy.dev, "phyassert failed\n"); - usleep_range(100, 150); - ret = reset_control_deassert(qphy->phy_reset); - if (ret) - dev_err(qphy->phy.dev, "deassert failed\n"); + qusb_phy_reset(qphy); /* * Phy in non-driving mode leaves Dp and Dm @@ -1056,6 +1051,13 @@ static int qusb_phy_dpdm_regulator_disable(struct regulator_dev *rdev) if (!qphy->cable_connected) qusb_phy_clear_tcsr_clamp(qphy, false); + /* + * Phy reset is needed in case multiple instances + * of HSPHY exists with shared power supplies. This + * reset is to bring out the PHY from high-Z state + * and avoid extra current consumption. 
+ */ + qusb_phy_reset(qphy); ret = qusb_phy_enable_power(qphy, false); if (ret < 0) { dev_dbg(qphy->phy.dev, @@ -1391,18 +1393,7 @@ static int qusb_phy_prepare_chg_det(struct qusb_phy *qphy) static void qusb_phy_unprepare_chg_det(struct qusb_phy *qphy) { - int ret; - - ret = reset_control_assert(qphy->phy_reset); - if (ret) - dev_err(qphy->phy.dev, "phyassert failed\n"); - - usleep_range(100, 150); - - ret = reset_control_deassert(qphy->phy_reset); - if (ret) - dev_err(qphy->phy.dev, "deassert failed\n"); - + qusb_phy_reset(qphy); qusb_phy_enable_clocks(qphy, false); qusb_phy_clear_tcsr_clamp(qphy, false); qusb_phy_enable_power(qphy, false); diff --git a/drivers/usb/phy/phy-msm-snps-hs.c b/drivers/usb/phy/phy-msm-snps-hs.c index d2334ea523b5..b9cf8dd83163 100644 --- a/drivers/usb/phy/phy-msm-snps-hs.c +++ b/drivers/usb/phy/phy-msm-snps-hs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. */ #include @@ -647,6 +647,13 @@ static int msm_hsphy_dpdm_regulator_disable(struct regulator_dev *rdev) mutex_lock(&phy->phy_lock); if (phy->dpdm_enable) { if (!phy->cable_connected) { + /* + * Phy reset is needed in case multiple instances + * of HSPHY exists with shared power supplies. This + * reset is to bring out the PHY from high-Z state + * and avoid extra current consumption. + */ + msm_hsphy_reset(phy); ret = msm_hsphy_enable_power(phy, false); if (ret < 0) { mutex_unlock(&phy->phy_lock); diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index 5b8edf70124f..e925966a45b2 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c @@ -966,6 +966,7 @@ static int msm_otg_reset(struct usb_phy *phy) u32 val = 0; u32 ulpi_val = 0; + mutex_lock(&motg->lock); msm_otg_dbg_log_event(&motg->phy, "USB RESET", phy->otg->state, get_pm_runtime_counter(phy->dev)); /* @@ -974,10 +975,13 @@ static int msm_otg_reset(struct usb_phy *phy) * USB BAM reset on other cases e.g. USB cable disconnections. * If hardware reported error then it must be reset for recovery. 
*/ - if (motg->err_event_seen) + if (motg->err_event_seen) { dev_info(phy->dev, "performing USB h/w reset for recovery\n"); - else if (pdata->disable_reset_on_disconnect && motg->reset_counter) + } else if (pdata->disable_reset_on_disconnect && + motg->reset_counter) { + mutex_unlock(&motg->lock); return 0; + } motg->reset_counter++; @@ -992,6 +996,7 @@ static int msm_otg_reset(struct usb_phy *phy) enable_irq(motg->phy_irq); enable_irq(motg->irq); + mutex_unlock(&motg->lock); return ret; } @@ -1002,6 +1007,7 @@ static int msm_otg_reset(struct usb_phy *phy) ret = msm_otg_link_reset(motg); if (ret) { dev_err(phy->dev, "link reset failed\n"); + mutex_unlock(&motg->lock); return ret; } @@ -1058,6 +1064,7 @@ static int msm_otg_reset(struct usb_phy *phy) if (phy->otg->state == OTG_STATE_UNDEFINED && motg->rm_pulldown) msm_chg_block_on(motg); + mutex_unlock(&motg->lock); return 0; } @@ -1413,6 +1420,7 @@ static irqreturn_t msm_otg_phy_irq_handler(int irq, void *data) static void msm_otg_set_vbus_state(int online); static void msm_otg_perf_vote_update(struct msm_otg *motg, bool perf_mode); +static int get_psy_type(struct msm_otg *motg); #ifdef CONFIG_PM_SLEEP static int msm_otg_suspend(struct msm_otg *motg) @@ -1451,7 +1459,11 @@ static int msm_otg_suspend(struct msm_otg *motg) msm_otg_perf_vote_update(motg, false); host_pc_charger = (motg->chg_type == USB_SDP_CHARGER) || - (motg->chg_type == USB_CDP_CHARGER); + (motg->chg_type == USB_CDP_CHARGER) || + (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB) || + (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_CDP); + msm_otg_dbg_log_event(phy, "CHARGER CONNECTED", + host_pc_charger, motg->inputs); /* !BSV, but its handling is in progress by otg sm_work */ sm_work_busy = !test_bit(B_SESS_VLD, &motg->inputs) && @@ -1461,17 +1473,6 @@ static int msm_otg_suspend(struct msm_otg *motg) if (motg->err_event_seen) msm_otg_reset(phy); - /* Enable line state difference wakeup fix for only device and host - * bus suspend scenarios. Otherwise PHY can not be suspended when - * a charger that pulls DP/DM high is connected. - */ - config2 = readl_relaxed(USB_GENCONFIG_2); - if (device_bus_suspend) - config2 |= GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN; - else - config2 &= ~GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN; - writel_relaxed(config2, USB_GENCONFIG_2); - /* * Abort suspend when, * 1. host mode activation in progress due to Micro-A cable insertion @@ -1489,6 +1490,17 @@ static int msm_otg_suspend(struct msm_otg *motg) return -EBUSY; } + /* Enable line state difference wakeup fix for only device and host + * bus suspend scenarios. Otherwise PHY can not be suspended when + * a charger that pulls DP/DM high is connected. 
+ */ + config2 = readl_relaxed(USB_GENCONFIG_2); + if (device_bus_suspend) + config2 |= GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN; + else + config2 &= ~GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN; + writel_relaxed(config2, USB_GENCONFIG_2); + if (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED) { /* put the controller in non-driving mode */ func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL); @@ -2855,6 +2867,10 @@ static void check_for_sdp_connection(struct work_struct *w) msm_otg_set_vbus_state(motg->vbus_state); } +#define DP_PULSE_WIDTH_MSEC 200 +static int +msm_otg_phy_drive_dp_pulse(struct msm_otg *motg, unsigned int pulse_width); + static void msm_otg_sm_work(struct work_struct *w) { struct msm_otg *motg = container_of(w, struct msm_otg, sm_work); @@ -2898,6 +2914,9 @@ static void msm_otg_sm_work(struct work_struct *w) get_pm_runtime_counter(dev), 0); pm_runtime_put_sync(dev); break; + } else if (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_CDP) { + pr_debug("Connected to CDP, pull DP up from sm_work\n"); + msm_otg_phy_drive_dp_pulse(motg, DP_PULSE_WIDTH_MSEC); } pm_runtime_put(dev); /* FALL THROUGH */ @@ -3066,19 +3085,25 @@ msm_otg_phy_drive_dp_pulse(struct msm_otg *motg, unsigned int pulse_width) { int ret = 0; u32 val; + bool in_lpm = false; msm_otg_dbg_log_event(&motg->phy, "DRIVE DP PULSE", motg->inputs, 0); - ret = msm_hsusb_ldo_enable(motg, USB_PHY_REG_ON); - if (ret) - return ret; - msm_hsusb_config_vddcx(1); - ret = regulator_enable(hsusb_vdd); - WARN(ret, "hsusb_vdd LDO enable failed for driving pulse\n"); - clk_prepare_enable(motg->xo_clk); - clk_prepare_enable(motg->phy_csr_clk); - clk_prepare_enable(motg->core_clk); - clk_prepare_enable(motg->pclk); + if (atomic_read(&motg->in_lpm)) + in_lpm = true; + + if (in_lpm) { + ret = msm_hsusb_ldo_enable(motg, USB_PHY_REG_ON); + if (ret) + return ret; + msm_hsusb_config_vddcx(1); + ret = regulator_enable(hsusb_vdd); + WARN(ret, "hsusb_vdd LDO enable failed for driving pulse\n"); + clk_prepare_enable(motg->xo_clk); + clk_prepare_enable(motg->phy_csr_clk); + clk_prepare_enable(motg->core_clk); + clk_prepare_enable(motg->pclk); + } msm_otg_exit_phy_retention(motg); @@ -3124,24 +3149,27 @@ msm_otg_phy_drive_dp_pulse(struct msm_otg *motg, unsigned int pulse_width) /* Make sure above writes are completed before clks off */ mb(); - clk_disable_unprepare(motg->pclk); - clk_disable_unprepare(motg->core_clk); - clk_disable_unprepare(motg->phy_csr_clk); - clk_disable_unprepare(motg->xo_clk); - regulator_disable(hsusb_vdd); - msm_hsusb_config_vddcx(0); - msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF); + if (in_lpm) { + clk_disable_unprepare(motg->pclk); + clk_disable_unprepare(motg->core_clk); + clk_disable_unprepare(motg->phy_csr_clk); + clk_disable_unprepare(motg->xo_clk); + regulator_disable(hsusb_vdd); + msm_hsusb_config_vddcx(0); + msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF); + } else { + msm_otg_reset(&motg->phy); + } msm_otg_dbg_log_event(&motg->phy, "DP PULSE DRIVEN", motg->inputs, 0); return 0; } -#define DP_PULSE_WIDTH_MSEC 200 - static void msm_otg_set_vbus_state(int online) { struct msm_otg *motg = the_msm_otg; + struct usb_otg *otg = motg->phy.otg; motg->vbus_state = online; @@ -3154,7 +3182,12 @@ static void msm_otg_set_vbus_state(int online) motg->inputs, 0); if (test_and_set_bit(B_SESS_VLD, &motg->inputs)) return; - if (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_CDP) { + /* + * It might race with block reset happening in sm_work, while + * state machine is in undefined state. Add check to avoid it. 
+ */ + if ((get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_CDP) && + (otg->state != OTG_STATE_UNDEFINED)) { pr_debug("Connected to CDP, pull DP up\n"); msm_otg_phy_drive_dp_pulse(motg, DP_PULSE_WIDTH_MSEC); } @@ -4317,6 +4350,7 @@ static int msm_otg_probe(struct platform_device *pdev) motg->pdev = pdev; motg->dbg_idx = 0; motg->dbg_lock = __RW_LOCK_UNLOCKED(lck); + mutex_init(&motg->lock); if (motg->pdata->bus_scale_table) { motg->bus_perf_client = diff --git a/drivers/video/fbdev/msm/Makefile b/drivers/video/fbdev/msm/Makefile index 309c6d2fe2f9..37f193e23c63 100644 --- a/drivers/video/fbdev/msm/Makefile +++ b/drivers/video/fbdev/msm/Makefile @@ -12,6 +12,7 @@ mdss-mdp-objs += mdss_mdp_pp.o mdss_mdp_pp_debug.o mdss_mdp_pp_cache_config.o md mdss-mdp-objs += mdss_mdp_intf_video.o mdss-mdp-objs += mdss_mdp_intf_cmd.o mdss-mdp-objs += mdss_mdp_intf_writeback.o +mdss-mdp-objs += mdss_rotator.o mdss-mdp-objs += mdss_mdp_overlay.o mdss-mdp-objs += mdss_mdp_layer.o mdss-mdp-objs += mdss_mdp_splash_logo.o diff --git a/drivers/video/fbdev/msm/dsi_status_6g.c b/drivers/video/fbdev/msm/dsi_status_6g.c index a977520ef3d0..24f4a042d0e8 100644 --- a/drivers/video/fbdev/msm/dsi_status_6g.c +++ b/drivers/video/fbdev/msm/dsi_status_6g.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2013-2018, 2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2013-2018, 2020-2021, The Linux Foundation. All rights reserved. */ #include #include @@ -151,7 +151,9 @@ void mdss_check_dsi_ctrl_status(struct work_struct *work, uint32_t interval) MDSS_XLOG(mipi->mode); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON); + mutex_lock(&pstatus_data->mfd->sd_lock); ret = ctrl_pdata->check_status(ctrl_pdata); + mutex_unlock(&pstatus_data->mfd->sd_lock); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF); mutex_unlock(&ctl->offlock); diff --git a/drivers/video/fbdev/msm/mdss_dsi_status.c b/drivers/video/fbdev/msm/mdss_dsi_status.c index 863b596e053b..aae9dae83ffb 100644 --- a/drivers/video/fbdev/msm/mdss_dsi_status.c +++ b/drivers/video/fbdev/msm/mdss_dsi_status.c @@ -50,8 +50,11 @@ int mdss_dsi_check_panel_status(struct mdss_dsi_ctrl_pdata *ctrl, void *arg) * then no need to fail this function, * instead return a positive value. */ - if (ctrl->check_status) + if (ctrl->check_status) { + mutex_lock(&mfd->sd_lock); ret = ctrl->check_status(ctrl); + mutex_unlock(&mfd->sd_lock); + } else ret = 1; mutex_unlock(&ctl->offlock); diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c index 85b0c45c141d..3bed711cb588 100644 --- a/drivers/video/fbdev/msm/mdss_fb.c +++ b/drivers/video/fbdev/msm/mdss_fb.c @@ -3,7 +3,7 @@ * Core MDSS framebuffer driver. * * Copyright (C) 2007 Google Incorporated - * Copyright (c) 2008-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2008-2021, The Linux Foundation. All rights reserved. */ #define pr_fmt(fmt) "%s: " fmt, __func__ @@ -1315,6 +1315,7 @@ static int mdss_fb_probe(struct platform_device *pdev) mutex_init(&mfd->bl_lock); mutex_init(&mfd->mdss_sysfs_lock); mutex_init(&mfd->switch_lock); + mutex_init(&mfd->sd_lock); fbi_list[fbi_list_index++] = fbi; @@ -3686,6 +3687,19 @@ static int __mdss_fb_perform_commit(struct msm_fb_data_type *mfd) int ret = -ENOTSUPP; u32 new_dsi_mode, dynamic_dsi_switch = 0; + if (mfd->panel_info->panel_dead) { + pr_debug("Panel dead, Signal fence and exit commit\n"); + /* + * In case of ESD attack, return early from commit + * after signalling fences. 
+ */ + mdss_fb_release_kickoff(mfd); + mdss_fb_signal_timeline(sync_pt_data); + if ((mfd->panel.type == MIPI_CMD_PANEL) && + (mfd->mdp.signal_retire_fence)) + mfd->mdp.signal_retire_fence(mfd, 1); + return ret; + } if (!sync_pt_data->async_wait_fences) mdss_fb_wait_for_fence(sync_pt_data); sync_pt_data->flushed = false; @@ -4656,7 +4670,6 @@ static int mdss_fb_atomic_commit_ioctl(struct fb_info *info, struct mdp_destination_scaler_data *ds_data = NULL; struct mdp_destination_scaler_data __user *ds_data_user; struct msm_fb_data_type *mfd; - struct mdss_overlay_private *mdp5_data = NULL; struct mdss_data_type *mdata; ret = copy_from_user(&commit, argp, sizeof(struct mdp_layer_commit)); @@ -4669,23 +4682,6 @@ static int mdss_fb_atomic_commit_ioctl(struct fb_info *info, if (!mfd) return -EINVAL; - mdp5_data = mfd_to_mdp5_data(mfd); - - if (mfd->panel_info->panel_dead) { - pr_debug("early commit return\n"); - MDSS_XLOG(mfd->panel_info->panel_dead); - /* - * In case of an ESD attack, since we early return from the - * commits, we need to signal the outstanding fences. - */ - mdss_fb_release_fences(mfd); - if ((mfd->panel.type == MIPI_CMD_PANEL) && - mfd->mdp.signal_retire_fence && mdp5_data) - mfd->mdp.signal_retire_fence(mfd, - mdp5_data->retire_cnt); - return 0; - } - output_layer_user = commit.commit_v1.output_layer; if (output_layer_user) { buffer_size = sizeof(struct mdp_output_layer); @@ -5236,8 +5232,13 @@ int mdss_fb_suspres_panel(struct device *dev, void *data) event = *((bool *) data) ? MDSS_EVENT_RESUME : MDSS_EVENT_SUSPEND; - /* Do not send runtime suspend/resume for HDMI primary */ - if (!mdss_fb_is_hdmi_primary(mfd)) { + /* Do not send runtime suspend/resume for HDMI/DP */ + if ((mfd->panel.type == DTV_PANEL) || + (mfd->panel.type == DP_PANEL) || + (mfd->panel.type == HDMI_PANEL)) { + pr_debug("%s: Avoid sus/res for panel = %d, ndx = %d\n", + __func__, mfd->panel.type, mfd->index); + } else { rc = mdss_fb_send_panel_event(mfd, event, NULL); if (rc) pr_warn("unable to %s fb%d (%d)\n", diff --git a/drivers/video/fbdev/msm/mdss_fb.h b/drivers/video/fbdev/msm/mdss_fb.h index a055493f217a..e6575e74b367 100644 --- a/drivers/video/fbdev/msm/mdss_fb.h +++ b/drivers/video/fbdev/msm/mdss_fb.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2008-2018, 2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2008-2018, 2020-2021, The Linux Foundation. All rights reserved. */ #ifndef MDSS_FB_H #define MDSS_FB_H @@ -313,6 +313,7 @@ struct msm_fb_data_type { u32 bl_level_usr; struct mutex bl_lock; struct mutex mdss_sysfs_lock; + struct mutex sd_lock; bool ipc_resume; struct platform_device *pdev; diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c index 971e09cfb8e4..6962c6efb4dc 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2010-2018, 2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2010-2018, 2020, 2021, The Linux Foundation. All rights reserved. 
*/ #include #include @@ -649,7 +649,7 @@ static ssize_t edid_store(struct device *dev, memset(hdmi_ctrl->edid_buf, 0, hdmi_ctrl->edid_buf_size); while (edid_size--) { - char t[char_to_nib + 1]; + char t[3]; /* char_to_nib + 1; */ int d; memcpy(t, buf_t, sizeof(char) * char_to_nib); diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c index 90d617bc6fd2..b1bcdce75285 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c +++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. */ #define pr_fmt(fmt) "%s: " fmt, __func__ @@ -2417,6 +2417,7 @@ static int __overlay_secure_ctrl(struct msm_fb_data_type *mfd) if (mdp5_data->secure_transition_state == SECURE_TRANSITION_NONE) return ret; + mutex_lock(&mfd->sd_lock); /* Secure Display */ if (mdp5_data->secure_transition_state == SD_NON_SECURE_TO_SECURE) { if (!mdss_get_sd_client_cnt()) { @@ -2445,6 +2446,7 @@ static int __overlay_secure_ctrl(struct msm_fb_data_type *mfd) MDP_SECURE_DISPLAY_OVERLAY_SESSION); if (ret) { pr_err("secure display enable fail:%d\n", ret); + mutex_unlock(&mfd->sd_lock); return ret; } } @@ -2461,6 +2463,7 @@ static int __overlay_secure_ctrl(struct msm_fb_data_type *mfd) MDP_SECURE_DISPLAY_OVERLAY_SESSION); if (ret) { pr_err("secure display disable fail:%d\n", ret); + mutex_unlock(&mfd->sd_lock); return ret; } } @@ -2476,6 +2479,7 @@ static int __overlay_secure_ctrl(struct msm_fb_data_type *mfd) MDP_SECURE_CAMERA_OVERLAY_SESSION); if (ret) { pr_err("secure camera enable fail:%d\n", ret); + mutex_unlock(&mfd->sd_lock); return ret; } } @@ -2492,6 +2496,7 @@ static int __overlay_secure_ctrl(struct msm_fb_data_type *mfd) MDP_SECURE_CAMERA_OVERLAY_SESSION); if (ret) { pr_err("secure camera disable fail:%d\n", ret); + mutex_unlock(&mfd->sd_lock); return ret; } } @@ -2499,6 +2504,7 @@ static int __overlay_secure_ctrl(struct msm_fb_data_type *mfd) mdp5_data->sc_enabled = 0; } + mutex_unlock(&mfd->sd_lock); MDSS_XLOG(ret); return ret; } @@ -5893,7 +5899,7 @@ static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd) pr_debug("cleaning up pipes on fb%d\n", mfd->index); if (mdata->handoff_pending) mdp5_data->allow_kickoff = true; - + atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt); mdss_mdp_overlay_kickoff(mfd, NULL); } else if (!mdss_mdp_ctl_is_power_on(mdp5_data->ctl)) { if (mfd->panel_reconfig) { diff --git a/drivers/video/fbdev/msm/mdss_rotator.c b/drivers/video/fbdev/msm/mdss_rotator.c new file mode 100644 index 000000000000..bdf667ada332 --- /dev/null +++ b/drivers/video/fbdev/msm/mdss_rotator.c @@ -0,0 +1,3037 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2014-2018, 2021, The Linux Foundation. All rights reserved. */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mdss_rotator_internal.h" +#include "mdss_mdp.h" +#include "mdss_debug.h" +#include "mdss_sync.h" + +/* waiting for hw time out, 3 vsync for 30fps*/ +#define ROT_HW_ACQUIRE_TIMEOUT_IN_MS 100 + +/* acquire fence time out, following other driver fence time out practice */ +#define ROT_FENCE_WAIT_TIMEOUT MSEC_PER_SEC +/* + * Max rotator hw blocks possible. 
Used for upper array limits instead of + * alloc and freeing small array + */ +#define ROT_MAX_HW_BLOCKS 2 + +#define ROT_CHECK_BOUNDS(offset, size, max_size) \ + (((size) > (max_size)) || ((offset) > ((max_size) - (size)))) + +#define CLASS_NAME "rotator" +#define DRIVER_NAME "mdss_rotator" + +#define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val) \ + { \ + .src = MSM_BUS_MASTER_AMPSS_M0, \ + .dst = MSM_BUS_SLAVE_DISPLAY_CFG, \ + .ab = (ab_val), \ + .ib = (ib_val), \ + } + +#define BUS_VOTE_19_MHZ 153600000 + +static struct msm_bus_vectors rot_reg_bus_vectors[] = { + MDP_REG_BUS_VECTOR_ENTRY(0, 0), + MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ), +}; +static struct msm_bus_paths rot_reg_bus_usecases[ARRAY_SIZE( + rot_reg_bus_vectors)]; +static struct msm_bus_scale_pdata rot_reg_bus_scale_table = { + .usecase = rot_reg_bus_usecases, + .num_usecases = ARRAY_SIZE(rot_reg_bus_usecases), + .name = "mdss_rot_reg", + .active_only = 1, +}; + +static struct mdss_rot_mgr *rot_mgr; +static void mdss_rotator_wq_handler(struct work_struct *work); + +static int mdss_rotator_bus_scale_set_quota(struct mdss_rot_bus_data_type *bus, + u64 quota) +{ + int new_uc_idx; + int ret; + + if (bus->bus_hdl < 1) { + pr_err("invalid bus handle %d\n", bus->bus_hdl); + return -EINVAL; + } + + if (bus->curr_quota_val == quota) { + pr_debug("bw request already requested\n"); + return 0; + } + + if (!quota) { + new_uc_idx = 0; + } else { + struct msm_bus_vectors *vect = NULL; + struct msm_bus_scale_pdata *bw_table = + bus->bus_scale_pdata; + u64 port_quota = quota; + u32 total_axi_port_cnt; + int i; + + new_uc_idx = (bus->curr_bw_uc_idx % + (bw_table->num_usecases - 1)) + 1; + + total_axi_port_cnt = bw_table->usecase[new_uc_idx].num_paths; + if (total_axi_port_cnt == 0) { + pr_err("Number of bw paths is 0\n"); + return -ENODEV; + } + do_div(port_quota, total_axi_port_cnt); + + for (i = 0; i < total_axi_port_cnt; i++) { + vect = &bw_table->usecase[new_uc_idx].vectors[i]; + vect->ab = port_quota; + vect->ib = 0; + } + } + bus->curr_bw_uc_idx = new_uc_idx; + bus->curr_quota_val = quota; + + pr_debug("uc_idx=%d quota=%llu\n", new_uc_idx, quota); + MDSS_XLOG(new_uc_idx, ((quota >> 32) & 0xFFFFFFFF), + (quota & 0xFFFFFFFF)); + ATRACE_BEGIN("msm_bus_scale_req_rot"); + ret = msm_bus_scale_client_update_request(bus->bus_hdl, + new_uc_idx); + ATRACE_END("msm_bus_scale_req_rot"); + return ret; +} + +static int mdss_rotator_enable_reg_bus(struct mdss_rot_mgr *mgr, u64 quota) +{ + int ret = 0, changed = 0; + u32 usecase_ndx = 0; + + if (!mgr || !mgr->reg_bus.bus_hdl) + return 0; + + if (quota) + usecase_ndx = 1; + + if (usecase_ndx != mgr->reg_bus.curr_bw_uc_idx) { + mgr->reg_bus.curr_bw_uc_idx = usecase_ndx; + changed++; + } + + pr_debug("%s, changed=%d register bus %s\n", __func__, changed, + quota ? "Enable":"Disable"); + + if (changed) { + ATRACE_BEGIN("msm_bus_scale_req_rot_reg"); + ret = msm_bus_scale_client_update_request(mgr->reg_bus.bus_hdl, + usecase_ndx); + ATRACE_END("msm_bus_scale_req_rot_reg"); + } + + return ret; +} + +/* + * Clock rate of all open sessions working a particular hw block + * are added together to get the required rate for that hw block. 
+ * The max of each hw block becomes the final clock rate voted for + */ +static unsigned long mdss_rotator_clk_rate_calc( + struct mdss_rot_mgr *mgr, + struct mdss_rot_file_private *private) +{ + struct mdss_rot_perf *perf; + unsigned long clk_rate[ROT_MAX_HW_BLOCKS] = {0}; + unsigned long total_clk_rate = 0; + int i, wb_idx; + + mutex_lock(&private->perf_lock); + list_for_each_entry(perf, &private->perf_list, list) { + bool rate_accounted_for = false; + + mutex_lock(&perf->work_dis_lock); + /* + * If there is one session that has two work items across + * different hw blocks rate is accounted for in both blocks. + */ + for (i = 0; i < mgr->queue_count; i++) { + if (perf->work_distribution[i]) { + clk_rate[i] += perf->clk_rate; + rate_accounted_for = true; + } + } + + /* + * Sessions that are open but not distributed on any hw block + * Still need to be accounted for. Rate is added to last known + * wb idx. + */ + wb_idx = perf->last_wb_idx; + if ((!rate_accounted_for) && (wb_idx >= 0) && + (wb_idx < mgr->queue_count)) + clk_rate[wb_idx] += perf->clk_rate; + mutex_unlock(&perf->work_dis_lock); + } + mutex_unlock(&private->perf_lock); + + for (i = 0; i < mgr->queue_count; i++) + total_clk_rate = max(clk_rate[i], total_clk_rate); + + pr_debug("Total clk rate calc=%lu\n", total_clk_rate); + return total_clk_rate; +} + +static struct clk *mdss_rotator_get_clk(struct mdss_rot_mgr *mgr, u32 clk_idx) +{ + if (clk_idx >= MDSS_CLK_ROTATOR_END_IDX) { + pr_err("Invalid clk index:%u\n", clk_idx); + return NULL; + } + + return mgr->rot_clk[clk_idx]; +} + +static void mdss_rotator_set_clk_rate(struct mdss_rot_mgr *mgr, + unsigned long rate, u32 clk_idx) +{ + unsigned long clk_rate; + struct clk *clk = mdss_rotator_get_clk(mgr, clk_idx); + int ret; + + if (clk) { + mutex_lock(&mgr->clk_lock); + clk_rate = clk_round_rate(clk, rate); + if (IS_ERR_VALUE(clk_rate)) { + pr_err("unable to round rate err=%ld\n", clk_rate); + } else if (clk_rate != clk_get_rate(clk)) { + ret = clk_set_rate(clk, clk_rate); + if (IS_ERR_VALUE((unsigned long)ret)) { + pr_err("clk_set_rate failed, err:%d\n", ret); + } else { + pr_debug("rotator clk rate=%lu\n", clk_rate); + MDSS_XLOG(clk_rate); + } + } + mutex_unlock(&mgr->clk_lock); + } else { + pr_err("rotator clk not setup properly\n"); + } +} + +static void mdss_rotator_footswitch_ctrl(struct mdss_rot_mgr *mgr, bool on) +{ + int ret; + + if (mgr->regulator_enable == on) { + pr_err("Regulators already in selected mode on=%d\n", on); + return; + } + + pr_debug("%s: rotator regulators\n", on ? "Enable" : "Disable"); + ret = msm_dss_enable_vreg(mgr->module_power.vreg_config, + mgr->module_power.num_vreg, on); + if (ret) { + pr_warn("Rotator regulator failed to %s\n", + on ? "enable" : "disable"); + return; + } + + mgr->regulator_enable = on; +} + +static int mdss_rotator_clk_ctrl(struct mdss_rot_mgr *mgr, int enable) +{ + struct clk *clk; + int ret = 0; + int i, changed = 0; + + mutex_lock(&mgr->clk_lock); + if (enable) { + if (mgr->rot_enable_clk_cnt == 0) + changed++; + mgr->rot_enable_clk_cnt++; + } else { + if (mgr->rot_enable_clk_cnt) { + mgr->rot_enable_clk_cnt--; + if (mgr->rot_enable_clk_cnt == 0) + changed++; + } else { + pr_err("Can not be turned off\n"); + } + } + + if (changed) { + pr_debug("Rotator clk %s\n", enable ? 
"enable" : "disable"); + for (i = 0; i < MDSS_CLK_ROTATOR_END_IDX; i++) { + clk = mgr->rot_clk[i]; + if (enable) { + ret = clk_prepare_enable(clk); + if (ret) { + pr_err("enable failed clk_idx %d\n", i); + goto error; + } + } else { + clk_disable_unprepare(clk); + } + } + mutex_lock(&mgr->bus_lock); + if (enable) { + /* Active+Sleep */ + msm_bus_scale_client_update_context( + mgr->data_bus.bus_hdl, false, + mgr->data_bus.curr_bw_uc_idx); + trace_rotator_bw_ao_as_context(0); + } else { + /* Active Only */ + msm_bus_scale_client_update_context( + mgr->data_bus.bus_hdl, true, + mgr->data_bus.curr_bw_uc_idx); + trace_rotator_bw_ao_as_context(1); + } + mutex_unlock(&mgr->bus_lock); + } + mutex_unlock(&mgr->clk_lock); + + return ret; +error: + for (i--; i >= 0; i--) + clk_disable_unprepare(mgr->rot_clk[i]); + mutex_unlock(&mgr->clk_lock); + return ret; +} + +int mdss_rotator_resource_ctrl(struct mdss_rot_mgr *mgr, int enable) +{ + int changed = 0; + int ret = 0; + + mutex_lock(&mgr->clk_lock); + if (enable) { + if (mgr->res_ref_cnt == 0) + changed++; + mgr->res_ref_cnt++; + } else { + if (mgr->res_ref_cnt) { + mgr->res_ref_cnt--; + if (mgr->res_ref_cnt == 0) + changed++; + } else { + pr_err("Rot resource already off\n"); + } + } + + pr_debug("%s: res_cnt=%d changed=%d enable=%d\n", + __func__, mgr->res_ref_cnt, changed, enable); + MDSS_XLOG(mgr->res_ref_cnt, changed, enable); + + if (changed) { + if (enable) + mdss_rotator_footswitch_ctrl(mgr, true); + else + mdss_rotator_footswitch_ctrl(mgr, false); + } + mutex_unlock(&mgr->clk_lock); + return ret; +} + +/* caller is expected to hold perf->work_dis_lock lock */ +static bool mdss_rotator_is_work_pending(struct mdss_rot_mgr *mgr, + struct mdss_rot_perf *perf) +{ + int i; + + for (i = 0; i < mgr->queue_count; i++) { + if (perf->work_distribution[i]) { + pr_debug("Work is still scheduled to complete\n"); + return true; + } + } + return false; +} + +static int mdss_rotator_install_fence_fd(struct mdss_rot_entry_container *req) +{ + int i; + int ret = 0; + struct sync_file *sync_file; + + for (i = 0; i < req->count; i++) { + sync_file = sync_file_create((struct dma_fence *) + (req->entries[i].output_fence)); + if (!sync_file) { + ret = -ENOMEM; + break; + } + fd_install(req->entries[i].output_fence_fd, sync_file->file); + } + return ret; +} + +static int mdss_rotator_create_fence(struct mdss_rot_entry *entry) +{ + int ret = 0, fd; + u32 val; + struct mdss_fence *fence; + struct mdss_rot_timeline *rot_timeline; + + if (!entry->queue) + return -EINVAL; + + rot_timeline = &entry->queue->timeline; + + mutex_lock(&rot_timeline->lock); + val = rot_timeline->next_value + 1; + + fence = mdss_get_sync_fence(rot_timeline->timeline, + rot_timeline->fence_name, NULL, val); + if (fence == NULL) { + pr_err("cannot create sync point\n"); + goto sync_pt_create_err; + } + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + pr_err("fail to get unused fd\n"); + ret = fd; + goto get_fd_err; + } + + rot_timeline->next_value++; + mutex_unlock(&rot_timeline->lock); + + entry->output_fence_fd = fd; + entry->output_fence = fence; + pr_debug("output sync point created at %s:val=%u\n", + mdss_get_sync_fence_name(fence), val); + + return 0; + +get_fd_err: + mdss_put_sync_fence(fence); +sync_pt_create_err: + mutex_unlock(&rot_timeline->lock); + return ret; +} + +static void mdss_rotator_clear_fence(struct mdss_rot_entry *entry) +{ + struct mdss_rot_timeline *rot_timeline; + + if (entry->input_fence) { + mdss_put_sync_fence(entry->input_fence); + entry->input_fence = NULL; + 
} + + rot_timeline = &entry->queue->timeline; + + /* fence failed to copy to user space */ + if (entry->output_fence) { + mdss_put_sync_fence(entry->output_fence); + entry->output_fence = NULL; + put_unused_fd(entry->output_fence_fd); + + mutex_lock(&rot_timeline->lock); + rot_timeline->next_value--; + mutex_unlock(&rot_timeline->lock); + } +} + +static int mdss_rotator_signal_output(struct mdss_rot_entry *entry) +{ + struct mdss_rot_timeline *rot_timeline; + + if (!entry->queue) + return -EINVAL; + + rot_timeline = &entry->queue->timeline; + + if (entry->output_signaled) { + pr_debug("output already signaled\n"); + return 0; + } + + mutex_lock(&rot_timeline->lock); + mdss_inc_timeline(rot_timeline->timeline, 1); + mutex_unlock(&rot_timeline->lock); + + entry->output_signaled = true; + + return 0; +} + +static int mdss_rotator_wait_for_input(struct mdss_rot_entry *entry) +{ + int ret; + + if (!entry->input_fence) { + pr_debug("invalid input fence, no wait\n"); + return 0; + } + + ret = mdss_wait_sync_fence(entry->input_fence, ROT_FENCE_WAIT_TIMEOUT); + mdss_put_sync_fence(entry->input_fence); + entry->input_fence = NULL; + return ret; +} + +static int mdss_rotator_import_buffer(struct mdp_layer_buffer *buffer, + struct mdss_mdp_data *data, u32 flags, struct device *dev, bool input) +{ + int i, ret = 0; + struct msmfb_data planes[MAX_PLANES]; + int dir = DMA_TO_DEVICE; + + if (!input) + dir = DMA_FROM_DEVICE; + + memset(planes, 0, sizeof(planes)); + + if (buffer->plane_count > MAX_PLANES) { + pr_err("buffer plane_count exceeds MAX_PLANES limit:%d\n", + buffer->plane_count); + return -EINVAL; + } + + for (i = 0; i < buffer->plane_count; i++) { + planes[i].memory_id = buffer->planes[i].fd; + planes[i].offset = buffer->planes[i].offset; + } + + ret = mdss_mdp_data_get_and_validate_size(data, planes, + buffer->plane_count, flags, dev, true, dir, buffer); + data->state = MDP_BUF_STATE_READY; + data->last_alloc = local_clock(); + + return ret; +} + +static int mdss_rotator_map_and_check_data(struct mdss_rot_entry *entry) +{ + int ret; + struct mdp_layer_buffer *input; + struct mdp_layer_buffer *output; + struct mdss_mdp_format_params *fmt; + struct mdss_mdp_plane_sizes ps; + bool rotation; + + input = &entry->item.input; + output = &entry->item.output; + + rotation = (entry->item.flags & MDP_ROTATION_90) ? 
true : false; + + ATRACE_BEGIN(__func__); + ret = mdss_iommu_ctrl(1); + if (IS_ERR_VALUE((unsigned long)ret)) { + ATRACE_END(__func__); + return ret; + } + + /* if error during map, the caller will release the data */ + entry->src_buf.state = MDP_BUF_STATE_ACTIVE; + ret = mdss_mdp_data_map(&entry->src_buf, true, DMA_TO_DEVICE); + if (ret) { + pr_err("source buffer mapping failed ret:%d\n", ret); + goto end; + } + + entry->dst_buf.state = MDP_BUF_STATE_ACTIVE; + ret = mdss_mdp_data_map(&entry->dst_buf, true, DMA_FROM_DEVICE); + if (ret) { + pr_err("destination buffer mapping failed ret:%d\n", ret); + goto end; + } + + fmt = mdss_mdp_get_format_params(input->format); + if (!fmt) { + pr_err("invalid input format:%d\n", input->format); + ret = -EINVAL; + goto end; + } + + ret = mdss_mdp_get_plane_sizes( + fmt, input->width, input->height, &ps, 0, rotation); + if (ret) { + pr_err("fail to get input plane size ret=%d\n", ret); + goto end; + } + + ret = mdss_mdp_data_check(&entry->src_buf, &ps, fmt); + if (ret) { + pr_err("fail to check input data ret=%d\n", ret); + goto end; + } + + fmt = mdss_mdp_get_format_params(output->format); + if (!fmt) { + pr_err("invalid output format:%d\n", output->format); + ret = -EINVAL; + goto end; + } + + ret = mdss_mdp_get_plane_sizes( + fmt, output->width, output->height, &ps, 0, rotation); + if (ret) { + pr_err("fail to get output plane size ret=%d\n", ret); + goto end; + } + + ret = mdss_mdp_data_check(&entry->dst_buf, &ps, fmt); + if (ret) { + pr_err("fail to check output data ret=%d\n", ret); + goto end; + } + +end: + mdss_iommu_ctrl(0); + ATRACE_END(__func__); + + return ret; +} + +static struct mdss_rot_perf *__mdss_rotator_find_session( + struct mdss_rot_file_private *private, + u32 session_id) +{ + struct mdss_rot_perf *perf, *perf_next; + bool found = false; + + list_for_each_entry_safe(perf, perf_next, &private->perf_list, list) { + if (perf->config.session_id == session_id) { + found = true; + break; + } + } + if (!found) + perf = NULL; + return perf; +} + +static struct mdss_rot_perf *mdss_rotator_find_session( + struct mdss_rot_file_private *private, + u32 session_id) +{ + struct mdss_rot_perf *perf; + + mutex_lock(&private->perf_lock); + perf = __mdss_rotator_find_session(private, session_id); + mutex_unlock(&private->perf_lock); + return perf; +} + +static void mdss_rotator_release_data(struct mdss_rot_entry *entry) +{ + struct mdss_mdp_data *src_buf = &entry->src_buf; + struct mdss_mdp_data *dst_buf = &entry->dst_buf; + + mdss_mdp_data_free(src_buf, true, DMA_TO_DEVICE); + src_buf->last_freed = local_clock(); + src_buf->state = MDP_BUF_STATE_UNUSED; + + mdss_mdp_data_free(dst_buf, true, DMA_FROM_DEVICE); + dst_buf->last_freed = local_clock(); + dst_buf->state = MDP_BUF_STATE_UNUSED; +} + +static int mdss_rotator_import_data(struct mdss_rot_mgr *mgr, + struct mdss_rot_entry *entry) +{ + int ret; + struct mdp_layer_buffer *input; + struct mdp_layer_buffer *output; + u32 flag = 0; + + input = &entry->item.input; + output = &entry->item.output; + + if (entry->item.flags & MDP_ROTATION_SECURE) + flag = MDP_SECURE_OVERLAY_SESSION; + + ret = mdss_rotator_import_buffer(input, &entry->src_buf, flag, + &mgr->pdev->dev, true); + if (ret) { + pr_err("fail to import input buffer\n"); + return ret; + } + + /* + * driver assumes output buffer is ready to be written + * immediately + */ + ret = mdss_rotator_import_buffer(output, &entry->dst_buf, flag, + &mgr->pdev->dev, false); + if (ret) { + pr_err("fail to import output buffer\n"); + return ret; + } + + return 
ret; +} + +static struct mdss_rot_hw_resource *mdss_rotator_hw_alloc( + struct mdss_rot_mgr *mgr, u32 pipe_id, u32 wb_id) +{ + struct mdss_rot_hw_resource *hw; + struct mdss_data_type *mdata = mdss_mdp_get_mdata(); + u32 pipe_ndx, offset = mdss_mdp_get_wb_ctl_support(mdata, true); + int ret = 0; + + hw = kzalloc(sizeof(struct mdss_rot_hw_resource), + GFP_KERNEL); + if (!hw) + return ERR_PTR(-ENOMEM); + + hw->ctl = mdss_mdp_ctl_alloc(mdata, offset); + if (IS_ERR_OR_NULL(hw->ctl)) { + pr_err("unable to allocate ctl\n"); + ret = -ENODEV; + goto error; + } + + if (wb_id == MDSS_ROTATION_HW_ANY) + hw->wb = mdss_mdp_wb_alloc(MDSS_MDP_WB_ROTATOR, hw->ctl->num); + else + hw->wb = mdss_mdp_wb_assign(wb_id, hw->ctl->num); + + if (IS_ERR_OR_NULL(hw->wb)) { + pr_err("unable to allocate wb\n"); + ret = -ENODEV; + goto error; + } + hw->ctl->wb = hw->wb; + hw->mixer = mdss_mdp_mixer_assign(hw->wb->num, true, true); + + if (IS_ERR_OR_NULL(hw->mixer)) { + pr_err("unable to allocate wb mixer\n"); + ret = -ENODEV; + goto error; + } + hw->ctl->mixer_left = hw->mixer; + hw->mixer->ctl = hw->ctl; + + hw->mixer->rotator_mode = true; + + switch (hw->mixer->num) { + case MDSS_MDP_WB_LAYERMIXER0: + hw->ctl->opmode = MDSS_MDP_CTL_OP_ROT0_MODE; + break; + case MDSS_MDP_WB_LAYERMIXER1: + hw->ctl->opmode = MDSS_MDP_CTL_OP_ROT1_MODE; + break; + default: + pr_err("invalid layer mixer=%d\n", hw->mixer->num); + ret = -EINVAL; + goto error; + } + + hw->ctl->ops.start_fnc = mdss_mdp_writeback_start; + hw->ctl->power_state = MDSS_PANEL_POWER_ON; + hw->ctl->wb_type = MDSS_MDP_WB_CTL_TYPE_BLOCK; + + + if (hw->ctl->ops.start_fnc) + ret = hw->ctl->ops.start_fnc(hw->ctl); + + if (ret) + goto error; + + if (pipe_id >= mdata->ndma_pipes) + goto error; + + pipe_ndx = mdata->dma_pipes[pipe_id].ndx; + hw->pipe = mdss_mdp_pipe_assign(mdata, hw->mixer, + pipe_ndx, MDSS_MDP_PIPE_RECT0); + if (IS_ERR_OR_NULL(hw->pipe)) { + pr_err("dma pipe allocation failed\n"); + ret = -ENODEV; + goto error; + } + + hw->pipe->mixer_left = hw->mixer; + hw->pipe_id = hw->wb->num; + hw->wb_id = hw->wb->num; + + return hw; +error: + if (!IS_ERR_OR_NULL(hw->pipe)) + mdss_mdp_pipe_destroy(hw->pipe); + if (!IS_ERR_OR_NULL(hw->ctl)) { + if (hw->ctl->ops.stop_fnc) + hw->ctl->ops.stop_fnc(hw->ctl, MDSS_PANEL_POWER_OFF); + mdss_mdp_ctl_free(hw->ctl); + } + kfree(hw); + + return ERR_PTR(ret); +} + +static void mdss_rotator_free_hw(struct mdss_rot_mgr *mgr, + struct mdss_rot_hw_resource *hw) +{ + struct mdss_mdp_mixer *mixer; + struct mdss_mdp_ctl *ctl; + + mixer = hw->pipe->mixer_left; + + mdss_mdp_pipe_destroy(hw->pipe); + + ctl = mdss_mdp_ctl_mixer_switch(mixer->ctl, + MDSS_MDP_WB_CTL_TYPE_BLOCK); + if (ctl) { + if (ctl->ops.stop_fnc) + ctl->ops.stop_fnc(ctl, MDSS_PANEL_POWER_OFF); + mdss_mdp_ctl_free(ctl); + } + + kfree(hw); +} + +struct mdss_rot_hw_resource *mdss_rotator_get_hw_resource( + struct mdss_rot_queue *queue, struct mdss_rot_entry *entry) +{ + struct mdss_rot_hw_resource *hw = queue->hw; + + if (!hw) { + pr_err("no hw in the queue\n"); + return NULL; + } + + mutex_lock(&queue->hw_lock); + + if (hw->workload) { + hw = ERR_PTR(-EBUSY); + goto get_hw_resource_err; + } + hw->workload = entry; + +get_hw_resource_err: + mutex_unlock(&queue->hw_lock); + return hw; +} + +static void mdss_rotator_put_hw_resource(struct mdss_rot_queue *queue, + struct mdss_rot_hw_resource *hw) +{ + mutex_lock(&queue->hw_lock); + hw->workload = NULL; + mutex_unlock(&queue->hw_lock); +} + +/* + * caller will need to call mdss_rotator_deinit_queue when + * the function returns error 
+ */ +static int mdss_rotator_init_queue(struct mdss_rot_mgr *mgr) +{ + int i, size, ret = 0; + char name[32]; + + size = sizeof(struct mdss_rot_queue) * mgr->queue_count; + mgr->queues = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL); + if (!mgr->queues) + return -ENOMEM; + + for (i = 0; i < mgr->queue_count; i++) { + snprintf(name, sizeof(name), "rot_workq_%d", i); + pr_debug("work queue name=%s\n", name); + mgr->queues[i].rot_work_queue = alloc_ordered_workqueue("%s", + WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, name); + if (!mgr->queues[i].rot_work_queue) { + ret = -EPERM; + break; + } + + snprintf(name, sizeof(name), "rot_timeline_%d", i); + pr_debug("timeline name=%s\n", name); + mgr->queues[i].timeline.timeline = + mdss_create_timeline(name); + if (!mgr->queues[i].timeline.timeline) { + ret = -EPERM; + break; + } + + size = sizeof(mgr->queues[i].timeline.fence_name); + snprintf(mgr->queues[i].timeline.fence_name, size, + "rot_fence_%d", i); + mutex_init(&mgr->queues[i].timeline.lock); + + mutex_init(&mgr->queues[i].hw_lock); + } + + return ret; +} + +static void mdss_rotator_deinit_queue(struct mdss_rot_mgr *mgr) +{ + int i; + + if (!mgr->queues) + return; + + for (i = 0; i < mgr->queue_count; i++) { + if (mgr->queues[i].rot_work_queue) + destroy_workqueue(mgr->queues[i].rot_work_queue); + + if (mgr->queues[i].timeline.timeline) { + struct mdss_timeline *obj; + + obj = (struct mdss_timeline *) + mgr->queues[i].timeline.timeline; + mdss_destroy_timeline(obj); + } + } +} + +/* + * mdss_rotator_assign_queue() - Function assign rotation work onto hw + * @mgr: Rotator manager. + * @entry: Contains details on rotator work item being requested + * @private: Private struct used for access rot session performance struct + * + * This Function allocates hw required to complete rotation work item + * requested. 
+ * + * Caller is responsible for calling cleanup function if error is returned + */ +static int mdss_rotator_assign_queue(struct mdss_rot_mgr *mgr, + struct mdss_rot_entry *entry, + struct mdss_rot_file_private *private) +{ + struct mdss_rot_perf *perf; + struct mdss_rot_queue *queue; + struct mdss_rot_hw_resource *hw; + struct mdp_rotation_item *item = &entry->item; + u32 wb_idx = item->wb_idx; + u32 pipe_idx = item->pipe_idx; + int ret = 0; + + /* + * todo: instead of always assign writeback block 0, we can + * apply some load balancing logic in the future + */ + if (wb_idx == MDSS_ROTATION_HW_ANY) { + wb_idx = 0; + pipe_idx = 0; + } + + if (wb_idx >= mgr->queue_count) { + pr_err("Invalid wb idx = %d\n", wb_idx); + return -EINVAL; + } + + queue = mgr->queues + wb_idx; + + mutex_lock(&queue->hw_lock); + + if (!queue->hw) { + hw = mdss_rotator_hw_alloc(mgr, pipe_idx, wb_idx); + if (IS_ERR_OR_NULL(hw)) { + pr_err("fail to allocate hw\n"); + ret = PTR_ERR(hw); + } else { + queue->hw = hw; + } + } + + if (queue->hw) { + entry->queue = queue; + queue->hw->pending_count++; + } + + mutex_unlock(&queue->hw_lock); + + perf = mdss_rotator_find_session(private, item->session_id); + if (!perf) { + pr_err("Could not find session based on rotation work item\n"); + return -EINVAL; + } + + entry->perf = perf; + perf->last_wb_idx = wb_idx; + + return ret; +} + +static void mdss_rotator_unassign_queue(struct mdss_rot_mgr *mgr, + struct mdss_rot_entry *entry) +{ + struct mdss_rot_queue *queue = entry->queue; + + if (!queue) + return; + + entry->queue = NULL; + + mutex_lock(&queue->hw_lock); + + if (!queue->hw) { + pr_err("entry assigned a queue with no hw\n"); + mutex_unlock(&queue->hw_lock); + return; + } + + queue->hw->pending_count--; + if (queue->hw->pending_count == 0) { + mdss_rotator_free_hw(mgr, queue->hw); + queue->hw = NULL; + } + + mutex_unlock(&queue->hw_lock); +} + +static void mdss_rotator_queue_request(struct mdss_rot_mgr *mgr, + struct mdss_rot_file_private *private, + struct mdss_rot_entry_container *req) +{ + struct mdss_rot_entry *entry; + struct mdss_rot_queue *queue; + unsigned long clk_rate; + u32 wb_idx; + int i; + + for (i = 0; i < req->count; i++) { + entry = req->entries + i; + queue = entry->queue; + wb_idx = queue->hw->wb_id; + mutex_lock(&entry->perf->work_dis_lock); + entry->perf->work_distribution[wb_idx]++; + mutex_unlock(&entry->perf->work_dis_lock); + entry->work_assigned = true; + } + + clk_rate = mdss_rotator_clk_rate_calc(mgr, private); + mdss_rotator_set_clk_rate(mgr, clk_rate, MDSS_CLK_ROTATOR_CORE); + + for (i = 0; i < req->count; i++) { + entry = req->entries + i; + queue = entry->queue; + entry->output_fence = NULL; + queue_work(queue->rot_work_queue, &entry->commit_work); + } +} + +static int mdss_rotator_calc_perf(struct mdss_rot_perf *perf) +{ + struct mdp_rotation_config *config = &perf->config; + u32 read_bw, write_bw; + struct mdss_mdp_format_params *in_fmt, *out_fmt; + + in_fmt = mdss_mdp_get_format_params(config->input.format); + if (!in_fmt) { + pr_err("invalid input format\n"); + return -EINVAL; + } + out_fmt = mdss_mdp_get_format_params(config->output.format); + if (!out_fmt) { + pr_err("invalid output format\n"); + return -EINVAL; + } + if (!config->input.width || + (0xffffffff/config->input.width < config->input.height)) + return -EINVAL; + + perf->clk_rate = config->input.width * config->input.height; + + if (!perf->clk_rate || + (0xffffffff/perf->clk_rate < config->frame_rate)) + return -EINVAL; + + perf->clk_rate *= config->frame_rate; + /* rotator 
processes 4 pixels per clock */ + perf->clk_rate /= 4; + + read_bw = config->input.width * config->input.height * + config->frame_rate; + if (in_fmt->chroma_sample == MDSS_MDP_CHROMA_420) + read_bw = (read_bw * 3) / 2; + else + read_bw *= in_fmt->bpp; + + write_bw = config->output.width * config->output.height * + config->frame_rate; + if (out_fmt->chroma_sample == MDSS_MDP_CHROMA_420) + write_bw = (write_bw * 3) / 2; + else + write_bw *= out_fmt->bpp; + + read_bw = apply_comp_ratio_factor(read_bw, in_fmt, + &config->input.comp_ratio); + write_bw = apply_comp_ratio_factor(write_bw, out_fmt, + &config->output.comp_ratio); + + perf->bw = read_bw + write_bw; + return 0; +} + +static int mdss_rotator_update_perf(struct mdss_rot_mgr *mgr) +{ + struct mdss_rot_file_private *priv; + struct mdss_rot_perf *perf; + int not_in_suspend_mode; + u64 total_bw = 0; + + ATRACE_BEGIN(__func__); + + not_in_suspend_mode = !atomic_read(&mgr->device_suspended); + + if (not_in_suspend_mode) { + mutex_lock(&mgr->file_lock); + list_for_each_entry(priv, &mgr->file_list, list) { + mutex_lock(&priv->perf_lock); + list_for_each_entry(perf, &priv->perf_list, list) { + total_bw += perf->bw; + } + mutex_unlock(&priv->perf_lock); + } + mutex_unlock(&mgr->file_lock); + } + + mutex_lock(&mgr->bus_lock); + total_bw += mgr->pending_close_bw_vote; + mdss_rotator_enable_reg_bus(mgr, total_bw); + mdss_rotator_bus_scale_set_quota(&mgr->data_bus, total_bw); + mutex_unlock(&mgr->bus_lock); + + ATRACE_END(__func__); + return 0; +} + +static void mdss_rotator_release_from_work_distribution( + struct mdss_rot_mgr *mgr, + struct mdss_rot_entry *entry) +{ + if (entry->work_assigned) { + bool free_perf = false; + u32 wb_idx = entry->queue->hw->wb_id; + + mutex_lock(&mgr->lock); + mutex_lock(&entry->perf->work_dis_lock); + if (entry->perf->work_distribution[wb_idx]) + entry->perf->work_distribution[wb_idx]--; + + if (!entry->perf->work_distribution[wb_idx] + && list_empty(&entry->perf->list)) { + /* close session has offloaded perf free to us */ + free_perf = true; + } + mutex_unlock(&entry->perf->work_dis_lock); + entry->work_assigned = false; + if (free_perf) { + mutex_lock(&mgr->bus_lock); + mgr->pending_close_bw_vote -= entry->perf->bw; + mutex_unlock(&mgr->bus_lock); + mdss_rotator_resource_ctrl(mgr, false); + kfree(entry->perf->work_distribution); + kfree(entry->perf); + mdss_rotator_update_perf(mgr); + mdss_rotator_clk_ctrl(mgr, false); + entry->perf = NULL; + } + mutex_unlock(&mgr->lock); + } +} + +static void mdss_rotator_release_entry(struct mdss_rot_mgr *mgr, + struct mdss_rot_entry *entry) +{ + mdss_rotator_release_from_work_distribution(mgr, entry); + mdss_rotator_clear_fence(entry); + mdss_rotator_release_data(entry); + mdss_rotator_unassign_queue(mgr, entry); +} + +static int mdss_rotator_config_dnsc_factor(struct mdss_rot_mgr *mgr, + struct mdss_rot_entry *entry) +{ + int ret = 0; + u16 src_w, src_h, dst_w, dst_h, bit; + struct mdp_rotation_item *item = &entry->item; + struct mdss_mdp_format_params *fmt; + + src_w = item->src_rect.w; + src_h = item->src_rect.h; + + if (item->flags & MDP_ROTATION_90) { + dst_w = item->dst_rect.h; + dst_h = item->dst_rect.w; + } else { + dst_w = item->dst_rect.w; + dst_h = item->dst_rect.h; + } + + if (!mgr->has_downscale && + (src_w != dst_w || src_h != dst_h)) { + pr_err("rotator downscale not supported\n"); + ret = -EINVAL; + goto dnsc_err; + } + + entry->dnsc_factor_w = 0; + entry->dnsc_factor_h = 0; + + if ((src_w != dst_w) || (src_h != dst_h)) { + if ((src_w % dst_w) || (src_h % 
dst_h)) { + ret = -EINVAL; + goto dnsc_err; + } + entry->dnsc_factor_w = src_w / dst_w; + bit = fls(entry->dnsc_factor_w); + /* + * New Chipsets supports downscale upto 1/64 + * change the Bit check from 5 to 7 to support 1/64 down scale + */ + if ((entry->dnsc_factor_w & ~BIT(bit - 1)) || (bit > 7)) { + ret = -EINVAL; + goto dnsc_err; + } + entry->dnsc_factor_h = src_h / dst_h; + bit = fls(entry->dnsc_factor_h); + if ((entry->dnsc_factor_h & ~BIT(bit - 1)) || (bit > 7)) { + ret = -EINVAL; + goto dnsc_err; + } + } + + fmt = mdss_mdp_get_format_params(item->output.format); + if (mdss_mdp_is_ubwc_format(fmt) && + (entry->dnsc_factor_h || entry->dnsc_factor_w)) { + pr_err("ubwc not supported with downscale %d\n", + item->output.format); + ret = -EINVAL; + } + +dnsc_err: + + /* Downscaler does not support asymmetrical dnsc */ + if (entry->dnsc_factor_w != entry->dnsc_factor_h) + ret = -EINVAL; + + if (ret) { + pr_err("Invalid rotator downscale ratio %dx%d->%dx%d\n", + src_w, src_h, dst_w, dst_h); + entry->dnsc_factor_w = 0; + entry->dnsc_factor_h = 0; + } + return ret; +} + +static bool mdss_rotator_verify_format(struct mdss_rot_mgr *mgr, + struct mdss_mdp_format_params *in_fmt, + struct mdss_mdp_format_params *out_fmt, bool rotation) +{ + u8 in_v_subsample, in_h_subsample; + u8 out_v_subsample, out_h_subsample; + + if (!mgr->has_ubwc && (mdss_mdp_is_ubwc_format(in_fmt) || + mdss_mdp_is_ubwc_format(out_fmt))) { + pr_err("Rotator doesn't allow ubwc\n"); + return -EINVAL; + } + + if (!(out_fmt->flag & VALID_ROT_WB_FORMAT)) { + pr_err("Invalid output format\n"); + return false; + } + + if (in_fmt->is_yuv != out_fmt->is_yuv) { + pr_err("Rotator does not support CSC\n"); + return false; + } + + /* Forcing same pixel depth */ + if (memcmp(in_fmt->bits, out_fmt->bits, sizeof(in_fmt->bits))) { + /* Exception is that RGB can drop alpha or add X */ + if (in_fmt->is_yuv || out_fmt->alpha_enable || + (in_fmt->bits[C2_R_Cr] != out_fmt->bits[C2_R_Cr]) || + (in_fmt->bits[C0_G_Y] != out_fmt->bits[C0_G_Y]) || + (in_fmt->bits[C1_B_Cb] != out_fmt->bits[C1_B_Cb])) { + pr_err("Bit format does not match\n"); + return false; + } + } + + /* Need to make sure that sub-sampling persists through rotation */ + if (rotation) { + mdss_mdp_get_v_h_subsample_rate(in_fmt->chroma_sample, + &in_v_subsample, &in_h_subsample); + mdss_mdp_get_v_h_subsample_rate(out_fmt->chroma_sample, + &out_v_subsample, &out_h_subsample); + + if ((in_v_subsample != out_h_subsample) || + (in_h_subsample != out_v_subsample)) { + pr_err("Rotation has invalid subsampling\n"); + return false; + } + } else { + if (in_fmt->chroma_sample != out_fmt->chroma_sample) { + pr_err("Format subsampling mismatch\n"); + return false; + } + } + + pr_debug("in_fmt=%0d, out_fmt=%d, has_ubwc=%d\n", + in_fmt->format, out_fmt->format, mgr->has_ubwc); + return true; +} + +static int mdss_rotator_verify_config(struct mdss_rot_mgr *mgr, + struct mdp_rotation_config *config) +{ + struct mdss_mdp_format_params *in_fmt, *out_fmt; + u8 in_v_subsample, in_h_subsample; + u8 out_v_subsample, out_h_subsample; + u32 input, output; + bool rotation; + + input = config->input.format; + output = config->output.format; + rotation = (config->flags & MDP_ROTATION_90) ? 
true : false; + + in_fmt = mdss_mdp_get_format_params(input); + if (!in_fmt) { + pr_err("Unrecognized input format:%u\n", input); + return -EINVAL; + } + + out_fmt = mdss_mdp_get_format_params(output); + if (!out_fmt) { + pr_err("Unrecognized output format:%u\n", output); + return -EINVAL; + } + + mdss_mdp_get_v_h_subsample_rate(in_fmt->chroma_sample, + &in_v_subsample, &in_h_subsample); + mdss_mdp_get_v_h_subsample_rate(out_fmt->chroma_sample, + &out_v_subsample, &out_h_subsample); + + /* Dimension of image needs to be divisible by subsample rate */ + if ((config->input.height % in_v_subsample) || + (config->input.width % in_h_subsample)) { + pr_err("In ROI, subsample mismatch, w=%d, h=%d, vss%d, hss%d\n", + config->input.width, config->input.height, + in_v_subsample, in_h_subsample); + return -EINVAL; + } + + if ((config->output.height % out_v_subsample) || + (config->output.width % out_h_subsample)) { + pr_err("Out ROI, subsample mismatch, w=%d, h=%d, vss%d, hss%d\n", + config->output.width, config->output.height, + out_v_subsample, out_h_subsample); + return -EINVAL; + } + + if (!mdss_rotator_verify_format(mgr, in_fmt, + out_fmt, rotation)) { + pr_err("Rot format pairing invalid, in_fmt:%d, out_fmt:%d\n", + input, output); + return -EINVAL; + } + + return 0; +} + +static int mdss_rotator_validate_item_matches_session( + struct mdp_rotation_config *config, struct mdp_rotation_item *item) +{ + int ret; + + ret = __compare_session_item_rect(&config->input, + &item->src_rect, item->input.format, true); + if (ret) + return ret; + + ret = __compare_session_item_rect(&config->output, + &item->dst_rect, item->output.format, false); + if (ret) + return ret; + + ret = __compare_session_rotations(config->flags, item->flags); + if (ret) + return ret; + + return 0; +} + +static int mdss_rotator_validate_img_roi(struct mdp_rotation_item *item) +{ + struct mdss_mdp_format_params *fmt; + uint32_t width, height; + int ret = 0; + + width = item->input.width; + height = item->input.height; + if (item->flags & MDP_ROTATION_DEINTERLACE) { + width *= 2; + height /= 2; + } + + /* Check roi bounds */ + if (ROT_CHECK_BOUNDS(item->src_rect.x, item->src_rect.w, width) || + ROT_CHECK_BOUNDS(item->src_rect.y, item->src_rect.h, + height)) { + pr_err("invalid src flag=%08x img wh=%dx%d rect=%d,%d,%d,%d\n", + item->flags, width, height, item->src_rect.x, + item->src_rect.y, item->src_rect.w, item->src_rect.h); + return -EINVAL; + } + if (ROT_CHECK_BOUNDS(item->dst_rect.x, item->dst_rect.w, + item->output.width) || + ROT_CHECK_BOUNDS(item->dst_rect.y, item->dst_rect.h, + item->output.height)) { + pr_err("invalid dst img wh=%dx%d rect=%d,%d,%d,%d\n", + item->output.width, item->output.height, + item->dst_rect.x, item->dst_rect.y, item->dst_rect.w, + item->dst_rect.h); + return -EINVAL; + } + + fmt = mdss_mdp_get_format_params(item->output.format); + if (!fmt) { + pr_err("invalid output format:%d\n", item->output.format); + return -EINVAL; + } + + if (mdss_mdp_is_ubwc_format(fmt)) + ret = mdss_mdp_validate_offset_for_ubwc_format(fmt, + item->dst_rect.x, item->dst_rect.y); + + return ret; +} + +static int mdss_rotator_validate_fmt_and_item_flags( + struct mdp_rotation_config *config, struct mdp_rotation_item *item) +{ + struct mdss_mdp_format_params *fmt; + + fmt = mdss_mdp_get_format_params(item->input.format); + if ((item->flags & MDP_ROTATION_DEINTERLACE) && + mdss_mdp_is_ubwc_format(fmt)) { + pr_err("cannot perform mdp deinterlace on tiled formats\n"); + return -EINVAL; + } + return 0; +} + +static int 
mdss_rotator_validate_entry(struct mdss_rot_mgr *mgr, + struct mdss_rot_file_private *private, + struct mdss_rot_entry *entry) +{ + int ret; + struct mdp_rotation_item *item; + struct mdss_rot_perf *perf; + + item = &entry->item; + + if (item->wb_idx != item->pipe_idx) { + pr_err("invalid writeback and pipe idx\n"); + return -EINVAL; + } + + if (item->wb_idx != MDSS_ROTATION_HW_ANY && + item->wb_idx > mgr->queue_count) { + pr_err("invalid writeback idx\n"); + return -EINVAL; + } + + perf = mdss_rotator_find_session(private, item->session_id); + if (!perf) { + pr_err("Could not find session:%u\n", item->session_id); + return -EINVAL; + } + + ret = mdss_rotator_validate_item_matches_session(&perf->config, item); + if (ret) { + pr_err("Work item does not match session:%u\n", + item->session_id); + return ret; + } + + ret = mdss_rotator_validate_img_roi(item); + if (ret) { + pr_err("Image roi is invalid\n"); + return ret; + } + + ret = mdss_rotator_validate_fmt_and_item_flags(&perf->config, item); + if (ret) + return ret; + + ret = mdss_rotator_config_dnsc_factor(mgr, entry); + if (ret) { + pr_err("fail to configure downscale factor\n"); + return ret; + } + return ret; +} + +/* + * Upon failure from the function, caller needs to make sure + * to call mdss_rotator_remove_request to clean up resources. + */ +static int mdss_rotator_add_request(struct mdss_rot_mgr *mgr, + struct mdss_rot_file_private *private, + struct mdss_rot_entry_container *req) +{ + struct mdss_rot_entry *entry; + struct mdp_rotation_item *item; + u32 flag = 0; + int i, ret; + + for (i = 0; i < req->count; i++) { + entry = req->entries + i; + item = &entry->item; + + if (item->flags & MDP_ROTATION_SECURE) + flag = MDP_SECURE_OVERLAY_SESSION; + + ret = mdss_rotator_validate_entry(mgr, private, entry); + if (ret) { + pr_err("fail to validate the entry\n"); + return ret; + } + + ret = mdss_rotator_import_data(mgr, entry); + if (ret) { + pr_err("fail to import the data\n"); + return ret; + } + + if (item->input.fence >= 0) { + entry->input_fence = mdss_get_fd_sync_fence( + item->input.fence); + if (!entry->input_fence) { + pr_err("invalid input fence fd\n"); + return -EINVAL; + } + } + + ret = mdss_rotator_assign_queue(mgr, entry, private); + if (ret) { + pr_err("fail to assign queue to entry\n"); + return ret; + } + + entry->request = req; + + INIT_WORK(&entry->commit_work, mdss_rotator_wq_handler); + + ret = mdss_rotator_create_fence(entry); + if (ret) { + pr_err("fail to create fence\n"); + return ret; + } + item->output.fence = entry->output_fence_fd; + + pr_debug("Entry added. 
wbidx=%u, src{%u,%u,%u,%u}f=%u\n" + "dst{%u,%u,%u,%u}f=%u session_id=%u\n", item->wb_idx, + item->src_rect.x, item->src_rect.y, + item->src_rect.w, item->src_rect.h, item->input.format, + item->dst_rect.x, item->dst_rect.y, + item->dst_rect.w, item->dst_rect.h, item->output.format, + item->session_id); + } + + mutex_lock(&private->req_lock); + list_add(&req->list, &private->req_list); + mutex_unlock(&private->req_lock); + + return 0; +} + +static void mdss_rotator_remove_request(struct mdss_rot_mgr *mgr, + struct mdss_rot_file_private *private, + struct mdss_rot_entry_container *req) +{ + int i; + + mutex_lock(&private->req_lock); + for (i = 0; i < req->count; i++) + mdss_rotator_release_entry(mgr, req->entries + i); + list_del_init(&req->list); + mutex_unlock(&private->req_lock); +} + +/* This function should be called with req_lock */ +static void mdss_rotator_cancel_request(struct mdss_rot_mgr *mgr, + struct mdss_rot_entry_container *req) +{ + struct mdss_rot_entry *entry; + int i; + + /* + * To avoid signal the rotation entry output fence in the wrong + * order, all the entries in the same request needs to be cancelled + * first, before signaling the output fence. + */ + for (i = req->count - 1; i >= 0; i--) { + entry = req->entries + i; + cancel_work_sync(&entry->commit_work); + } + + for (i = req->count - 1; i >= 0; i--) { + entry = req->entries + i; + mdss_rotator_signal_output(entry); + mdss_rotator_release_entry(mgr, entry); + } + + list_del_init(&req->list); + kfree(req); +} + +static void mdss_rotator_cancel_all_requests(struct mdss_rot_mgr *mgr, + struct mdss_rot_file_private *private) +{ + struct mdss_rot_entry_container *req, *req_next; + + pr_debug("Canceling all rotator requests\n"); + + mutex_lock(&private->req_lock); + list_for_each_entry_safe(req, req_next, &private->req_list, list) + mdss_rotator_cancel_request(mgr, req); + mutex_unlock(&private->req_lock); +} + +static void mdss_rotator_free_competed_request(struct mdss_rot_mgr *mgr, + struct mdss_rot_file_private *private) +{ + struct mdss_rot_entry_container *req, *req_next; + + mutex_lock(&private->req_lock); + list_for_each_entry_safe(req, req_next, &private->req_list, list) { + if (atomic_read(&req->pending_count) == 0) { + list_del_init(&req->list); + kfree(req); + } + } + mutex_unlock(&private->req_lock); +} + +static void mdss_rotator_release_rotator_perf_session( + struct mdss_rot_mgr *mgr, + struct mdss_rot_file_private *private) +{ + struct mdss_rot_perf *perf, *perf_next; + + pr_debug("Releasing all rotator request\n"); + mdss_rotator_cancel_all_requests(mgr, private); + + mutex_lock(&private->perf_lock); + list_for_each_entry_safe(perf, perf_next, &private->perf_list, list) { + list_del_init(&perf->list); + kfree(perf->work_distribution); + kfree(perf); + } + mutex_unlock(&private->perf_lock); +} + +static void mdss_rotator_release_all(struct mdss_rot_mgr *mgr) +{ + struct mdss_rot_file_private *priv, *priv_next; + + mutex_lock(&mgr->file_lock); + list_for_each_entry_safe(priv, priv_next, &mgr->file_list, list) { + mdss_rotator_release_rotator_perf_session(mgr, priv); + mdss_rotator_resource_ctrl(mgr, false); + list_del_init(&priv->list); + priv->file->private_data = NULL; + kfree(priv); + } + mutex_unlock(&rot_mgr->file_lock); + + mdss_rotator_update_perf(mgr); +} + +static int mdss_rotator_prepare_hw(struct mdss_rot_hw_resource *hw, + struct mdss_rot_entry *entry) +{ + struct mdss_mdp_pipe *pipe; + struct mdss_mdp_ctl *orig_ctl, *rot_ctl; + int ret; + + pipe = hw->pipe; + orig_ctl = 
pipe->mixer_left->ctl; + if (orig_ctl->shared_lock) + mutex_lock(orig_ctl->shared_lock); + + rot_ctl = mdss_mdp_ctl_mixer_switch(orig_ctl, + MDSS_MDP_WB_CTL_TYPE_BLOCK); + if (!rot_ctl) { + ret = -EINVAL; + goto error; + } else { + hw->ctl = rot_ctl; + pipe->mixer_left = rot_ctl->mixer_left; + } + + return 0; + +error: + if (orig_ctl->shared_lock) + mutex_unlock(orig_ctl->shared_lock); + return ret; +} + +static void mdss_rotator_translate_rect(struct mdss_rect *dst, + struct mdp_rect *src) +{ + dst->x = src->x; + dst->y = src->y; + dst->w = src->w; + dst->h = src->h; +} + +static u32 mdss_rotator_translate_flags(u32 input) +{ + u32 output = 0; + + if (input & MDP_ROTATION_NOP) + output |= MDP_ROT_NOP; + if (input & MDP_ROTATION_FLIP_LR) + output |= MDP_FLIP_LR; + if (input & MDP_ROTATION_FLIP_UD) + output |= MDP_FLIP_UD; + if (input & MDP_ROTATION_90) + output |= MDP_ROT_90; + if (input & MDP_ROTATION_DEINTERLACE) + output |= MDP_DEINTERLACE; + if (input & MDP_ROTATION_SECURE) + output |= MDP_SECURE_OVERLAY_SESSION; + if (input & MDP_ROTATION_BWC_EN) + output |= MDP_BWC_EN; + + return output; +} + +static int mdss_rotator_config_hw(struct mdss_rot_hw_resource *hw, + struct mdss_rot_entry *entry) +{ + struct mdss_mdp_pipe *pipe; + struct mdp_rotation_item *item; + struct mdss_rot_perf *perf; + int ret; + + ATRACE_BEGIN(__func__); + pipe = hw->pipe; + item = &entry->item; + perf = entry->perf; + + pipe->flags = mdss_rotator_translate_flags(item->flags); + pipe->src_fmt = mdss_mdp_get_format_params(item->input.format); + pipe->img_width = item->input.width; + pipe->img_height = item->input.height; + mdss_rotator_translate_rect(&pipe->src, &item->src_rect); + mdss_rotator_translate_rect(&pipe->dst, &item->src_rect); + pipe->scaler.enable = 0; + pipe->frame_rate = perf->config.frame_rate; + + pipe->params_changed++; + + mdss_mdp_smp_release(pipe); + + ret = mdss_mdp_smp_reserve(pipe); + if (ret) { + pr_err("unable to mdss_mdp_smp_reserve rot data\n"); + goto done; + } + + ret = mdss_mdp_overlay_setup_scaling(pipe); + if (ret) { + pr_err("scaling setup failed %d\n", ret); + goto done; + } + + ret = mdss_mdp_pipe_queue_data(pipe, &entry->src_buf); + pr_debug("Config pipe. 
src{%u,%u,%u,%u}f=%u\n" + "dst{%u,%u,%u,%u}f=%u session_id=%u\n", + item->src_rect.x, item->src_rect.y, + item->src_rect.w, item->src_rect.h, item->input.format, + item->dst_rect.x, item->dst_rect.y, + item->dst_rect.w, item->dst_rect.h, item->output.format, + item->session_id); + MDSS_XLOG(item->input.format, pipe->img_width, pipe->img_height, + pipe->flags); +done: + ATRACE_END(__func__); + return ret; +} + +static int mdss_rotator_kickoff_entry(struct mdss_rot_hw_resource *hw, + struct mdss_rot_entry *entry) +{ + int ret; + struct mdss_mdp_writeback_arg wb_args = { + .data = &entry->dst_buf, + .priv_data = entry, + }; + + ret = mdss_mdp_writeback_display_commit(hw->ctl, &wb_args); + return ret; +} + +static int mdss_rotator_wait_for_entry(struct mdss_rot_hw_resource *hw, + struct mdss_rot_entry *entry) +{ + int ret; + struct mdss_mdp_ctl *ctl = hw->ctl; + + ret = mdss_mdp_display_wait4comp(ctl); + if (ctl->shared_lock) + mutex_unlock(ctl->shared_lock); + return ret; +} + +static int mdss_rotator_commit_entry(struct mdss_rot_hw_resource *hw, + struct mdss_rot_entry *entry) +{ + int ret; + + ret = mdss_rotator_prepare_hw(hw, entry); + if (ret) { + pr_err("fail to prepare hw resource %d\n", ret); + return ret; + } + + ret = mdss_rotator_config_hw(hw, entry); + if (ret) { + pr_err("fail to configure hw resource %d\n", ret); + return ret; + } + + ret = mdss_rotator_kickoff_entry(hw, entry); + if (ret) { + pr_err("fail to do kickoff %d\n", ret); + return ret; + } + + ret = mdss_rotator_wait_for_entry(hw, entry); + if (ret) { + pr_err("fail to wait for completion %d\n", ret); + return ret; + } + + return ret; +} + +static int mdss_rotator_handle_entry(struct mdss_rot_hw_resource *hw, + struct mdss_rot_entry *entry) +{ + int ret; + + ret = mdss_rotator_wait_for_input(entry); + if (ret) { + pr_err("wait for input buffer failed %d\n", ret); + return ret; + } + + ret = mdss_rotator_map_and_check_data(entry); + if (ret) { + pr_err("fail to prepare input/output data %d\n", ret); + return ret; + } + + ret = mdss_rotator_commit_entry(hw, entry); + if (ret) + pr_err("rotator commit failed %d\n", ret); + + return ret; +} + +static void mdss_rotator_wq_handler(struct work_struct *work) +{ + struct mdss_rot_entry *entry; + struct mdss_rot_entry_container *request; + struct mdss_rot_hw_resource *hw; + int ret; + + entry = container_of(work, struct mdss_rot_entry, commit_work); + request = entry->request; + + if (!request) { + pr_err("fatal error, no request with entry\n"); + return; + } + + hw = mdss_rotator_get_hw_resource(entry->queue, entry); + if (!hw) { + pr_err("no hw for the queue\n"); + goto get_hw_res_err; + } + + ret = mdss_rotator_handle_entry(hw, entry); + if (ret) { + struct mdp_rotation_item *item = &entry->item; + + pr_err("Rot req fail. 
src{%u,%u,%u,%u}f=%u\n" + "dst{%u,%u,%u,%u}f=%u session_id=%u, wbidx%d, pipe_id=%d\n", + item->src_rect.x, item->src_rect.y, + item->src_rect.w, item->src_rect.h, item->input.format, + item->dst_rect.x, item->dst_rect.y, + item->dst_rect.w, item->dst_rect.h, item->output.format, + item->session_id, item->wb_idx, item->pipe_idx); + } + + mdss_rotator_put_hw_resource(entry->queue, hw); + +get_hw_res_err: + mdss_rotator_signal_output(entry); + mdss_rotator_release_entry(rot_mgr, entry); + atomic_dec(&request->pending_count); +} + +static int mdss_rotator_validate_request(struct mdss_rot_mgr *mgr, + struct mdss_rot_file_private *private, + struct mdss_rot_entry_container *req) +{ + int i, ret = 0; + struct mdss_rot_entry *entry; + + for (i = 0; i < req->count; i++) { + entry = req->entries + i; + ret = mdss_rotator_validate_entry(mgr, private, + entry); + if (ret) { + pr_err("fail to validate the entry\n"); + return ret; + } + } + + return ret; +} + +static u32 mdss_rotator_generator_session_id(struct mdss_rot_mgr *mgr) +{ + u32 id; + + mutex_lock(&mgr->lock); + id = mgr->session_id_generator++; + mutex_unlock(&mgr->lock); + return id; +} + +static int mdss_rotator_open_session(struct mdss_rot_mgr *mgr, + struct mdss_rot_file_private *private, unsigned long arg) +{ + struct mdp_rotation_config config; + struct mdss_rot_perf *perf; + int ret; + + ret = copy_from_user(&config, (void __user *)arg, sizeof(config)); + if (ret) { + pr_err("fail to copy session data\n"); + return ret; + } + + ret = mdss_rotator_verify_config(mgr, &config); + if (ret) { + pr_err("Rotator verify format failed\n"); + return ret; + } + + perf = kzalloc(sizeof(*perf), GFP_KERNEL); + if (!perf) + return -ENOMEM; + + ATRACE_BEGIN(__func__); /* Open session votes for bw */ + perf->work_distribution = kcalloc(mgr->queue_count, sizeof(u32), + GFP_KERNEL); + if (!perf->work_distribution) { + ret = -ENOMEM; + goto alloc_err; + } + + config.session_id = mdss_rotator_generator_session_id(mgr); + perf->config = config; + perf->last_wb_idx = -1; + mutex_init(&perf->work_dis_lock); + + INIT_LIST_HEAD(&perf->list); + + ret = mdss_rotator_calc_perf(perf); + if (ret) { + pr_err("error setting the session%d\n", ret); + goto copy_user_err; + } + + ret = copy_to_user((void *)arg, &config, sizeof(config)); + if (ret) { + pr_err("fail to copy to user\n"); + goto copy_user_err; + } + + mutex_lock(&private->perf_lock); + list_add(&perf->list, &private->perf_list); + mutex_unlock(&private->perf_lock); + + ret = mdss_rotator_resource_ctrl(mgr, true); + if (ret) { + pr_err("Failed to aqcuire rotator resources\n"); + goto resource_err; + } + + mdss_rotator_clk_ctrl(rot_mgr, true); + ret = mdss_rotator_update_perf(mgr); + if (ret) { + pr_err("fail to open session, not enough clk/bw\n"); + goto perf_err; + } + pr_debug("open session id=%u in{%u,%u}f:%u out{%u,%u}f:%u\n", + config.session_id, config.input.width, config.input.height, + config.input.format, config.output.width, config.output.height, + config.output.format); + + goto done; +perf_err: + mdss_rotator_clk_ctrl(rot_mgr, false); + mdss_rotator_resource_ctrl(mgr, false); +resource_err: + mutex_lock(&private->perf_lock); + list_del_init(&perf->list); + mutex_unlock(&private->perf_lock); +copy_user_err: + kfree(perf->work_distribution); +alloc_err: + kfree(perf); +done: + ATRACE_END(__func__); + return ret; +} + +static int mdss_rotator_close_session(struct mdss_rot_mgr *mgr, + struct mdss_rot_file_private *private, unsigned long arg) +{ + struct mdss_rot_perf *perf; + bool offload_release_work 
= false; + u32 id; + + id = (u32)arg; + mutex_lock(&mgr->lock); + mutex_lock(&private->perf_lock); + perf = __mdss_rotator_find_session(private, id); + if (!perf) { + mutex_unlock(&private->perf_lock); + mutex_unlock(&mgr->lock); + pr_err("Trying to close session that does not exist\n"); + return -EINVAL; + } + + ATRACE_BEGIN(__func__); + mutex_lock(&perf->work_dis_lock); + if (mdss_rotator_is_work_pending(mgr, perf)) { + pr_debug("Work is still pending, offload free to wq\n"); + mutex_lock(&mgr->bus_lock); + mgr->pending_close_bw_vote += perf->bw; + mutex_unlock(&mgr->bus_lock); + offload_release_work = true; + } + list_del_init(&perf->list); + mutex_unlock(&perf->work_dis_lock); + mutex_unlock(&private->perf_lock); + + if (offload_release_work) + goto done; + + mdss_rotator_resource_ctrl(mgr, false); + kfree(perf->work_distribution); + kfree(perf); + mdss_rotator_update_perf(mgr); + mdss_rotator_clk_ctrl(rot_mgr, false); +done: + pr_debug("Closed session id:%u\n", id); + ATRACE_END(__func__); + mutex_unlock(&mgr->lock); + return 0; +} + +static int mdss_rotator_config_session(struct mdss_rot_mgr *mgr, + struct mdss_rot_file_private *private, unsigned long arg) +{ + int ret = 0; + struct mdss_rot_perf *perf; + struct mdp_rotation_config config; + + ret = copy_from_user(&config, (void __user *)arg, + sizeof(config)); + if (ret) { + pr_err("fail to copy session data\n"); + return ret; + } + + ret = mdss_rotator_verify_config(mgr, &config); + if (ret) { + pr_err("Rotator verify format failed\n"); + return ret; + } + + mutex_lock(&mgr->lock); + perf = mdss_rotator_find_session(private, config.session_id); + if (!perf) { + pr_err("No session with id=%u could be found\n", + config.session_id); + mutex_unlock(&mgr->lock); + return -EINVAL; + } + + ATRACE_BEGIN(__func__); + mutex_lock(&private->perf_lock); + perf->config = config; + ret = mdss_rotator_calc_perf(perf); + mutex_unlock(&private->perf_lock); + + if (ret) { + pr_err("error in configuring the session %d\n", ret); + goto done; + } + + ret = mdss_rotator_update_perf(mgr); + + pr_debug("reconfig session id=%u in{%u,%u}f:%u out{%u,%u}f:%u\n", + config.session_id, config.input.width, config.input.height, + config.input.format, config.output.width, config.output.height, + config.output.format); +done: + ATRACE_END(__func__); + mutex_unlock(&mgr->lock); + return ret; +} + +struct mdss_rot_entry_container *mdss_rotator_req_init( + struct mdss_rot_mgr *mgr, struct mdp_rotation_item *items, + u32 count, u32 flags) +{ + struct mdss_rot_entry_container *req; + int size, i; + + /* + * Check input and output plane_count from each given item + * are within the MAX_PLANES limit + */ + for (i = 0 ; i < count; i++) { + if ((items[i].input.plane_count > MAX_PLANES) || + (items[i].output.plane_count > MAX_PLANES)) { + pr_err("Input/Output plane_count exceeds MAX_PLANES limit, input:%d, output:%d\n", + items[i].input.plane_count, + items[i].output.plane_count); + return ERR_PTR(-EINVAL); + } + } + + size = sizeof(struct mdss_rot_entry_container); + size += sizeof(struct mdss_rot_entry) * count; + req = kzalloc(size, GFP_KERNEL); + + if (!req) + return ERR_PTR(-ENOMEM); + + + INIT_LIST_HEAD(&req->list); + req->count = count; + req->entries = (struct mdss_rot_entry *) + ((void *)req + sizeof(struct mdss_rot_entry_container)); + req->flags = flags; + atomic_set(&req->pending_count, count); + + for (i = 0; i < count; i++) + req->entries[i].item = items[i]; + + return req; +} + +static int mdss_rotator_handle_request_common(struct mdss_rot_mgr *mgr, + struct 
mdss_rot_file_private *private, + struct mdss_rot_entry_container *req, + struct mdp_rotation_item *items) +{ + int i, ret; + + mdss_rotator_free_competed_request(mgr, private); + + ret = mdss_rotator_add_request(mgr, private, req); + if (ret) { + pr_err("fail to add rotation request\n"); + mdss_rotator_remove_request(mgr, private, req); + return ret; + } + + for (i = 0; i < req->count; i++) + items[i].output.fence = + req->entries[i].item.output.fence; + + return ret; +} + +static int mdss_rotator_handle_request(struct mdss_rot_mgr *mgr, + struct mdss_rot_file_private *private, unsigned long arg) +{ + struct mdp_rotation_request user_req; + struct mdp_rotation_item *items = NULL; + struct mdss_rot_entry_container *req = NULL; + int size, ret; + uint32_t req_count; + struct mdss_data_type *mdata = mdss_mdp_get_mdata(); + + if (mdata->handoff_pending) { + pr_err("Rotator request failed. Handoff pending\n"); + return -EPERM; + } + + if (mdss_get_sd_client_cnt()) { + pr_err("rot request not permitted during secure display session\n"); + return -EPERM; + } + + ret = copy_from_user(&user_req, (void __user *)arg, + sizeof(user_req)); + if (ret) { + pr_err("fail to copy rotation request\n"); + return ret; + } + + req_count = user_req.count; + if ((!req_count) || (req_count > MAX_LAYER_COUNT)) { + pr_err("invalid rotator req count :%d\n", req_count); + return -EINVAL; + } + + /* + * here, we make a copy of the items so that we can copy + * all the output fences to the client in one call. Otherwise, + * we will have to call multiple copy_to_user + */ + size = sizeof(struct mdp_rotation_item) * req_count; + items = kzalloc(size, GFP_KERNEL); + if (!items) { + pr_err("fail to allocate rotation items\n"); + return -ENOMEM; + } + ret = copy_from_user(items, user_req.list, size); + if (ret) { + pr_err("fail to copy rotation items\n"); + kfree(items); + return ret; + } + + req = mdss_rotator_req_init(mgr, items, user_req.count, user_req.flags); + if (IS_ERR_OR_NULL(req)) { + pr_err("fail to allocate rotation request\n"); + ret = PTR_ERR(req); + kfree(items); + return ret; + } + + mutex_lock(&mgr->lock); + + if (req->flags & MDSS_ROTATION_REQUEST_VALIDATE) { + ret = mdss_rotator_validate_request(mgr, private, req); + goto handle_request_err1; + } + + ret = mdss_rotator_handle_request_common(mgr, private, req, items); + if (ret) { + pr_err("fail to handle request\n"); + goto handle_request_err1; + } + + ret = copy_to_user(user_req.list, items, size); + if (ret) { + pr_err("fail to copy output fence to user\n"); + mdss_rotator_remove_request(mgr, private, req); + goto handle_request_err1; + } + + ret = mdss_rotator_install_fence_fd(req); + if (ret) { + pr_err("get_unused_fd_flags failed error:0x%x\n", ret); + mdss_rotator_remove_request(mgr, private, req); + goto handle_request_err1; + } + + mdss_rotator_queue_request(mgr, private, req); + + mutex_unlock(&mgr->lock); + + kfree(items); + return ret; + +handle_request_err1: + mutex_unlock(&mgr->lock); + kfree(items); + kfree(req); + return ret; +} + +static int mdss_rotator_open(struct inode *inode, struct file *file) +{ + struct mdss_rot_file_private *private; + + if (!rot_mgr) + return -ENODEV; + + if (atomic_read(&rot_mgr->device_suspended)) + return -EPERM; + + private = kzalloc(sizeof(*private), + GFP_KERNEL); + if (!private) + return -ENOMEM; + + mutex_init(&private->req_lock); + mutex_init(&private->perf_lock); + INIT_LIST_HEAD(&private->req_list); + INIT_LIST_HEAD(&private->perf_list); + INIT_LIST_HEAD(&private->list); + + 
mutex_lock(&rot_mgr->file_lock); + list_add(&private->list, &rot_mgr->file_list); + file->private_data = private; + private->file = file; + mutex_unlock(&rot_mgr->file_lock); + + return 0; +} + +static bool mdss_rotator_file_priv_allowed(struct mdss_rot_mgr *mgr, + struct mdss_rot_file_private *priv) +{ + struct mdss_rot_file_private *_priv, *_priv_next; + bool ret = false; + + mutex_lock(&mgr->file_lock); + list_for_each_entry_safe(_priv, _priv_next, &mgr->file_list, list) { + if (_priv == priv) { + ret = true; + break; + } + } + mutex_unlock(&mgr->file_lock); + return ret; +} + +static int mdss_rotator_close(struct inode *inode, struct file *file) +{ + struct mdss_rot_file_private *private; + + if (!rot_mgr) + return -ENODEV; + + if (!file->private_data) + return -EINVAL; + + private = (struct mdss_rot_file_private *)file->private_data; + + if (!(mdss_rotator_file_priv_allowed(rot_mgr, private))) { + pr_err("Calling close with unrecognized rot_file_private\n"); + return -EINVAL; + } + + mdss_rotator_release_rotator_perf_session(rot_mgr, private); + + mutex_lock(&rot_mgr->file_lock); + list_del_init(&private->list); + kfree(private); + file->private_data = NULL; + mutex_unlock(&rot_mgr->file_lock); + + mdss_rotator_update_perf(rot_mgr); + return 0; +} + +#ifdef CONFIG_COMPAT +static int mdss_rotator_handle_request32(struct mdss_rot_mgr *mgr, + struct mdss_rot_file_private *private, unsigned long arg) +{ + struct mdp_rotation_request32 user_req32; + struct mdp_rotation_item *items = NULL; + struct mdss_rot_entry_container *req = NULL; + int size, ret; + uint32_t req_count; + + if (mdss_get_sd_client_cnt()) { + pr_err("rot request not permitted during secure display session\n"); + return -EPERM; + } + + ret = copy_from_user(&user_req32, (void __user *)arg, + sizeof(user_req32)); + if (ret) { + pr_err("fail to copy rotation request\n"); + return ret; + } + + req_count = user_req32.count; + if ((!req_count) || (req_count > MAX_LAYER_COUNT)) { + pr_err("invalid rotator req count :%d\n", req_count); + return -EINVAL; + } + + size = sizeof(struct mdp_rotation_item) * req_count; + items = kzalloc(size, GFP_KERNEL); + if (!items) { + pr_err("fail to allocate rotation items\n"); + return -ENOMEM; + } + ret = copy_from_user(items, compat_ptr(user_req32.list), size); + if (ret) { + pr_err("fail to copy rotation items\n"); + kfree(items); + return ret; + } + + req = mdss_rotator_req_init(mgr, items, user_req32.count, + user_req32.flags); + if (IS_ERR_OR_NULL(req)) { + pr_err("fail to allocate rotation request\n"); + ret = PTR_ERR(req); + kfree(items); + return ret; + } + + mutex_lock(&mgr->lock); + + if (req->flags & MDSS_ROTATION_REQUEST_VALIDATE) { + ret = mdss_rotator_validate_request(mgr, private, req); + goto handle_request32_err1; + } + + ret = mdss_rotator_handle_request_common(mgr, private, req, items); + if (ret) { + pr_err("fail to handle request\n"); + goto handle_request32_err1; + } + + ret = copy_to_user(compat_ptr(user_req32.list), items, size); + if (ret) { + pr_err("fail to copy output fence to user\n"); + mdss_rotator_remove_request(mgr, private, req); + goto handle_request32_err1; + } + + ret = mdss_rotator_install_fence_fd(req); + if (ret) { + pr_err("get_unused_fd_flags failed error:0x%x\n", ret); + mdss_rotator_remove_request(mgr, private, req); + goto handle_request32_err1; + } + + mdss_rotator_queue_request(mgr, private, req); + + mutex_unlock(&mgr->lock); + + kfree(items); + return ret; + +handle_request32_err1: + mutex_unlock(&mgr->lock); + kfree(items); + kfree(req); + return 
ret; +} + +static unsigned int __do_compat_ioctl_rot(unsigned int cmd32) +{ + unsigned int cmd; + + switch (cmd32) { + case MDSS_ROTATION_REQUEST32: + cmd = MDSS_ROTATION_REQUEST; + break; + case MDSS_ROTATION_OPEN32: + cmd = MDSS_ROTATION_OPEN; + break; + case MDSS_ROTATION_CLOSE32: + cmd = MDSS_ROTATION_CLOSE; + break; + case MDSS_ROTATION_CONFIG32: + cmd = MDSS_ROTATION_CONFIG; + break; + default: + cmd = cmd32; + break; + } + + return cmd; +} + +static long mdss_rotator_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct mdss_rot_file_private *private; + int ret = -EINVAL; + + if (!rot_mgr) + return -ENODEV; + + if (atomic_read(&rot_mgr->device_suspended)) + return -EPERM; + + if (!file->private_data) + return -EINVAL; + + private = (struct mdss_rot_file_private *)file->private_data; + + if (!(mdss_rotator_file_priv_allowed(rot_mgr, private))) { + pr_err("Calling ioctl with unrecognized rot_file_private\n"); + return -EINVAL; + } + + cmd = __do_compat_ioctl_rot(cmd); + + switch (cmd) { + case MDSS_ROTATION_REQUEST: + ATRACE_BEGIN("rotator_request32"); + ret = mdss_rotator_handle_request32(rot_mgr, private, arg); + ATRACE_END("rotator_request32"); + break; + case MDSS_ROTATION_OPEN: + ret = mdss_rotator_open_session(rot_mgr, private, arg); + break; + case MDSS_ROTATION_CLOSE: + ret = mdss_rotator_close_session(rot_mgr, private, arg); + break; + case MDSS_ROTATION_CONFIG: + ret = mdss_rotator_config_session(rot_mgr, private, arg); + break; + default: + pr_err("unexpected IOCTL %d\n", cmd); + } + + if (ret) + pr_err("rotator ioctl=%d failed, err=%d\n", cmd, ret); + return ret; + +} +#endif + +static long mdss_rotator_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct mdss_rot_file_private *private; + int ret = -EINVAL; + + if (!rot_mgr) + return -ENODEV; + + if (atomic_read(&rot_mgr->device_suspended)) + return -EPERM; + + if (!file->private_data) + return -EINVAL; + + private = (struct mdss_rot_file_private *)file->private_data; + + if (!(mdss_rotator_file_priv_allowed(rot_mgr, private))) { + pr_err("Calling ioctl with unrecognized rot_file_private\n"); + return -EINVAL; + } + + switch (cmd) { + case MDSS_ROTATION_REQUEST: + ATRACE_BEGIN("rotator_request"); + ret = mdss_rotator_handle_request(rot_mgr, private, arg); + ATRACE_END("rotator_request"); + break; + case MDSS_ROTATION_OPEN: + ret = mdss_rotator_open_session(rot_mgr, private, arg); + break; + case MDSS_ROTATION_CLOSE: + ret = mdss_rotator_close_session(rot_mgr, private, arg); + break; + case MDSS_ROTATION_CONFIG: + ret = mdss_rotator_config_session(rot_mgr, private, arg); + break; + default: + pr_err("unexpected IOCTL %d\n", cmd); + } + + if (ret) + pr_err("rotator ioctl=%d failed, err=%d\n", cmd, ret); + return ret; +} + +static ssize_t caps_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + size_t len = PAGE_SIZE; + int cnt = 0; + + if (!rot_mgr) + return cnt; + +#define SPRINT(fmt, ...) 
\ + (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__)) + + SPRINT("wb_count=%d\n", rot_mgr->queue_count); + SPRINT("downscale=%d\n", rot_mgr->has_downscale); + + return cnt; +} + +static DEVICE_ATTR_RO(caps); + +static struct attribute *mdss_rotator_fs_attrs[] = { + &dev_attr_caps.attr, + NULL +}; + +static struct attribute_group mdss_rotator_fs_attr_group = { + .attrs = mdss_rotator_fs_attrs +}; + +static const struct file_operations mdss_rotator_fops = { + .owner = THIS_MODULE, + .open = mdss_rotator_open, + .release = mdss_rotator_close, + .unlocked_ioctl = mdss_rotator_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = mdss_rotator_compat_ioctl, +#endif +}; + +static int mdss_rotator_parse_dt_bus(struct mdss_rot_mgr *mgr, + struct platform_device *dev) +{ + struct device_node *node; + int ret = 0, i; + bool register_bus_needed; + int usecases; + + mgr->data_bus.bus_scale_pdata = msm_bus_cl_get_pdata(dev); + if (IS_ERR_OR_NULL(mgr->data_bus.bus_scale_pdata)) { + ret = PTR_ERR(mgr->data_bus.bus_scale_pdata); + if (!ret) { + ret = -EINVAL; + pr_err("msm_bus_cl_get_pdata failed. ret=%d\n", ret); + mgr->data_bus.bus_scale_pdata = NULL; + } + } + + register_bus_needed = of_property_read_bool(dev->dev.of_node, + "qcom,mdss-has-reg-bus"); + if (register_bus_needed) { + node = of_get_child_by_name( + dev->dev.of_node, "qcom,mdss-rot-reg-bus"); + if (!node) { + mgr->reg_bus.bus_scale_pdata = &rot_reg_bus_scale_table; + usecases = mgr->reg_bus.bus_scale_pdata->num_usecases; + for (i = 0; i < usecases; i++) { + rot_reg_bus_usecases[i].num_paths = 1; + rot_reg_bus_usecases[i].vectors = + &rot_reg_bus_vectors[i]; + } + } else { + mgr->reg_bus.bus_scale_pdata = + msm_bus_pdata_from_node(dev, node); + if (IS_ERR_OR_NULL(mgr->reg_bus.bus_scale_pdata)) { + ret = PTR_ERR(mgr->reg_bus.bus_scale_pdata); + if (!ret) + ret = -EINVAL; + pr_err("reg_rot_bus failed rc=%d\n", ret); + mgr->reg_bus.bus_scale_pdata = NULL; + } + } + } + return ret; +} + +static int mdss_rotator_parse_dt(struct mdss_rot_mgr *mgr, + struct platform_device *dev) +{ + int ret = 0; + u32 data; + + ret = of_property_read_u32(dev->dev.of_node, + "qcom,mdss-wb-count", &data); + if (ret) { + pr_err("Error in device tree\n"); + return ret; + } + if (data > ROT_MAX_HW_BLOCKS) { + pr_err("Err, num of wb block (%d) larger than sw max %d\n", + data, ROT_MAX_HW_BLOCKS); + return -EINVAL; + } + + rot_mgr->queue_count = data; + rot_mgr->has_downscale = of_property_read_bool(dev->dev.of_node, + "qcom,mdss-has-downscale"); + rot_mgr->has_ubwc = of_property_read_bool(dev->dev.of_node, + "qcom,mdss-has-ubwc"); + + ret = mdss_rotator_parse_dt_bus(mgr, dev); + if (ret) + pr_err("Failed to parse bus data\n"); + + return ret; +} + +static void mdss_rotator_put_dt_vreg_data(struct device *dev, + struct dss_module_power *mp) +{ + if (!mp) { + DEV_ERR("%s: invalid input\n", __func__); + return; + } + + msm_dss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 0); + mp->num_vreg = 0; +} + +static int mdss_rotator_get_dt_vreg_data(struct device *dev, + struct dss_module_power *mp) +{ + const char *st = NULL; + struct device_node *of_node = NULL; + int dt_vreg_total = 0; + int i; + int rc; + + if (!dev || !mp) { + DEV_ERR("%s: invalid input\n", __func__); + return -EINVAL; + } + + of_node = dev->of_node; + + dt_vreg_total = of_property_count_strings(of_node, "qcom,supply-names"); + if (dt_vreg_total < 0) { + DEV_ERR("%s: vreg not found. 
rc=%d\n", __func__, + dt_vreg_total); + return 0; + } + mp->num_vreg = dt_vreg_total; + mp->vreg_config = devm_kzalloc(dev, sizeof(struct dss_vreg) * + dt_vreg_total, GFP_KERNEL); + if (!mp->vreg_config) { + DEV_ERR("%s: can't alloc vreg mem\n", __func__); + return -ENOMEM; + } + + /* vreg-name */ + for (i = 0; i < dt_vreg_total; i++) { + rc = of_property_read_string_index(of_node, + "qcom,supply-names", i, &st); + if (rc) { + DEV_ERR("%s: error reading name. i=%d, rc=%d\n", + __func__, i, rc); + goto error; + } + snprintf(mp->vreg_config[i].vreg_name, 32, "%s", st); + } + msm_dss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 1); + + for (i = 0; i < dt_vreg_total; i++) { + DEV_DBG("%s: %s min=%d, max=%d, enable=%d disable=%d\n", + __func__, + mp->vreg_config[i].vreg_name, + mp->vreg_config[i].min_voltage, + mp->vreg_config[i].max_voltage, + mp->vreg_config[i].load[DSS_REG_MODE_ENABLE], + mp->vreg_config[i].load[DSS_REG_MODE_DISABLE]); + } + return rc; + +error: + mp->num_vreg = 0; + return rc; +} + +static void mdss_rotator_bus_scale_unregister(struct mdss_rot_mgr *mgr) +{ + pr_debug("unregister bus_hdl=%x, reg_bus_hdl=%x\n", + mgr->data_bus.bus_hdl, mgr->reg_bus.bus_hdl); + + if (mgr->data_bus.bus_hdl) + msm_bus_scale_unregister_client(mgr->data_bus.bus_hdl); + + if (mgr->reg_bus.bus_hdl) + msm_bus_scale_unregister_client(mgr->reg_bus.bus_hdl); +} + +static int mdss_rotator_bus_scale_register(struct mdss_rot_mgr *mgr) +{ + if (!mgr->data_bus.bus_scale_pdata) { + pr_err("Scale table is NULL\n"); + return -EINVAL; + } + + mgr->data_bus.bus_hdl = + msm_bus_scale_register_client( + mgr->data_bus.bus_scale_pdata); + if (!mgr->data_bus.bus_hdl) { + pr_err("bus_client register failed\n"); + return -EINVAL; + } + pr_debug("registered bus_hdl=%x\n", mgr->data_bus.bus_hdl); + + if (mgr->reg_bus.bus_scale_pdata) { + mgr->reg_bus.bus_hdl = + msm_bus_scale_register_client( + mgr->reg_bus.bus_scale_pdata); + if (!mgr->reg_bus.bus_hdl) { + pr_err("register bus_client register failed\n"); + mdss_rotator_bus_scale_unregister(mgr); + return -EINVAL; + } + pr_debug("registered register bus_hdl=%x\n", + mgr->reg_bus.bus_hdl); + } + + return 0; +} + +static int mdss_rotator_clk_register(struct platform_device *pdev, + struct mdss_rot_mgr *mgr, char *clk_name, u32 clk_idx) +{ + struct clk *tmp; + + pr_debug("registered clk_reg\n"); + + if (clk_idx >= MDSS_CLK_ROTATOR_END_IDX) { + pr_err("invalid clk index %d\n", clk_idx); + return -EINVAL; + } + + if (mgr->rot_clk[clk_idx]) { + pr_err("Stomping on clk prev registered:%d\n", clk_idx); + return -EINVAL; + } + + tmp = devm_clk_get(&pdev->dev, clk_name); + if (IS_ERR(tmp)) { + pr_err("unable to get clk: %s\n", clk_name); + return PTR_ERR(tmp); + } + mgr->rot_clk[clk_idx] = tmp; + return 0; +} + +static int mdss_rotator_res_init(struct platform_device *pdev, + struct mdss_rot_mgr *mgr) +{ + int ret; + + ret = mdss_rotator_get_dt_vreg_data(&pdev->dev, &mgr->module_power); + if (ret) + return ret; + + ret = mdss_rotator_clk_register(pdev, mgr, + "iface_clk", MDSS_CLK_ROTATOR_AHB); + if (ret) + goto error; + + ret = mdss_rotator_clk_register(pdev, mgr, + "rot_core_clk", MDSS_CLK_ROTATOR_CORE); + if (ret) + goto error; + + ret = mdss_rotator_bus_scale_register(mgr); + if (ret) + goto error; + + return 0; +error: + mdss_rotator_put_dt_vreg_data(&pdev->dev, &mgr->module_power); + return ret; +} + +static int mdss_rotator_probe(struct platform_device *pdev) +{ + int ret; + + rot_mgr = devm_kzalloc(&pdev->dev, sizeof(struct mdss_rot_mgr), + GFP_KERNEL); + if (!rot_mgr) + 
return -ENOMEM; + + rot_mgr->pdev = pdev; + ret = mdss_rotator_parse_dt(rot_mgr, pdev); + if (ret) { + pr_err("fail to parse the dt\n"); + goto error_parse_dt; + } + + mutex_init(&rot_mgr->lock); + mutex_init(&rot_mgr->clk_lock); + mutex_init(&rot_mgr->bus_lock); + atomic_set(&rot_mgr->device_suspended, 0); + ret = mdss_rotator_init_queue(rot_mgr); + if (ret) { + pr_err("fail to init queue\n"); + goto error_get_dev_num; + } + + mutex_init(&rot_mgr->file_lock); + INIT_LIST_HEAD(&rot_mgr->file_list); + + platform_set_drvdata(pdev, rot_mgr); + + ret = alloc_chrdev_region(&rot_mgr->dev_num, 0, 1, DRIVER_NAME); + if (ret < 0) { + pr_err("alloc_chrdev_region failed ret = %d\n", ret); + goto error_get_dev_num; + } + + rot_mgr->class = class_create(THIS_MODULE, CLASS_NAME); + if (IS_ERR(rot_mgr->class)) { + ret = PTR_ERR(rot_mgr->class); + pr_err("couldn't create class rc = %d\n", ret); + goto error_class_create; + } + + rot_mgr->device = device_create(rot_mgr->class, NULL, + rot_mgr->dev_num, NULL, DRIVER_NAME); + if (IS_ERR(rot_mgr->device)) { + ret = PTR_ERR(rot_mgr->device); + pr_err("device_create failed %d\n", ret); + goto error_class_device_create; + } + + cdev_init(&rot_mgr->cdev, &mdss_rotator_fops); + ret = cdev_add(&rot_mgr->cdev, + MKDEV(MAJOR(rot_mgr->dev_num), 0), 1); + if (ret < 0) { + pr_err("cdev_add failed %d\n", ret); + goto error_cdev_add; + } + + ret = sysfs_create_group(&rot_mgr->device->kobj, + &mdss_rotator_fs_attr_group); + if (ret) + pr_err("unable to register rotator sysfs nodes\n"); + + ret = mdss_rotator_res_init(pdev, rot_mgr); + if (ret < 0) { + pr_err("res_init failed %d\n", ret); + goto error_res_init; + } + return 0; + +error_res_init: + cdev_del(&rot_mgr->cdev); +error_cdev_add: + device_destroy(rot_mgr->class, rot_mgr->dev_num); +error_class_device_create: + class_destroy(rot_mgr->class); +error_class_create: + unregister_chrdev_region(rot_mgr->dev_num, 1); +error_get_dev_num: + mdss_rotator_deinit_queue(rot_mgr); +error_parse_dt: + return ret; +} + +static int mdss_rotator_remove(struct platform_device *dev) +{ + struct mdss_rot_mgr *mgr; + + mgr = (struct mdss_rot_mgr *)platform_get_drvdata(dev); + if (!mgr) + return -ENODEV; + + sysfs_remove_group(&rot_mgr->device->kobj, &mdss_rotator_fs_attr_group); + + mdss_rotator_release_all(mgr); + + mdss_rotator_put_dt_vreg_data(&dev->dev, &mgr->module_power); + mdss_rotator_bus_scale_unregister(mgr); + cdev_del(&rot_mgr->cdev); + device_destroy(rot_mgr->class, rot_mgr->dev_num); + class_destroy(rot_mgr->class); + unregister_chrdev_region(rot_mgr->dev_num, 1); + + mdss_rotator_deinit_queue(rot_mgr); + return 0; +} + +static void mdss_rotator_suspend_cancel_rot_work(struct mdss_rot_mgr *mgr) +{ + struct mdss_rot_file_private *priv, *priv_next; + + mutex_lock(&mgr->file_lock); + list_for_each_entry_safe(priv, priv_next, &mgr->file_list, list) { + mdss_rotator_cancel_all_requests(mgr, priv); + } + mutex_unlock(&rot_mgr->file_lock); +} + +#if defined(CONFIG_PM) +static int mdss_rotator_suspend(struct platform_device *dev, pm_message_t state) +{ + struct mdss_rot_mgr *mgr; + + mgr = (struct mdss_rot_mgr *)platform_get_drvdata(dev); + if (!mgr) + return -ENODEV; + + atomic_inc(&mgr->device_suspended); + mdss_rotator_suspend_cancel_rot_work(mgr); + mdss_rotator_update_perf(mgr); + return 0; +} + +static int mdss_rotator_resume(struct platform_device *dev) +{ + struct mdss_rot_mgr *mgr; + + mgr = (struct mdss_rot_mgr *)platform_get_drvdata(dev); + if (!mgr) + return -ENODEV; + + atomic_dec(&mgr->device_suspended); + 
mdss_rotator_update_perf(mgr); + return 0; +} +#endif + +static const struct of_device_id mdss_rotator_dt_match[] = { + { .compatible = "qcom,mdss_rotator",}, + {} +}; + +MODULE_DEVICE_TABLE(of, mdss_rotator_dt_match); + +static struct platform_driver mdss_rotator_driver = { + .probe = mdss_rotator_probe, + .remove = mdss_rotator_remove, +#if defined(CONFIG_PM) + .suspend = mdss_rotator_suspend, + .resume = mdss_rotator_resume, +#endif + .driver = { + .name = "mdss_rotator", + .of_match_table = mdss_rotator_dt_match, + .pm = NULL, + } +}; + +static int __init mdss_rotator_init(void) +{ + return platform_driver_register(&mdss_rotator_driver); +} + +static void __exit mdss_rotator_exit(void) +{ + return platform_driver_unregister(&mdss_rotator_driver); +} + +module_init(mdss_rotator_init); +module_exit(mdss_rotator_exit); + +MODULE_DESCRIPTION("MSM Rotator driver"); +MODULE_LICENSE("GPL v2"); diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 7aeb9456aec7..7ef08e7f00a6 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -348,13 +348,13 @@ static int f2fs_write_meta_pages(struct address_space *mapping, goto skip_write; /* if locked failed, cp will flush dirty pages instead */ - if (!mutex_trylock(&sbi->cp_mutex)) + if (!down_write_trylock(&sbi->cp_global_sem)) goto skip_write; trace_f2fs_writepages(mapping->host, wbc, META); diff = nr_pages_to_write(sbi, META, wbc); written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO); - mutex_unlock(&sbi->cp_mutex); + up_write(&sbi->cp_global_sem); wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff); return 0; @@ -1579,7 +1579,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) f2fs_warn(sbi, "Start checkpoint disabled!"); } if (cpc->reason != CP_RESIZE) - mutex_lock(&sbi->cp_mutex); + down_write(&sbi->cp_global_sem); if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) && ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) || @@ -1649,7 +1649,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint"); out: if (cpc->reason != CP_RESIZE) - mutex_unlock(&sbi->cp_mutex); + up_write(&sbi->cp_global_sem); return err; } diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 76f5930e6c6e..07c443570207 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1429,7 +1429,7 @@ struct f2fs_sb_info { int cur_cp_pack; /* remain current cp pack */ spinlock_t cp_lock; /* for flag in ckpt */ struct inode *meta_inode; /* cache meta blocks */ - struct mutex cp_mutex; /* checkpoint procedure lock */ + struct rw_semaphore cp_global_sem; /* checkpoint procedure lock */ struct rw_semaphore cp_rwsem; /* blocking FS operations */ struct rw_semaphore node_write; /* locking node writes */ struct rw_semaphore node_change; /* locking node change */ diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 5b95d5a146eb..a8c5a6b3e22f 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -1592,7 +1592,7 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) freeze_super(sbi->sb); down_write(&sbi->gc_lock); - mutex_lock(&sbi->cp_mutex); + down_write(&sbi->cp_global_sem); spin_lock(&sbi->stat_lock); if (shrunk_blocks + valid_user_blocks(sbi) + @@ -1637,7 +1637,7 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) spin_unlock(&sbi->stat_lock); } out_err: - mutex_unlock(&sbi->cp_mutex); + up_write(&sbi->cp_global_sem); up_write(&sbi->gc_lock); thaw_super(sbi->sb); clear_sbi_flag(sbi, SBI_IS_RESIZEFS); diff --git 
a/fs/f2fs/node.c b/fs/f2fs/node.c index 22f1ffc44330..4b4c2d339846 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -44,11 +44,15 @@ int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid) bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type) { struct f2fs_nm_info *nm_i = NM_I(sbi); + struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct sysinfo val; unsigned long avail_ram; unsigned long mem_size = 0; bool res = false; + if (!nm_i) + return true; + si_meminfo(&val); /* only uses low memory */ @@ -90,6 +94,10 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type) /* it allows 20% / total_ram for inmemory pages */ mem_size = get_pages(sbi, F2FS_INMEM_PAGES); res = mem_size < (val.totalram / 5); + } else if (type == DISCARD_CACHE) { + mem_size = (atomic_read(&dcc->discard_cmd_cnt) * + sizeof(struct discard_cmd)) >> PAGE_SHIFT; + res = mem_size < (avail_ram * nm_i->ram_thresh / 100); } else { if (!sbi->sb->s_bdi->wb.dirty_exceeded) return true; @@ -526,7 +534,7 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, int i; ni->nid = nid; - +retry: /* Check nat cache */ down_read(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, nid); @@ -538,10 +546,19 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, return 0; } - memset(&ne, 0, sizeof(struct f2fs_nat_entry)); + /* + * Check current segment summary by trying to grab journal_rwsem first. + * This sem is on the critical path on the checkpoint requiring the above + * nat_tree_lock. Therefore, we should retry, if we failed to grab here + * while not bothering checkpoint. + */ + if (!rwsem_is_locked(&sbi->cp_global_sem)) { + down_read(&curseg->journal_rwsem); + } else if (!down_read_trylock(&curseg->journal_rwsem)) { + up_read(&nm_i->nat_tree_lock); + goto retry; + } - /* Check current segment summary */ - down_read(&curseg->journal_rwsem); i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0); if (i >= 0) { ne = nat_in_journal(journal, i); diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index 69e5859e993c..1183d5bf903b 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -147,6 +147,7 @@ enum mem_type { INO_ENTRIES, /* indicates inode entries */ EXTENT_CACHE, /* indicates extent cache */ INMEM_PAGES, /* indicates inmemory pages */ + DISCARD_CACHE, /* indicates memory of cached discard cmds */ BASE_CHECK, /* check kernel status */ }; diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 1ad3d1fd0c35..b50b5e07ade1 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -798,7 +798,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) INIT_LIST_HEAD(&dir_list); /* prevent checkpoint */ - mutex_lock(&sbi->cp_mutex); + down_write(&sbi->cp_global_sem); /* step #1: find fsynced inode numbers */ err = find_fsync_dnodes(sbi, &inode_list, check_only); @@ -834,7 +834,8 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) } else { clear_sbi_flag(sbi, SBI_POR_DOING); } - mutex_unlock(&sbi->cp_mutex); + + up_write(&sbi->cp_global_sem); /* let's drop all the directory inodes for clean checkpoint */ destroy_fsync_dnodes(&dir_list, err); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index e4898789c5c0..3c2da869da2f 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1072,6 +1072,8 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi, struct discard_policy *dpolicy, int discard_type, unsigned int granularity) { + struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + /* common policy */ dpolicy->type = discard_type; 
dpolicy->sync = true; @@ -1091,7 +1093,9 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi, dpolicy->ordered = true; if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) { dpolicy->granularity = 1; - dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME; + if (atomic_read(&dcc->discard_cmd_cnt)) + dpolicy->max_interval = + DEF_MIN_DISCARD_ISSUE_TIME; } } else if (discard_type == DPOLICY_FORCE) { dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME; @@ -1707,8 +1711,15 @@ static int issue_discard_thread(void *data) set_freezable(); do { - __init_discard_policy(sbi, &dpolicy, DPOLICY_BG, - dcc->discard_granularity); + if (sbi->gc_mode == GC_URGENT || + !f2fs_available_free_memory(sbi, DISCARD_CACHE)) + __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1); + else + __init_discard_policy(sbi, &dpolicy, DPOLICY_BG, + dcc->discard_granularity); + + if (!atomic_read(&dcc->discard_cmd_cnt)) + wait_ms = dpolicy.max_interval; wait_event_interruptible_timeout(*q, kthread_should_stop() || freezing(current) || @@ -1733,9 +1744,6 @@ static int issue_discard_thread(void *data) continue; } - if (sbi->gc_mode == GC_URGENT) - __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1); - sb_start_intwrite(sbi->sb); issued = __issue_discard_cmd(sbi, &dpolicy); diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 40544f8d9960..afc055fb27a2 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -3521,7 +3521,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) sbi->valid_super_block = valid_super_block; init_rwsem(&sbi->gc_lock); mutex_init(&sbi->writepages); - mutex_init(&sbi->cp_mutex); + init_rwsem(&sbi->cp_global_sem); init_rwsem(&sbi->node_write); init_rwsem(&sbi->node_change); diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 9861204da06f..912ad1b100a0 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -327,12 +327,16 @@ static u64 fuse_get_unique(struct fuse_iqueue *fiq) return ++fiq->reqctr; } -static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req) +static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req, + bool sync) { req->in.h.len = sizeof(struct fuse_in_header) + len_args(req->in.numargs, (struct fuse_arg *) req->in.args); list_add_tail(&req->list, &fiq->pending); - wake_up(&fiq->waitq); + if (sync) + wake_up_sync(&fiq->waitq); + else + wake_up(&fiq->waitq); kill_fasync(&fiq->fasync, SIGIO, POLL_IN); } @@ -368,7 +372,7 @@ static void flush_bg_queue(struct fuse_conn *fc) fc->active_background++; spin_lock(&fiq->lock); req->in.h.unique = fuse_get_unique(fiq); - queue_request(fiq, req); + queue_request(fiq, req, 0); spin_unlock(&fiq->lock); } } @@ -497,7 +501,7 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) req->out.h.error = -ENOTCONN; } else { req->in.h.unique = fuse_get_unique(fiq); - queue_request(fiq, req); + queue_request(fiq, req, 1); /* acquire extra reference, since request is still needed after request_end() */ __fuse_get_request(req); @@ -637,7 +641,7 @@ static int fuse_request_send_notify_reply(struct fuse_conn *fc, req->in.h.unique = unique; spin_lock(&fiq->lock); if (fiq->connected) { - queue_request(fiq, req); + queue_request(fiq, req, 0); err = 0; } spin_unlock(&fiq->lock); diff --git a/fs/incfs/data_mgmt.c b/fs/incfs/data_mgmt.c index 074a733c7001..e0705be44fb2 100644 --- a/fs/incfs/data_mgmt.c +++ b/fs/incfs/data_mgmt.c @@ -144,7 +144,7 @@ struct data_file *incfs_open_data_file(struct mount_info *mi, struct file *bf) if (!S_ISREG(bf->f_inode->i_mode)) return ERR_PTR(-EBADF); - bfc = 
incfs_alloc_bfc(bf); + bfc = incfs_alloc_bfc(mi, bf); if (IS_ERR(bfc)) return ERR_CAST(bfc); @@ -199,6 +199,7 @@ void incfs_free_data_file(struct data_file *df) for (i = 0; i < ARRAY_SIZE(df->df_segments); i++) data_file_segment_destroy(&df->df_segments[i]); incfs_free_bfc(df->df_backing_file_context); + kfree(df->df_signature); kfree(df); } @@ -387,8 +388,8 @@ static void log_block_read(struct mount_info *mi, incfs_uuid_t *id, schedule_delayed_work(&log->ml_wakeup_work, msecs_to_jiffies(16)); } -static int validate_hash_tree(struct file *bf, struct file *f, int block_index, - struct mem_range data, u8 *buf) +static int validate_hash_tree(struct backing_file_context *bfc, struct file *f, + int block_index, struct mem_range data, u8 *buf) { struct data_file *df = get_incfs_data_file(f); u8 stored_digest[INCFS_MAX_HASH_SIZE] = {}; @@ -445,7 +446,7 @@ static int validate_hash_tree(struct file *bf, struct file *f, int block_index, if (page) put_page(page); - res = incfs_kread(bf, buf, INCFS_DATA_FILE_BLOCK_SIZE, + res = incfs_kread(bfc, buf, INCFS_DATA_FILE_BLOCK_SIZE, hash_block_offset[lvl] + sig->hash_offset); if (res < 0) return res; @@ -918,7 +919,7 @@ ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f, ssize_t result; size_t bytes_to_read; struct mount_info *mi = NULL; - struct file *bf = NULL; + struct backing_file_context *bfc = NULL; struct data_file_block block = {}; struct data_file *df = get_incfs_data_file(f); @@ -929,7 +930,7 @@ ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f, return -ERANGE; mi = df->df_mount_info; - bf = df->df_backing_file_context->bc_file; + bfc = df->df_backing_file_context; result = wait_for_data_block(df, index, timeout_ms, &block); if (result < 0) @@ -938,20 +939,20 @@ ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f, pos = block.db_backing_file_data_offset; if (block.db_comp_alg == COMPRESSION_NONE) { bytes_to_read = min(dst.len, block.db_stored_size); - result = incfs_kread(bf, dst.data, bytes_to_read, pos); + result = incfs_kread(bfc, dst.data, bytes_to_read, pos); /* Some data was read, but not enough */ if (result >= 0 && result != bytes_to_read) result = -EIO; } else { bytes_to_read = min(tmp.len, block.db_stored_size); - result = incfs_kread(bf, tmp.data, bytes_to_read, pos); + result = incfs_kread(bfc, tmp.data, bytes_to_read, pos); if (result == bytes_to_read) { result = decompress(range(tmp.data, bytes_to_read), dst); if (result < 0) { const char *name = - bf->f_path.dentry->d_name.name; + bfc->bc_file->f_path.dentry->d_name.name; pr_warn_once("incfs: Decompression error. 
%s", name); @@ -963,7 +964,7 @@ ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f, } if (result > 0) { - int err = validate_hash_tree(bf, f, index, dst, tmp.data); + int err = validate_hash_tree(bfc, f, index, dst, tmp.data); if (err < 0) result = err; @@ -1026,14 +1027,13 @@ int incfs_process_new_data_block(struct data_file *df, unlock: mutex_unlock(&segment->blockmap_mutex); if (error) - pr_debug("incfs: %s %d error: %d\n", __func__, - block->block_index, error); + pr_debug("%d error: %d\n", block->block_index, error); return error; } int incfs_read_file_signature(struct data_file *df, struct mem_range dst) { - struct file *bf = df->df_backing_file_context->bc_file; + struct backing_file_context *bfc = df->df_backing_file_context; struct incfs_df_signature *sig; int read_res = 0; @@ -1047,7 +1047,7 @@ int incfs_read_file_signature(struct data_file *df, struct mem_range dst) if (dst.len < sig->sig_size) return -E2BIG; - read_res = incfs_kread(bf, dst.data, sig->sig_size, sig->sig_offset); + read_res = incfs_kread(bfc, dst.data, sig->sig_size, sig->sig_offset); if (read_res < 0) return read_res; @@ -1173,7 +1173,7 @@ static int process_file_signature_md(struct incfs_file_signature *sg, goto out; } - read = incfs_kread(df->df_backing_file_context->bc_file, buf, + read = incfs_kread(df->df_backing_file_context, buf, signature->sig_size, signature->sig_offset); if (read < 0) { error = read; diff --git a/fs/incfs/format.c b/fs/incfs/format.c index c56e559b6893..d326415fc6de 100644 --- a/fs/incfs/format.c +++ b/fs/incfs/format.c @@ -15,7 +15,8 @@ #include "format.h" #include "data_mgmt.h" -struct backing_file_context *incfs_alloc_bfc(struct file *backing_file) +struct backing_file_context *incfs_alloc_bfc(struct mount_info *mi, + struct file *backing_file) { struct backing_file_context *result = NULL; @@ -24,6 +25,7 @@ struct backing_file_context *incfs_alloc_bfc(struct file *backing_file) return ERR_PTR(-ENOMEM); result->bc_file = get_file(backing_file); + result->bc_cred = mi->mi_owner; mutex_init(&result->bc_mutex); return result; } @@ -116,7 +118,7 @@ static int append_zeros(struct backing_file_context *bfc, size_t len) static int write_to_bf(struct backing_file_context *bfc, const void *buf, size_t count, loff_t pos) { - ssize_t res = incfs_kwrite(bfc->bc_file, buf, count, pos); + ssize_t res = incfs_kwrite(bfc, buf, count, pos); if (res < 0) return res; @@ -531,8 +533,7 @@ int incfs_read_blockmap_entries(struct backing_file_context *bfc, if (start_index < 0 || bm_base_off <= 0) return -ENODATA; - result = incfs_kread(bfc->bc_file, entries, bytes_to_read, - bm_entry_off); + result = incfs_kread(bfc, entries, bytes_to_read, bm_entry_off); if (result < 0) return result; return result / sizeof(*entries); @@ -548,8 +549,7 @@ int incfs_read_file_header(struct backing_file_context *bfc, if (!bfc || !first_md_off) return -EFAULT; - LOCK_REQUIRED(bfc->bc_mutex); - bytes_read = incfs_kread(bfc->bc_file, &fh, sizeof(fh), 0); + bytes_read = incfs_kread(bfc, &fh, sizeof(fh), 0); if (bytes_read < 0) return bytes_read; @@ -603,8 +603,8 @@ int incfs_read_next_metadata_record(struct backing_file_context *bfc, return -EPERM; memset(&handler->md_buffer, 0, max_md_size); - bytes_read = incfs_kread(bfc->bc_file, &handler->md_buffer, - max_md_size, handler->md_record_offset); + bytes_read = incfs_kread(bfc, &handler->md_buffer, max_md_size, + handler->md_record_offset); if (bytes_read < 0) return bytes_read; if (bytes_read < sizeof(*md_hdr)) @@ -680,12 +680,22 @@ int 
incfs_read_next_metadata_record(struct backing_file_context *bfc, return res; } -ssize_t incfs_kread(struct file *f, void *buf, size_t size, loff_t pos) +ssize_t incfs_kread(struct backing_file_context *bfc, void *buf, size_t size, + loff_t pos) { - return kernel_read(f, buf, size, &pos); + const struct cred *old_cred = override_creds(bfc->bc_cred); + int ret = kernel_read(bfc->bc_file, buf, size, &pos); + + revert_creds(old_cred); + return ret; } -ssize_t incfs_kwrite(struct file *f, const void *buf, size_t size, loff_t pos) +ssize_t incfs_kwrite(struct backing_file_context *bfc, const void *buf, + size_t size, loff_t pos) { - return kernel_write(f, buf, size, &pos); + const struct cred *old_cred = override_creds(bfc->bc_cred); + int ret = kernel_write(bfc->bc_file, buf, size, &pos); + + revert_creds(old_cred); + return ret; } diff --git a/fs/incfs/format.h b/fs/incfs/format.h index 1a83349bb2eb..a6d3aef6b358 100644 --- a/fs/incfs/format.h +++ b/fs/incfs/format.h @@ -256,6 +256,13 @@ struct backing_file_context { * 0 means there are no metadata records. */ loff_t bc_last_md_record_offset; + + /* + * Credentials to set before reads/writes + * Note that this is a pointer to the mount_info mi_owner field so + * there is no need to get/put the creds + */ + const struct cred *bc_cred; }; struct metadata_handler { @@ -283,7 +290,9 @@ struct metadata_handler { loff_t incfs_get_end_offset(struct file *f); /* Backing file context management */ -struct backing_file_context *incfs_alloc_bfc(struct file *backing_file); +struct mount_info; +struct backing_file_context *incfs_alloc_bfc(struct mount_info *mi, + struct file *backing_file); void incfs_free_bfc(struct backing_file_context *bfc); @@ -334,7 +343,9 @@ int incfs_read_blockmap_entries(struct backing_file_context *bfc, int incfs_read_next_metadata_record(struct backing_file_context *bfc, struct metadata_handler *handler); -ssize_t incfs_kread(struct file *f, void *buf, size_t size, loff_t pos); -ssize_t incfs_kwrite(struct file *f, const void *buf, size_t size, loff_t pos); +ssize_t incfs_kread(struct backing_file_context *bfc, void *buf, size_t size, + loff_t pos); +ssize_t incfs_kwrite(struct backing_file_context *bfc, const void *buf, + size_t size, loff_t pos); #endif /* _INCFS_FORMAT_H */ diff --git a/fs/incfs/main.c b/fs/incfs/main.c index e65d0d895128..5a5939a10079 100644 --- a/fs/incfs/main.c +++ b/fs/incfs/main.c @@ -30,8 +30,18 @@ static ssize_t corefs_show(struct kobject *kobj, static struct kobj_attribute corefs_attr = __ATTR_RO(corefs); +static ssize_t mounter_context_for_backing_rw_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buff) +{ + return scnprintf(buff, PAGE_SIZE, "%s", "supported\n"); +} + +static struct kobj_attribute mounter_context_for_backing_rw_attr = + __ATTR_RO(mounter_context_for_backing_rw); + static struct attribute *attributes[] = { &corefs_attr.attr, + &mounter_context_for_backing_rw_attr.attr, NULL, }; diff --git a/fs/incfs/vfs.c b/fs/incfs/vfs.c index a13330eabe7d..9e335954171f 100644 --- a/fs/incfs/vfs.c +++ b/fs/incfs/vfs.c @@ -604,8 +604,11 @@ static ssize_t log_read(struct file *f, char __user *buf, size_t len, reads_to_collect = min_t(ssize_t, rl_size, reads_to_collect); while (reads_to_collect > 0) { - struct read_log_state next_state = READ_ONCE(log_state->state); - int reads_collected = incfs_collect_logged_reads( + struct read_log_state next_state; + int reads_collected; + + memcpy(&next_state, &log_state->state, sizeof(next_state)); + reads_collected = incfs_collect_logged_reads( 
mi, &next_state, reads_buf, min_t(ssize_t, reads_to_collect, reads_per_page)); if (reads_collected <= 0) { @@ -624,7 +627,7 @@ static ssize_t log_read(struct file *f, char __user *buf, size_t len, goto out; } - WRITE_ONCE(log_state->state, next_state); + memcpy(&log_state->state, &next_state, sizeof(next_state)); total_reads_collected += reads_collected; buf += reads_collected * sizeof(*reads_buf); reads_to_collect -= reads_collected; @@ -897,14 +900,14 @@ static int init_new_file(struct mount_info *mi, struct dentry *dentry, .dentry = dentry }; new_file = dentry_open(&path, O_RDWR | O_NOATIME | O_LARGEFILE, - mi->mi_owner); + current_cred()); if (IS_ERR(new_file)) { error = PTR_ERR(new_file); goto out; } - bfc = incfs_alloc_bfc(new_file); + bfc = incfs_alloc_bfc(mi, new_file); fput(new_file); if (IS_ERR(bfc)) { error = PTR_ERR(bfc); @@ -1025,7 +1028,7 @@ static int dir_relative_path_resolve( if (dir_fd < 0) return dir_fd; - dir_f = dentry_open(base_path, O_RDONLY | O_NOATIME, mi->mi_owner); + dir_f = dentry_open(base_path, O_RDONLY | O_NOATIME, current_cred()); if (IS_ERR(dir_f)) { error = PTR_ERR(dir_f); @@ -1903,10 +1906,13 @@ static int file_open(struct inode *inode, struct file *file) struct file *backing_file = NULL; struct path backing_path = {}; int err = 0; + const struct cred *old_cred; get_incfs_backing_path(file->f_path.dentry, &backing_path); - backing_file = dentry_open( - &backing_path, O_RDWR | O_NOATIME | O_LARGEFILE, mi->mi_owner); + old_cred = override_creds(mi->mi_owner); + backing_file = dentry_open(&backing_path, + O_RDWR | O_NOATIME | O_LARGEFILE, current_cred()); + revert_creds(old_cred); path_put(&backing_path); if (IS_ERR(backing_file)) { diff --git a/gen_headers_arm.bp b/gen_headers_arm.bp index b5a546f124ac..2894fb4a72f1 100644 --- a/gen_headers_arm.bp +++ b/gen_headers_arm.bp @@ -148,6 +148,7 @@ gen_headers_out_arm = [ "linux/b1lli.h", "linux/batadv_packet.h", "linux/batman_adv.h", + "linux/batterydata-interface.h", "linux/baycom.h", "linux/bcache.h", "linux/bcm933xx_hcs.h", @@ -476,6 +477,8 @@ gen_headers_out_arm = [ "linux/qemu_fw_cfg.h", "linux/qg-profile.h", "linux/qg.h", + "linux/qbg-profile.h", + "linux/qbg.h", "linux/qnx4_fs.h", "linux/qnxtypes.h", "linux/qrng.h", @@ -526,6 +529,7 @@ gen_headers_out_arm = [ "linux/shm.h", "linux/signal.h", "linux/signalfd.h", + "linux/slatecom_interface.h", "linux/smc.h", "linux/smc_diag.h", "linux/smcinvoke.h", @@ -623,6 +627,7 @@ gen_headers_out_arm = [ "linux/virtio_scsi.h", "linux/virtio_types.h", "linux/virtio_vsock.h", + "linux/vm_bms.h", "linux/vm_sockets.h", "linux/vm_sockets_diag.h", "linux/vmcore.h", @@ -1017,7 +1022,10 @@ genrule { genrule { name: "qti_generate_kernel_headers_arm", - tools: ["headers_install.sh"], + tools: [ + "headers_install.sh", + "unifdef", + ], tool_files: [ "kernel_headers.py", "arch/arm/tools/syscallhdr.sh", @@ -1042,6 +1050,7 @@ genrule { "--arch_syscall_tool $(location arch/arm/tools/syscallhdr.sh) " + "--arch_syscall_tbl $(location arch/arm/tools/syscall.tbl) " + "--headers_install $(location headers_install.sh) " + + "--unifdef $(location unifdef) " + "--include_uapi $(locations include/uapi/**/*.h)", out: ["linux/version.h"] + gen_headers_out_arm, } diff --git a/gen_headers_arm64.bp b/gen_headers_arm64.bp index 17616f395cf2..f90c1b771b13 100644 --- a/gen_headers_arm64.bp +++ b/gen_headers_arm64.bp @@ -143,6 +143,7 @@ gen_headers_out_arm64 = [ "linux/b1lli.h", "linux/batadv_packet.h", "linux/batman_adv.h", + "linux/batterydata-interface.h", "linux/baycom.h", "linux/bcache.h", 
"linux/bcm933xx_hcs.h", @@ -470,6 +471,8 @@ gen_headers_out_arm64 = [ "linux/qemu_fw_cfg.h", "linux/qg-profile.h", "linux/qg.h", + "linux/qbg-profile.h", + "linux/qbg.h", "linux/qnx4_fs.h", "linux/qnxtypes.h", "linux/qrng.h", @@ -520,6 +523,7 @@ gen_headers_out_arm64 = [ "linux/shm.h", "linux/signal.h", "linux/signalfd.h", + "linux/slatecom_interface.h", "linux/smc.h", "linux/smc_diag.h", "linux/smcinvoke.h", @@ -617,6 +621,7 @@ gen_headers_out_arm64 = [ "linux/virtio_scsi.h", "linux/virtio_types.h", "linux/virtio_vsock.h", + "linux/vm_bms.h", "linux/vm_sockets.h", "linux/vm_sockets_diag.h", "linux/vmcore.h", @@ -1011,7 +1016,10 @@ genrule { genrule { name: "qti_generate_kernel_headers_arm64", - tools: ["headers_install.sh"], + tools: [ + "headers_install.sh", + "unifdef", + ], tool_files: [ "kernel_headers.py", ], @@ -1033,6 +1041,7 @@ genrule { "--new_gen_headers_bp $(location :qti_generate_gen_headers_arm64) " + "--version_makefile $(location Makefile) " + "--headers_install $(location headers_install.sh) " + + "--unifdef $(location unifdef) " + "--include_uapi $(locations include/uapi/**/*.h)", out: ["linux/version.h"] + gen_headers_out_arm64, } diff --git a/include/crypto/ice.h b/include/crypto/ice.h index 907bbd46a433..f5b310199a5c 100644 --- a/include/crypto/ice.h +++ b/include/crypto/ice.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2020, 2021 The Linux Foundation. All rights reserved. */ #ifndef _QCOM_INLINE_CRYPTO_ENGINE_H_ @@ -116,15 +116,15 @@ int qcom_ice_config_start(struct request *req, #else static inline int enable_ice_setup(struct ice_device *ice_dev) { - return 0; + return -ENODEV; } static inline int disable_ice_setup(struct ice_device *ice_dev) { - return 0; + return -ENODEV; } static inline int qcom_ice_setup_ice_hw(const char *storage_type, int enable) { - return 0; + return -ENODEV; } static inline void qcom_ice_set_fde_flag(int flag) {} static inline struct list_head *get_ice_dev_list(void) diff --git a/include/dt-bindings/clock/mdss-28nm-pll-clk-legacy.h b/include/dt-bindings/clock/mdss-28nm-pll-clk-legacy.h new file mode 100644 index 000000000000..7a0a274386bb --- /dev/null +++ b/include/dt-bindings/clock/mdss-28nm-pll-clk-legacy.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2019, 2021 The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __MDSS_28NM_PLL_CLK_LEGACY_H +#define __MDSS_28NM_PLL_CLK_LEGACY_H + +/* DSI PLL clocks */ +#define VCOCLK_0 0 +#define ANALOG_POSTDIV_0_CLK 1 +#define INDIRECT_PATH_SRC_0_CLK 2 +#define BYTECLK_SRC_MUX_0_CLK 3 +#define BYTECLK_SRC_0_CLK 4 +#define PCLK_SRC_0_CLK 5 +#define VCOCLK_1 6 +#define ANALOG_POSTDIV_1_CLK 7 +#define INDIRECT_PATH_SRC_1_CLK 8 +#define BYTECLK_SRC_MUX_1_CLK 9 +#define BYTECLK_SRC_1_CLK 10 +#define PCLK_SRC_1_CLK 11 + +/* HDMI PLL clocks */ +#define HDMI_VCO_CLK 0 +#define HDMI_VCO_DIVIDED_1_CLK_SRC 1 +#define HDMI_VCO_DIVIDED_TWO_CLK_SRC 2 +#define HDMI_VCO_DIVIDED_FOUR_CLK_SRC 3 +#define HDMI_VCO_DIVIDED_SIX_CLK_SRC 4 +#define HDMI_PCLK_SRC_MUX 5 +#define HDMI_PCLK_SRC 6 +#endif diff --git a/include/dt-bindings/clock/qcom,cpu-sdm.h b/include/dt-bindings/clock/qcom,cpu-sdm.h index 638941ffbf9a..1b573a87c266 100644 --- a/include/dt-bindings/clock/qcom,cpu-sdm.h +++ b/include/dt-bindings/clock/qcom,cpu-sdm.h @@ -6,8 +6,10 @@ #ifndef _DT_BINDINGS_CLK_QCOM_CPU_SDM_H #define _DT_BINDINGS_CLK_QCOM_CPU_SDM_H -#define APCS_CPU_PLL 0 +#define APCS_CPU_PLL1 0 #define APCS_MUX_C1_CLK 1 #define APCS_MUX_CCI_CLK 2 +#define APCS_CPU_PLL0 3 +#define APCS_MUX_C0_CLK 4 #endif diff --git a/include/dt-bindings/clock/qcom,dispcc-khaje.h b/include/dt-bindings/clock/qcom,dispcc-khaje.h new file mode 100644 index 000000000000..3c0de28206b3 --- /dev/null +++ b/include/dt-bindings/clock/qcom,dispcc-khaje.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_KHAJE_H +#define _DT_BINDINGS_CLK_QCOM_DISP_CC_KHAJE_H + +/* DISP_CC clocks */ +#define DISP_CC_PLL0 0 +#define DISP_CC_MDSS_AHB_CLK 1 +#define DISP_CC_MDSS_AHB_CLK_SRC 2 +#define DISP_CC_MDSS_BYTE0_CLK 3 +#define DISP_CC_MDSS_BYTE0_CLK_SRC 4 +#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5 +#define DISP_CC_MDSS_BYTE0_INTF_CLK 6 +#define DISP_CC_MDSS_ESC0_CLK 7 +#define DISP_CC_MDSS_ESC0_CLK_SRC 8 +#define DISP_CC_MDSS_MDP_CLK 9 +#define DISP_CC_MDSS_MDP_CLK_SRC 10 +#define DISP_CC_MDSS_MDP_LUT_CLK 11 +#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 12 +#define DISP_CC_MDSS_PCLK0_CLK 13 +#define DISP_CC_MDSS_PCLK0_CLK_SRC 14 +#define DISP_CC_MDSS_ROT_CLK 15 +#define DISP_CC_MDSS_ROT_CLK_SRC 16 +#define DISP_CC_MDSS_RSCC_AHB_CLK 17 +#define DISP_CC_MDSS_RSCC_VSYNC_CLK 18 +#define DISP_CC_MDSS_VSYNC_CLK 19 +#define DISP_CC_MDSS_VSYNC_CLK_SRC 20 +#define DISP_CC_SLEEP_CLK 21 +#define DISP_CC_XO_CLK 22 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-khaje.h b/include/dt-bindings/clock/qcom,gcc-khaje.h new file mode 100644 index 000000000000..336dc50b2040 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-khaje.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_KHAJE_H +#define _DT_BINDINGS_CLK_QCOM_GCC_KHAJE_H + +/* GCC clocks */ +#define GPLL0 0 +#define GPLL0_OUT_EVEN 1 +#define GPLL1 2 +#define GPLL10 3 +#define GPLL11 4 +#define GPLL3 5 +#define GPLL3_OUT_EVEN 6 +#define GPLL4 7 +#define GPLL5 8 +#define GPLL6 9 +#define GPLL6_OUT_EVEN 10 +#define GPLL7 11 +#define GPLL8 12 +#define GPLL8_OUT_EVEN 13 +#define GPLL9 14 +#define GPLL9_OUT_MAIN 15 +#define GCC_AHB2PHY_CSI_CLK 16 +#define GCC_AHB2PHY_USB_CLK 17 +#define GCC_BIMC_GPU_AXI_CLK 18 +#define GCC_BOOT_ROM_AHB_CLK 19 +#define GCC_CAM_THROTTLE_NRT_CLK 20 +#define GCC_CAM_THROTTLE_RT_CLK 21 +#define GCC_CAMERA_AHB_CLK 22 +#define GCC_CAMERA_XO_CLK 23 +#define GCC_CAMSS_AXI_CLK 24 +#define GCC_CAMSS_AXI_CLK_SRC 25 +#define GCC_CAMSS_CAMNOC_ATB_CLK 26 +#define GCC_CAMSS_CAMNOC_NTS_XO_CLK 27 +#define GCC_CAMSS_CCI_0_CLK 28 +#define GCC_CAMSS_CCI_CLK_SRC 29 +#define GCC_CAMSS_CPHY_0_CLK 30 +#define GCC_CAMSS_CPHY_1_CLK 31 +#define GCC_CAMSS_CPHY_2_CLK 32 +#define GCC_CAMSS_CSI0PHYTIMER_CLK 33 +#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 34 +#define GCC_CAMSS_CSI1PHYTIMER_CLK 35 +#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 36 +#define GCC_CAMSS_CSI2PHYTIMER_CLK 37 +#define GCC_CAMSS_CSI2PHYTIMER_CLK_SRC 38 +#define GCC_CAMSS_MCLK0_CLK 39 +#define GCC_CAMSS_MCLK0_CLK_SRC 40 +#define GCC_CAMSS_MCLK1_CLK 41 +#define GCC_CAMSS_MCLK1_CLK_SRC 42 +#define GCC_CAMSS_MCLK2_CLK 43 +#define GCC_CAMSS_MCLK2_CLK_SRC 44 +#define GCC_CAMSS_MCLK3_CLK 45 +#define GCC_CAMSS_MCLK3_CLK_SRC 46 +#define GCC_CAMSS_NRT_AXI_CLK 47 +#define GCC_CAMSS_OPE_AHB_CLK 48 +#define GCC_CAMSS_OPE_AHB_CLK_SRC 49 +#define GCC_CAMSS_OPE_CLK 50 +#define GCC_CAMSS_OPE_CLK_SRC 51 +#define GCC_CAMSS_RT_AXI_CLK 52 +#define GCC_CAMSS_TFE_0_CLK 53 +#define GCC_CAMSS_TFE_0_CLK_SRC 54 +#define GCC_CAMSS_TFE_0_CPHY_RX_CLK 55 +#define GCC_CAMSS_TFE_0_CSID_CLK 56 +#define GCC_CAMSS_TFE_0_CSID_CLK_SRC 57 +#define GCC_CAMSS_TFE_1_CLK 58 +#define GCC_CAMSS_TFE_1_CLK_SRC 59 +#define GCC_CAMSS_TFE_1_CPHY_RX_CLK 60 +#define GCC_CAMSS_TFE_1_CSID_CLK 61 +#define GCC_CAMSS_TFE_1_CSID_CLK_SRC 62 +#define GCC_CAMSS_TFE_2_CLK 63 +#define GCC_CAMSS_TFE_2_CLK_SRC 64 +#define GCC_CAMSS_TFE_2_CPHY_RX_CLK 65 +#define GCC_CAMSS_TFE_2_CSID_CLK 66 +#define GCC_CAMSS_TFE_2_CSID_CLK_SRC 67 +#define GCC_CAMSS_TFE_CPHY_RX_CLK_SRC 68 +#define GCC_CAMSS_TOP_AHB_CLK 69 +#define GCC_CAMSS_TOP_AHB_CLK_SRC 70 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 71 +#define GCC_CPUSS_GNOC_CLK 72 +#define GCC_DISP_AHB_CLK 73 +#define GCC_DISP_GPLL0_CLK_SRC 74 +#define GCC_DISP_GPLL0_DIV_CLK_SRC 75 +#define GCC_DISP_HF_AXI_CLK 76 +#define GCC_DISP_SLEEP_CLK 77 +#define GCC_DISP_THROTTLE_CORE_CLK 78 +#define GCC_DISP_XO_CLK 79 +#define GCC_GP1_CLK 80 +#define GCC_GP1_CLK_SRC 81 +#define GCC_GP2_CLK 82 +#define GCC_GP2_CLK_SRC 83 +#define GCC_GP3_CLK 84 +#define GCC_GP3_CLK_SRC 85 +#define GCC_GPU_CFG_AHB_CLK 86 +#define GCC_GPU_GPLL0_CLK_SRC 87 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 88 +#define GCC_GPU_IREF_CLK 89 +#define GCC_GPU_MEMNOC_GFX_CLK 90 +#define GCC_GPU_SNOC_DVM_GFX_CLK 91 +#define GCC_GPU_THROTTLE_CORE_CLK 92 +#define GCC_PDM2_CLK 93 +#define GCC_PDM2_CLK_SRC 94 +#define GCC_PDM_AHB_CLK 95 +#define GCC_PDM_XO4_CLK 96 +#define GCC_PRNG_AHB_CLK 97 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 98 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 99 +#define GCC_QMIP_DISP_AHB_CLK 100 +#define GCC_QMIP_GPU_CFG_AHB_CLK 101 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 102 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 103 +#define GCC_QUPV3_WRAP0_CORE_CLK 104 +#define GCC_QUPV3_WRAP0_S0_CLK 105 
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 106 +#define GCC_QUPV3_WRAP0_S1_CLK 107 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 108 +#define GCC_QUPV3_WRAP0_S2_CLK 109 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 110 +#define GCC_QUPV3_WRAP0_S3_CLK 111 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 112 +#define GCC_QUPV3_WRAP0_S4_CLK 113 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 114 +#define GCC_QUPV3_WRAP0_S5_CLK 115 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 116 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 117 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 118 +#define GCC_SDCC1_AHB_CLK 119 +#define GCC_SDCC1_APPS_CLK 120 +#define GCC_SDCC1_APPS_CLK_SRC 121 +#define GCC_SDCC1_ICE_CORE_CLK 122 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 123 +#define GCC_SDCC2_AHB_CLK 124 +#define GCC_SDCC2_APPS_CLK 125 +#define GCC_SDCC2_APPS_CLK_SRC 126 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 127 +#define GCC_SYS_NOC_UFS_PHY_AXI_CLK 128 +#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 129 +#define GCC_UFS_CLKREF_CLK 130 +#define GCC_UFS_PHY_AHB_CLK 131 +#define GCC_UFS_PHY_AXI_CLK 132 +#define GCC_UFS_PHY_AXI_CLK_SRC 133 +#define GCC_UFS_PHY_ICE_CORE_CLK 134 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 135 +#define GCC_UFS_PHY_PHY_AUX_CLK 136 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 137 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 138 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 139 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 140 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 141 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 142 +#define GCC_USB30_PRIM_MASTER_CLK 143 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 144 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 145 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 146 +#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 147 +#define GCC_USB30_PRIM_SLEEP_CLK 148 +#define GCC_USB3_PRIM_CLKREF_CLK 149 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 150 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 151 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 152 +#define GCC_VCODEC0_AXI_CLK 153 +#define GCC_VENUS_AHB_CLK 154 +#define GCC_VENUS_CTL_AXI_CLK 155 +#define GCC_VIDEO_AHB_CLK 156 +#define GCC_VIDEO_AXI0_CLK 157 +#define GCC_VIDEO_THROTTLE_CORE_CLK 158 +#define GCC_VIDEO_VCODEC0_SYS_CLK 159 +#define GCC_VIDEO_VENUS_CLK_SRC 160 +#define GCC_VIDEO_VENUS_CTL_CLK 161 +#define GCC_VIDEO_XO_CLK 162 +#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 163 + +/* GCC resets */ +#define GCC_QUSB2PHY_PRIM_BCR 0 +#define GCC_QUSB2PHY_SEC_BCR 1 +#define GCC_SDCC1_BCR 2 +#define GCC_SDCC2_BCR 3 +#define GCC_UFS_PHY_BCR 4 +#define GCC_USB30_PRIM_BCR 5 +#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 6 +#define GCC_USB3_PHY_PRIM_SP0_BCR 7 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 8 +#define GCC_VCODEC0_BCR 9 +#define GCC_VENUS_BCR 10 +#define GCC_VIDEO_INTERFACE_BCR 11 +#define GCC_USB3_DP_PHY_PRIM_BCR 12 + +#endif diff --git a/include/dt-bindings/clock/qcom,gpucc-khaje.h b/include/dt-bindings/clock/qcom,gpucc-khaje.h new file mode 100644 index 000000000000..e6385c43cb84 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gpucc-khaje.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_KHAJE_H +#define _DT_BINDINGS_CLK_QCOM_GPU_CC_KHAJE_H + +/* GPU_CC clocks */ +#define GPU_CC_PLL0 0 +#define GPU_CC_PLL0_OUT_MAIN 1 +#define GPU_CC_PLL1 2 +#define GPU_CC_AHB_CLK 3 +#define GPU_CC_CRC_AHB_CLK 4 +#define GPU_CC_CX_GFX3D_CLK 5 +#define GPU_CC_CX_GMU_CLK 6 +#define GPU_CC_CX_SNOC_DVM_CLK 7 +#define GPU_CC_CXO_AON_CLK 8 +#define GPU_CC_CXO_CLK 9 +#define GPU_CC_GMU_CLK_SRC 10 +#define GPU_CC_GX_CXO_CLK 11 +#define GPU_CC_GX_GFX3D_CLK 12 +#define GPU_CC_GX_GFX3D_CLK_SRC 13 +#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 14 +#define GPU_CC_SLEEP_CLK 15 + +#endif diff --git a/include/dt-bindings/msm/msm-camera.h b/include/dt-bindings/msm/msm-camera.h index fb3279874bf5..3f52d3d00955 100644 --- a/include/dt-bindings/msm/msm-camera.h +++ b/include/dt-bindings/msm/msm-camera.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */ #ifndef __MSM_CAMERA_H @@ -76,4 +76,9 @@ #define CAM_CPAS_MP_LIMIT_FUSE 5 #define CAM_CPAS_FUSE_FEATURE_MAX 6 +/* Flash type*/ +#define CAM_FLASH_TYPE_PMIC 0 +#define CAM_FLASH_TYPE_I2C 1 +#define CAM_FLASH_TYPE_GPIO 2 + #endif diff --git a/include/dt-bindings/phy/qcom,khaje-qmp-usb3.h b/include/dt-bindings/phy/qcom,khaje-qmp-usb3.h new file mode 100644 index 000000000000..5d01d8ea6ccd --- /dev/null +++ b/include/dt-bindings/phy/qcom,khaje-qmp-usb3.h @@ -0,0 +1,768 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_PHY_QCOM_KHAJE_QMP_USB_H +#define _DT_BINDINGS_PHY_QCOM_KHAJE_QMP_USB_H + +/* USB3-DP Combo PHY register offsets */ +/* Module: USB3_DP_PHY_USB3_DP_COM_USB3_DP_COM_USB3_DP_COM */ +#define USB3_DP_COM_PHY_MODE_CTRL 0x0000 +#define USB3_DP_COM_SW_RESET 0x0004 +#define USB3_DP_COM_POWER_DOWN_CTRL 0x0008 +#define USB3_DP_COM_SWI_CTRL 0x000C +#define USB3_DP_COM_TYPEC_CTRL 0x0010 +#define USB3_DP_COM_TYPEC_PWRDN_CTRL 0x0014 +#define USB3_DP_COM_DP_BIST_CFG_0 0x0018 +#define USB3_DP_COM_RESET_OVRD_CTRL 0x001C +#define USB3_DP_COM_DBG_CLK_MUX_CTRL 0x0020 +#define USB3_DP_COM_TYPEC_STATUS 0x0024 +#define USB3_DP_COM_PLACEHOLDER_STATUS 0x0028 +#define USB3_DP_COM_REVISION_ID0 0x002C +#define USB3_DP_COM_REVISION_ID1 0x0030 +#define USB3_DP_COM_REVISION_ID2 0x0034 +#define USB3_DP_COM_REVISION_ID3 0x0038 + +/* Module: USB3_DP_PHY_USB3_QSERDES_COM_USB3_QSERDES_COM_USB3_DP_QMP_PLL */ +#define USB3_DP_QSERDES_COM_ATB_SEL1 0x1000 +#define USB3_DP_QSERDES_COM_ATB_SEL2 0x1004 +#define USB3_DP_QSERDES_COM_FREQ_UPDATE 0x1008 +#define USB3_DP_QSERDES_COM_BG_TIMER 0x100C +#define USB3_DP_QSERDES_COM_SSC_EN_CENTER 0x1010 +#define USB3_DP_QSERDES_COM_SSC_ADJ_PER1 0x1014 +#define USB3_DP_QSERDES_COM_SSC_ADJ_PER2 0x1018 +#define USB3_DP_QSERDES_COM_SSC_PER1 0x101C +#define USB3_DP_QSERDES_COM_SSC_PER2 0x1020 +#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE1_MODE0 0x1024 +#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE2_MODE0 0x1028 +#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE3_MODE0 0x102C +#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE1_MODE1 0x1030 +#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE2_MODE1 0x1034 +#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE3_MODE1 0x1038 +#define USB3_DP_QSERDES_COM_POST_DIV 0x103C +#define USB3_DP_QSERDES_COM_POST_DIV_MUX 0x1040 +#define USB3_DP_QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x1044 +#define USB3_DP_QSERDES_COM_CLK_ENABLE1 0x1048 +#define USB3_DP_QSERDES_COM_SYS_CLK_CTRL 0x104C 
+#define USB3_DP_QSERDES_COM_SYSCLK_BUF_ENABLE 0x1050 +#define USB3_DP_QSERDES_COM_PLL_EN 0x1054 +#define USB3_DP_QSERDES_COM_PLL_IVCO 0x1058 +#define USB3_DP_QSERDES_COM_CMN_IETRIM 0x105C +#define USB3_DP_QSERDES_COM_CMN_IPTRIM 0x1060 +#define USB3_DP_QSERDES_COM_EP_CLOCK_DETECT_CTRL 0x1064 +#define USB3_DP_QSERDES_COM_SYSCLK_DET_COMP_STATUS 0x1068 +#define USB3_DP_QSERDES_COM_CLK_EP_DIV_MODE0 0x106C +#define USB3_DP_QSERDES_COM_CLK_EP_DIV_MODE1 0x1070 +#define USB3_DP_QSERDES_COM_CP_CTRL_MODE0 0x1074 +#define USB3_DP_QSERDES_COM_CP_CTRL_MODE1 0x1078 +#define USB3_DP_QSERDES_COM_PLL_RCTRL_MODE0 0x107C +#define USB3_DP_QSERDES_COM_PLL_RCTRL_MODE1 0x1080 +#define USB3_DP_QSERDES_COM_PLL_CCTRL_MODE0 0x1084 +#define USB3_DP_QSERDES_COM_PLL_CCTRL_MODE1 0x1088 +#define USB3_DP_QSERDES_COM_PLL_CNTRL 0x108C +#define USB3_DP_QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x1090 +#define USB3_DP_QSERDES_COM_SYSCLK_EN_SEL 0x1094 +#define USB3_DP_QSERDES_COM_CML_SYSCLK_SEL 0x1098 +#define USB3_DP_QSERDES_COM_RESETSM_CNTRL 0x109C +#define USB3_DP_QSERDES_COM_RESETSM_CNTRL2 0x10A0 +#define USB3_DP_QSERDES_COM_LOCK_CMP_EN 0x10A4 +#define USB3_DP_QSERDES_COM_LOCK_CMP_CFG 0x10A8 +#define USB3_DP_QSERDES_COM_LOCK_CMP1_MODE0 0x10AC +#define USB3_DP_QSERDES_COM_LOCK_CMP2_MODE0 0x10B0 +#define USB3_DP_QSERDES_COM_LOCK_CMP1_MODE1 0x10B4 +#define USB3_DP_QSERDES_COM_LOCK_CMP2_MODE1 0x10B8 +#define USB3_DP_QSERDES_COM_DEC_START_MODE0 0x10BC +#define USB3_DP_QSERDES_COM_DEC_START_MSB_MODE0 0x10C0 +#define USB3_DP_QSERDES_COM_DEC_START_MODE1 0x10C4 +#define USB3_DP_QSERDES_COM_DEC_START_MSB_MODE1 0x10C8 +#define USB3_DP_QSERDES_COM_DIV_FRAC_START1_MODE0 0x10CC +#define USB3_DP_QSERDES_COM_DIV_FRAC_START2_MODE0 0x10D0 +#define USB3_DP_QSERDES_COM_DIV_FRAC_START3_MODE0 0x10D4 +#define USB3_DP_QSERDES_COM_DIV_FRAC_START1_MODE1 0x10D8 +#define USB3_DP_QSERDES_COM_DIV_FRAC_START2_MODE1 0x10DC +#define USB3_DP_QSERDES_COM_DIV_FRAC_START3_MODE1 0x10E0 +#define USB3_DP_QSERDES_COM_INTEGLOOP_INITVAL 0x10E4 +#define USB3_DP_QSERDES_COM_INTEGLOOP_EN 0x10E8 +#define USB3_DP_QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x10EC +#define USB3_DP_QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x10F0 +#define USB3_DP_QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x10F4 +#define USB3_DP_QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x10F8 +#define USB3_DP_QSERDES_COM_INTEGLOOP_P_PATH_GAIN0 0x10FC +#define USB3_DP_QSERDES_COM_INTEGLOOP_P_PATH_GAIN1 0x1100 +#define USB3_DP_QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x1104 +#define USB3_DP_QSERDES_COM_VCO_TUNE_CTRL 0x1108 +#define USB3_DP_QSERDES_COM_VCO_TUNE_MAP 0x110C +#define USB3_DP_QSERDES_COM_VCO_TUNE1_MODE0 0x1110 +#define USB3_DP_QSERDES_COM_VCO_TUNE2_MODE0 0x1114 +#define USB3_DP_QSERDES_COM_VCO_TUNE1_MODE1 0x1118 +#define USB3_DP_QSERDES_COM_VCO_TUNE2_MODE1 0x111C +#define USB3_DP_QSERDES_COM_VCO_TUNE_INITVAL1 0x1120 +#define USB3_DP_QSERDES_COM_VCO_TUNE_INITVAL2 0x1124 +#define USB3_DP_QSERDES_COM_VCO_TUNE_MINVAL1 0x1128 +#define USB3_DP_QSERDES_COM_VCO_TUNE_MINVAL2 0x112C +#define USB3_DP_QSERDES_COM_VCO_TUNE_MAXVAL1 0x1130 +#define USB3_DP_QSERDES_COM_VCO_TUNE_MAXVAL2 0x1134 +#define USB3_DP_QSERDES_COM_VCO_TUNE_TIMER1 0x1138 +#define USB3_DP_QSERDES_COM_VCO_TUNE_TIMER2 0x113C +#define USB3_DP_QSERDES_COM_CMN_STATUS 0x1140 +#define USB3_DP_QSERDES_COM_RESET_SM_STATUS 0x1144 +#define USB3_DP_QSERDES_COM_RESTRIM_CODE_STATUS 0x1148 +#define USB3_DP_QSERDES_COM_PLLCAL_CODE1_STATUS 0x114C +#define USB3_DP_QSERDES_COM_PLLCAL_CODE2_STATUS 0x1150 +#define USB3_DP_QSERDES_COM_CLK_SELECT 0x1154 +#define USB3_DP_QSERDES_COM_HSCLK_SEL 0x1158 +#define 
USB3_DP_QSERDES_COM_HSCLK_HS_SWITCH_SEL 0x115C +#define USB3_DP_QSERDES_COM_INTEGLOOP_BINCODE_STATUS 0x1160 +#define USB3_DP_QSERDES_COM_PLL_ANALOG 0x1164 +#define USB3_DP_QSERDES_COM_CORECLK_DIV_MODE0 0x1168 +#define USB3_DP_QSERDES_COM_CORECLK_DIV_MODE1 0x116C +#define USB3_DP_QSERDES_COM_SW_RESET 0x1170 +#define USB3_DP_QSERDES_COM_CORE_CLK_EN 0x1174 +#define USB3_DP_QSERDES_COM_C_READY_STATUS 0x1178 +#define USB3_DP_QSERDES_COM_CMN_CONFIG 0x117C +#define USB3_DP_QSERDES_COM_CMN_RATE_OVERRIDE 0x1180 +#define USB3_DP_QSERDES_COM_SVS_MODE_CLK_SEL 0x1184 +#define USB3_DP_QSERDES_COM_DEBUG_BUS0 0x1188 +#define USB3_DP_QSERDES_COM_DEBUG_BUS1 0x118C +#define USB3_DP_QSERDES_COM_DEBUG_BUS2 0x1190 +#define USB3_DP_QSERDES_COM_DEBUG_BUS3 0x1194 +#define USB3_DP_QSERDES_COM_DEBUG_BUS_SEL 0x1198 +#define USB3_DP_QSERDES_COM_CMN_MISC1 0x119C +#define USB3_DP_QSERDES_COM_CMN_MISC2 0x11A0 +#define USB3_DP_QSERDES_COM_CMN_MODE 0x11A4 +#define USB3_DP_QSERDES_COM_VCO_DC_LEVEL_CTRL 0x11A8 +#define USB3_DP_QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x11AC +#define USB3_DP_QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x11B0 +#define USB3_DP_QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x11B4 +#define USB3_DP_QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x11B8 +#define USB3_DP_QSERDES_COM_BIN_VCOCAL_HSCLK_SEL 0x11BC + +/* Module: USB3_DP_PHY_USB3_QSERDES_TXA_USB3_QSERDES_TXA_USB3_DP_QMP_TX */ +#define USB3_DP_QSERDES_TXA_BIST_MODE_LANENO 0x1200 +#define USB3_DP_QSERDES_TXA_BIST_INVERT 0x1204 +#define USB3_DP_QSERDES_TXA_CLKBUF_ENABLE 0x1208 +#define USB3_DP_QSERDES_TXA_TX_EMP_POST1_LVL 0x120C +#define USB3_DP_QSERDES_TXA_TX_IDLE_LVL_LARGE_AMP 0x1210 +#define USB3_DP_QSERDES_TXA_TX_DRV_LVL 0x1214 +#define USB3_DP_QSERDES_TXA_TX_DRV_LVL_OFFSET 0x1218 +#define USB3_DP_QSERDES_TXA_RESET_TSYNC_EN 0x121C +#define USB3_DP_QSERDES_TXA_PRE_STALL_LDO_BOOST_EN 0x1220 +#define USB3_DP_QSERDES_TXA_TX_BAND 0x1224 +#define USB3_DP_QSERDES_TXA_SLEW_CNTL 0x1228 +#define USB3_DP_QSERDES_TXA_INTERFACE_SELECT 0x122C +#define USB3_DP_QSERDES_TXA_LPB_EN 0x1230 +#define USB3_DP_QSERDES_TXA_RES_CODE_LANE_TX 0x1234 +#define USB3_DP_QSERDES_TXA_RES_CODE_LANE_RX 0x1238 +#define USB3_DP_QSERDES_TXA_RES_CODE_LANE_OFFSET_TX 0x123C +#define USB3_DP_QSERDES_TXA_RES_CODE_LANE_OFFSET_RX 0x1240 +#define USB3_DP_QSERDES_TXA_PERL_LENGTH1 0x1244 +#define USB3_DP_QSERDES_TXA_PERL_LENGTH2 0x1248 +#define USB3_DP_QSERDES_TXA_SERDES_BYP_EN_OUT 0x124C +#define USB3_DP_QSERDES_TXA_DEBUG_BUS_SEL 0x1250 +#define USB3_DP_QSERDES_TXA_TRANSCEIVER_BIAS_EN 0x1254 +#define USB3_DP_QSERDES_TXA_HIGHZ_DRVR_EN 0x1258 +#define USB3_DP_QSERDES_TXA_TX_POL_INV 0x125C +#define USB3_DP_QSERDES_TXA_PARRATE_REC_DETECT_IDLE_EN 0x1260 +#define USB3_DP_QSERDES_TXA_BIST_PATTERN1 0x1264 +#define USB3_DP_QSERDES_TXA_BIST_PATTERN2 0x1268 +#define USB3_DP_QSERDES_TXA_BIST_PATTERN3 0x126C +#define USB3_DP_QSERDES_TXA_BIST_PATTERN4 0x1270 +#define USB3_DP_QSERDES_TXA_BIST_PATTERN5 0x1274 +#define USB3_DP_QSERDES_TXA_BIST_PATTERN6 0x1278 +#define USB3_DP_QSERDES_TXA_BIST_PATTERN7 0x127C +#define USB3_DP_QSERDES_TXA_BIST_PATTERN8 0x1280 +#define USB3_DP_QSERDES_TXA_LANE_MODE_1 0x1284 +#define USB3_DP_QSERDES_TXA_LANE_MODE_2 0x1288 +#define USB3_DP_QSERDES_TXA_LANE_MODE_3 0x128C +#define USB3_DP_QSERDES_TXA_ATB_SEL1 0x1290 +#define USB3_DP_QSERDES_TXA_ATB_SEL2 0x1294 +#define USB3_DP_QSERDES_TXA_RCV_DETECT_LVL 0x1298 +#define USB3_DP_QSERDES_TXA_RCV_DETECT_LVL_2 0x129C +#define USB3_DP_QSERDES_TXA_PRBS_SEED1 0x12A0 +#define USB3_DP_QSERDES_TXA_PRBS_SEED2 0x12A4 +#define USB3_DP_QSERDES_TXA_PRBS_SEED3 
0x12A8 +#define USB3_DP_QSERDES_TXA_PRBS_SEED4 0x12AC +#define USB3_DP_QSERDES_TXA_RESET_GEN 0x12B0 +#define USB3_DP_QSERDES_TXA_RESET_GEN_MUXES 0x12B4 +#define USB3_DP_QSERDES_TXA_TRAN_DRVR_EMP_EN 0x12B8 +#define USB3_DP_QSERDES_TXA_TX_INTERFACE_MODE 0x12BC +#define USB3_DP_QSERDES_TXA_PWM_CTRL 0x12C0 +#define USB3_DP_QSERDES_TXA_PWM_ENCODED_OR_DATA 0x12C4 +#define USB3_DP_QSERDES_TXA_PWM_GEAR_1_DIVIDER_BAND2 0x12C8 +#define USB3_DP_QSERDES_TXA_PWM_GEAR_2_DIVIDER_BAND2 0x12CC +#define USB3_DP_QSERDES_TXA_PWM_GEAR_3_DIVIDER_BAND2 0x12D0 +#define USB3_DP_QSERDES_TXA_PWM_GEAR_4_DIVIDER_BAND2 0x12D4 +#define USB3_DP_QSERDES_TXA_PWM_GEAR_1_DIVIDER_BAND0_1 0x12D8 +#define USB3_DP_QSERDES_TXA_PWM_GEAR_2_DIVIDER_BAND0_1 0x12DC +#define USB3_DP_QSERDES_TXA_PWM_GEAR_3_DIVIDER_BAND0_1 0x12E0 +#define USB3_DP_QSERDES_TXA_PWM_GEAR_4_DIVIDER_BAND0_1 0x12E4 +#define USB3_DP_QSERDES_TXA_VMODE_CTRL1 0x12E8 +#define USB3_DP_QSERDES_TXA_ALOG_OBSV_BUS_CTRL_1 0x12EC +#define USB3_DP_QSERDES_TXA_BIST_STATUS 0x12F0 +#define USB3_DP_QSERDES_TXA_BIST_ERROR_COUNT1 0x12F4 +#define USB3_DP_QSERDES_TXA_BIST_ERROR_COUNT2 0x12F8 +#define USB3_DP_QSERDES_TXA_ALOG_OBSV_BUS_STATUS_1 0x12FC +#define USB3_DP_QSERDES_TXA_LANE_DIG_CONFIG 0x1300 +#define USB3_DP_QSERDES_TXA_PI_QEC_CTRL 0x1304 +#define USB3_DP_QSERDES_TXA_PRE_EMPH 0x1308 +#define USB3_DP_QSERDES_TXA_SW_RESET 0x130C +#define USB3_DP_QSERDES_TXA_DCC_OFFSET 0x1310 +#define USB3_DP_QSERDES_TXA_DIG_BKUP_CTRL 0x1314 +#define USB3_DP_QSERDES_TXA_DEBUG_BUS0 0x1318 +#define USB3_DP_QSERDES_TXA_DEBUG_BUS1 0x131C +#define USB3_DP_QSERDES_TXA_DEBUG_BUS2 0x1320 +#define USB3_DP_QSERDES_TXA_DEBUG_BUS3 0x1324 +#define USB3_DP_QSERDES_TXA_READ_EQCODE 0x1328 +#define USB3_DP_QSERDES_TXA_READ_OFFSETCODE 0x132C +#define USB3_DP_QSERDES_TXA_IA_ERROR_COUNTER_LOW 0x1330 +#define USB3_DP_QSERDES_TXA_IA_ERROR_COUNTER_HIGH 0x1334 +#define USB3_DP_QSERDES_TXA_VGA_READ_CODE 0x1338 +#define USB3_DP_QSERDES_TXA_VTH_READ_CODE 0x133C +#define USB3_DP_QSERDES_TXA_DFE_TAP1_READ_CODE 0x1340 +#define USB3_DP_QSERDES_TXA_DFE_TAP2_READ_CODE 0x1344 +#define USB3_DP_QSERDES_TXA_IDAC_STATUS_I 0x1348 +#define USB3_DP_QSERDES_TXA_IDAC_STATUS_IBAR 0x134C +#define USB3_DP_QSERDES_TXA_IDAC_STATUS_Q 0x1350 +#define USB3_DP_QSERDES_TXA_IDAC_STATUS_QBAR 0x1354 +#define USB3_DP_QSERDES_TXA_IDAC_STATUS_A 0x1358 +#define USB3_DP_QSERDES_TXA_IDAC_STATUS_ABAR 0x135C +#define USB3_DP_QSERDES_TXA_IDAC_STATUS_SM_ON 0x1360 +#define USB3_DP_QSERDES_TXA_IDAC_STATUS_CAL_DONE 0x1364 +#define USB3_DP_QSERDES_TXA_IDAC_STATUS_SIGNERROR 0x1368 +#define USB3_DP_QSERDES_TXA_DCC_CAL_STATUS 0x136C + +/* Module: USB3_DP_PHY_USB3_QSERDES_RXA_USB3_QSERDES_RXA_USB3_DP_QMP_RX */ +#define USB3_DP_QSERDES_RXA_UCDR_FO_GAIN_HALF 0x1400 +#define USB3_DP_QSERDES_RXA_UCDR_FO_GAIN_QUARTER 0x1404 +#define USB3_DP_QSERDES_RXA_UCDR_FO_GAIN 0x1408 +#define USB3_DP_QSERDES_RXA_UCDR_SO_GAIN_HALF 0x140C +#define USB3_DP_QSERDES_RXA_UCDR_SO_GAIN_QUARTER 0x1410 +#define USB3_DP_QSERDES_RXA_UCDR_SO_GAIN 0x1414 +#define USB3_DP_QSERDES_RXA_UCDR_SVS_FO_GAIN_HALF 0x1418 +#define USB3_DP_QSERDES_RXA_UCDR_SVS_FO_GAIN_QUARTER 0x141C +#define USB3_DP_QSERDES_RXA_UCDR_SVS_FO_GAIN 0x1420 +#define USB3_DP_QSERDES_RXA_UCDR_SVS_SO_GAIN_HALF 0x1424 +#define USB3_DP_QSERDES_RXA_UCDR_SVS_SO_GAIN_QUARTER 0x1428 +#define USB3_DP_QSERDES_RXA_UCDR_SVS_SO_GAIN 0x142C +#define USB3_DP_QSERDES_RXA_UCDR_FASTLOCK_FO_GAIN 0x1430 +#define USB3_DP_QSERDES_RXA_UCDR_SO_SATURATION_AND_ENABLE 0x1434 +#define USB3_DP_QSERDES_RXA_UCDR_FO_TO_SO_DELAY 0x1438 +#define 
USB3_DP_QSERDES_RXA_UCDR_FASTLOCK_COUNT_LOW 0x143C +#define USB3_DP_QSERDES_RXA_UCDR_FASTLOCK_COUNT_HIGH 0x1440 +#define USB3_DP_QSERDES_RXA_UCDR_PI_CONTROLS 0x1444 +#define USB3_DP_QSERDES_RXA_UCDR_PI_CTRL2 0x1448 +#define USB3_DP_QSERDES_RXA_UCDR_SB2_THRESH1 0x144C +#define USB3_DP_QSERDES_RXA_UCDR_SB2_THRESH2 0x1450 +#define USB3_DP_QSERDES_RXA_UCDR_SB2_GAIN1 0x1454 +#define USB3_DP_QSERDES_RXA_UCDR_SB2_GAIN2 0x1458 +#define USB3_DP_QSERDES_RXA_AUX_CONTROL 0x145C +#define USB3_DP_QSERDES_RXA_AUX_DATA_TCOARSE_TFINE 0x1460 +#define USB3_DP_QSERDES_RXA_RCLK_AUXDATA_SEL 0x1464 +#define USB3_DP_QSERDES_RXA_AC_JTAG_ENABLE 0x1468 +#define USB3_DP_QSERDES_RXA_AC_JTAG_INITP 0x146C +#define USB3_DP_QSERDES_RXA_AC_JTAG_INITN 0x1470 +#define USB3_DP_QSERDES_RXA_AC_JTAG_LVL 0x1474 +#define USB3_DP_QSERDES_RXA_AC_JTAG_MODE 0x1478 +#define USB3_DP_QSERDES_RXA_AC_JTAG_RESET 0x147C +#define USB3_DP_QSERDES_RXA_RX_TERM_BW 0x1480 +#define USB3_DP_QSERDES_RXA_RX_RCVR_IQ_EN 0x1484 +#define USB3_DP_QSERDES_RXA_RX_IDAC_I_DC_OFFSETS 0x1488 +#define USB3_DP_QSERDES_RXA_RX_IDAC_IBAR_DC_OFFSETS 0x148C +#define USB3_DP_QSERDES_RXA_RX_IDAC_Q_DC_OFFSETS 0x1490 +#define USB3_DP_QSERDES_RXA_RX_IDAC_QBAR_DC_OFFSETS 0x1494 +#define USB3_DP_QSERDES_RXA_RX_IDAC_A_DC_OFFSETS 0x1498 +#define USB3_DP_QSERDES_RXA_RX_IDAC_ABAR_DC_OFFSETS 0x149C +#define USB3_DP_QSERDES_RXA_RX_IDAC_EN 0x14A0 +#define USB3_DP_QSERDES_RXA_RX_IDAC_ENABLES 0x14A4 +#define USB3_DP_QSERDES_RXA_RX_IDAC_SIGN 0x14A8 +#define USB3_DP_QSERDES_RXA_RX_HIGHZ_HIGHRATE 0x14AC +#define USB3_DP_QSERDES_RXA_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET 0x14B0 +#define USB3_DP_QSERDES_RXA_DFE_1 0x14B4 +#define USB3_DP_QSERDES_RXA_DFE_2 0x14B8 +#define USB3_DP_QSERDES_RXA_DFE_3 0x14BC +#define USB3_DP_QSERDES_RXA_DFE_4 0x14C0 +#define USB3_DP_QSERDES_RXA_TX_ADAPT_PRE_THRESH1 0x14C4 +#define USB3_DP_QSERDES_RXA_TX_ADAPT_PRE_THRESH2 0x14C8 +#define USB3_DP_QSERDES_RXA_TX_ADAPT_POST_THRESH 0x14CC +#define USB3_DP_QSERDES_RXA_TX_ADAPT_MAIN_THRESH 0x14D0 +#define USB3_DP_QSERDES_RXA_VGA_CAL_CNTRL1 0x14D4 +#define USB3_DP_QSERDES_RXA_VGA_CAL_CNTRL2 0x14D8 +#define USB3_DP_QSERDES_RXA_GM_CAL 0x14DC +#define USB3_DP_QSERDES_RXA_RX_VGA_GAIN2_LSB 0x14E0 +#define USB3_DP_QSERDES_RXA_RX_VGA_GAIN2_MSB 0x14E4 +#define USB3_DP_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL1 0x14E8 +#define USB3_DP_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL2 0x14EC +#define USB3_DP_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL3 0x14F0 +#define USB3_DP_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL4 0x14F4 +#define USB3_DP_QSERDES_RXA_RX_IDAC_TSETTLE_LOW 0x14F8 +#define USB3_DP_QSERDES_RXA_RX_IDAC_TSETTLE_HIGH 0x14FC +#define USB3_DP_QSERDES_RXA_RX_IDAC_MEASURE_TIME 0x1500 +#define USB3_DP_QSERDES_RXA_RX_IDAC_ACCUMULATOR 0x1504 +#define USB3_DP_QSERDES_RXA_RX_EQ_OFFSET_LSB 0x1508 +#define USB3_DP_QSERDES_RXA_RX_EQ_OFFSET_MSB 0x150C +#define USB3_DP_QSERDES_RXA_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x1510 +#define USB3_DP_QSERDES_RXA_RX_OFFSET_ADAPTOR_CNTRL2 0x1514 +#define USB3_DP_QSERDES_RXA_SIGDET_ENABLES 0x1518 +#define USB3_DP_QSERDES_RXA_SIGDET_CNTRL 0x151C +#define USB3_DP_QSERDES_RXA_SIGDET_LVL 0x1520 +#define USB3_DP_QSERDES_RXA_SIGDET_DEGLITCH_CNTRL 0x1524 +#define USB3_DP_QSERDES_RXA_RX_BAND 0x1528 +#define USB3_DP_QSERDES_RXA_CDR_FREEZE_UP_DN 0x152C +#define USB3_DP_QSERDES_RXA_CDR_RESET_OVERRIDE 0x1530 +#define USB3_DP_QSERDES_RXA_RX_INTERFACE_MODE 0x1534 +#define USB3_DP_QSERDES_RXA_JITTER_GEN_MODE 0x1538 +#define USB3_DP_QSERDES_RXA_SJ_AMP1 0x153C +#define USB3_DP_QSERDES_RXA_SJ_AMP2 0x1540 +#define USB3_DP_QSERDES_RXA_SJ_PER1 0x1544 +#define 
USB3_DP_QSERDES_RXA_SJ_PER2 0x1548 +#define USB3_DP_QSERDES_RXA_PPM_OFFSET1 0x154C +#define USB3_DP_QSERDES_RXA_PPM_OFFSET2 0x1550 +#define USB3_DP_QSERDES_RXA_SIGN_PPM_PERIOD1 0x1554 +#define USB3_DP_QSERDES_RXA_SIGN_PPM_PERIOD2 0x1558 +#define USB3_DP_QSERDES_RXA_RX_PWM_ENABLE_AND_DATA 0x155C +#define USB3_DP_QSERDES_RXA_RX_PWM_GEAR1_TIMEOUT_COUNT 0x1560 +#define USB3_DP_QSERDES_RXA_RX_PWM_GEAR2_TIMEOUT_COUNT 0x1564 +#define USB3_DP_QSERDES_RXA_RX_PWM_GEAR3_TIMEOUT_COUNT 0x1568 +#define USB3_DP_QSERDES_RXA_RX_PWM_GEAR4_TIMEOUT_COUNT 0x156C +#define USB3_DP_QSERDES_RXA_RX_MODE_00_LOW 0x1570 +#define USB3_DP_QSERDES_RXA_RX_MODE_00_HIGH 0x1574 +#define USB3_DP_QSERDES_RXA_RX_MODE_00_HIGH2 0x1578 +#define USB3_DP_QSERDES_RXA_RX_MODE_00_HIGH3 0x157C +#define USB3_DP_QSERDES_RXA_RX_MODE_00_HIGH4 0x1580 +#define USB3_DP_QSERDES_RXA_RX_MODE_01_LOW 0x1584 +#define USB3_DP_QSERDES_RXA_RX_MODE_01_HIGH 0x1588 +#define USB3_DP_QSERDES_RXA_RX_MODE_01_HIGH2 0x158C +#define USB3_DP_QSERDES_RXA_RX_MODE_01_HIGH3 0x1590 +#define USB3_DP_QSERDES_RXA_RX_MODE_01_HIGH4 0x1594 +#define USB3_DP_QSERDES_RXA_RX_MODE_10_LOW 0x1598 +#define USB3_DP_QSERDES_RXA_RX_MODE_10_HIGH 0x159C +#define USB3_DP_QSERDES_RXA_RX_MODE_10_HIGH2 0x15A0 +#define USB3_DP_QSERDES_RXA_RX_MODE_10_HIGH3 0x15A4 +#define USB3_DP_QSERDES_RXA_RX_MODE_10_HIGH4 0x15A8 +#define USB3_DP_QSERDES_RXA_PHPRE_CTRL 0x15AC +#define USB3_DP_QSERDES_RXA_PHPRE_INITVAL 0x15B0 +#define USB3_DP_QSERDES_RXA_DFE_EN_TIMER 0x15B4 +#define USB3_DP_QSERDES_RXA_DFE_CTLE_POST_CAL_OFFSET 0x15B8 +#define USB3_DP_QSERDES_RXA_DCC_CTRL1 0x15BC +#define USB3_DP_QSERDES_RXA_DCC_CTRL2 0x15C0 +#define USB3_DP_QSERDES_RXA_VTH_CODE 0x15C4 +#define USB3_DP_QSERDES_RXA_VTH_MIN_THRESH 0x15C8 +#define USB3_DP_QSERDES_RXA_VTH_MAX_THRESH 0x15CC +#define USB3_DP_QSERDES_RXA_ALOG_OBSV_BUS_CTRL_1 0x15D0 +#define USB3_DP_QSERDES_RXA_PI_CTRL1 0x15D4 +#define USB3_DP_QSERDES_RXA_PI_CTRL2 0x15D8 +#define USB3_DP_QSERDES_RXA_PI_QUAD 0x15DC +#define USB3_DP_QSERDES_RXA_IDATA1 0x15E0 +#define USB3_DP_QSERDES_RXA_IDATA2 0x15E4 +#define USB3_DP_QSERDES_RXA_AUX_DATA1 0x15E8 +#define USB3_DP_QSERDES_RXA_AUX_DATA2 0x15EC +#define USB3_DP_QSERDES_RXA_AC_JTAG_OUTP 0x15F0 +#define USB3_DP_QSERDES_RXA_AC_JTAG_OUTN 0x15F4 +#define USB3_DP_QSERDES_RXA_RX_SIGDET 0x15F8 +#define USB3_DP_QSERDES_RXA_ALOG_OBSV_BUS_STATUS_1 0x15FC + +/* Module: USB3_DP_PHY_USB3_QSERDES_TXB_USB3_QSERDES_TXB_USB3_DP_QMP_TX */ +#define USB3_DP_QSERDES_TXB_BIST_MODE_LANENO 0x1600 +#define USB3_DP_QSERDES_TXB_BIST_INVERT 0x1604 +#define USB3_DP_QSERDES_TXB_CLKBUF_ENABLE 0x1608 +#define USB3_DP_QSERDES_TXB_TX_EMP_POST1_LVL 0x160C +#define USB3_DP_QSERDES_TXB_TX_IDLE_LVL_LARGE_AMP 0x1610 +#define USB3_DP_QSERDES_TXB_TX_DRV_LVL 0x1614 +#define USB3_DP_QSERDES_TXB_TX_DRV_LVL_OFFSET 0x1618 +#define USB3_DP_QSERDES_TXB_RESET_TSYNC_EN 0x161C +#define USB3_DP_QSERDES_TXB_PRE_STALL_LDO_BOOST_EN 0x1620 +#define USB3_DP_QSERDES_TXB_TX_BAND 0x1624 +#define USB3_DP_QSERDES_TXB_SLEW_CNTL 0x1628 +#define USB3_DP_QSERDES_TXB_INTERFACE_SELECT 0x162C +#define USB3_DP_QSERDES_TXB_LPB_EN 0x1630 +#define USB3_DP_QSERDES_TXB_RES_CODE_LANE_TX 0x1634 +#define USB3_DP_QSERDES_TXB_RES_CODE_LANE_RX 0x1638 +#define USB3_DP_QSERDES_TXB_RES_CODE_LANE_OFFSET_TX 0x163C +#define USB3_DP_QSERDES_TXB_RES_CODE_LANE_OFFSET_RX 0x1640 +#define USB3_DP_QSERDES_TXB_PERL_LENGTH1 0x1644 +#define USB3_DP_QSERDES_TXB_PERL_LENGTH2 0x1648 +#define USB3_DP_QSERDES_TXB_SERDES_BYP_EN_OUT 0x164C +#define USB3_DP_QSERDES_TXB_DEBUG_BUS_SEL 0x1650 +#define 
USB3_DP_QSERDES_TXB_TRANSCEIVER_BIAS_EN 0x1654 +#define USB3_DP_QSERDES_TXB_HIGHZ_DRVR_EN 0x1658 +#define USB3_DP_QSERDES_TXB_TX_POL_INV 0x165C +#define USB3_DP_QSERDES_TXB_PARRATE_REC_DETECT_IDLE_EN 0x1660 +#define USB3_DP_QSERDES_TXB_BIST_PATTERN1 0x1664 +#define USB3_DP_QSERDES_TXB_BIST_PATTERN2 0x1668 +#define USB3_DP_QSERDES_TXB_BIST_PATTERN3 0x166C +#define USB3_DP_QSERDES_TXB_BIST_PATTERN4 0x1670 +#define USB3_DP_QSERDES_TXB_BIST_PATTERN5 0x1674 +#define USB3_DP_QSERDES_TXB_BIST_PATTERN6 0x1678 +#define USB3_DP_QSERDES_TXB_BIST_PATTERN7 0x167C +#define USB3_DP_QSERDES_TXB_BIST_PATTERN8 0x1680 +#define USB3_DP_QSERDES_TXB_LANE_MODE_1 0x1684 +#define USB3_DP_QSERDES_TXB_LANE_MODE_2 0x1688 +#define USB3_DP_QSERDES_TXB_LANE_MODE_3 0x168C +#define USB3_DP_QSERDES_TXB_ATB_SEL1 0x1690 +#define USB3_DP_QSERDES_TXB_ATB_SEL2 0x1694 +#define USB3_DP_QSERDES_TXB_RCV_DETECT_LVL 0x1698 +#define USB3_DP_QSERDES_TXB_RCV_DETECT_LVL_2 0x169C +#define USB3_DP_QSERDES_TXB_PRBS_SEED1 0x16A0 +#define USB3_DP_QSERDES_TXB_PRBS_SEED2 0x16A4 +#define USB3_DP_QSERDES_TXB_PRBS_SEED3 0x16A8 +#define USB3_DP_QSERDES_TXB_PRBS_SEED4 0x16AC +#define USB3_DP_QSERDES_TXB_RESET_GEN 0x16B0 +#define USB3_DP_QSERDES_TXB_RESET_GEN_MUXES 0x16B4 +#define USB3_DP_QSERDES_TXB_TRAN_DRVR_EMP_EN 0x16B8 +#define USB3_DP_QSERDES_TXB_TX_INTERFACE_MODE 0x16BC +#define USB3_DP_QSERDES_TXB_PWM_CTRL 0x16C0 +#define USB3_DP_QSERDES_TXB_PWM_ENCODED_OR_DATA 0x16C4 +#define USB3_DP_QSERDES_TXB_PWM_GEAR_1_DIVIDER_BAND2 0x16C8 +#define USB3_DP_QSERDES_TXB_PWM_GEAR_2_DIVIDER_BAND2 0x16CC +#define USB3_DP_QSERDES_TXB_PWM_GEAR_3_DIVIDER_BAND2 0x16D0 +#define USB3_DP_QSERDES_TXB_PWM_GEAR_4_DIVIDER_BAND2 0x16D4 +#define USB3_DP_QSERDES_TXB_PWM_GEAR_1_DIVIDER_BAND0_1 0x16D8 +#define USB3_DP_QSERDES_TXB_PWM_GEAR_2_DIVIDER_BAND0_1 0x16DC +#define USB3_DP_QSERDES_TXB_PWM_GEAR_3_DIVIDER_BAND0_1 0x16E0 +#define USB3_DP_QSERDES_TXB_PWM_GEAR_4_DIVIDER_BAND0_1 0x16E4 +#define USB3_DP_QSERDES_TXB_VMODE_CTRL1 0x16E8 +#define USB3_DP_QSERDES_TXB_ALOG_OBSV_BUS_CTRL_1 0x16EC +#define USB3_DP_QSERDES_TXB_BIST_STATUS 0x16F0 +#define USB3_DP_QSERDES_TXB_BIST_ERROR_COUNT1 0x16F4 +#define USB3_DP_QSERDES_TXB_BIST_ERROR_COUNT2 0x16F8 +#define USB3_DP_QSERDES_TXB_ALOG_OBSV_BUS_STATUS_1 0x16FC +#define USB3_DP_QSERDES_TXB_LANE_DIG_CONFIG 0x1700 +#define USB3_DP_QSERDES_TXB_PI_QEC_CTRL 0x1704 +#define USB3_DP_QSERDES_TXB_PRE_EMPH 0x1708 +#define USB3_DP_QSERDES_TXB_SW_RESET 0x170C +#define USB3_DP_QSERDES_TXB_DCC_OFFSET 0x1710 +#define USB3_DP_QSERDES_TXB_DIG_BKUP_CTRL 0x1714 +#define USB3_DP_QSERDES_TXB_DEBUG_BUS0 0x1718 +#define USB3_DP_QSERDES_TXB_DEBUG_BUS1 0x171C +#define USB3_DP_QSERDES_TXB_DEBUG_BUS2 0x1720 +#define USB3_DP_QSERDES_TXB_DEBUG_BUS3 0x1724 +#define USB3_DP_QSERDES_TXB_READ_EQCODE 0x1728 +#define USB3_DP_QSERDES_TXB_READ_OFFSETCODE 0x172C +#define USB3_DP_QSERDES_TXB_IA_ERROR_COUNTER_LOW 0x1730 +#define USB3_DP_QSERDES_TXB_IA_ERROR_COUNTER_HIGH 0x1734 +#define USB3_DP_QSERDES_TXB_VGA_READ_CODE 0x1738 +#define USB3_DP_QSERDES_TXB_VTH_READ_CODE 0x173C +#define USB3_DP_QSERDES_TXB_DFE_TAP1_READ_CODE 0x1740 +#define USB3_DP_QSERDES_TXB_DFE_TAP2_READ_CODE 0x1744 +#define USB3_DP_QSERDES_TXB_IDAC_STATUS_I 0x1748 +#define USB3_DP_QSERDES_TXB_IDAC_STATUS_IBAR 0x174C +#define USB3_DP_QSERDES_TXB_IDAC_STATUS_Q 0x1750 +#define USB3_DP_QSERDES_TXB_IDAC_STATUS_QBAR 0x1754 +#define USB3_DP_QSERDES_TXB_IDAC_STATUS_A 0x1758 +#define USB3_DP_QSERDES_TXB_IDAC_STATUS_ABAR 0x175C +#define USB3_DP_QSERDES_TXB_IDAC_STATUS_SM_ON 0x1760 +#define 
USB3_DP_QSERDES_TXB_IDAC_STATUS_CAL_DONE 0x1764 +#define USB3_DP_QSERDES_TXB_IDAC_STATUS_SIGNERROR 0x1768 +#define USB3_DP_QSERDES_TXB_DCC_CAL_STATUS 0x176C + +/* Module: USB3_DP_PHY_USB3_QSERDES_RXB_USB3_QSERDES_RXB_USB3_DP_QMP_RX */ +#define USB3_DP_QSERDES_RXB_UCDR_FO_GAIN_HALF 0x1800 +#define USB3_DP_QSERDES_RXB_UCDR_FO_GAIN_QUARTER 0x1804 +#define USB3_DP_QSERDES_RXB_UCDR_FO_GAIN 0x1808 +#define USB3_DP_QSERDES_RXB_UCDR_SO_GAIN_HALF 0x180C +#define USB3_DP_QSERDES_RXB_UCDR_SO_GAIN_QUARTER 0x1810 +#define USB3_DP_QSERDES_RXB_UCDR_SO_GAIN 0x1814 +#define USB3_DP_QSERDES_RXB_UCDR_SVS_FO_GAIN_HALF 0x1818 +#define USB3_DP_QSERDES_RXB_UCDR_SVS_FO_GAIN_QUARTER 0x181C +#define USB3_DP_QSERDES_RXB_UCDR_SVS_FO_GAIN 0x1820 +#define USB3_DP_QSERDES_RXB_UCDR_SVS_SO_GAIN_HALF 0x1824 +#define USB3_DP_QSERDES_RXB_UCDR_SVS_SO_GAIN_QUARTER 0x1828 +#define USB3_DP_QSERDES_RXB_UCDR_SVS_SO_GAIN 0x182C +#define USB3_DP_QSERDES_RXB_UCDR_FASTLOCK_FO_GAIN 0x1830 +#define USB3_DP_QSERDES_RXB_UCDR_SO_SATURATION_AND_ENABLE 0x1834 +#define USB3_DP_QSERDES_RXB_UCDR_FO_TO_SO_DELAY 0x1838 +#define USB3_DP_QSERDES_RXB_UCDR_FASTLOCK_COUNT_LOW 0x183C +#define USB3_DP_QSERDES_RXB_UCDR_FASTLOCK_COUNT_HIGH 0x1840 +#define USB3_DP_QSERDES_RXB_UCDR_PI_CONTROLS 0x1844 +#define USB3_DP_QSERDES_RXB_UCDR_PI_CTRL2 0x1848 +#define USB3_DP_QSERDES_RXB_UCDR_SB2_THRESH1 0x184C +#define USB3_DP_QSERDES_RXB_UCDR_SB2_THRESH2 0x1850 +#define USB3_DP_QSERDES_RXB_UCDR_SB2_GAIN1 0x1854 +#define USB3_DP_QSERDES_RXB_UCDR_SB2_GAIN2 0x1858 +#define USB3_DP_QSERDES_RXB_AUX_CONTROL 0x185C +#define USB3_DP_QSERDES_RXB_AUX_DATA_TCOARSE_TFINE 0x1860 +#define USB3_DP_QSERDES_RXB_RCLK_AUXDATA_SEL 0x1864 +#define USB3_DP_QSERDES_RXB_AC_JTAG_ENABLE 0x1868 +#define USB3_DP_QSERDES_RXB_AC_JTAG_INITP 0x186C +#define USB3_DP_QSERDES_RXB_AC_JTAG_INITN 0x1870 +#define USB3_DP_QSERDES_RXB_AC_JTAG_LVL 0x1874 +#define USB3_DP_QSERDES_RXB_AC_JTAG_MODE 0x1878 +#define USB3_DP_QSERDES_RXB_AC_JTAG_RESET 0x187C +#define USB3_DP_QSERDES_RXB_RX_TERM_BW 0x1880 +#define USB3_DP_QSERDES_RXB_RX_RCVR_IQ_EN 0x1884 +#define USB3_DP_QSERDES_RXB_RX_IDAC_I_DC_OFFSETS 0x1888 +#define USB3_DP_QSERDES_RXB_RX_IDAC_IBAR_DC_OFFSETS 0x188C +#define USB3_DP_QSERDES_RXB_RX_IDAC_Q_DC_OFFSETS 0x1890 +#define USB3_DP_QSERDES_RXB_RX_IDAC_QBAR_DC_OFFSETS 0x1894 +#define USB3_DP_QSERDES_RXB_RX_IDAC_A_DC_OFFSETS 0x1898 +#define USB3_DP_QSERDES_RXB_RX_IDAC_ABAR_DC_OFFSETS 0x189C +#define USB3_DP_QSERDES_RXB_RX_IDAC_EN 0x18A0 +#define USB3_DP_QSERDES_RXB_RX_IDAC_ENABLES 0x18A4 +#define USB3_DP_QSERDES_RXB_RX_IDAC_SIGN 0x18A8 +#define USB3_DP_QSERDES_RXB_RX_HIGHZ_HIGHRATE 0x18AC +#define USB3_DP_QSERDES_RXB_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET 0x18B0 +#define USB3_DP_QSERDES_RXB_DFE_1 0x18B4 +#define USB3_DP_QSERDES_RXB_DFE_2 0x18B8 +#define USB3_DP_QSERDES_RXB_DFE_3 0x18BC +#define USB3_DP_QSERDES_RXB_DFE_4 0x18C0 +#define USB3_DP_QSERDES_RXB_TX_ADAPT_PRE_THRESH1 0x18C4 +#define USB3_DP_QSERDES_RXB_TX_ADAPT_PRE_THRESH2 0x18C8 +#define USB3_DP_QSERDES_RXB_TX_ADAPT_POST_THRESH 0x18CC +#define USB3_DP_QSERDES_RXB_TX_ADAPT_MAIN_THRESH 0x18D0 +#define USB3_DP_QSERDES_RXB_VGA_CAL_CNTRL1 0x18D4 +#define USB3_DP_QSERDES_RXB_VGA_CAL_CNTRL2 0x18D8 +#define USB3_DP_QSERDES_RXB_GM_CAL 0x18DC +#define USB3_DP_QSERDES_RXB_RX_VGA_GAIN2_LSB 0x18E0 +#define USB3_DP_QSERDES_RXB_RX_VGA_GAIN2_MSB 0x18E4 +#define USB3_DP_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL1 0x18E8 +#define USB3_DP_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL2 0x18EC +#define USB3_DP_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL3 0x18F0 +#define 
USB3_DP_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL4 0x18F4 +#define USB3_DP_QSERDES_RXB_RX_IDAC_TSETTLE_LOW 0x18F8 +#define USB3_DP_QSERDES_RXB_RX_IDAC_TSETTLE_HIGH 0x18FC +#define USB3_DP_QSERDES_RXB_RX_IDAC_MEASURE_TIME 0x1900 +#define USB3_DP_QSERDES_RXB_RX_IDAC_ACCUMULATOR 0x1904 +#define USB3_DP_QSERDES_RXB_RX_EQ_OFFSET_LSB 0x1908 +#define USB3_DP_QSERDES_RXB_RX_EQ_OFFSET_MSB 0x190C +#define USB3_DP_QSERDES_RXB_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x1910 +#define USB3_DP_QSERDES_RXB_RX_OFFSET_ADAPTOR_CNTRL2 0x1914 +#define USB3_DP_QSERDES_RXB_SIGDET_ENABLES 0x1918 +#define USB3_DP_QSERDES_RXB_SIGDET_CNTRL 0x191C +#define USB3_DP_QSERDES_RXB_SIGDET_LVL 0x1920 +#define USB3_DP_QSERDES_RXB_SIGDET_DEGLITCH_CNTRL 0x1924 +#define USB3_DP_QSERDES_RXB_RX_BAND 0x1928 +#define USB3_DP_QSERDES_RXB_CDR_FREEZE_UP_DN 0x192C +#define USB3_DP_QSERDES_RXB_CDR_RESET_OVERRIDE 0x1930 +#define USB3_DP_QSERDES_RXB_RX_INTERFACE_MODE 0x1934 +#define USB3_DP_QSERDES_RXB_JITTER_GEN_MODE 0x1938 +#define USB3_DP_QSERDES_RXB_SJ_AMP1 0x193C +#define USB3_DP_QSERDES_RXB_SJ_AMP2 0x1940 +#define USB3_DP_QSERDES_RXB_SJ_PER1 0x1944 +#define USB3_DP_QSERDES_RXB_SJ_PER2 0x1948 +#define USB3_DP_QSERDES_RXB_PPM_OFFSET1 0x194C +#define USB3_DP_QSERDES_RXB_PPM_OFFSET2 0x1950 +#define USB3_DP_QSERDES_RXB_SIGN_PPM_PERIOD1 0x1954 +#define USB3_DP_QSERDES_RXB_SIGN_PPM_PERIOD2 0x1958 +#define USB3_DP_QSERDES_RXB_RX_PWM_ENABLE_AND_DATA 0x195C +#define USB3_DP_QSERDES_RXB_RX_PWM_GEAR1_TIMEOUT_COUNT 0x1960 +#define USB3_DP_QSERDES_RXB_RX_PWM_GEAR2_TIMEOUT_COUNT 0x1964 +#define USB3_DP_QSERDES_RXB_RX_PWM_GEAR3_TIMEOUT_COUNT 0x1968 +#define USB3_DP_QSERDES_RXB_RX_PWM_GEAR4_TIMEOUT_COUNT 0x196C +#define USB3_DP_QSERDES_RXB_RX_MODE_00_LOW 0x1970 +#define USB3_DP_QSERDES_RXB_RX_MODE_00_HIGH 0x1974 +#define USB3_DP_QSERDES_RXB_RX_MODE_00_HIGH2 0x1978 +#define USB3_DP_QSERDES_RXB_RX_MODE_00_HIGH3 0x197C +#define USB3_DP_QSERDES_RXB_RX_MODE_00_HIGH4 0x1980 +#define USB3_DP_QSERDES_RXB_RX_MODE_01_LOW 0x1984 +#define USB3_DP_QSERDES_RXB_RX_MODE_01_HIGH 0x1988 +#define USB3_DP_QSERDES_RXB_RX_MODE_01_HIGH2 0x198C +#define USB3_DP_QSERDES_RXB_RX_MODE_01_HIGH3 0x1990 +#define USB3_DP_QSERDES_RXB_RX_MODE_01_HIGH4 0x1994 +#define USB3_DP_QSERDES_RXB_RX_MODE_10_LOW 0x1998 +#define USB3_DP_QSERDES_RXB_RX_MODE_10_HIGH 0x199C +#define USB3_DP_QSERDES_RXB_RX_MODE_10_HIGH2 0x19A0 +#define USB3_DP_QSERDES_RXB_RX_MODE_10_HIGH3 0x19A4 +#define USB3_DP_QSERDES_RXB_RX_MODE_10_HIGH4 0x19A8 +#define USB3_DP_QSERDES_RXB_PHPRE_CTRL 0x19AC +#define USB3_DP_QSERDES_RXB_PHPRE_INITVAL 0x19B0 +#define USB3_DP_QSERDES_RXB_DFE_EN_TIMER 0x19B4 +#define USB3_DP_QSERDES_RXB_DFE_CTLE_POST_CAL_OFFSET 0x19B8 +#define USB3_DP_QSERDES_RXB_DCC_CTRL1 0x19BC +#define USB3_DP_QSERDES_RXB_DCC_CTRL2 0x19C0 +#define USB3_DP_QSERDES_RXB_VTH_CODE 0x19C4 +#define USB3_DP_QSERDES_RXB_VTH_MIN_THRESH 0x19C8 +#define USB3_DP_QSERDES_RXB_VTH_MAX_THRESH 0x19CC +#define USB3_DP_QSERDES_RXB_ALOG_OBSV_BUS_CTRL_1 0x19D0 +#define USB3_DP_QSERDES_RXB_PI_CTRL1 0x19D4 +#define USB3_DP_QSERDES_RXB_PI_CTRL2 0x19D8 +#define USB3_DP_QSERDES_RXB_PI_QUAD 0x19DC +#define USB3_DP_QSERDES_RXB_IDATA1 0x19E0 +#define USB3_DP_QSERDES_RXB_IDATA2 0x19E4 +#define USB3_DP_QSERDES_RXB_AUX_DATA1 0x19E8 +#define USB3_DP_QSERDES_RXB_AUX_DATA2 0x19EC +#define USB3_DP_QSERDES_RXB_AC_JTAG_OUTP 0x19F0 +#define USB3_DP_QSERDES_RXB_AC_JTAG_OUTN 0x19F4 +#define USB3_DP_QSERDES_RXB_RX_SIGDET 0x19F8 +#define USB3_DP_QSERDES_RXB_ALOG_OBSV_BUS_STATUS_1 0x19FC + +/* Module: USB3_DP_PHY_USB3_PCS_MISC_USB3_PCS_MISC_USB3_PCS_MISC */ +#define 
USB3_DP_PCS_MISC_TYPEC_CTRL 0x1A00 +#define USB3_DP_PCS_MISC_TYPEC_PWRDN_CTRL 0x1A04 +#define USB3_DP_PCS_MISC_PCS_MISC_CONFIG1 0x1A08 +#define USB3_DP_PCS_MISC_CLAMP_ENABLE 0x1A0C +#define USB3_DP_PCS_MISC_TYPEC_STATUS 0x1A10 +#define USB3_DP_PCS_MISC_PLACEHOLDER_STATUS 0x1A14 + +/* Module: USB3_DP_PHY_USB3_PCS_LN_USB3_PCS_LN_USB3_PCS_LANE */ +#define USB3_DP_PCS_LN_PCS_STATUS1 0x1B00 +#define USB3_DP_PCS_LN_PCS_STATUS2 0x1B04 +#define USB3_DP_PCS_LN_PCS_STATUS2_CLEAR 0x1B08 +#define USB3_DP_PCS_LN_PCS_STATUS3 0x1B0C +#define USB3_DP_PCS_LN_BIST_CHK_ERR_CNT_L_STATUS 0x1B10 +#define USB3_DP_PCS_LN_BIST_CHK_ERR_CNT_H_STATUS 0x1B14 +#define USB3_DP_PCS_LN_BIST_CHK_STATUS 0x1B18 +#define USB3_DP_PCS_LN_INSIG_SW_CTRL1 0x1B1C +#define USB3_DP_PCS_LN_INSIG_MX_CTRL1 0x1B20 +#define USB3_DP_PCS_LN_OUTSIG_SW_CTRL1 0x1B24 +#define USB3_DP_PCS_LN_OUTSIG_MX_CTRL1 0x1B28 +#define USB3_DP_PCS_LN_TEST_CONTROL 0x1B2C +#define USB3_DP_PCS_LN_BIST_CTRL 0x1B30 +#define USB3_DP_PCS_LN_PRBS_SEED0 0x1B34 +#define USB3_DP_PCS_LN_PRBS_SEED1 0x1B38 +#define USB3_DP_PCS_LN_FIXED_PAT_CTRL 0x1B3C +#define USB3_DP_PCS_LN_EQ_CONFIG 0x1B40 + +/* Module: USB3_DP_PHY_USB3_PCS_USB3_PCS_USB3_PCS */ +#define USB3_DP_PCS_SW_RESET 0x1C00 +#define USB3_DP_PCS_REVISION_ID0 0x1C04 +#define USB3_DP_PCS_REVISION_ID1 0x1C08 +#define USB3_DP_PCS_REVISION_ID2 0x1C0C +#define USB3_DP_PCS_REVISION_ID3 0x1C10 +#define USB3_DP_PCS_PCS_STATUS1 0x1C14 +#define USB3_DP_PCS_PCS_STATUS2 0x1C18 +#define USB3_DP_PCS_PCS_STATUS3 0x1C1C +#define USB3_DP_PCS_PCS_STATUS4 0x1C20 +#define USB3_DP_PCS_PCS_STATUS5 0x1C24 +#define USB3_DP_PCS_PCS_STATUS6 0x1C28 +#define USB3_DP_PCS_PCS_STATUS7 0x1C2C +#define USB3_DP_PCS_DEBUG_BUS_0_STATUS 0x1C30 +#define USB3_DP_PCS_DEBUG_BUS_1_STATUS 0x1C34 +#define USB3_DP_PCS_DEBUG_BUS_2_STATUS 0x1C38 +#define USB3_DP_PCS_DEBUG_BUS_3_STATUS 0x1C3C +#define USB3_DP_PCS_POWER_DOWN_CONTROL 0x1C40 +#define USB3_DP_PCS_START_CONTROL 0x1C44 +#define USB3_DP_PCS_INSIG_SW_CTRL1 0x1C48 +#define USB3_DP_PCS_INSIG_SW_CTRL2 0x1C4C +#define USB3_DP_PCS_INSIG_SW_CTRL3 0x1C50 +#define USB3_DP_PCS_INSIG_SW_CTRL4 0x1C54 +#define USB3_DP_PCS_INSIG_SW_CTRL5 0x1C58 +#define USB3_DP_PCS_INSIG_SW_CTRL6 0x1C5C +#define USB3_DP_PCS_INSIG_SW_CTRL7 0x1C60 +#define USB3_DP_PCS_INSIG_SW_CTRL8 0x1C64 +#define USB3_DP_PCS_INSIG_MX_CTRL1 0x1C68 +#define USB3_DP_PCS_INSIG_MX_CTRL2 0x1C6C +#define USB3_DP_PCS_INSIG_MX_CTRL3 0x1C70 +#define USB3_DP_PCS_INSIG_MX_CTRL4 0x1C74 +#define USB3_DP_PCS_INSIG_MX_CTRL5 0x1C78 +#define USB3_DP_PCS_INSIG_MX_CTRL7 0x1C7C +#define USB3_DP_PCS_INSIG_MX_CTRL8 0x1C80 +#define USB3_DP_PCS_OUTSIG_SW_CTRL1 0x1C84 +#define USB3_DP_PCS_OUTSIG_MX_CTRL1 0x1C88 +#define USB3_DP_PCS_CLAMP_ENABLE 0x1C8C +#define USB3_DP_PCS_POWER_STATE_CONFIG1 0x1C90 +#define USB3_DP_PCS_POWER_STATE_CONFIG2 0x1C94 +#define USB3_DP_PCS_FLL_CNTRL1 0x1C98 +#define USB3_DP_PCS_FLL_CNTRL2 0x1C9C +#define USB3_DP_PCS_FLL_CNT_VAL_L 0x1CA0 +#define USB3_DP_PCS_FLL_CNT_VAL_H_TOL 0x1CA4 +#define USB3_DP_PCS_FLL_MAN_CODE 0x1CA8 +#define USB3_DP_PCS_TEST_CONTROL1 0x1CAC +#define USB3_DP_PCS_TEST_CONTROL2 0x1CB0 +#define USB3_DP_PCS_TEST_CONTROL3 0x1CB4 +#define USB3_DP_PCS_TEST_CONTROL4 0x1CB8 +#define USB3_DP_PCS_TEST_CONTROL5 0x1CBC +#define USB3_DP_PCS_TEST_CONTROL6 0x1CC0 +#define USB3_DP_PCS_LOCK_DETECT_CONFIG1 0x1CC4 +#define USB3_DP_PCS_LOCK_DETECT_CONFIG2 0x1CC8 +#define USB3_DP_PCS_LOCK_DETECT_CONFIG3 0x1CCC +#define USB3_DP_PCS_LOCK_DETECT_CONFIG4 0x1CD0 +#define USB3_DP_PCS_LOCK_DETECT_CONFIG5 0x1CD4 +#define USB3_DP_PCS_LOCK_DETECT_CONFIG6 
0x1CD8 +#define USB3_DP_PCS_REFGEN_REQ_CONFIG1 0x1CDC +#define USB3_DP_PCS_REFGEN_REQ_CONFIG2 0x1CE0 +#define USB3_DP_PCS_REFGEN_REQ_CONFIG3 0x1CE4 +#define USB3_DP_PCS_BIST_CTRL 0x1CE8 +#define USB3_DP_PCS_PRBS_POLY0 0x1CEC +#define USB3_DP_PCS_PRBS_POLY1 0x1CF0 +#define USB3_DP_PCS_FIXED_PAT0 0x1CF4 +#define USB3_DP_PCS_FIXED_PAT1 0x1CF8 +#define USB3_DP_PCS_FIXED_PAT2 0x1CFC +#define USB3_DP_PCS_FIXED_PAT3 0x1D00 +#define USB3_DP_PCS_FIXED_PAT4 0x1D04 +#define USB3_DP_PCS_FIXED_PAT5 0x1D08 +#define USB3_DP_PCS_FIXED_PAT6 0x1D0C +#define USB3_DP_PCS_FIXED_PAT7 0x1D10 +#define USB3_DP_PCS_FIXED_PAT8 0x1D14 +#define USB3_DP_PCS_FIXED_PAT9 0x1D18 +#define USB3_DP_PCS_FIXED_PAT10 0x1D1C +#define USB3_DP_PCS_FIXED_PAT11 0x1D20 +#define USB3_DP_PCS_FIXED_PAT12 0x1D24 +#define USB3_DP_PCS_FIXED_PAT13 0x1D28 +#define USB3_DP_PCS_FIXED_PAT14 0x1D2C +#define USB3_DP_PCS_FIXED_PAT15 0x1D30 +#define USB3_DP_PCS_TXMGN_CONFIG 0x1D34 +#define USB3_DP_PCS_G12S1_TXMGN_V0 0x1D38 +#define USB3_DP_PCS_G12S1_TXMGN_V1 0x1D3C +#define USB3_DP_PCS_G12S1_TXMGN_V2 0x1D40 +#define USB3_DP_PCS_G12S1_TXMGN_V3 0x1D44 +#define USB3_DP_PCS_G12S1_TXMGN_V4 0x1D48 +#define USB3_DP_PCS_G12S1_TXMGN_V0_RS 0x1D4C +#define USB3_DP_PCS_G12S1_TXMGN_V1_RS 0x1D50 +#define USB3_DP_PCS_G12S1_TXMGN_V2_RS 0x1D54 +#define USB3_DP_PCS_G12S1_TXMGN_V3_RS 0x1D58 +#define USB3_DP_PCS_G12S1_TXMGN_V4_RS 0x1D5C +#define USB3_DP_PCS_G3S2_TXMGN_MAIN 0x1D60 +#define USB3_DP_PCS_G3S2_TXMGN_MAIN_RS 0x1D64 +#define USB3_DP_PCS_G12S1_TXDEEMPH_M6DB 0x1D68 +#define USB3_DP_PCS_G12S1_TXDEEMPH_M3P5DB 0x1D6C +#define USB3_DP_PCS_G3S2_PRE_GAIN 0x1D70 +#define USB3_DP_PCS_G3S2_POST_GAIN 0x1D74 +#define USB3_DP_PCS_G3S2_PRE_POST_OFFSET 0x1D78 +#define USB3_DP_PCS_G3S2_PRE_GAIN_RS 0x1D7C +#define USB3_DP_PCS_G3S2_POST_GAIN_RS 0x1D80 +#define USB3_DP_PCS_G3S2_PRE_POST_OFFSET_RS 0x1D84 +#define USB3_DP_PCS_RX_SIGDET_LVL 0x1D88 +#define USB3_DP_PCS_RX_SIGDET_DTCT_CNTRL 0x1D8C +#define USB3_DP_PCS_RCVR_DTCT_DLY_P1U2_L 0x1D90 +#define USB3_DP_PCS_RCVR_DTCT_DLY_P1U2_H 0x1D94 +#define USB3_DP_PCS_RATE_SLEW_CNTRL1 0x1D98 +#define USB3_DP_PCS_RATE_SLEW_CNTRL2 0x1D9C +#define USB3_DP_PCS_PWRUP_RESET_DLY_TIME_AUXCLK 0x1DA0 +#define USB3_DP_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L 0x1DA4 +#define USB3_DP_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H 0x1DA8 +#define USB3_DP_PCS_TSYNC_RSYNC_TIME 0x1DAC +#define USB3_DP_PCS_CDR_RESET_TIME 0x1DB0 +#define USB3_DP_PCS_TSYNC_DLY_TIME 0x1DB4 +#define USB3_DP_PCS_ELECIDLE_DLY_SEL 0x1DB8 +#define USB3_DP_PCS_CMN_ACK_OUT_SEL 0x1DBC +#define USB3_DP_PCS_ALIGN_DETECT_CONFIG1 0x1DC0 +#define USB3_DP_PCS_ALIGN_DETECT_CONFIG2 0x1DC4 +#define USB3_DP_PCS_ALIGN_DETECT_CONFIG3 0x1DC8 +#define USB3_DP_PCS_ALIGN_DETECT_CONFIG4 0x1DCC +#define USB3_DP_PCS_PCS_TX_RX_CONFIG 0x1DD0 +#define USB3_DP_PCS_RX_IDLE_DTCT_CNTRL 0x1DD4 +#define USB3_DP_PCS_RX_DCC_CAL_CONFIG 0x1DD8 +#define USB3_DP_PCS_EQ_CONFIG1 0x1DDC +#define USB3_DP_PCS_EQ_CONFIG2 0x1DE0 +#define USB3_DP_PCS_EQ_CONFIG3 0x1DE4 +#define USB3_DP_PCS_EQ_CONFIG4 0x1DE8 +#define USB3_DP_PCS_EQ_CONFIG5 0x1DEC + +/* Module: USB3_DP_PHY_USB3_PCS_USB3_USB3_PCS_USB3_USB3_PCS_USB3 */ +#define USB3_DP_PCS_USB3_POWER_STATE_CONFIG1 0x1F00 +#define USB3_DP_PCS_USB3_AUTONOMOUS_MODE_STATUS 0x1F04 +#define USB3_DP_PCS_USB3_AUTONOMOUS_MODE_CTRL 0x1F08 +#define USB3_DP_PCS_USB3_AUTONOMOUS_MODE_CTRL2 0x1F0C +#define USB3_DP_PCS_USB3_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x1F10 +#define USB3_DP_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR 0x1F14 +#define USB3_DP_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x1F18 +#define USB3_DP_PCS_USB3_LFPS_TX_ECSTART 
0x1F1C +#define USB3_DP_PCS_USB3_LFPS_PER_TIMER_VAL 0x1F20 +#define USB3_DP_PCS_USB3_LFPS_TX_END_CNT_U3_START 0x1F24 +#define USB3_DP_PCS_USB3_RXEQTRAINING_LOCK_TIME 0x1F28 +#define USB3_DP_PCS_USB3_RXEQTRAINING_WAIT_TIME 0x1F2C +#define USB3_DP_PCS_USB3_RXEQTRAINING_CTLE_TIME 0x1F30 +#define USB3_DP_PCS_USB3_RXEQTRAINING_WAIT_TIME_S2 0x1F34 +#define USB3_DP_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x1F38 +#define USB3_DP_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x1F3C +#define USB3_DP_PCS_USB3_RCVR_DTCT_DLY_U3_H 0x1F40 +#define USB3_DP_PCS_USB3_ARCVR_DTCT_EN_PERIOD 0x1F44 +#define USB3_DP_PCS_USB3_ARCVR_DTCT_CM_DLY 0x1F48 +#define USB3_DP_PCS_USB3_TXONESZEROS_RUN_LENGTH 0x1F4C +#define USB3_DP_PCS_USB3_ALFPS_DEGLITCH_VAL 0x1F50 +#define USB3_DP_PCS_USB3_SIGDET_STARTUP_TIMER_VAL 0x1F54 +#define USB3_DP_PCS_USB3_TEST_CONTROL 0x1F58 +#define USB3_DP_PCS_RXTERMINATION_DLY_SEL 0x1F5C + +#endif /* _DT_BINDINGS_PHY_QCOM_KHAJE_QMP_USB_H */ diff --git a/include/dt-bindings/sound/qcom,gpr.h b/include/dt-bindings/sound/qcom,gpr.h new file mode 100644 index 000000000000..c61a56886b02 --- /dev/null +++ b/include/dt-bindings/sound/qcom,gpr.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2021, The Linux Foundation. All rights reserved. */ +#ifndef __DT_BINDINGS_QCOM_GPR_H +#define __DT_BINDINGS_QCOM_GPR_H + +/* Domain IDs */ +#define GPR_DOMAIN_SIM 0x0 +#define GPR_DOMAIN_MODEM 0x1 +#define GPR_DOMAIN_ADSP 0x2 +#define GPR_DOMAIN_APPS 0x3 +#define GPR_DOMAIN_SDSP 0x4 +#define GPR_DOMAIN_CDSP 0x5 +#define GPR_DOMAIN_MAX 0x6 + +/* ADSP service IDs */ +#define GPR_SVC_ADSP_CORE 0x3 +#define GPR_SVC_AFE 0x4 +#define GPR_SVC_VSM 0x5 +#define GPR_SVC_VPM 0x6 +#define GPR_SVC_ASM 0x7 +#define GPR_SVC_ADM 0x8 +#define GPR_SVC_ADSP_MVM 0x09 +#define GPR_SVC_ADSP_CVS 0x0A +#define GPR_SVC_ADSP_CVP 0x0B +#define GPR_SVC_USM 0x0C +#define GPR_SVC_LSM 0x0D +#define GPR_SVC_VIDC 0x16 +#define GPR_SVC_MAX 0x17 + +#endif /* __DT_BINDINGS_QCOM_GPR_H */ diff --git a/include/linux/batterydata-interface.h b/include/linux/batterydata-interface.h new file mode 100644 index 000000000000..aa3acd268ddd --- /dev/null +++ b/include/linux/batterydata-interface.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2014-2015, 2018, 2021, The Linux Foundation. All rights reserved. 
+ */ + +#include + +int config_battery_data(struct bms_battery_data *profile); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 26f0a62c303c..96f3ff01778f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -128,6 +128,8 @@ typedef __u32 __bitwise req_flags_t; #define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20)) /* ->timeout has been called, don't expire again */ #define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21)) +/* increased nr_pending for this request */ +#define RQF_PM_ADDED ((__force req_flags_t)(1 << 22)) /* flags that prevent us from merging requests: */ #define RQF_NOMERGE_FLAGS \ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 9f511b3e769b..57ce109f039e 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -99,6 +99,7 @@ enum { CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */ + CFTYPE_PRESSURE = (1 << 6), /* only if pressure feature is enabled */ /* internal flags, do not use outside cgroup core proper */ __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 11003a115508..0d9f2148cb3e 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -667,6 +667,8 @@ static inline struct psi_group *cgroup_psi(struct cgroup *cgrp) return &cgrp->psi; } +bool cgroup_psi_enabled(void); + static inline void cgroup_init_kthreadd(void) { /* @@ -731,6 +733,11 @@ static inline struct psi_group *cgroup_psi(struct cgroup *cgrp) return NULL; } +static inline bool cgroup_psi_enabled(void) +{ + return false; +} + static inline bool task_under_cgroup_hierarchy(struct task_struct *task, struct cgroup *ancestor) { diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h index babac5692398..39f2b9f7f39e 100644 --- a/include/linux/diagchar.h +++ b/include/linux/diagchar.h @@ -142,8 +142,8 @@ /* This needs to be modified manually now, when we add * a new RANGE of SSIDs to the msg_mask_tbl. 
*/ -#define MSG_MASK_TBL_CNT 26 -#define APPS_EVENT_LAST_ID 0xCED +#define MSG_MASK_TBL_CNT 27 +#define APPS_EVENT_LAST_ID 0xCFE #define MSG_SSID_0 0 #define MSG_SSID_0_LAST 134 @@ -195,8 +195,10 @@ #define MSG_SSID_23_LAST 10416 #define MSG_SSID_24 10500 #define MSG_SSID_24_LAST 10505 -#define MSG_SSID_25 0xC000 -#define MSG_SSID_25_LAST 0xC063 +#define MSG_SSID_25 10600 +#define MSG_SSID_25_LAST 10620 +#define MSG_SSID_26 0xC000 +#define MSG_SSID_26_LAST 0xC063 static const uint32_t msg_bld_masks_0[] = { MSG_LVL_LOW, @@ -916,13 +918,39 @@ static const uint32_t msg_bld_masks_24[] = { }; static const uint32_t msg_bld_masks_25[] = { + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | + MSG_MASK_8 | MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11, + MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | + MSG_MASK_8 | MSG_MASK_9 | MSG_MASK_10, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW, + MSG_LVL_LOW +}; + +static const uint32_t msg_bld_masks_26[] = { MSG_LVL_LOW }; /* LOG CODES */ static const uint32_t log_code_last_tbl[] = { 0x0, /* EQUIP ID 0 */ - 0x1D1E, /* EQUIP ID 1 */ + 0x1D86, /* EQUIP ID 1 */ 0x0, /* EQUIP ID 2 */ 0x0, /* EQUIP ID 3 */ 0x4910, /* EQUIP ID 4 */ diff --git a/include/linux/leds.h b/include/linux/leds.h index fc0ce6c21ab8..8813f77f7f95 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -37,6 +37,7 @@ struct led_classdev { const char *name; enum led_brightness brightness; enum led_brightness max_brightness; + enum led_brightness usr_brightness_req; int flags; /* Lower 16 bits reflect status */ diff --git a/include/linux/mm.h b/include/linux/mm.h index d7cca734feb5..e1eded55e23d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1498,22 +1498,13 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, #ifdef CONFIG_SPECULATIVE_PAGE_FAULT static inline void vm_write_begin(struct vm_area_struct *vma) { - write_seqcount_begin(&vma->vm_sequence); -} -static inline void vm_write_begin_nested(struct vm_area_struct *vma, - int subclass) -{ - write_seqcount_begin_nested(&vma->vm_sequence, subclass); -} -static inline void vm_write_end(struct vm_area_struct *vma) -{ - write_seqcount_end(&vma->vm_sequence); -} -static inline void vm_raw_write_begin(struct vm_area_struct *vma) -{ + /* + * The reads never spins and preemption + * disablement is not required. 
+ */ raw_write_seqcount_begin(&vma->vm_sequence); } -static inline void vm_raw_write_end(struct vm_area_struct *vma) +static inline void vm_write_end(struct vm_area_struct *vma) { raw_write_seqcount_end(&vma->vm_sequence); } @@ -1521,19 +1512,9 @@ static inline void vm_raw_write_end(struct vm_area_struct *vma) static inline void vm_write_begin(struct vm_area_struct *vma) { } -static inline void vm_write_begin_nested(struct vm_area_struct *vma, - int subclass) -{ -} static inline void vm_write_end(struct vm_area_struct *vma) { } -static inline void vm_raw_write_begin(struct vm_area_struct *vma) -{ -} -static inline void vm_raw_write_end(struct vm_area_struct *vma) -{ -} #endif /* CONFIG_SPECULATIVE_PAGE_FAULT */ extern void truncate_pagecache(struct inode *inode, loff_t new); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 58952fa01f1c..83b667fd5704 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1225,11 +1225,6 @@ int perf_event_max_stack_handler(struct ctl_table *table, int write, #define PERF_SECURITY_KERNEL 2 #define PERF_SECURITY_TRACEPOINT 3 -static inline bool perf_paranoid_any(void) -{ - return sysctl_perf_event_paranoid > 2; -} - static inline int perf_is_paranoid(void) { return sysctl_perf_event_paranoid > -1; diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h new file mode 100644 index 000000000000..0d03c6c7c7d7 --- /dev/null +++ b/include/linux/qpnp/qpnp-adc.h @@ -0,0 +1,970 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2012-2018, 2020-2021, The Linux Foundation. All rights reserved. + */ +/* + * Qualcomm Technologies Inc. PMIC QPNP ADC driver header file + * + */ + +#ifndef __QPNP_ADC_H +#define __QPNP_ADC_H + +#include +#include +#include +#include +/** + * enum qpnp_vadc_channels - QPNP AMUX arbiter channels + */ +enum qpnp_vadc_channels { + USBIN = 0, + DCIN, + VCHG_SNS, + SPARE1_03, + USB_ID_MV, + VCOIN, + VBAT_SNS, + VSYS, + DIE_TEMP, + REF_625MV, + REF_125V, + CHG_TEMP, + SPARE1, + SPARE2, + GND_REF, + VDD_VADC, + P_MUX1_1_1, + P_MUX2_1_1, + P_MUX3_1_1, + P_MUX4_1_1, + P_MUX5_1_1, + P_MUX6_1_1, + P_MUX7_1_1, + P_MUX8_1_1, + P_MUX9_1_1, + P_MUX10_1_1, + P_MUX11_1_1, + P_MUX12_1_1, + P_MUX13_1_1, + P_MUX14_1_1, + P_MUX15_1_1, + P_MUX16_1_1, + P_MUX1_1_3, + P_MUX2_1_3, + P_MUX3_1_3, + P_MUX4_1_3, + P_MUX5_1_3, + P_MUX6_1_3, + P_MUX7_1_3, + P_MUX8_1_3, + P_MUX9_1_3, + P_MUX10_1_3, + P_MUX11_1_3, + P_MUX12_1_3, + P_MUX13_1_3, + P_MUX14_1_3, + P_MUX15_1_3, + P_MUX16_1_3, + LR_MUX1_BATT_THERM, + LR_MUX2_BAT_ID, + LR_MUX3_XO_THERM, + LR_MUX4_AMUX_THM1, + LR_MUX5_AMUX_THM2, + LR_MUX6_AMUX_THM3, + LR_MUX7_HW_ID, + LR_MUX8_AMUX_THM4, + LR_MUX9_AMUX_THM5, + LR_MUX10_USB_ID_LV, + AMUX_PU1, + AMUX_PU2, + LR_MUX3_BUF_XO_THERM_BUF, + LR_MUX1_PU1_BAT_THERM = 112, + LR_MUX2_PU1_BAT_ID = 113, + LR_MUX3_PU1_XO_THERM = 114, + LR_MUX4_PU1_AMUX_THM1 = 115, + LR_MUX5_PU1_AMUX_THM2 = 116, + LR_MUX6_PU1_AMUX_THM3 = 117, + LR_MUX7_PU1_AMUX_HW_ID = 118, + LR_MUX8_PU1_AMUX_THM4 = 119, + LR_MUX9_PU1_AMUX_THM5 = 120, + LR_MUX10_PU1_AMUX_USB_ID_LV = 121, + LR_MUX3_BUF_PU1_XO_THERM_BUF = 124, + LR_MUX1_PU2_BAT_THERM = 176, + LR_MUX2_PU2_BAT_ID = 177, + LR_MUX3_PU2_XO_THERM = 178, + LR_MUX4_PU2_AMUX_THM1 = 179, + LR_MUX5_PU2_AMUX_THM2 = 180, + LR_MUX6_PU2_AMUX_THM3 = 181, + LR_MUX7_PU2_AMUX_HW_ID = 182, + LR_MUX8_PU2_AMUX_THM4 = 183, + LR_MUX9_PU2_AMUX_THM5 = 184, + LR_MUX10_PU2_AMUX_USB_ID_LV = 185, + LR_MUX3_BUF_PU2_XO_THERM_BUF = 188, + LR_MUX1_PU1_PU2_BAT_THERM = 240, + LR_MUX2_PU1_PU2_BAT_ID = 241, + 
LR_MUX3_PU1_PU2_XO_THERM = 242, + LR_MUX4_PU1_PU2_AMUX_THM1 = 243, + LR_MUX5_PU1_PU2_AMUX_THM2 = 244, + LR_MUX6_PU1_PU2_AMUX_THM3 = 245, + LR_MUX7_PU1_PU2_AMUX_HW_ID = 246, + LR_MUX8_PU1_PU2_AMUX_THM4 = 247, + LR_MUX9_PU1_PU2_AMUX_THM5 = 248, + LR_MUX10_PU1_PU2_AMUX_USB_ID_LV = 249, + LR_MUX3_BUF_PU1_PU2_XO_THERM_BUF = 252, + ALL_OFF = 255, + ADC_MAX_NUM = 0xffff, +}; + +#define QPNP_ADC_625_UV 625000 +#define QPNP_MAX_PROP_NAME_LEN 32 + +/* Structure device for qpnp adc tm */ +struct qpnp_adc_tm_chip; + +/** + * enum qpnp_adc_decimation_type - Sampling rate supported. + * %DECIMATION_TYPE1: 512 + * %DECIMATION_TYPE2: 1K + * %DECIMATION_TYPE3: 2K + * %DECIMATION_TYPE4: 4k + * %DECIMATION_NONE: Do not use this Sampling type. + * + * The Sampling rate is specific to each channel of the QPNP ADC arbiter. + */ +enum qpnp_adc_decimation_type { + DECIMATION_TYPE1 = 0, + DECIMATION_TYPE2, + DECIMATION_TYPE3, + DECIMATION_TYPE4, + DECIMATION_NONE = 0xff, +}; + +/** + * enum qpnp_adc_calib_type - QPNP ADC Calibration type. + * %ADC_CALIB_ABSOLUTE: Use 625mV and 1.25V reference channels. + * %ADC_CALIB_RATIOMETRIC: Use reference Voltage/GND. + * %ADC_CALIB_CONFIG_NONE: Do not use this calibration type. + * + * enum qpnp_adc_cal_sel - Selects the calibration type that is applied + * on the corresponding channel measurement after + * the ADC data is read. + * Use the input reference voltage depending on the calibration type + * to calcluate the offset and gain parameters. The calibration is + * specific to each channel of the QPNP ADC. + */ +enum qpnp_adc_calib_type { + CALIB_ABSOLUTE = 0, + CALIB_RATIOMETRIC, + CALIB_NONE, +}; + +/** + * enum qpnp_adc_channel_scaling_param - pre-scaling AMUX ratio. + * %CHAN_PATH_SCALING0: ratio of {1, 1} + * %CHAN_PATH_SCALING1: ratio of {1, 3} + * %CHAN_PATH_SCALING2: ratio of {1, 4} + * %CHAN_PATH_SCALING3: ratio of {1, 6} + * %CHAN_PATH_SCALING4: ratio of {1, 20} + * %CHAN_PATH_SCALING5: ratio of {1, 8} + * %CHAN_PATH_SCALING6: ratio of {10, 81} The actual ratio is (1/8.1). + * %CHAN_PATH_SCALING7: ratio of {1, 10} + * %CHAN_PATH_SCALING8: ratio of {1, 16} + * %CHAN_PATH_NONE: Do not use this pre-scaling ratio type. + * + * The pre-scaling is applied for signals to be within the voltage range + * of the ADC. + */ +enum qpnp_adc_channel_scaling_param { + PATH_SCALING0 = 0, + PATH_SCALING1, + PATH_SCALING2, + PATH_SCALING3, + PATH_SCALING4, + PATH_SCALING5, + PATH_SCALING6, + PATH_SCALING7, + PATH_SCALING8, + PATH_SCALING_NONE, +}; + +/** + * enum qpnp_adc_tm_rscale_fn_type - Scaling function used to convert the + * channels input voltage/temperature to corresponding ADC code that is + * applied for thresholds. Check the corresponding channels scaling to + * determine the appropriate temperature/voltage units that are passed + * to the scaling function. Example battery follows the power supply + * framework that needs its units to be in decidegreesC so it passes + * deci-degreesC. PA_THERM clients pass the temperature in degrees. + * The order below should match the one in the driver for + * adc_tm_rscale_fn[]. + */ +enum qpnp_adc_tm_rscale_fn_type { + SCALE_R_VBATT = 0, + SCALE_RBATT_THERM, + SCALE_R_USB_ID, + SCALE_RPMIC_THERM, + SCALE_R_SMB_BATT_THERM, + SCALE_R_ABSOLUTE, + SCALE_QRD_SKUH_RBATT_THERM, + SCALE_QRD_SKUT1_RBATT_THERM, + SCALE_QRD_215_RBATT_THERM, + SCALE_RSCALE_NONE, +}; + +/** + * enum qpnp_adc_fast_avg_ctl - Provides ability to obtain single result + * from the ADC that is an average of multiple measurement + * samples. 
Select number of samples for use in fast + * average mode (i.e. 2 ^ value). + * %ADC_FAST_AVG_SAMPLE_1: 0x0 = 1 + * %ADC_FAST_AVG_SAMPLE_2: 0x1 = 2 + * %ADC_FAST_AVG_SAMPLE_4: 0x2 = 4 + * %ADC_FAST_AVG_SAMPLE_8: 0x3 = 8 + * %ADC_FAST_AVG_SAMPLE_16: 0x4 = 16 + * %ADC_FAST_AVG_SAMPLE_32: 0x5 = 32 + * %ADC_FAST_AVG_SAMPLE_64: 0x6 = 64 + * %ADC_FAST_AVG_SAMPLE_128: 0x7 = 128 + * %ADC_FAST_AVG_SAMPLE_256: 0x8 = 256 + * %ADC_FAST_AVG_SAMPLE_512: 0x9 = 512 + */ +enum qpnp_adc_fast_avg_ctl { + ADC_FAST_AVG_SAMPLE_1 = 0, + ADC_FAST_AVG_SAMPLE_2, + ADC_FAST_AVG_SAMPLE_4, + ADC_FAST_AVG_SAMPLE_8, + ADC_FAST_AVG_SAMPLE_16, + ADC_FAST_AVG_SAMPLE_32, + ADC_FAST_AVG_SAMPLE_64, + ADC_FAST_AVG_SAMPLE_128, + ADC_FAST_AVG_SAMPLE_256, + ADC_FAST_AVG_SAMPLE_512, + ADC_FAST_AVG_SAMPLE_NONE, +}; + +/** + * enum qpnp_adc_hw_settle_time - Time between AMUX getting configured and + * the ADC starting conversion. Delay = 100us * value for + * value < 11 and 2ms * (value - 10) otherwise. + * %ADC_CHANNEL_HW_SETTLE_DELAY_0US: 0us + * %ADC_CHANNEL_HW_SETTLE_DELAY_100US: 100us + * %ADC_CHANNEL_HW_SETTLE_DELAY_200US: 200us + * %ADC_CHANNEL_HW_SETTLE_DELAY_300US: 300us + * %ADC_CHANNEL_HW_SETTLE_DELAY_400US: 400us + * %ADC_CHANNEL_HW_SETTLE_DELAY_500US: 500us + * %ADC_CHANNEL_HW_SETTLE_DELAY_600US: 600us + * %ADC_CHANNEL_HW_SETTLE_DELAY_700US: 700us + * %ADC_CHANNEL_HW_SETTLE_DELAY_800US: 800us + * %ADC_CHANNEL_HW_SETTLE_DELAY_900US: 900us + * %ADC_CHANNEL_HW_SETTLE_DELAY_1MS: 1ms + * %ADC_CHANNEL_HW_SETTLE_DELAY_2MS: 2ms + * %ADC_CHANNEL_HW_SETTLE_DELAY_4MS: 4ms + * %ADC_CHANNEL_HW_SETTLE_DELAY_6MS: 6ms + * %ADC_CHANNEL_HW_SETTLE_DELAY_8MS: 8ms + * %ADC_CHANNEL_HW_SETTLE_DELAY_10MS: 10ms + * %ADC_CHANNEL_HW_SETTLE_NONE + */ +enum qpnp_adc_hw_settle_time { + ADC_CHANNEL_HW_SETTLE_DELAY_0US = 0, + ADC_CHANNEL_HW_SETTLE_DELAY_100US, + ADC_CHANNEL_HW_SETTLE_DELAY_2000US, + ADC_CHANNEL_HW_SETTLE_DELAY_300US, + ADC_CHANNEL_HW_SETTLE_DELAY_400US, + ADC_CHANNEL_HW_SETTLE_DELAY_500US, + ADC_CHANNEL_HW_SETTLE_DELAY_600US, + ADC_CHANNEL_HW_SETTLE_DELAY_700US, + ADC_CHANNEL_HW_SETTLE_DELAY_800US, + ADC_CHANNEL_HW_SETTLE_DELAY_900US, + ADC_CHANNEL_HW_SETTLE_DELAY_1MS, + ADC_CHANNEL_HW_SETTLE_DELAY_2MS, + ADC_CHANNEL_HW_SETTLE_DELAY_4MS, + ADC_CHANNEL_HW_SETTLE_DELAY_6MS, + ADC_CHANNEL_HW_SETTLE_DELAY_8MS, + ADC_CHANNEL_HW_SETTLE_DELAY_10MS, + ADC_CHANNEL_HW_SETTLE_NONE, +}; + +/** + * enum qpnp_adc_dec_ratio_sel - Selects the decimation ratio of the ADC. + * Support values are 256, 512 and 1024. + */ +enum qpnp_vadc_dec_ratio_sel { + ADC_DEC_RATIO_256 = 0, + ADC_DEC_RATIO_512, + ADC_DEC_RATIO_1024, + ADC_DEC_RATIO_NONE, +}; + +/** + * enum qpnp_vadc_mode_sel - Selects the basic mode of operation. + * - The normal mode is used for single measurement. + * - The Conversion sequencer is used to trigger an + * ADC read when a HW trigger is selected. + * - The measurement interval performs a single or + * continuous measurement at a specified interval/delay. + * %ADC_OP_NORMAL_MODE : Normal mode used for single measurement. + * %ADC_OP_CONVERSION_SEQUENCER : Conversion sequencer used to trigger + * an ADC read on a HW supported trigger. + * Refer to enum qpnp_vadc_trigger for + * supported HW triggers. + * %ADC_OP_MEASUREMENT_INTERVAL : The measurement interval performs a + * single or continuous measurement after a specified delay. + * For delay look at qpnp_adc_meas_timer. 
+ */ +enum qpnp_vadc_mode_sel { + ADC_OP_NORMAL_MODE = 0, + ADC_OP_CONVERSION_SEQUENCER, + ADC_OP_MEASUREMENT_INTERVAL, + ADC_OP_MODE_NONE, +}; + +/** + * enum qpnp_adc_meas_timer_1 - Selects the measurement interval time. + * If value = 0, use 0ms else use 2^(value + 4)/ 32768). + * The timer period is used by the USB_ID. Do not set a polling rate + * greater than 1 second on PMIC 2.0. The max polling rate on the PMIC 2.0 + * appears to be limited to 1 second. + * %ADC_MEAS_INTERVAL_0MS : 0ms + * %ADC_MEAS_INTERVAL_1P0MS : 1ms + * %ADC_MEAS_INTERVAL_2P0MS : 2ms + * %ADC_MEAS_INTERVAL_3P9MS : 3.9ms + * %ADC_MEAS_INTERVAL_7P8MS : 7.8ms + * %ADC_MEAS_INTERVAL_15P6MS : 15.6ms + * %ADC_MEAS_INTERVAL_31P3MS : 31.3ms + * %ADC_MEAS_INTERVAL_62P5MS : 62.5ms + * %ADC_MEAS_INTERVAL_125MS : 125ms + * %ADC_MEAS_INTERVAL_250MS : 250ms + * %ADC_MEAS_INTERVAL_500MS : 500ms + * %ADC_MEAS_INTERVAL_1S : 1seconds + * %ADC_MEAS_INTERVAL_2S : 2seconds + * %ADC_MEAS_INTERVAL_4S : 4seconds + * %ADC_MEAS_INTERVAL_8S : 8seconds + * %ADC_MEAS_INTERVAL_16S: 16seconds + */ +enum qpnp_adc_meas_timer_1 { + ADC_MEAS1_INTERVAL_0MS = 0, + ADC_MEAS1_INTERVAL_1P0MS, + ADC_MEAS1_INTERVAL_2P0MS, + ADC_MEAS1_INTERVAL_3P9MS, + ADC_MEAS1_INTERVAL_7P8MS, + ADC_MEAS1_INTERVAL_15P6MS, + ADC_MEAS1_INTERVAL_31P3MS, + ADC_MEAS1_INTERVAL_62P5MS, + ADC_MEAS1_INTERVAL_125MS, + ADC_MEAS1_INTERVAL_250MS, + ADC_MEAS1_INTERVAL_500MS, + ADC_MEAS1_INTERVAL_1S, + ADC_MEAS1_INTERVAL_2S, + ADC_MEAS1_INTERVAL_4S, + ADC_MEAS1_INTERVAL_8S, + ADC_MEAS1_INTERVAL_16S, + ADC_MEAS1_INTERVAL_NONE, +}; + +/** + * enum qpnp_adc_meas_timer_2 - Selects the measurement interval time. + * If value = 0, use 0ms else use 2^(value + 4)/ 32768). + * The timer period is used by the batt_therm. Do not set a polling rate + * greater than 1 second on PMIC 2.0. The max polling rate on the PMIC 2.0 + * appears to be limited to 1 second. + * %ADC_MEAS_INTERVAL_0MS : 0ms + * %ADC_MEAS_INTERVAL_100MS : 100ms + * %ADC_MEAS_INTERVAL_200MS : 200ms + * %ADC_MEAS_INTERVAL_300MS : 300ms + * %ADC_MEAS_INTERVAL_400MS : 400ms + * %ADC_MEAS_INTERVAL_500MS : 500ms + * %ADC_MEAS_INTERVAL_600MS : 600ms + * %ADC_MEAS_INTERVAL_700MS : 700ms + * %ADC_MEAS_INTERVAL_800MS : 800ms + * %ADC_MEAS_INTERVAL_900MS : 900ms + * %ADC_MEAS_INTERVAL_1S: 1seconds + * %ADC_MEAS_INTERVAL_1P1S: 1.1seconds + * %ADC_MEAS_INTERVAL_1P2S: 1.2seconds + * %ADC_MEAS_INTERVAL_1P3S: 1.3seconds + * %ADC_MEAS_INTERVAL_1P4S: 1.4seconds + * %ADC_MEAS_INTERVAL_1P5S: 1.5seconds + */ +enum qpnp_adc_meas_timer_2 { + ADC_MEAS2_INTERVAL_0MS = 0, + ADC_MEAS2_INTERVAL_100MS, + ADC_MEAS2_INTERVAL_200MS, + ADC_MEAS2_INTERVAL_300MS, + ADC_MEAS2_INTERVAL_400MS, + ADC_MEAS2_INTERVAL_500MS, + ADC_MEAS2_INTERVAL_600MS, + ADC_MEAS2_INTERVAL_700MS, + ADC_MEAS2_INTERVAL_800MS, + ADC_MEAS2_INTERVAL_900MS, + ADC_MEAS2_INTERVAL_1S, + ADC_MEAS2_INTERVAL_1P1S, + ADC_MEAS2_INTERVAL_1P2S, + ADC_MEAS2_INTERVAL_1P3S, + ADC_MEAS2_INTERVAL_1P4S, + ADC_MEAS2_INTERVAL_1P5S, + ADC_MEAS2_INTERVAL_NONE, +}; + +/** + * enum qpnp_adc_meas_timer_3 - Selects the measurement interval time. + * If value = 0, use 0ms else use 2^(value + 4)/ 32768). + * Do not set a polling rate greater than 1 second on PMIC 2.0. + * The max polling rate on the PMIC 2.0 appears to be limited to 1 second. 
+ * %ADC_MEAS_INTERVAL_0MS : 0ms + * %ADC_MEAS_INTERVAL_1S : 1seconds + * %ADC_MEAS_INTERVAL_2S : 2seconds + * %ADC_MEAS_INTERVAL_3S : 3seconds + * %ADC_MEAS_INTERVAL_4S : 4seconds + * %ADC_MEAS_INTERVAL_5S : 5seconds + * %ADC_MEAS_INTERVAL_6S: 6seconds + * %ADC_MEAS_INTERVAL_7S : 7seconds + * %ADC_MEAS_INTERVAL_8S : 8seconds + * %ADC_MEAS_INTERVAL_9S : 9seconds + * %ADC_MEAS_INTERVAL_10S : 10seconds + * %ADC_MEAS_INTERVAL_11S : 11seconds + * %ADC_MEAS_INTERVAL_12S : 12seconds + * %ADC_MEAS_INTERVAL_13S : 13seconds + * %ADC_MEAS_INTERVAL_14S : 14seconds + * %ADC_MEAS_INTERVAL_15S : 15seconds + */ +enum qpnp_adc_meas_timer_3 { + ADC_MEAS3_INTERVAL_0S = 0, + ADC_MEAS3_INTERVAL_1S, + ADC_MEAS3_INTERVAL_2S, + ADC_MEAS3_INTERVAL_3S, + ADC_MEAS3_INTERVAL_4S, + ADC_MEAS3_INTERVAL_5S, + ADC_MEAS3_INTERVAL_6S, + ADC_MEAS3_INTERVAL_7S, + ADC_MEAS3_INTERVAL_8S, + ADC_MEAS3_INTERVAL_9S, + ADC_MEAS3_INTERVAL_10S, + ADC_MEAS3_INTERVAL_11S, + ADC_MEAS3_INTERVAL_12S, + ADC_MEAS3_INTERVAL_13S, + ADC_MEAS3_INTERVAL_14S, + ADC_MEAS3_INTERVAL_15S, + ADC_MEAS3_INTERVAL_NONE, +}; + +/** + * enum qpnp_adc_meas_timer_select - Selects the timer for which + * the appropriate polling frequency is set. + * %ADC_MEAS_TIMER_SELECT1 - Select this timer for measurement polling interval + * for 1 second. + * %ADC_MEAS_TIMER_SELECT2 - Select this timer for 500ms measurement interval. + * %ADC_MEAS_TIMER_SELECT3 - Select this timer for 5 second interval. + */ +enum qpnp_adc_meas_timer_select { + ADC_MEAS_TIMER_SELECT1 = 0, + ADC_MEAS_TIMER_SELECT2, + ADC_MEAS_TIMER_SELECT3, + ADC_MEAS_TIMER_NUM, +}; + +/** + * Channel selection registers for each of the configurable measurements + * Channels allotment is set at device config for a channel. + * The USB_ID, BATT_THERM, PMIC_THERM and VBAT channels are used by the + * kernel space USB, Battery and IADC drivers. + * The other 3 channels are configurable for use by userspace clients. + */ +enum qpnp_adc_tm_channel_select { + QPNP_ADC_TM_M0_ADC_CH_SEL_CTL = 0x48, + QPNP_ADC_TM_M1_ADC_CH_SEL_CTL = 0x68, + QPNP_ADC_TM_M2_ADC_CH_SEL_CTL = 0x70, + QPNP_ADC_TM_M3_ADC_CH_SEL_CTL = 0x78, + QPNP_ADC_TM_M4_ADC_CH_SEL_CTL = 0x80, + QPNP_ADC_TM_M5_ADC_CH_SEL_CTL = 0x88, + QPNP_ADC_TM_M6_ADC_CH_SEL_CTL = 0x90, + QPNP_ADC_TM_M7_ADC_CH_SEL_CTL = 0x98, + QPNP_ADC_TM_CH_SELECT_NONE +}; + +/** + * Channel index for the corresponding index to qpnp_adc_tm_channel_selec + */ +enum qpnp_adc_tm_channel_num { + QPNP_ADC_TM_CHAN0 = 0, + QPNP_ADC_TM_CHAN1, + QPNP_ADC_TM_CHAN2, + QPNP_ADC_TM_CHAN3, + QPNP_ADC_TM_CHAN4, + QPNP_ADC_TM_CHAN5, + QPNP_ADC_TM_CHAN6, + QPNP_ADC_TM_CHAN7, + QPNP_ADC_TM_CHAN_NONE +}; + +/** + * struct qpnp_adc_tm_config - Represent ADC Thermal Monitor configuration. + * @channel: ADC channel for which thermal monitoring is requested. + * @adc_code: The pre-calibrated digital output of a given ADC releative to the + * ADC reference. + * @high_thr_temp: Temperature at which high threshold notification is required. + * @low_thr_temp: Temperature at which low threshold notification is required. + * @low_thr_voltage : Low threshold voltage ADC code used for reverse + * calibration. + * @high_thr_voltage: High threshold voltage ADC code used for reverse + * calibration. + */ +struct qpnp_adc_tm_config { + int channel; + int adc_code; + int high_thr_temp; + int low_thr_temp; + int64_t high_thr_voltage; + int64_t low_thr_voltage; +}; + +/** + * enum qpnp_adc_tm_trip_type - Type for setting high/low temperature/voltage. + * %ADC_TM_TRIP_HIGH_WARM: Setting high temperature. 
Note that high temperature + * corresponds to low voltage. Driver handles this case + * appropriately to set high/low thresholds for voltage. + * threshold. + * %ADC_TM_TRIP_LOW_COOL: Setting low temperature. + */ +enum qpnp_adc_tm_trip_type { + ADC_TM_TRIP_HIGH_WARM = 0, + ADC_TM_TRIP_LOW_COOL, + ADC_TM_TRIP_NUM, +}; + +#define ADC_TM_WRITABLE_TRIPS_MASK ((1 << ADC_TM_TRIP_NUM) - 1) + +/** + * enum qpnp_tm_state - This lets the client know whether the threshold + * that was crossed was high/low. + * %ADC_TM_HIGH_STATE: Client is notified of crossing the requested high + * voltage threshold. + * %ADC_TM_COOL_STATE: Client is notified of crossing the requested cool + * temperature threshold. + * %ADC_TM_LOW_STATE: Client is notified of crossing the requested low + * voltage threshold. + * %ADC_TM_WARM_STATE: Client is notified of crossing the requested high + * temperature threshold. + */ +enum qpnp_tm_state { + ADC_TM_HIGH_STATE = 0, + ADC_TM_COOL_STATE = ADC_TM_HIGH_STATE, + ADC_TM_LOW_STATE, + ADC_TM_WARM_STATE = ADC_TM_LOW_STATE, + ADC_TM_STATE_NUM, +}; + +/** + * enum qpnp_state_request - Request to enable/disable the corresponding + * high/low voltage/temperature thresholds. + * %ADC_TM_HIGH_THR_ENABLE: Enable high voltage threshold. + * %ADC_TM_COOL_THR_ENABLE = Enables cool temperature threshold. + * %ADC_TM_LOW_THR_ENABLE: Enable low voltage/temperature threshold. + * %ADC_TM_WARM_THR_ENABLE = Enables warm temperature threshold. + * %ADC_TM_HIGH_LOW_THR_ENABLE: Enable high and low voltage/temperature + * threshold. + * %ADC_TM_HIGH_THR_DISABLE: Disable high voltage/temperature threshold. + * %ADC_TM_COOL_THR_ENABLE = Disables cool temperature threshold. + * %ADC_TM_LOW_THR_DISABLE: Disable low voltage/temperature threshold. + * %ADC_TM_WARM_THR_ENABLE = Disables warm temperature threshold. + * %ADC_TM_HIGH_THR_DISABLE: Disable high and low voltage/temperature + * threshold. + */ +enum qpnp_state_request { + ADC_TM_HIGH_THR_ENABLE = 0, + ADC_TM_COOL_THR_ENABLE = ADC_TM_HIGH_THR_ENABLE, + ADC_TM_LOW_THR_ENABLE, + ADC_TM_WARM_THR_ENABLE = ADC_TM_LOW_THR_ENABLE, + ADC_TM_HIGH_LOW_THR_ENABLE, + ADC_TM_HIGH_THR_DISABLE, + ADC_TM_COOL_THR_DISABLE = ADC_TM_HIGH_THR_DISABLE, + ADC_TM_LOW_THR_DISABLE, + ADC_TM_WARM_THR_DISABLE = ADC_TM_LOW_THR_DISABLE, + ADC_TM_HIGH_LOW_THR_DISABLE, + ADC_TM_THR_NUM, +}; + +/** + * struct qpnp_adc_tm_btm_param - Represent Battery temperature threshold + * monitoring configuration. + * @high_temp: High temperature threshold for which notification is requested. + * @low_temp: Low temperature threshold for which notification is requested. + * @high_thr_voltage: High voltage for which notification is requested. + * @low_thr_voltage: Low voltage for which notification is requested. + * @state_request: Enable/disable the corresponding high and low temperature + * thresholds. + * @timer_interval1: Select polling rate from qpnp_adc_meas_timer_1 type. + * @timer_interval2: Select polling rate from qpnp_adc_meas_timer_2 type. + * @timer_interval3: Select polling rate from qpnp_adc_meas_timer_3 type. + * @btmid_ctx: A context of void type. + * @threshold_notification: Notification callback once threshold are crossed. + * units to be used for High/Low temperature and voltage notification - + * This depends on the clients usage. Check the rscaling function + * for the appropriate channel nodes. + * @Batt therm clients temperature units is decidegreesCentigrate. + * @USB_ID inputs the voltage units in milli-volts. + * @PA_THERM inputs the units in degC. 
+ * @PMIC_THERM inputs the units in millidegC. + */ +struct qpnp_adc_tm_btm_param { + uint32_t full_scale_code; + int32_t high_temp; + int32_t low_temp; + int32_t high_thr; + int32_t low_thr; + int32_t gain_num; + int32_t gain_den; + enum qpnp_vadc_channels channel; + enum qpnp_state_request state_request; + enum qpnp_adc_meas_timer_1 timer_interval; + enum qpnp_adc_meas_timer_2 timer_interval2; + enum qpnp_adc_meas_timer_3 timer_interval3; + void *btm_ctx; + void (*threshold_notification)(enum qpnp_tm_state state, + void *ctx); +}; + +/** + * struct qpnp_vadc_linear_graph - Represent ADC characteristics. + * @dy: Numerator slope to calculate the gain. + * @dx: Denominator slope to calculate the gain. + * @adc_vref: A/D word of the voltage reference used for the channel. + * @adc_gnd: A/D word of the ground reference used for the channel. + * + * Each ADC device has different offset and gain parameters which are computed + * to calibrate the device. + */ +struct qpnp_vadc_linear_graph { + int64_t dy; + int64_t dx; + int64_t adc_vref; + int64_t adc_gnd; +}; + +/** + * struct qpnp_vadc_map_pt - Map the graph representation for ADC channel + * @x: Represent the ADC digitized code. + * @y: Represent the physical data which can be temperature, voltage, + * resistance. + */ +struct qpnp_vadc_map_pt { + int32_t x; + int32_t y; +}; + +/** + * struct qpnp_vadc_scaling_ratio - Represent scaling ratio for adc input. + * @num: Numerator scaling parameter. + * @den: Denominator scaling parameter. + */ +struct qpnp_vadc_scaling_ratio { + int32_t num; + int32_t den; +}; + +/** + * struct qpnp_adc_properties - Represent the ADC properties. + * @adc_reference: Reference voltage for QPNP ADC. + * @full_scale_code: Full scale value with intrinsic offset removed. + * @biploar: Polarity for QPNP ADC. + */ +struct qpnp_adc_properties { + uint32_t adc_vdd_reference; + uint32_t full_scale_code; + bool bipolar; +}; + +/** + * struct qpnp_vadc_chan_properties - Represent channel properties of the ADC. + * @offset_gain_numerator: The inverse numerator of the gain applied to the + * input channel. + * @offset_gain_denominator: The inverse denominator of the gain applied to the + * input channel. + * @high_thr: High threshold voltage that is requested to be set. + * @low_thr: Low threshold voltage that is requested to be set. + * @timer_select: Chosen from one of the 3 timers to set the polling rate for + * the VADC_BTM channel. + * @meas_interval1: Polling rate to set for timer 1. + * @meas_interval2: Polling rate to set for timer 2. + * @tm_channel_select: BTM channel number for the 5 VADC_BTM channels. + * @state_request: User can select either enable or disable high/low or both + * activation levels based on the qpnp_state_request type. + * @adc_graph: ADC graph for the channel of struct type qpnp_adc_linear_graph. + */ +struct qpnp_vadc_chan_properties { + uint32_t offset_gain_numerator; + uint32_t offset_gain_denominator; + uint32_t high_thr; + uint32_t low_thr; + enum qpnp_adc_meas_timer_select timer_select; + enum qpnp_adc_meas_timer_1 meas_interval1; + enum qpnp_adc_meas_timer_2 meas_interval2; + enum qpnp_adc_tm_channel_select tm_channel_select; + enum qpnp_state_request state_request; + enum qpnp_adc_calib_type calib_type; + struct qpnp_vadc_linear_graph adc_graph[CALIB_NONE]; +}; + +/** + * struct qpnp_adc_amux - AMUX properties for individual channel + * @name: Channel string name. + * @channel_num: Channel in integer used from qpnp_adc_channels. 
+ * @chan_path_prescaling: Channel scaling performed on the input signal. + * @adc_decimation: Sampling rate desired for the channel. + * adc_scale_fn: Scaling function to convert to the data meaningful for + * each individual channel whether it is voltage, current, + * temperature, etc and compensates the channel properties. + */ +struct qpnp_adc_amux { + char *name; + enum qpnp_vadc_channels channel_num; + enum qpnp_adc_channel_scaling_param chan_path_prescaling; + enum qpnp_adc_decimation_type adc_decimation; + enum qpnp_adc_tm_rscale_fn_type adc_scale_fn; + enum qpnp_adc_fast_avg_ctl fast_avg_setup; + enum qpnp_adc_hw_settle_time hw_settle_time; + enum qpnp_adc_calib_type calib_type; +}; + +/** + * struct qpnp_vadc_scaling_ratio + * + */ +static const struct qpnp_vadc_scaling_ratio qpnp_vadc_amux_scaling_ratio[] = { + {1, 1}, + {1, 3}, + {1, 4}, + {1, 6}, + {1, 20}, + {1, 8}, + {10, 81}, + {1, 10}, + {1, 16} +}; + +/** + * struct qpnp_adc_drv - QPNP ADC device structure. + * @spmi - spmi device for ADC peripheral. + * @offset - base offset for the ADC peripheral. + * @adc_prop - ADC properties specific to the ADC peripheral. + * @amux_prop - AMUX properties representing the ADC peripheral. + * @adc_channels - ADC channel properties for the ADC peripheral. + * @adc_irq_eoc - End of Conversion IRQ. + * @adc_irq_fifo_not_empty - Conversion sequencer request written + * to FIFO when not empty. + * @adc_irq_conv_seq_timeout - Conversion sequencer trigger timeout. + * @adc_high_thr_irq - Output higher than high threshold set for measurement. + * @adc_low_thr_irq - Output lower than low threshold set for measurement. + * @adc_lock - ADC lock for access to the peripheral. + * @adc_rslt_completion - ADC result notification after interrupt + * is received. + * @calib - Internal rsens calibration values for gain and offset. + */ +struct qpnp_adc_drv { + struct platform_device *pdev; + struct regmap *regmap; + uint16_t offset; + struct qpnp_adc_properties *adc_prop; + struct qpnp_adc_amux_properties *amux_prop; + struct qpnp_adc_amux *adc_channels; + int adc_irq_eoc; + int adc_irq_fifo_not_empty; + int adc_irq_conv_seq_timeout; + int adc_high_thr_irq; + int adc_low_thr_irq; + struct mutex adc_lock; +}; + +/** + * struct qpnp_adc_amux_properties - QPNP VADC amux channel property. + * @amux_channel - Refer to the qpnp_vadc_channel list. + * @decimation - Sampling rate supported for the channel. + * @mode_sel - The basic mode of operation. + * @hw_settle_time - The time between AMUX being configured and the + * start of conversion. + * @fast_avg_setup - Ability to provide single result from the ADC + * that is an average of multiple measurements. + * @calib_type - Used to store the calibration type for the channel + * absolute/ratiometric. + * @cal_val - Used to determine if fresh calibration value or timer + * updated calibration value is to be used. + * @chan_prop - Represent the channel properties of the ADC. 
+ */ +struct qpnp_adc_amux_properties { + uint32_t amux_channel; + uint32_t decimation; + uint32_t mode_sel; + uint32_t hw_settle_time; + uint32_t fast_avg_setup; + enum qpnp_adc_calib_type calib_type; + struct qpnp_vadc_chan_properties chan_prop[0]; +}; + +/* SW index's for PMIC type and version used by QPNP VADC and IADC */ +#define QPNP_REV_ID_8941_3_1 1 +#define QPNP_REV_ID_8026_1_0 2 +#define QPNP_REV_ID_8026_2_0 3 +#define QPNP_REV_ID_8110_1_0 4 +#define QPNP_REV_ID_8026_2_1 5 +#define QPNP_REV_ID_8110_2_0 6 +#define QPNP_REV_ID_8026_2_2 7 +#define QPNP_REV_ID_8941_3_0 8 +#define QPNP_REV_ID_8941_2_0 9 +#define QPNP_REV_ID_8916_1_0 10 +#define QPNP_REV_ID_8916_1_1 11 +#define QPNP_REV_ID_8916_2_0 12 +#define QPNP_REV_ID_8909_1_0 13 +#define QPNP_REV_ID_8909_1_1 14 +#define QPNP_REV_ID_PM8950_1_0 16 + + +struct qpnp_adc_tm_reverse_scale_fn { + int32_t (*chan)(struct qpnp_adc_drv *adc, + struct qpnp_adc_tm_btm_param *param, + uint32_t *low_threshold, uint32_t *high_threshold); +}; + +/* Public API */ +/** + * qpnp_adc_get_devicetree_data() - Abstracts the ADC devicetree data. + * @pdev: Platform device structure. + * @adc_qpnp: QPNP ADC device structure. + */ +int32_t qpnp_adc_get_devicetree_data(struct platform_device *pdev, + struct qpnp_adc_drv *adc_qpnp); + +/** + * qpnp_adc_qrd_215_btm_scaler() - Performs reverse calibration on the + * low/high temperature threshold values passed by the client. + * The function maps the temperature to voltage and applies + * ratiometric calibration on the voltage values for SKUT1 board. + * @adc: QPNP ADC device structure. + * @param: The input parameters that contain the low/high temperature + * values. + * @low_threshold: The low threshold value that needs to be updated with + * the above calibrated voltage value. + * @high_threshold: The low threshold value that needs to be updated with + * the above calibrated voltage value. + */ +int32_t qpnp_adc_qrd_215_btm_scaler(struct qpnp_adc_drv *adc, + struct qpnp_adc_tm_btm_param *param, + uint32_t *low_threshold, uint32_t *high_threshold); + +/** + * qpnp_adc_tm_scale_therm_voltage_pu2() - Performs reverse calibration + * and convert given temperature to voltage on supported + * thermistor channels using 100k pull-up. + * @adc: QPNP ADC device structure. + * @adc_prop: adc properties of the qpnp adc such as bit resolution, + * reference voltage. + * @param: The input temperature values. + */ +int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_adc_drv *adc, + const struct qpnp_adc_properties *adc_properties, + struct qpnp_adc_tm_config *param); + +/** + * qpnp_adc_usb_scaler() - Performs reverse calibration on the low/high + * voltage threshold values passed by the client. + * The function applies ratiometric calibration on the + * voltage values. + * @adc: QPNP ADC device structure. + * @param: The input parameters that contain the low/high voltage + * threshold values. + * @low_threshold: The low threshold value that needs to be updated with + * the above calibrated voltage value. + * @high_threshold: The low threshold value that needs to be updated with + * the above calibrated voltage value. + */ +int32_t qpnp_adc_usb_scaler(struct qpnp_adc_drv *adc, + struct qpnp_adc_tm_btm_param *param, + uint32_t *low_threshold, uint32_t *high_threshold); +/** + * qpnp_adc_vbatt_rscaler() - Performs reverse calibration on the low/high + * voltage threshold values passed by the client. + * The function applies ratiometric calibration on the + * voltage values. + * @adc: QPNP ADC device structure. 
+ * @param: The input parameters that contain the low/high voltage + * threshold values. + * @low_threshold: The low threshold value that needs to be updated with + * the above calibrated voltage value. + * @high_threshold: The low threshold value that needs to be updated with + * the above calibrated voltage value. + */ +int32_t qpnp_adc_vbatt_rscaler(struct qpnp_adc_drv *adc, + struct qpnp_adc_tm_btm_param *param, + uint32_t *low_threshold, uint32_t *high_threshold); +/** + * qpnp_adc_absolute_rthr() - Performs reverse calibration on the low/high + * voltage threshold values passed by the client. + * The function applies absolute calibration on the + * voltage values. + * @adc: QPNP ADC device structure. + * @param: The input parameters that contain the low/high voltage + * threshold values. + * @low_threshold: The low threshold value that needs to be updated with + * the above calibrated voltage value. + * @high_threshold: The low threshold value that needs to be updated with + * the above calibrated voltage value. + */ +int32_t qpnp_adc_absolute_rthr(struct qpnp_adc_drv *adc, + struct qpnp_adc_tm_btm_param *param, + uint32_t *low_threshold, uint32_t *high_threshold); +/** + * qpnp_adc_smb_btm_rscaler() - Performs reverse calibration on the low/high + * temperature threshold values passed by the client. + * The function maps the temperature to voltage and applies + * ratiometric calibration on the voltage values. + * @adc: QPNP ADC device structure. + * @param: The input parameters that contain the low/high temperature + * values. + * @low_threshold: The low threshold value that needs to be updated with + * the above calibrated voltage value. + * @high_threshold: The low threshold value that needs to be updated with + * the above calibrated voltage value. + */ +int32_t qpnp_adc_smb_btm_rscaler(struct qpnp_adc_drv *adc, + struct qpnp_adc_tm_btm_param *param, + uint32_t *low_threshold, uint32_t *high_threshold); + +/** + * qpnp_adc_get_revid_version() - Obtain the PMIC number and revision. + * @dev: Structure device node. + * returns internal mapped PMIC number and revision id. + */ +int qpnp_adc_get_revid_version(struct device *dev); + +/* Public API */ +/** + * qpnp_adc_tm_channel_measure() - Configures kernel clients a channel to + * monitor the corresponding ADC channel for threshold detection. + * Driver passes the high/low voltage threshold along + * with the notification callback once the set thresholds + * are crossed. + * @param: Structure pointer of qpnp_adc_tm_btm_param type. + * Clients pass the low/high temperature along with the threshold + * notification callback. + */ +int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_chip *chip, + struct qpnp_adc_tm_btm_param *param); +/** + * qpnp_adc_tm_disable_chan_meas() - Disables the monitoring of channel thats + * assigned for monitoring kernel clients. Disables the low/high + * threshold activation for the corresponding channel. + * @param: Structure pointer of qpnp_adc_tm_btm_param type. + * This is used to identify the channel for which the corresponding + * channels high/low threshold notification will be disabled. + */ +int32_t qpnp_adc_tm_disable_chan_meas(struct qpnp_adc_tm_chip *chip, + struct qpnp_adc_tm_btm_param *param); +/** + * qpnp_get_adc_tm() - Clients need to register with the adc_tm using the + * corresponding device instance it wants to read the channels + * from. Read the bindings document on how to pass the phandle + * for the corresponding adc_tm driver to register with. 
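As a rough illustration of the threshold-monitoring flow described above (a hypothetical client sketch; the demo_* names, channel choice and threshold values are assumptions, not part of this header), a kernel client fills a struct qpnp_adc_tm_btm_param with decidegree-Celsius thresholds and a notification callback, then hands it to qpnp_adc_tm_channel_measure():

/* Hypothetical battery-temperature monitoring client. */
static void demo_batt_temp_notify(enum qpnp_tm_state state, void *ctx)
{
	/* state is ADC_TM_WARM_STATE or ADC_TM_COOL_STATE depending on the trip. */
	pr_info("demo: battery temperature threshold crossed, state=%d\n", state);
}

static int demo_start_batt_temp_monitor(struct qpnp_adc_tm_chip *chip)
{
	struct qpnp_adc_tm_btm_param param = {
		.low_temp = 100,	/* 10.0 degC, units per the note above */
		.high_temp = 450,	/* 45.0 degC */
		.channel = LR_MUX1_PU2_BAT_THERM,
		.state_request = ADC_TM_HIGH_LOW_THR_ENABLE,
		.timer_interval = ADC_MEAS1_INTERVAL_1S,
		.threshold_notification = demo_batt_temp_notify,
	};

	return qpnp_adc_tm_channel_measure(chip, &param);
}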
+ * @name: Corresponding client's DT parser name. Read the DT bindings + * document on how to register with the vadc + * @struct qpnp_adc_tm_chip * - On success returns the vadc device structure + * pointer that needs to be used during an ADC TM request. + */ +struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev, const char *name); +#endif diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h index 02d7251e409c..c0cb0b33729b 100644 --- a/include/linux/qpnp/qpnp-revid.h +++ b/include/linux/qpnp/qpnp-revid.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved. */ #ifndef __QPNP_REVID @@ -202,6 +202,9 @@ /* PM8008 SUBTYPE */ #define PM8008_SUBTYPE 0x2C +/* PM8010 SUBTYPE */ +#define PM8010_SUBTYPE 0x41 + /* PMI8998 REV_ID */ #define PMI8998_V1P0_REV1 0x00 #define PMI8998_V1P0_REV2 0x00 diff --git a/include/linux/tty.h b/include/linux/tty.h index 83b2e68778c9..a75926ab316e 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -306,6 +306,10 @@ struct tty_struct { struct termiox *termiox; /* May be NULL for unsupported */ char name[64]; struct pid *pgrp; /* Protected by ctrl lock */ + /* + * Writes protected by both ctrl lock and legacy mutex, readers must use + * at least one of them. + */ struct pid *session; unsigned long flags; int count; diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h index c88146965449..7835eb1644a0 100644 --- a/include/linux/usb/msm_hsusb.h +++ b/include/linux/usb/msm_hsusb.h @@ -185,6 +185,7 @@ struct msm_otg { struct usb_phy phy; struct msm_otg_platform_data *pdata; struct platform_device *pdev; + struct mutex lock; int irq; int async_irq; int phy_irq; diff --git a/include/linux/wait.h b/include/linux/wait.h index 62aa6e0b3149..d134aef49078 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -203,6 +203,7 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr); #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL) #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL) #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1) +#define wake_up_sync(x) __wake_up_sync(x, TASK_NORMAL, 1) /* * Wakeup macros to be used to report events to the targets. diff --git a/include/media/radio-iris.h b/include/media/radio-iris.h index 3bf21608bec6..ed8dd5cb1d69 100644 --- a/include/media/radio-iris.h +++ b/include/media/radio-iris.h @@ -71,7 +71,7 @@ void radio_hci_event_packet(struct radio_hci_dev *hdev, struct sk_buff *skb); #undef FMDBG #ifdef FM_DEBUG -#define FMDBG(fmt, args...) pr_info("iris_radio: " fmt, ##args) +#define FMDBG(fmt, args...) pr_debug("iris_radio: " fmt, ##args) #else #define FMDBG(fmt, args...) #endif @@ -79,6 +79,8 @@ void radio_hci_event_packet(struct radio_hci_dev *hdev, struct sk_buff *skb); #undef FMDERR #define FMDERR(fmt, args...) pr_err("iris_radio: " fmt, ##args) +#define FM_INFO(fmt, args...) pr_info("iris_transport: " fmt, ##args) + /* HCI timeouts */ #define RADIO_HCI_TIMEOUT (10000) /* 10 seconds */ diff --git a/include/net/cnss2.h b/include/net/cnss2.h index d10ba56b9361..01756bdb0fd3 100644 --- a/include/net/cnss2.h +++ b/include/net/cnss2.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2016-2021, The Linux Foundation. 
All rights reserved. */ #ifndef _NET_CNSS2_H #define _NET_CNSS2_H @@ -8,6 +8,7 @@ #define CNSS_MAX_FILE_NAME 20 #define CNSS_MAX_TIMESTAMP_LEN 32 +#define CNSS_MAX_DEV_MEM_NUM 4 /* * Temporary change for compilation, will be removed @@ -52,6 +53,11 @@ struct cnss_device_version { u32 minor_version; }; +struct cnss_dev_mem_info { + u64 start; + u64 size; +}; + struct cnss_soc_info { void __iomem *va; phys_addr_t pa; @@ -62,6 +68,7 @@ struct cnss_soc_info { uint32_t fw_version; char fw_build_timestamp[CNSS_MAX_TIMESTAMP_LEN + 1]; struct cnss_device_version device_version; + struct cnss_dev_mem_info dev_mem_info[CNSS_MAX_DEV_MEM_NUM]; }; struct cnss_wlan_runtime_ops { @@ -76,6 +83,13 @@ enum cnss_driver_status { CNSS_RECOVERY, CNSS_FW_DOWN, CNSS_HANG_EVENT, + CNSS_BUS_EVENT, +}; + +enum cnss_bus_event_type { + BUS_EVENT_PCI_LINK_DOWN = 0, + + BUS_EVENT_INVALID = 0xFFFF, }; struct cnss_hang_event { @@ -83,6 +97,11 @@ struct cnss_hang_event { u16 hang_event_data_len; }; +struct cnss_bus_event { + enum cnss_bus_event_type etype; + void *event_data; +}; + struct cnss_uevent_data { enum cnss_driver_status status; void *data; diff --git a/include/net/tcp.h b/include/net/tcp.h index 1607c917f6bb..c2e25d887fbe 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1345,7 +1345,7 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt) rx_opt->num_sacks = 0; } -u32 tcp_default_init_rwnd(const struct sock *sk, u32 mss); +u32 tcp_default_init_rwnd(u32 mss); void tcp_cwnd_restart(struct sock *sk, s32 delta); static inline void tcp_slow_start_after_idle_check(struct sock *sk) diff --git a/include/soc/qcom/icnss2.h b/include/soc/qcom/icnss2.h index 940241e8d1f3..0a1406c03a1c 100644 --- a/include/soc/qcom/icnss2.h +++ b/include/soc/qcom/icnss2.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */ #ifndef _ICNSS_WLAN_H_ #define _ICNSS_WLAN_H_ @@ -191,4 +191,8 @@ extern int icnss_get_curr_therm_cdev_state(struct device *dev, unsigned long *thermal_state, int tcdev_id); extern int icnss_exit_power_save(struct device *dev); +extern int icnss_prevent_l1(struct device *dev); +extern void icnss_allow_l1(struct device *dev); +extern int icnss_get_mhi_state(struct device *dev); +extern int icnss_is_pci_ep_awake(struct device *dev); #endif /* _ICNSS_WLAN_H_ */ diff --git a/include/soc/qcom/memory_dump.h b/include/soc/qcom/memory_dump.h index bc84bc8d8ef8..d3f514add392 100644 --- a/include/soc/qcom/memory_dump.h +++ b/include/soc/qcom/memory_dump.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2012, 2014-2017, 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2012, 2014-2017, 2019, 2021, The Linux Foundation. All rights reserved. 
*/ #ifndef __MSM_MEMORY_DUMP_H @@ -79,6 +79,8 @@ enum msm_dump_data_ids { MSM_DUMP_DATA_TMC_ETF = 0xF0, MSM_DUMP_DATA_TMC_ETF_SWAO = 0xF1, MSM_DUMP_DATA_TMC_REG = 0x100, + MSM_DUMP_DATA_TMC_ETR_REG = 0x100, + MSM_DUMP_DATA_TMC_ETF_REG = 0x101, MSM_DUMP_DATA_TMC_ETF_SWAO_REG = 0x102, MSM_DUMP_DATA_LOG_BUF = 0x110, MSM_DUMP_DATA_LOG_BUF_FIRST_IDX = 0x111, @@ -113,11 +115,23 @@ struct msm_dump_entry { uint64_t addr; }; +struct dump_vaddr_entry { + uint32_t id; + void *dump_vaddr; + struct msm_dump_data *dump_data_vaddr; +}; + +struct msm_mem_dump_vaddr_tbl { + uint8_t num_node; + struct dump_vaddr_entry *entries; +}; + #ifdef CONFIG_QCOM_MEMORY_DUMP_V2 extern int msm_dump_data_register(enum msm_dump_table_ids id, struct msm_dump_entry *entry); extern int msm_dump_data_register_nominidump(enum msm_dump_table_ids id, struct msm_dump_entry *entry); +extern struct dump_vaddr_entry *get_msm_dump_ptr(enum msm_dump_data_ids id); #else static inline int msm_dump_data_register(enum msm_dump_table_ids id, struct msm_dump_entry *entry) diff --git a/include/soc/qcom/mpm.h b/include/soc/qcom/mpm.h index 8209ebef6849..22b2fe4fefdc 100644 --- a/include/soc/qcom/mpm.h +++ b/include/soc/qcom/mpm.h @@ -19,5 +19,6 @@ extern const struct mpm_pin mpm_scuba_gic_chip_data[]; extern const struct mpm_pin mpm_sdm660_gic_chip_data[]; extern const struct mpm_pin mpm_msm8937_gic_chip_data[]; extern const struct mpm_pin mpm_msm8953_gic_chip_data[]; +extern const struct mpm_pin mpm_khaje_gic_chip_data[]; #endif /* __QCOM_MPM_H__ */ diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h index 682380fc30f2..e477626332ba 100644 --- a/include/soc/qcom/socinfo.h +++ b/include/soc/qcom/socinfo.h @@ -62,6 +62,8 @@ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,bengal") #define early_machine_is_bengalp() \ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,bengalp") +#define early_machine_is_khaje() \ + of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,khaje") #define early_machine_is_lagoon() \ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,lagoon") #define early_machine_is_scuba() \ @@ -125,6 +127,7 @@ #define early_machine_is_orchid() 0 #define early_machine_is_bengal() 0 #define early_machine_is_bengalp() 0 +#define early_machine_is_khaje() 0 #define early_machine_is_lagoon() 0 #define early_machine_is_scuba() 0 #define early_machine_is_scubaiot() 0 @@ -172,6 +175,7 @@ enum msm_cpu { MSM_CPU_ORCHID, MSM_CPU_BENGAL, MSM_CPU_BENGALP, + MSM_CPU_KHAJE, MSM_CPU_LAGOON, MSM_CPU_SCUBA, MSM_CPU_SCUBAIOT, diff --git a/include/sound/soc.h b/include/sound/soc.h index 9c3e6f44b94a..ce863e68a540 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -1070,6 +1070,8 @@ struct snd_soc_card { struct mutex dapm_mutex; struct mutex dapm_power_mutex; + spinlock_t dpcm_lock; + bool instantiated; bool topology_shortname_created; diff --git a/include/uapi/asm-generic/ioctls.h b/include/uapi/asm-generic/ioctls.h index 44ccfd8818f2..6b14abd5180c 100644 --- a/include/uapi/asm-generic/ioctls.h +++ b/include/uapi/asm-generic/ioctls.h @@ -79,7 +79,7 @@ #define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */ #define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */ #define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */ -#define TIOCFAULT 0x544C /* Uart fault */ +#define TIOCFAULT 0x54EC /* Uart fault */ #define TIOCPMGET 0x544D /* PM get */ #define TIOCPMPUT 0x544E /* PM put */ #define TIOCPMACT 0x544F /* PM is active */ diff --git a/include/uapi/drm/msm_drm_pp.h 
b/include/uapi/drm/msm_drm_pp.h index 34a4833ecf78..74cb59e17e68 100644 --- a/include/uapi/drm/msm_drm_pp.h +++ b/include/uapi/drm/msm_drm_pp.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* - * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. */ #ifndef _MSM_DRM_PP_H_ @@ -573,4 +573,21 @@ struct drm_msm_ad4_manual_str_cfg { __u32 in_str; __u32 out_str; }; + +#define RC_DATA_SIZE_MAX 2720 +#define RC_CFG_SIZE_MAX 4 + +struct drm_msm_rc_mask_cfg { + __u64 flags; + __u32 cfg_param_01; + __u32 cfg_param_02; + __u32 cfg_param_03; + __u32 cfg_param_04[RC_CFG_SIZE_MAX]; + __u32 cfg_param_05[RC_CFG_SIZE_MAX]; + __u32 cfg_param_06[RC_CFG_SIZE_MAX]; + __u64 cfg_param_07; + __u32 cfg_param_08; + __u64 cfg_param_09[RC_DATA_SIZE_MAX]; +}; + #endif /* _MSM_DRM_PP_H_ */ diff --git a/include/uapi/drm/sde_drm.h b/include/uapi/drm/sde_drm.h index 12471580cc9a..e9d1ebe383de 100644 --- a/include/uapi/drm/sde_drm.h +++ b/include/uapi/drm/sde_drm.h @@ -489,4 +489,12 @@ struct sde_drm_roi_v1 { #define SDE_RECOVERY_CAPTURE 1 #define SDE_RECOVERY_HARD_RESET 2 +/* display format modifiers */ +/* + * QTI planar fsc Tile Format + * + * Refers to a tile variant of the planar format. + * Implementation may be platform and base-format specific. + */ +#define DRM_FORMAT_MOD_QCOM_FSC_TILE fourcc_mod_code(QCOM, 0x10) #endif /* _SDE_DRM_H_ */ diff --git a/include/uapi/linux/batterydata-interface.h b/include/uapi/linux/batterydata-interface.h new file mode 100644 index 000000000000..07cd900c0f4d --- /dev/null +++ b/include/uapi/linux/batterydata-interface.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +#ifndef __BATTERYDATA_LIB_H__ +#define __BATTERYDATA_LIB_H__ + +#include + +/** + * struct battery_params - Battery profile data to be exchanged. + * @soc: SOC (state of charge) of the battery + * @ocv_uv: OCV (open circuit voltage) of the battery + * @rbatt_sf: RBATT scaling factor + * @batt_temp: Battery temperature in deci-degree. + * @slope: Slope of the OCV-SOC curve. + * @fcc_mah: FCC (full charge capacity) of the battery. + */ +struct battery_params { + int soc; + int ocv_uv; + int rbatt_sf; + int batt_temp; + int slope; + int fcc_mah; +}; + +/* IOCTLs to query battery profile data */ +#define BPIOCXSOC _IOWR('B', 0x01, struct battery_params) /* SOC */ +#define BPIOCXRBATT _IOWR('B', 0x02, struct battery_params) /* RBATT SF */ +#define BPIOCXSLOPE _IOWR('B', 0x03, struct battery_params) /* SLOPE */ +#define BPIOCXFCC _IOWR('B', 0x04, struct battery_params) /* FCC */ + +#endif diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 4e7203ecacee..be6872b60b93 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -987,6 +987,7 @@ enum { #define RMNET_FLAGS_INGRESS_COALESCE (1U << 4) #define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 5) #define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 6) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV3 (1U << 7) enum { IFLA_RMNET_UNSPEC, diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h index 9b9cd6ed7907..5a6edc07e2b3 100644 --- a/include/uapi/linux/msm_kgsl.h +++ b/include/uapi/linux/msm_kgsl.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* - * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. 
*/ #ifndef _UAPI_MSM_KGSL_H @@ -122,6 +122,7 @@ /* Flags for GPU command sync points */ #define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0 #define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1 +#define KGSL_CMD_SYNCPOINT_TYPE_TIMELINE 2 /* --- Memory allocation flags --- */ @@ -1102,6 +1103,21 @@ struct kgsl_cmd_syncpoint_fence { int fd; }; +/* + * struct kgsl_cmd_syncpoint_timeline + * @timelines: Address of an array of &struct kgsl_timeline_val + * @count: Number of entries in @timelines + * @timelines_size: Size of each entry in @timelines + * + * Define a syncpoint for a number of timelines. This syncpoint will + * be satisfied when all of the specified timelines are signaled. + */ +struct kgsl_cmd_syncpoint_timeline { + __u64 timelines; + __u32 count; + __u32 timelines_size; +}; + /** * struct kgsl_cmd_syncpoint - Define a sync point for a command batch * @type: type of sync point defined here @@ -1670,4 +1686,199 @@ struct kgsl_gpu_sparse_command { #define IOCTL_KGSL_GPU_SPARSE_COMMAND \ _IOWR(KGSL_IOC_TYPE, 0x55, struct kgsl_gpu_sparse_command) +#define KGSL_GPU_AUX_COMMAND_TIMELINE (1 << 1) +/* Reuse the same flag that GPU COMMAND uses */ +#define KGSL_GPU_AUX_COMMAND_SYNC KGSL_CMDBATCH_SYNC + +/** + * struct kgsl_aux_command_generic - Container for an AUX command + * @priv: Pointer to the type specific buffer + * @size: Size of the type specific buffer + * @type: type of sync point defined here + * + * Describes a generic container for GPU aux commands. @priv is a user pointer + * to the command struct matching @type of size @size. + */ +struct kgsl_gpu_aux_command_generic { + __u64 priv; + __u64 size; + __u32 type; +/* private: Padding for 64 bit compatibility */ + __u32 padding; +}; + +/** + * struct kgsl_gpu_aux_command - Argument for IOCTL_KGSL_GPU_AUX_COMMAND + * @flags: flags for the object + * @cmdlist: List of &struct kgsl_gpu_aux_command_generic objects + * @cmd_size: Size of each entry in @cmdlist + * @numcmds: Number of entries in @cmdlist + * @synclist: List of &struct kgsl_command_syncpoint objects + * @syncsize: Size of each entry in @synclist + * @numsyncs: Number of entries in @synclist + * @context_id: ID of the context submtting the aux command + * @timestamp: Timestamp for the command submission + * + * Describe a GPU auxiliary command. Auxiliary commands are tasks that are not + * performed on hardware but can be queued like normal GPU commands. Like GPU + * commands AUX commands are assigned a timestamp and processed in order in the + * queue. They can also have standard sync objects attached. The only + * difference is that AUX commands usually perform some sort of administrative + * task in the CPU and are retired in the dispatcher. + * + * For bind operations flags must have one of the KGSL_GPU_AUX_COMMAND_* flags + * set. If sync objects are attached KGSL_GPU_AUX_COMMAND_SYNC must be set. + * @cmdlist points to an array of &struct kgsl_gpu_aux_command_generic structs + * which in turn will have a pointer to a specific command type. + * @numcmds is the number of commands in the list and @cmdsize is the size + * of each entity in @cmdlist. + * + * If KGSL_GPU_AUX_COMMAND_SYNC is specified @synclist will point to an array of + * &struct kgsl_command_syncpoint items in the same fashion as a GPU hardware + * command. @numsyncs and @syncsize describe the list. + * + * @context_id is the context that is submitting the command and @timestamp + * contains the timestamp for the operation. 
+ */ +struct kgsl_gpu_aux_command { + __u64 flags; + __u64 cmdlist; + __u32 cmdsize; + __u32 numcmds; + __u64 synclist; + __u32 syncsize; + __u32 numsyncs; + __u32 context_id; + __u32 timestamp; +}; + +#define IOCTL_KGSL_GPU_AUX_COMMAND \ + _IOWR(KGSL_IOC_TYPE, 0x57, struct kgsl_gpu_aux_command) + +/** + * struct kgsl_timeline_create - Argument for IOCTL_KGSL_TIMELINE_CREATE + * @seqno: Initial sequence number for the timeline + * @id: Timeline identifier [out] + * + * Create a new semaphore timeline and return the identifier in @id. + * The identifier is global for the device and can be used to + * identify the timeline in all subsequent commands. + */ +struct kgsl_timeline_create { + __u64 seqno; + __u32 id; +/* private: padding for 64 bit compatibility */ + __u32 padding; +}; + +#define IOCTL_KGSL_TIMELINE_CREATE \ + _IOWR(KGSL_IOC_TYPE, 0x58, struct kgsl_timeline_create) + +/** + * struct kgsl_timeline_val - A container to store a timeline/sequence number + * pair. + * @seqno: Sequence number to signal/query + * @timeline: The timeline identifier to signal/query + * + * A container to store a timeline/seqno pair used by the query and signal + * ioctls. + */ +struct kgsl_timeline_val { + __u64 seqno; + __u32 timeline; +/* private: padding for 64 bit compatibility */ + __u32 padding; +}; + +#define KGSL_TIMELINE_WAIT_ALL 1 +#define KGSL_TIMELINE_WAIT_ANY 2 + +/** + * struct kgsl_timeline_wait - Argument for IOCTL_KGSL_TIMELINE_WAIT + * @tv_sec: Number of seconds to wait for the signal + * @tv_nsec: Number of nanoseconds to wait for the signal + * @timelines: Address of an array of &struct kgsl_timeline_val entries + * @count: Number of entries in @timeline + * @timelines_size: Size of each entry in @timelines + * @flags: One of KGSL_TIMELINE_WAIT_ALL or KGSL_TIMELINE_WAIT_ANY + * + * Wait for the timelines listed in @timelines to be signaled. If @flags is + * equal to KGSL_TIMELINE_WAIT_ALL then wait for all timelines or if + * KGSL_TIMELINE_WAIT_ANY is specified then wait for any of the timelines to + * signal. @tv_sec and @tv_nsec indicates the number of seconds and nanoseconds + * that the process should be blocked waiting for the signal. + */ +struct kgsl_timeline_wait { + __s64 tv_sec; + __s64 tv_nsec; + __u64 timelines; + __u32 count; + __u32 timelines_size; + __u32 flags; +/* private: padding for 64 bit compatibility */ + __u32 padding; +}; + +#define IOCTL_KGSL_TIMELINE_WAIT \ + _IOW(KGSL_IOC_TYPE, 0x59, struct kgsl_timeline_wait) + +#define IOCTL_KGSL_TIMELINE_QUERY \ + _IOWR(KGSL_IOC_TYPE, 0x5A, struct kgsl_timeline_val) + +/** + * struct kgsl_timeline_signal - argument for IOCTL_KGSL_TIMELINE_SIGNAL + * @timelines: Address of an array of &struct kgsl_timeline_val entries + * @count: Number of entries in @timelines + * @timelines_size: Size of each entry in @timelines + * + * Signal an array of timelines of type @struct kgsl_timeline_val. + */ +struct kgsl_timeline_signal { + __u64 timelines; + __u32 count; + __u32 timelines_size; +}; + +#define IOCTL_KGSL_TIMELINE_SIGNAL \ + _IOW(KGSL_IOC_TYPE, 0x5B, struct kgsl_timeline_signal) + +/** + * struct kgsl_timeline_fence_get - argument for IOCTL_KGSL_TIMELINE_FENCE_GET + * @seqno: Sequence number for the fence + * @timeline: Timeline to create the fence on + * @handle: Contains the fence fd for a successful operation [out] + * + * Create a sync file descriptor for the seqnum on the timeline and return it in + * @handle. 
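To make the timeline flow concrete, here is a hedged userspace sketch (the /dev/kgsl-3d0 node path, the include path and all demo_* names are assumptions, not defined by this file): create a timeline, signal sequence number 1 on it, then block for up to one second waiting for that point.

/* Hypothetical userspace usage of the timeline ioctls declared in this header. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msm_kgsl.h>	/* assumed installed UAPI header path */

static int demo_timeline_roundtrip(void)
{
	struct kgsl_timeline_create create = { .seqno = 0 };
	struct kgsl_timeline_val val = { 0 };
	struct kgsl_timeline_signal sig = { 0 };
	struct kgsl_timeline_wait wait = { 0 };
	int ret, fd = open("/dev/kgsl-3d0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return -1;

	ret = ioctl(fd, IOCTL_KGSL_TIMELINE_CREATE, &create);
	if (ret)
		goto out;

	val.timeline = create.id;	/* identifier returned by the kernel */
	val.seqno = 1;

	sig.timelines = (__u64)(uintptr_t)&val;
	sig.count = 1;
	sig.timelines_size = sizeof(val);
	ret = ioctl(fd, IOCTL_KGSL_TIMELINE_SIGNAL, &sig);
	if (ret)
		goto out;

	wait.tv_sec = 1;
	wait.timelines = (__u64)(uintptr_t)&val;
	wait.count = 1;
	wait.timelines_size = sizeof(val);
	wait.flags = KGSL_TIMELINE_WAIT_ALL;
	ret = ioctl(fd, IOCTL_KGSL_TIMELINE_WAIT, &wait);	/* 0 once signaled */
out:
	close(fd);
	return ret;
}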
Can be polled and queried just like any other sync file descriptor + */ +struct kgsl_timeline_fence_get { + __u64 seqno; + __u32 timeline; + int handle; +}; + +#define IOCTL_KGSL_TIMELINE_FENCE_GET \ + _IOWR(KGSL_IOC_TYPE, 0x5C, struct kgsl_timeline_fence_get) +/** + * IOCTL_KGSL_TIMELINE_DESTROY takes a u32 identifier for the timeline to + * destroy + */ +#define IOCTL_KGSL_TIMELINE_DESTROY _IOW(KGSL_IOC_TYPE, 0x5D, __u32) + +/** + * struct kgsl_gpu_aux_command_timeline - An aux command for timeline signals + * @timelines: An array of &struct kgsl_timeline_val elements + * @count: The number of entries in @timelines + * @timelines_size: The size of each element in @timelines + * + * An aux command for timeline signals that can be pointed to by + * &struct kgsl_aux_command_generic when the type is + * KGSL_GPU_AUX_COMMAND_TIMELINE. + */ +struct kgsl_gpu_aux_command_timeline { + __u64 timelines; + __u32 count; + __u32 timelines_size; +}; + #endif /* _UAPI_MSM_KGSL_H */ diff --git a/include/uapi/linux/qbg-profile.h b/include/uapi/linux/qbg-profile.h new file mode 100644 index 000000000000..a30f4dbba9c6 --- /dev/null +++ b/include/uapi/linux/qbg-profile.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#ifndef __QBG_PROFILE_H__ +#define __QBG_PROFILE_H__ + +#define MAX_BP_LUT_ROWS 35 +#define MAX_BP_LUT_COLS 8 +#define MAX_PROFILE_NAME_LENGTH 256 + +enum profile_table_type { + CHARGE_TABLE = 0, + DISCHARGE_TABLE, +}; + +struct battery_data_table { + unsigned short int table[MAX_BP_LUT_ROWS][MAX_BP_LUT_COLS]; + int unit_conv_factor[MAX_BP_LUT_COLS]; + unsigned short int nrows; + unsigned short int ncols; +}; + +struct battery_config { + char bp_profile_name[MAX_PROFILE_NAME_LENGTH]; + int bp_batt_id; + int capacity; + int bp_checksum; + int soh_range_high; + int soh_range_low; + int normal_impedance; + int aged_impedance; + int normal_capacity; + int aged_capacity; + int recharge_soc_delta; + int recharge_vflt_delta; + int recharge_iterm; +}; + +struct battery_profile_table { + enum profile_table_type table_type; + int table_index; + struct battery_data_table *table; +}; + +/* IOCTLs to query battery profile data */ +/* Battery configuration */ +#define BPIOCXBP \ + _IOWR('B', 0x01, struct battery_config) +/* Battery profile table */ +#define BPIOCXBPTABLE \ + _IOWR('B', 0x02, struct battery_profile_table) + +#endif diff --git a/include/uapi/linux/qbg.h b/include/uapi/linux/qbg.h new file mode 100644 index 000000000000..d8e335be2f54 --- /dev/null +++ b/include/uapi/linux/qbg.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __QBG_H__ +#define __QBG_H__ + +#define MAX_FIFO_COUNT 36 +#define QBG_MAX_STEP_CHG_ENTRIES 6 + +enum QBG_STATE { + QBG_LPM, + QBG_MPM, + QBG_HPM, + QBG_FAST_CHAR, + QBG_PON_OCV, + QBG_STATE_MAX, +}; + +enum QBG_SDAM_DATA_OFFSET { + QBG_ACC0_OFFSET = 0, + QBG_ACC1_OFFSET = 2, + QBG_ACC2_OFFSET = 4, + QBG_TBAT_OFFSET = 6, + QBG_IBAT_OFFSET = 8, + QBG_VREF_OFFSET = 10, + QBG_DATA_TAG_OFFSET = 12, + QBG_QG_STS_OFFSET, + QBG_STS1_OFFSET, + QBG_STS2_OFFSET, + QBG_STS3_OFFSET, + QBG_ONE_FIFO_LENGTH, +}; + +enum qbg { + QBG_PARAM_SOC, + QBG_PARAM_BATT_SOC, + QBG_PARAM_SYS_SOC, + QBG_PARAM_ESR, + QBG_PARAM_OCV_UV, + QBG_PARAM_MAX_LOAD_NOW, + QBG_PARAM_MAX_LOAD_AVG, + QBG_PARAM_HOLD_SOC_100PCT, + QBG_PARAM_CHARGE_CYCLE_COUNT, + QBG_PARAM_LEARNED_CAPACITY, + QBG_PARAM_TTF_100MS, + QBG_PARAM_TTE_100MS, + QBG_PARAM_SOH, + QBG_PARAM_TBAT, + QBG_PARAM_SYS_SOC_HOLD_100PCT, + QBG_PARAM_JEITA_COOL_THRESHOLD, + QBG_PARAM_TOTAL_IMPEDANCE, + QBG_PARAM_ESSENTIAL_PARAM_REVID, + QBG_PARAM_FIFO_TIMESTAMP, + QBG_PARAM_MAX, +}; + +struct qbg_essential_params { + short int msoc; + short int cutoff_soc; + short int full_soc; + short int x0; + short int x1; + short int x2; + short int soh_r; + short int soh_c; + short int theta0; + short int theta1; + short int theta2; + short int i1full; + short int i2full; + short int i1cutoff; + short int i2cutoff; + short int syssoc; + int discharge_cycle_count; + int charge_cycle_count; + unsigned int rtc_time; + short int batt_therm; + unsigned short int ocv; +} __attribute__ ((__packed__)); + +struct fifo_data { + unsigned short int v1; + unsigned short int v2; + unsigned short int i; + unsigned short int tbat; + unsigned short int ibat; + unsigned short int vref; + char data_tag; + char qg_sts; + char sts1; + char sts2; + char sts3; +} __attribute__ ((__packed__)); + +struct k_fifo_data { + unsigned int v1; + unsigned int v2; + unsigned int i; + unsigned int tbat; + unsigned int ibat; + unsigned int vref; + unsigned int data_tag; + unsigned int qg_sts; + unsigned int sts1; + unsigned int sts2; + unsigned int sts3; +} __attribute__ ((__packed__)); + +struct qbg_config { + unsigned int batt_id; + unsigned int pon_ocv; + unsigned int pon_ibat; + unsigned int pon_tbat; + unsigned int pon_soc; + unsigned int float_volt_uv; + unsigned int fastchg_curr_ma; + unsigned int vbat_cutoff_mv; + unsigned int ibat_cutoff_ma; + unsigned int vph_min_mv; + unsigned int iterm_ma; + unsigned int rconn_mohm; + unsigned long current_time; + unsigned int sdam_batt_id; + unsigned int essential_param_revid; + unsigned long sample_time_us[QBG_STATE_MAX]; +} __attribute__ ((__packed__)); + +struct qbg_param { + unsigned int data; + _Bool valid; +}; + +struct qbg_kernel_data { + unsigned int seq_no; + unsigned int fifo_time; + unsigned int fifo_count; + struct k_fifo_data fifo[MAX_FIFO_COUNT]; + struct qbg_param param[QBG_PARAM_MAX]; +} __attribute__ ((__packed__)); + +struct qbg_user_data { + struct qbg_param param[QBG_PARAM_MAX]; +} __attribute__ ((__packed__)); + +struct range_data { + int low_threshold; + int high_threshold; + unsigned int value; +} __attribute__ ((__packed__)); + +struct ranges { + struct range_data data[QBG_MAX_STEP_CHG_ENTRIES]; + unsigned char range_count; + _Bool valid; +} __attribute__((__packed__)); + +struct qbg_step_chg_jeita_params { + int jeita_full_fv_10nv; + int jeita_full_iterm_10na; + int jeita_warm_adc_value; + int jeita_cool_adc_value; + int battery_beta; + int battery_therm_kohm; + struct ranges step_fcc_cfg; + struct ranges jeita_fcc_cfg; + struct 
ranges jeita_fv_cfg; + unsigned char ttf_calc_mode; +} __attribute__ ((__packed__)); + +/* IOCTLs to read & write QBG config and essential params */ +#define QBGIOCXCFG _IOR('B', 0x01, struct qbg_config) +#define QBGIOCXEPR _IOR('B', 0x02, struct qbg_essential_params) +#define QBGIOCXEPW _IOWR('B', 0x03, struct qbg_essential_params) +#define QBGIOCXSTEPCHGCFG \ + _IOWR('B', 0x04, struct qbg_step_chg_jeita_params) + +#endif diff --git a/include/uapi/linux/slatecom_interface.h b/include/uapi/linux/slatecom_interface.h new file mode 100644 index 000000000000..1caed8c42bf6 --- /dev/null +++ b/include/uapi/linux/slatecom_interface.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + */ +#ifndef LINUX_SLATECOM_INTERFACE_H +#define LINUX_SLATECOM_INTERFACE_H + +#include + +#define SLATECOM_REG_READ 0 +#define SLATECOM_AHB_READ 1 +#define SLATECOM_AHB_WRITE 2 +#define SLATECOM_SET_SPI_FREE 3 +#define SLATECOM_SET_SPI_BUSY 4 +#define SLATECOM_REG_WRITE 5 +#define SLATECOM_SOFT_RESET 6 +#define SLATECOM_MODEM_DOWN2_SLATE 7 +#define SLATECOM_TWM_EXIT 8 +#define SLATECOM_SLATE_APP_RUNNING 9 +#define SLATECOM_ADSP_DOWN2_SLATE 10 +#define SLATECOM_SLATE_WEAR_LOAD 11 +#define SLATECOM_SLATE_WEAR_UNLOAD 12 +#define EXCHANGE_CODE 'V' + +struct slate_ui_data { + __u64 __user write; + __u64 __user result; + __u32 slate_address; + __u32 cmd; + __u32 num_of_words; + __u8 __user *buffer; +}; + +enum slate_event_type { + SLATE_BEFORE_POWER_DOWN = 1, + SLATE_AFTER_POWER_DOWN, + SLATE_BEFORE_POWER_UP, + SLATE_AFTER_POWER_UP, + MODEM_BEFORE_POWER_DOWN, + MODEM_AFTER_POWER_UP, + ADSP_BEFORE_POWER_DOWN, + ADSP_AFTER_POWER_UP, + TWM_SLATE_AFTER_POWER_UP, + SLATE_DSP_ERROR, + SLATE_DSP_READY, + SLATE_BT_ERROR, + SLATE_BT_READY, +}; + +#define SLATE_AFTER_POWER_UP SLATE_AFTER_POWER_UP + +#define REG_READ \ + _IOWR(EXCHANGE_CODE, SLATECOM_REG_READ, \ + struct slate_ui_data) +#define AHB_READ \ + _IOWR(EXCHANGE_CODE, SLATECOM_AHB_READ, \ + struct slate_ui_data) +#define AHB_WRITE \ + _IOW(EXCHANGE_CODE, SLATECOM_AHB_WRITE, \ + struct slate_ui_data) +#define SET_SPI_FREE \ + _IOR(EXCHANGE_CODE, SLATECOM_SET_SPI_FREE, \ + struct slate_ui_data) +#define SET_SPI_BUSY \ + _IOR(EXCHANGE_CODE, SLATECOM_SET_SPI_BUSY, \ + struct slate_ui_data) +#define REG_WRITE \ + _IOWR(EXCHANGE_CODE, SLATECOM_REG_WRITE, \ + struct slate_ui_data) +#define SLATE_SOFT_RESET \ + _IOWR(EXCHANGE_CODE, SLATECOM_SOFT_RESET, \ + struct slate_ui_data) +#define SLATE_TWM_EXIT \ + _IOWR(EXCHANGE_CODE, SLATECOM_TWM_EXIT, \ + struct slate_ui_data) +#define SLATE_APP_RUNNING \ + _IOWR(EXCHANGE_CODE, SLATECOM_SLATE_APP_RUNNING, \ + struct slate_ui_data) +#define SLATE_MODEM_DOWN2_SLATE_DONE \ + _IOWR(EXCHANGE_CODE, SLATECOM_MODEM_DOWN2_SLATE, \ + struct slate_ui_data) +#define SLATE_WEAR_LOAD \ + _IOWR(EXCHANGE_CODE, SLATECOM_SLATE_WEAR_LOAD, \ + struct slate_ui_data) +#define SLATE_WEAR_UNLOAD \ + _IOWR(EXCHANGE_CODE, SLATECOM_SLATE_WEAR_UNLOAD, \ + struct slate_ui_data) +#define SLATE_ADSP_DOWN2_SLATE_DONE \ + _IOWR(EXCHANGE_CODE, SLATECOM_ADSP_DOWN2_SLATE, \ + struct slate_ui_data) + +#endif /* LINUX_SLATECOM_INTERFACE_H */ + diff --git a/include/uapi/linux/vm_bms.h b/include/uapi/linux/vm_bms.h new file mode 100644 index 000000000000..db537f478f0c --- /dev/null +++ b/include/uapi/linux/vm_bms.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +#ifndef __VM_BMS_H__ +#define __VM_BMS_H__ + +#define 
VM_BMS_DEVICE "/dev/vm_bms" +#define MAX_FIFO_REGS 8 + +/** + * struct qpnp_vm_bms_data - vm-bms data (passed to usersapce) + * @data_type: type of data filled up + * @num_fifo: count of valid fifo averages + * @fifo_uv: array of fifo averages in uv + * @sample_interval sample interval of the fifo data in ms + * @sample_count total samples in one fifo + * @acc_uv averaged accumulator value in uv + * @acc_count num of accumulated samples + * @seq_num sequence number of the data + */ +struct qpnp_vm_bms_data { + unsigned int num_fifo; + unsigned int fifo_uv[MAX_FIFO_REGS]; + unsigned int sample_interval_ms; + unsigned int sample_count; + unsigned int acc_uv; + unsigned int acc_count; + unsigned int seq_num; +}; + +enum vmbms_power_usecase { + VMBMS_IGNORE_ALL_BIT = 1, + VMBMS_VOICE_CALL_BIT = (1 << 4), + VMBMS_STATIC_DISPLAY_BIT = (1 << 5), +}; + +#endif /* __VM_BMS_H__ */ diff --git a/include/uapi/media/msm_media_info.h b/include/uapi/media/msm_media_info.h index 9359b0cc6898..734fee1b75f4 100644 --- a/include/uapi/media/msm_media_info.h +++ b/include/uapi/media/msm_media_info.h @@ -70,6 +70,41 @@ enum color_fmts { * + UV_Stride * UV_Scanlines, 4096) */ COLOR_FMT_NV12, + /* Venus NV12: + * YUV 4:2:0 image with a plane of 8 bit Y samples followed + * by an interleaved U/V plane containing 8 bit 2x2 subsampled + * colour difference samples. + * + * <-------- Y/UV_Stride --------> + * <------- Width -------> + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * U V U V U V U V U V U V . . . . ^ + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . 
--> Buffer size alignment + * + * Y_Stride : Width aligned to 128 + * UV_Stride : Width aligned to 128 + * Y_Scanlines: Height aligned to 32 + * UV_Scanlines: Height/2 aligned to 16 + * Total size = align(Y_Stride * Y_Scanlines + * + UV_Stride * UV_Scanlines, 4096) + */ + COLOR_FMT_NV12_128, /* Venus NV21: * YUV 4:2:0 image with a plane of 8 bit Y samples followed * by an interleaved V/U plane containing 8 bit 2x2 subsampled @@ -806,6 +841,7 @@ static inline unsigned int VENUS_Y_STRIDE(unsigned int color_fmt, alignment = 512; stride = MSM_MEDIA_ALIGN(width, alignment); break; + case COLOR_FMT_NV12_128: case COLOR_FMT_NV12_UBWC: alignment = 128; stride = MSM_MEDIA_ALIGN(width, alignment); @@ -852,6 +888,7 @@ static inline unsigned int VENUS_UV_STRIDE(unsigned int color_fmt, alignment = 512; stride = MSM_MEDIA_ALIGN(width, alignment); break; + case COLOR_FMT_NV12_128: case COLOR_FMT_NV12_UBWC: alignment = 128; stride = MSM_MEDIA_ALIGN(width, alignment); @@ -896,6 +933,7 @@ static inline unsigned int VENUS_Y_SCANLINES(unsigned int color_fmt, case COLOR_FMT_NV12_512: alignment = 512; break; + case COLOR_FMT_NV12_128: case COLOR_FMT_NV12_UBWC: case COLOR_FMT_P010: alignment = 32; @@ -935,6 +973,7 @@ static inline unsigned int VENUS_UV_SCANLINES(unsigned int color_fmt, case COLOR_FMT_NV12_512: alignment = 256; break; + case COLOR_FMT_NV12_128: case COLOR_FMT_NV12_BPP10_UBWC: case COLOR_FMT_P010_UBWC: case COLOR_FMT_P010: @@ -1235,6 +1274,7 @@ static inline unsigned int VENUS_BUFFER_SIZE(unsigned int color_fmt, case COLOR_FMT_NV12: case COLOR_FMT_P010: case COLOR_FMT_NV12_512: + case COLOR_FMT_NV12_128: y_plane = y_stride * y_sclines; uv_plane = uv_stride * uv_sclines; size = y_plane + uv_plane; diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index e6bad6b3f604..e1f2e78ad9b8 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -211,6 +211,22 @@ struct cgroup_namespace init_cgroup_ns = { static struct file_system_type cgroup2_fs_type; static struct cftype cgroup_base_files[]; +/* cgroup optional features */ +enum cgroup_opt_features { +#ifdef CONFIG_PSI + OPT_FEATURE_PRESSURE, +#endif + OPT_FEATURE_COUNT +}; + +static const char *cgroup_opt_feature_names[OPT_FEATURE_COUNT] = { +#ifdef CONFIG_PSI + "pressure", +#endif +}; + +static u16 cgroup_feature_disable_mask __read_mostly; + static int cgroup_apply_control(struct cgroup *cgrp); static void cgroup_finalize_control(struct cgroup *cgrp, int ret); static void css_task_iter_skip(struct css_task_iter *it, @@ -3535,6 +3551,18 @@ static void cgroup_pressure_release(struct kernfs_open_file *of) { psi_trigger_replace(&of->priv, NULL); } + +bool cgroup_psi_enabled(void) +{ + return (cgroup_feature_disable_mask & (1 << OPT_FEATURE_PRESSURE)) == 0; +} + +#else /* CONFIG_PSI */ +bool cgroup_psi_enabled(void) +{ + return false; +} + #endif /* CONFIG_PSI */ static int cgroup_freeze_show(struct seq_file *seq, void *v) @@ -3782,6 +3810,8 @@ static int cgroup_addrm_files(struct cgroup_subsys_state *css, restart: for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) { /* does cft->flags tell us to skip this file on @cgrp? 
*/ + if ((cft->flags & CFTYPE_PRESSURE) && !cgroup_psi_enabled()) + continue; if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp)) continue; if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp)) @@ -3858,6 +3888,9 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) WARN_ON(cft->ss || cft->kf_ops); + if ((cft->flags & CFTYPE_PRESSURE) && !cgroup_psi_enabled()) + continue; + if (cft->seq_start) kf_ops = &cgroup_kf_ops; else @@ -4773,7 +4806,7 @@ static struct cftype cgroup_base_files[] = { #ifdef CONFIG_PSI { .name = "io.pressure", - .flags = CFTYPE_NOT_ON_ROOT, + .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_PRESSURE, .seq_show = cgroup_io_pressure_show, .write = cgroup_io_pressure_write, .poll = cgroup_pressure_poll, @@ -4781,7 +4814,7 @@ static struct cftype cgroup_base_files[] = { }, { .name = "memory.pressure", - .flags = CFTYPE_NOT_ON_ROOT, + .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_PRESSURE, .seq_show = cgroup_memory_pressure_show, .write = cgroup_memory_pressure_write, .poll = cgroup_pressure_poll, @@ -4789,7 +4822,7 @@ static struct cftype cgroup_base_files[] = { }, { .name = "cpu.pressure", - .flags = CFTYPE_NOT_ON_ROOT, + .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_PRESSURE, .seq_show = cgroup_cpu_pressure_show, .write = cgroup_cpu_pressure_write, .poll = cgroup_pressure_poll, @@ -6029,6 +6062,15 @@ static int __init cgroup_disable(char *str) continue; cgroup_disable_mask |= 1 << i; } + + for (i = 0; i < OPT_FEATURE_COUNT; i++) { + if (strcmp(token, cgroup_opt_feature_names[i])) + continue; + cgroup_feature_disable_mask |= 1 << i; + pr_info("Disabling %s control group feature\n", + cgroup_opt_feature_names[i]); + break; + } } return 1; } @@ -6277,6 +6319,9 @@ static ssize_t show_delegatable_files(struct cftype *files, char *buf, if (!(cft->flags & CFTYPE_NS_DELEGATABLE)) continue; + if ((cft->flags & CFTYPE_PRESSURE) && !cgroup_psi_enabled()) + continue; + if (prefix) ret += snprintf(buf + ret, size - ret, "%s.", prefix); diff --git a/kernel/events/core.c b/kernel/events/core.c index 2c6b8409d56e..9b1fbf8a42ed 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -431,13 +431,8 @@ static cpumask_var_t perf_online_mask; * 0 - disallow raw tracepoint access for unpriv * 1 - disallow cpu events for unpriv * 2 - disallow kernel profiling for unpriv - * 3 - disallow all unpriv perf event use */ -#ifdef CONFIG_SECURITY_PERF_EVENTS_RESTRICT -int sysctl_perf_event_paranoid __read_mostly = 3; -#else int sysctl_perf_event_paranoid __read_mostly = 2; -#endif /* Minimum for 512 kiB + 1 user control page */ int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ @@ -10889,9 +10884,6 @@ SYSCALL_DEFINE5(perf_event_open, if (flags & ~PERF_FLAG_ALL) return -EINVAL; - if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN)) - return -EACCES; - /* Do we allow access to perf_event_open(2) ? 
*/ err = security_perf_event_open(&attr, PERF_SECURITY_OPEN); if (err) diff --git a/kernel/futex.c b/kernel/futex.c index 52f641c00a65..d98657fb0da4 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -833,6 +833,29 @@ static struct futex_pi_state *alloc_pi_state(void) return pi_state; } +static void pi_state_update_owner(struct futex_pi_state *pi_state, + struct task_struct *new_owner) +{ + struct task_struct *old_owner = pi_state->owner; + + lockdep_assert_held(&pi_state->pi_mutex.wait_lock); + + if (old_owner) { + raw_spin_lock(&old_owner->pi_lock); + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); + raw_spin_unlock(&old_owner->pi_lock); + } + + if (new_owner) { + raw_spin_lock(&new_owner->pi_lock); + WARN_ON(!list_empty(&pi_state->list)); + list_add(&pi_state->list, &new_owner->pi_state_list); + pi_state->owner = new_owner; + raw_spin_unlock(&new_owner->pi_lock); + } +} + static void get_pi_state(struct futex_pi_state *pi_state) { WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount)); @@ -1009,7 +1032,8 @@ void exit_pi_state_list(struct task_struct *curr) * FUTEX_OWNER_DIED bit. See [4] * * [10] There is no transient state which leaves owner and user space - * TID out of sync. + * TID out of sync. Except one error case where the kernel is denied + * write access to the user address, see fixup_pi_state_owner(). * * * Serialization and lifetime rules: @@ -1536,26 +1560,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ ret = -EINVAL; } - if (ret) - goto out_unlock; - - /* - * This is a point of no return; once we modify the uval there is no - * going back and subsequent operations must not fail. - */ - - raw_spin_lock(&pi_state->owner->pi_lock); - WARN_ON(list_empty(&pi_state->list)); - list_del_init(&pi_state->list); - raw_spin_unlock(&pi_state->owner->pi_lock); - - raw_spin_lock(&new_owner->pi_lock); - WARN_ON(!list_empty(&pi_state->list)); - list_add(&pi_state->list, &new_owner->pi_state_list); - pi_state->owner = new_owner; - raw_spin_unlock(&new_owner->pi_lock); - - postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); + if (!ret) { + /* + * This is a point of no return; once we modified the uval + * there is no going back and subsequent operations must + * not fail. + */ + pi_state_update_owner(pi_state, new_owner); + postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); + } out_unlock: raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); @@ -2361,18 +2374,13 @@ static void unqueue_me_pi(struct futex_q *q) spin_unlock(q->lock_ptr); } -static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, - struct task_struct *argowner) +static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, + struct task_struct *argowner) { + u32 uval, uninitialized_var(curval), newval, newtid; struct futex_pi_state *pi_state = q->pi_state; - u32 uval, uninitialized_var(curval), newval; struct task_struct *oldowner, *newowner; - u32 newtid; - int ret, err = 0; - - lockdep_assert_held(q->lock_ptr); - - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + int err = 0; oldowner = pi_state->owner; @@ -2406,14 +2414,12 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, * We raced against a concurrent self; things are * already fixed up. Nothing to do. */ - ret = 0; - goto out_unlock; + return 0; } if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) { - /* We got the lock after all, nothing to fix. */ - ret = 0; - goto out_unlock; + /* We got the lock. pi_state is correct. 
Tell caller. */ + return 1; } /* @@ -2440,8 +2446,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, * We raced against a concurrent self; things are * already fixed up. Nothing to do. */ - ret = 0; - goto out_unlock; + return 1; } newowner = argowner; } @@ -2471,22 +2476,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, * We fixed up user space. Now we need to fix the pi_state * itself. */ - if (pi_state->owner != NULL) { - raw_spin_lock(&pi_state->owner->pi_lock); - WARN_ON(list_empty(&pi_state->list)); - list_del_init(&pi_state->list); - raw_spin_unlock(&pi_state->owner->pi_lock); - } + pi_state_update_owner(pi_state, newowner); - pi_state->owner = newowner; - - raw_spin_lock(&newowner->pi_lock); - WARN_ON(!list_empty(&pi_state->list)); - list_add(&pi_state->list, &newowner->pi_state_list); - raw_spin_unlock(&newowner->pi_lock); - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); - - return 0; + return argowner == current; /* * In order to reschedule or handle a page fault, we need to drop the @@ -2507,17 +2499,16 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, switch (err) { case -EFAULT: - ret = fault_in_user_writeable(uaddr); + err = fault_in_user_writeable(uaddr); break; case -EAGAIN: cond_resched(); - ret = 0; + err = 0; break; default: WARN_ON_ONCE(1); - ret = err; break; } @@ -2527,17 +2518,44 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, /* * Check if someone else fixed it for us: */ - if (pi_state->owner != oldowner) { - ret = 0; - goto out_unlock; - } + if (pi_state->owner != oldowner) + return argowner == current; - if (ret) - goto out_unlock; + /* Retry if err was -EAGAIN or the fault in succeeded */ + if (!err) + goto retry; - goto retry; + /* + * fault_in_user_writeable() failed so user state is immutable. At + * best we can make the kernel state consistent but user state will + * be most likely hosed and any subsequent unlock operation will be + * rejected due to PI futex rule [10]. + * + * Ensure that the rtmutex owner is also the pi_state owner despite + * the user space value claiming something different. There is no + * point in unlocking the rtmutex if current is the owner as it + * would need to wait until the next waiter has taken the rtmutex + * to guarantee consistent state. Keep it simple. Userspace asked + * for this wreckaged state. + * + * The rtmutex has an owner - either current or some other + * task. See the EAGAIN loop above. + */ + pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex)); -out_unlock: + return err; +} + +static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, + struct task_struct *argowner) +{ + struct futex_pi_state *pi_state = q->pi_state; + int ret; + + lockdep_assert_held(q->lock_ptr); + + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + ret = __fixup_pi_state_owner(uaddr, q, argowner); raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); return ret; } @@ -2561,8 +2579,6 @@ static long futex_wait_restart(struct restart_block *restart); */ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) { - int ret = 0; - if (locked) { /* * Got the lock. We might not be the anticipated owner if we @@ -2573,8 +2589,8 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) * stable state, anything else needs more attention. 
*/ if (q->pi_state->owner != current) - ret = fixup_pi_state_owner(uaddr, q, current); - goto out; + return fixup_pi_state_owner(uaddr, q, current); + return 1; } /* @@ -2585,24 +2601,17 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) * Another speculative read; pi_state->owner == current is unstable * but needs our attention. */ - if (q->pi_state->owner == current) { - ret = fixup_pi_state_owner(uaddr, q, NULL); - goto out; - } + if (q->pi_state->owner == current) + return fixup_pi_state_owner(uaddr, q, NULL); /* * Paranoia check. If we did not take the lock, then we should not be - * the owner of the rt_mutex. + * the owner of the rt_mutex. Warn and establish consistent state. */ - if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) { - printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p " - "pi-state %p\n", ret, - q->pi_state->pi_mutex.owner, - q->pi_state->owner); - } + if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current)) + return fixup_pi_state_owner(uaddr, q, current); -out: - return ret ? ret : locked; + return 0; } /** @@ -2823,7 +2832,6 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int trylock) { struct hrtimer_sleeper timeout, *to = NULL; - struct futex_pi_state *pi_state = NULL; struct rt_mutex_waiter rt_waiter; struct futex_hash_bucket *hb; struct futex_q q = futex_q_init; @@ -2957,23 +2965,9 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, if (res) ret = (res < 0) ? res : 0; - /* - * If fixup_owner() faulted and was unable to handle the fault, unlock - * it and return the fault to userspace. - */ - if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) { - pi_state = q.pi_state; - get_pi_state(pi_state); - } - /* Unqueue and drop the lock */ unqueue_me_pi(&q); - if (pi_state) { - rt_mutex_futex_unlock(&pi_state->pi_mutex); - put_pi_state(pi_state); - } - goto out_put_key; out_unlock_put_key: @@ -3239,7 +3233,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, u32 __user *uaddr2) { struct hrtimer_sleeper timeout, *to = NULL; - struct futex_pi_state *pi_state = NULL; struct rt_mutex_waiter rt_waiter; struct futex_hash_bucket *hb; union futex_key key2 = FUTEX_KEY_INIT; @@ -3324,16 +3317,17 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, if (q.pi_state && (q.pi_state->owner != current)) { spin_lock(q.lock_ptr); ret = fixup_pi_state_owner(uaddr2, &q, current); - if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { - pi_state = q.pi_state; - get_pi_state(pi_state); - } /* * Drop the reference to the pi state which * the requeue_pi() code acquired for us. */ put_pi_state(q.pi_state); spin_unlock(q.lock_ptr); + /* + * Adjust the return value. It's either -EFAULT or + * success (1) but the caller expects 0 for success. + */ + ret = ret < 0 ? ret : 0; } } else { struct rt_mutex *pi_mutex; @@ -3364,25 +3358,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, if (res) ret = (res < 0) ? res : 0; - /* - * If fixup_pi_state_owner() faulted and was unable to handle - * the fault, unlock the rt_mutex and return the fault to - * userspace. - */ - if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { - pi_state = q.pi_state; - get_pi_state(pi_state); - } - /* Unqueue and drop the lock. 
*/ unqueue_me_pi(&q); } - if (pi_state) { - rt_mutex_futex_unlock(&pi_state->pi_mutex); - put_pi_state(pi_state); - } - if (ret == -EINTR) { /* * We've already been requeued, but cannot restart by calling diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index 7370d3153450..f49fb7fa7eef 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -149,6 +149,7 @@ static int psi_bug __read_mostly; DEFINE_STATIC_KEY_FALSE(psi_disabled); +DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled); #ifdef CONFIG_PSI_DEFAULT_DISABLED static bool psi_enable; @@ -213,6 +214,9 @@ void __init psi_init(void) return; } + if (!cgroup_psi_enabled()) + static_branch_disable(&psi_cgroups_enabled); + psi_period = jiffies_to_nsecs(PSI_FREQ); group_init(&psi_system); } @@ -836,23 +840,23 @@ static u32 psi_group_change(struct psi_group *group, int cpu, static struct psi_group *iterate_groups(struct task_struct *task, void **iter) { + if (*iter == &psi_system) + return NULL; + #ifdef CONFIG_CGROUPS - struct cgroup *cgroup = NULL; + if (static_branch_likely(&psi_cgroups_enabled)) { + struct cgroup *cgroup = NULL; - if (!*iter) - cgroup = task->cgroups->dfl_cgrp; - else if (*iter == &psi_system) - return NULL; - else - cgroup = cgroup_parent(*iter); + if (!*iter) + cgroup = task->cgroups->dfl_cgrp; + else + cgroup = cgroup_parent(*iter); - if (cgroup && cgroup_parent(cgroup)) { - *iter = cgroup; - return cgroup_psi(cgroup); + if (cgroup && cgroup_parent(cgroup)) { + *iter = cgroup; + return cgroup_psi(cgroup); + } } -#else - if (*iter) - return NULL; #endif *iter = &psi_system; return &psi_system; diff --git a/kernel/signal.c b/kernel/signal.c index c82ead187fff..c3c08a9ec74a 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1924,8 +1924,12 @@ bool do_notify_parent(struct task_struct *tsk, int sig) if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) sig = 0; } + /* + * Send with __send_signal as si_pid and si_uid are in the + * parent's namespaces. + */ if (valid_signal(sig) && sig) - __group_send_sig_info(sig, &info, tsk->parent); + __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false); __wake_up_parent(tsk, tsk->parent); spin_unlock_irqrestore(&psig->siglock, flags); diff --git a/kernel_headers.py b/kernel_headers.py index 4c2096e6b912..c21e31e3c321 100644 --- a/kernel_headers.py +++ b/kernel_headers.py @@ -310,7 +310,7 @@ def gen_arch_headers( return error_count -def run_headers_install(verbose, gen_dir, headers_install, prefix, h): +def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h): """Process a header through the headers_install script. The headers_install script does some processing of a header so that it is @@ -325,6 +325,7 @@ def run_headers_install(verbose, gen_dir, headers_install, prefix, h): verbose: Set True to print progress messages. gen_dir: Where to place the generated files. headers_install: The script that munges the header. + unifdef: The unifdef tool used by headers_install. prefix: The prefix to strip from h to generate the output filename. h: The input header to process. Return: @@ -344,7 +345,9 @@ def run_headers_install(verbose, gen_dir, headers_install, prefix, h): if verbose: print('run_headers_install: cmd is %s' % cmd) - result = subprocess.call(cmd) + env = os.environ.copy() + env["LOC_UNIFDEF"] = unifdef + result = subprocess.call(['sh', headers_install, out_h_dirname, h_dirname, out_h_basename], env=env) if result != 0: print('error: run_headers_install: cmd %s failed %d' % (cmd, result)) @@ -511,6 +514,7 @@ def gen_blueprints( # Tools and tool files. 
headers_install_sh = 'headers_install.sh' + unifdef = 'unifdef' kernel_headers_py = 'kernel_headers.py' arm_syscall_tool = 'arch/arm/tools/syscallhdr.sh' @@ -658,7 +662,10 @@ def gen_blueprints( f.write('genrule {\n') f.write(' name: "qti_generate_kernel_headers_%s",\n' % header_arch) - f.write(' tools: ["%s"],\n' % headers_install_sh) + f.write(' tools: [\n') + f.write(' "%s",\n' % headers_install_sh) + f.write(' "%s",\n' % unifdef) + f.write(' ],\n') f.write(' tool_files: [\n') f.write(' "%s",\n' % kernel_headers_py) @@ -692,6 +699,7 @@ def gen_blueprints( f.write(' "--arch_syscall_tbl $(location %s) " +\n' % arm_syscall_tbl) f.write(' "--headers_install $(location %s) " +\n' % headers_install_sh) + f.write(' "--unifdef $(location %s) " +\n' % unifdef) f.write(' "--include_uapi $(locations %s)",\n' % generic_src) f.write(' out: ["linux/version.h"] + gen_headers_out_%s,\n' % header_arch) f.write('}\n') @@ -746,7 +754,7 @@ def headers_diff(old_file, new_file): def gen_headers( verbose, header_arch, gen_dir, arch_asm_kbuild, asm_generic_kbuild, module_dir, old_gen_headers_bp, new_gen_headers_bp, version_makefile, - arch_syscall_tool, arch_syscall_tbl, headers_install, include_uapi, + arch_syscall_tool, arch_syscall_tbl, headers_install, unifdef, include_uapi, arch_include_uapi, techpack_include_uapi): """Generate the kernel headers. @@ -768,6 +776,7 @@ def gen_headers( arch_syscall_tool: The arch script that generates syscall headers. arch_syscall_tbl: The arch script that defines syscall vectors. headers_install: The headers_install tool to process input headers. + unifdef: The unifdef tool used by headers_install. include_uapi: The list of include/uapi header files. arch_include_uapi: The list of arch//include/uapi header files. Return: @@ -795,20 +804,20 @@ def gen_headers( for h in include_uapi: if not run_headers_install( - verbose, gen_dir, headers_install, + verbose, gen_dir, headers_install, unifdef, uapi_include_prefix, h): error_count += 1 for h in arch_include_uapi: if not run_headers_install( - verbose, gen_dir, headers_install, + verbose, gen_dir, headers_install, unifdef, arch_uapi_include_prefix, h): error_count += 1 for h in techpack_include_uapi: techpack_uapi_include_prefix = os.path.join(h.split('/include/uapi')[0], 'include', 'uapi') + os.sep if not run_headers_install( - verbose, gen_dir, headers_install, + verbose, gen_dir, headers_install, unifdef, techpack_uapi_include_prefix, h): error_count += 1 @@ -935,6 +944,10 @@ def main(): '--headers_install', required=True, help='The headers_install tool to process input headers.') + parser_headers.add_argument( + '--unifdef', + required=True, + help='The unifdef tool used by headers_install.') parser_headers.add_argument( '--include_uapi', required=True, @@ -983,12 +996,14 @@ def main(): print('arch_syscall_tool [%s]' % args.arch_syscall_tool) print('arch_syscall_tbl [%s]' % args.arch_syscall_tbl) print('headers_install [%s]' % args.headers_install) + print('unifdef [%s]' % args.unifdef) return gen_headers( args.verbose, args.header_arch, args.gen_dir, args.arch_asm_kbuild, args.asm_generic_kbuild, module_dir, args.old_gen_headers_bp, args.new_gen_headers_bp, args.version_makefile, args.arch_syscall_tool, args.arch_syscall_tbl, - args.headers_install, args.include_uapi, args.arch_include_uapi, args.techpack_include_uapi) + args.headers_install, args.unifdef, args.include_uapi, args.arch_include_uapi, + args.techpack_include_uapi) print('error: unknown mode: %s' % args.mode) return 1 diff --git a/mm/mmap.c b/mm/mmap.c index 
9f4e340e5632..b5c3692e7193 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -735,29 +735,9 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, long adjust_next = 0; int remove_next = 0; - /* - * Why using vm_raw_write*() functions here to avoid lockdep's warning ? - * - * Locked is complaining about a theoretical lock dependency, involving - * 3 locks: - * mapping->i_mmap_rwsem --> vma->vm_sequence --> fs_reclaim - * - * Here are the major path leading to this dependency : - * 1. __vma_adjust() mmap_sem -> vm_sequence -> i_mmap_rwsem - * 2. move_vmap() mmap_sem -> vm_sequence -> fs_reclaim - * 3. __alloc_pages_nodemask() fs_reclaim -> i_mmap_rwsem - * 4. unmap_mapping_range() i_mmap_rwsem -> vm_sequence - * - * So there is no way to solve this easily, especially because in - * unmap_mapping_range() the i_mmap_rwsem is grab while the impacted - * VMAs are not yet known. - * However, the way the vm_seq is used is guarantying that we will - * never block on it since we just check for its value and never wait - * for it to move, see vma_has_changed() and handle_speculative_fault(). - */ - vm_raw_write_begin(vma); + vm_write_begin(vma); if (next) - vm_raw_write_begin(next); + vm_write_begin(next); if (next && !insert) { struct vm_area_struct *exporter = NULL, *importer = NULL; @@ -841,8 +821,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, error = anon_vma_clone(importer, exporter); if (error) { if (next && next != vma) - vm_raw_write_end(next); - vm_raw_write_end(vma); + vm_write_end(next); + vm_write_end(vma); return error; } } @@ -971,7 +951,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, if (next->anon_vma) anon_vma_merge(vma, next); mm->map_count--; - vm_raw_write_end(next); + vm_write_end(next); put_vma(next); /* * In mprotect's case 6 (see comments on vma_merge), @@ -987,7 +967,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, */ next = vma->vm_next; if (next) - vm_raw_write_begin(next); + vm_write_begin(next); } else { /* * For the scope of the comment "next" and @@ -1035,9 +1015,9 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, uprobe_mmap(insert); if (next && next != vma) - vm_raw_write_end(next); + vm_write_end(next); if (!keep_locked) - vm_raw_write_end(vma); + vm_write_end(vma); validate_mm(mm); @@ -3348,7 +3328,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, * that we protect it right now, and let the caller unprotect * it once the move is done. */ - vm_raw_write_begin(new_vma); + vm_write_begin(new_vma); vma_link(mm, new_vma, prev, rb_link, rb_parent); *need_rmap_locks = false; } diff --git a/mm/mremap.c b/mm/mremap.c index f1247602c12a..90dbf98c2404 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -300,7 +300,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, * to be mapped in our back while we are copying the PTEs. 
*/ if (vma != new_vma) - vm_raw_write_begin(vma); + vm_write_begin(vma); moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, need_rmap_locks); @@ -319,7 +319,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, move_page_tables(new_vma, new_addr, vma, old_addr, moved_len, true); if (vma != new_vma) - vm_raw_write_end(vma); + vm_write_end(vma); vma = new_vma; old_len = new_len; old_addr = new_addr; @@ -329,9 +329,9 @@ static unsigned long move_vma(struct vm_area_struct *vma, arch_remap(mm, old_addr, old_addr + old_len, new_addr, new_addr + new_len); if (vma != new_vma) - vm_raw_write_end(vma); + vm_write_end(vma); } - vm_raw_write_end(new_vma); + vm_write_end(new_vma); /* Conceal VM_ACCOUNT so old reservation is not undone */ if (vm_flags & VM_ACCOUNT) { diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 433e35abbcc5..7446b98661d8 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -20,7 +20,6 @@ obj-$(CONFIG_BPFILTER) += bpfilter/ obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o -obj-$(CONFIG_SYSFS) += sysfs_net_ipv4.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o obj-$(CONFIG_IP_MROUTE) += ipmr.o diff --git a/net/ipv4/sysfs_net_ipv4.c b/net/ipv4/sysfs_net_ipv4.c deleted file mode 100644 index 35a651aaee47..000000000000 --- a/net/ipv4/sysfs_net_ipv4.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * net/ipv4/sysfs_net_ipv4.c - * - * sysfs-based networking knobs (so we can, unlike with sysctl, control perms) - * - * Copyright (C) 2008 Google, Inc. - * - * Robert Love - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include -#include -#include -#include -#include - -#define CREATE_IPV4_FILE(_name, _var) \ -static ssize_t _name##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%d\n", _var); \ -} \ -static ssize_t _name##_store(struct kobject *kobj, \ - struct kobj_attribute *attr, \ - const char *buf, size_t count) \ -{ \ - int val, ret; \ - ret = sscanf(buf, "%d", &val); \ - if (ret != 1) \ - return -EINVAL; \ - if (val < 0) \ - return -EINVAL; \ - _var = val; \ - return count; \ -} \ -static struct kobj_attribute _name##_attr = \ - __ATTR(_name, 0644, _name##_show, _name##_store) - -CREATE_IPV4_FILE(tcp_wmem_min, init_net.ipv4.sysctl_tcp_wmem[0]); -CREATE_IPV4_FILE(tcp_wmem_def, init_net.ipv4.sysctl_tcp_wmem[1]); -CREATE_IPV4_FILE(tcp_wmem_max, init_net.ipv4.sysctl_tcp_wmem[2]); - -CREATE_IPV4_FILE(tcp_rmem_min, init_net.ipv4.sysctl_tcp_rmem[0]); -CREATE_IPV4_FILE(tcp_rmem_def, init_net.ipv4.sysctl_tcp_rmem[1]); -CREATE_IPV4_FILE(tcp_rmem_max, init_net.ipv4.sysctl_tcp_rmem[2]); - -static struct attribute *ipv4_attrs[] = { - &tcp_wmem_min_attr.attr, - &tcp_wmem_def_attr.attr, - &tcp_wmem_max_attr.attr, - &tcp_rmem_min_attr.attr, - &tcp_rmem_def_attr.attr, - &tcp_rmem_max_attr.attr, - NULL -}; - -static struct attribute_group ipv4_attr_group = { - .attrs = ipv4_attrs, -}; - -static __init int sysfs_ipv4_init(void) -{ - struct kobject *ipv4_kobject; - int ret; - - ipv4_kobject = kobject_create_and_add("ipv4", kernel_kobj); - if (!ipv4_kobject) - return -ENOMEM; - - ret = sysfs_create_group(ipv4_kobject, &ipv4_attr_group); - if (ret) { - kobject_put(ipv4_kobject); - return ret; - } - - return 0; -} - -subsys_initcall(sysfs_ipv4_init); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b42e72ced1dc..8246b65af5da 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -580,10 +580,12 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; u32 delta_us; - if (!delta) - delta = 1; - delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); - tcp_rcv_rtt_update(tp, delta_us, 0); + if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) { + if (!delta) + delta = 1; + delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); + tcp_rcv_rtt_update(tp, delta_us, 0); + } } } @@ -2929,9 +2931,11 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag, if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && flag & FLAG_ACKED) { u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; - u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); - seq_rtt_us = ca_rtt_us = delta_us; + if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) { + seq_rtt_us = delta * (USEC_PER_SEC / TCP_TS_HZ); + ca_rtt_us = seq_rtt_us; + } } rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */ if (seq_rtt_us < 0) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e2a2e132a207..5347e800c578 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2957,13 +2957,12 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) #endif TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; tp->retrans_out += tcp_skb_pcount(skb); - - /* Save stamp of the first retransmit. */ - if (!tp->retrans_stamp) - tp->retrans_stamp = tcp_skb_timestamp(skb); - } + /* Save stamp of the first (attempted) retransmit. 
*/ + if (!tp->retrans_stamp) + tp->retrans_stamp = tcp_skb_timestamp(skb); + if (tp->undo_retrans < 0) tp->undo_retrans = 0; tp->undo_retrans += tcp_skb_pcount(skb); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index dda4e5361e13..5c16564e4fca 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -22,33 +22,21 @@ #include #include -static u32 tcp_retransmit_stamp(const struct sock *sk) -{ - u32 start_ts = tcp_sk(sk)->retrans_stamp; - - if (unlikely(!start_ts)) { - struct sk_buff *head = tcp_rtx_queue_head(sk); - - if (!head) - return 0; - start_ts = tcp_skb_timestamp(head); - } - return start_ts; -} - static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); u32 elapsed, start_ts; + s32 remaining; - start_ts = tcp_retransmit_stamp(sk); - if (!icsk->icsk_user_timeout || !start_ts) + start_ts = tcp_sk(sk)->retrans_stamp; + if (!icsk->icsk_user_timeout) return icsk->icsk_rto; elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts; - if (elapsed >= icsk->icsk_user_timeout) + remaining = icsk->icsk_user_timeout - elapsed; + if (remaining <= 0) return 1; /* user timeout has passed; fire ASAP */ - else - return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(icsk->icsk_user_timeout - elapsed)); + + return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining)); } static void set_tcp_default(void) @@ -204,7 +192,20 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); } +static unsigned int tcp_model_timeout(struct sock *sk, + unsigned int boundary, + unsigned int rto_base) +{ + unsigned int linear_backoff_thresh, timeout; + linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base); + if (boundary <= linear_backoff_thresh) + timeout = ((2 << boundary) - 1) * rto_base; + else + timeout = ((2 << linear_backoff_thresh) - 1) * rto_base + + (boundary - linear_backoff_thresh) * TCP_RTO_MAX; + return jiffies_to_msecs(timeout); +} /** * retransmits_timed_out() - returns true if this connection has timed out * @sk: The current socket @@ -222,27 +223,21 @@ static bool retransmits_timed_out(struct sock *sk, unsigned int boundary, unsigned int timeout) { - const unsigned int rto_base = TCP_RTO_MIN; - unsigned int linear_backoff_thresh, start_ts; + unsigned int start_ts; if (!inet_csk(sk)->icsk_retransmits) return false; - start_ts = tcp_retransmit_stamp(sk); - if (!start_ts) - return false; - + start_ts = tcp_sk(sk)->retrans_stamp; if (likely(timeout == 0)) { - linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base); + unsigned int rto_base = TCP_RTO_MIN; - if (boundary <= linear_backoff_thresh) - timeout = ((2 << boundary) - 1) * rto_base; - else - timeout = ((2 << linear_backoff_thresh) - 1) * rto_base + - (boundary - linear_backoff_thresh) * TCP_RTO_MAX; - timeout = jiffies_to_msecs(timeout); + if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) + rto_base = tcp_timeout_init(sk); + timeout = tcp_model_timeout(sk, boundary, rto_base); } - return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= timeout; + + return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0; } /* A write timeout has occurred. Process the after effects. */ @@ -541,14 +536,13 @@ void tcp_retransmit_timer(struct sock *sk) tcp_enter_loss(sk); + icsk->icsk_retransmits++; if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) { /* Retransmission failed because of local congestion, - * do not backoff. + * Let senders fight for local resources conservatively. 
*/ - if (!icsk->icsk_retransmits) - icsk->icsk_retransmits = 1; inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL), + TCP_RESOURCE_PROBE_INTERVAL, TCP_RTO_MAX); goto out; } @@ -569,7 +563,6 @@ void tcp_retransmit_timer(struct sock *sk) * the 120 second clamps though! */ icsk->icsk_backoff++; - icsk->icsk_retransmits++; out_reset_timer: /* If stream is thin, use linear timeouts. Since 'icsk_backoff' is diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 005589c6d0f6..040ec8b0a1b2 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -1203,6 +1203,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = { { .name = "Q.931", .me = THIS_MODULE, + .data_len = sizeof(struct nf_ct_h323_master), .tuple.src.l3num = AF_INET6, .tuple.src.u.tcp.port = cpu_to_be16(Q931_PORT), .tuple.dst.protonum = IPPROTO_TCP, diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c index a61f43674519..635fcf7a8a8e 100644 --- a/net/netfilter/xt_qtaguid.c +++ b/net/netfilter/xt_qtaguid.c @@ -1067,18 +1067,6 @@ static struct sock_tag *get_sock_stat_nl(const struct sock *sk) return sock_tag_tree_search(&sock_tag_tree, sk); } -static struct sock_tag *get_sock_stat(const struct sock *sk) -{ - struct sock_tag *sock_tag_entry; - MT_DEBUG("qtaguid: get_sock_stat(sk=%p)\n", sk); - if (!sk) - return NULL; - spin_lock_bh(&sock_tag_list_lock); - sock_tag_entry = get_sock_stat_nl(sk); - spin_unlock_bh(&sock_tag_list_lock); - return sock_tag_entry; -} - static int ipx_proto(const struct sk_buff *skb, struct xt_action_param *par) { @@ -1313,12 +1301,15 @@ static void if_tag_stat_update(const char *ifname, uid_t uid, * Look for a tagged sock. * It will have an acct_uid. */ - sock_tag_entry = get_sock_stat(sk); + spin_lock_bh(&sock_tag_list_lock); + sock_tag_entry = sk ? get_sock_stat_nl(sk) : NULL; if (sock_tag_entry) { tag = sock_tag_entry->tag; acct_tag = get_atag_from_tag(tag); uid_tag = get_utag_from_tag(tag); - } else { + } + spin_unlock_bh(&sock_tag_list_lock); + if (!sock_tag_entry) { acct_tag = make_atag_from_value(0); tag = combine_atag_with_uid(acct_tag, uid); uid_tag = make_tag_from_uid(uid); diff --git a/net/qrtr/Kconfig b/net/qrtr/Kconfig index 8b9d3c2eaeb3..957e3fc7c86e 100644 --- a/net/qrtr/Kconfig +++ b/net/qrtr/Kconfig @@ -14,6 +14,17 @@ config QRTR if QRTR +config QRTR_WAKEUP_MS + int "QRTR Wakeup timeout" + default 0 + help + This option is used to configure the wakesource timeout that QRTR + should take when a packet is received. The qrtr driver can guarantee + that the packet gets queued to the socket but cannot guarantee the + client process will get time to run if auto sleep is enabled. This + config will help mitigate missed packets on systems where auto sleep + is aggressive. 
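For illustration, the qrtr.c hunk later in this patch latches this option into a module variable and passes it to pm_wakeup_ws_event() for every received packet. The sketch below is not part of the patch and uses invented names (example_wakeup_ms, example_rx_notify); only pm_wakeup_ws_event() and CONFIG_QRTR_WAKEUP_MS come from the change itself. It shows the pattern: report a wakeup event on the endpoint's wakeup source and keep it held for the configured number of milliseconds so the receiving process gets scheduled before autosleep.

/* Hypothetical sketch of consuming CONFIG_QRTR_WAKEUP_MS; not from the patch. */
#include <linux/pm_wakeup.h>

static const unsigned int example_wakeup_ms = CONFIG_QRTR_WAKEUP_MS;

static void example_rx_notify(struct wakeup_source *ws)
{
	/* Report a wakeup event on @ws and keep it active for the timeout. */
	pm_wakeup_ws_event(ws, example_wakeup_ms, true);
}

A timeout of 0 (the default) only reports the event without holding the wakeup source, which preserves the previous behaviour.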
+ config QRTR_SMD tristate "SMD IPC Router channels" depends on RPMSG || (COMPILE_TEST && RPMSG=n) diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index e8f5bb848ee9..b0b59081d918 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -123,6 +123,7 @@ static inline struct qrtr_sock *qrtr_sk(struct sock *sk) } static unsigned int qrtr_local_nid = 1; +static unsigned int qrtr_wakeup_ms = CONFIG_QRTR_WAKEUP_MS; /* for node ids */ static RADIX_TREE(qrtr_nodes, GFP_KERNEL); @@ -210,6 +211,9 @@ static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb, static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb, int type, struct sockaddr_qrtr *from, struct sockaddr_qrtr *to, unsigned int flags); +static void qrtr_handle_del_proc(struct qrtr_node *node, struct sk_buff *skb); +static void qrtr_cleanup_flow_control(struct qrtr_node *node, + struct sk_buff *skb); static void qrtr_log_tx_msg(struct qrtr_node *node, struct qrtr_hdr_v1 *hdr, struct sk_buff *skb) @@ -296,6 +300,10 @@ static void qrtr_log_rx_msg(struct qrtr_node *node, struct sk_buff *skb) QRTR_INFO(node->ilc, "RX CTRL: cmd:0x%x node[0x%x]\n", cb->type, cb->src_node); + else if (cb->type == QRTR_TYPE_DEL_PROC) + QRTR_INFO(node->ilc, + "RX CTRL: cmd:0x%x node[0x%x]\n", + cb->type, le32_to_cpu(pkt.proc.node)); } } @@ -469,22 +477,20 @@ static int qrtr_tx_wait(struct qrtr_node *node, struct sockaddr_qrtr *to, /* Assume sk is set correctly for all data type packets */ timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); - mutex_lock(&node->qrtr_tx_lock); - flow = radix_tree_lookup(&node->qrtr_tx_flow, key); - if (!flow) { - flow = kzalloc(sizeof(*flow), GFP_KERNEL); - if (!flow) { - mutex_unlock(&node->qrtr_tx_lock); - return 1; - } - INIT_LIST_HEAD(&flow->waiters); - radix_tree_insert(&node->qrtr_tx_flow, key, flow); - } - mutex_unlock(&node->qrtr_tx_lock); - ret = timeo; for (;;) { mutex_lock(&node->qrtr_tx_lock); + flow = radix_tree_lookup(&node->qrtr_tx_flow, key); + if (!flow) { + flow = kzalloc(sizeof(*flow), GFP_KERNEL); + if (!flow) { + mutex_unlock(&node->qrtr_tx_lock); + return 1; + } + INIT_LIST_HEAD(&flow->waiters); + radix_tree_insert(&node->qrtr_tx_flow, key, flow); + } + if (atomic_read(&flow->pending) < QRTR_TX_FLOW_HIGH) { atomic_inc(&flow->pending); confirm_rx = atomic_read(&flow->pending) == @@ -843,7 +849,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) cb->type != QRTR_TYPE_RESUME_TX) goto err; - pm_wakeup_ws_event(node->ws, 0, true); + pm_wakeup_ws_event(node->ws, qrtr_wakeup_ms, true); skb->data_len = size; skb->len = size; @@ -1038,11 +1044,17 @@ static void qrtr_node_rx_work(struct kthread_work *work) } else if (cb->dst_node != qrtr_local_nid && cb->type == QRTR_TYPE_DATA) { qrtr_fwd_pkt(skb, cb); + } else if (cb->type == QRTR_TYPE_DEL_PROC) { + qrtr_handle_del_proc(node, skb); } else { ipc = qrtr_port_lookup(cb->dst_port); if (!ipc) { kfree_skb(skb); } else { + if (cb->type == QRTR_TYPE_DEL_SERVER || + cb->type == QRTR_TYPE_DEL_CLIENT) { + qrtr_cleanup_flow_control(node, skb); + } qrtr_sock_queue_skb(node, skb, ipc); qrtr_port_put(ipc); } @@ -1050,6 +1062,94 @@ static void qrtr_node_rx_work(struct kthread_work *work) } } +static void qrtr_cleanup_flow_control(struct qrtr_node *node, + struct sk_buff *skb) +{ + struct qrtr_ctrl_pkt *pkt; + unsigned long key; + void __rcu **slot; + struct sockaddr_qrtr src; + struct qrtr_tx_flow *flow; + struct radix_tree_iter iter; + struct qrtr_tx_flow_waiter *waiter; + struct qrtr_tx_flow_waiter *temp; + u32 cmd; + + pkt = 
(void *)skb->data; + cmd = le32_to_cpu(pkt->cmd); + + if (cmd == QRTR_TYPE_DEL_SERVER) { + src.sq_node = le32_to_cpu(pkt->server.node); + src.sq_port = le32_to_cpu(pkt->server.port); + } else { + src.sq_node = le32_to_cpu(pkt->client.node); + src.sq_port = le32_to_cpu(pkt->client.port); + } + + key = (u64)src.sq_node << 32 | src.sq_port; + + mutex_lock(&node->qrtr_tx_lock); + flow = radix_tree_lookup(&node->qrtr_tx_flow, key); + if (!flow) { + mutex_unlock(&node->qrtr_tx_lock); + return; + } + + list_for_each_entry_safe(waiter, temp, &flow->waiters, node) { + list_del(&waiter->node); + sock_put(waiter->sk); + kfree(waiter); + } + + radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) { + if (flow == (struct qrtr_tx_flow *)rcu_dereference(*slot)) { + radix_tree_iter_delete(&node->qrtr_tx_flow, + &iter, slot); + kfree(flow); + break; + } + } + mutex_unlock(&node->qrtr_tx_lock); +} + +static void qrtr_handle_del_proc(struct qrtr_node *node, struct sk_buff *skb) +{ + struct sockaddr_qrtr src = {AF_QIPCRTR, 0, QRTR_PORT_CTRL}; + struct sockaddr_qrtr dst = {AF_QIPCRTR, qrtr_local_nid, QRTR_PORT_CTRL}; + struct qrtr_ctrl_pkt pkt = {0,}; + struct qrtr_tx_flow_waiter *waiter; + struct qrtr_tx_flow_waiter *temp; + struct radix_tree_iter iter; + struct qrtr_tx_flow *flow; + void __rcu **slot; + unsigned long node_id; + + skb_copy_bits(skb, 0, &pkt, sizeof(pkt)); + src.sq_node = le32_to_cpu(pkt.proc.node); + /* Free tx flow counters */ + mutex_lock(&node->qrtr_tx_lock); + radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) { + flow = rcu_dereference(*slot); + /* extract node id from the index key */ + node_id = (iter.index & 0xFFFFFFFF00000000) >> 32; + if (node_id != src.sq_node) + continue; + list_for_each_entry_safe(waiter, temp, &flow->waiters, node) { + list_del(&waiter->node); + sock_put(waiter->sk); + kfree(waiter); + } + radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot); + kfree(flow); + } + mutex_unlock(&node->qrtr_tx_lock); + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = cpu_to_le32(QRTR_TYPE_BYE); + skb_store_bits(skb, 0, &pkt, sizeof(pkt)); + qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst, 0); +} + static void qrtr_hello_work(struct kthread_work *work) { struct sockaddr_qrtr from = {AF_QIPCRTR, 0, QRTR_PORT_CTRL}; @@ -1730,6 +1830,11 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg, rc = copied; if (addr) { + /* There is an anonymous 2-byte hole after sq_family, + * make sure to clear it. 
+ */ + memset(addr, 0, sizeof(*addr)); + addr->sq_family = AF_QIPCRTR; addr->sq_node = cb->src_node; addr->sq_port = cb->src_port; diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c index aec36db4e4ef..e7590bce019c 100644 --- a/net/qrtr/smd.c +++ b/net/qrtr/smd.c @@ -24,7 +24,7 @@ static int qcom_smd_qrtr_callback(struct rpmsg_device *rpdev, int rc; if (!qdev) { - pr_err("%d:Not ready\n", __func__); + pr_err_ratelimited("%s:Not ready\n", __func__); return -EAGAIN; } @@ -64,7 +64,7 @@ static int qcom_smd_qrtr_probe(struct rpmsg_device *rpdev) u32 net_id; bool rt; int rc; - pr_err("%d:Entered\n", __func__); + pr_info("%s:Entered\n", __func__); qdev = devm_kzalloc(&rpdev->dev, sizeof(*qdev), GFP_KERNEL); if (!qdev) @@ -86,8 +86,7 @@ static int qcom_smd_qrtr_probe(struct rpmsg_device *rpdev) dev_set_drvdata(&rpdev->dev, qdev); - pr_err("%d:SMD QRTR driver probed\n", __func__); - dev_dbg(&rpdev->dev, "SMD QRTR driver probed\n"); + pr_info("%s:SMD QRTR driver probed\n", __func__); return 0; } diff --git a/security/Kconfig b/security/Kconfig index e483bbcb72c1..bfb53027829f 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -18,15 +18,6 @@ config SECURITY_DMESG_RESTRICT If you are unsure how to answer this question, answer N. -config SECURITY_PERF_EVENTS_RESTRICT - bool "Restrict unprivileged use of performance events" - depends on PERF_EVENTS - help - If you say Y here, the kernel.perf_event_paranoid sysctl - will be set to 3 by default, and no unprivileged use of the - perf_event_open syscall will be permitted unless it is - changed. - config SECURITY bool "Enable different security models" depends on SYSFS diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index d1f6a15cf977..f08dcc0ee601 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -2792,6 +2792,7 @@ int snd_soc_register_card(struct snd_soc_card *card) mutex_init(&card->mutex); mutex_init(&card->dapm_mutex); mutex_init(&card->dapm_power_mutex); + spin_lock_init(&card->dpcm_lock); ret = snd_soc_instantiate_card(card); if (ret != 0) diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 76ac88f51468..f88ace034ca3 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1340,8 +1340,10 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe, dpcm->fe = fe; be->dpcm[stream].runtime = fe->dpcm[stream].runtime; dpcm->state = SND_SOC_DPCM_LINK_STATE_NEW; + spin_lock(&fe->card->dpcm_lock); list_add(&dpcm->list_be, &fe->dpcm[stream].be_clients); list_add(&dpcm->list_fe, &be->dpcm[stream].fe_clients); + spin_unlock(&fe->card->dpcm_lock); dev_dbg(fe->dev, "connected new DPCM %s path %s %s %s\n", stream ? 
"capture" : "playback", fe->dai_link->name, @@ -1406,8 +1408,10 @@ void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream) #ifdef CONFIG_DEBUG_FS debugfs_remove(dpcm->debugfs_state); #endif + spin_lock(&fe->card->dpcm_lock); list_del(&dpcm->list_be); list_del(&dpcm->list_fe); + spin_unlock(&fe->card->dpcm_lock); kfree(dpcm); } } @@ -1672,9 +1676,11 @@ void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream) { struct snd_soc_dpcm *dpcm; + spin_lock(&fe->card->dpcm_lock); list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) dpcm->be->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; + spin_unlock(&fe->card->dpcm_lock); } static void dpcm_be_dai_startup_unwind(struct snd_soc_pcm_runtime *fe, @@ -3044,11 +3050,13 @@ static int dpcm_run_update_startup(struct snd_soc_pcm_runtime *fe, int stream) dpcm_be_dai_shutdown(fe, stream); disconnect: /* disconnect any non started BEs */ + spin_lock(&fe->card->dpcm_lock); list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) { struct snd_soc_pcm_runtime *be = dpcm->be; if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE; } + spin_unlock(&fe->card->dpcm_lock); return ret; } @@ -3662,7 +3670,9 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe, { struct snd_soc_dpcm *dpcm; int state; + int ret = 1; + spin_lock(&fe->card->dpcm_lock); list_for_each_entry(dpcm, &be->dpcm[stream].fe_clients, list_fe) { if (dpcm->fe == fe) @@ -3671,12 +3681,15 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe, state = dpcm->fe->dpcm[stream].state; if (state == SND_SOC_DPCM_STATE_START || state == SND_SOC_DPCM_STATE_PAUSED || - state == SND_SOC_DPCM_STATE_SUSPEND) - return 0; + state == SND_SOC_DPCM_STATE_SUSPEND) { + ret = 0; + break; + } } + spin_unlock(&fe->card->dpcm_lock); /* it's safe to free/stop this BE DAI */ - return 1; + return ret; } EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_free_stop); @@ -3689,7 +3702,9 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe, { struct snd_soc_dpcm *dpcm; int state; + int ret = 1; + spin_lock(&fe->card->dpcm_lock); list_for_each_entry(dpcm, &be->dpcm[stream].fe_clients, list_fe) { if (dpcm->fe == fe) @@ -3699,12 +3714,15 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe, if (state == SND_SOC_DPCM_STATE_START || state == SND_SOC_DPCM_STATE_PAUSED || state == SND_SOC_DPCM_STATE_SUSPEND || - state == SND_SOC_DPCM_STATE_PREPARE) - return 0; + state == SND_SOC_DPCM_STATE_PREPARE) { + ret = 0; + break; + } } + spin_unlock(&fe->card->dpcm_lock); /* it's safe to change hw_params */ - return 1; + return ret; } EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_params); @@ -3770,6 +3788,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe, goto out; } + spin_lock(&fe->card->dpcm_lock); list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) { struct snd_soc_pcm_runtime *be = dpcm->be; params = &dpcm->hw_params; @@ -3790,7 +3809,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe, params_channels(params), params_rate(params)); } - + spin_unlock(&fe->card->dpcm_lock); out: return offset; }