esoc: Add snapshot of esoc driver

This is a snapshot of the esoc driver as of msm-4.14 commit
<55e5efd553c10ef>. (Merge "nl80211: Fix external_auth check
for offloaded authentication")

Change-Id: Id47bcba3a5342986c3496707eb1e7cec03f19e3b
Signed-off-by: Rishabh Bhatnagar <rishabhb@codeaurora.org>
Rishabh Bhatnagar 2018-05-23 16:58:07 -07:00
parent 40f5856822
commit f2e94fd87b
17 changed files with 4165 additions and 0 deletions


@@ -0,0 +1,164 @@
Attached MDM Modem Devices
External modems are devices that are attached to the msm and controlled by gpios.
There is also a data channel between the msm and the external modem that sometimes needs
to be reset.
Required Properties:
- compatible: The bus devices need to be compatible with
"qcom,ext-mdm9x55" or "qcom,ext-sdx50m".
Required named gpio properties:
- qcom,mdm2ap-errfatal-gpio: gpio for the external modem to indicate to the apps processor
of an error fatal condition on the modem.
- qcom,ap2mdm-errfatal-gpio: gpio for the apps processor to indicate to the external modem
of an error fatal condition on the apps processor.
- qcom,mdm2ap-status-gpio: gpio to indicate to the apps processor when there is a watchdog
bite on the external modem.
- qcom,ap2mdm-status-gpio: gpio for the apps processor to indicate to the modem that an apps
processor watchdog bite has occurred.
- qcom,ap2mdm-soft-reset-gpio: gpio for the apps processor to use to soft-reset the external
modem. If the flags parameter has a value of 0x1 then the gpio is active LOW.
Required Interrupts:
- "err_fatal_irq": Interrupt generated on the apps processor when the error fatal gpio is pulled
high by the external modem.
- "status_irq": Interrupt generated on the apps processor when the external modem drives the
mdm2ap-status gpio low. This usually indicates a watchdog bite on the modem.
- "plbrdy_irq": Interrupt generated on the apps processor when the mdm2ap-pblrdy gpio is pulled
either high or low by the external modem. This is an indication that the modem
has rebooted.
- "mdm2ap_vddmin_irq": Interrupt generated on the apps processor when the external modem goes
into vddmin power state.
Optional named gpio properties:
- qcom,mdm2ap-pblrdy-gpio: gpio used by some external modems to indicate when the modem has
booted into the PBL bootloader.
- qcom,ap2mdm-wakeup-gpio: gpio used by the apps processor to wake the external modem
out of a low power state.
- qcom,ap2mdm-chnl-rdy-gpio: gpio used by the apps processor to inform the external modem
that the data link is ready.
- qcom,mdm2ap-wakeup-gpio: gpio from the external modem to the apps processor to wake it
out of a low power state.
- qcom,ap2mdm-vddmin-gpio: gpio to indicate to the external modem when the apps processor
is about to enter vddmin power state.
- qcom,mdm2ap-vddmin-gpio: gpio used by the external modem to inform the apps processor
when it is about to enter vddmin power state.
- qcom,ap2mdm-kpdpwr-gpio: gpio used to simulate a power button press on the external
modem. Some modems use this as part of their initial power-up sequence.
If the "flags" parameter has a value of 0x1 then it is active LOW.
- qcom,ap2mdm-pmic-pwr-en-gpio: Some modems need this gpio for the apps processor to enable
the pmic on the external modem.
- qcom,use-usb-port-gpio: Some modems use this gpio to switch a port connection from UART to USB.
This is used during firmware upgrade of some modems.
- qcom,mdm-link-detect-gpio: Some modems may support two interfaces. This gpio
indicates whether only one or both links can be used.
Optional driver parameters:
- qcom,ramdump-delay-ms: time in milliseconds to wait before starting to collect ramdumps.
This interval is the time to wait after an error on the external modem is
signaled to the apps processor before starting to collect ramdumps. Its
value depends on the type of external modem (e.g. MDM vs QSC), and how
error fatal handling is done on the modem.
The default value is 2 seconds (2000 milliseconds) as specified by the
mdm9x15 software developer. Consultation with the developer of the modem
software is required to determine this value for that modem.
- qcom,ps-hold-delay-ms: minimum delay in milliseconds between consecutive PS_HOLD toggles.
SGLTE targets that use a QSC1215 modem require a minimum delay between consecutive
toggling of the PS_HOLD pmic input. For one target it is 500 milliseconds but it
may vary depending on the target and how the external modem is connected. The value
is specified by the hardware designers.
- qcom,early-power-on: boolean flag to indicate whether to power on the modem when the device is probed.
- qcom,sfr-query: boolean flag to indicate whether to query the modem for a reset reason.
- qcom,no-powerdown-after-ramdumps: boolean flag to indicate whether to power down the modem after ramdumps.
- qcom,no-a2m-errfatal-on-ssr: boolean to tell driver not to raise ap2mdm errfatal during SSR.
- qcom,no-reset-on-first-powerup: boolean to tell driver not to reset the modem when first
powering up the modem.
- qcom,ramdump-timeout-ms: ramdump timeout interval in milliseconds.
This interval is the time to wait for collection of the external modem's ramdump
to complete. Its value depends on the speed of the data connection between the
external modem and the apps processor on the platform. If the connection is a
UART port then this delay needs to be longer in order to avoid premature timeout
of the ramdump collection.
The default value is 2 minutes (120000 milliseconds) which is based on the
measured time it takes over a UART connection. It is reduced when the data
connection is an HSIC port. The value is usually tuned empirically for a
particular target.
- qcom,image-upgrade-supported: boolean flag to indicate if software upgrade is supported.
- qcom,support-shutdown: boolean flag to indicate if graceful shutdown is supported.
- qcom,vddmin-drive-strength: drive strength in milliamps of the ap2mdm-vddmin gpio.
The ap2mdm_vddmin gpio is controlled by the RPM processor. It is pulled low
to indicate to the external modem that the apps processor has entered vddmin
state, and high to indicate the reverse. Its parameters are passed to the RPM
software from the HLOS because the RPM software has no way of saving this type
of configuration when an external modem is attached.
The value of the drive strength is specified by the hardware designers. A value
of 8 milliamps is typical.
This property is ignored if the property "qcom,ap2mdm-vddmin-gpio" is
not set.
- qcom,vddmin-modes: a string indicating the "modes" requested for the ap2mdm-vddmin gpio.
This value is passed to RPM and is used by the RPM module to determine the
gpio mux function. The only mode string currently supported is "normal", which
corresponds to the value 0x03 that is passed to RPM.
- qcom,restart-group: List of subsystems that will need to restart together.
- qcom,mdm-dual-link: Boolean indicating whether both links can be used for
communication.
- qcom,ssctl-instance-id: Instance id used by the subsystem to connect with the SSCTL service.
- qcom,sysmon-id: platform device id that sysmon is probed with for the subsystem.
- qcom,pil-force-shutdown: Boolean. If set, the SSR framework will not trigger graceful shutdown
on behalf of the subsystem driver.
- qcom,mdm-link-info: a string indicating additional info about the physical link.
For example: "devID_domain.bus.slot" in case of PCIe.
- qcom,mdm-auto-boot: Boolean. Indicates that this instance of esoc boots independently.
- qcom,mdm-statusline-not-a-powersource: Boolean. If set, status line to esoc device is not a
power source.
- qcom,mdm-userspace-handle-shutdown: Boolean. If set, userspace handles shutdown requests.
- qcom,shutdown-timeout-ms: graceful shutdown timeout in milliseconds.
This interval is the time needed for the external modem to gracefully shutdown
after the host sends a shutdown command. The value depends on how long it takes
for the high level OS in the external modem to shutdown gracefully. The default
value is 10000 milliseconds.
- qcom,reset-time-ms: time it takes for the external modem to forcefully reset in milliseconds.
This interval is the time it takes to toggle the reset of an external modem by
holding down the reset pin. The value depends on the external modem's power
management boot options. The default value is 203 milliseconds.
- qcom,esoc-skip-restart-for-mdm-crash: Boolean. If set, the esoc framework will skip the warm
reboot phase during a modem crash.
Example:
mdm0: qcom,mdm0 {
compatible = "qcom,mdm2-modem";
cell-index = <0>;
#address-cells = <0>;
interrupt-parent = <&mdm0>;
interrupts = <0 1 2 3>;
#interrupt-cells = <1>;
interrupt-map-mask = <0xffffffff>;
interrupt-map =
<0 &msmgpio 82 0x3
1 &msmgpio 46 0x3
2 &msmgpio 80 0x3
3 &msmgpio 27 0x3>;
interrupt-names =
"err_fatal_irq",
"status_irq",
"plbrdy_irq",
"mdm2ap_vddmin_irq";
qcom,mdm2ap-errfatal-gpio = <&msmgpio 82 0x00>;
qcom,ap2mdm-errfatal-gpio = <&msmgpio 106 0x00>;
qcom,mdm2ap-status-gpio = <&msmgpio 46 0x00>;
qcom,ap2mdm-status-gpio = <&msmgpio 105 0x00>;
qcom,ap2mdm-soft-reset-gpio = <&msmgpio 24 0x00>;
qcom,mdm2ap-pblrdy-gpio = <&msmgpio 80 0x00>;
qcom,ap2mdm-wakeup-gpio = <&msmgpio 104 0x00>;
qcom,ap2mdm-vddmin-gpio = <&msmgpio 108 0x00>;
qcom,mdm2ap-vddmin-gpio = <&msmgpio 27 0x00>;
qcom,ramdump-delay-ms = <2000>;
qcom,ramdump-timeout-ms = <120000>;
qcom,vddmin-modes = "normal";
qcom,vddmin-drive-strength = <8>;
qcom,ssctl-instance-id = <10>;
qcom,sysmon-id = <20>;
};


@@ -217,4 +217,6 @@ source "drivers/siox/Kconfig"
source "drivers/slimbus/Kconfig"
source "drivers/esoc/Kconfig"
endmenu


@@ -183,5 +183,6 @@ obj-$(CONFIG_FPGA) += fpga/
obj-$(CONFIG_FSI) += fsi/
obj-$(CONFIG_TEE) += tee/
obj-$(CONFIG_MULTIPLEXER) += mux/
obj-$(CONFIG_ESOC) += esoc/
obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
obj-$(CONFIG_SIOX) += siox/

drivers/esoc/Kconfig (new file, 73 lines)

@@ -0,0 +1,73 @@
# SPDX-License-Identifier: GPL-2.0
#
# External soc control infrastructure and drivers
#
menuconfig ESOC
bool "External SOCs Control"
help
External SOCs can be powered on and monitored by user
space or kernel drivers. Additionally they can be controlled
to respond to control commands. This framework provides an
interface to track events related to the external slave socs.
if ESOC
config ESOC_DEV
bool "ESOC userspace interface"
help
Say yes here to enable a userspace representation of the control
link. Userspace can register a request engine or a command engine
for the external soc. It can receive event notifications from the
control link.
config ESOC_CLIENT
bool "ESOC client interface"
depends on OF
help
Say yes here to enable client interface for external socs.
Clients can specify the external soc that they are interested in
by using device tree phandles. Based on this, clients can register
for notifications from a specific soc.
config ESOC_DEBUG
bool "ESOC debug support"
help
Say yes here to enable debugging support in the ESOC framework
and individual esoc drivers. This adds extra logging so that,
in the event of a bug, the log can help trace the problem.
config ESOC_MDM_4x
bool "Add support for external modems"
help
On some Qualcomm boards, an external modem such as mdm9x55 or sdx50m
is connected to a primary msm. The primary soc can control/monitor
the modem via gpios. The data communication with such modems can
occur over PCIe or HSIC.
config ESOC_MDM_DRV
tristate "Command engine for 4x series external modems"
help
Provides a command engine to control the behavior of external
modems (such as mdm9x55 or sdx50m). That is, it extends the SSR
framework to power-off, power-on or handle crash scenarios. It
also listens for events on the external modem.
config ESOC_MDM_DBG_ENG
tristate "debug engine for 4x series external modems"
depends on ESOC_MDM_DRV
help
Mainly used as a debug interface to probe the modem against various
scenarios. It provides a user interface to mask out certain
commands sent by the command engine to the external modem. It also allows
masking of certain notifications being sent to the external modem.
config MDM_DBG_REQ_ENG
tristate "manual request engine for 4x series external modems"
depends on ESOC_MDM_DBG_ENG
help
Provides a user interface to handle incoming requests from
the external modem. Allows for debugging of IPC mechanism
between the external modem and the primary soc.
endif

drivers/esoc/Makefile (new file, 10 lines)

@@ -0,0 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
# generic external soc control support
ccflags-$(CONFIG_ESOC_DEBUG) := -DDEBUG
obj-$(CONFIG_ESOC) += esoc_bus.o
obj-$(CONFIG_ESOC_DEV) += esoc_dev.o
obj-$(CONFIG_ESOC_CLIENT) += esoc_client.o
obj-$(CONFIG_ESOC_MDM_4x) += esoc-mdm-pon.o esoc-mdm-4x.o
obj-$(CONFIG_ESOC_MDM_DRV) += esoc-mdm-drv.o
obj-$(CONFIG_ESOC_MDM_DBG_ENG) += esoc-mdm-dbg-eng.o

drivers/esoc/esoc-mdm-4x.c (new file, 1123 lines)

File diff suppressed because it is too large.


@@ -0,0 +1,362 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/atomic.h>
#include <linux/device.h>
#include "esoc.h"
/*
 * struct esoc_mask: Specifies if a command/notifier is masked, and
 * what the trigger value is for the mask to take effect.
 * @mask_trigger: decremented on each check; the mask takes effect
 * once this count drops below zero.
 * @mask: boolean to determine if command should be masked.
 */
struct esoc_mask {
atomic_t mask_trigger;
bool mask;
};
/*
* manual_to_esoc_cmd: Converts a user provided command
* to a corresponding esoc command.
* @cmd: ESOC command
* @manual_cmd: user specified command string.
*/
struct manual_to_esoc_cmd {
unsigned int cmd;
char manual_cmd[20];
};
/*
* manual_to_esoc_notify: Converts a user provided notification
* to corresponding esoc notification for Primary SOC.
* @notify: ESOC notification.
* @manual_notifier: user specified notification string.
*/
struct manual_to_esoc_notify {
unsigned int notify;
char manual_notify[20];
};
static const struct manual_to_esoc_cmd cmd_map[] = {
{
.cmd = ESOC_PWR_ON,
.manual_cmd = "PON",
},
{
.cmd = ESOC_PREPARE_DEBUG,
.manual_cmd = "ENTER_DLOAD",
},
{ .cmd = ESOC_PWR_OFF,
.manual_cmd = "POFF",
},
{
.cmd = ESOC_FORCE_PWR_OFF,
.manual_cmd = "FORCE_POFF",
},
};
static struct esoc_mask cmd_mask[] = {
[ESOC_PWR_ON] = {
.mask = false,
.mask_trigger = ATOMIC_INIT(1),
},
[ESOC_PREPARE_DEBUG] = {
.mask = false,
.mask_trigger = ATOMIC_INIT(0),
},
[ESOC_PWR_OFF] = {
.mask = false,
.mask_trigger = ATOMIC_INIT(0),
},
[ESOC_FORCE_PWR_OFF] = {
.mask = false,
.mask_trigger = ATOMIC_INIT(0),
},
};
static const struct manual_to_esoc_notify notify_map[] = {
{
.notify = ESOC_PRIMARY_REBOOT,
.manual_notify = "REBOOT",
},
{
.notify = ESOC_PRIMARY_CRASH,
.manual_notify = "PANIC",
},
};
static struct esoc_mask notify_mask[] = {
[ESOC_PRIMARY_REBOOT] = {
.mask = false,
.mask_trigger = ATOMIC_INIT(0),
},
[ESOC_PRIMARY_CRASH] = {
.mask = false,
.mask_trigger = ATOMIC_INIT(0),
},
};
bool dbg_check_cmd_mask(unsigned int cmd)
{
pr_debug("command to mask %d\n", cmd);
if (cmd_mask[cmd].mask)
return atomic_add_negative(-1, &cmd_mask[cmd].mask_trigger);
else
return false;
}
EXPORT_SYMBOL(dbg_check_cmd_mask);
bool dbg_check_notify_mask(unsigned int notify)
{
pr_debug("notifier to mask %d\n", notify);
if (notify_mask[notify].mask)
return atomic_add_negative(-1,
&notify_mask[notify].mask_trigger);
else
return false;
}
EXPORT_SYMBOL(dbg_check_notify_mask);
/*
* Create driver attributes that let you mask
* specific commands.
*/
static ssize_t command_mask_store(struct device_driver *drv, const char *buf,
size_t count)
{
unsigned int cmd, i;
pr_debug("user input command %s", buf);
for (i = 0; i < ARRAY_SIZE(cmd_map); i++) {
if (!strcmp(cmd_map[i].manual_cmd, buf)) {
/*
* Map manual command string to ESOC command
* set mask for ESOC command
*/
cmd = cmd_map[i].cmd;
cmd_mask[cmd].mask = true;
pr_debug("Setting mask for manual command %s\n",
buf);
break;
}
}
if (i >= ARRAY_SIZE(cmd_map))
pr_err("invalid command specified\n");
return count;
}
static DRIVER_ATTR_WO(command_mask);
static ssize_t notifier_mask_store(struct device_driver *drv, const char *buf,
size_t count)
{
unsigned int notify, i;
pr_debug("user input notifier %s", buf);
for (i = 0; i < ARRAY_SIZE(notify_map); i++) {
if (!strcmp(buf, notify_map[i].manual_notify)) {
/*
* Map manual notifier string to primary soc
* notifier. Also set mask for the notifier.
*/
notify = notify_map[i].notify;
notify_mask[notify].mask = true;
pr_debug("Setting mask for manual notification %s\n",
buf);
break;
}
}
if (i >= ARRAY_SIZE(notify_map))
pr_err("invalid notifier specified\n");
return count;
}
static DRIVER_ATTR_WO(notifier_mask);
#ifdef CONFIG_MDM_DBG_REQ_ENG
static struct esoc_clink *dbg_clink;
/* Last recorded request from esoc */
static enum esoc_req last_req;
static DEFINE_SPINLOCK(req_lock);
/*
* esoc_to_user: Conversion of esoc ids to user visible strings
* id: esoc request, command, notifier, event id
* str: string equivalent of the above
*/
struct esoc_to_user {
unsigned int id;
char str[20];
};
static struct esoc_to_user in_to_resp[] = {
{
.id = ESOC_IMG_XFER_DONE,
.str = "XFER_DONE",
},
{
.id = ESOC_BOOT_DONE,
.str = "BOOT_DONE",
},
{
.id = ESOC_BOOT_FAIL,
.str = "BOOT_FAIL",
},
{
.id = ESOC_IMG_XFER_RETRY,
.str = "XFER_RETRY",
},
{ .id = ESOC_IMG_XFER_FAIL,
.str = "XFER_FAIL",
},
{
.id = ESOC_UPGRADE_AVAILABLE,
.str = "UPGRADE",
},
{ .id = ESOC_DEBUG_DONE,
.str = "DEBUG_DONE",
},
{
.id = ESOC_DEBUG_FAIL,
.str = "DEBUG_FAIL",
},
};
static struct esoc_to_user req_to_str[] = {
{
.id = ESOC_REQ_IMG,
.str = "REQ_IMG",
},
{
.id = ESOC_REQ_DEBUG,
.str = "REQ_DEBUG",
},
{
.id = ESOC_REQ_SHUTDOWN,
.str = "REQ_SHUTDOWN",
},
};
static ssize_t req_eng_resp_store(struct device_driver *drv, const char *buf,
size_t count)
{
unsigned int i;
const struct esoc_clink_ops *const clink_ops = dbg_clink->clink_ops;
dev_dbg(&dbg_clink->dev, "user input req eng response %s\n", buf);
for (i = 0; i < ARRAY_SIZE(in_to_resp); i++) {
size_t len1 = strlen(buf);
size_t len2 = strlen(in_to_resp[i].str);
if (len1 == len2 && !strcmp(buf, in_to_resp[i].str)) {
clink_ops->notify(in_to_resp[i].id, dbg_clink);
break;
}
}
if (i >= ARRAY_SIZE(in_to_resp))
dev_err(&dbg_clink->dev, "Invalid resp %s, specified\n", buf);
return count;
}
static DRIVER_ATTR_WO(req_eng_resp);
static ssize_t last_esoc_req_show(struct device_driver *drv, char *buf)
{
unsigned int i;
unsigned long flags;
size_t count = 0;
spin_lock_irqsave(&req_lock, flags);
for (i = 0; i < ARRAY_SIZE(req_to_str); i++) {
if (last_req == req_to_str[i].id) {
count = snprintf(buf, PAGE_SIZE, "%s\n",
req_to_str[i].str);
break;
}
}
spin_unlock_irqrestore(&req_lock, flags);
return count;
}
static DRIVER_ATTR_RO(last_esoc_req);
static void esoc_handle_req(enum esoc_req req, struct esoc_eng *eng)
{
unsigned long flags;
spin_lock_irqsave(&req_lock, flags);
last_req = req;
spin_unlock_irqrestore(&req_lock, flags);
}
static void esoc_handle_evt(enum esoc_evt evt, struct esoc_eng *eng)
{
}
static struct esoc_eng dbg_req_eng = {
.handle_clink_req = esoc_handle_req,
.handle_clink_evt = esoc_handle_evt,
};
int register_dbg_req_eng(struct esoc_clink *clink,
struct device_driver *drv)
{
int ret;
dbg_clink = clink;
ret = driver_create_file(drv, &driver_attr_req_eng_resp);
if (ret)
return ret;
ret = driver_create_file(drv, &driver_attr_last_esoc_req);
if (ret) {
dev_err(&clink->dev, "Unable to create last esoc req\n");
goto last_req_err;
}
ret = esoc_clink_register_req_eng(clink, &dbg_req_eng);
if (ret) {
pr_err("Unable to register req eng\n");
goto req_eng_fail;
}
spin_lock_init(&req_lock);
return 0;
req_eng_fail:
driver_remove_file(drv, &driver_attr_last_esoc_req);
last_req_err:
driver_remove_file(drv, &driver_attr_req_eng_resp);
return ret;
}
#else
int register_dbg_req_eng(struct esoc_clink *clink, struct device_driver *d)
{
return 0;
}
#endif
int mdm_dbg_eng_init(struct esoc_drv *esoc_drv,
struct esoc_clink *clink)
{
int ret;
struct device_driver *drv = &esoc_drv->driver;
ret = driver_create_file(drv, &driver_attr_command_mask);
if (ret) {
pr_err("Unable to create command mask file\n");
goto cmd_mask_err;
}
ret = driver_create_file(drv, &driver_attr_notifier_mask);
if (ret) {
pr_err("Unable to create notify mask file\n");
goto notify_mask_err;
}
ret = register_dbg_req_eng(clink, drv);
if (ret) {
pr_err("Failed to register esoc dbg req eng\n");
goto dbg_req_fail;
}
return 0;
dbg_req_fail:
driver_remove_file(drv, &driver_attr_notifier_mask);
notify_mask_err:
driver_remove_file(drv, &driver_attr_command_mask);
cmd_mask_err:
return ret;
}
EXPORT_SYMBOL(mdm_dbg_eng_init);
MODULE_LICENSE("GPL v2");
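The masks above are armed from userspace by writing one of the cmd_map/notify_map strings (e.g. "PON" or "PANIC") to the command_mask and notifier_mask driver attributes. The command engine in esoc-mdm-drv.c below consults them through mdm_dbg_stall_cmd()/mdm_dbg_stall_notify() and the debug_init_done flag, which come from mdm-dbg.h; that header is part of this change but is not shown in this view. A hedged sketch of what that glue plausibly looks like, reconstructed only from the call sites visible here (the real header may differ, and also has to cover mdm_dbg_eng_init() when the debug engine is compiled out):

/* Sketch (assumption): glue in mdm-dbg.h, reconstructed from its call sites. */
#ifndef __MDM_DBG_H__
#define __MDM_DBG_H__

extern bool debug_init_done;	/* set by esoc_ssr_probe() after mdm_dbg_eng_init() */

#if IS_ENABLED(CONFIG_ESOC_MDM_DBG_ENG)
bool dbg_check_cmd_mask(unsigned int cmd);
bool dbg_check_notify_mask(unsigned int notify);
#else
static inline bool dbg_check_cmd_mask(unsigned int cmd) { return false; }
static inline bool dbg_check_notify_mask(unsigned int notify) { return false; }
#endif

/* Stall a command/notification only if the debug engine came up and masked it. */
static inline bool mdm_dbg_stall_cmd(unsigned int cmd)
{
	return debug_init_done && dbg_check_cmd_mask(cmd);
}

static inline bool mdm_dbg_stall_notify(unsigned int notify)
{
	return debug_init_done && dbg_check_notify_mask(notify);
}

#endif /* __MDM_DBG_H__ */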

drivers/esoc/esoc-mdm-drv.c (new file, 527 lines)

@@ -0,0 +1,527 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2013-2015, 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/esoc_client.h>
#include "esoc.h"
#include "esoc-mdm.h"
#include "mdm-dbg.h"
/* Default number of powerup trial requests per session */
#define ESOC_DEF_PON_REQ 2
static unsigned int n_pon_tries = ESOC_DEF_PON_REQ;
module_param(n_pon_tries, uint, 0644);
MODULE_PARM_DESC(n_pon_tries,
"Number of power-on retries allowed upon boot failure");
enum esoc_boot_fail_action {
BOOT_FAIL_ACTION_RETRY,
BOOT_FAIL_ACTION_COLD_RESET,
BOOT_FAIL_ACTION_SHUTDOWN,
BOOT_FAIL_ACTION_PANIC,
BOOT_FAIL_ACTION_NOP,
};
static unsigned int boot_fail_action = BOOT_FAIL_ACTION_NOP;
module_param(boot_fail_action, uint, 0644);
MODULE_PARM_DESC(boot_fail_action,
"Actions: 0:Retry PON; 1:Cold reset; 2:Power-down; 3:APQ Panic; 4:No action");
enum esoc_pon_state {
PON_INIT,
PON_SUCCESS,
PON_RETRY,
PON_FAIL
};
enum {
PWR_OFF = 0x1,
PWR_ON,
BOOT,
RUN,
CRASH,
IN_DEBUG,
SHUTDOWN,
RESET,
PEER_CRASH,
};
struct mdm_drv {
unsigned int mode;
struct esoc_eng cmd_eng;
struct completion pon_done;
struct completion req_eng_wait;
struct esoc_clink *esoc_clink;
enum esoc_pon_state pon_state;
struct workqueue_struct *mdm_queue;
struct work_struct ssr_work;
struct notifier_block esoc_restart;
};
#define to_mdm_drv(d) container_of(d, struct mdm_drv, cmd_eng)
static int esoc_msm_restart_handler(struct notifier_block *nb,
unsigned long action, void *data)
{
struct mdm_drv *mdm_drv = container_of(nb, struct mdm_drv,
esoc_restart);
struct esoc_clink *esoc_clink = mdm_drv->esoc_clink;
const struct esoc_clink_ops *const clink_ops = esoc_clink->clink_ops;
if (action == SYS_RESTART) {
if (mdm_dbg_stall_notify(ESOC_PRIMARY_REBOOT))
return NOTIFY_OK;
esoc_mdm_log(
"Reboot notifier: Notifying esoc of cold reboot\n");
dev_dbg(&esoc_clink->dev, "Notifying esoc of cold reboot\n");
clink_ops->notify(ESOC_PRIMARY_REBOOT, esoc_clink);
}
return NOTIFY_OK;
}
static void mdm_handle_clink_evt(enum esoc_evt evt,
struct esoc_eng *eng)
{
struct mdm_drv *mdm_drv = to_mdm_drv(eng);
bool unexpected_state = false;
switch (evt) {
case ESOC_INVALID_STATE:
esoc_mdm_log(
"ESOC_INVALID_STATE: Calling complete with state: PON_FAIL\n");
mdm_drv->pon_state = PON_FAIL;
complete(&mdm_drv->pon_done);
break;
case ESOC_RUN_STATE:
esoc_mdm_log(
"ESOC_RUN_STATE: Calling complete with state: PON_SUCCESS\n");
mdm_drv->pon_state = PON_SUCCESS;
mdm_drv->mode = RUN;
complete(&mdm_drv->pon_done);
break;
case ESOC_RETRY_PON_EVT:
esoc_mdm_log(
"ESOC_RETRY_PON_EVT: Calling complete with state: PON_RETRY\n");
mdm_drv->pon_state = PON_RETRY;
complete(&mdm_drv->pon_done);
break;
case ESOC_UNEXPECTED_RESET:
esoc_mdm_log("evt_state: ESOC_UNEXPECTED_RESET\n");
unexpected_state = true;
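/* fall through */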
case ESOC_ERR_FATAL:
if (!unexpected_state)
esoc_mdm_log("evt_state: ESOC_ERR_FATAL\n");
/*
* Modem can crash while we are waiting for pon_done during
* a subsystem_get(). Setting mode to CRASH will prevent a
* subsequent subsystem_get() from entering poweron ops. Avoid
* this by setting mode to CRASH only if device was up and
* running.
*/
if (mdm_drv->mode == CRASH)
esoc_mdm_log(
"Modem in crash state already. Ignoring.\n");
if (mdm_drv->mode != RUN)
esoc_mdm_log("Modem not up. Ignoring.\n");
if (mdm_drv->mode == CRASH || mdm_drv->mode != RUN)
return;
mdm_drv->mode = CRASH;
esoc_mdm_log("Starting SSR work\n");
queue_work(mdm_drv->mdm_queue, &mdm_drv->ssr_work);
break;
case ESOC_REQ_ENG_ON:
esoc_mdm_log(
"evt_state: ESOC_REQ_ENG_ON; Registered a req engine\n");
complete(&mdm_drv->req_eng_wait);
break;
default:
break;
}
}
static void mdm_ssr_fn(struct work_struct *work)
{
struct mdm_drv *mdm_drv = container_of(work, struct mdm_drv, ssr_work);
/*
* If restarting esoc fails, the SSR framework triggers a kernel panic
*/
esoc_clink_request_ssr(mdm_drv->esoc_clink);
}
static void esoc_client_link_power_on(struct esoc_clink *esoc_clink,
bool mdm_crashed)
{
int i;
struct esoc_client_hook *client_hook;
dev_dbg(&esoc_clink->dev, "Calling power_on hooks\n");
esoc_mdm_log(
"Calling power_on hooks with crash state: %d\n", mdm_crashed);
for (i = 0; i < ESOC_MAX_HOOKS; i++) {
client_hook = esoc_clink->client_hook[i];
if (client_hook && client_hook->esoc_link_power_on)
client_hook->esoc_link_power_on(client_hook->priv,
mdm_crashed);
}
}
static void esoc_client_link_power_off(struct esoc_clink *esoc_clink,
bool mdm_crashed)
{
int i;
struct esoc_client_hook *client_hook;
dev_dbg(&esoc_clink->dev, "Calling power_off hooks\n");
esoc_mdm_log(
"Calling power_off hooks with crash state: %d\n", mdm_crashed);
for (i = 0; i < ESOC_MAX_HOOKS; i++) {
client_hook = esoc_clink->client_hook[i];
if (client_hook && client_hook->esoc_link_power_off) {
client_hook->esoc_link_power_off(client_hook->priv,
mdm_crashed);
}
}
}
static void mdm_crash_shutdown(const struct subsys_desc *mdm_subsys)
{
struct esoc_clink *esoc_clink =
container_of(mdm_subsys,
struct esoc_clink,
subsys);
const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
esoc_mdm_log("MDM crashed notification from SSR\n");
if (mdm_dbg_stall_notify(ESOC_PRIMARY_CRASH))
return;
clink_ops->notify(ESOC_PRIMARY_CRASH, esoc_clink);
}
static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys,
bool force_stop)
{
int ret;
struct esoc_clink *esoc_clink =
container_of(crashed_subsys, struct esoc_clink, subsys);
struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
esoc_mdm_log("Shutdown request from SSR\n");
if (mdm_drv->mode == CRASH || mdm_drv->mode == PEER_CRASH) {
esoc_mdm_log("Shutdown in crash mode\n");
if (mdm_dbg_stall_cmd(ESOC_PREPARE_DEBUG))
/* We want to mask debug command.
* In this case return success
* to move to next stage
*/
return 0;
esoc_clink_queue_request(ESOC_REQ_CRASH_SHUTDOWN, esoc_clink);
esoc_client_link_power_off(esoc_clink, true);
esoc_mdm_log("Executing the ESOC_PREPARE_DEBUG command\n");
ret = clink_ops->cmd_exe(ESOC_PREPARE_DEBUG,
esoc_clink);
if (ret) {
esoc_mdm_log("ESOC_PREPARE_DEBUG command failed\n");
dev_err(&esoc_clink->dev, "failed to enter debug\n");
return ret;
}
mdm_drv->mode = IN_DEBUG;
} else if (!force_stop) {
esoc_mdm_log("Graceful shutdown mode\n");
if (esoc_clink->subsys.sysmon_shutdown_ret) {
esoc_mdm_log(
"Executing the ESOC_FORCE_PWR_OFF command\n");
ret = clink_ops->cmd_exe(ESOC_FORCE_PWR_OFF,
esoc_clink);
} else {
if (mdm_dbg_stall_cmd(ESOC_PWR_OFF))
/* Since power off command is masked
* we return success, and leave the state
* of the command engine as is.
*/
return 0;
dev_dbg(&esoc_clink->dev, "Sending sysmon-shutdown\n");
esoc_mdm_log("Executing the ESOC_PWR_OFF command\n");
ret = clink_ops->cmd_exe(ESOC_PWR_OFF, esoc_clink);
}
if (ret) {
esoc_mdm_log(
"Executing the ESOC_PWR_OFF command failed\n");
dev_err(&esoc_clink->dev, "failed to exe power off\n");
return ret;
}
esoc_client_link_power_off(esoc_clink, false);
/* Pull the reset line low to turn off the device */
clink_ops->cmd_exe(ESOC_FORCE_PWR_OFF, esoc_clink);
mdm_drv->mode = PWR_OFF;
}
esoc_mdm_log("Shutdown completed\n");
return 0;
}
static void mdm_subsys_retry_powerup_cleanup(struct esoc_clink *esoc_clink)
{
struct mdm_ctrl *mdm = get_esoc_clink_data(esoc_clink);
struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
esoc_mdm_log("Doing cleanup\n");
esoc_client_link_power_off(esoc_clink, false);
mdm_disable_irqs(mdm);
mdm_drv->pon_state = PON_INIT;
reinit_completion(&mdm_drv->pon_done);
reinit_completion(&mdm_drv->req_eng_wait);
}
/* Returns 0 to proceed towards another retry, or an error code to quit */
static int mdm_handle_boot_fail(struct esoc_clink *esoc_clink, u8 *pon_trial)
{
struct mdm_ctrl *mdm = get_esoc_clink_data(esoc_clink);
switch (boot_fail_action) {
case BOOT_FAIL_ACTION_RETRY:
mdm_subsys_retry_powerup_cleanup(esoc_clink);
esoc_mdm_log("Request to retry a warm reset\n");
(*pon_trial)++;
break;
/*
* Issue a shutdown here and rerun the powerup again.
* This way it becomes a cold reset. Else, we end up
* issuing a cold reset & a warm reset back to back.
*/
case BOOT_FAIL_ACTION_COLD_RESET:
mdm_subsys_retry_powerup_cleanup(esoc_clink);
esoc_mdm_log("Doing cold reset by power-down and warm reset\n");
(*pon_trial)++;
mdm_power_down(mdm);
break;
case BOOT_FAIL_ACTION_PANIC:
esoc_mdm_log("Calling panic!!\n");
panic("Panic requested on external modem boot failure\n");
break;
case BOOT_FAIL_ACTION_NOP:
esoc_mdm_log("Leaving the modem in its current state\n");
return -EIO;
case BOOT_FAIL_ACTION_SHUTDOWN:
default:
mdm_subsys_retry_powerup_cleanup(esoc_clink);
esoc_mdm_log("Shutdown the modem and quit\n");
mdm_power_down(mdm);
return -EIO;
}
return 0;
}
static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys)
{
int ret;
struct esoc_clink *esoc_clink =
container_of(crashed_subsys, struct esoc_clink,
subsys);
struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
int timeout = INT_MAX;
u8 pon_trial = 1;
esoc_mdm_log("Powerup request from SSR\n");
do {
esoc_mdm_log("Boot trial: %d\n", pon_trial);
if (!esoc_clink->auto_boot &&
!esoc_req_eng_enabled(esoc_clink)) {
esoc_mdm_log("Wait for req eng registration\n");
dev_dbg(&esoc_clink->dev,
"Wait for req eng registration\n");
wait_for_completion(&mdm_drv->req_eng_wait);
}
esoc_mdm_log("Req eng available\n");
if (mdm_drv->mode == PWR_OFF) {
esoc_mdm_log("In normal power-on mode\n");
if (mdm_dbg_stall_cmd(ESOC_PWR_ON))
return -EBUSY;
esoc_mdm_log("Executing the ESOC_PWR_ON command\n");
ret = clink_ops->cmd_exe(ESOC_PWR_ON, esoc_clink);
if (ret) {
esoc_mdm_log("ESOC_PWR_ON command failed\n");
dev_err(&esoc_clink->dev, "pwr on fail\n");
return ret;
}
esoc_client_link_power_on(esoc_clink, false);
} else if (mdm_drv->mode == IN_DEBUG) {
esoc_mdm_log("In SSR power-on mode\n");
esoc_mdm_log("Executing the ESOC_EXIT_DEBUG command\n");
ret = clink_ops->cmd_exe(ESOC_EXIT_DEBUG, esoc_clink);
if (ret) {
esoc_mdm_log(
"ESOC_EXIT_DEBUG command failed\n");
dev_err(&esoc_clink->dev,
"cannot exit debug mode\n");
return ret;
}
mdm_drv->mode = PWR_OFF;
esoc_mdm_log("Executing the ESOC_PWR_ON command\n");
ret = clink_ops->cmd_exe(ESOC_PWR_ON, esoc_clink);
if (ret) {
dev_err(&esoc_clink->dev, "pwr on fail\n");
return ret;
}
esoc_client_link_power_on(esoc_clink, true);
}
/*
* In the autoboot case, it is possible to wait forever for boot
* completion if the esoc fails to boot, because there is no helper
* application that can alert the esoc driver about the boot failure.
* Bound the wait to prevent that.
*/
if (esoc_clink->auto_boot)
timeout = 10 * HZ;
esoc_mdm_log(
"Modem turned-on. Waiting for pon_done notification..\n");
ret = wait_for_completion_timeout(&mdm_drv->pon_done, timeout);
if (mdm_drv->pon_state == PON_FAIL || ret <= 0) {
dev_err(&esoc_clink->dev, "booting failed\n");
esoc_mdm_log("booting failed\n");
ret = mdm_handle_boot_fail(esoc_clink, &pon_trial);
if (ret)
return ret;
} else if (mdm_drv->pon_state == PON_RETRY) {
esoc_mdm_log(
"Boot failed. Doing cleanup and attempting to retry\n");
pon_trial++;
mdm_subsys_retry_powerup_cleanup(esoc_clink);
} else if (mdm_drv->pon_state == PON_SUCCESS) {
break;
}
} while (pon_trial <= n_pon_tries);
return 0;
}
static int mdm_subsys_ramdumps(int want_dumps,
const struct subsys_desc *crashed_subsys)
{
int ret;
struct esoc_clink *esoc_clink =
container_of(crashed_subsys, struct esoc_clink,
subsys);
const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
esoc_mdm_log("Ramdumps called from SSR\n");
if (want_dumps) {
esoc_mdm_log("Executing the ESOC_EXE_DEBUG command\n");
ret = clink_ops->cmd_exe(ESOC_EXE_DEBUG, esoc_clink);
if (ret) {
esoc_mdm_log(
"Failed executing the ESOC_EXE_DEBUG command\n");
dev_err(&esoc_clink->dev, "debugging failed\n");
return ret;
}
}
return 0;
}
static int mdm_register_ssr(struct esoc_clink *esoc_clink)
{
struct subsys_desc *subsys = &esoc_clink->subsys;
subsys->shutdown = mdm_subsys_shutdown;
subsys->ramdump = mdm_subsys_ramdumps;
subsys->powerup = mdm_subsys_powerup;
subsys->crash_shutdown = mdm_crash_shutdown;
return esoc_clink_register_ssr(esoc_clink);
}
int esoc_ssr_probe(struct esoc_clink *esoc_clink, struct esoc_drv *drv)
{
int ret;
struct mdm_drv *mdm_drv;
struct esoc_eng *esoc_eng;
mdm_drv = devm_kzalloc(&esoc_clink->dev, sizeof(*mdm_drv), GFP_KERNEL);
if (!mdm_drv)
return -ENOMEM;
esoc_eng = &mdm_drv->cmd_eng;
esoc_eng->handle_clink_evt = mdm_handle_clink_evt;
ret = esoc_clink_register_cmd_eng(esoc_clink, esoc_eng);
if (ret) {
dev_err(&esoc_clink->dev, "failed to register cmd engine\n");
return ret;
}
ret = mdm_register_ssr(esoc_clink);
if (ret)
goto ssr_err;
mdm_drv->mdm_queue = alloc_workqueue("mdm_drv_queue", 0, 0);
if (!mdm_drv->mdm_queue) {
dev_err(&esoc_clink->dev, "could not create mdm_queue\n");
ret = -ENOMEM;
goto queue_err;
}
esoc_set_drv_data(esoc_clink, mdm_drv);
init_completion(&mdm_drv->pon_done);
init_completion(&mdm_drv->req_eng_wait);
INIT_WORK(&mdm_drv->ssr_work, mdm_ssr_fn);
mdm_drv->esoc_clink = esoc_clink;
mdm_drv->mode = PWR_OFF;
mdm_drv->pon_state = PON_INIT;
mdm_drv->esoc_restart.notifier_call = esoc_msm_restart_handler;
ret = register_reboot_notifier(&mdm_drv->esoc_restart);
if (ret)
dev_err(&esoc_clink->dev, "register for reboot failed\n");
ret = mdm_dbg_eng_init(drv, esoc_clink);
if (ret) {
debug_init_done = false;
dev_err(&esoc_clink->dev, "dbg engine failure\n");
} else {
dev_dbg(&esoc_clink->dev, "dbg engine initialized\n");
debug_init_done = true;
}
return 0;
queue_err:
esoc_clink_unregister_ssr(esoc_clink);
ssr_err:
esoc_clink_unregister_cmd_eng(esoc_clink, esoc_eng);
return ret;
}
static struct esoc_compat compat_table[] = {
{
.name = "MDM9x55",
.data = NULL,
},
{
.name = "SDX50M",
.data = NULL,
},
};
static struct esoc_drv esoc_ssr_drv = {
.owner = THIS_MODULE,
.probe = esoc_ssr_probe,
.compat_table = compat_table,
.compat_entries = ARRAY_SIZE(compat_table),
.driver = {
.name = "mdm-4x",
},
};
int __init esoc_ssr_init(void)
{
return esoc_drv_register(&esoc_ssr_drv);
}
module_init(esoc_ssr_init);
MODULE_LICENSE("GPL v2");

drivers/esoc/esoc-mdm-pon.c (new file, 265 lines)

@@ -0,0 +1,265 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
*/
#include "esoc-mdm.h"
/* This function can be called from atomic context. */
static int mdm9x55_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
{
int soft_reset_direction_assert = 0,
soft_reset_direction_de_assert = 1;
uint32_t reset_time_us = mdm->reset_time_ms * 1000;
if (mdm->soft_reset_inverted) {
soft_reset_direction_assert = 1;
soft_reset_direction_de_assert = 0;
}
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
soft_reset_direction_assert);
/*
* Allow PS hold assert to be detected
*/
if (!atomic)
usleep_range(reset_time_us, reset_time_us + 100000);
else
/*
* The flow falls through this path as a part of the
* panic handler, which has to be executed atomically.
*/
mdelay(mdm->reset_time_ms);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
soft_reset_direction_de_assert);
return 0;
}
/* This function can be called from atomic context. */
static int sdx50m_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
{
int soft_reset_direction_assert = 0,
soft_reset_direction_de_assert = 1;
if (mdm->soft_reset_inverted) {
soft_reset_direction_assert = 1;
soft_reset_direction_de_assert = 0;
}
esoc_mdm_log("RESET GPIO value (before doing a reset): %d\n",
gpio_get_value(MDM_GPIO(mdm, AP2MDM_SOFT_RESET)));
esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n",
soft_reset_direction_assert);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
soft_reset_direction_assert);
/*
* Allow PS hold assert to be detected
*/
if (!atomic)
usleep_range(80000, 180000);
else
/*
* The flow falls through this path as a part of the
* panic handler, which has to be executed atomically.
*/
mdelay(100);
esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n",
soft_reset_direction_de_assert);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
soft_reset_direction_de_assert);
return 0;
}
static int mdm4x_do_first_power_on(struct mdm_ctrl *mdm)
{
int i;
int pblrdy;
struct device *dev = mdm->dev;
esoc_mdm_log("Powering on modem for the first time\n");
dev_dbg(dev, "Powering on modem for the first time\n");
if (mdm->esoc->auto_boot)
return 0;
mdm_toggle_soft_reset(mdm, false);
/* Add a delay to allow PON sequence to complete*/
msleep(150);
esoc_mdm_log("Setting AP2MDM_STATUS = 1\n");
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 1);
if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) {
for (i = 0; i < MDM_PBLRDY_CNT; i++) {
pblrdy = gpio_get_value(MDM_GPIO(mdm, MDM2AP_PBLRDY));
if (pblrdy)
break;
usleep_range(5000, 6000);
}
dev_dbg(dev, "pblrdy i:%d\n", i);
msleep(200);
}
/*
* No PBLRDY gpio associated with this modem
* Send request for image. Let userspace confirm establishment of
* link to external modem.
*/
else {
esoc_mdm_log("Queueing the request: ESOC_REQ_IMG\n");
esoc_clink_queue_request(ESOC_REQ_IMG, mdm->esoc);
}
return 0;
}
static int mdm9x55_power_down(struct mdm_ctrl *mdm)
{
struct device *dev = mdm->dev;
int soft_reset_direction_assert = 0,
soft_reset_direction_de_assert = 1;
if (mdm->soft_reset_inverted) {
soft_reset_direction_assert = 1;
soft_reset_direction_de_assert = 0;
}
/* Assert the soft reset line whether mdm2ap_status went low or not */
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
soft_reset_direction_assert);
dev_dbg(dev, "Doing a hard reset\n");
/*
* Currently, there is a debounce timer on the charm PMIC. It is
* necessary to hold the PMIC RESET low for 406ms
* for the reset to fully take place. Sleep here to ensure the
* reset has occurred before the function exits.
*/
msleep(406);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
soft_reset_direction_de_assert);
return 0;
}
static int sdx50m_power_down(struct mdm_ctrl *mdm)
{
struct device *dev = mdm->dev;
int soft_reset_direction = mdm->soft_reset_inverted ? 1 : 0;
/* Assert the soft reset line whether mdm2ap_status went low or not */
esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n", soft_reset_direction);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
soft_reset_direction);
dev_dbg(dev, "Doing a hard reset\n");
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
soft_reset_direction);
/*
* Currently, there is a debounce timer on the charm PMIC. It is
* necessary to hold the PMIC RESET low for 406ms
* for the reset to fully take place. Sleep here to ensure the
* reset has occurred before the function exits.
*/
msleep(300);
return 0;
}
static void mdm9x55_cold_reset(struct mdm_ctrl *mdm)
{
dev_dbg(mdm->dev, "Triggering mdm cold reset");
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
!!mdm->soft_reset_inverted);
/*
* The function is executed as a part of the atomic reboot handler.
* Hence, go with a busy loop instead of sleep.
*/
mdelay(334);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
!mdm->soft_reset_inverted);
}
static void sdx50m_cold_reset(struct mdm_ctrl *mdm)
{
dev_dbg(mdm->dev, "Triggering mdm cold reset");
esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n",
!!mdm->soft_reset_inverted);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
!!mdm->soft_reset_inverted);
/*
* The function is executed as a part of the atomic reboot handler.
* Hence, go with a busy loop instead of sleep.
*/
mdelay(600);
esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n",
!!mdm->soft_reset_inverted);
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
!mdm->soft_reset_inverted);
}
static int mdm9x55_pon_dt_init(struct mdm_ctrl *mdm)
{
int val;
struct device_node *node = mdm->dev->of_node;
enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
val = of_property_read_u32(node, "qcom,reset-time-ms",
&mdm->reset_time_ms);
if (val)
mdm->reset_time_ms = DEF_MDM9X55_RESET_TIME;
val = of_get_named_gpio_flags(node, "qcom,ap2mdm-soft-reset-gpio",
0, &flags);
if (val >= 0) {
MDM_GPIO(mdm, AP2MDM_SOFT_RESET) = val;
if (flags & OF_GPIO_ACTIVE_LOW)
mdm->soft_reset_inverted = 1;
return 0;
} else
return -EIO;
}
static int mdm4x_pon_dt_init(struct mdm_ctrl *mdm)
{
int val;
struct device_node *node = mdm->dev->of_node;
enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
val = of_get_named_gpio_flags(node, "qcom,ap2mdm-soft-reset-gpio",
0, &flags);
if (val >= 0) {
MDM_GPIO(mdm, AP2MDM_SOFT_RESET) = val;
if (flags & OF_GPIO_ACTIVE_LOW)
mdm->soft_reset_inverted = 1;
return 0;
} else
return -EIO;
}
static int mdm4x_pon_setup(struct mdm_ctrl *mdm)
{
struct device *dev = mdm->dev;
if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_SOFT_RESET))) {
if (gpio_request(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
"AP2MDM_SOFT_RESET")) {
dev_err(dev, "Cannot config AP2MDM_SOFT_RESET gpio\n");
return -EIO;
}
}
return 0;
}
struct mdm_pon_ops mdm9x55_pon_ops = {
.pon = mdm4x_do_first_power_on,
.soft_reset = mdm9x55_toggle_soft_reset,
.poff_force = mdm9x55_power_down,
.cold_reset = mdm9x55_cold_reset,
.dt_init = mdm9x55_pon_dt_init,
.setup = mdm4x_pon_setup,
};
struct mdm_pon_ops sdx50m_pon_ops = {
.pon = mdm4x_do_first_power_on,
.soft_reset = sdx50m_toggle_soft_reset,
.poff_force = sdx50m_power_down,
.cold_reset = sdx50m_cold_reset,
.dt_init = mdm4x_pon_dt_init,
.setup = mdm4x_pon_setup,
};

drivers/esoc/esoc-mdm.h (new file, 148 lines)

@@ -0,0 +1,148 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef __ESOC_MDM_H__
#define __ESOC_MDM_H__
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include "esoc.h"
#define MDM_PBLRDY_CNT 20
#define INVALID_GPIO (-1)
#define MDM_GPIO(mdm, i) (mdm->gpios[i])
#define MDM9x55_LABEL "MDM9x55"
#define MDM9x55_PCIE "PCIe"
#define SDX50M_LABEL "SDX50M"
#define SDX50M_PCIE "PCIe"
#define MDM2AP_STATUS_TIMEOUT_MS 120000L
#define MDM_MODEM_TIMEOUT 3000
#define DEF_RAMDUMP_TIMEOUT 120000
#define DEF_RAMDUMP_DELAY 2000
#define DEF_SHUTDOWN_TIMEOUT 10000
#define DEF_MDM9X55_RESET_TIME 203
#define RD_BUF_SIZE 100
#define SFR_MAX_RETRIES 10
#define SFR_RETRY_INTERVAL 1000
#define MDM_DBG_OFFSET 0x934
#define MDM_DBG_MODE 0x53444247
#define MDM_CTI_NAME "coresight-cti-rpm-cpu0"
#define MDM_CTI_TRIG 0
#define MDM_CTI_CH 0
enum mdm_gpio {
AP2MDM_WAKEUP = 0,
AP2MDM_STATUS,
AP2MDM_SOFT_RESET,
AP2MDM_VDD_MIN,
AP2MDM_CHNLRDY,
AP2MDM_ERRFATAL,
AP2MDM_VDDMIN,
AP2MDM_PMIC_PWR_EN,
MDM2AP_WAKEUP,
MDM2AP_ERRFATAL,
MDM2AP_PBLRDY,
MDM2AP_STATUS,
MDM2AP_VDDMIN,
MDM_LINK_DETECT,
NUM_GPIOS,
};
struct mdm_pon_ops;
struct mdm_ctrl {
unsigned int gpios[NUM_GPIOS];
spinlock_t status_lock;
struct workqueue_struct *mdm_queue;
struct delayed_work mdm2ap_status_check_work;
struct work_struct mdm_status_work;
struct work_struct restart_reason_work;
struct completion debug_done;
struct device *dev;
struct pinctrl *pinctrl;
struct pinctrl_state *gpio_state_booting;
struct pinctrl_state *gpio_state_running;
struct pinctrl_state *gpio_state_active;
struct pinctrl_state *gpio_state_suspend;
int mdm2ap_status_valid_old_config;
int soft_reset_inverted;
int errfatal_irq;
int status_irq;
int pblrdy_irq;
int debug;
int init;
bool debug_fail;
unsigned int dump_timeout_ms;
unsigned int ramdump_delay_ms;
unsigned int shutdown_timeout_ms;
unsigned int reset_time_ms;
struct esoc_clink *esoc;
bool get_restart_reason;
unsigned long irq_mask;
bool ready;
bool dual_interface;
u32 status;
void __iomem *dbg_addr;
bool dbg_mode;
struct coresight_cti *cti;
int trig_cnt;
const struct mdm_pon_ops *pon_ops;
bool skip_restart_for_mdm_crash;
};
struct mdm_pon_ops {
int (*pon)(struct mdm_ctrl *mdm);
int (*soft_reset)(struct mdm_ctrl *mdm, bool atomic);
int (*poff_force)(struct mdm_ctrl *mdm);
int (*poff_cleanup)(struct mdm_ctrl *mdm);
void (*cold_reset)(struct mdm_ctrl *mdm);
int (*dt_init)(struct mdm_ctrl *mdm);
int (*setup)(struct mdm_ctrl *mdm);
};
struct mdm_ops {
struct esoc_clink_ops *clink_ops;
struct mdm_pon_ops *pon_ops;
int (*config_hw)(struct mdm_ctrl *mdm, const struct mdm_ops *ops,
struct platform_device *pdev);
};
void mdm_disable_irqs(struct mdm_ctrl *mdm);
static inline int mdm_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
{
return mdm->pon_ops->soft_reset(mdm, atomic);
}
static inline int mdm_do_first_power_on(struct mdm_ctrl *mdm)
{
return mdm->pon_ops->pon(mdm);
}
static inline int mdm_power_down(struct mdm_ctrl *mdm)
{
return mdm->pon_ops->poff_force(mdm);
}
static inline void mdm_cold_reset(struct mdm_ctrl *mdm)
{
mdm->pon_ops->cold_reset(mdm);
}
static inline int mdm_pon_dt_init(struct mdm_ctrl *mdm)
{
return mdm->pon_ops->dt_init(mdm);
}
static inline int mdm_pon_setup(struct mdm_ctrl *mdm)
{
return mdm->pon_ops->setup(mdm);
}
extern struct mdm_pon_ops mdm9x55_pon_ops;
extern struct mdm_pon_ops sdx50m_pon_ops;
#endif

drivers/esoc/esoc.h (new file, 188 lines)

@@ -0,0 +1,188 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013-2015, 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef __ESOC_H__
#define __ESOC_H__
#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/esoc_ctrl.h>
#include <linux/esoc_client.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/subsystem_notif.h>
#include <linux/ipc_logging.h>
#define ESOC_MDM_IPC_PAGES 10
extern void *ipc_log;
#define esoc_mdm_log(__msg, ...) \
do { \
if (ipc_log) \
ipc_log_string(ipc_log, \
"[%s]: "__msg, __func__, ##__VA_ARGS__); \
} while (0)
#define ESOC_DEV_MAX 4
#define ESOC_NAME_LEN 20
#define ESOC_LINK_LEN 20
struct esoc_clink;
/**
* struct esoc_eng: Engine of the esoc control link
* @handle_clink_req: handle incoming esoc requests.
* @handle_clink_evt: handle incoming esoc events.
* @esoc_clink: pointer to esoc control link.
*/
struct esoc_eng {
void (*handle_clink_req)(enum esoc_req req,
struct esoc_eng *eng);
void (*handle_clink_evt)(enum esoc_evt evt,
struct esoc_eng *eng);
struct esoc_clink *esoc_clink;
};
/**
* struct esoc_clink: Representation of external esoc device
* @name: Name of the external esoc.
* @link_name: name of the physical link.
* @link_info: additional info about the physical link.
* @parent: parent device.
* @dev: device for userspace interface.
* @pdev: platform device to interface with SSR driver.
* @id: id of the external device.
* @owner: owner of the device.
* @clink_ops: control operations for the control link
* @req_eng: handle for request engine.
* @cmd_eng: handle for command engine.
* @clink_data: private data of esoc control link.
* @compat_data: compat data of esoc driver.
* @subsys: descriptor for subsystem restart
* @subsys_dev: ssr device handle.
* @np: device tree node for esoc_clink.
* @auto_boot: boots independently.
* @primary: primary esoc controls (reset/poweroff) all secondary
* esocs, but not the other way around.
* @statusline_not_a_powersource: True if status line to esoc is not a
* power source.
* @userspace_handle_shutdown: True if user space handles shutdown requests.
*/
struct esoc_clink {
const char *name;
const char *link_name;
const char *link_info;
struct device *parent;
struct device dev;
struct platform_device *pdev;
unsigned int id;
struct module *owner;
const struct esoc_clink_ops *clink_ops;
struct esoc_eng *req_eng;
struct esoc_eng *cmd_eng;
spinlock_t notify_lock;
void *clink_data;
void *compat_data;
struct subsys_desc subsys;
struct subsys_device *subsys_dev;
struct device_node *np;
bool auto_boot;
bool primary;
bool statusline_not_a_powersource;
bool userspace_handle_shutdown;
struct esoc_client_hook *client_hook[ESOC_MAX_HOOKS];
};
/**
* struct esoc_clink_ops: Operations to control external soc
* @cmd_exe: Execute control command
* @get_status: Get current status, or response to previous command
* @get_err_fatal: Get status of err fatal signal
* @notify: notify external soc of events
*/
struct esoc_clink_ops {
int (*cmd_exe)(enum esoc_cmd cmd, struct esoc_clink *dev);
void (*get_status)(u32 *status, struct esoc_clink *dev);
void (*get_err_fatal)(u32 *status, struct esoc_clink *dev);
void (*notify)(enum esoc_notify notify, struct esoc_clink *dev);
};
/**
* struct esoc_compat: Compatibility of esoc drivers.
* @name: esoc link that driver is compatible with.
* @data: driver data associated with esoc clink.
*/
struct esoc_compat {
const char *name;
void *data;
};
/**
* struct esoc_drv: Driver for an esoc clink
* @driver: drivers for esoc.
* @owner: module owner of esoc driver.
* @compat_table: compatible table for driver.
* @compat_entries: number of entries in the compat table.
* @probe: probe function for esoc driver.
*/
struct esoc_drv {
struct device_driver driver;
struct module *owner;
struct esoc_compat *compat_table;
unsigned int compat_entries;
int (*probe)(struct esoc_clink *esoc_clink,
struct esoc_drv *drv);
};
#define to_esoc_clink(d) container_of(d, struct esoc_clink, dev)
#define to_esoc_drv(d) container_of(d, struct esoc_drv, driver)
extern struct bus_type esoc_bus_type;
/* Exported apis */
void esoc_dev_exit(void);
int esoc_dev_init(void);
void esoc_clink_unregister(struct esoc_clink *esoc_dev);
int esoc_clink_register(struct esoc_clink *esoc_dev);
struct esoc_clink *get_esoc_clink(int id);
struct esoc_clink *get_esoc_clink_by_node(struct device_node *node);
void put_esoc_clink(struct esoc_clink *esoc_clink);
void *get_esoc_clink_data(struct esoc_clink *esoc);
void set_esoc_clink_data(struct esoc_clink *esoc, void *data);
void esoc_clink_evt_notify(enum esoc_evt, struct esoc_clink *esoc_dev);
void esoc_clink_queue_request(enum esoc_req req, struct esoc_clink *esoc_dev);
void esoc_for_each_dev(void *data, int (*fn)(struct device *dev,
void *data));
int esoc_clink_register_cmd_eng(struct esoc_clink *esoc_clink,
struct esoc_eng *eng);
void esoc_clink_unregister_cmd_eng(struct esoc_clink *esoc_clink,
struct esoc_eng *eng);
int esoc_clink_register_req_eng(struct esoc_clink *esoc_clink,
struct esoc_eng *eng);
void esoc_clink_unregister_req_eng(struct esoc_clink *esoc_clink,
struct esoc_eng *eng);
int esoc_drv_register(struct esoc_drv *driver);
void esoc_set_drv_data(struct esoc_clink *esoc_clink, void *data);
void *esoc_get_drv_data(struct esoc_clink *esoc_clink);
/* ssr operations */
int esoc_clink_register_ssr(struct esoc_clink *esoc_clink);
int esoc_clink_request_ssr(struct esoc_clink *esoc_clink);
void esoc_clink_unregister_ssr(struct esoc_clink *esoc_clink);
/* client notification */
#ifdef CONFIG_ESOC_CLIENT
void notify_esoc_clients(struct esoc_clink *esoc_clink, unsigned long evt);
#else
static inline void notify_esoc_clients(struct esoc_clink *esoc_clink,
unsigned long evt)
{
}
#endif
bool esoc_req_eng_enabled(struct esoc_clink *esoc_clink);
bool esoc_cmd_eng_enabled(struct esoc_clink *esoc_clink);
#endif
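The diff of esoc-mdm-4x.c, the control-link provider that implements these operations for the gpio-controlled MDM9x55/SDX50M modems, is suppressed above because of its size. For orientation, below is a minimal, hypothetical sketch of a provider built only on the esoc_clink API declared in this header; all example_* names are invented, and the real driver additionally does gpio/IRQ setup, pinctrl handling, IPC logging and SFR queries.

/* Hypothetical control-link provider sketch; not the actual esoc-mdm-4x.c. */
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "esoc.h"

static int example_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *clink)
{
	/* Toggle the AP2MDM gpios required to carry out @cmd. */
	return 0;
}

static void example_get_status(u32 *status, struct esoc_clink *clink)
{
	*status = 0;	/* e.g. sample MDM2AP_STATUS */
}

static void example_get_err_fatal(u32 *status, struct esoc_clink *clink)
{
	*status = 0;	/* e.g. sample MDM2AP_ERRFATAL */
}

static void example_notify(enum esoc_notify notify, struct esoc_clink *clink)
{
	/* Forward primary-soc events (reboot/crash) to the modem. */
}

static const struct esoc_clink_ops example_clink_ops = {
	.cmd_exe = example_cmd_exe,
	.get_status = example_get_status,
	.get_err_fatal = example_get_err_fatal,
	.notify = example_notify,
};

static int example_esoc_probe(struct platform_device *pdev)
{
	struct esoc_clink *clink;

	/* kzalloc rather than devm: esoc_clink_release() kfree()s the clink. */
	clink = kzalloc(sizeof(*clink), GFP_KERNEL);
	if (!clink)
		return -ENOMEM;
	clink->name = "MDM9x55";	/* matched against esoc_compat tables */
	clink->link_name = "PCIe";
	clink->clink_ops = &example_clink_ops;
	clink->parent = &pdev->dev;
	clink->pdev = pdev;
	clink->owner = THIS_MODULE;
	clink->np = pdev->dev.of_node;
	/*
	 * Registration binds a matching esoc_drv (e.g. the "mdm-4x" SSR
	 * driver above). IRQ handlers would later report state changes with
	 * esoc_clink_evt_notify(ESOC_RUN_STATE, clink), etc.
	 */
	return esoc_clink_register(clink);
}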

drivers/esoc/esoc_bus.c (new file, 398 lines)

@@ -0,0 +1,398 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2013-2015, 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/idr.h>
#include <linux/slab.h>
#include "esoc.h"
static DEFINE_IDA(esoc_ida);
/* SYSFS */
static ssize_t
esoc_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return snprintf(buf, ESOC_NAME_LEN, "%s", to_esoc_clink(dev)->name);
}
static ssize_t
esoc_link_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return snprintf(buf, ESOC_LINK_LEN, "%s",
to_esoc_clink(dev)->link_name);
}
static ssize_t
esoc_link_info_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return snprintf(buf, ESOC_LINK_LEN, "%s",
to_esoc_clink(dev)->link_info);
}
static DEVICE_ATTR_RO(esoc_name);
static DEVICE_ATTR_RO(esoc_link);
static DEVICE_ATTR_RO(esoc_link_info);
static struct attribute *esoc_clink_attrs[] = {
&dev_attr_esoc_name.attr,
&dev_attr_esoc_link.attr,
&dev_attr_esoc_link_info.attr,
NULL
};
static struct attribute_group esoc_clink_attr_group = {
.attrs = esoc_clink_attrs,
};
const struct attribute_group *esoc_clink_attr_groups[] = {
&esoc_clink_attr_group,
NULL,
};
static int esoc_bus_match(struct device *dev, struct device_driver *drv)
{
int i = 0, match = 1;
struct esoc_clink *esoc_clink = to_esoc_clink(dev);
struct esoc_drv *esoc_drv = to_esoc_drv(drv);
int entries = esoc_drv->compat_entries;
struct esoc_compat *table = esoc_drv->compat_table;
for (i = 0; i < entries; i++) {
if (strcasecmp(esoc_clink->name, table[i].name) == 0)
return match;
}
return 0;
}
static int esoc_bus_probe(struct device *dev)
{
int ret;
struct esoc_clink *esoc_clink = to_esoc_clink(dev);
struct esoc_drv *esoc_drv = to_esoc_drv(dev->driver);
ret = esoc_drv->probe(esoc_clink, esoc_drv);
if (ret) {
pr_err("failed to probe %s dev\n", esoc_clink->name);
return ret;
}
return 0;
}
struct bus_type esoc_bus_type = {
.name = "esoc",
.match = esoc_bus_match,
.dev_groups = esoc_clink_attr_groups,
};
EXPORT_SYMBOL(esoc_bus_type);
struct device esoc_bus = {
.init_name = "esoc-bus"
};
EXPORT_SYMBOL(esoc_bus);
/* bus accessor */
static void esoc_clink_release(struct device *dev)
{
struct esoc_clink *esoc_clink = to_esoc_clink(dev);
ida_simple_remove(&esoc_ida, esoc_clink->id);
kfree(esoc_clink);
}
static int esoc_clink_match_id(struct device *dev, void *id)
{
struct esoc_clink *esoc_clink = to_esoc_clink(dev);
int *esoc_id = (int *)id;
if (esoc_clink->id == *esoc_id) {
if (!try_module_get(esoc_clink->owner))
return 0;
return 1;
}
return 0;
}
static int esoc_clink_match_node(struct device *dev, void *id)
{
struct esoc_clink *esoc_clink = to_esoc_clink(dev);
struct device_node *node = id;
if (esoc_clink->np == node) {
if (!try_module_get(esoc_clink->owner))
return 0;
return 1;
}
return 0;
}
void esoc_for_each_dev(void *data, int (*fn)(struct device *dev, void *))
{
bus_for_each_dev(&esoc_bus_type, NULL, data, fn);
}
EXPORT_SYMBOL(esoc_for_each_dev);
struct esoc_clink *get_esoc_clink(int id)
{
struct esoc_clink *esoc_clink;
struct device *dev;
dev = bus_find_device(&esoc_bus_type, NULL, &id, esoc_clink_match_id);
if (IS_ERR_OR_NULL(dev))
return NULL;
esoc_clink = to_esoc_clink(dev);
return esoc_clink;
}
EXPORT_SYMBOL(get_esoc_clink);
struct esoc_clink *get_esoc_clink_by_node(struct device_node *node)
{
struct esoc_clink *esoc_clink;
struct device *dev;
dev = bus_find_device(&esoc_bus_type, NULL, node,
esoc_clink_match_node);
if (IS_ERR_OR_NULL(dev))
return NULL;
esoc_clink = to_esoc_clink(dev);
return esoc_clink;
}
void put_esoc_clink(struct esoc_clink *esoc_clink)
{
module_put(esoc_clink->owner);
}
EXPORT_SYMBOL(put_esoc_clink);
bool esoc_req_eng_enabled(struct esoc_clink *esoc_clink)
{
return !esoc_clink->req_eng ? false : true;
}
EXPORT_SYMBOL(esoc_req_eng_enabled);
bool esoc_cmd_eng_enabled(struct esoc_clink *esoc_clink)
{
return !esoc_clink->cmd_eng ? false : true;
}
EXPORT_SYMBOL(esoc_cmd_eng_enabled);
/* ssr operations */
int esoc_clink_register_ssr(struct esoc_clink *esoc_clink)
{
int ret;
int len;
char *subsys_name;
len = strlen("esoc") + sizeof(esoc_clink->id);
subsys_name = kzalloc(len, GFP_KERNEL);
if (!subsys_name)
return -ENOMEM;
snprintf(subsys_name, len, "esoc%d", esoc_clink->id);
esoc_clink->subsys.name = subsys_name;
esoc_clink->dev.of_node = esoc_clink->np;
esoc_clink->subsys.dev = &esoc_clink->pdev->dev;
esoc_clink->subsys_dev = subsys_register(&esoc_clink->subsys);
if (IS_ERR_OR_NULL(esoc_clink->subsys_dev)) {
dev_err(&esoc_clink->dev, "failed to register ssr node\n");
ret = PTR_ERR(esoc_clink->subsys_dev);
goto subsys_err;
}
return 0;
subsys_err:
kfree(subsys_name);
return ret;
}
EXPORT_SYMBOL(esoc_clink_register_ssr);
void esoc_clink_unregister_ssr(struct esoc_clink *esoc_clink)
{
subsys_unregister(esoc_clink->subsys_dev);
kfree(esoc_clink->subsys.name);
}
EXPORT_SYMBOL(esoc_clink_unregister_ssr);
int esoc_clink_request_ssr(struct esoc_clink *esoc_clink)
{
subsystem_restart_dev(esoc_clink->subsys_dev);
return 0;
}
EXPORT_SYMBOL(esoc_clink_request_ssr);
/* bus operations */
void esoc_clink_evt_notify(enum esoc_evt evt, struct esoc_clink *esoc_clink)
{
unsigned long flags;
spin_lock_irqsave(&esoc_clink->notify_lock, flags);
notify_esoc_clients(esoc_clink, evt);
if (esoc_clink->req_eng && esoc_clink->req_eng->handle_clink_evt)
esoc_clink->req_eng->handle_clink_evt(evt, esoc_clink->req_eng);
if (esoc_clink->cmd_eng && esoc_clink->cmd_eng->handle_clink_evt)
esoc_clink->cmd_eng->handle_clink_evt(evt, esoc_clink->cmd_eng);
spin_unlock_irqrestore(&esoc_clink->notify_lock, flags);
}
EXPORT_SYMBOL(esoc_clink_evt_notify);
void *get_esoc_clink_data(struct esoc_clink *esoc)
{
return esoc->clink_data;
}
EXPORT_SYMBOL(get_esoc_clink_data);
void set_esoc_clink_data(struct esoc_clink *esoc, void *data)
{
esoc->clink_data = data;
}
EXPORT_SYMBOL(set_esoc_clink_data);
void esoc_clink_queue_request(enum esoc_req req, struct esoc_clink *esoc_clink)
{
unsigned long flags;
struct esoc_eng *req_eng;
spin_lock_irqsave(&esoc_clink->notify_lock, flags);
if (esoc_clink->req_eng != NULL) {
req_eng = esoc_clink->req_eng;
req_eng->handle_clink_req(req, req_eng);
}
spin_unlock_irqrestore(&esoc_clink->notify_lock, flags);
}
EXPORT_SYMBOL(esoc_clink_queue_request);
void esoc_set_drv_data(struct esoc_clink *esoc_clink, void *data)
{
dev_set_drvdata(&esoc_clink->dev, data);
}
EXPORT_SYMBOL(esoc_set_drv_data);
void *esoc_get_drv_data(struct esoc_clink *esoc_clink)
{
return dev_get_drvdata(&esoc_clink->dev);
}
EXPORT_SYMBOL(esoc_get_drv_data);
/* bus registration functions */
void esoc_clink_unregister(struct esoc_clink *esoc_clink)
{
if (get_device(&esoc_clink->dev) != NULL) {
device_unregister(&esoc_clink->dev);
put_device(&esoc_clink->dev);
}
}
EXPORT_SYMBOL(esoc_clink_unregister);
int esoc_clink_register(struct esoc_clink *esoc_clink)
{
int id, err;
struct device *dev;
if (!esoc_clink->name || !esoc_clink->link_name ||
!esoc_clink->clink_ops) {
dev_err(esoc_clink->parent, "invalid esoc arguments\n");
return -EINVAL;
}
	id = ida_simple_get(&esoc_ida, 0, ESOC_DEV_MAX, GFP_KERNEL);
	if (id < 0) {
		pr_err("unable to allocate id for %s, err = %d\n",
						esoc_clink->name, id);
		return id;
	}
esoc_clink->id = id;
dev = &esoc_clink->dev;
dev->bus = &esoc_bus_type;
dev->release = esoc_clink_release;
if (!esoc_clink->parent)
dev->parent = &esoc_bus;
else
dev->parent = esoc_clink->parent;
dev_set_name(dev, "esoc%d", id);
err = device_register(dev);
if (err) {
dev_err(esoc_clink->parent, "esoc device register failed\n");
goto exit_ida;
}
spin_lock_init(&esoc_clink->notify_lock);
return 0;
exit_ida:
ida_simple_remove(&esoc_ida, id);
pr_err("unable to register %s, err = %d\n", esoc_clink->name, err);
return err;
}
EXPORT_SYMBOL(esoc_clink_register);
int esoc_clink_register_req_eng(struct esoc_clink *esoc_clink,
struct esoc_eng *eng)
{
if (esoc_clink->req_eng)
return -EBUSY;
if (!eng->handle_clink_req)
return -EINVAL;
esoc_clink->req_eng = eng;
eng->esoc_clink = esoc_clink;
esoc_clink_evt_notify(ESOC_REQ_ENG_ON, esoc_clink);
return 0;
}
EXPORT_SYMBOL(esoc_clink_register_req_eng);
int esoc_clink_register_cmd_eng(struct esoc_clink *esoc_clink,
struct esoc_eng *eng)
{
if (esoc_clink->cmd_eng)
return -EBUSY;
esoc_clink->cmd_eng = eng;
eng->esoc_clink = esoc_clink;
esoc_clink_evt_notify(ESOC_CMD_ENG_ON, esoc_clink);
return 0;
}
EXPORT_SYMBOL(esoc_clink_register_cmd_eng);
void esoc_clink_unregister_req_eng(struct esoc_clink *esoc_clink,
struct esoc_eng *eng)
{
esoc_clink->req_eng = NULL;
esoc_clink_evt_notify(ESOC_REQ_ENG_OFF, esoc_clink);
}
EXPORT_SYMBOL(esoc_clink_unregister_req_eng);
void esoc_clink_unregister_cmd_eng(struct esoc_clink *esoc_clink,
struct esoc_eng *eng)
{
esoc_clink->cmd_eng = NULL;
esoc_clink_evt_notify(ESOC_CMD_ENG_OFF, esoc_clink);
}
EXPORT_SYMBOL(esoc_clink_unregister_cmd_eng);
int esoc_drv_register(struct esoc_drv *driver)
{
	driver->driver.bus = &esoc_bus_type;
	driver->driver.probe = esoc_bus_probe;
	return driver_register(&driver->driver);
}
EXPORT_SYMBOL(esoc_drv_register);
static int __init esoc_init(void)
{
int ret;
ret = device_register(&esoc_bus);
if (ret) {
pr_err("esoc bus device register fail\n");
return ret;
}
	ret = bus_register(&esoc_bus_type);
	if (ret) {
		pr_err("esoc bus register fail\n");
		device_unregister(&esoc_bus);
		return ret;
	}
pr_debug("esoc bus registration done\n");
return 0;
}
subsys_initcall(esoc_init);
MODULE_LICENSE("GPL v2");
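
The registration helpers above are used from two directions: a low-level driver that owns the physical control link registers a struct esoc_clink, and an engine driver attaches to it over the esoc bus. The following is only a sketch; the field names are inferred from esoc.h (not shown here), the demo_* identifiers are hypothetical, and bus matching details depend on definitions that this file does not contain.

/* Hypothetical registration sequence; demo_* names are illustrative. */
static const struct esoc_clink_ops demo_clink_ops = {
	/* a real table provides cmd_exe, notify, get_status, get_err_fatal */
};

static struct esoc_clink demo_clink = {
	.name		= "demo-mdm",
	.link_name	= "demo-link",
	.clink_ops	= &demo_clink_ops,
	.owner		= THIS_MODULE,
};

static int demo_eng_probe(struct esoc_clink *esoc_clink, struct esoc_drv *drv)
{
	/* Bind a request/command engine to the control link here. */
	return 0;
}

static struct esoc_drv demo_eng_driver = {
	.driver = {
		.name	= "demo-esoc-eng",
		.owner	= THIS_MODULE,
	},
	.probe	= demo_eng_probe,
};

static int __init demo_esoc_setup(void)
{
	int ret;

	ret = esoc_clink_register(&demo_clink);	/* creates the esoc%d device */
	if (ret)
		return ret;
	return esoc_drv_register(&demo_eng_driver);
}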

201
drivers/esoc/esoc_client.c Normal file
View file

@ -0,0 +1,201 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/esoc_client.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include "esoc.h"
static DEFINE_SPINLOCK(notify_lock);
static ATOMIC_NOTIFIER_HEAD(client_notify);
static void devm_esoc_desc_release(struct device *dev, void *res)
{
struct esoc_desc *esoc_desc = res;
	kfree(esoc_desc->name);
	kfree(esoc_desc->link);
	kfree(esoc_desc->link_info);
	put_esoc_clink(esoc_desc->priv);
}
static int devm_esoc_desc_match(struct device *dev, void *res, void *data)
{
struct esoc_desc *esoc_desc = res;
return esoc_desc == data;
}
struct esoc_desc *devm_register_esoc_client(struct device *dev,
const char *name)
{
int ret, index;
const char *client_desc;
char *esoc_prop;
const __be32 *parp;
struct device_node *esoc_node;
struct device_node *np = dev->of_node;
struct esoc_clink *esoc_clink;
struct esoc_desc *desc;
char *esoc_name, *esoc_link, *esoc_link_info;
for (index = 0;; index++) {
esoc_prop = kasprintf(GFP_KERNEL, "esoc-%d", index);
if (IS_ERR_OR_NULL(esoc_prop))
return ERR_PTR(-ENOMEM);
parp = of_get_property(np, esoc_prop, NULL);
if (parp == NULL) {
dev_err(dev, "esoc device not present\n");
kfree(esoc_prop);
return NULL;
}
ret = of_property_read_string_index(np, "esoc-names", index,
&client_desc);
if (ret) {
dev_err(dev, "cannot find matching string\n");
kfree(esoc_prop);
return NULL;
}
if (strcmp(client_desc, name)) {
kfree(esoc_prop);
continue;
}
kfree(esoc_prop);
esoc_node = of_find_node_by_phandle(be32_to_cpup(parp));
esoc_clink = get_esoc_clink_by_node(esoc_node);
if (IS_ERR_OR_NULL(esoc_clink)) {
dev_err(dev, "matching esoc clink not present\n");
return ERR_PTR(-EPROBE_DEFER);
}
esoc_name = kasprintf(GFP_KERNEL, "esoc%d",
esoc_clink->id);
		if (IS_ERR_OR_NULL(esoc_name)) {
			dev_err(dev, "unable to allocate esoc name\n");
			put_esoc_clink(esoc_clink);
			return ERR_PTR(-ENOMEM);
		}
esoc_link = kasprintf(GFP_KERNEL, "%s", esoc_clink->link_name);
		if (IS_ERR_OR_NULL(esoc_link)) {
			dev_err(dev, "unable to allocate esoc link name\n");
			kfree(esoc_name);
			put_esoc_clink(esoc_clink);
			return ERR_PTR(-ENOMEM);
		}
esoc_link_info = kasprintf(GFP_KERNEL, "%s",
esoc_clink->link_info);
		if (IS_ERR_OR_NULL(esoc_link_info)) {
			dev_err(dev, "unable to allocate link info name\n");
			kfree(esoc_name);
			kfree(esoc_link);
			put_esoc_clink(esoc_clink);
			return ERR_PTR(-ENOMEM);
		}
desc = devres_alloc(devm_esoc_desc_release,
sizeof(*desc), GFP_KERNEL);
		if (IS_ERR_OR_NULL(desc)) {
			kfree(esoc_name);
			kfree(esoc_link);
			kfree(esoc_link_info);
			put_esoc_clink(esoc_clink);
			dev_err(dev, "unable to allocate esoc descriptor\n");
			return ERR_PTR(-ENOMEM);
		}
desc->name = esoc_name;
desc->link = esoc_link;
desc->link_info = esoc_link_info;
desc->priv = esoc_clink;
devres_add(dev, desc);
return desc;
}
return NULL;
}
EXPORT_SYMBOL(devm_register_esoc_client);
void devm_unregister_esoc_client(struct device *dev,
struct esoc_desc *esoc_desc)
{
int ret;
ret = devres_release(dev, devm_esoc_desc_release,
devm_esoc_desc_match, esoc_desc);
WARN_ON(ret);
}
EXPORT_SYMBOL(devm_unregister_esoc_client);
int esoc_register_client_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&client_notify, nb);
}
EXPORT_SYMBOL(esoc_register_client_notifier);
void notify_esoc_clients(struct esoc_clink *esoc_clink, unsigned long evt)
{
unsigned int id;
unsigned long flags;
spin_lock_irqsave(&notify_lock, flags);
id = esoc_clink->id;
atomic_notifier_call_chain(&client_notify, evt, &id);
spin_unlock_irqrestore(&notify_lock, flags);
}
EXPORT_SYMBOL(notify_esoc_clients);
int esoc_register_client_hook(struct esoc_desc *desc,
struct esoc_client_hook *client_hook)
{
int i;
struct esoc_clink *esoc_clink;
if (IS_ERR_OR_NULL(desc) || IS_ERR_OR_NULL(client_hook)) {
pr_debug("%s: Invalid parameters\n", __func__);
return -EINVAL;
}
esoc_clink = desc->priv;
if (IS_ERR_OR_NULL(esoc_clink)) {
pr_debug("%s: Invalid esoc link\n", __func__);
return -EINVAL;
}
for (i = 0; i < ESOC_MAX_HOOKS; i++) {
if (i == client_hook->prio &&
esoc_clink->client_hook[i] == NULL) {
esoc_clink->client_hook[i] = client_hook;
dev_dbg(&esoc_clink->dev,
"Client hook registration successful\n");
return 0;
}
}
dev_dbg(&esoc_clink->dev, "Client hook registration failed!\n");
return -EINVAL;
}
EXPORT_SYMBOL(esoc_register_client_hook);
int esoc_unregister_client_hook(struct esoc_desc *desc,
struct esoc_client_hook *client_hook)
{
int i;
struct esoc_clink *esoc_clink;
if (IS_ERR_OR_NULL(desc) || IS_ERR_OR_NULL(client_hook)) {
pr_debug("%s: Invalid parameters\n", __func__);
return -EINVAL;
}
esoc_clink = desc->priv;
if (IS_ERR_OR_NULL(esoc_clink)) {
pr_debug("%s: Invalid esoc link\n", __func__);
return -EINVAL;
}
for (i = 0; i < ESOC_MAX_HOOKS; i++) {
if (i == client_hook->prio &&
esoc_clink->client_hook[i] != NULL) {
esoc_clink->client_hook[i] = NULL;
dev_dbg(&esoc_clink->dev,
"Client hook unregistration successful\n");
return 0;
}
}
dev_dbg(&esoc_clink->dev, "Client hook unregistration failed!\n");
return -EINVAL;
}
EXPORT_SYMBOL(esoc_unregister_client_hook);
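
For reference, a consumer would typically resolve its esoc descriptor at probe time and subscribe to the client notifier chain, roughly as sketched below. This is a hedged example: the demo_* names, the "mdm" esoc-names entry, and the platform_device wiring are illustrative, not part of this snapshot; only the API calls come from the code above.

/* Hypothetical client driver using the devres-managed registration. */
#include <linux/platform_device.h>
#include <linux/esoc_client.h>

static int demo_esoc_nb_cb(struct notifier_block *nb, unsigned long evt,
			   void *data)
{
	unsigned int *esoc_id = data;	/* id passed by notify_esoc_clients() */

	pr_debug("esoc%u raised event %lu\n", *esoc_id, evt);
	return NOTIFY_OK;
}

static struct notifier_block demo_esoc_nb = {
	.notifier_call = demo_esoc_nb_cb,
};

static int demo_client_probe(struct platform_device *pdev)
{
	struct esoc_desc *desc;

	desc = devm_register_esoc_client(&pdev->dev, "mdm");
	if (IS_ERR_OR_NULL(desc))
		return desc ? PTR_ERR(desc) : -ENODEV;

	dev_info(&pdev->dev, "attached to %s over %s\n",
		 desc->name, desc->link);
	return esoc_register_client_notifier(&demo_esoc_nb);
}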

491
drivers/esoc/esoc_dev.c Normal file
View file

@ -0,0 +1,491 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2013-2014, 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/kfifo.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/esoc_client.h>
#include "esoc.h"
/**
* struct esoc_udev: Userspace char interface
* @dev: interface device.
 * @req_fifo: fifo for clink requests.
* @req_wait: signal availability of request from clink
* @req_fifo_lock: serialize access to req fifo
 * @evt_fifo: fifo for clink events
* @evt_wait: signal availability of clink event
* @evt_fifo_lock: serialize access to event fifo
* @list: entry in esoc dev list.
 * @clink: reference to control link
*/
struct esoc_udev {
struct device *dev;
struct kfifo req_fifo;
wait_queue_head_t req_wait;
spinlock_t req_fifo_lock;
struct kfifo evt_fifo;
wait_queue_head_t evt_wait;
spinlock_t evt_fifo_lock;
struct list_head list;
struct esoc_clink *clink;
};
/**
* struct esoc_uhandle: Userspace handle of esoc
* @esoc_clink: esoc control link.
 * @eng: esoc engine for commands/requests.
* @esoc_udev: user interface device.
* @req_eng_reg: indicates if engine is registered as request eng
* @cmd_eng_reg: indicates if engine is registered as cmd eng
*/
struct esoc_uhandle {
struct esoc_clink *esoc_clink;
struct esoc_eng eng;
struct esoc_udev *esoc_udev;
bool req_eng_reg;
bool cmd_eng_reg;
};
#define ESOC_MAX_MINOR 256
#define ESOC_MAX_REQ 8
#define ESOC_MAX_EVT 4
static LIST_HEAD(esoc_udev_list);
static DEFINE_SPINLOCK(esoc_udev_list_lock);
struct class *esoc_class;
static int esoc_major;
static struct esoc_udev *get_free_esoc_udev(struct esoc_clink *esoc_clink)
{
struct esoc_udev *esoc_udev;
int err;
	if (esoc_clink->id >= ESOC_MAX_MINOR) {
pr_err("too many esoc devices\n");
return ERR_PTR(-ENODEV);
}
esoc_udev = kzalloc(sizeof(*esoc_udev), GFP_KERNEL);
if (!esoc_udev)
return ERR_PTR(-ENOMEM);
err = kfifo_alloc(&esoc_udev->req_fifo, (sizeof(u32)) * ESOC_MAX_REQ,
GFP_KERNEL);
if (err) {
pr_err("unable to allocate request fifo for %s\n",
esoc_clink->name);
goto req_fifo_fail;
}
err = kfifo_alloc(&esoc_udev->evt_fifo, (sizeof(u32)) * ESOC_MAX_EVT,
GFP_KERNEL);
if (err) {
pr_err("unable to allocate evt fifo for %s\n",
esoc_clink->name);
goto evt_fifo_fail;
}
init_waitqueue_head(&esoc_udev->req_wait);
init_waitqueue_head(&esoc_udev->evt_wait);
spin_lock_init(&esoc_udev->req_fifo_lock);
spin_lock_init(&esoc_udev->evt_fifo_lock);
esoc_udev->clink = esoc_clink;
spin_lock(&esoc_udev_list_lock);
list_add_tail(&esoc_udev->list, &esoc_udev_list);
spin_unlock(&esoc_udev_list_lock);
return esoc_udev;
evt_fifo_fail:
kfifo_free(&esoc_udev->req_fifo);
req_fifo_fail:
kfree(esoc_udev);
return ERR_PTR(-ENODEV);
}
static void return_esoc_udev(struct esoc_udev *esoc_udev)
{
spin_lock(&esoc_udev_list_lock);
list_del(&esoc_udev->list);
spin_unlock(&esoc_udev_list_lock);
kfifo_free(&esoc_udev->req_fifo);
kfifo_free(&esoc_udev->evt_fifo);
kfree(esoc_udev);
}
static struct esoc_udev *esoc_udev_get_by_minor(unsigned int index)
{
struct esoc_udev *esoc_udev;
spin_lock(&esoc_udev_list_lock);
list_for_each_entry(esoc_udev, &esoc_udev_list, list) {
if (esoc_udev->clink->id == index)
goto found;
}
esoc_udev = NULL;
found:
spin_unlock(&esoc_udev_list_lock);
return esoc_udev;
}
void esoc_udev_handle_clink_req(enum esoc_req req, struct esoc_eng *eng)
{
int err;
u32 clink_req;
struct esoc_clink *esoc_clink = eng->esoc_clink;
struct esoc_udev *esoc_udev = esoc_udev_get_by_minor(esoc_clink->id);
if (!esoc_udev) {
esoc_mdm_log("esoc_udev not found\n");
return;
}
clink_req = (u32)req;
err = kfifo_in_spinlocked(&esoc_udev->req_fifo, &clink_req,
sizeof(clink_req),
&esoc_udev->req_fifo_lock);
if (err != sizeof(clink_req)) {
esoc_mdm_log("Unable to queue request %d; err: %d\n", req, err);
pr_err("unable to queue request for %s\n", esoc_clink->name);
return;
}
wake_up_interruptible(&esoc_udev->req_wait);
}
void esoc_udev_handle_clink_evt(enum esoc_evt evt, struct esoc_eng *eng)
{
int err;
u32 clink_evt;
struct esoc_clink *esoc_clink = eng->esoc_clink;
struct esoc_udev *esoc_udev = esoc_udev_get_by_minor(esoc_clink->id);
if (!esoc_udev) {
esoc_mdm_log("esoc_udev not found\n");
return;
}
clink_evt = (u32)evt;
err = kfifo_in_spinlocked(&esoc_udev->evt_fifo, &clink_evt,
sizeof(clink_evt),
&esoc_udev->evt_fifo_lock);
if (err != sizeof(clink_evt)) {
esoc_mdm_log("Unable to queue event %d; err: %d\n", evt, err);
pr_err("unable to queue event for %s\n", esoc_clink->name);
return;
}
wake_up_interruptible(&esoc_udev->evt_wait);
}
static int esoc_get_link_id(struct esoc_clink *esoc_clink,
unsigned long arg)
{
struct esoc_link_data link_data;
struct esoc_client_hook *client_hook;
struct esoc_link_data __user *user_arg;
user_arg = (struct esoc_link_data __user *) arg;
if (!user_arg) {
dev_err(&esoc_clink->dev, "Missing argument for link id\n");
return -EINVAL;
}
if (copy_from_user((void *) &link_data, user_arg, sizeof(*user_arg))) {
dev_err(&esoc_clink->dev,
"Unable to copy the data from the user\n");
return -EFAULT;
}
if (link_data.prio < 0 || link_data.prio >= ESOC_MAX_HOOKS) {
dev_err(&esoc_clink->dev, "Invalid client identifier passed\n");
return -EINVAL;
}
client_hook = esoc_clink->client_hook[link_data.prio];
if (client_hook && client_hook->esoc_link_get_id) {
link_data.link_id =
client_hook->esoc_link_get_id(client_hook->priv);
if (copy_to_user((void *) user_arg, &link_data,
sizeof(*user_arg))) {
dev_err(&esoc_clink->dev,
"Failed to send the data to the user\n");
return -EFAULT;
}
return 0;
}
dev_err(&esoc_clink->dev,
"Client hooks not registered for the device\n");
return -EINVAL;
}
static long esoc_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int err;
u32 esoc_cmd, status, req, evt;
struct esoc_uhandle *uhandle = file->private_data;
struct esoc_udev *esoc_udev = uhandle->esoc_udev;
struct esoc_clink *esoc_clink = uhandle->esoc_clink;
const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
void __user *uarg = (void __user *)arg;
switch (cmd) {
case ESOC_REG_REQ_ENG:
esoc_mdm_log("ESOC_REG_REQ_ENG\n");
err = esoc_clink_register_req_eng(esoc_clink, &uhandle->eng);
if (err) {
esoc_mdm_log("ESOC_REG_REQ_ENG failed: %d\n", err);
return err;
}
uhandle->req_eng_reg = true;
break;
case ESOC_REG_CMD_ENG:
esoc_mdm_log("ESOC_REG_CMD_ENG\n");
err = esoc_clink_register_cmd_eng(esoc_clink, &uhandle->eng);
if (err) {
esoc_mdm_log("ESOC_REG_CMD_ENG failed: %d\n", err);
return err;
}
uhandle->cmd_eng_reg = true;
break;
case ESOC_CMD_EXE:
if (esoc_clink->cmd_eng != &uhandle->eng) {
esoc_mdm_log("ESOC_CMD_EXE failed to access\n");
return -EACCES;
}
		if (get_user(esoc_cmd, (u32 __user *)arg))
			return -EFAULT;
esoc_mdm_log("ESOC_CMD_EXE: Executing esoc command: %u\n",
esoc_cmd);
return clink_ops->cmd_exe(esoc_cmd, esoc_clink);
case ESOC_WAIT_FOR_REQ:
if (esoc_clink->req_eng != &uhandle->eng) {
esoc_mdm_log("ESOC_WAIT_FOR_REQ: Failed to access\n");
return -EACCES;
}
esoc_mdm_log(
"ESOC_WAIT_FOR_REQ: Waiting for req event to arrive.\n");
err = wait_event_interruptible(esoc_udev->req_wait,
!kfifo_is_empty(&esoc_udev->req_fifo));
if (!err) {
err = kfifo_out_spinlocked(&esoc_udev->req_fifo, &req,
sizeof(req),
&esoc_udev->req_fifo_lock);
if (err != sizeof(req)) {
esoc_mdm_log(
"ESOC_WAIT_FOR_REQ: Failed to read the event\n");
pr_err("read from clink %s req q failed\n",
esoc_clink->name);
return -EIO;
}
put_user(req, (unsigned int __user *)uarg);
esoc_mdm_log(
"ESOC_WAIT_FOR_REQ: Event arrived: %u\n", req);
}
return err;
case ESOC_NOTIFY:
		if (get_user(esoc_cmd, (u32 __user *)arg))
			return -EFAULT;
esoc_mdm_log("ESOC_NOTIFY: Notifying esoc about cmd: %u\n",
esoc_cmd);
clink_ops->notify(esoc_cmd, esoc_clink);
break;
case ESOC_GET_STATUS:
clink_ops->get_status(&status, esoc_clink);
esoc_mdm_log(
"ESOC_GET_STATUS: Sending the status from esoc: %u\n", status);
put_user(status, (unsigned int __user *)uarg);
break;
case ESOC_GET_ERR_FATAL:
clink_ops->get_err_fatal(&status, esoc_clink);
esoc_mdm_log(
"ESOC_GET_ERR_FATAL: Sending err_fatal status from esoc: %u\n",
status);
put_user(status, (unsigned int __user *)uarg);
break;
case ESOC_WAIT_FOR_CRASH:
esoc_mdm_log(
"ESOC_WAIT_FOR_CRASH: Waiting for evt to arrive..\n");
err = wait_event_interruptible(esoc_udev->evt_wait,
!kfifo_is_empty(&esoc_udev->evt_fifo));
if (!err) {
err = kfifo_out_spinlocked(&esoc_udev->evt_fifo, &evt,
sizeof(evt),
&esoc_udev->evt_fifo_lock);
if (err != sizeof(evt)) {
esoc_mdm_log(
"ESOC_WAIT_FOR_CRASH: Failed to read event\n");
pr_err("read from clink %s evt q failed\n",
esoc_clink->name);
return -EIO;
}
put_user(evt, (unsigned int __user *)uarg);
			esoc_mdm_log("ESOC_WAIT_FOR_CRASH: Event arrived: %u\n",
									evt);
}
return err;
case ESOC_GET_LINK_ID:
return esoc_get_link_id(esoc_clink, arg);
default:
return -EINVAL;
	}
return 0;
}
static int esoc_dev_open(struct inode *inode, struct file *file)
{
struct esoc_uhandle *uhandle;
struct esoc_udev *esoc_udev;
struct esoc_clink *esoc_clink;
struct esoc_eng *eng;
unsigned int minor = iminor(inode);
esoc_udev = esoc_udev_get_by_minor(minor);
if (!esoc_udev) {
esoc_mdm_log("failed to get udev\n");
pr_err("failed to get udev\n");
return -ENOMEM;
}
esoc_clink = get_esoc_clink(esoc_udev->clink->id);
if (!esoc_clink) {
esoc_mdm_log("failed to get clink\n");
pr_err("failed to get clink\n");
return -ENOMEM;
}
uhandle = kzalloc(sizeof(*uhandle), GFP_KERNEL);
if (!uhandle) {
put_esoc_clink(esoc_clink);
return -ENOMEM;
}
uhandle->esoc_udev = esoc_udev;
uhandle->esoc_clink = esoc_clink;
eng = &uhandle->eng;
eng->handle_clink_req = esoc_udev_handle_clink_req;
eng->handle_clink_evt = esoc_udev_handle_clink_evt;
file->private_data = uhandle;
esoc_mdm_log(
"%s successfully attached to esoc driver\n", current->comm);
return 0;
}
static int esoc_dev_release(struct inode *inode, struct file *file)
{
struct esoc_clink *esoc_clink;
struct esoc_uhandle *uhandle = file->private_data;
esoc_clink = uhandle->esoc_clink;
if (uhandle->req_eng_reg) {
esoc_mdm_log("Unregistering req_eng\n");
esoc_clink_unregister_req_eng(esoc_clink, &uhandle->eng);
} else {
esoc_mdm_log("No req_eng to unregister\n");
}
if (uhandle->cmd_eng_reg) {
esoc_mdm_log("Unregistering cmd_eng\n");
esoc_clink_unregister_cmd_eng(esoc_clink, &uhandle->eng);
} else {
esoc_mdm_log("No cmd_eng to unregister\n");
}
uhandle->req_eng_reg = false;
uhandle->cmd_eng_reg = false;
put_esoc_clink(esoc_clink);
kfree(uhandle);
esoc_mdm_log("%s Unregistered with esoc\n", current->comm);
return 0;
}
static const struct file_operations esoc_dev_fops = {
.owner = THIS_MODULE,
.open = esoc_dev_open,
.unlocked_ioctl = esoc_dev_ioctl,
.release = esoc_dev_release,
};
int esoc_clink_add_device(struct device *dev, void *dummy)
{
struct esoc_udev *esoc_udev;
struct esoc_clink *esoc_clink = to_esoc_clink(dev);
esoc_udev = get_free_esoc_udev(esoc_clink);
if (IS_ERR_OR_NULL(esoc_udev))
return PTR_ERR(esoc_udev);
esoc_udev->dev = device_create(esoc_class, &esoc_clink->dev,
MKDEV(esoc_major, esoc_clink->id),
esoc_clink, "esoc-%d", esoc_clink->id);
if (IS_ERR_OR_NULL(esoc_udev->dev)) {
pr_err("failed to create user device\n");
goto dev_err;
}
return 0;
dev_err:
return_esoc_udev(esoc_udev);
return -ENODEV;
}
int esoc_clink_del_device(struct device *dev, void *dummy)
{
struct esoc_udev *esoc_udev;
struct esoc_clink *esoc_clink = to_esoc_clink(dev);
esoc_udev = esoc_udev_get_by_minor(esoc_clink->id);
if (!esoc_udev)
return 0;
device_destroy(esoc_class, MKDEV(esoc_major, esoc_clink->id));
return_esoc_udev(esoc_udev);
return 0;
}
static int esoc_dev_notifier_call(struct notifier_block *nb,
unsigned long action,
void *data)
{
struct device *dev = data;
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
return esoc_clink_add_device(dev, NULL);
case BUS_NOTIFY_DEL_DEVICE:
return esoc_clink_del_device(dev, NULL);
	}
return 0;
}
static struct notifier_block esoc_dev_notifier = {
.notifier_call = esoc_dev_notifier_call,
};
int __init esoc_dev_init(void)
{
int ret = 0;
esoc_class = class_create(THIS_MODULE, "esoc-dev");
if (IS_ERR_OR_NULL(esoc_class)) {
pr_err("coudn't create class");
return PTR_ERR(esoc_class);
}
esoc_major = register_chrdev(0, "esoc", &esoc_dev_fops);
if (esoc_major < 0) {
pr_err("failed to allocate char dev\n");
ret = esoc_major;
goto class_unreg;
}
ret = bus_register_notifier(&esoc_bus_type, &esoc_dev_notifier);
if (ret)
goto chrdev_unreg;
esoc_for_each_dev(NULL, esoc_clink_add_device);
return ret;
chrdev_unreg:
unregister_chrdev(esoc_major, "esoc");
class_unreg:
class_destroy(esoc_class);
	return ret;
}
void __exit esoc_dev_exit(void)
{
bus_unregister_notifier(&esoc_bus_type, &esoc_dev_notifier);
class_destroy(esoc_class);
unregister_chrdev(esoc_major, "esoc-dev");
}
MODULE_LICENSE("GPL v2");
module_init(esoc_dev_init);
module_exit(esoc_dev_exit);

49
drivers/esoc/mdm-dbg.h Normal file
View file

@ -0,0 +1,49 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
*/
static bool debug_init_done;
#ifndef CONFIG_ESOC_MDM_DBG_ENG
static inline bool dbg_check_cmd_mask(unsigned int cmd)
{
return false;
}
static inline bool dbg_check_notify_mask(unsigned int notify)
{
return false;
}
static inline int mdm_dbg_eng_init(struct esoc_drv *drv,
struct esoc_clink *clink)
{
return 0;
}
#else
extern bool dbg_check_cmd_mask(unsigned int cmd);
extern bool dbg_check_notify_mask(unsigned int notify);
extern int mdm_dbg_eng_init(struct esoc_drv *drv,
struct esoc_clink *clink);
#endif
static inline bool mdm_dbg_stall_cmd(unsigned int cmd)
{
if (debug_init_done)
return dbg_check_cmd_mask(cmd);
else
return false;
}
static inline bool mdm_dbg_stall_notify(unsigned int notify)
{
if (debug_init_done)
return dbg_check_notify_mask(notify);
else
return false;
}
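
These helpers are meant to be consulted just before a command or notification is forwarded to the modem, so that operations masked by the debug engine can be stalled. A hedged sketch of such a call site follows; the surrounding function is illustrative, and the real call sites live in the esoc-mdm drivers rather than in this header.

/* Illustrative gate around a command; not taken from the snapshot. */
static int demo_exec_cmd(unsigned int cmd, struct esoc_clink *esoc_clink)
{
	if (mdm_dbg_stall_cmd(cmd)) {
		/* Command is masked for debugging; skip the hardware path. */
		dev_dbg(&esoc_clink->dev, "cmd %u stalled for debug\n", cmd);
		return 0;
	}

	/* ...drive the gpios / IPC for the real command here... */
	return 0;
}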

69
include/linux/esoc_client.h Normal file
View file

@ -0,0 +1,69 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014, 2017-2018, The Linux Foundation. All rights reserved.
*/
#ifndef __ESOC_CLIENT_H_
#define __ESOC_CLIENT_H_
#include <linux/device.h>
#include <linux/esoc_ctrl.h>
#include <linux/notifier.h>
struct esoc_client_hook {
char *name;
void *priv;
enum esoc_client_hook_prio prio;
int (*esoc_link_power_on)(void *priv, bool mdm_crashed);
void (*esoc_link_power_off)(void *priv, bool mdm_crashed);
u64 (*esoc_link_get_id)(void *priv);
};
/*
* struct esoc_desc: Describes an external soc
* @name: external soc name
* @priv: private data for external soc
*/
struct esoc_desc {
const char *name;
const char *link;
const char *link_info;
void *priv;
};
#ifdef CONFIG_ESOC_CLIENT
/* Can return probe deferral */
struct esoc_desc *devm_register_esoc_client(struct device *dev,
const char *name);
void devm_unregister_esoc_client(struct device *dev,
struct esoc_desc *esoc_desc);
int esoc_register_client_notifier(struct notifier_block *nb);
int esoc_register_client_hook(struct esoc_desc *desc,
struct esoc_client_hook *client_hook);
int esoc_unregister_client_hook(struct esoc_desc *desc,
struct esoc_client_hook *client_hook);
#else
static inline struct esoc_desc *devm_register_esoc_client(struct device *dev,
const char *name)
{
return NULL;
}
static inline void devm_unregister_esoc_client(struct device *dev,
struct esoc_desc *esoc_desc)
{
}
static inline int esoc_register_client_notifier(struct notifier_block *nb)
{
return -EIO;
}
static inline int esoc_register_client_hook(struct esoc_desc *desc,
struct esoc_client_hook *client_hook)
{
return -EIO;
}
static inline int esoc_unregister_client_hook(struct esoc_desc *desc,
struct esoc_client_hook *client_hook)
{
return -EIO;
}
#endif
#endif
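
A link driver (an MHI transport, for instance) fills in struct esoc_client_hook and registers it against the descriptor it obtained from devm_register_esoc_client(). A minimal sketch, with hypothetical callbacks and identifier values:

/* Hypothetical hook registration; demo_* names are illustrative. */
static int demo_link_power_on(void *priv, bool mdm_crashed)
{
	/* Bring the link up; priv is whatever the link driver stored. */
	return 0;
}

static void demo_link_power_off(void *priv, bool mdm_crashed)
{
	/* Quiesce the link before the modem is powered down. */
}

static u64 demo_link_get_id(void *priv)
{
	return 0x1;	/* illustrative link identifier */
}

static struct esoc_client_hook demo_hook = {
	.name			= "demo-mhi",
	.prio			= ESOC_MHI_HOOK,
	.esoc_link_power_on	= demo_link_power_on,
	.esoc_link_power_off	= demo_link_power_off,
	.esoc_link_get_id	= demo_link_get_id,
};

/* Called once the driver holds a valid struct esoc_desc *desc. */
static int demo_attach_hook(struct esoc_desc *desc)
{
	return esoc_register_client_hook(desc, &demo_hook);
}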

94
include/uapi/linux/esoc_ctrl.h Normal file
View file

@ -0,0 +1,94 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _UAPI_ESOC_CTRL_H_
#define _UAPI_ESOC_CTRL_H_
#include <linux/types.h>
enum esoc_client_hook_prio {
ESOC_MHI_HOOK,
ESOC_MAX_HOOKS
};
struct esoc_link_data {
enum esoc_client_hook_prio prio;
__u64 link_id;
};
#define ESOC_CODE 0xCC
#define ESOC_CMD_EXE _IOW(ESOC_CODE, 1, unsigned int)
#define ESOC_WAIT_FOR_REQ _IOR(ESOC_CODE, 2, unsigned int)
#define ESOC_NOTIFY _IOW(ESOC_CODE, 3, unsigned int)
#define ESOC_GET_STATUS _IOR(ESOC_CODE, 4, unsigned int)
#define ESOC_GET_ERR_FATAL _IOR(ESOC_CODE, 5, unsigned int)
#define ESOC_WAIT_FOR_CRASH _IOR(ESOC_CODE, 6, unsigned int)
#define ESOC_REG_REQ_ENG _IO(ESOC_CODE, 7)
#define ESOC_REG_CMD_ENG _IO(ESOC_CODE, 8)
#define ESOC_GET_LINK_ID _IOWR(ESOC_CODE, 9, struct esoc_link_data)
#define ESOC_REQ_SEND_SHUTDOWN ESOC_REQ_SEND_SHUTDOWN
#define ESOC_REQ_CRASH_SHUTDOWN ESOC_REQ_CRASH_SHUTDOWN
#define ESOC_PON_RETRY ESOC_PON_RETRY
enum esoc_evt {
ESOC_RUN_STATE = 0x1,
ESOC_UNEXPECTED_RESET,
ESOC_ERR_FATAL,
ESOC_IN_DEBUG,
ESOC_REQ_ENG_ON,
ESOC_REQ_ENG_OFF,
ESOC_CMD_ENG_ON,
ESOC_CMD_ENG_OFF,
ESOC_INVALID_STATE,
ESOC_RETRY_PON_EVT,
};
enum esoc_cmd {
ESOC_PWR_ON = 1,
ESOC_PWR_OFF,
ESOC_FORCE_PWR_OFF,
ESOC_RESET,
ESOC_PREPARE_DEBUG,
ESOC_EXE_DEBUG,
ESOC_EXIT_DEBUG,
};
enum esoc_notify {
ESOC_IMG_XFER_DONE = 1,
ESOC_BOOT_DONE,
ESOC_BOOT_FAIL,
ESOC_IMG_XFER_RETRY,
ESOC_IMG_XFER_FAIL,
ESOC_UPGRADE_AVAILABLE,
ESOC_DEBUG_DONE,
ESOC_DEBUG_FAIL,
ESOC_PRIMARY_CRASH,
ESOC_PRIMARY_REBOOT,
ESOC_PON_RETRY,
};
enum esoc_req {
ESOC_REQ_IMG = 1,
ESOC_REQ_DEBUG,
ESOC_REQ_SHUTDOWN,
ESOC_REQ_SEND_SHUTDOWN,
ESOC_REQ_CRASH_SHUTDOWN,
};
#ifdef __KERNEL__
/**
* struct esoc_handle: Handle for clients of esoc
* @name: name of the external soc.
* @link: link of external soc.
* @id: id of external soc.
*/
struct esoc_handle {
const char *name;
const char *link;
unsigned int id;
};
#endif
#endif
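
The ioctl interface defined above maps directly onto the esoc_dev.c handler earlier in this patch. Below is a minimal user-space sketch, not part of the snapshot: it registers itself as the request engine, waits for one request, and reads the modem status. The /dev/esoc-0 path is an assumption based on the "esoc-%d" device naming in esoc_clink_add_device(); the exact node name depends on udev.

/* Minimal user-space sketch; error handling is kept to the essentials. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/esoc_ctrl.h>

int main(void)
{
	unsigned int req, status;
	int fd = open("/dev/esoc-0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, ESOC_REG_REQ_ENG) < 0)
		goto out;
	if (ioctl(fd, ESOC_WAIT_FOR_REQ, &req) == 0)
		printf("esoc request %u\n", req);
	if (ioctl(fd, ESOC_GET_STATUS, &status) == 0)
		printf("esoc status %u\n", status);
out:
	close(fd);
	return 0;
}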