rpmsg: Add snapshot of RPMSG Glink drivers

This snapshot is taken as of msm-4.14 commit 67942dbf2187
("Merge "net: netfilter: IRC DCC for private clients"").

This change brings the rpmsg and rpmsg glink drivers up to date with
the fixes from msm-4.14 and also adds the SPI and SPSS transports from
msm-4.14. In addition, convert the license headers to SPDX format.

Change-Id: Idc0c9ad00c0562a7f3e2807ea9cfca644680dd9d
Signed-off-by: Chris Lew <clew@codeaurora.org>
Author: Chris Lew <clew@codeaurora.org>, 2017-12-07 11:37:02 -08:00
commit d064080d80, parent 4e1105e28b
11 changed files with 3361 additions and 53 deletions


@ -39,6 +39,28 @@ config RPMSG_QCOM_GLINK_SMEM
which provides support for using the GLINK communication protocol
over SMEM.
config RPMSG_QCOM_GLINK_SPSS
tristate "QTI SPSS Glink driver"
select RPMSG_QCOM_GLINK_NATIVE
depends on MAILBOX
depends on QCOM_SMEM
select QSEE_IPC_IRQ
help
Say y here to enable support for the GLINK SPSS communication driver,
which provides support for using the GLINK communication protocol
over SMEM. This protocol maps the smem and then shares the mapped
region with the remote proc by writing the smem descriptor location
and size into shared registers.
config RPMSG_QCOM_GLINK_SPI
tristate "QTI SPI Glink driver"
help
Say y here to enable support for the GLINK SPI communication driver,
which provides support for using the GLINK communication protocol
over SPI. This transport performs marshaling of GLINK commands and
data to the appropriate SPI bus wire format and allows for GLINK
communication with remote subsystems that are external to the SoC.
config RPMSG_QCOM_SMD
tristate "Qualcomm Shared Memory Driver (SMD)"
depends on MAILBOX


@ -4,5 +4,7 @@ obj-$(CONFIG_RPMSG_CHAR) += rpmsg_char.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_NATIVE) += qcom_glink_native.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_SMEM) += qcom_glink_smem.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_SPSS) += qcom_glink_spss.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_SPI) += qcom_glink_spi.o
obj-$(CONFIG_RPMSG_QCOM_SMD) += qcom_smd.o
obj-$(CONFIG_RPMSG_VIRTIO) += virtio_rpmsg_bus.o


@ -18,11 +18,35 @@
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/mailbox_client.h>
#include <linux/ipc_logging.h>
#include "rpmsg_internal.h"
#include "qcom_glink_native.h"
#define GLINK_LOG_PAGE_CNT 2
#define GLINK_INFO(ctxt, x, ...) \
do { \
if (ctxt) \
ipc_log_string(ctxt, "[%s]: "x, __func__, ##__VA_ARGS__); \
} while (0)
#define CH_INFO(ch, x, ...) \
do { \
if (ch->glink && ch->glink->ilc) \
ipc_log_string(ch->glink->ilc, "%s[%d:%d] %s: "x, ch->name, \
ch->lcid, ch->rcid, __func__, ##__VA_ARGS__); \
} while (0)
#define GLINK_ERR(ctxt, x, ...) \
do { \
pr_err_ratelimited("[%s]: "x, __func__, ##__VA_ARGS__); \
if (ctxt) \
ipc_log_string(ctxt, "[%s]: "x, __func__, ##__VA_ARGS__); \
} while (0)
#define GLINK_NAME_SIZE 32
#define GLINK_VERSION_1 1
@ -83,6 +107,8 @@ struct glink_core_rx_intent {
* @rx_pipe: pipe object for receive FIFO
* @tx_pipe: pipe object for transmit FIFO
* @irq: IRQ for signaling incoming events
* @kworker: kworker to handle rx_done work
* @task: kthread running @kworker
* @rx_work: worker for handling received control messages
* @rx_lock: protects the @rx_queue
* @rx_queue: queue of received control messages to be processed in @rx_work
@ -90,8 +116,10 @@ struct glink_core_rx_intent {
* @idr_lock: synchronizes @lcids and @rcids modifications
* @lcids: idr of all channels with a known local channel id
* @rcids: idr of all channels with a known remote channel id
* @in_reset: reset status of this edge
* @features: remote features
* @intentless: flag to indicate that there is no intent
* @ilc: ipc logging context reference
*/
struct qcom_glink {
struct device *dev;
@ -106,6 +134,9 @@ struct qcom_glink {
int irq;
struct kthread_worker kworker;
struct task_struct *task;
struct work_struct rx_work;
spinlock_t rx_lock;
struct list_head rx_queue;
@ -115,9 +146,13 @@ struct qcom_glink {
spinlock_t idr_lock;
struct idr lcids;
struct idr rcids;
atomic_t in_reset;
unsigned long features;
bool intentless;
void *ilc;
};
enum {
@ -149,7 +184,8 @@ enum {
* @open_req: completed once open-request has been received
* @intent_req_lock: Synchronises multiple intent requests
* @intent_req_result: Result of intent request
* @intent_req_comp: Completion for intent_req signalling
* @intent_req_comp: Status of intent request completion
* @intent_req_event: Waitqueue for @intent_req_comp
*/
struct glink_channel {
struct rpmsg_endpoint ept;
@ -168,19 +204,23 @@ struct glink_channel {
spinlock_t intent_lock;
struct idr liids;
struct idr riids;
struct work_struct intent_work;
struct kthread_work intent_work;
struct list_head done_intents;
struct glink_core_rx_intent *buf;
int buf_offset;
int buf_size;
unsigned int lsigs;
unsigned int rsigs;
struct completion open_ack;
struct completion open_req;
struct mutex intent_req_lock;
bool intent_req_result;
struct completion intent_req_comp;
atomic_t intent_req_comp;
wait_queue_head_t intent_req_event;
};
#define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept)
@ -201,10 +241,11 @@ static const struct rpmsg_endpoint_ops glink_endpoint_ops;
#define RPM_CMD_TX_DATA_CONT 12
#define RPM_CMD_READ_NOTIF 13
#define RPM_CMD_RX_DONE_W_REUSE 14
#define RPM_CMD_SIGNALS 15
#define GLINK_FEATURE_INTENTLESS BIT(1)
static void qcom_glink_rx_done_work(struct work_struct *work);
static void qcom_glink_rx_done_work(struct kthread_work *work);
static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
const char *name)
@ -225,10 +266,11 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
init_completion(&channel->open_req);
init_completion(&channel->open_ack);
init_completion(&channel->intent_req_comp);
atomic_set(&channel->intent_req_comp, 0);
init_waitqueue_head(&channel->intent_req_event);
INIT_LIST_HEAD(&channel->done_intents);
INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work);
kthread_init_work(&channel->intent_work, qcom_glink_rx_done_work);
idr_init(&channel->liids);
idr_init(&channel->riids);
@ -243,6 +285,9 @@ static void qcom_glink_channel_release(struct kref *ref)
refcount);
unsigned long flags;
CH_INFO(channel, "\n");
wake_up(&channel->intent_req_event);
spin_lock_irqsave(&channel->intent_lock, flags);
idr_destroy(&channel->liids);
idr_destroy(&channel->riids);
@ -280,6 +325,15 @@ static void qcom_glink_tx_write(struct qcom_glink *glink,
glink->tx_pipe->write(glink->tx_pipe, hdr, hlen, data, dlen);
}
static void qcom_glink_pipe_reset(struct qcom_glink *glink)
{
if (glink->tx_pipe->reset)
glink->tx_pipe->reset(glink->tx_pipe);
if (glink->rx_pipe->reset)
glink->rx_pipe->reset(glink->rx_pipe);
}
static int qcom_glink_tx(struct qcom_glink *glink,
const void *hdr, size_t hlen,
const void *data, size_t dlen, bool wait)
@ -300,6 +354,11 @@ static int qcom_glink_tx(struct qcom_glink *glink,
goto out;
}
if (atomic_read(&glink->in_reset)) {
ret = -ECONNRESET;
goto out;
}
/* Wait without holding the tx_lock */
spin_unlock_irqrestore(&glink->tx_lock, flags);
@ -327,6 +386,7 @@ static int qcom_glink_send_version(struct qcom_glink *glink)
msg.param1 = cpu_to_le16(GLINK_VERSION_1);
msg.param2 = cpu_to_le32(glink->features);
GLINK_INFO(glink->ilc, "vers:%d features:%d\n", msg.param1, msg.param2);
return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}
@ -338,6 +398,7 @@ static void qcom_glink_send_version_ack(struct qcom_glink *glink)
msg.param1 = cpu_to_le16(GLINK_VERSION_1);
msg.param2 = cpu_to_le32(glink->features);
GLINK_INFO(glink->ilc, "vers:%d features:%d\n", msg.param1, msg.param2);
qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}
@ -350,6 +411,7 @@ static void qcom_glink_send_open_ack(struct qcom_glink *glink,
msg.param1 = cpu_to_le16(channel->rcid);
msg.param2 = cpu_to_le32(0);
CH_INFO(channel, "\n");
qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}
@ -368,7 +430,9 @@ static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
}
channel->intent_req_result = granted;
complete(&channel->intent_req_comp);
atomic_inc(&channel->intent_req_comp);
wake_up(&channel->intent_req_event);
CH_INFO(channel, "\n");
}
/**
@ -404,11 +468,12 @@ static int qcom_glink_send_open_req(struct qcom_glink *glink,
return ret;
channel->lcid = ret;
CH_INFO(channel, "\n");
req.msg.cmd = cpu_to_le16(RPM_CMD_OPEN);
req.msg.param1 = cpu_to_le16(channel->lcid);
req.msg.param2 = cpu_to_le32(name_len);
strcpy(req.name, channel->name);
strlcpy(req.name, channel->name, GLINK_NAME_SIZE);
ret = qcom_glink_tx(glink, &req, req_len, NULL, 0, true);
if (ret)
@ -417,6 +482,8 @@ static int qcom_glink_send_open_req(struct qcom_glink *glink,
return 0;
remove_idr:
CH_INFO(channel, "remote_idr\n");
spin_lock_irqsave(&glink->idr_lock, flags);
idr_remove(&glink->lcids, channel->lcid);
channel->lcid = 0;
@ -434,6 +501,7 @@ static void qcom_glink_send_close_req(struct qcom_glink *glink,
req.param1 = cpu_to_le16(channel->lcid);
req.param2 = 0;
CH_INFO(channel, "\n");
qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
}
@ -446,42 +514,58 @@ static void qcom_glink_send_close_ack(struct qcom_glink *glink,
req.param1 = cpu_to_le16(rcid);
req.param2 = 0;
GLINK_INFO(glink->ilc, "rcid:%d\n", rcid);
qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
}
static void qcom_glink_rx_done_work(struct work_struct *work)
static int __qcom_glink_rx_done(struct qcom_glink *glink,
struct glink_channel *channel,
struct glink_core_rx_intent *intent,
bool wait)
{
struct glink_channel *channel = container_of(work, struct glink_channel,
intent_work);
struct qcom_glink *glink = channel->glink;
struct glink_core_rx_intent *intent, *tmp;
struct {
u16 id;
u16 lcid;
u32 liid;
} __packed cmd;
unsigned int cid = channel->lcid;
unsigned int iid;
bool reuse;
unsigned int iid = intent->id;
bool reuse = intent->reuse;
int ret;
cmd.id = reuse ? RPM_CMD_RX_DONE_W_REUSE : RPM_CMD_RX_DONE;
cmd.lcid = cid;
cmd.liid = iid;
ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, wait);
if (ret)
return ret;
if (!reuse) {
kfree(intent->data);
kfree(intent);
}
CH_INFO(channel, "reuse:%d liid:%d", reuse, iid);
return 0;
}
static void qcom_glink_rx_done_work(struct kthread_work *work)
{
struct glink_channel *channel = container_of(work, struct glink_channel,
intent_work);
struct qcom_glink *glink = channel->glink;
struct glink_core_rx_intent *intent, *tmp;
unsigned long flags;
spin_lock_irqsave(&channel->intent_lock, flags);
list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
list_del(&intent->node);
spin_unlock_irqrestore(&channel->intent_lock, flags);
iid = intent->id;
reuse = intent->reuse;
cmd.id = reuse ? RPM_CMD_RX_DONE_W_REUSE : RPM_CMD_RX_DONE;
cmd.lcid = cid;
cmd.liid = iid;
__qcom_glink_rx_done(glink, channel, intent, true);
qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
if (!reuse) {
kfree(intent->data);
kfree(intent);
}
spin_lock_irqsave(&channel->intent_lock, flags);
}
spin_unlock_irqrestore(&channel->intent_lock, flags);
@ -491,6 +575,8 @@ static void qcom_glink_rx_done(struct qcom_glink *glink,
struct glink_channel *channel,
struct glink_core_rx_intent *intent)
{
int ret = -EAGAIN;
/* We don't send RX_DONE to intentless systems */
if (glink->intentless) {
kfree(intent->data);
@ -507,10 +593,14 @@ static void qcom_glink_rx_done(struct qcom_glink *glink,
/* Schedule the sending of a rx_done indication */
spin_lock(&channel->intent_lock);
list_add_tail(&intent->node, &channel->done_intents);
spin_unlock(&channel->intent_lock);
if (list_empty(&channel->done_intents))
ret = __qcom_glink_rx_done(glink, channel, intent, false);
schedule_work(&channel->intent_work);
if (ret) {
list_add_tail(&intent->node, &channel->done_intents);
kthread_queue_work(&glink->kworker, &channel->intent_work);
}
spin_unlock(&channel->intent_lock);
}
/**
@ -527,6 +617,8 @@ static void qcom_glink_receive_version(struct qcom_glink *glink,
u32 version,
u32 features)
{
GLINK_INFO(glink->ilc, "vers:%d features:%d\n", version, features);
switch (version) {
case 0:
break;
@ -554,6 +646,8 @@ static void qcom_glink_receive_version_ack(struct qcom_glink *glink,
u32 version,
u32 features)
{
GLINK_INFO(glink->ilc, "vers:%d features:%d\n", version, features);
switch (version) {
case 0:
/* Version negotiation failed */
@ -589,6 +683,7 @@ static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink,
msg.param1 = cpu_to_le16(channel->lcid);
msg.param2 = cpu_to_le32(granted);
CH_INFO(channel, "\n");
qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
return 0;
@ -622,6 +717,9 @@ static int qcom_glink_advertise_intent(struct qcom_glink *glink,
cmd.size = cpu_to_le32(intent->size);
cmd.liid = cpu_to_le32(intent->id);
CH_INFO(channel, "count:%d size:%d liid:%d\n", 1,
intent->size, intent->id);
qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
return 0;
@ -692,6 +790,7 @@ static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
}
intent->in_use = false;
CH_INFO(channel, "reuse:%d iid:%d\n", reuse, intent->id);
if (!reuse) {
idr_remove(&channel->riids, intent->id);
@ -792,9 +891,6 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
return -EAGAIN;
}
if (WARN(chunk_size % 4, "Incoming data must be word aligned\n"))
return -EINVAL;
rcid = le16_to_cpu(hdr.msg.param1);
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, rcid);
@ -805,6 +901,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
/* Drop the message */
goto advance_rx;
}
CH_INFO(channel, "chunk_size:%d left_size:%d\n", chunk_size, left_size);
if (glink->intentless) {
/* Might have an ongoing, fragmented, message to append */
@ -927,6 +1024,8 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
intent->id = le32_to_cpu(msg->intents[i].iid);
intent->size = le32_to_cpu(msg->intents[i].size);
CH_INFO(channel, "riid:%d size:%d\n", intent->id, intent->size);
spin_lock_irqsave(&channel->intent_lock, flags);
ret = idr_alloc(&channel->riids, intent,
intent->id, intent->id + 1, GFP_ATOMIC);
@ -934,6 +1033,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
if (ret < 0)
dev_err(glink->dev, "failed to store remote intent\n");
}
kfree(msg);
@ -952,7 +1052,56 @@ static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
return -EINVAL;
}
complete(&channel->open_ack);
CH_INFO(channel, "\n");
complete_all(&channel->open_ack);
return 0;
}
/**
* qcom_glink_send_signals() - convert a signal cmd to wire format and transmit
* @glink: The transport to transmit on.
* @channel: The glink channel
* @sigs: The signals to encode.
*
* Return: 0 on success or standard Linux error code.
*/
static int qcom_glink_send_signals(struct qcom_glink *glink,
struct glink_channel *channel,
u32 sigs)
{
struct glink_msg msg;
msg.cmd = cpu_to_le16(RPM_CMD_SIGNALS);
msg.param1 = cpu_to_le16(channel->lcid);
msg.param2 = cpu_to_le32(sigs);
GLINK_INFO(glink->ilc, "sigs:%d\n", sigs);
return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}
static int qcom_glink_handle_signals(struct qcom_glink *glink,
unsigned int rcid, unsigned int signals)
{
struct glink_channel *channel;
unsigned long flags;
u32 old;
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, rcid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
if (!channel) {
dev_err(glink->dev, "signal for non-existing channel\n");
return -EINVAL;
}
old = channel->rsigs;
channel->rsigs = signals;
if (channel->ept.sig_cb)
channel->ept.sig_cb(channel->ept.rpdev, old, channel->rsigs);
CH_INFO(channel, "old:%d new:%d\n", old, channel->rsigs);
return 0;
}
@ -1018,6 +1167,10 @@ static irqreturn_t qcom_glink_native_intr(int irq, void *data)
qcom_glink_handle_intent_req_ack(glink, param1, param2);
qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
case RPM_CMD_SIGNALS:
qcom_glink_handle_signals(glink, param1, param2);
qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
default:
dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd);
ret = -EINVAL;
@ -1043,6 +1196,7 @@ static struct glink_channel *qcom_glink_create_local(struct qcom_glink *glink,
if (IS_ERR(channel))
return ERR_CAST(channel);
CH_INFO(channel, "\n");
ret = qcom_glink_send_open_req(glink, channel);
if (ret)
goto release_channel;
@ -1060,12 +1214,15 @@ static struct glink_channel *qcom_glink_create_local(struct qcom_glink *glink,
return channel;
err_timeout:
CH_INFO(channel, "err_timeout\n");
/* qcom_glink_send_open_req() did register the channel in lcids */
spin_lock_irqsave(&glink->idr_lock, flags);
idr_remove(&glink->lcids, channel->lcid);
spin_unlock_irqrestore(&glink->idr_lock, flags);
release_channel:
CH_INFO(channel, "release_channel\n");
/* Release qcom_glink_send_open_req() reference */
kref_put(&channel->refcount, qcom_glink_channel_release);
/* Release qcom_glink_alloc_channel() reference */
@ -1080,6 +1237,8 @@ static int qcom_glink_create_remote(struct qcom_glink *glink,
{
int ret;
CH_INFO(channel, "\n");
qcom_glink_send_open_ack(glink, channel);
ret = qcom_glink_send_open_req(glink, channel);
@ -1095,15 +1254,16 @@ static int qcom_glink_create_remote(struct qcom_glink *glink,
return 0;
close_link:
CH_INFO(channel, "close_link %d\n", ret);
/*
* Send a close request to "undo" our open-ack. The close-ack will
* release the last reference.
* release qcom_glink_send_open_req() reference and the last reference
* will be released after receiving remote_close or transport unregister
* by calling qcom_glink_native_remove().
*/
qcom_glink_send_close_req(glink, channel);
/* Release qcom_glink_send_open_req() reference */
kref_put(&channel->refcount, qcom_glink_channel_release);
return ret;
}
@ -1161,7 +1321,7 @@ static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
__be32 *val = defaults;
int size;
if (glink->intentless)
if (glink->intentless || !completion_done(&channel->open_ack))
return 0;
prop = of_find_property(np, "qcom,intents", NULL);
@ -1216,20 +1376,27 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
mutex_lock(&channel->intent_req_lock);
reinit_completion(&channel->intent_req_comp);
atomic_set(&channel->intent_req_comp, 0);
cmd.id = RPM_CMD_RX_INTENT_REQ;
cmd.cid = channel->lcid;
cmd.size = size;
CH_INFO(channel, "size:%d\n", size);
ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
if (ret)
goto unlock;
ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ);
ret = wait_event_timeout(channel->intent_req_event,
atomic_read(&channel->intent_req_comp) ||
atomic_read(&glink->in_reset), 10 * HZ);
if (!ret) {
dev_err(glink->dev, "intent request timed out\n");
ret = -ETIMEDOUT;
} else if (atomic_read(&glink->in_reset)) {
CH_INFO(channel, "ssr detected\n");
ret = -ECONNRESET;
} else {
ret = channel->intent_req_result ? 0 : -ECANCELED;
}
@ -1315,6 +1482,27 @@ static int qcom_glink_trysend(struct rpmsg_endpoint *ept, void *data, int len)
return __qcom_glink_send(channel, data, len, false);
}
static int qcom_glink_get_sigs(struct rpmsg_endpoint *ept,
u32 *lsigs, u32 *rsigs)
{
struct glink_channel *channel = to_glink_channel(ept);
*lsigs = channel->lsigs;
*rsigs = channel->rsigs;
return 0;
}
static int qcom_glink_set_sigs(struct rpmsg_endpoint *ept, u32 sigs)
{
struct glink_channel *channel = to_glink_channel(ept);
struct qcom_glink *glink = channel->glink;
channel->lsigs = sigs;
return qcom_glink_send_signals(glink, channel, sigs);
}
/*
* Finds the device_node for the glink child interested in this channel.
*/
@ -1348,6 +1536,8 @@ static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
.destroy_ept = qcom_glink_destroy_ept,
.send = qcom_glink_send,
.trysend = qcom_glink_trysend,
.get_sigs = qcom_glink_get_sigs,
.set_sigs = qcom_glink_set_sigs,
};
static void qcom_glink_rpdev_release(struct device *dev)
@ -1396,7 +1586,7 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
channel->rcid = ret;
spin_unlock_irqrestore(&glink->idr_lock, flags);
complete(&channel->open_req);
complete_all(&channel->open_req);
if (create_device) {
rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
@ -1406,7 +1596,7 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
}
rpdev->ept = &channel->ept;
strncpy(rpdev->id.name, name, RPMSG_NAME_SIZE);
strlcpy(rpdev->id.name, name, RPMSG_NAME_SIZE);
rpdev->src = RPMSG_ADDR_ANY;
rpdev->dst = RPMSG_ADDR_ANY;
rpdev->ops = &glink_device_ops;
@ -1422,17 +1612,21 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
channel->rpdev = rpdev;
}
CH_INFO(channel, "\n");
return 0;
free_rpdev:
CH_INFO(channel, "free_rpdev\n");
kfree(rpdev);
rcid_remove:
CH_INFO(channel, "rcid_remove\n");
spin_lock_irqsave(&glink->idr_lock, flags);
idr_remove(&glink->rcids, channel->rcid);
channel->rcid = 0;
spin_unlock_irqrestore(&glink->idr_lock, flags);
free_channel:
CH_INFO(channel, "free_channel\n");
/* Release the reference, iff we took it */
if (create_device)
kref_put(&channel->refcount, qcom_glink_channel_release);
@ -1451,12 +1645,13 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
spin_unlock_irqrestore(&glink->idr_lock, flags);
if (WARN(!channel, "close request on unknown channel\n"))
return;
CH_INFO(channel, "\n");
/* cancel pending rx_done work */
cancel_work_sync(&channel->intent_work);
kthread_cancel_work_sync(&channel->intent_work);
if (channel->rpdev) {
strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
strlcpy(chinfo.name, channel->name, sizeof(chinfo.name));
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = RPMSG_ADDR_ANY;
@ -1484,6 +1679,7 @@ static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
spin_unlock_irqrestore(&glink->idr_lock, flags);
return;
}
CH_INFO(channel, "\n");
idr_remove(&glink->lcids, channel->lcid);
channel->lcid = 0;
@ -1547,21 +1743,93 @@ static void qcom_glink_work(struct work_struct *work)
}
}
static ssize_t rpmsg_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rpmsg_device *rpdev = to_rpmsg_device(dev);
struct glink_channel *channel = to_glink_channel(rpdev->ept);
return snprintf(buf, RPMSG_NAME_SIZE, "%s\n", channel->glink->name);
}
static DEVICE_ATTR_RO(rpmsg_name);
static struct attribute *qcom_glink_attrs[] = {
&dev_attr_rpmsg_name.attr,
NULL
};
ATTRIBUTE_GROUPS(qcom_glink);
static void qcom_glink_device_release(struct device *dev)
{
struct rpmsg_device *rpdev = to_rpmsg_device(dev);
struct glink_channel *channel = to_glink_channel(rpdev->ept);
/* Release qcom_glink_alloc_channel() reference */
kref_put(&channel->refcount, qcom_glink_channel_release);
kfree(rpdev);
}
static int qcom_glink_create_chrdev(struct qcom_glink *glink)
{
struct rpmsg_device *rpdev;
struct glink_channel *channel;
rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
if (!rpdev)
return -ENOMEM;
channel = qcom_glink_alloc_channel(glink, "rpmsg_chrdev");
if (IS_ERR(channel)) {
kfree(rpdev);
return PTR_ERR(channel);
}
channel->rpdev = rpdev;
rpdev->ept = &channel->ept;
rpdev->ops = &glink_device_ops;
rpdev->dev.parent = glink->dev;
rpdev->dev.release = qcom_glink_device_release;
return rpmsg_chrdev_register_device(rpdev);
}
static void qcom_glink_set_affinity(struct qcom_glink *glink, u32 *arr,
size_t size)
{
struct cpumask cpumask;
int i;
cpumask_clear(&cpumask);
for (i = 0; i < size; i++) {
if (arr[i] < num_possible_cpus())
cpumask_set_cpu(arr[i], &cpumask);
}
if (irq_set_affinity(glink->irq, &cpumask))
dev_err(glink->dev, "failed to set irq affinity\n");
if (sched_setaffinity(glink->task->pid, &cpumask))
dev_err(glink->dev, "failed to set task affinity\n");
}
struct qcom_glink *qcom_glink_native_probe(struct device *dev,
unsigned long features,
struct qcom_glink_pipe *rx,
struct qcom_glink_pipe *tx,
bool intentless)
{
struct qcom_glink *glink;
u32 *arr;
int size;
int irq;
int ret;
struct qcom_glink *glink;
glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL);
if (!glink)
return ERR_PTR(-ENOMEM);
glink->dev = dev;
glink->dev->groups = qcom_glink_groups;
glink->tx_pipe = tx;
glink->rx_pipe = rx;
@ -1576,6 +1844,7 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
spin_lock_init(&glink->idr_lock);
idr_init(&glink->lcids);
idr_init(&glink->rcids);
atomic_set(&glink->in_reset, 0);
ret = of_property_read_string(dev->of_node, "label", &glink->name);
if (ret < 0)
@ -1590,6 +1859,15 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
return ERR_CAST(glink->mbox_chan);
}
kthread_init_worker(&glink->kworker);
glink->task = kthread_run(kthread_worker_fn, &glink->kworker,
"glink_%s", glink->name);
if (IS_ERR(glink->task)) {
dev_err(dev, "failed to spawn intent kthread %d\n",
PTR_ERR(glink->task));
return ERR_CAST(glink->task);
}
irq = of_irq_get(dev->of_node, 0);
ret = devm_request_irq(dev, irq,
qcom_glink_native_intr,
@ -1602,9 +1880,29 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
glink->irq = irq;
size = of_property_count_u32_elems(dev->of_node, "cpu-affinity");
if (size > 0) {
arr = kmalloc_array(size, sizeof(u32), GFP_KERNEL);
if (!arr)
return ERR_PTR(-ENOMEM);
ret = of_property_read_u32_array(dev->of_node, "cpu-affinity",
arr, size);
if (!ret)
qcom_glink_set_affinity(glink, arr, size);
kfree(arr);
}
ret = qcom_glink_send_version(glink);
if (ret)
if (ret) {
dev_err(dev, "failed to send version %d\n", ret);
return ERR_PTR(ret);
}
ret = qcom_glink_create_chrdev(glink);
if (ret)
dev_err(glink->dev, "failed to register chrdev\n");
glink->ilc = ipc_log_context_create(GLINK_LOG_PAGE_CNT, glink->name, 0);
return glink;
}
@ -1624,21 +1922,51 @@ void qcom_glink_native_remove(struct qcom_glink *glink)
int ret;
unsigned long flags;
atomic_inc(&glink->in_reset);
disable_irq(glink->irq);
cancel_work_sync(&glink->rx_work);
/* Signal all threads to cancel tx */
spin_lock_irqsave(&glink->idr_lock, flags);
idr_for_each_entry(&glink->lcids, channel, cid) {
wake_up(&channel->intent_req_event);
}
spin_unlock_irqrestore(&glink->idr_lock, flags);
ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
if (ret)
dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);
spin_lock_irqsave(&glink->idr_lock, flags);
idr_for_each_entry(&glink->lcids, channel, cid) {
spin_unlock_irqrestore(&glink->idr_lock, flags);
/* cancel pending rx_done work for each channel */
kthread_cancel_work_sync(&channel->intent_work);
spin_lock_irqsave(&glink->idr_lock, flags);
}
spin_unlock_irqrestore(&glink->idr_lock, flags);
spin_lock_irqsave(&glink->idr_lock, flags);
/* Release any defunct local channels, waiting for close-ack */
idr_for_each_entry(&glink->lcids, channel, cid)
idr_for_each_entry(&glink->lcids, channel, cid) {
kref_put(&channel->refcount, qcom_glink_channel_release);
idr_remove(&glink->lcids, cid);
}
/* Release any defunct local channels, waiting for close-req */
idr_for_each_entry(&glink->rcids, channel, cid) {
kref_put(&channel->refcount, qcom_glink_channel_release);
idr_remove(&glink->rcids, cid);
}
idr_destroy(&glink->lcids);
idr_destroy(&glink->rcids);
spin_unlock_irqrestore(&glink->idr_lock, flags);
kthread_flush_worker(&glink->kworker);
kthread_stop(glink->task);
qcom_glink_pipe_reset(glink);
mbox_free_channel(glink->mbox_chan);
}
EXPORT_SYMBOL_GPL(qcom_glink_native_remove);
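
The qcom_glink_create_chrdev() addition above registers an "rpmsg_chrdev" channel on each edge, which rpmsg_chrdev_register_device() exposes through the rpmsg character device interface. Below is a minimal userspace sketch of how such a control node could be exercised; it assumes the upstream rpmsg_char uapi in <linux/rpmsg.h>, and the /dev/rpmsg_ctrl0 path and "loopback" channel name are made-up examples rather than part of this patch.

/* Hypothetical userspace sketch: create an endpoint through the control
 * device registered by qcom_glink_create_chrdev(). Paths, channel name
 * and addresses are illustrative only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rpmsg.h>        /* struct rpmsg_endpoint_info, RPMSG_CREATE_EPT_IOCTL */

int main(void)
{
        struct rpmsg_endpoint_info info = { 0 };
        int ctrl_fd;

        ctrl_fd = open("/dev/rpmsg_ctrl0", O_RDWR);
        if (ctrl_fd < 0) {
                perror("open rpmsg_ctrl");
                return 1;
        }

        strncpy(info.name, "loopback", sizeof(info.name) - 1);
        info.src = 0;   /* address handling is transport specific */
        info.dst = 0;

        /* On success a new /dev/rpmsgN node appears for this endpoint */
        if (ioctl(ctrl_fd, RPMSG_CREATE_EPT_IOCTL, &info) < 0)
                perror("RPMSG_CREATE_EPT_IOCTL");

        close(ctrl_fd);
        return 0;
}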


@ -22,6 +22,8 @@ struct qcom_glink_pipe {
void (*write)(struct qcom_glink_pipe *glink_pipe,
const void *hdr, size_t hlen,
const void *data, size_t dlen);
void (*reset)(struct qcom_glink_pipe *glink_pipe);
};
struct qcom_glink;


@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016, Linaro Ltd
* Copyright (c) 2018, The Linux Foundation, All rights reserved.
*/
#include <linux/io.h>
@ -90,13 +91,11 @@ static void glink_smem_rx_peak(struct qcom_glink_pipe *np,
len = min_t(size_t, count, pipe->native.length - tail);
if (len) {
__ioread32_copy(data, pipe->fifo + tail,
len / sizeof(u32));
memcpy_fromio(data, pipe->fifo + tail, len);
}
if (len != count) {
__ioread32_copy(data + len, pipe->fifo,
(count - len) / sizeof(u32));
memcpy_fromio(data + len, pipe->fifo, (count - len));
}
}
@ -109,7 +108,7 @@ static void glink_smem_rx_advance(struct qcom_glink_pipe *np,
tail = le32_to_cpu(*pipe->tail);
tail += count;
if (tail > pipe->native.length)
if (tail >= pipe->native.length)
tail -= pipe->native.length;
*pipe->tail = cpu_to_le32(tail);
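
The rx_advance change above also fixes the wrap test from '>' to '>=': with the strict comparison a tail that lands exactly on pipe->native.length is never wrapped back to zero, so the next peek reads past the end of the FIFO. A small worked sketch of the corrected index arithmetic, with made-up sizes, is shown below.

/* Sketch of the wrap-around fix, assuming a hypothetical 16-byte FIFO. */
static u32 fifo_advance(u32 tail, u32 count, u32 length)
{
        tail += count;
        if (tail >= length)     /* a '>' test would leave tail == length */
                tail -= length;
        return tail;
}

/* fifo_advance(12, 4, 16) now yields 0; with '>' it stayed at 16, one past the buffer */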

File diff suppressed because it is too large.


@ -0,0 +1,341 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include <linux/soc/qcom/smem.h>
#include <linux/rpmsg/qcom_glink.h>
#include "qcom_glink_native.h"
#define FIFO_FULL_RESERVE 8
#define FIFO_ALIGNMENT 8
#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */
#define SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR 478
#define SPSS_TX_FIFO_SIZE SZ_2K
#define SPSS_RX_FIFO_SIZE SZ_2K
struct glink_spss_cfg {
__le32 tx_tail;
__le32 tx_head;
__le32 tx_fifo_size;
__le32 rx_tail;
__le32 rx_head;
__le32 rx_fifo_size;
};
struct glink_spss_pipe {
struct qcom_glink_pipe native;
__le32 *tail;
__le32 *head;
void *fifo;
int remote_pid;
};
#define to_spss_pipe(p) container_of(p, struct glink_spss_pipe, native)
static void glink_spss_reset(struct qcom_glink_pipe *np)
{
struct glink_spss_pipe *pipe = to_spss_pipe(np);
*pipe->head = cpu_to_le32(0);
*pipe->tail = cpu_to_le32(0);
}
static size_t glink_spss_rx_avail(struct qcom_glink_pipe *np)
{
struct glink_spss_pipe *pipe = to_spss_pipe(np);
u32 head;
u32 tail;
head = le32_to_cpu(*pipe->head);
tail = le32_to_cpu(*pipe->tail);
if (head < tail)
return pipe->native.length - tail + head;
else
return head - tail;
}
static void glink_spss_rx_peak(struct qcom_glink_pipe *np,
void *data, unsigned int offset, size_t count)
{
struct glink_spss_pipe *pipe = to_spss_pipe(np);
size_t len;
u32 tail;
tail = le32_to_cpu(*pipe->tail);
tail += offset;
if (tail >= pipe->native.length)
tail -= pipe->native.length;
len = min_t(size_t, count, pipe->native.length - tail);
if (len)
memcpy_fromio(data, pipe->fifo + tail, len);
if (len != count)
memcpy_fromio(data + len, pipe->fifo, count - len);
}
static void glink_spss_rx_advance(struct qcom_glink_pipe *np,
size_t count)
{
struct glink_spss_pipe *pipe = to_spss_pipe(np);
u32 tail;
tail = le32_to_cpu(*pipe->tail);
tail += count;
if (tail >= pipe->native.length)
tail -= pipe->native.length;
*pipe->tail = cpu_to_le32(tail);
}
static size_t glink_spss_tx_avail(struct qcom_glink_pipe *np)
{
struct glink_spss_pipe *pipe = to_spss_pipe(np);
u32 head;
u32 tail;
u32 avail;
head = le32_to_cpu(*pipe->head);
tail = le32_to_cpu(*pipe->tail);
if (tail <= head)
avail = pipe->native.length - head + tail;
else
avail = tail - head;
if (avail < (FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE))
avail = 0;
else
avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;
return avail;
}
static unsigned int glink_spss_tx_write_one(struct glink_spss_pipe *pipe,
unsigned int head,
const void *data, size_t count)
{
size_t len;
len = min_t(size_t, count, pipe->native.length - head);
if (len)
memcpy(pipe->fifo + head, data, len);
if (len != count)
memcpy(pipe->fifo, data + len, count - len);
head += count;
if (head >= pipe->native.length)
head -= pipe->native.length;
return head;
}
static void glink_spss_tx_write(struct qcom_glink_pipe *glink_pipe,
const void *hdr, size_t hlen,
const void *data, size_t dlen)
{
struct glink_spss_pipe *pipe = to_spss_pipe(glink_pipe);
unsigned int head;
head = le32_to_cpu(*pipe->head);
head = glink_spss_tx_write_one(pipe, head, hdr, hlen);
head = glink_spss_tx_write_one(pipe, head, data, dlen);
/* Ensure head is always aligned to 8 bytes */
head = ALIGN(head, 8);
if (head >= pipe->native.length)
head -= pipe->native.length;
/* Ensure ordering of fifo and head update */
wmb();
*pipe->head = cpu_to_le32(head);
}
static void qcom_glink_spss_release(struct device *dev)
{
kfree(dev);
}
static int glink_spss_advertise_cfg(struct device *dev,
u32 size, phys_addr_t addr)
{
struct device_node *np = dev->of_node;
__le64 __iomem *spss_addr;
__le32 __iomem *spss_size;
struct resource addr_r;
struct resource size_r;
int addr_idx;
int size_idx;
addr_idx = of_property_match_string(np, "reg-names", "qcom,spss-addr");
size_idx = of_property_match_string(np, "reg-names", "qcom,spss-size");
if (addr_idx < 0 || size_idx < 0) {
dev_err(dev, "failed to find location registers\n");
return -EINVAL;
}
if (of_address_to_resource(np, addr_idx, &addr_r))
return -ENOMEM;
spss_addr = devm_ioremap(dev, addr_r.start, resource_size(&addr_r));
if (IS_ERR_OR_NULL(spss_addr)) {
dev_err(dev, "failed to map spss addr resource\n");
return -ENOMEM;
}
if (of_address_to_resource(np, size_idx, &size_r))
return -ENOMEM;
spss_size = devm_ioremap(dev, size_r.start, resource_size(&size_r));
if (IS_ERR_OR_NULL(spss_size)) {
dev_err(dev, "failed to map spss size resource\n");
return -ENOMEM;
}
*spss_addr = cpu_to_le64(addr);
*spss_size = cpu_to_le32(size);
devm_iounmap(dev, spss_addr);
devm_iounmap(dev, spss_size);
return 0;
}
struct qcom_glink *qcom_glink_spss_register(struct device *parent,
struct device_node *node)
{
struct glink_spss_pipe *rx_pipe;
struct glink_spss_pipe *tx_pipe;
struct glink_spss_cfg *cfg;
struct qcom_glink *glink;
struct device *dev;
u32 remote_pid;
size_t tx_size;
size_t rx_size;
size_t size;
int ret;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
dev->parent = parent;
dev->of_node = node;
dev->release = qcom_glink_spss_release;
dev_set_name(dev, "%s:%s", node->parent->name, node->name);
ret = device_register(dev);
if (ret) {
pr_err("failed to register glink edge %s\n", node->name);
return ERR_PTR(ret);
}
ret = of_property_read_u32(dev->of_node, "qcom,remote-pid",
&remote_pid);
if (ret) {
dev_err(dev, "failed to parse qcom,remote-pid\n");
goto err_put_dev;
}
rx_pipe = devm_kzalloc(dev, sizeof(*rx_pipe), GFP_KERNEL);
tx_pipe = devm_kzalloc(dev, sizeof(*tx_pipe), GFP_KERNEL);
if (!rx_pipe || !tx_pipe) {
ret = -ENOMEM;
goto err_put_dev;
}
tx_size = SPSS_TX_FIFO_SIZE;
rx_size = SPSS_RX_FIFO_SIZE;
size = tx_size + rx_size + sizeof(*cfg);
ret = qcom_smem_alloc(remote_pid,
SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, size);
if (ret && ret != -EEXIST) {
dev_err(dev, "failed to allocate glink descriptors\n");
goto err_put_dev;
}
cfg = qcom_smem_get(remote_pid,
SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, &size);
if (IS_ERR(cfg)) {
dev_err(dev, "failed to acquire xprt descriptor\n");
ret = PTR_ERR(cfg);
goto err_put_dev;
}
if (size != tx_size + rx_size + sizeof(*cfg)) {
dev_err(dev, "glink descriptor of invalid size\n");
ret = -EINVAL;
goto err_put_dev;
}
cfg->tx_fifo_size = cpu_to_le32(tx_size);
cfg->rx_fifo_size = cpu_to_le32(rx_size);
tx_pipe->tail = &cfg->tx_tail;
tx_pipe->head = &cfg->tx_head;
tx_pipe->native.length = tx_size;
tx_pipe->fifo = (u8 *)cfg + sizeof(*cfg);
rx_pipe->tail = &cfg->rx_tail;
rx_pipe->head = &cfg->rx_head;
rx_pipe->native.length = rx_size;
rx_pipe->fifo = (u8 *)cfg + sizeof(*cfg) + tx_size;
rx_pipe->native.avail = glink_spss_rx_avail;
rx_pipe->native.peak = glink_spss_rx_peak;
rx_pipe->native.advance = glink_spss_rx_advance;
rx_pipe->native.reset = glink_spss_reset;
rx_pipe->remote_pid = remote_pid;
tx_pipe->native.avail = glink_spss_tx_avail;
tx_pipe->native.write = glink_spss_tx_write;
tx_pipe->native.reset = glink_spss_reset;
tx_pipe->remote_pid = remote_pid;
*rx_pipe->tail = 0;
*tx_pipe->head = 0;
ret = glink_spss_advertise_cfg(dev, size, qcom_smem_virt_to_phys(cfg));
if (ret)
goto err_put_dev;
glink = qcom_glink_native_probe(dev,
GLINK_FEATURE_INTENT_REUSE,
&rx_pipe->native, &tx_pipe->native,
false);
if (IS_ERR(glink)) {
ret = PTR_ERR(glink);
goto err_put_dev;
}
return glink;
err_put_dev:
put_device(dev);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(qcom_glink_spss_register);
void qcom_glink_spss_unregister(struct qcom_glink *glink)
{
qcom_glink_native_remove(glink);
qcom_glink_native_unregister(glink);
}
EXPORT_SYMBOL(qcom_glink_spss_unregister);
MODULE_DESCRIPTION("QTI GLINK SPSS driver");
MODULE_LICENSE("GPL v2");


@ -4,6 +4,7 @@
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Copyright (C) 2011 Google, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Ohad Ben-Cohen <ohad@wizery.com>
* Brian Swetland <swetland@google.com>
@ -81,7 +82,7 @@ EXPORT_SYMBOL(rpmsg_create_ept);
*/
void rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
{
if (ept)
if (ept && ept->ops)
ept->ops->destroy_ept(ept);
}
EXPORT_SYMBOL(rpmsg_destroy_ept);
@ -283,6 +284,42 @@ int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
}
EXPORT_SYMBOL(rpmsg_trysend_offchannel);
/**
* rpmsg_get_sigs() - get the signals for this endpoint
* @ept: the rpmsg endpoint
* @sigs: serial signals bitmask
*
* Returns 0 on success and an appropriate error value on failure.
*/
int rpmsg_get_sigs(struct rpmsg_endpoint *ept, u32 *lsigs, u32 *rsigs)
{
if (WARN_ON(!ept))
return -EINVAL;
if (!ept->ops->get_sigs)
return -ENXIO;
return ept->ops->get_sigs(ept, lsigs, rsigs);
}
EXPORT_SYMBOL(rpmsg_get_sigs);
/**
* rpmsg_set_sigs() - set the remote signals for this endpoint
* @ept: the rpmsg endpoint
* @sigs: serial signals bitmask
*
* Returns 0 on success and an appropriate error value on failure.
*/
int rpmsg_set_sigs(struct rpmsg_endpoint *ept, u32 sigs)
{
if (WARN_ON(!ept))
return -EINVAL;
if (!ept->ops->set_sigs)
return -ENXIO;
return ept->ops->set_sigs(ept, sigs);
}
EXPORT_SYMBOL(rpmsg_set_sigs);
/*
* match an rpmsg channel with a channel info struct.
* this is used to make sure we're not creating rpmsg devices for channels
@ -468,6 +505,10 @@ static int rpmsg_dev_probe(struct device *dev)
rpdev->ept = ept;
rpdev->src = ept->addr;
if (rpdrv->signals)
ept->sig_cb = rpdrv->signals;
}
err = rpdrv->probe(rpdev);
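
Together with the .signals hook wired up in rpmsg_dev_probe() above, the new rpmsg_get_sigs()/rpmsg_set_sigs() calls give clients a TIOCM-style signal interface. A minimal client sketch follows; the driver name and the signal bit are invented for illustration, and only the three new hooks come from this patch.

/* Hypothetical rpmsg client exercising the new signal API. */
#include <linux/module.h>
#include <linux/rpmsg.h>

static int demo_cb(struct rpmsg_device *rpdev, void *data, int len,
                   void *priv, u32 src)
{
        dev_info(&rpdev->dev, "received %d bytes from %#x\n", len, src);
        return 0;
}

static int demo_signals(struct rpmsg_device *rpdev, u32 old, u32 new)
{
        dev_info(&rpdev->dev, "remote signals changed: %#x -> %#x\n", old, new);
        return 0;
}

static int demo_probe(struct rpmsg_device *rpdev)
{
        u32 lsigs, rsigs;
        int ret;

        /* read back whatever the transport currently reports */
        ret = rpmsg_get_sigs(rpdev->ept, &lsigs, &rsigs);
        if (ret)
                return ret;

        /* assert an example local signal bit towards the remote side */
        return rpmsg_set_sigs(rpdev->ept, lsigs | 0x1);
}

static void demo_remove(struct rpmsg_device *rpdev)
{
        dev_info(&rpdev->dev, "removed\n");
}

static struct rpmsg_driver demo_sigs_driver = {
        .drv = { .name = "demo_sigs" },
        .probe = demo_probe,
        .remove = demo_remove,
        .callback = demo_cb,
        .signals = demo_signals,
};
module_rpmsg_driver(demo_sigs_driver);

MODULE_DESCRIPTION("Example rpmsg signal client (illustrative)");
MODULE_LICENSE("GPL v2");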


@ -4,6 +4,7 @@
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Copyright (C) 2011 Google, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Ohad Ben-Cohen <ohad@wizery.com>
* Brian Swetland <swetland@google.com>
@ -46,6 +47,8 @@ struct rpmsg_device_ops {
* @trysend: see @rpmsg_trysend(), required
* @trysendto: see @rpmsg_trysendto(), optional
* @trysend_offchannel: see @rpmsg_trysend_offchannel(), optional
* @get_sigs: see @rpmsg_get_sigs(), optional
* @set_sigs: see @rpmsg_set_sigs(), optional
*
* Indirection table for the operations that a rpmsg backend should implement.
* In addition to @destroy_ept, the backend must at least implement @send and
@ -65,6 +68,8 @@ struct rpmsg_endpoint_ops {
void *data, int len);
__poll_t (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
int (*get_sigs)(struct rpmsg_endpoint *ept, u32 *lsigs, u32 *rsigs);
int (*set_sigs)(struct rpmsg_endpoint *ept, u32 sigs);
};
int rpmsg_register_device(struct rpmsg_device *rpdev);


@ -4,6 +4,7 @@
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Copyright (C) 2011 Google, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* All rights reserved.
*/
@ -60,12 +61,14 @@ struct rpmsg_device {
};
typedef int (*rpmsg_rx_cb_t)(struct rpmsg_device *, void *, int, void *, u32);
typedef int (*rpmsg_rx_sig_t)(struct rpmsg_device *, u32, u32);
/**
* struct rpmsg_endpoint - binds a local rpmsg address to its user
* @rpdev: rpmsg channel device
* @refcount: when this drops to zero, the ept is deallocated
* @cb: rx callback handler
* @sig_cb: rx serial signal handler
* @cb_lock: must be taken before accessing/changing @cb
* @addr: local rpmsg address
* @priv: private data for the driver's use
@ -88,6 +91,7 @@ struct rpmsg_endpoint {
struct rpmsg_device *rpdev;
struct kref refcount;
rpmsg_rx_cb_t cb;
rpmsg_rx_sig_t sig_cb;
struct mutex cb_lock;
u32 addr;
void *priv;
@ -102,6 +106,7 @@ struct rpmsg_endpoint {
* @probe: invoked when a matching rpmsg channel (i.e. device) is found
* @remove: invoked when the rpmsg channel is removed
* @callback: invoked when an inbound message is received on the channel
* @signals: invoked when a serial signal change is received on the channel
*/
struct rpmsg_driver {
struct device_driver drv;
@ -109,6 +114,7 @@ struct rpmsg_driver {
int (*probe)(struct rpmsg_device *dev);
void (*remove)(struct rpmsg_device *dev);
int (*callback)(struct rpmsg_device *, void *, int, void *, u32);
int (*signals)(struct rpmsg_device *rpdev, u32 old, u32 new);
};
#if IS_ENABLED(CONFIG_RPMSG)
@ -135,6 +141,9 @@ int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
int rpmsg_get_sigs(struct rpmsg_endpoint *ept, u32 *lsigs, u32 *rsigs);
int rpmsg_set_sigs(struct rpmsg_endpoint *ept, u32 sigs);
#else
static inline int register_rpmsg_device(struct rpmsg_device *dev)
@ -242,6 +251,23 @@ static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept,
return 0;
}
static inline int rpmsg_get_sigs(struct rpmsg_endpoint *ept, u32 *lsigs,
u32 *rsigs)
{
/* This shouldn't be possible */
WARN_ON(1);
return -ENXIO;
}
static inline int rpmsg_set_sigs(struct rpmsg_endpoint *ept, u32 sigs)
{
/* This shouldn't be possible */
WARN_ON(1);
return -ENXIO;
}
#endif /* IS_ENABLED(CONFIG_RPMSG) */
/* use a macro to avoid include chaining to get THIS_MODULE */


@ -6,6 +6,7 @@
#include <linux/device.h>
struct qcom_glink;
struct glink_spi;
#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SMEM)
@ -26,4 +27,44 @@ static inline void qcom_glink_smem_unregister(struct qcom_glink *glink) {}
#endif
#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SPSS)
struct qcom_glink *qcom_glink_spss_register(struct device *parent,
struct device_node *node);
void qcom_glink_spss_unregister(struct qcom_glink *glink);
#else
static inline struct qcom_glink *
qcom_glink_spss_register(struct device *parent,
struct device_node *node)
{
return NULL;
}
static inline void qcom_glink_spss_unregister(struct qcom_glink *glink) {}
#endif
#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SPI)
struct glink_spi *qcom_glink_spi_register(struct device *parent,
struct device_node *node);
void qcom_glink_spi_unregister(struct glink_spi *glink);
#else
static inline struct glink_spi *
qcom_glink_spi_register(struct device *parent, struct device_node *node)
{
return NULL;
}
static inline void qcom_glink_spi_unregister(struct glink_spi *glink) {}
#endif
#endif