Merge "drivers: add RmNet snapshot"
commit 73818d7368
21 changed files with 4522 additions and 141 deletions
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c

@@ -1,13 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET configuration engine
 *
@@ -21,6 +13,9 @@
#include "rmnet_handlers.h"
#include "rmnet_vnd.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include <soc/qcom/rmnet_qmi.h>
#include <soc/qcom/qmi_rmnet.h>

/* Locking scheme -
 * The shared resource which needs to be protected is realdev->rx_handler_data.
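Note: the locking comment above is the heart of the configuration design: rx_handler_data on the real device doubles as the rmnet_port pointer, written under RTNL and read under RCU. A minimal sketch of the reader side, with the function name invented for illustration:

```c
/* Illustrative only: mirrors the lookup the locking comment describes. */
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

struct rmnet_port;

static struct rmnet_port *sketch_get_port_rcu(struct net_device *real_dev)
{
	/* rx_handler_data is written under RTNL and read under RCU, so a
	 * reader only needs rcu_read_lock() around this dereference and
	 * any use of the returned pointer.
	 */
	return rcu_dereference(real_dev->rx_handler_data);
}
```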
@@ -43,15 +38,17 @@
/* Local Definitions and Declarations */

static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = {
static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 2] = {
	[IFLA_RMNET_MUX_ID]	= { .type = NLA_U16 },
	[IFLA_RMNET_FLAGS]	= { .len = sizeof(struct ifla_rmnet_flags) },
	[IFLA_VLAN_EGRESS_QOS]	= { .len = sizeof(struct tcmsg) },
};

static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
int rmnet_is_real_dev_registered(const struct net_device *real_dev)
{
	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
}
EXPORT_SYMBOL(rmnet_is_real_dev_registered);

/* Needs rtnl lock */
static struct rmnet_port*

@@ -66,6 +63,9 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
	if (port->nr_rmnet_devs)
		return -EINVAL;

	rmnet_map_cmd_exit(port);
	rmnet_map_tx_aggregate_exit(port);

	kfree(port);

	netdev_rx_handler_unregister(real_dev);

@@ -97,13 +97,15 @@ static int rmnet_register_real_device(struct net_device *real_dev)
		kfree(port);
		return -EBUSY;
	}

	/* hold on to real dev for MAP data */
	dev_hold(real_dev);

	for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
		INIT_HLIST_HEAD(&port->muxed_ep[entry]);

	rmnet_map_tx_aggregate_init(port);
	rmnet_map_cmd_init(port);

	netdev_dbg(real_dev, "registered with rmnet\n");
	return 0;
}

@@ -211,6 +213,7 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
		hlist_del_init_rcu(&ep->hlnode);
		rmnet_unregister_bridge(dev, port);
		rmnet_vnd_dellink(mux_id, port, ep);
		synchronize_rcu();
		kfree(ep);
	}
	rmnet_unregister_real_device(real_dev, port);

@@ -234,7 +237,6 @@ static void rmnet_force_unassociate_device(struct net_device *dev)

	port = rmnet_get_port_rtnl(dev);

	rcu_read_lock();
	rmnet_unregister_bridge(dev, port);

	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {

@@ -242,12 +244,14 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
		rmnet_vnd_dellink(ep->mux_id, port, ep);

		hlist_del_init_rcu(&ep->hlnode);
		synchronize_rcu();
		kfree(ep);
	}

	rcu_read_unlock();
	unregister_netdevice_many(&list);

	qmi_rmnet_qmi_exit(port->qmi_info, port);

	rmnet_unregister_real_device(real_dev, port);
}

@@ -281,12 +285,15 @@ static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
{
	u16 mux_id;

	if (!data || !data[IFLA_RMNET_MUX_ID])
	if (!data) {
		return -EINVAL;

	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
	if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
		return -ERANGE;
	} else {
		if (data[IFLA_RMNET_MUX_ID]) {
			mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
			if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
				return -ERANGE;
		}
	}

	return 0;
}

@@ -329,6 +336,13 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
		port->data_format = flags->flags & flags->mask;
	}

	if (data[IFLA_VLAN_EGRESS_QOS]) {
		struct tcmsg *tcm;

		tcm = nla_data(data[IFLA_VLAN_EGRESS_QOS]);
		qmi_rmnet_change_link(dev, port, tcm);
	}

	return 0;
}

@@ -338,7 +352,8 @@ static size_t rmnet_get_size(const struct net_device *dev)
		/* IFLA_RMNET_MUX_ID */
		nla_total_size(2) +
		/* IFLA_RMNET_FLAGS */
		nla_total_size(sizeof(struct ifla_rmnet_flags));
		nla_total_size(sizeof(struct ifla_rmnet_flags)) +
		nla_total_size(sizeof(struct tcmsg));
}

static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)

@@ -393,6 +408,7 @@ struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
	else
		return NULL;
}
EXPORT_SYMBOL(rmnet_get_port);

struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
{

@@ -405,6 +421,7 @@ struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_endpoint);

int rmnet_add_bridge(struct net_device *rmnet_dev,
		     struct net_device *slave_dev,
@@ -459,6 +476,140 @@ int rmnet_del_bridge(struct net_device *rmnet_dev,
	return 0;
}

#ifdef CONFIG_QCOM_QMI_RMNET
void *rmnet_get_qmi_pt(void *port)
{
	if (port)
		return ((struct rmnet_port *)port)->qmi_info;

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_qmi_pt);

void *rmnet_get_qos_pt(struct net_device *dev)
{
	struct rmnet_priv *priv;

	if (dev) {
		priv = netdev_priv(dev);
		return rcu_dereference(priv->qos_info);
	}

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_qos_pt);

void *rmnet_get_rmnet_port(struct net_device *dev)
{
	struct rmnet_priv *priv;

	if (dev) {
		priv = netdev_priv(dev);
		return (void *)rmnet_get_port(priv->real_dev);
	}

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_rmnet_port);

struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id)
{
	struct rmnet_endpoint *ep;

	if (port) {
		ep = rmnet_get_endpoint((struct rmnet_port *)port, mux_id);
		if (ep)
			return ep->egress_dev;
	}

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_rmnet_dev);

void rmnet_reset_qmi_pt(void *port)
{
	if (port)
		((struct rmnet_port *)port)->qmi_info = NULL;
}
EXPORT_SYMBOL(rmnet_reset_qmi_pt);

void rmnet_init_qmi_pt(void *port, void *qmi)
{
	if (port)
		((struct rmnet_port *)port)->qmi_info = qmi;
}
EXPORT_SYMBOL(rmnet_init_qmi_pt);

void rmnet_get_packets(void *port, u64 *rx, u64 *tx)
{
	struct rmnet_priv *priv;
	struct rmnet_pcpu_stats *ps;
	unsigned int cpu, start;

	struct rmnet_endpoint *ep;
	unsigned long bkt;

	if (!port || !tx || !rx)
		return;

	*tx = 0;
	*rx = 0;
	hash_for_each(((struct rmnet_port *)port)->muxed_ep, bkt, ep, hlnode) {
		priv = netdev_priv(ep->egress_dev);
		for_each_possible_cpu(cpu) {
			ps = per_cpu_ptr(priv->pcpu_stats, cpu);
			do {
				start = u64_stats_fetch_begin_irq(&ps->syncp);
				*tx += ps->stats.tx_pkts;
				*rx += ps->stats.rx_pkts;
			} while (u64_stats_fetch_retry_irq(&ps->syncp, start));
		}
	}
}
EXPORT_SYMBOL(rmnet_get_packets);
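Note: rmnet_get_packets() above uses the standard u64_stats_sync pattern for summing 64-bit per-CPU counters consistently even on 32-bit kernels. A self-contained sketch of the same reader loop, with invented names:

```c
/* Illustrative reader for a u64_stats-protected per-CPU counter pair,
 * mirroring the loop in rmnet_get_packets(); all names here are invented.
 */
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct sketch_stats {
	u64 rx_pkts;
	u64 tx_pkts;
	struct u64_stats_sync syncp;
};

static void sketch_read(struct sketch_stats __percpu *stats, u64 *rx, u64 *tx)
{
	unsigned int cpu, start;

	*rx = *tx = 0;
	for_each_possible_cpu(cpu) {
		struct sketch_stats *s = per_cpu_ptr(stats, cpu);
		u64 r, t;

		/* Retry if a writer updated the counters mid-read; on
		 * 64-bit kernels this loop compiles down to plain loads.
		 */
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			r = s->rx_pkts;
			t = s->tx_pkts;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		*rx += r;
		*tx += t;
	}
}
```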
void rmnet_set_powersave_format(void *port)
{
	if (!port)
		return;
	((struct rmnet_port *)port)->data_format |= RMNET_INGRESS_FORMAT_PS;
}
EXPORT_SYMBOL(rmnet_set_powersave_format);

void rmnet_clear_powersave_format(void *port)
{
	if (!port)
		return;
	((struct rmnet_port *)port)->data_format &= ~RMNET_INGRESS_FORMAT_PS;
}
EXPORT_SYMBOL(rmnet_clear_powersave_format);

void rmnet_enable_all_flows(void *port)
{
	struct rmnet_endpoint *ep;
	unsigned long bkt;

	if (unlikely(!port))
		return;

	rcu_read_lock();
	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
			  bkt, ep, hlnode) {
		qmi_rmnet_enable_all_flows(ep->egress_dev);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(rmnet_enable_all_flows);

int rmnet_get_powersave_notif(void *port)
{
	if (!port)
		return 0;
	return ((struct rmnet_port *)port)->data_format & RMNET_FORMAT_PS_NOTIF;
}
EXPORT_SYMBOL(rmnet_get_powersave_notif);
#endif

/* Startup/Shutdown */

static int __init rmnet_init(void)
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data configuration engine
 *

@@ -27,6 +19,20 @@ struct rmnet_endpoint {
	struct hlist_node hlnode;
};

struct rmnet_port_priv_stats {
	u64 dl_hdr_last_seq;
	u64 dl_hdr_last_bytes;
	u64 dl_hdr_last_pkts;
	u64 dl_hdr_last_flows;
	u64 dl_hdr_count;
	u64 dl_hdr_total_bytes;
	u64 dl_hdr_total_pkts;
	u64 dl_hdr_avg_bytes;
	u64 dl_hdr_avg_pkts;
	u64 dl_trl_last_seq;
	u64 dl_trl_count;
};

/* One instance of this structure is instantiated for each real_dev associated
 * with rmnet.
 */

@@ -37,6 +43,27 @@ struct rmnet_port {
	u8 rmnet_mode;
	struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
	struct net_device *bridge_ep;
	void *rmnet_perf;

	u16 egress_agg_size;
	u16 egress_agg_count;

	/* Protect aggregation related elements */
	spinlock_t agg_lock;

	struct sk_buff *agg_skb;
	int agg_state;
	u8 agg_count;
	struct timespec agg_time;
	struct timespec agg_last;
	struct hrtimer hrtimer;

	void *qmi_info;

	/* dl marker elements */
	struct list_head dl_list;
	struct rmnet_port_priv_stats stats;
	int dl_marker_flush;
};

extern struct rtnl_link_ops rmnet_link_ops;

@@ -72,8 +99,25 @@ struct rmnet_priv {
	struct rmnet_pcpu_stats __percpu *pcpu_stats;
	struct gro_cells gro_cells;
	struct rmnet_priv_stats stats;
	void __rcu *qos_info;
};

enum rmnet_trace_func {
	RMNET_MODULE,
	NW_STACK_MODULE,
};

enum rmnet_trace_evt {
	RMNET_DLVR_SKB,
	RMNET_RCV_FROM_PND,
	RMNET_TX_UL_PKT,
	NW_STACK_DEV_Q_XMIT,
	NW_STACK_NAPI_GRO_FLUSH,
	NW_STACK_RX,
	NW_STACK_TX,
};

int rmnet_is_real_dev_registered(const struct net_device *real_dev);
struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
int rmnet_add_bridge(struct net_device *rmnet_dev,
drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c

@@ -1,13 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data ingress/egress handler
 *
@@ -16,19 +8,58 @@
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sock.h>
#include <linux/tracepoint.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"
#ifdef CONFIG_QCOM_QMI_HELPERS
#include <soc/qcom/rmnet_qmi.h>
#include <soc/qcom/qmi_rmnet.h>

#endif

#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60
#define CREATE_TRACE_POINTS
#include <trace/events/rmnet.h>

EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_err);

/* Helper Functions */

static void rmnet_set_skb_proto(struct sk_buff *skb)
static int rmnet_check_skb_can_gro(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			return 0;
		break;
	case htons(ETH_P_IPV6):
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			return 0;
		/* Fall through */
	}

	return -EPROTONOSUPPORT;
}

void rmnet_set_skb_proto(struct sk_buff *skb)
{
	switch (skb->data[0] & 0xF0) {
	case RMNET_IP_VERSION_4:
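Note: the EXPORT_TRACEPOINT_SYMBOL() lines exist so external helper modules (shs/perf) can attach probes to these events. A hypothetical probe module is sketched below; the probe signature is an assumption inferred from the trace_rmnet_low() call sites visible in this diff, not taken from the event definition:

```c
/* Hypothetical out-of-tree probe for the rmnet_low tracepoint. */
#include <linux/module.h>
#include <trace/events/rmnet.h>

static void probe_rmnet_low(void *ignore, u8 func, u8 evt,
			    u32 a0, u32 a1, u32 a2, u32 a3,
			    void *p0, void *p1)
{
	pr_debug("rmnet_low: func=%u evt=%u\n", func, evt);
}

static int __init probe_init(void)
{
	/* register_trace_rmnet_low() is generated by the TRACE_EVENT
	 * definition; EXPORT_TRACEPOINT_SYMBOL() above is what makes it
	 * callable from a separate module.
	 */
	return register_trace_rmnet_low(probe_rmnet_low, NULL);
}
module_init(probe_init);

static void __exit probe_exit(void)
{
	unregister_trace_rmnet_low(probe_rmnet_low, NULL);
	tracepoint_synchronize_unregister();
}
module_exit(probe_exit);
MODULE_LICENSE("GPL v2");
```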
@@ -42,22 +73,55 @@ static void rmnet_set_skb_proto(struct sk_buff *skb)
		break;
	}
}
EXPORT_SYMBOL(rmnet_set_skb_proto);

/* Shs hook handler */

int (*rmnet_shs_skb_entry)(struct sk_buff *skb,
			   struct rmnet_port *port) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_skb_entry);

/* Generic handler */

static void
rmnet_deliver_skb(struct sk_buff *skb)
void
rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port)
{
	int (*rmnet_shs_stamp)(struct sk_buff *skb, struct rmnet_port *port);
	struct rmnet_priv *priv = netdev_priv(skb->dev);

	trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
			0xDEF, 0xDEF, (void *)skb, NULL);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	rmnet_vnd_rx_fixup(skb, skb->dev);
	rmnet_vnd_rx_fixup(skb->dev, skb->len);

	skb->pkt_type = PACKET_HOST;
	skb_set_mac_header(skb, 0);
	gro_cells_receive(&priv->gro_cells, skb);

	rmnet_shs_stamp = rcu_dereference(rmnet_shs_skb_entry);
	if (rmnet_shs_stamp) {
		rmnet_shs_stamp(skb, port);
		return;
	}

	if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
		if (!rmnet_check_skb_can_gro(skb) &&
		    port->dl_marker_flush >= 0) {
			struct napi_struct *napi = get_current_napi_context();

			napi_gro_receive(napi, skb);
			port->dl_marker_flush++;
		} else {
			netif_receive_skb(skb);
		}
	} else {
		if (!rmnet_check_skb_can_gro(skb))
			gro_cells_receive(&priv->gro_cells, skb);
		else
			netif_receive_skb(skb);
	}
}
EXPORT_SYMBOL(rmnet_deliver_skb);
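Note: rmnet_shs_skb_entry is an RCU-published function pointer that lets an external module take over packet delivery before the GRO path runs. A sketch of how such a module might install and remove itself; the helper names are invented:

```c
/* Illustrative only: install/remove sequence for the shs hook above. */
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

struct rmnet_port;
extern int (*rmnet_shs_skb_entry)(struct sk_buff *skb,
				  struct rmnet_port *port) __rcu;

static int my_shs_stamp(struct sk_buff *skb, struct rmnet_port *port)
{
	/* Steer or queue the skb here; once this returns,
	 * rmnet_deliver_skb() skips its own GRO/netif_receive_skb() path.
	 */
	netif_receive_skb(skb);
	return 0;
}

static void my_shs_install(void)
{
	rcu_assign_pointer(rmnet_shs_skb_entry, my_shs_stamp);
}

static void my_shs_remove(void)
{
	rcu_assign_pointer(rmnet_shs_skb_entry, NULL);
	synchronize_rcu();	/* wait out readers in the RX path */
}
```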
/* MAP handler */

@@ -70,6 +134,11 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
	u8 mux_id;

	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
			if (!rmnet_map_flow_command(skb, port, false))
				return;
		}

		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
			return rmnet_map_command(skb, port);

@@ -98,19 +167,27 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

#ifdef CONFIG_QCOM_QMI_HELPERS
	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
		qmi_rmnet_work_maybe_restart(port);
#endif

	skb_trim(skb, len);
	rmnet_deliver_skb(skb);
	rmnet_deliver_skb(skb, port);
	return;

free_skb:
	kfree_skb(skb);
}

int (*rmnet_perf_deag_entry)(struct sk_buff *skb,
			     struct rmnet_port *port) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_deag_entry);

static void
rmnet_map_ingress_handler(struct sk_buff *skb,
			  struct rmnet_port *port)
{
	struct sk_buff *skbn;

	if (skb->dev->type == ARPHRD_ETHER) {
		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
@@ -122,10 +199,30 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
	}

	if (port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION) {
		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
			__rmnet_map_ingress_handler(skbn, port);
		int (*rmnet_perf_core_deaggregate)(struct sk_buff *skb,
						   struct rmnet_port *port);
		/* Deaggregation and freeing of HW originating
		 * buffers is done within here
		 */
		rmnet_perf_core_deaggregate =
			rcu_dereference(rmnet_perf_deag_entry);
		if (rmnet_perf_core_deaggregate) {
			rmnet_perf_core_deaggregate(skb, port);
		} else {
			struct sk_buff *skbn;

			consume_skb(skb);
			while (skb) {
				struct sk_buff *skb_frag =
					skb_shinfo(skb)->frag_list;

				skb_shinfo(skb)->frag_list = NULL;
				while ((skbn = rmnet_map_deaggregate(skb, port))
					!= NULL)
					__rmnet_map_ingress_handler(skbn, port);
				consume_skb(skb);
				skb = skb_frag;
			}
		}
	} else {
		__rmnet_map_ingress_handler(skb, port);
	}
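Note: the flattened diff above interleaves the removed single-loop deaggregation with the added frag_list walk, which reads confusingly (the stray consume_skb() before the while loop belongs to the removed version). The added path in isolation looks roughly like this:

```c
/* Reconstruction of the new deaggregation walk for readability: each
 * frag_list segment is detached and deaggregated on its own, and
 * consume_skb() drops the current head before the walk moves on.
 */
static void sketch_deag_chain(struct sk_buff *skb, struct rmnet_port *port)
{
	struct sk_buff *skbn;

	while (skb) {
		struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;

		skb_shinfo(skb)->frag_list = NULL;
		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
			__rmnet_map_ingress_handler(skbn, port);
		consume_skb(skb);
		skb = skb_frag;
	}
}
```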
@@ -151,6 +248,11 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
		return -ENOMEM;
	}

#ifdef CONFIG_QCOM_QMI_HELPERS
	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
		qmi_rmnet_work_maybe_restart(port);
#endif

	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
		rmnet_map_checksum_uplink_packet(skb, orig_dev);

@@ -160,8 +262,26 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,

	map_header->mux_id = mux_id;

	skb->protocol = htons(ETH_P_MAP);
	if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
		int non_linear_skb;

		if (rmnet_map_tx_agg_skip(skb, required_headroom))
			goto done;

		non_linear_skb = (orig_dev->features & NETIF_F_GSO) &&
				 skb_is_nonlinear(skb);

		if (non_linear_skb) {
			if (unlikely(__skb_linearize(skb)))
				goto done;
		}

		rmnet_map_tx_aggregate(skb, port);
		return -EINPROGRESS;
	}

done:
	skb->protocol = htons(ETH_P_MAP);
	return 0;
}

@@ -192,6 +312,8 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
	if (skb->pkt_type == PACKET_LOOPBACK)
		return RX_HANDLER_PASS;

	trace_rmnet_low(RMNET_MODULE, RMNET_RCV_FROM_PND, 0xDEF,
			0xDEF, 0xDEF, 0xDEF, NULL, NULL);
	dev = skb->dev;
	port = rmnet_get_port(dev);

@@ -207,6 +329,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
done:
	return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL(rmnet_rx_handler);

/* Modifies packet as per logical endpoint configuration and egress data format
 * for egress device configured in logical endpoint. Packet is then transmitted

@@ -218,7 +341,11 @@ void rmnet_egress_handler(struct sk_buff *skb)
	struct rmnet_port *port;
	struct rmnet_priv *priv;
	u8 mux_id;
	int err;
	u32 skb_len;

	trace_rmnet_low(RMNET_MODULE, RMNET_TX_UL_PKT, 0xDEF, 0xDEF, 0xDEF,
			0xDEF, (void *)skb, NULL);
	sk_pacing_shift_update(skb->sk, 8);

	orig_dev = skb->dev;

@@ -230,10 +357,16 @@ void rmnet_egress_handler(struct sk_buff *skb)
	if (!port)
		goto drop;

	if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
	skb_len = skb->len;
	err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
	if (err == -ENOMEM) {
		goto drop;
	} else if (err == -EINPROGRESS) {
		rmnet_vnd_tx_fixup(orig_dev, skb_len);
		return;
	}

	rmnet_vnd_tx_fixup(skb, orig_dev);
	rmnet_vnd_tx_fixup(orig_dev, skb_len);

	dev_queue_xmit(skb);
	return;
drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h

@@ -1,13 +1,5 @@
/* Copyright (c) 2013, 2016-2017 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2013, 2016-2018 The Linux Foundation. All rights reserved.
 *
 * RMNET Data ingress/egress handler
 *

@@ -19,7 +11,10 @@
#include "rmnet_config.h"

void rmnet_egress_handler(struct sk_buff *skb);

void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_set_skb_proto(struct sk_buff *skb);
rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
					       struct rmnet_port *port);
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);

#endif /* _RMNET_HANDLERS_H_ */
drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h

@@ -1,17 +1,9 @@
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. */

#ifndef _RMNET_MAP_H_
#define _RMNET_MAP_H_
#include "rmnet_config.h"

struct rmnet_map_control_command {
	u8 command_name;

@@ -34,6 +26,8 @@ enum rmnet_map_commands {
	RMNET_MAP_COMMAND_NONE,
	RMNET_MAP_COMMAND_FLOW_DISABLE,
	RMNET_MAP_COMMAND_FLOW_ENABLE,
	RMNET_MAP_COMMAND_FLOW_START = 7,
	RMNET_MAP_COMMAND_FLOW_END = 8,
	/* These should always be the last 2 elements */
	RMNET_MAP_COMMAND_UNKNOWN,
	RMNET_MAP_COMMAND_ENUM_LENGTH

@@ -63,6 +57,60 @@ struct rmnet_map_ul_csum_header {
	u16 csum_enabled:1;
} __aligned(1);

struct rmnet_map_control_command_header {
	u8 command_name;
	u8 cmd_type:2;
	u8 reserved:6;
	u16 reserved2;
	u32 transaction_id;
} __aligned(1);

struct rmnet_map_flow_info_le {
	__be32 mux_id;
	__be32 flow_id;
	__be32 bytes;
	__be32 pkts;
} __aligned(1);

struct rmnet_map_flow_info_be {
	u32 mux_id;
	u32 flow_id;
	u32 bytes;
	u32 pkts;
} __aligned(1);

struct rmnet_map_dl_ind_hdr {
	union {
		struct {
			u32 seq;
			u32 bytes;
			u32 pkts;
			u32 flows;
			struct rmnet_map_flow_info_le flow[0];
		} le __aligned(1);
		struct {
			__be32 seq;
			__be32 bytes;
			__be32 pkts;
			__be32 flows;
			struct rmnet_map_flow_info_be flow[0];
		} be __aligned(1);
	} __aligned(1);
} __aligned(1);

struct rmnet_map_dl_ind_trl {
	union {
		__be32 seq_be;
		u32 seq_le;
	} __aligned(1);
} __aligned(1);

struct rmnet_map_dl_ind {
	void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *dlhdr);
	void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *dltrl);
	struct list_head list;
};

#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
				 (Y)->data)->mux_id)
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \

@@ -75,6 +123,9 @@ struct rmnet_map_ul_csum_header {
#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \
					(Y)->data)->pkt_len))

#define RMNET_MAP_DEAGGR_SPACING  64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

#define RMNET_MAP_COMMAND_REQUEST     0
#define RMNET_MAP_COMMAND_ACK         1
#define RMNET_MAP_COMMAND_UNSUPPORTED 2
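Note: the RMNET_MAP_GET_* accessors above peek at the MAP header that sits at the start of skb->data. struct rmnet_map_header itself is not part of this excerpt; the sketch below assumes the upstream layout and is only illustrative:

```c
/* Rough illustration of MAP framing. Field layout is an assumption based
 * on the upstream struct rmnet_map_header, not shown in this diff.
 */
struct sketch_map_header {
	u8 pad_len:6;
	u8 reserved_bit:1;
	u8 cd_bit:1;		/* 1 = command frame, 0 = data frame */
	u8 mux_id;
	__be16 pkt_len;		/* payload length, including padding */
} __aligned(1);

static bool sketch_parse(const struct sk_buff *skb, u8 *mux, u16 *len)
{
	const struct sketch_map_header *h = (const void *)skb->data;

	if (skb->len < sizeof(*h))
		return false;
	*mux = h->mux_id;
	*len = ntohs(h->pkt_len);	/* what RMNET_MAP_GET_LENGTH() reads */
	return !h->cd_bit;		/* data frames go to deaggregation */
}
```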
@@ -91,5 +142,17 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct net_device *orig_dev);

int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
int rmnet_map_flow_command(struct sk_buff *skb,
			   struct rmnet_port *port,
			   bool rmnet_perf);
void rmnet_map_cmd_init(struct rmnet_port *port);
int rmnet_map_dl_ind_register(struct rmnet_port *port,
			      struct rmnet_map_dl_ind *dl_ind);
int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
				struct rmnet_map_dl_ind *dl_ind);
void rmnet_map_cmd_exit(struct rmnet_port *port);
#endif /* _RMNET_MAP_H_ */
drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c

@@ -1,14 +1,5 @@
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. */

#include <linux/netdevice.h>
#include "rmnet_config.h"

@@ -16,6 +7,17 @@
#include "rmnet_private.h"
#include "rmnet_vnd.h"

#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))

#define RMNET_MAP_CMD_SIZE (sizeof(struct rmnet_map_header) + \
			    sizeof(struct rmnet_map_control_command_header))

#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
			       sizeof(struct rmnet_map_header) + \
			       sizeof(struct rmnet_map_control_command_header))

static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
				    struct rmnet_port *port,
				    int enable)

@@ -83,6 +85,102 @@ static void rmnet_map_send_ack(struct sk_buff *skb,
	netif_tx_unlock(dev);
}

static void rmnet_map_dl_hdr_notify(struct rmnet_port *port,
				    struct rmnet_map_dl_ind_hdr *dlhdr)
{
	struct rmnet_map_dl_ind *tmp;

	port->dl_marker_flush = 0;

	list_for_each_entry(tmp, &port->dl_list, list)
		tmp->dl_hdr_handler(dlhdr);
}

static void rmnet_map_dl_trl_notify(struct rmnet_port *port,
				    struct rmnet_map_dl_ind_trl *dltrl)
{
	struct rmnet_map_dl_ind *tmp;
	struct napi_struct *napi;

	list_for_each_entry(tmp, &port->dl_list, list)
		tmp->dl_trl_handler(dltrl);

	if (port->dl_marker_flush) {
		napi = get_current_napi_context();
		napi_gro_flush(napi, false);
	}

	port->dl_marker_flush = -1;
}

static void rmnet_map_process_flow_start(struct sk_buff *skb,
					 struct rmnet_port *port,
					 bool rmnet_perf)
{
	struct rmnet_map_dl_ind_hdr *dlhdr;

	if (skb->len < RMNET_DL_IND_HDR_SIZE)
		return;

	skb_pull(skb, RMNET_MAP_CMD_SIZE);

	dlhdr = (struct rmnet_map_dl_ind_hdr *)skb->data;

	port->stats.dl_hdr_last_seq = dlhdr->le.seq;
	port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
	port->stats.dl_hdr_last_pkts = dlhdr->le.pkts;
	port->stats.dl_hdr_last_flows = dlhdr->le.flows;
	port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes;
	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
	port->stats.dl_hdr_count++;

	if (unlikely(!(port->stats.dl_hdr_count)))
		port->stats.dl_hdr_count = 1;

	port->stats.dl_hdr_avg_bytes = port->stats.dl_hdr_total_bytes /
				       port->stats.dl_hdr_count;

	port->stats.dl_hdr_avg_pkts = port->stats.dl_hdr_total_pkts /
				      port->stats.dl_hdr_count;

	rmnet_map_dl_hdr_notify(port, dlhdr);
	if (rmnet_perf) {
		unsigned int pull_size;

		pull_size = sizeof(struct rmnet_map_dl_ind_hdr);
		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
		skb_pull(skb, pull_size);
	}
}

static void rmnet_map_process_flow_end(struct sk_buff *skb,
				       struct rmnet_port *port,
				       bool rmnet_perf)
{
	struct rmnet_map_dl_ind_trl *dltrl;

	if (skb->len < RMNET_DL_IND_TRL_SIZE)
		return;

	skb_pull(skb, RMNET_MAP_CMD_SIZE);

	dltrl = (struct rmnet_map_dl_ind_trl *)skb->data;

	port->stats.dl_trl_last_seq = dltrl->seq_le;
	port->stats.dl_trl_count++;

	rmnet_map_dl_trl_notify(port, dltrl);
	if (rmnet_perf) {
		unsigned int pull_size;

		pull_size = sizeof(struct rmnet_map_dl_ind_trl);
		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
		skb_pull(skb, pull_size);
	}
}

/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
 * name is decoded here and appropriate handler is called.
 */

@@ -112,3 +210,81 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
	if (rc == RMNET_MAP_COMMAND_ACK)
		rmnet_map_send_ack(skb, rc, port);
}

int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port,
			   bool rmnet_perf)
{
	struct rmnet_map_control_command *cmd;
	unsigned char command_name;

	cmd = RMNET_MAP_GET_CMD_START(skb);
	command_name = cmd->command_name;

	switch (command_name) {
	case RMNET_MAP_COMMAND_FLOW_START:
		rmnet_map_process_flow_start(skb, port, rmnet_perf);
		break;

	case RMNET_MAP_COMMAND_FLOW_END:
		rmnet_map_process_flow_end(skb, port, rmnet_perf);
		break;

	default:
		return 1;
	}

	/* rmnet_perf module will handle the consuming */
	if (!rmnet_perf)
		consume_skb(skb);

	return 0;
}
EXPORT_SYMBOL(rmnet_map_flow_command);

void rmnet_map_cmd_exit(struct rmnet_port *port)
{
	struct rmnet_map_dl_ind *tmp, *idx;

	list_for_each_entry_safe(tmp, idx, &port->dl_list, list)
		list_del_rcu(&tmp->list);
}

void rmnet_map_cmd_init(struct rmnet_port *port)
{
	INIT_LIST_HEAD(&port->dl_list);

	port->dl_marker_flush = -1;
}

int rmnet_map_dl_ind_register(struct rmnet_port *port,
			      struct rmnet_map_dl_ind *dl_ind)
{
	if (!port || !dl_ind || !dl_ind->dl_hdr_handler ||
	    !dl_ind->dl_trl_handler)
		return -EINVAL;

	list_add_rcu(&dl_ind->list, &port->dl_list);

	return 0;
}
EXPORT_SYMBOL(rmnet_map_dl_ind_register);

int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
				struct rmnet_map_dl_ind *dl_ind)
{
	struct rmnet_map_dl_ind *tmp;

	if (!port || !dl_ind)
		return -EINVAL;

	list_for_each_entry(tmp, &port->dl_list, list) {
		if (tmp == dl_ind) {
			list_del_rcu(&dl_ind->list);
			goto done;
		}
	}

done:
	return 0;
}
EXPORT_SYMBOL(rmnet_map_dl_ind_deregister);
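Note: rmnet_map_dl_ind_register()/deregister() let another module observe downlink marker bursts. A hypothetical consumer, with placeholder handler bodies:

```c
/* Illustrative consumer of the DL marker callbacks exported above. */
static void my_dl_hdr(struct rmnet_map_dl_ind_hdr *dlhdr)
{
	pr_debug("DL burst start, seq %u\n", dlhdr->le.seq);
}

static void my_dl_trl(struct rmnet_map_dl_ind_trl *dltrl)
{
	pr_debug("DL burst end, seq %u\n", dltrl->seq_le);
}

static struct rmnet_map_dl_ind my_dl_ind = {
	.dl_hdr_handler = my_dl_hdr,
	.dl_trl_handler = my_dl_trl,
};

/* Both callbacks must be non-NULL or registration fails with -EINVAL. */
static int my_dl_attach(struct rmnet_port *port)
{
	return rmnet_map_dl_ind_register(port, &my_dl_ind);
}
```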
drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c

@@ -1,13 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data MAP protocol
 *

@@ -20,9 +12,9 @@
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
#include "rmnet_handlers.h"

#define RMNET_MAP_DEAGGR_SPACING  64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
#define RMNET_MAP_PKT_COPY_THRESHOLD 64

static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
					 const void *txporthdr)

@@ -307,11 +299,34 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
}

/* Deaggregates a single packet
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * A whole new buffer is allocated for each portion of an aggregated frame
 * except when a UDP or command packet is received.
 * Caller should keep calling deaggregate() on the source skb until 0 is
 * returned, indicating that there are no more packets to deaggregate. Caller
 * is responsible for freeing the original skb.
 */
static int rmnet_validate_clone(struct sk_buff *skb)
{
	if (RMNET_MAP_GET_CD_BIT(skb))
		return 0;

	if (skb->len < RMNET_MAP_PKT_COPY_THRESHOLD)
		return 1;

	switch (skb->data[4] & 0xF0) {
	case 0x40:
		if (((struct iphdr *)&skb->data[4])->protocol == IPPROTO_UDP)
			return 0;
		break;
	case 0x60:
		if (((struct ipv6hdr *)&skb->data[4])->nexthdr == IPPROTO_UDP)
			return 0;
		/* Fall through */
	}

	return 1;
}

struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_port *port)
{

@@ -335,13 +350,27 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
	if (ntohs(maph->pkt_len) == 0)
		return NULL;

	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
	if (!skbn)
		return NULL;

	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, skb->data, packet_len);
	if (rmnet_validate_clone(skb)) {
		skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
				 GFP_ATOMIC);
		if (!skbn)
			return NULL;

		skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
		skb_put(skbn, packet_len);
		memcpy(skbn->data, skb->data, packet_len);

	} else {
		skbn = skb_clone(skb, GFP_ATOMIC);
		if (!skbn)
			return NULL;

		skb_trim(skbn, packet_len);
		skbn->truesize = SKB_TRUESIZE(packet_len);
		__skb_set_hash(skbn, 0, 0, 0);
	}

	skb_pull(skb, packet_len);

	return skbn;

@@ -386,6 +415,7 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)

	return 0;
}
EXPORT_SYMBOL(rmnet_map_checksum_downlink_packet);

/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.

@@ -432,3 +462,222 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,

	priv->stats.csum_sw++;
}

struct rmnet_agg_work {
	struct work_struct work;
	struct rmnet_port *port;
};

long rmnet_agg_time_limit __read_mostly = 1000000L;
long rmnet_agg_bypass_time __read_mostly = 10000000L;

int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset)
{
	u8 *packet_start = skb->data + offset;
	int is_icmp = 0;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip4h = (struct iphdr *)(packet_start);

		if (ip4h->protocol == IPPROTO_ICMP)
			is_icmp = 1;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);

		if (ip6h->nexthdr == IPPROTO_ICMPV6) {
			is_icmp = 1;
		} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag;

			frag = (struct frag_hdr *)(packet_start
						   + sizeof(struct ipv6hdr));
			if (frag->nexthdr == IPPROTO_ICMPV6)
				is_icmp = 1;
		}
	}

	return is_icmp;
}

static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
{
	struct rmnet_agg_work *real_work;
	struct rmnet_port *port;
	unsigned long flags;
	struct sk_buff *skb;
	int agg_count = 0;

	real_work = (struct rmnet_agg_work *)work;
	port = real_work->port;
	skb = NULL;

	spin_lock_irqsave(&port->agg_lock, flags);
	if (likely(port->agg_state == -EINPROGRESS)) {
		/* Buffer may have already been shipped out */
		if (likely(port->agg_skb)) {
			skb = port->agg_skb;
			agg_count = port->agg_count;
			port->agg_skb = NULL;
			port->agg_count = 0;
			memset(&port->agg_time, 0, sizeof(struct timespec));
		}
		port->agg_state = 0;
	}

	spin_unlock_irqrestore(&port->agg_lock, flags);
	if (skb)
		dev_queue_xmit(skb);

	kfree(work);
}

enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
{
	struct rmnet_agg_work *work;
	struct rmnet_port *port;

	port = container_of(t, struct rmnet_port, hrtimer);

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		port->agg_state = 0;

		return HRTIMER_NORESTART;
	}

	INIT_WORK(&work->work, rmnet_map_flush_tx_packet_work);
	work->port = port;
	schedule_work((struct work_struct *)work);
	return HRTIMER_NORESTART;
}

void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
{
	struct timespec diff, last;
	int size, agg_count = 0;
	struct sk_buff *agg_skb;
	unsigned long flags;
	u8 *dest_buff;

new_packet:
	spin_lock_irqsave(&port->agg_lock, flags);
	memcpy(&last, &port->agg_last, sizeof(struct timespec));
	getnstimeofday(&port->agg_last);

	if (!port->agg_skb) {
		/* Check to see if we should agg first. If the traffic is very
		 * sparse, don't aggregate. We will need to tune this later
		 */
		diff = timespec_sub(port->agg_last, last);

		if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time) {
			spin_unlock_irqrestore(&port->agg_lock, flags);
			skb->protocol = htons(ETH_P_MAP);
			dev_queue_xmit(skb);
			return;
		}

		size = port->egress_agg_size - skb->len;
		port->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
		if (!port->agg_skb) {
			port->agg_skb = 0;
			port->agg_count = 0;
			memset(&port->agg_time, 0, sizeof(struct timespec));
			spin_unlock_irqrestore(&port->agg_lock, flags);
			skb->protocol = htons(ETH_P_MAP);
			dev_queue_xmit(skb);
			return;
		}
		port->agg_skb->protocol = htons(ETH_P_MAP);
		port->agg_count = 1;
		getnstimeofday(&port->agg_time);
		dev_kfree_skb_any(skb);
		goto schedule;
	}
	diff = timespec_sub(port->agg_last, port->agg_time);

	if (skb->len > (port->egress_agg_size - port->agg_skb->len) ||
	    port->agg_count >= port->egress_agg_count ||
	    diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_time_limit) {
		agg_skb = port->agg_skb;
		agg_count = port->agg_count;
		port->agg_skb = 0;
		port->agg_count = 0;
		memset(&port->agg_time, 0, sizeof(struct timespec));
		port->agg_state = 0;
		spin_unlock_irqrestore(&port->agg_lock, flags);
		hrtimer_cancel(&port->hrtimer);
		dev_queue_xmit(agg_skb);
		goto new_packet;
	}

	dest_buff = skb_put(port->agg_skb, skb->len);
	memcpy(dest_buff, skb->data, skb->len);
	port->agg_count++;
	dev_kfree_skb_any(skb);

schedule:
	if (port->agg_state != -EINPROGRESS) {
		port->agg_state = -EINPROGRESS;
		hrtimer_start(&port->hrtimer, ns_to_ktime(3000000),
			      HRTIMER_MODE_REL);
	}
	spin_unlock_irqrestore(&port->agg_lock, flags);
}

void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
	hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
	port->egress_agg_size = 8192;
	port->egress_agg_count = 20;
	spin_lock_init(&port->agg_lock);
}

void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
{
	unsigned long flags;

	hrtimer_cancel(&port->hrtimer);
	spin_lock_irqsave(&port->agg_lock, flags);
	if (port->agg_state == -EINPROGRESS) {
		if (port->agg_skb) {
			kfree_skb(port->agg_skb);
			port->agg_skb = NULL;
			port->agg_count = 0;
			memset(&port->agg_time, 0, sizeof(struct timespec));
		}

		port->agg_state = 0;
	}

	spin_unlock_irqrestore(&port->agg_lock, flags);
}

void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb)
{
	struct rmnet_port *port;
	struct sk_buff *agg_skb;
	unsigned long flags;

	port = rmnet_get_port(qmap_skb->dev);

	if (port && (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION)) {
		spin_lock_irqsave(&port->agg_lock, flags);
		if (port->agg_skb) {
			agg_skb = port->agg_skb;
			port->agg_skb = 0;
			port->agg_count = 0;
			memset(&port->agg_time, 0, sizeof(struct timespec));
			port->agg_state = 0;
			spin_unlock_irqrestore(&port->agg_lock, flags);
			hrtimer_cancel(&port->hrtimer);
			dev_queue_xmit(agg_skb);
		} else {
			spin_unlock_irqrestore(&port->agg_lock, flags);
		}
	}

	dev_queue_xmit(qmap_skb);
}
EXPORT_SYMBOL(rmnet_map_tx_qmap_cmd);
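Note: rmnet_map_flush_tx_packet_queue() cannot transmit directly because hrtimer callbacks run in interrupt context, so it bounces through a workqueue. The distilled pattern follows, with invented names; unlike the driver, which allocates a work item per timer expiry, this sketch embeds the work in the context struct:

```c
/* Deferred-flush pattern: timer fires in hard-irq context and only
 * queues work; the work callback runs in process context and may take
 * sleeping paths such as dev_queue_xmit() of a large aggregate.
 */
#include <linux/hrtimer.h>
#include <linux/workqueue.h>

struct flush_ctx {
	struct hrtimer timer;
	struct work_struct work;
};

static void flush_fn(struct work_struct *work)
{
	/* safe to do the heavy lifting here */
}

static enum hrtimer_restart timer_fn(struct hrtimer *t)
{
	struct flush_ctx *ctx = container_of(t, struct flush_ctx, timer);

	schedule_work(&ctx->work);	/* defer out of irq context */
	return HRTIMER_NORESTART;
}

static void flush_ctx_init(struct flush_ctx *ctx)
{
	hrtimer_init(&ctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ctx->timer.function = timer_fn;
	INIT_WORK(&ctx->work, flush_fn);
	hrtimer_start(&ctx->timer, ns_to_ktime(3 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}
```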
drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h

@@ -1,13 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _RMNET_PRIVATE_H_

@@ -18,6 +10,15 @@
#define RMNET_NEEDED_HEADROOM  16
#define RMNET_TX_QUEUE_LEN     1000

/* Constants */
#define RMNET_EGRESS_FORMAT_AGGREGATION BIT(31)
#define RMNET_INGRESS_FORMAT_DL_MARKER  BIT(30)
#define RMNET_INGRESS_FORMAT_RPS_STAMP  BIT(29)

/* Power save feature */
#define RMNET_INGRESS_FORMAT_PS         BIT(27)
#define RMNET_FORMAT_PS_NOTIF           BIT(26)

/* Replace skb->dev to a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
/* Pass the frame directly to another device with dev_queue_xmit() */
drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c

@@ -1,14 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 *
 * RMNET Data virtual network driver
 *

@@ -16,6 +7,7 @@

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"

@@ -23,9 +15,12 @@
#include "rmnet_map.h"
#include "rmnet_vnd.h"

#include <soc/qcom/qmi_rmnet.h>
#include <trace/events/rmnet.h>

/* RX/TX Fixup */

void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

@@ -34,11 +29,11 @@ void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.rx_pkts++;
	pcpu_ptr->stats.rx_bytes += skb->len;
	pcpu_ptr->stats.rx_bytes += skb_len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

@@ -47,7 +42,7 @@ void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.tx_pkts++;
	pcpu_ptr->stats.tx_bytes += skb->len;
	pcpu_ptr->stats.tx_bytes += skb_len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

@@ -57,10 +52,19 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct rmnet_priv *priv;
	int ip_type;
	u32 mark;
	unsigned int len;

	priv = netdev_priv(dev);
	if (priv->real_dev) {
		ip_type = (ip_hdr(skb)->version == 4) ?
					AF_INET : AF_INET6;
		mark = skb->mark;
		len = skb->len;
		trace_rmnet_xmit_skb(skb);
		rmnet_egress_handler(skb);
		qmi_rmnet_burst_fc_check(dev, ip_type, mark, len);
	} else {
		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
		kfree_skb(skb);

@@ -105,9 +109,15 @@ static int rmnet_vnd_init(struct net_device *dev)
static void rmnet_vnd_uninit(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	void *qos;

	gro_cells_destroy(&priv->gro_cells);
	free_percpu(priv->pcpu_stats);

	qos = priv->qos_info;
	RCU_INIT_POINTER(priv->qos_info, NULL);
	synchronize_rcu();
	qmi_rmnet_qos_exit(dev, qos);
}

static void rmnet_get_stats64(struct net_device *dev,

@@ -141,6 +151,20 @@ static void rmnet_get_stats64(struct net_device *dev,
	s->tx_dropped = total_stats.tx_drops;
}

static u16 rmnet_vnd_select_queue(struct net_device *dev,
				  struct sk_buff *skb,
				  struct net_device *sb_dev,
				  select_queue_fallback_t fallback)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	int txq = 0;

	if (priv->real_dev)
		txq = qmi_rmnet_get_queue(dev, skb);

	return (txq < dev->real_num_tx_queues) ? txq : 0;
}

static const struct net_device_ops rmnet_vnd_ops = {
	.ndo_start_xmit = rmnet_vnd_start_xmit,
	.ndo_change_mtu = rmnet_vnd_change_mtu,

@@ -150,6 +174,7 @@ static const struct net_device_ops rmnet_vnd_ops = {
	.ndo_init       = rmnet_vnd_init,
	.ndo_uninit     = rmnet_vnd_uninit,
	.ndo_get_stats64 = rmnet_get_stats64,
	.ndo_select_queue = rmnet_vnd_select_queue,
};

static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {

@@ -164,12 +189,29 @@ static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
	"Checksum computed in software",
};

static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
	"DL header last seen sequence",
	"DL header last seen bytes",
	"DL header last seen packets",
	"DL header last seen flows",
	"DL header pkts received",
	"DL header total bytes received",
	"DL header total pkts received",
	"DL header average bytes",
	"DL header average packets",
	"DL trailer last seen sequence",
	"DL trailer pkts received",
};

static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &rmnet_gstrings_stats,
		       sizeof(rmnet_gstrings_stats));
		memcpy(buf + sizeof(rmnet_gstrings_stats),
		       &rmnet_port_gstrings_stats,
		       sizeof(rmnet_port_gstrings_stats));
		break;
	}
}

@@ -178,7 +220,8 @@ static int rmnet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(rmnet_gstrings_stats);
		return ARRAY_SIZE(rmnet_gstrings_stats) +
		       ARRAY_SIZE(rmnet_port_gstrings_stats);
	default:
		return -EOPNOTSUPP;
	}
@@ -189,17 +232,42 @@ static void rmnet_get_ethtool_stats(struct net_device *dev,
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_priv_stats *st = &priv->stats;
	struct rmnet_port_priv_stats *stp;
	struct rmnet_port *port;

	if (!data)
	port = rmnet_get_port(priv->real_dev);

	if (!data || !port)
		return;

	stp = &port->stats;

	memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
	memcpy(data + ARRAY_SIZE(rmnet_gstrings_stats), stp,
	       ARRAY_SIZE(rmnet_port_gstrings_stats) * sizeof(u64));
}

static int rmnet_stats_reset(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_port_priv_stats *stp;
	struct rmnet_port *port;

	port = rmnet_get_port(priv->real_dev);
	if (!port)
		return -EINVAL;

	stp = &port->stats;

	memset(stp, 0, sizeof(*stp));
	return 0;
}

static const struct ethtool_ops rmnet_ethtool_ops = {
	.get_ethtool_stats = rmnet_get_ethtool_stats,
	.get_strings = rmnet_get_strings,
	.get_sset_count = rmnet_get_sset_count,
	.nway_reset = rmnet_stats_reset,
};

/* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
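Note: with these hooks, the port-level DL statistics appear alongside the per-device counters in `ethtool -S rmnet0`. A rough userspace sketch of the same query via the SIOCETHTOOL ioctl; the interface name is an assumption and error handling is minimal:

```c
/* Userspace: dump ethtool stats the way `ethtool -S` would. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr = {0};
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_stats *stats;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	unsigned int i;

	strncpy(ifr.ifr_name, "rmnet0", IFNAMSIZ - 1);

	ifr.ifr_data = (void *)&drvinfo;	/* learn the stat count */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	stats = calloc(1, sizeof(*stats) + drvinfo.n_stats * sizeof(__u64));
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = drvinfo.n_stats;
	ifr.ifr_data = (void *)stats;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	for (i = 0; i < stats->n_stats; i++)
		printf("stat[%u] = %llu\n", i,
		       (unsigned long long)stats->data[i]);
	return 0;
}
```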
@@ -210,7 +278,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
	eth_random_addr(rmnet_dev->dev_addr);
	random_ether_addr(rmnet_dev->dev_addr);
	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;

	/* Raw IP mode */

@@ -258,6 +326,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
	priv = netdev_priv(rmnet_dev);
	priv->mux_id = id;
	priv->real_dev = real_dev;
	priv->qos_info = qmi_rmnet_qos_init(real_dev, id);

	netdev_dbg(rmnet_dev, "rmnet dev created\n");
}
drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h

@@ -1,13 +1,5 @@
/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * RMNET Data Virtual Network Device APIs
 *

@@ -23,8 +15,8 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
		      struct rmnet_endpoint *ep);
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
		      struct rmnet_endpoint *ep);
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len);
void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len);
u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev);
void rmnet_vnd_setup(struct net_device *dev);
#endif /* _RMNET_VND_H_ */
drivers/soc/qcom/Kconfig

@@ -108,6 +108,36 @@ config QCOM_QMI_HELPERS
	  clients, and these helpers provide the common functionality needed
	  for doing this from a kernel driver.

config QCOM_QMI_RMNET
	bool "QTI QMI Rmnet Helpers"
	depends on QCOM_QMI_HELPERS
	depends on RMNET
	help
	  Helper for handling interfaces between kernel clients and the
	  rmnet driver. It enables the rmnet driver to create/delete DFC/WDA
	  clients and provides the common functionality for data flow control
	  and power save features.

config QCOM_QMI_DFC
	bool "Enable burst mode flow control"
	depends on QCOM_QMI_RMNET
	help
	  Say y here to enable support for burst mode data flow control.
	  The DFC client provides an interface to the modem DFC service and
	  performs burst mode flow control. It enables a flow on receiving a
	  flow status indication and disables flows once the grant size is
	  reached.
	  If unsure, or if you do not use burst mode flow control, say 'N'.

config QCOM_QMI_POWER_COLLAPSE
	bool "Enable power save features"
	depends on QCOM_QMI_RMNET
	help
	  Say y here to enable support for power save features.
	  It provides an interface to offload uplink flow control based on
	  detected flow status.
	  If unsure, or if you do not use the power save feature, say 'N'.

config QCOM_RMTFS_MEM
	tristate "Qualcomm Remote Filesystem memory driver"
	depends on ARCH_QCOM
|
@ -9,6 +9,9 @@ obj-$(CONFIG_QCOM_MDT_LOADER)	+= mdt_loader.o
obj-$(CONFIG_QCOM_PM)	+=	spm.o
obj-$(CONFIG_QCOM_QMI_HELPERS)	+= qmi_helpers.o
qmi_helpers-y	+= qmi_encdec.o qmi_interface.o
obj-$(CONFIG_QCOM_QMI_RMNET)	+= qmi_rmnet.o
obj-$(CONFIG_QCOM_QMI_DFC)	+= dfc_qmi.o
obj-$(CONFIG_QCOM_QMI_POWER_COLLAPSE)	+= wda_qmi.o
obj-$(CONFIG_QCOM_RMTFS_MEM)	+= rmtfs_mem.o
obj-$(CONFIG_QCOM_RPMH)	+= qcom_rpmh.o
qcom_rpmh-y	+= rpmh-rsc.o
1056	drivers/soc/qcom/dfc_qmi.c	Normal file
	File diff suppressed because it is too large
857	drivers/soc/qcom/qmi_rmnet.c	Normal file

@ -0,0 +1,857 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <soc/qcom/qmi_rmnet.h>
#include <soc/qcom/rmnet_qmi.h>
#include <linux/soc/qcom/qmi.h>
#include <linux/rtnetlink.h>
#include <uapi/linux/rtnetlink.h>
#include <net/pkt_sched.h>
#include "qmi_rmnet_i.h"
#include <trace/events/dfc.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

#define NLMSG_FLOW_ACTIVATE 1
#define NLMSG_FLOW_DEACTIVATE 2
#define NLMSG_CLIENT_SETUP 4
#define NLMSG_CLIENT_DELETE 5
#define NLMSG_SCALE_FACTOR 6
#define NLMSG_WQ_FREQUENCY 7

#define FLAG_DFC_MASK 0x000F
#define FLAG_POWERSAVE_MASK 0x0010
#define DFC_MODE_MULTIQ 2

unsigned int rmnet_wq_frequency __read_mostly = 4;

#define PS_WORK_ACTIVE_BIT 0
#define PS_INTERVAL (((!rmnet_wq_frequency) ? 1 : rmnet_wq_frequency) * HZ)
#define NO_DELAY (0x0000 * HZ)

static unsigned int qmi_rmnet_scale_factor = 5;

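Editor's note: the NLMSG_* values above are commands carried in the tcm_family field of a struct tcmsg, with the remaining tcmsg fields overloaded per command as documented in the comments inside qmi_rmnet_setup_client() and qmi_rmnet_add_flow() below. A minimal user-space sketch of packing such a message follows; all values here are hypothetical and only illustrate the layout, not a real modem configuration.

#include <stdio.h>
#include <string.h>
#include <linux/rtnetlink.h>

/* Mirror the command and flag values defined in qmi_rmnet.c */
#define NLMSG_CLIENT_SETUP	4
#define FLAG_POWERSAVE_MASK	0x0010
#define DFC_MODE_MULTIQ		2

int main(void)
{
	struct tcmsg tcm;

	memset(&tcm, 0, sizeof(tcm));
	tcm.tcm_family = NLMSG_CLIENT_SETUP;	/* command, not an AF_* value */
	tcm.tcm_handle = 0;			/* QMI service instance (hypothetical) */
	tcm.tcm_info = 4;			/* ep_type, e.g. embedded (hypothetical) */
	tcm.tcm_parent = 1;			/* iface_id (hypothetical) */
	tcm.tcm_ifindex = DFC_MODE_MULTIQ | FLAG_POWERSAVE_MASK; /* flags */

	printf("family=%u handle=%u ifindex=0x%x\n",
	       tcm.tcm_family, tcm.tcm_handle, tcm.tcm_ifindex);
	return 0;
}
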
struct qmi_elem_info data_ep_id_type_v01_ei[] = {
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum data_ep_type_enum_v01),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct data_ep_id_type_v01,
				   ep_type),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct data_ep_id_type_v01,
				   iface_id),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = 0,
		.ei_array = NULL,
	},
};
EXPORT_SYMBOL(data_ep_id_type_v01_ei);

void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
{
	int i;

	if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ))
		return NULL;

	for (i = 0; i < MAX_CLIENT_NUM; i++) {
		if (qmi->fc_info[i].dfc_client)
			return qmi->fc_info[i].dfc_client;
	}

	return NULL;
}

static inline int
qmi_rmnet_has_client(struct qmi_info *qmi)
{
	if (qmi->wda_client)
		return 1;

	return qmi_rmnet_has_dfc_client(qmi) ? 1 : 0;
}

#ifdef CONFIG_QCOM_QMI_DFC
static void
qmi_rmnet_clean_flow_list(struct qmi_info *qmi, struct net_device *dev,
			  struct qos_info *qos)
{
	struct rmnet_bearer_map *bearer, *br_tmp;
	struct rmnet_flow_map *itm, *fl_tmp;

	ASSERT_RTNL();

	list_for_each_entry_safe(itm, fl_tmp, &qos->flow_head, list) {
		list_del(&itm->list);
		kfree(itm);
	}

	list_for_each_entry_safe(bearer, br_tmp, &qos->bearer_head, list) {
		list_del(&bearer->list);
		kfree(bearer);
	}
}

struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos, u32 flow_id, int ip_type)
{
	struct rmnet_flow_map *itm;

	if (!qos)
		return NULL;

	list_for_each_entry(itm, &qos->flow_head, list) {
		if ((itm->flow_id == flow_id) && (itm->ip_type == ip_type))
			return itm;
	}
	return NULL;
}

struct rmnet_bearer_map *
qmi_rmnet_get_bearer_map(struct qos_info *qos, uint8_t bearer_id)
{
	struct rmnet_bearer_map *itm;

	if (!qos)
		return NULL;

	list_for_each_entry(itm, &qos->bearer_head, list) {
		if (itm->bearer_id == bearer_id)
			return itm;
	}
	return NULL;
}

static void qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
				      struct rmnet_flow_map *new_map)
{
	itm->bearer_id = new_map->bearer_id;
	itm->flow_id = new_map->flow_id;
	itm->ip_type = new_map->ip_type;
	itm->tcm_handle = new_map->tcm_handle;
}

int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable)
{
	struct netdev_queue *q;

	if (unlikely(tcm_handle >= dev->num_tx_queues))
		return 0;

	q = netdev_get_tx_queue(dev, tcm_handle);
	if (unlikely(!q))
		return 0;

	if (enable)
		netif_tx_wake_queue(q);
	else
		netif_tx_stop_queue(q);

	return 0;
}

static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
			      struct qmi_info *qmi)
{
	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
	struct rmnet_flow_map new_map, *itm;
	struct rmnet_bearer_map *bearer;

	if (!qos_info)
		return -EINVAL;

	ASSERT_RTNL();

	/* flow activate
	 * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id,
	 * tcm->tcm_ifindex - ip_type, tcm->tcm_handle - tcm_handle
	 */

	new_map.bearer_id = tcm->tcm__pad1;
	new_map.flow_id = tcm->tcm_parent;
	new_map.ip_type = tcm->tcm_ifindex;
	new_map.tcm_handle = tcm->tcm_handle;
	trace_dfc_flow_info(dev->name, new_map.bearer_id, new_map.flow_id,
			    new_map.ip_type, new_map.tcm_handle, 1);

	spin_lock_bh(&qos_info->qos_lock);

	itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
				     new_map.ip_type);
	if (itm) {
		qmi_rmnet_update_flow_map(itm, &new_map);
	} else {
		itm = kzalloc(sizeof(*itm), GFP_ATOMIC);
		if (!itm) {
			spin_unlock_bh(&qos_info->qos_lock);
			return -ENOMEM;
		}

		qmi_rmnet_update_flow_map(itm, &new_map);
		list_add(&itm->list, &qos_info->flow_head);

		bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
		if (bearer) {
			bearer->flow_ref++;
		} else {
			bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC);
			if (!bearer) {
				spin_unlock_bh(&qos_info->qos_lock);
				return -ENOMEM;
			}

			bearer->bearer_id = new_map.bearer_id;
			bearer->flow_ref = 1;
			bearer->grant_size = qos_info->default_grant;
			bearer->grant_thresh =
				qmi_rmnet_grant_per(bearer->grant_size);
			qos_info->default_grant = DEFAULT_GRANT;
			list_add(&bearer->list, &qos_info->bearer_head);
		}

		qmi_rmnet_flow_control(dev, itm->tcm_handle,
				       bearer->grant_size > 0 ? 1 : 0);

		trace_dfc_qmi_tc(dev->name, itm->bearer_id, itm->flow_id,
				 bearer->grant_size, 0, itm->tcm_handle, 1);
	}

	spin_unlock_bh(&qos_info->qos_lock);

	return 0;
}

static int
qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
		   struct qmi_info *qmi)
{
	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
	struct rmnet_flow_map new_map, *itm;
	struct rmnet_bearer_map *bearer;

	if (!qos_info)
		return -EINVAL;

	ASSERT_RTNL();

	/* flow deactivate
	 * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id,
	 * tcm->tcm_ifindex - ip_type
	 */

	spin_lock_bh(&qos_info->qos_lock);

	new_map.bearer_id = tcm->tcm__pad1;
	new_map.flow_id = tcm->tcm_parent;
	new_map.ip_type = tcm->tcm_ifindex;
	itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
				     new_map.ip_type);
	if (itm) {
		trace_dfc_flow_info(dev->name, new_map.bearer_id,
				    new_map.flow_id, new_map.ip_type,
				    itm->tcm_handle, 0);
		list_del(&itm->list);

		/* Enable flow to allow new call setup */
		qmi_rmnet_flow_control(dev, itm->tcm_handle, 1);
		trace_dfc_qmi_tc(dev->name, itm->bearer_id, itm->flow_id,
				 0, 0, itm->tcm_handle, 1);

		/* clear bearer map */
		bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
		if (bearer && --bearer->flow_ref == 0) {
			list_del(&bearer->list);
			kfree(bearer);
		}

		kfree(itm);
	}

	if (list_empty(&qos_info->flow_head)) {
		netif_tx_wake_all_queues(dev);
		trace_dfc_qmi_tc(dev->name, 0xFF, 0, DEFAULT_GRANT, 0, 0, 1);
	}

	spin_unlock_bh(&qos_info->qos_lock);

	return 0;
}
#else
static inline void
qmi_rmnet_update_flow_link(struct qmi_info *qmi, struct net_device *dev,
			   struct rmnet_flow_map *itm, int add_flow)
{
}

static inline void qmi_rmnet_clean_flow_list(struct qos_info *qos)
{
}

static inline void
qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
			  struct rmnet_flow_map *new_map)
{
}

static inline int
qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
		   struct qmi_info *qmi)
{
	return -EINVAL;
}

static inline int
qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
		   struct qmi_info *qmi)
{
	return -EINVAL;
}
#endif

static int
qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
{
	int idx, rc, err = 0;

	ASSERT_RTNL();

	/* client setup
	 * tcm->tcm_handle - instance, tcm->tcm_info - ep_type,
	 * tcm->tcm_parent - iface_id, tcm->tcm_ifindex - flags
	 */
	idx = (tcm->tcm_handle == 0) ? 0 : 1;

	if (!qmi) {
		qmi = kzalloc(sizeof(struct qmi_info), GFP_KERNEL);
		if (!qmi)
			return -ENOMEM;

		rmnet_init_qmi_pt(port, qmi);
	}

	qmi->flag = tcm->tcm_ifindex;
	qmi->fc_info[idx].svc.instance = tcm->tcm_handle;
	qmi->fc_info[idx].svc.ep_type = tcm->tcm_info;
	qmi->fc_info[idx].svc.iface_id = tcm->tcm_parent;

	if (((tcm->tcm_ifindex & FLAG_DFC_MASK) == DFC_MODE_MULTIQ) &&
	    (qmi->fc_info[idx].dfc_client == NULL)) {
		rc = dfc_qmi_client_init(port, idx, qmi);
		if (rc < 0)
			err = rc;
	}

	if ((tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) &&
	    (idx == 0) && (qmi->wda_client == NULL)) {
		rc = wda_qmi_client_init(port, tcm->tcm_handle);
		if (rc < 0)
			err = rc;
	}

	return err;
}

static int
__qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx)
{
	ASSERT_RTNL();

	if (qmi->fc_info[idx].dfc_client) {
		dfc_qmi_client_exit(qmi->fc_info[idx].dfc_client);
		qmi->fc_info[idx].dfc_client = NULL;
	}

	if (!qmi_rmnet_has_client(qmi)) {
		rmnet_reset_qmi_pt(port);
		kfree(qmi);
		return 0;
	}

	return 1;
}

static void
qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
{
	int idx;

	/* client delete: tcm->tcm_handle - instance */
	idx = (tcm->tcm_handle == 0) ? 0 : 1;

	ASSERT_RTNL();

	if ((idx == 0) && qmi->wda_client) {
		wda_qmi_client_exit(qmi->wda_client);
		qmi->wda_client = NULL;
	}

	__qmi_rmnet_delete_client(port, qmi, idx);
}

void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)
{
	struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
	struct tcmsg *tcm = (struct tcmsg *)tcm_pt;

	switch (tcm->tcm_family) {
	case NLMSG_FLOW_ACTIVATE:
		if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ) ||
		    !qmi_rmnet_has_dfc_client(qmi))
			return;

		qmi_rmnet_add_flow(dev, tcm, qmi);
		break;
	case NLMSG_FLOW_DEACTIVATE:
		if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ))
			return;

		qmi_rmnet_del_flow(dev, tcm, qmi);
		break;
	case NLMSG_CLIENT_SETUP:
		if (((tcm->tcm_ifindex & FLAG_DFC_MASK) != DFC_MODE_MULTIQ) &&
		    !(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))
			return;

		if (qmi_rmnet_setup_client(port, qmi, tcm) < 0) {
			if (!qmi_rmnet_has_client(qmi)) {
				kfree(qmi);
				rmnet_reset_qmi_pt(port);
			}
		}
		if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
			qmi_rmnet_work_init(port);
			rmnet_set_powersave_format(port);
		}
		break;
	case NLMSG_CLIENT_DELETE:
		if (!qmi)
			return;
		if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
			rmnet_clear_powersave_format(port);
			qmi_rmnet_work_exit(port);
		}
		qmi_rmnet_delete_client(port, qmi, tcm);
		break;
	case NLMSG_SCALE_FACTOR:
		if (!tcm->tcm_index)
			return;
		qmi_rmnet_scale_factor = tcm->tcm_index;
		break;
	case NLMSG_WQ_FREQUENCY:
		rmnet_wq_frequency = tcm->tcm_index;
		break;
	default:
		pr_debug("%s(): No handler\n", __func__);
		break;
	}
}
EXPORT_SYMBOL(qmi_rmnet_change_link);

void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
{
	struct qmi_info *qmi = (struct qmi_info *)qmi_pt;
	int i;

	if (!qmi)
		return;

	ASSERT_RTNL();

	qmi_rmnet_work_exit(port);

	if (qmi->wda_client) {
		wda_qmi_client_exit(qmi->wda_client);
		qmi->wda_client = NULL;
	}

	for (i = 0; i < MAX_CLIENT_NUM; i++) {
		if (!__qmi_rmnet_delete_client(port, qmi, i))
			return;
	}
}
EXPORT_SYMBOL(qmi_rmnet_qmi_exit);

void qmi_rmnet_enable_all_flows(struct net_device *dev)
{
	struct qos_info *qos;
	struct rmnet_bearer_map *bearer;
	int do_wake = 0;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos)
		return;

	spin_lock_bh(&qos->qos_lock);

	list_for_each_entry(bearer, &qos->bearer_head, list) {
		bearer->grant_before_ps = bearer->grant_size;
		bearer->seq_before_ps = bearer->seq;
		bearer->grant_size = DEFAULT_GRANT;
		bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
		bearer->seq = 0;
		bearer->ack_req = 0;
		bearer->ancillary = 0;
		do_wake = 1;
	}

	if (do_wake) {
		netif_tx_wake_all_queues(dev);
		trace_dfc_qmi_tc(dev->name, 0xFF, 0, DEFAULT_GRANT, 0, 0, 1);
	}

	spin_unlock_bh(&qos->qos_lock);
}
EXPORT_SYMBOL(qmi_rmnet_enable_all_flows);

#ifdef CONFIG_QCOM_QMI_DFC
void qmi_rmnet_burst_fc_check(struct net_device *dev,
			      int ip_type, u32 mark, unsigned int len)
{
	struct qos_info *qos = rmnet_get_qos_pt(dev);

	if (!qos)
		return;

	dfc_qmi_burst_check(dev, qos, ip_type, mark, len);
}
EXPORT_SYMBOL(qmi_rmnet_burst_fc_check);

int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct qos_info *qos = rmnet_get_qos_pt(dev);
	int txq = 0, ip_type = AF_INET;
	unsigned int len = skb->len;
	struct rmnet_flow_map *itm;
	u32 mark = skb->mark;

	if (!qos)
		return 0;

	switch (skb->protocol) {
	/* TCPv4 ACKs */
	case htons(ETH_P_IP):
		ip_type = AF_INET;
		if ((!mark) &&
		    (ip_hdr(skb)->protocol == IPPROTO_TCP) &&
		    (len == 40 || len == 52) &&
		    (ip_hdr(skb)->ihl == 5) &&
		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
			return 1;
		break;

	/* TCPv6 ACKs */
	case htons(ETH_P_IPV6):
		ip_type = AF_INET6;
		if ((!mark) &&
		    (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
		    (len == 60 || len == 72) &&
		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
			return 1;
		/* Fall through */
	}

	/* Default flows */
	if (!mark)
		return 0;

	/* Dedicated flows */
	spin_lock_bh(&qos->qos_lock);

	itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
	if (unlikely(!itm))
		goto done;

	txq = itm->tcm_handle;

done:
	spin_unlock_bh(&qos->qos_lock);
	return txq;
}
EXPORT_SYMBOL(qmi_rmnet_get_queue);

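Editor's note: the length checks above single out likely "pure ACKs" so they can be steered to a dedicated queue: 40 bytes is a minimal IPv4 header (20) plus a minimal TCP header (20), 52 allows for 12 bytes of TCP options (typically timestamps), and 60/72 are the IPv6 equivalents with the 40-byte fixed header. A small standalone restatement of that predicate, for illustration only:

#include <stdbool.h>
#include <stdio.h>

/* Re-states the size test used in qmi_rmnet_get_queue(): a candidate
 * pure ACK is an unmarked TCP segment carrying no payload.
 */
static bool looks_like_pure_ack(bool ipv6, unsigned int len)
{
	if (ipv6)
		return len == 60 || len == 72;	/* 40 (IPv6) + 20 (TCP) [+12 opts] */
	return len == 40 || len == 52;		/* 20 (IPv4) + 20 (TCP) [+12 opts] */
}

int main(void)
{
	printf("%d %d\n", looks_like_pure_ack(false, 52),
	       looks_like_pure_ack(true, 100));	/* prints "1 0" */
	return 0;
}
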
inline unsigned int qmi_rmnet_grant_per(unsigned int grant)
{
	return grant / qmi_rmnet_scale_factor;
}
EXPORT_SYMBOL(qmi_rmnet_grant_per);

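Editor's note: with the defaults in this snapshot (DEFAULT_GRANT of 10240 bytes in qmi_rmnet_i.h and a scale factor of 5), the grant threshold works out to 10240 / 5 = 2048 bytes. A trivial standalone check of that arithmetic:

#include <stdio.h>

/* Mirrors qmi_rmnet_grant_per() with its default scale factor of 5 */
static unsigned int grant_per(unsigned int grant, unsigned int scale)
{
	return grant / scale;
}

int main(void)
{
	printf("threshold = %u\n", grant_per(10240, 5));	/* prints 2048 */
	return 0;
}
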
void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
{
	struct qos_info *qos;

	qos = kmalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return NULL;

	qos->mux_id = mux_id;
	qos->real_dev = real_dev;
	qos->default_grant = DEFAULT_GRANT;
	qos->tran_num = 0;
	INIT_LIST_HEAD(&qos->flow_head);
	INIT_LIST_HEAD(&qos->bearer_head);
	spin_lock_init(&qos->qos_lock);

	return qos;
}
EXPORT_SYMBOL(qmi_rmnet_qos_init);

void qmi_rmnet_qos_exit(struct net_device *dev, void *qos)
{
	void *port = rmnet_get_rmnet_port(dev);
	struct qmi_info *qmi = rmnet_get_qmi_pt(port);
	struct qos_info *qos_info = (struct qos_info *)qos;

	if (!qmi || !qos)
		return;

	qmi_rmnet_clean_flow_list(qmi, dev, qos_info);
	kfree(qos);
}
EXPORT_SYMBOL(qmi_rmnet_qos_exit);
#endif

#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
static struct workqueue_struct *rmnet_ps_wq;
static struct rmnet_powersave_work *rmnet_work;
static struct list_head ps_list;

struct rmnet_powersave_work {
	struct delayed_work work;
	void *port;
	u64 old_rx_pkts;
	u64 old_tx_pkts;
};

void qmi_rmnet_ps_on_notify(void *port)
{
	struct qmi_rmnet_ps_ind *tmp;

	list_for_each_entry(tmp, &ps_list, list)
		tmp->ps_on_handler(port);
}
EXPORT_SYMBOL(qmi_rmnet_ps_on_notify);

void qmi_rmnet_ps_off_notify(void *port)
{
	struct qmi_rmnet_ps_ind *tmp;

	list_for_each_entry(tmp, &ps_list, list)
		tmp->ps_off_handler(port);
}
EXPORT_SYMBOL(qmi_rmnet_ps_off_notify);

int qmi_rmnet_ps_ind_register(void *port,
			      struct qmi_rmnet_ps_ind *ps_ind)
{
	if (!port || !ps_ind || !ps_ind->ps_on_handler ||
	    !ps_ind->ps_off_handler)
		return -EINVAL;

	list_add_rcu(&ps_ind->list, &ps_list);

	return 0;
}
EXPORT_SYMBOL(qmi_rmnet_ps_ind_register);

int qmi_rmnet_ps_ind_deregister(void *port,
				struct qmi_rmnet_ps_ind *ps_ind)
{
	struct qmi_rmnet_ps_ind *tmp;

	if (!port || !ps_ind)
		return -EINVAL;

	list_for_each_entry(tmp, &ps_list, list) {
		if (tmp == ps_ind) {
			list_del_rcu(&ps_ind->list);
			goto done;
		}
	}

done:
	return 0;
}
EXPORT_SYMBOL(qmi_rmnet_ps_ind_deregister);

int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable)
{
	int rc = -EINVAL;
	struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);

	if (!qmi || !qmi->wda_client)
		return rc;

	rc = wda_set_powersave_mode(qmi->wda_client, enable);
	if (rc < 0) {
		pr_err("%s() failed set powersave mode[%u], err=%d\n",
		       __func__, enable, rc);
		return rc;
	}

	if (enable)
		dfc_qmi_wq_flush(qmi);

	return 0;
}
EXPORT_SYMBOL(qmi_rmnet_set_powersave_mode);

void qmi_rmnet_work_restart(void *port)
{
	if (!rmnet_ps_wq || !rmnet_work)
		return;
	queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, NO_DELAY);
}
EXPORT_SYMBOL(qmi_rmnet_work_restart);

static void qmi_rmnet_check_stats(struct work_struct *work)
{
	struct rmnet_powersave_work *real_work;
	struct qmi_info *qmi;
	u64 rxd, txd;
	u64 rx, tx;

	real_work = container_of(to_delayed_work(work),
				 struct rmnet_powersave_work, work);

	if (unlikely(!real_work || !real_work->port))
		return;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(real_work->port);
	if (unlikely(!qmi))
		return;

	if (qmi->ps_enabled) {
		/* Retry after small delay if qmi error
		 * This resumes UL grants by disabling
		 * powersave mode if successful.
		 */
		if (qmi_rmnet_set_powersave_mode(real_work->port, 0) < 0) {
			queue_delayed_work(rmnet_ps_wq,
					   &real_work->work, HZ / 50);
			return;
		}
		qmi->ps_enabled = 0;

		if (rmnet_get_powersave_notif(real_work->port))
			qmi_rmnet_ps_off_notify(real_work->port);

		goto end;
	}

	rmnet_get_packets(real_work->port, &rx, &tx);
	rxd = rx - real_work->old_rx_pkts;
	txd = tx - real_work->old_tx_pkts;
	real_work->old_rx_pkts = rx;
	real_work->old_tx_pkts = tx;

	if (!rxd && !txd) {
		if (qmi_rmnet_set_powersave_mode(real_work->port, 1) < 0) {
			queue_delayed_work(rmnet_ps_wq,
					   &real_work->work, PS_INTERVAL);
			return;
		}
		qmi->ps_enabled = 1;
		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);

		/* Enable flow after clear the bit so a new
		 * work can be triggered.
		 */
		rmnet_enable_all_flows(real_work->port);

		if (rmnet_get_powersave_notif(real_work->port))
			qmi_rmnet_ps_on_notify(real_work->port);

		return;
	}
end:
	queue_delayed_work(rmnet_ps_wq, &real_work->work, PS_INTERVAL);
}

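Editor's note: stripped of the QMI plumbing, qmi_rmnet_check_stats() reduces to "no rx/tx delta over one interval means enter powersave; traffic means stay awake; a failed mode change means retry sooner". A simplified user-space model of just that decision, with made-up counter values for illustration:

#include <stdio.h>

/* Models the idle test in qmi_rmnet_check_stats(): compare packet
 * counters against the previous sample and report whether the
 * interval was idle (which would trigger powersave entry).
 */
struct sample { unsigned long long rx, tx; };

static int should_enter_powersave(struct sample *old, struct sample now)
{
	unsigned long long rxd = now.rx - old->rx;
	unsigned long long txd = now.tx - old->tx;

	old->rx = now.rx;
	old->tx = now.tx;
	return !rxd && !txd;
}

int main(void)
{
	struct sample old = { 100, 50 };

	/* traffic seen in the interval: stay awake */
	printf("%d\n", should_enter_powersave(&old, (struct sample){ 120, 55 }));
	/* identical counters next interval: idle, enter powersave */
	printf("%d\n", should_enter_powersave(&old, (struct sample){ 120, 55 }));
	return 0;
}
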
static void qmi_rmnet_work_set_active(void *port, int status)
{
	struct qmi_info *qmi;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
	if (unlikely(!qmi))
		return;

	if (status)
		set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
	else
		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
}

void qmi_rmnet_work_init(void *port)
{
	if (rmnet_ps_wq)
		return;

	rmnet_ps_wq = alloc_workqueue("rmnet_powersave_work",
				      WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);

	rmnet_work = kmalloc(sizeof(*rmnet_work), GFP_ATOMIC);
	if (!rmnet_work) {
		destroy_workqueue(rmnet_ps_wq);
		rmnet_ps_wq = NULL;
		return;
	}
	INIT_LIST_HEAD(&ps_list);
	INIT_DEFERRABLE_WORK(&rmnet_work->work, qmi_rmnet_check_stats);
	rmnet_work->port = port;
	rmnet_get_packets(rmnet_work->port, &rmnet_work->old_rx_pkts,
			  &rmnet_work->old_tx_pkts);

	qmi_rmnet_work_set_active(rmnet_work->port, 1);
	queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, PS_INTERVAL);
}
EXPORT_SYMBOL(qmi_rmnet_work_init);

void qmi_rmnet_work_maybe_restart(void *port)
{
	struct qmi_info *qmi;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
	if (unlikely(!qmi))
		return;

	if (!test_and_set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active))
		qmi_rmnet_work_restart(port);
}
EXPORT_SYMBOL(qmi_rmnet_work_maybe_restart);

void qmi_rmnet_work_exit(void *port)
{
	if (!rmnet_ps_wq || !rmnet_work)
		return;
	cancel_delayed_work_sync(&rmnet_work->work);
	destroy_workqueue(rmnet_ps_wq);
	qmi_rmnet_work_set_active(port, 0);
	rmnet_ps_wq = NULL;
	kfree(rmnet_work);
	rmnet_work = NULL;
}
EXPORT_SYMBOL(qmi_rmnet_work_exit);
#endif
170	drivers/soc/qcom/qmi_rmnet_i.h	Normal file

@ -0,0 +1,170 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#ifndef _RMNET_QMI_I_H
#define _RMNET_QMI_I_H

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define IP_VER_4 4
#define IP_VER_6 6

#define MAX_CLIENT_NUM 2
#define MAX_FLOW_NUM 32
#define DEFAULT_GRANT 10240

struct rmnet_flow_map {
	struct list_head list;
	u8 bearer_id;
	u32 flow_id;
	int ip_type;
	u32 tcm_handle;
};

struct rmnet_bearer_map {
	struct list_head list;
	u8 bearer_id;
	int flow_ref;
	u32 grant_size;
	u32 grant_thresh;
	u16 seq;
	u8 ack_req;
	u32 grant_before_ps;
	u16 seq_before_ps;
	u32 ancillary;
};

struct svc_info {
	u32 instance;
	u32 ep_type;
	u32 iface_id;
};

struct fc_info {
	struct svc_info svc;
	void *dfc_client;
};

struct qos_info {
	u8 mux_id;
	struct net_device *real_dev;
	struct list_head flow_head;
	struct list_head bearer_head;
	u32 default_grant;
	u32 tran_num;
	spinlock_t qos_lock;
};

struct flow_info {
	struct net_device *dev;
	struct rmnet_flow_map *itm;
};

struct qmi_info {
	int flag;
	void *wda_client;
	struct fc_info fc_info[MAX_CLIENT_NUM];
	unsigned long ps_work_active;
	int ps_enabled;
};

enum data_ep_type_enum_v01 {
	DATA_EP_TYPE_ENUM_MIN_ENUM_VAL_V01 = INT_MIN,
	DATA_EP_TYPE_RESERVED_V01 = 0x00,
	DATA_EP_TYPE_HSIC_V01 = 0x01,
	DATA_EP_TYPE_HSUSB_V01 = 0x02,
	DATA_EP_TYPE_PCIE_V01 = 0x03,
	DATA_EP_TYPE_EMBEDDED_V01 = 0x04,
	DATA_EP_TYPE_ENUM_MAX_ENUM_VAL_V01 = INT_MAX
};

struct data_ep_id_type_v01 {
	enum data_ep_type_enum_v01 ep_type;
	u32 iface_id;
};

extern struct qmi_elem_info data_ep_id_type_v01_ei[];

void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi);

#ifdef CONFIG_QCOM_QMI_DFC
struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos_info,
		       u32 flow_id, int ip_type);

struct rmnet_bearer_map *
qmi_rmnet_get_bearer_map(struct qos_info *qos_info, u8 bearer_id);

unsigned int qmi_rmnet_grant_per(unsigned int grant);

int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi);

void dfc_qmi_client_exit(void *dfc_data);

void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
			 int ip_type, u32 mark, unsigned int len);

int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable);

void dfc_qmi_wq_flush(struct qmi_info *qmi);

#else
static inline struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos_info,
		       uint32_t flow_id, int ip_type)
{
	return NULL;
}

static inline struct rmnet_bearer_map *
qmi_rmnet_get_bearer_map(struct qos_info *qos_info, uint8_t bearer_id)
{
	return NULL;
}

static inline int
dfc_qmi_client_init(void *port, int modem, struct qmi_info *qmi)
{
	return -EINVAL;
}

static inline void dfc_qmi_client_exit(void *dfc_data)
{
}

static inline void
dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
		    int ip_type, u32 mark, unsigned int len)
{
}

static inline void
dfc_qmi_wq_flush(struct qmi_info *qmi)
{
}
#endif

#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
int wda_qmi_client_init(void *port, uint32_t instance);
void wda_qmi_client_exit(void *wda_data);
int wda_set_powersave_mode(void *wda_data, uint8_t enable);
#else
static inline int wda_qmi_client_init(void *port, uint32_t instance)
{
	return -EINVAL;
}

static inline void wda_qmi_client_exit(void *wda_data)
{
}

static inline int wda_set_powersave_mode(void *wda_data, uint8_t enable)
{
	return -EINVAL;
}
#endif
#endif /*_RMNET_QMI_I_H*/
429	drivers/soc/qcom/wda_qmi.c	Normal file

@ -0,0 +1,429 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/soc/qcom/qmi.h>
#include <soc/qcom/rmnet_qmi.h>
#define CREATE_TRACE_POINTS
#include <trace/events/wda.h>
#include "qmi_rmnet_i.h"

struct wda_qmi_data {
	void *rmnet_port;
	struct workqueue_struct *wda_wq;
	struct work_struct svc_arrive;
	struct qmi_handle handle;
	struct sockaddr_qrtr ssctl;
};

static void wda_svc_config(struct work_struct *work);
/* **************************************************** */
#define WDA_SERVICE_ID_V01 0x1A
#define WDA_SERVICE_VERS_V01 0x01
#define WDA_TIMEOUT_MS 20

#define QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01 0x002D
#define QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01 0x002D
#define QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01_MAX_MSG_LEN 18
#define QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01_MAX_MSG_LEN 14

#define QMI_WDA_SET_POWERSAVE_MODE_REQ_V01 0x002E
#define QMI_WDA_SET_POWERSAVE_MODE_RESP_V01 0x002E
#define QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN 4
#define QMI_WDA_SET_POWERSAVE_MODE_RESP_V01_MAX_MSG_LEN 7

enum wda_powersave_config_mask_enum_v01 {
	WDA_DATA_POWERSAVE_CONFIG_MASK_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
	WDA_DATA_POWERSAVE_CONFIG_NOT_SUPPORTED = 0x00,
	WDA_DATA_POWERSAVE_CONFIG_DL_MARKER_V01 = 0x01,
	WDA_DATA_POWERSAVE_CONFIG_FLOW_CTL_V01 = 0x02,
	WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01 = 0x7FFFFFFF,
	WDA_DATA_POWERSAVE_CONFIG_MASK_ENUM_MAX_ENUM_VAL_V01 = 2147483647
};

struct wda_set_powersave_config_req_msg_v01 {
	/* Mandatory */
	struct data_ep_id_type_v01 ep_id;
	/* Optional */
	uint8_t req_data_cfg_valid;
	enum wda_powersave_config_mask_enum_v01 req_data_cfg;
};

struct wda_set_powersave_config_resp_msg_v01 {
	/* Mandatory */
	struct qmi_response_type_v01 resp;
	/* Optional */
	uint8_t data_cfg_valid;
	enum wda_powersave_config_mask_enum_v01 data_cfg;
};

struct wda_set_powersave_mode_req_msg_v01 {
	/* Mandatory */
	uint8_t powersave_control_flag;
};

struct wda_set_powersave_mode_resp_msg_v01 {
	/* Mandatory */
	struct qmi_response_type_v01 resp;
};

static struct qmi_elem_info wda_set_powersave_config_req_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct data_ep_id_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct
				   wda_set_powersave_config_req_msg_v01,
				   ep_id),
		.ei_array = data_ep_id_type_v01_ei,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(uint8_t),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   wda_set_powersave_config_req_msg_v01,
				   req_data_cfg_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum
				    wda_powersave_config_mask_enum_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   wda_set_powersave_config_req_msg_v01,
				   req_data_cfg),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};

static struct qmi_elem_info wda_set_powersave_config_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct
				   wda_set_powersave_config_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(uint8_t),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   wda_set_powersave_config_resp_msg_v01,
				   data_cfg_valid),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum
				    wda_powersave_config_mask_enum_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct
				   wda_set_powersave_config_resp_msg_v01,
				   data_cfg),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};

static struct qmi_elem_info wda_set_powersave_mode_req_msg_v01_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(uint8_t),
		.array_type = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct
				   wda_set_powersave_mode_req_msg_v01,
				   powersave_control_flag),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};

static struct qmi_elem_info wda_set_powersave_mode_resp_msg_v01_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct
				   wda_set_powersave_mode_resp_msg_v01,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_EOTI,
		.array_type = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
	},
};

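Editor's note: these EI tables drive the generic kernel QMI encoder: each message element becomes a TLV with a 1-byte type, a 2-byte little-endian length, and the value. That is why the mode request's max message length is 4 (1 + 2 + 1 for the single flag byte). A hand-worked illustration of the resulting wire image, assuming that standard QMI TLV layout:

#include <stdio.h>

/* Hand-encoded wire image of wda_set_powersave_mode_req_msg_v01 as the
 * EI table above would produce it: TLV type 0x01, little-endian length
 * of 1, then the powersave_control_flag byte (enable = 1). Total of
 * 4 bytes, matching QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN.
 */
int main(void)
{
	unsigned char tlv[4] = { 0x01, 0x01, 0x00, 0x01 };
	unsigned int i;

	for (i = 0; i < sizeof(tlv); i++)
		printf("%02x ", tlv[i]);
	printf("\n");
	return 0;
}
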
static int wda_set_powersave_mode_req(void *wda_data, uint8_t enable)
{
	struct wda_qmi_data *data = (struct wda_qmi_data *)wda_data;
	struct wda_set_powersave_mode_resp_msg_v01 *resp;
	struct wda_set_powersave_mode_req_msg_v01 *req;
	struct qmi_txn txn;
	int ret;

	if (!data || !data->rmnet_port)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	ret = qmi_txn_init(&data->handle, &txn,
			   wda_set_powersave_mode_resp_msg_v01_ei, resp);
	if (ret < 0) {
		pr_err("%s() Failed init for response, err: %d\n",
		       __func__, ret);
		goto out;
	}

	req->powersave_control_flag = enable;
	ret = qmi_send_request(&data->handle, &data->ssctl, &txn,
			       QMI_WDA_SET_POWERSAVE_MODE_REQ_V01,
			       QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN,
			       wda_set_powersave_mode_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		pr_err("%s() Failed sending request, err: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_MS);
	if (ret < 0) {
		pr_err("%s() Response waiting failed, err: %d\n",
		       __func__, ret);
	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s() Request rejected, result: %d, err: %d\n",
		       __func__, resp->resp.result, resp->resp.error);
		ret = -resp->resp.result;
	}

out:
	kfree(resp);
	kfree(req);
	return ret;
}

static int wda_set_powersave_config_req(struct qmi_handle *wda_handle,
					struct qmi_info *qmi)
{
	struct wda_qmi_data *data = container_of(wda_handle,
						 struct wda_qmi_data, handle);
	struct wda_set_powersave_config_resp_msg_v01 *resp;
	struct wda_set_powersave_config_req_msg_v01 *req;
	struct qmi_txn txn;
	int ret;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	ret = qmi_txn_init(wda_handle, &txn,
			   wda_set_powersave_config_resp_msg_v01_ei, resp);
	if (ret < 0) {
		pr_err("%s() Failed init for response, err: %d\n",
		       __func__, ret);
		goto out;
	}

	req->ep_id.ep_type = qmi->fc_info[0].svc.ep_type;
	req->ep_id.iface_id = qmi->fc_info[0].svc.iface_id;
	req->req_data_cfg_valid = 1;
	req->req_data_cfg = WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01;
	ret = qmi_send_request(wda_handle, &data->ssctl, &txn,
			       QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01,
			       QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01_MAX_MSG_LEN,
			       wda_set_powersave_config_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		pr_err("%s() Failed sending request, err: %d\n", __func__, ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_MS);
	if (ret < 0) {
		pr_err("%s() Response waiting failed, err: %d\n",
		       __func__, ret);
	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("%s() Request rejected, result: %d, error: %d\n",
		       __func__, resp->resp.result, resp->resp.error);
		ret = -resp->resp.result;
	}

out:
	kfree(resp);
	kfree(req);
	return ret;
}

static void wda_svc_config(struct work_struct *work)
{
	struct wda_qmi_data *data = container_of(work, struct wda_qmi_data,
						 svc_arrive);
	struct qmi_info *qmi;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
	if (!qmi)
		goto clean_out;

	if (wda_set_powersave_config_req(&data->handle, qmi) < 0) {
		pr_err("%s() failed, qmi handle pt: %p\n",
		       __func__, &data->handle);
		goto clean_out;
	}

	trace_wda_client_state_up(qmi->fc_info[0].svc.instance,
				  qmi->fc_info[0].svc.ep_type,
				  qmi->fc_info[0].svc.iface_id);
	qmi->wda_client = (void *)data;
	pr_info("Connection established with the WDA Service\n");
	return;

clean_out:
	qmi_handle_release(&data->handle);
	destroy_workqueue(data->wda_wq);
	kfree(data);
}

static int wda_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
{
	struct wda_qmi_data *data = container_of(qmi, struct wda_qmi_data,
						 handle);

	data->ssctl.sq_family = AF_QIPCRTR;
	data->ssctl.sq_node = svc->node;
	data->ssctl.sq_port = svc->port;

	queue_work(data->wda_wq, &data->svc_arrive);

	return 0;
}

static void wda_svc_exit(struct qmi_handle *qmi, struct qmi_service *svc)
{
	struct wda_qmi_data *data = container_of(qmi, struct wda_qmi_data,
						 handle);

	if (!data)
		pr_info("%s() data is null\n", __func__);
}

static struct qmi_ops server_ops = {
	.new_server = wda_svc_arrive,
	.del_server = wda_svc_exit,
};

int wda_qmi_client_init(void *port, uint32_t instance)
{
	struct wda_qmi_data *data;
	int rc = 0;

	if (!port)
		return -EINVAL;

	pr_info("%s\n", __func__);

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->wda_wq = create_singlethread_workqueue("wda_wq");
	if (!data->wda_wq) {
		pr_err("%s Could not create workqueue\n", __func__);
		kfree(data);
		return -ENOMEM;
	}

	data->rmnet_port = port;
	INIT_WORK(&data->svc_arrive, wda_svc_config);

	rc = qmi_handle_init(&data->handle,
			     QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01_MAX_MSG_LEN,
			     &server_ops, NULL);
	if (rc < 0) {
		pr_err("%s: Failed qmi_handle_init, err: %d\n", __func__, rc);
		destroy_workqueue(data->wda_wq);
		kfree(data);
		return rc;
	}

	rc = qmi_add_lookup(&data->handle, WDA_SERVICE_ID_V01,
			    WDA_SERVICE_VERS_V01, instance);
	if (rc < 0) {
		pr_err("%s(): Failed qmi_add_lookup, err: %d\n", __func__, rc);
		qmi_handle_release(&data->handle);
		destroy_workqueue(data->wda_wq);
		kfree(data);
	}

	return rc;
}

void wda_qmi_client_exit(void *wda_data)
{
	struct wda_qmi_data *data = (struct wda_qmi_data *)wda_data;

	if (!data) {
		pr_info("%s() data is null\n", __func__);
		return;
	}

	trace_wda_client_state_down(0);
	qmi_handle_release(&data->handle);
	destroy_workqueue(data->wda_wq);
	kfree(data);
}

int wda_set_powersave_mode(void *wda_data, uint8_t enable)
{
	trace_wda_set_powersave_mode(enable);
	return wda_set_powersave_mode_req(wda_data, enable);
}
125	include/soc/qcom/qmi_rmnet.h	Normal file

@ -0,0 +1,125 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#ifndef _QMI_RMNET_H
#define _QMI_RMNET_H

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct qmi_rmnet_ps_ind {
	void (*ps_on_handler)(void *port);
	void (*ps_off_handler)(void *port);
	struct list_head list;
};


#ifdef CONFIG_QCOM_QMI_RMNET
void qmi_rmnet_qmi_exit(void *qmi_pt, void *port);
void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt);
void qmi_rmnet_enable_all_flows(struct net_device *dev);
#else
static inline void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
{
}

static inline void
qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)
{
}

static inline void
qmi_rmnet_enable_all_flows(struct net_device *dev)
{
}
#endif

#ifdef CONFIG_QCOM_QMI_DFC
void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id);
void qmi_rmnet_qos_exit(struct net_device *dev, void *qos);
void qmi_rmnet_burst_fc_check(struct net_device *dev,
			      int ip_type, u32 mark, unsigned int len);
int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb);
#else
static inline void *
qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
{
	return NULL;
}

static inline void qmi_rmnet_qos_exit(struct net_device *dev, void *qos)
{
}

static inline void
qmi_rmnet_burst_fc_check(struct net_device *dev,
			 int ip_type, u32 mark, unsigned int len)
{
}

static inline int qmi_rmnet_get_queue(struct net_device *dev,
				      struct sk_buff *skb)
{
	return 0;
}
#endif

#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable);
void qmi_rmnet_work_init(void *port);
void qmi_rmnet_work_exit(void *port);
void qmi_rmnet_work_maybe_restart(void *port);
void qmi_rmnet_work_restart(void *port);

int qmi_rmnet_ps_ind_register(void *port,
			      struct qmi_rmnet_ps_ind *ps_ind);
int qmi_rmnet_ps_ind_deregister(void *port,
				struct qmi_rmnet_ps_ind *ps_ind);
void qmi_rmnet_ps_off_notify(void *port);
void qmi_rmnet_ps_on_notify(void *port);

#else
static inline int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable)
{
	return 0;
}
static inline void qmi_rmnet_work_init(void *port)
{
}
static inline void qmi_rmnet_work_restart(void *port)
{
}
static inline void qmi_rmnet_work_exit(void *port)
{
}

static inline void qmi_rmnet_work_maybe_restart(void *port)
{
}

static inline int qmi_rmnet_ps_ind_register(void *port,
					    struct qmi_rmnet_ps_ind *ps_ind)
{
	return 0;
}
static inline int qmi_rmnet_ps_ind_deregister(void *port,
					      struct qmi_rmnet_ps_ind *ps_ind)
{
	return 0;
}

static inline void qmi_rmnet_ps_off_notify(void *port)
{
}

static inline void qmi_rmnet_ps_on_notify(void *port)
{
}
#endif
#endif /*_QMI_RMNET_H*/
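Editor's note: a hypothetical client sketch (not part of this patch) showing how the powersave indication API declared above could be used. Only the qmi_rmnet_ps_ind struct and the register/deregister calls come from this header; the handler names and the port pointer are illustrative, and kernel context is assumed:

/* Example handlers a client module might provide (hypothetical). */
static void example_ps_on(void *port)
{
	/* pause speculative uplink activity while the modem is in powersave */
}

static void example_ps_off(void *port)
{
	/* resume normal operation */
}

static struct qmi_rmnet_ps_ind example_ind = {
	.ps_on_handler = example_ps_on,
	.ps_off_handler = example_ps_off,
};

/* ...then, in the client's init path, with a valid rmnet port pointer: */
/* qmi_rmnet_ps_ind_register(port, &example_ind); */
/* ...and qmi_rmnet_ps_ind_deregister(port, &example_ind) on teardown. */
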
74	include/soc/qcom/rmnet_qmi.h	Normal file

@ -0,0 +1,74 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#ifndef _RMNET_QMI_H
#define _RMNET_QMI_H

#include <linux/netdevice.h>
#include <linux/skbuff.h>

void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb);

#ifdef CONFIG_QCOM_QMI_RMNET
void *rmnet_get_qmi_pt(void *port);
void *rmnet_get_qos_pt(struct net_device *dev);
void *rmnet_get_rmnet_port(struct net_device *dev);
struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id);
void rmnet_reset_qmi_pt(void *port);
void rmnet_init_qmi_pt(void *port, void *qmi);
void rmnet_enable_all_flows(void *port);
void rmnet_set_powersave_format(void *port);
void rmnet_clear_powersave_format(void *port);
void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
int rmnet_get_powersave_notif(void *port);
#else
static inline void *rmnet_get_qmi_pt(void *port)
{
	return NULL;
}

static inline void *rmnet_get_qos_pt(struct net_device *dev)
{
	return NULL;
}

static inline void *rmnet_get_rmnet_port(struct net_device *dev)
{
	return NULL;
}

static inline struct net_device *rmnet_get_rmnet_dev(void *port,
						     u8 mux_id)
{
	return NULL;
}

static inline void rmnet_reset_qmi_pt(void *port)
{
}

static inline void rmnet_init_qmi_pt(void *port, void *qmi)
{
}

static inline void rmnet_enable_all_flows(void *port)
{
}

static inline void rmnet_set_powersave_format(void *port)
{
}

static inline void rmnet_clear_powersave_format(void *port)
{
}

static inline void rmnet_get_packets(void *port, u64 *rx, u64 *tx)
{
}

static inline int rmnet_get_powersave_notif(void *port)
{
	return 0;
}

#endif /* CONFIG_QCOM_QMI_RMNET */
#endif /*_RMNET_QMI_H*/
214	include/trace/events/dfc.h	Normal file

@ -0,0 +1,214 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM dfc

#if !defined(_TRACE_DFC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DFC_H

#include <linux/tracepoint.h>

TRACE_EVENT(dfc_qmi_tc,

	TP_PROTO(const char *name, u8 bearer_id, u32 flow_id, u32 grant,
		 int qlen, u32 tcm_handle, int enable),

	TP_ARGS(name, bearer_id, flow_id, grant, qlen, tcm_handle, enable),

	TP_STRUCT__entry(
		__string(dev_name, name)
		__field(u8, bid)
		__field(u32, fid)
		__field(u32, grant)
		__field(int, qlen)
		__field(u32, tcm_handle)
		__field(int, enable)
	),

	TP_fast_assign(
		__assign_str(dev_name, name);
		__entry->bid = bearer_id;
		__entry->fid = flow_id;
		__entry->grant = grant;
		__entry->qlen = qlen;
		__entry->tcm_handle = tcm_handle;
		__entry->enable = enable;
	),

	TP_printk("dev=%s bearer_id=%u grant=%u len=%d flow_id=%u q=%d %s",
		__get_str(dev_name),
		__entry->bid, __entry->grant, __entry->qlen, __entry->fid,
		__entry->tcm_handle,
		__entry->enable ? "enable" : "disable")
);

TRACE_EVENT(dfc_flow_ind,

	TP_PROTO(int src, int idx, u8 mux_id, u8 bearer_id, u32 grant,
		 u16 seq_num, u8 ack_req, u32 ancillary),

	TP_ARGS(src, idx, mux_id, bearer_id, grant, seq_num, ack_req,
		ancillary),

	TP_STRUCT__entry(
		__field(int, src)
		__field(int, idx)
		__field(u8, mid)
		__field(u8, bid)
		__field(u32, grant)
		__field(u16, seq)
		__field(u8, ack_req)
		__field(u32, ancillary)
	),

	TP_fast_assign(
		__entry->src = src;
		__entry->idx = idx;
		__entry->mid = mux_id;
		__entry->bid = bearer_id;
		__entry->grant = grant;
		__entry->seq = seq_num;
		__entry->ack_req = ack_req;
		__entry->ancillary = ancillary;
	),

	TP_printk("src=%d [%d]: mid=%u bid=%u grant=%u seq=%u ack=%u anc=%u",
		__entry->src, __entry->idx, __entry->mid, __entry->bid,
		__entry->grant, __entry->seq, __entry->ack_req,
		__entry->ancillary)
);

TRACE_EVENT(dfc_flow_check,

	TP_PROTO(const char *name, u8 bearer_id, unsigned int len, u32 grant),

	TP_ARGS(name, bearer_id, len, grant),

	TP_STRUCT__entry(
		__string(dev_name, name)
		__field(u8, bearer_id)
		__field(unsigned int, len)
		__field(u32, grant)
	),

	TP_fast_assign(
		__assign_str(dev_name, name);
		__entry->bearer_id = bearer_id;
		__entry->len = len;
		__entry->grant = grant;
	),

	TP_printk("dev=%s bearer_id=%u skb_len=%u current_grant=%u",
		__get_str(dev_name),
		__entry->bearer_id, __entry->len, __entry->grant)
);

TRACE_EVENT(dfc_flow_info,

	TP_PROTO(const char *name, u8 bearer_id, u32 flow_id, int ip_type,
		 u32 handle, int add),

	TP_ARGS(name, bearer_id, flow_id, ip_type, handle, add),

	TP_STRUCT__entry(
		__string(dev_name, name)
		__field(u8, bid)
		__field(u32, fid)
		__field(int, ip)
		__field(u32, handle)
		__field(int, action)
	),

	TP_fast_assign(
		__assign_str(dev_name, name);
		__entry->bid = bearer_id;
		__entry->fid = flow_id;
		__entry->ip = ip_type;
		__entry->handle = handle;
		__entry->action = add;
	),

	TP_printk("%s: dev=%s bearer_id=%u flow_id=%u ip_type=%d q=%d",
		__entry->action ? "add flow" : "delete flow",
		__get_str(dev_name),
		__entry->bid, __entry->fid, __entry->ip, __entry->handle)
);

TRACE_EVENT(dfc_client_state_up,

	TP_PROTO(int idx, u32 instance, u32 ep_type, u32 iface),

	TP_ARGS(idx, instance, ep_type, iface),

	TP_STRUCT__entry(
		__field(int, idx)
		__field(u32, instance)
		__field(u32, ep_type)
		__field(u32, iface)
	),

	TP_fast_assign(
		__entry->idx = idx;
		__entry->instance = instance;
		__entry->ep_type = ep_type;
		__entry->iface = iface;
	),

	TP_printk("DFC Client[%d] connect: instance=%u ep_type=%u iface_id=%u",
		__entry->idx, __entry->instance,
		__entry->ep_type, __entry->iface)
);

TRACE_EVENT(dfc_client_state_down,

	TP_PROTO(int idx, int from_cb),

	TP_ARGS(idx, from_cb),

	TP_STRUCT__entry(
		__field(int, idx)
		__field(int, from_cb)
	),

	TP_fast_assign(
		__entry->idx = idx;
		__entry->from_cb = from_cb;
	),

	TP_printk("DFC Client[%d] exit: callback %d",
		__entry->idx, __entry->from_cb)
);

TRACE_EVENT(dfc_qmap_cmd,

	TP_PROTO(u8 mux_id, u8 bearer_id, u16 seq_num, u8 type, u32 tran),

	TP_ARGS(mux_id, bearer_id, seq_num, type, tran),

	TP_STRUCT__entry(
		__field(u8, mid)
		__field(u8, bid)
		__field(u16, seq)
		__field(u8, type)
		__field(u32, tran)
	),

	TP_fast_assign(
		__entry->mid = mux_id;
		__entry->bid = bearer_id;
		__entry->seq = seq_num;
		__entry->type = type;
		__entry->tran = tran;
	),

	TP_printk("mux_id=%u bearer_id=%u seq_num=%u type=%u tran=%u",
		__entry->mid, __entry->bid, __entry->seq,
		__entry->type, __entry->tran)
);

#endif /* _TRACE_DFC_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
476	include/trace/events/rmnet.h	Normal file

@ -0,0 +1,476 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM rmnet

#if !defined(_TRACE_RMNET_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RMNET_H

#include <linux/skbuff.h>
#include <linux/tracepoint.h>

/*****************************************************************************/
/* Trace events for rmnet module */
/*****************************************************************************/
TRACE_EVENT(rmnet_xmit_skb,

	TP_PROTO(struct sk_buff *skb),

	TP_ARGS(skb),

	TP_STRUCT__entry(
		__string(dev_name, skb->dev->name)
		__field(unsigned int, len)
	),

	TP_fast_assign(
		__assign_str(dev_name, skb->dev->name);
		__entry->len = skb->len;
	),

	TP_printk("dev_name=%s len=%u", __get_str(dev_name), __entry->len)
);

/* The rmnet, rmnet_perf and rmnet_shs low/high/err events all carry the
 * same payload, so the layout is declared once as an event class and each
 * tracepoint is instantiated from it. The u64 fields are printed with
 * %llu; %lu would be wrong on 32-bit builds.
 */
DECLARE_EVENT_CLASS(rmnet_mod_template,

	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),

	TP_STRUCT__entry(
		__field(u8, func)
		__field(u8, evt)
		__field(u32, uint1)
		__field(u32, uint2)
		__field(u64, ulong1)
		__field(u64, ulong2)
		__field(void *, ptr1)
		__field(void *, ptr2)
	),

	TP_fast_assign(
		__entry->func = func;
		__entry->evt = evt;
		__entry->uint1 = uint1;
		__entry->uint2 = uint2;
		__entry->ulong1 = ulong1;
		__entry->ulong2 = ulong2;
		__entry->ptr1 = ptr1;
		__entry->ptr2 = ptr2;
	),

	TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%llu ul2:%llu p1:0x%pK p2:0x%pK",
		  __entry->func, __entry->evt,
		  __entry->uint1, __entry->uint2,
		  __entry->ulong1, __entry->ulong2,
		  __entry->ptr1, __entry->ptr2)
);

DEFINE_EVENT(rmnet_mod_template, rmnet_low,

	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

DEFINE_EVENT(rmnet_mod_template, rmnet_high,

	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

DEFINE_EVENT(rmnet_mod_template, rmnet_err,

	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

/*****************************************************************************/
/* Trace events for rmnet_perf module */
/*****************************************************************************/
DEFINE_EVENT(rmnet_mod_template, rmnet_perf_low,

	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

DEFINE_EVENT(rmnet_mod_template, rmnet_perf_high,

	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

DEFINE_EVENT(rmnet_mod_template, rmnet_perf_err,

	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

/*****************************************************************************/
/* Trace events for rmnet_shs module */
/*****************************************************************************/
DEFINE_EVENT(rmnet_mod_template, rmnet_shs_low,

	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

DEFINE_EVENT(rmnet_mod_template, rmnet_shs_high,

	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

DEFINE_EVENT(rmnet_mod_template, rmnet_shs_err,

	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

DEFINE_EVENT(rmnet_mod_template, rmnet_shs_wq_low,

	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

DEFINE_EVENT(rmnet_mod_template, rmnet_shs_wq_high,

	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

DEFINE_EVENT(rmnet_mod_template, rmnet_shs_wq_err,

	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),

	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);

#endif /* _TRACE_RMNET_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
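
As with dfc.h, a sketch of how a driver path might emit these events once the header is wired up; the function below is illustrative, not taken from the snapshot. Once registered, the events can be toggled at runtime via tracefs, e.g. /sys/kernel/tracing/events/rmnet/enable.

/* Hypothetical transmit path showing both event flavors. */
#include <linux/netdevice.h>
#define CREATE_TRACE_POINTS
#include <trace/events/rmnet.h>

static netdev_tx_t rmnet_example_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* Records dev_name and skb->len. */
	trace_rmnet_xmit_skb(skb);

	/* The class-based events take two u32s, two u64s and two pointers;
	 * unused slots are passed as 0/NULL by convention.
	 */
	trace_rmnet_low(1 /* func id */, 2 /* event id */,
			skb->len, 0, 0, 0, skb, NULL);

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}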
74	include/trace/events/wda.h	Normal file

@@ -0,0 +1,74 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM wda

#if !defined(_TRACE_WDA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WDA_H

#include <linux/tracepoint.h>

TRACE_EVENT(wda_set_powersave_mode,

	TP_PROTO(int enable),

	TP_ARGS(enable),

	TP_STRUCT__entry(
		__field(int, enable)
	),

	TP_fast_assign(
		__entry->enable = enable;
	),

	TP_printk("set powersave mode to %s",
		  __entry->enable ? "enable" : "disable")
);

TRACE_EVENT(wda_client_state_up,

	TP_PROTO(u32 instance, u32 ep_type, u32 iface),

	TP_ARGS(instance, ep_type, iface),

	TP_STRUCT__entry(
		__field(u32, instance)
		__field(u32, ep_type)
		__field(u32, iface)
	),

	TP_fast_assign(
		__entry->instance = instance;
		__entry->ep_type = ep_type;
		__entry->iface = iface;
	),

	TP_printk("Client: Connected with WDA instance=%u ep_type=%u i_id=%u",
		  __entry->instance, __entry->ep_type, __entry->iface)
);

TRACE_EVENT(wda_client_state_down,

	TP_PROTO(int from_cb),

	TP_ARGS(from_cb),

	TP_STRUCT__entry(
		__field(int, from_cb)
	),

	TP_fast_assign(
		__entry->from_cb = from_cb;
	),

	TP_printk("Client: Connection with WDA lost, exit by callback %d",
		  __entry->from_cb)
);

#endif /* _TRACE_WDA_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
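
A parallel sketch for the WDA events, again hypothetical rather than part of the snapshot: the power-save toggle is traced as a plain int, which the TP_printk renders as enable/disable.

#include <linux/types.h>
#define CREATE_TRACE_POINTS
#include <trace/events/wda.h>

/* Hypothetical helper: record a WDA power-save mode change. */
static void wda_example_set_powersave(bool enable)
{
	trace_wda_set_powersave_mode(enable ? 1 : 0);
}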