commit 653cd284a8
Recently, ops->init() and ops->dump() of all actions were modified to
always obtain tcf_lock when accessing private action state. Actions that
don't depend on tcf_lock for synchronization with their data path use the
non-bh locking API. However, tcf_lock is also taken in softirq context by
the rate estimator timer callback to protect the estimator stats. Change
ops->init() and ops->dump() of all actions to disable bh when taking
tcf_lock, to prevent the deadlock reported by the following lockdep
warning:

[ 105.470398] ================================
[ 105.475014] WARNING: inconsistent lock state
[ 105.479628] 4.18.0-rc8+ #664 Not tainted
[ 105.483897] --------------------------------
[ 105.488511] inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
[ 105.494871] swapper/16/0 [HC0[0]:SC1[1]:HE1:SE0] takes:
[ 105.500449] 00000000f86c012e (&(&p->tcfa_lock)->rlock){+.?.}, at: est_fetch_counters+0x3c/0xa0
[ 105.509696] {SOFTIRQ-ON-W} state was registered at:
[ 105.514925]   _raw_spin_lock+0x2c/0x40
[ 105.519022]   tcf_bpf_init+0x579/0x820 [act_bpf]
[ 105.523990]   tcf_action_init_1+0x4e4/0x660
[ 105.528518]   tcf_action_init+0x1ce/0x2d0
[ 105.532880]   tcf_exts_validate+0x1d8/0x200
[ 105.537416]   fl_change+0x55a/0x268b [cls_flower]
[ 105.542469]   tc_new_tfilter+0x748/0xa20
[ 105.546738]   rtnetlink_rcv_msg+0x56a/0x6d0
[ 105.551268]   netlink_rcv_skb+0x18d/0x200
[ 105.555628]   netlink_unicast+0x2d0/0x370
[ 105.559990]   netlink_sendmsg+0x3b9/0x6a0
[ 105.564349]   sock_sendmsg+0x6b/0x80
[ 105.568271]   ___sys_sendmsg+0x4a1/0x520
[ 105.572547]   __sys_sendmsg+0xd7/0x150
[ 105.576655]   do_syscall_64+0x72/0x2c0
[ 105.580757]   entry_SYSCALL_64_after_hwframe+0x49/0xbe
[ 105.586243] irq event stamp: 489296
[ 105.590084] hardirqs last enabled at (489296): [<ffffffffb507e639>] _raw_spin_unlock_irq+0x29/0x40
[ 105.599765] hardirqs last disabled at (489295): [<ffffffffb507e745>] _raw_spin_lock_irq+0x15/0x50
[ 105.609277] softirqs last enabled at (489292): [<ffffffffb413a6a3>] irq_enter+0x83/0xa0
[ 105.618001] softirqs last disabled at (489293): [<ffffffffb413a800>] irq_exit+0x140/0x190
[ 105.626813] other info that might help us debug this:
[ 105.633976]  Possible unsafe locking scenario:

[ 105.640526]        CPU0
[ 105.643325]        ----
[ 105.646125]   lock(&(&p->tcfa_lock)->rlock);
[ 105.650747]   <Interrupt>
[ 105.653717]     lock(&(&p->tcfa_lock)->rlock);
[ 105.658514]
                *** DEADLOCK ***

[ 105.665349] 1 lock held by swapper/16/0:
[ 105.669629]  #0: 00000000a640ad99 ((&est->timer)){+.-.}, at: call_timer_fn+0x10b/0x550
[ 105.678200] stack backtrace:
[ 105.683194] CPU: 16 PID: 0 Comm: swapper/16 Not tainted 4.18.0-rc8+ #664
[ 105.690249] Hardware name: Supermicro SYS-2028TP-DECR/X10DRT-P, BIOS 2.0b 03/30/2017
[ 105.698626] Call Trace:
[ 105.701421]  <IRQ>
[ 105.703791]  dump_stack+0x92/0xeb
[ 105.707461]  print_usage_bug+0x336/0x34c
[ 105.711744]  mark_lock+0x7c9/0x980
[ 105.715500]  ? print_shortest_lock_dependencies+0x2e0/0x2e0
[ 105.721424]  ? check_usage_forwards+0x230/0x230
[ 105.726315]  __lock_acquire+0x923/0x26f0
[ 105.730597]  ? debug_show_all_locks+0x240/0x240
[ 105.735478]  ? mark_lock+0x493/0x980
[ 105.739412]  ? check_chain_key+0x140/0x1f0
[ 105.743861]  ? __lock_acquire+0x836/0x26f0
[ 105.748323]  ? lock_acquire+0x12e/0x290
[ 105.752516]  lock_acquire+0x12e/0x290
[ 105.756539]  ? est_fetch_counters+0x3c/0xa0
[ 105.761084]  _raw_spin_lock+0x2c/0x40
[ 105.765099]  ? est_fetch_counters+0x3c/0xa0
[ 105.769633]  est_fetch_counters+0x3c/0xa0
[ 105.773995]  est_timer+0x87/0x390
[ 105.777670]  ? est_fetch_counters+0xa0/0xa0
[ 105.782210]  ? lock_acquire+0x12e/0x290
[ 105.786410]  call_timer_fn+0x161/0x550
[ 105.790512]  ? est_fetch_counters+0xa0/0xa0
[ 105.795055]  ? del_timer_sync+0xd0/0xd0
[ 105.799249]  ? __lock_is_held+0x93/0x110
[ 105.803531]  ? mark_held_locks+0x20/0xe0
[ 105.807813]  ? _raw_spin_unlock_irq+0x29/0x40
[ 105.812525]  ? est_fetch_counters+0xa0/0xa0
[ 105.817069]  ? est_fetch_counters+0xa0/0xa0
[ 105.821610]  run_timer_softirq+0x3c4/0x9f0
[ 105.826064]  ? lock_acquire+0x12e/0x290
[ 105.830257]  ? __bpf_trace_timer_class+0x10/0x10
[ 105.835237]  ? __lock_is_held+0x25/0x110
[ 105.839517]  __do_softirq+0x11d/0x7bf
[ 105.843542]  irq_exit+0x140/0x190
[ 105.847208]  smp_apic_timer_interrupt+0xac/0x3b0
[ 105.852182]  apic_timer_interrupt+0xf/0x20
[ 105.856628]  </IRQ>
[ 105.859081] RIP: 0010:cpuidle_enter_state+0xd8/0x4d0
[ 105.864395] Code: 46 ff 48 89 44 24 08 0f 1f 44 00 00 31 ff e8 cf ec 46 ff 80 7c 24 07 00 0f 85 1d 02 00 00 e8 9f 90 4b ff fb 66 0f 1f 44 00 00 <4c> 8b 6c 24 08 4d 29 fd 0f 80 36 03 00 00 4c 89 e8 48 ba cf f7 53
[ 105.884288] RSP: 0018:ffff8803ad94fd20 EFLAGS: 00000246 ORIG_RAX: ffffffffffffff13
[ 105.892494] RAX: 0000000000000000 RBX: ffffe8fb300829c0 RCX: ffffffffb41e19e1
[ 105.899988] RDX: 0000000000000007 RSI: dffffc0000000000 RDI: ffff8803ad9358ac
[ 105.907503] RBP: ffffffffb6636300 R08: 0000000000000004 R09: 0000000000000000
[ 105.914997] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000004
[ 105.922487] R13: ffffffffb6636140 R14: ffffffffb66362d8 R15: 000000188d36091b
[ 105.929988]  ? trace_hardirqs_on_caller+0x141/0x2d0
[ 105.935232]  do_idle+0x28e/0x320
[ 105.938817]  ? arch_cpu_idle_exit+0x40/0x40
[ 105.943361]  ? mark_lock+0x8c1/0x980
[ 105.947295]  ? _raw_spin_unlock_irqrestore+0x32/0x60
[ 105.952619]  cpu_startup_entry+0xc2/0xd0
[ 105.956900]  ? cpu_in_idle+0x20/0x20
[ 105.960830]  ? _raw_spin_unlock_irqrestore+0x32/0x60
[ 105.966146]  ? trace_hardirqs_on_caller+0x141/0x2d0
[ 105.971391]  start_secondary+0x2b5/0x360
[ 105.975669]  ? set_cpu_sibling_map+0x1330/0x1330
[ 105.980654]  secondary_startup_64+0xa5/0xb0

Taking tcf_lock in the sample action with bh disabled causes lockdep to
issue a warning about a possible irq lock inversion dependency between
tcf_lock and psample_groups_lock, which is taken while holding tcf_lock
in sample init:

[ 162.108959]  Possible interrupt unsafe locking scenario:

[ 162.116386]        CPU0                    CPU1
[ 162.121277]        ----                    ----
[ 162.126162]   lock(psample_groups_lock);
[ 162.130447]                                local_irq_disable();
[ 162.136772]                                lock(&(&p->tcfa_lock)->rlock);
[ 162.143957]                                lock(psample_groups_lock);
[ 162.150813]   <Interrupt>
[ 162.153808]     lock(&(&p->tcfa_lock)->rlock);
[ 162.158608]
                *** DEADLOCK ***

In order to prevent a potential lock inversion dependency between
tcf_lock and psample_groups_lock, extract the call to psample_group_get()
from the tcf_lock protected section in the sample action init function.

Fixes: 4e232818bd ("net: sched: act_mirred: remove dependency on rtnl lock")
Fixes: 764e9a2448 ("net: sched: act_vlan: remove dependency on rtnl lock")
Fixes: 729e012609 ("net: sched: act_tunnel_key: remove dependency on rtnl lock")
Fixes: d772849566 ("net: sched: act_sample: remove dependency on rtnl lock")
Fixes: e8917f4370 ("net: sched: act_gact: remove dependency on rtnl lock")
Fixes: b6a2b971c0 ("net: sched: act_csum: remove dependency on rtnl lock")
Fixes: 2142236b45 ("net: sched: act_bpf: remove dependency on rtnl lock")
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
/*
 * net/sched/act_gact.c         Generic actions
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * copyright    Jamal Hadi Salim (2002-4)
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_gact.h>
#include <net/tc_act/tc_gact.h>

static unsigned int gact_net_id;
static struct tc_action_ops act_gact_ops;

#ifdef CONFIG_GACT_PROB
static int gact_net_rand(struct tcf_gact *gact)
{
        smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
        if (prandom_u32() % gact->tcfg_pval)
                return gact->tcf_action;
        return gact->tcfg_paction;
}

static int gact_determ(struct tcf_gact *gact)
{
        u32 pack = atomic_inc_return(&gact->packets);

        smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
        if (pack % gact->tcfg_pval)
                return gact->tcf_action;
        return gact->tcfg_paction;
}

typedef int (*g_rand)(struct tcf_gact *gact);
static g_rand gact_rand[MAX_RAND] = { NULL, gact_net_rand, gact_determ };
#endif /* CONFIG_GACT_PROB */

static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
        [TCA_GACT_PARMS] = { .len = sizeof(struct tc_gact) },
        [TCA_GACT_PROB]  = { .len = sizeof(struct tc_gact_p) },
};

static int tcf_gact_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
                         struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, gact_net_id);
        struct nlattr *tb[TCA_GACT_MAX + 1];
        struct tc_gact *parm;
        struct tcf_gact *gact;
        int ret = 0;
        int err;
#ifdef CONFIG_GACT_PROB
        struct tc_gact_p *p_parm = NULL;
#endif

        if (nla == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_GACT_MAX, nla, gact_policy, NULL);
        if (err < 0)
                return err;

        if (tb[TCA_GACT_PARMS] == NULL)
                return -EINVAL;
        parm = nla_data(tb[TCA_GACT_PARMS]);

#ifndef CONFIG_GACT_PROB
        if (tb[TCA_GACT_PROB] != NULL)
                return -EOPNOTSUPP;
#else
        if (tb[TCA_GACT_PROB]) {
                p_parm = nla_data(tb[TCA_GACT_PROB]);
                if (p_parm->ptype >= MAX_RAND)
                        return -EINVAL;
        }
#endif

        err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
        if (!err) {
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_gact_ops, bind, true);
                if (ret) {
                        tcf_idr_cleanup(tn, parm->index);
                        return ret;
                }
                ret = ACT_P_CREATED;
        } else if (err > 0) {
                if (bind)/* dont override defaults */
                        return 0;
                if (!ovr) {
                        tcf_idr_release(*a, bind);
                        return -EEXIST;
                }
        } else {
                return err;
        }

        gact = to_gact(*a);

        spin_lock_bh(&gact->tcf_lock);
        gact->tcf_action = parm->action;
#ifdef CONFIG_GACT_PROB
        if (p_parm) {
                gact->tcfg_paction = p_parm->paction;
                gact->tcfg_pval = max_t(u16, 1, p_parm->pval);
                /* Make sure tcfg_pval is written before tcfg_ptype
                 * coupled with smp_rmb() in gact_net_rand() & gact_determ()
                 */
                smp_wmb();
                gact->tcfg_ptype = p_parm->ptype;
        }
#endif
        spin_unlock_bh(&gact->tcf_lock);

        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
}

static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a,
                        struct tcf_result *res)
{
        struct tcf_gact *gact = to_gact(a);
        int action = READ_ONCE(gact->tcf_action);

#ifdef CONFIG_GACT_PROB
        {
        u32 ptype = READ_ONCE(gact->tcfg_ptype);

        if (ptype)
                action = gact_rand[ptype](gact);
        }
#endif
        bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), skb);
        if (action == TC_ACT_SHOT)
                qstats_drop_inc(this_cpu_ptr(gact->common.cpu_qstats));

        tcf_lastuse_update(&gact->tcf_tm);

        return action;
}

static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
                                  u64 lastuse)
{
        struct tcf_gact *gact = to_gact(a);
        int action = READ_ONCE(gact->tcf_action);
        struct tcf_t *tm = &gact->tcf_tm;

        _bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), bytes,
                           packets);
        if (action == TC_ACT_SHOT)
                this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;

        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
                         int bind, int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_gact *gact = to_gact(a);
        struct tc_gact opt = {
                .index   = gact->tcf_index,
                .refcnt  = refcount_read(&gact->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&gact->tcf_bindcnt) - bind,
        };
        struct tcf_t t;

        spin_lock_bh(&gact->tcf_lock);
        opt.action = gact->tcf_action;
        if (nla_put(skb, TCA_GACT_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
#ifdef CONFIG_GACT_PROB
        if (gact->tcfg_ptype) {
                struct tc_gact_p p_opt = {
                        .paction = gact->tcfg_paction,
                        .pval    = gact->tcfg_pval,
                        .ptype   = gact->tcfg_ptype,
                };

                if (nla_put(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt))
                        goto nla_put_failure;
        }
#endif
        tcf_tm_dump(&t, &gact->tcf_tm);
        if (nla_put_64bit(skb, TCA_GACT_TM, sizeof(t), &t, TCA_GACT_PAD))
                goto nla_put_failure;
        spin_unlock_bh(&gact->tcf_lock);

        return skb->len;

nla_put_failure:
        spin_unlock_bh(&gact->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}

static int tcf_gact_walker(struct net *net, struct sk_buff *skb,
                           struct netlink_callback *cb, int type,
                           const struct tc_action_ops *ops,
                           struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, gact_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_gact_search(struct net *net, struct tc_action **a, u32 index,
                           struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, gact_net_id);

        return tcf_idr_search(tn, a, index);
}

static size_t tcf_gact_get_fill_size(const struct tc_action *act)
{
        size_t sz = nla_total_size(sizeof(struct tc_gact)); /* TCA_GACT_PARMS */

#ifdef CONFIG_GACT_PROB
        if (to_gact(act)->tcfg_ptype)
                /* TCA_GACT_PROB */
                sz += nla_total_size(sizeof(struct tc_gact_p));
#endif

        return sz;
}

static int tcf_gact_delete(struct net *net, u32 index)
{
        struct tc_action_net *tn = net_generic(net, gact_net_id);

        return tcf_idr_delete_index(tn, index);
}

static struct tc_action_ops act_gact_ops = {
        .kind           = "gact",
        .type           = TCA_ACT_GACT,
        .owner          = THIS_MODULE,
        .act            = tcf_gact_act,
        .stats_update   = tcf_gact_stats_update,
        .dump           = tcf_gact_dump,
        .init           = tcf_gact_init,
        .walk           = tcf_gact_walker,
        .lookup         = tcf_gact_search,
        .get_fill_size  = tcf_gact_get_fill_size,
        .delete         = tcf_gact_delete,
        .size           = sizeof(struct tcf_gact),
};

static __net_init int gact_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, gact_net_id);

        return tc_action_net_init(tn, &act_gact_ops);
}

static void __net_exit gact_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, gact_net_id);
}

static struct pernet_operations gact_net_ops = {
        .init = gact_init_net,
        .exit_batch = gact_exit_net,
        .id   = &gact_net_id,
        .size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
MODULE_DESCRIPTION("Generic Classifier actions");
MODULE_LICENSE("GPL");

static int __init gact_init_module(void)
{
#ifdef CONFIG_GACT_PROB
        pr_info("GACT probability on\n");
#else
        pr_info("GACT probability NOT on\n");
#endif

        return tcf_register_action(&act_gact_ops, &gact_net_ops);
}

static void __exit gact_cleanup_module(void)
{
        tcf_unregister_action(&act_gact_ops, &gact_net_ops);
}

module_init(gact_init_module);
module_exit(gact_cleanup_module);