kernel-fxtec-pro1x/net/ipv4/fib_rules.c
commit ae299fc051 ("net: add fib_rules_ops to flush_cache method")
Author: Denis V. Lunev <den@openvz.org>
Date:   2008-07-05 19:01:28 -07:00

    This is required to pass namespace context into rt_cache_flush called
    from ->flush_cache.

    Signed-off-by: Denis V. Lunev <den@openvz.org>
    Signed-off-by: David S. Miller <davem@davemloft.net>

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: policy rules.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Thomas Graf <tgraf@suug.ch>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Rani Assaf	:	local_rule cannot be deleted
 *		Marc Boucher	:	routing by fwmark
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip_fib.h>
#include <net/fib_rules.h>
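
/* IPv4-specific view of a policy rule: the generic fib_rule plus
 * source/destination prefixes, TOS and, optionally, the routing realms
 * classid carried in the FRA_FLOW attribute.
 */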
struct fib4_rule
{
	struct fib_rule		common;
	u8			dst_len;
	u8			src_len;
	u8			tos;
	__be32			src;
	__be32			srcmask;
	__be32			dst;
	__be32			dstmask;
#ifdef CONFIG_NET_CLS_ROUTE
	u32			tclassid;
#endif
};
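
/* Return the realms classid of the rule that resolved the lookup, or 0
 * if the result was not obtained through a rule.
 */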
#ifdef CONFIG_NET_CLS_ROUTE
u32 fib_rules_tclass(struct fib_result *res)
{
	return res->r ? ((struct fib4_rule *) res->r)->tclassid : 0;
}
#endif
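
/* Resolve an IPv4 flow by walking this namespace's rule list; the rule
 * that terminated the lookup is saved in res->r (used e.g. by
 * fib_rules_tclass() above).
 */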
int fib_lookup(struct net *net, struct flowi *flp, struct fib_result *res)
{
	struct fib_lookup_arg arg = {
		.result = res,
	};
	int err;

	err = fib_rules_lookup(net->ipv4.rules_ops, flp, 0, &arg);
	res->r = arg.rule;

	return err;
}
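
/* Apply a matching rule: translate the action into an error code or, for
 * FR_ACT_TO_TBL, look the flow up in the rule's table.  -EAGAIN makes the
 * fib_rules core continue with the next rule.
 */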
static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	int err = -EAGAIN;
	struct fib_table *tbl;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;

	case FR_ACT_UNREACHABLE:
		err = -ENETUNREACH;
		goto errout;

	case FR_ACT_PROHIBIT:
		err = -EACCES;
		goto errout;

	case FR_ACT_BLACKHOLE:
	default:
		err = -EINVAL;
		goto errout;
	}

	if ((tbl = fib_get_table(rule->fr_net, rule->table)) == NULL)
		goto errout;

	err = tbl->tb_lookup(tbl, flp, (struct fib_result *) arg->result);
	if (err > 0)
		err = -EAGAIN;
errout:
	return err;
}
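
/* IPv4-specific selectors: source/destination prefix and TOS.  Generic
 * selectors (iif, fwmark, ...) are checked by the fib_rules core before
 * this is called.
 */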
static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	struct fib4_rule *r = (struct fib4_rule *) rule;
	__be32 daddr = fl->fl4_dst;
	__be32 saddr = fl->fl4_src;

	if (((saddr ^ r->src) & r->srcmask) ||
	    ((daddr ^ r->dst) & r->dstmask))
		return 0;

	if (r->tos && (r->tos != fl->fl4_tos))
		return 0;

	return 1;
}
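
/* Find the lowest unused table id and create a table for it; used when a
 * rule targeting a table is added without an explicit table id.
 */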
static struct fib_table *fib_empty_table(struct net *net)
{
	u32 id;

	for (id = 1; id <= RT_TABLE_MAX; id++)
		if (fib_get_table(net, id) == NULL)
			return fib_new_table(net, id);
	return NULL;
}

static const struct nla_policy fib4_rule_policy[FRA_MAX+1] = {
	FRA_GENERIC_POLICY,
	[FRA_FLOW]	= { .type = NLA_U32 },
};
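
/* Parse the IPv4-specific attributes of a new rule.  When no table was
 * given and the action is FR_ACT_TO_TBL, an empty table is allocated.
 */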
static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct nlmsghdr *nlh, struct fib_rule_hdr *frh,
			       struct nlattr **tb)
{
	struct net *net = sock_net(skb->sk);
	int err = -EINVAL;
	struct fib4_rule *rule4 = (struct fib4_rule *) rule;

	if (frh->tos & ~IPTOS_TOS_MASK)
		goto errout;

	if (rule->table == RT_TABLE_UNSPEC) {
		if (rule->action == FR_ACT_TO_TBL) {
			struct fib_table *table;

			table = fib_empty_table(net);
			if (table == NULL) {
				err = -ENOBUFS;
				goto errout;
			}

			rule->table = table->tb_id;
		}
	}

	if (frh->src_len)
		rule4->src = nla_get_be32(tb[FRA_SRC]);

	if (frh->dst_len)
		rule4->dst = nla_get_be32(tb[FRA_DST]);

#ifdef CONFIG_NET_CLS_ROUTE
	if (tb[FRA_FLOW])
		rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
#endif

	rule4->src_len = frh->src_len;
	rule4->srcmask = inet_make_mask(rule4->src_len);
	rule4->dst_len = frh->dst_len;
	rule4->dstmask = inet_make_mask(rule4->dst_len);
	rule4->tos = frh->tos;

	err = 0;
errout:
	return err;
}
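
/* Compare an existing rule against the IPv4-specific attributes of a
 * netlink request; returns 1 when they match.
 */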
static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	struct fib4_rule *rule4 = (struct fib4_rule *) rule;

	if (frh->src_len && (rule4->src_len != frh->src_len))
		return 0;

	if (frh->dst_len && (rule4->dst_len != frh->dst_len))
		return 0;

	if (frh->tos && (rule4->tos != frh->tos))
		return 0;

#ifdef CONFIG_NET_CLS_ROUTE
	if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW])))
		return 0;
#endif

	if (frh->src_len && (rule4->src != nla_get_be32(tb[FRA_SRC])))
		return 0;

	if (frh->dst_len && (rule4->dst != nla_get_be32(tb[FRA_DST])))
		return 0;

	return 1;
}
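
/* Dump the IPv4-specific part of a rule into a netlink message. */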
static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct nlmsghdr *nlh, struct fib_rule_hdr *frh)
{
	struct fib4_rule *rule4 = (struct fib4_rule *) rule;

	frh->family = AF_INET;
	frh->dst_len = rule4->dst_len;
	frh->src_len = rule4->src_len;
	frh->tos = rule4->tos;

	if (rule4->dst_len)
		NLA_PUT_BE32(skb, FRA_DST, rule4->dst);

	if (rule4->src_len)
		NLA_PUT_BE32(skb, FRA_SRC, rule4->src);

#ifdef CONFIG_NET_CLS_ROUTE
	if (rule4->tclassid)
		NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid);
#endif
	return 0;

nla_put_failure:
	return -ENOBUFS;
}
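
/* Default preference for a new rule: one less than the preference of the
 * second rule on the list (normally the permanent local rule comes first),
 * or 0 when that cannot be done.
 */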
static u32 fib4_rule_default_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}
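
/* Worst-case size of the IPv4-specific attributes of a dumped rule. */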
static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule)
{
	return nla_total_size(4) /* dst */
	       + nla_total_size(4) /* src */
	       + nla_total_size(4); /* flow */
}
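
/* The rule set changed: flush the routing cache of the namespace that
 * owns these rules (ops->fro_net).
 */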
static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
{
	rt_cache_flush(ops->fro_net, -1);
}

static struct fib_rules_ops fib4_rules_ops_template = {
	.family		= AF_INET,
	.rule_size	= sizeof(struct fib4_rule),
	.addr_size	= sizeof(u32),
	.action		= fib4_rule_action,
	.match		= fib4_rule_match,
	.configure	= fib4_rule_configure,
	.compare	= fib4_rule_compare,
	.fill		= fib4_rule_fill,
	.default_pref	= fib4_rule_default_pref,
	.nlmsg_payload	= fib4_rule_nlmsg_payload,
	.flush_cache	= fib4_rule_flush_cache,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= fib4_rule_policy,
	.owner		= THIS_MODULE,
};
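
/* Install the three default rules: local (pref 0, permanent), main
 * (pref 0x7FFE) and default (pref 0x7FFF).
 */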
static int fib_default_rules_init(struct fib_rules_ops *ops)
{
	int err;

	err = fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, FIB_RULE_PERMANENT);
	if (err < 0)
		return err;
	err = fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN, 0);
	if (err < 0)
		return err;
	err = fib_default_rule_add(ops, 0x7FFF, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		return err;
	return 0;
}
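
/* Per-namespace setup: clone the ops template, register it and add the
 * default rules.
 */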
int __net_init fib4_rules_init(struct net *net)
{
	int err;
	struct fib_rules_ops *ops;

	ops = kmemdup(&fib4_rules_ops_template, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return -ENOMEM;
	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	fib_rules_register(ops);

	err = fib_default_rules_init(ops);
	if (err < 0)
		goto fail;
	net->ipv4.rules_ops = ops;
	return 0;

fail:
	/* also cleans all rules already added */
	fib_rules_unregister(ops);
	kfree(ops);
	return err;
}
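
/* Per-namespace teardown: remove all rules and free the cloned ops. */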
void __net_exit fib4_rules_exit(struct net *net)
{
	fib_rules_unregister(net->ipv4.rules_ops);
	kfree(net->ipv4.rules_ops);
}