Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following patchset contains updates for your net-next tree;
they are:

1) Use kvfree() helper function from x_tables, from Eric Dumazet.

2) Remove the extra timer from the conntrack ecache extension and use
   a workqueue to redeliver lost events to userspace instead, from
   Florian Westphal.

3) Removal of the ulog targets for ebtables and iptables. The nflog
   infrastructure superseded this almost 9 years ago; time to get rid
   of this code.

4) Replace the list of loggers with an array, now that we can only have
   two possible non-overlapping logger flavours, i.e. kernel ring buffer
   and netlink logging (a small model covering this and point 6 is
   appended after this list).

5) Move Eric Dumazet's log buffer code to nf_log so it can be reused
   by all of the supported per-family loggers (a usage sketch follows
   this list).

6) Consolidate nf_log_packet() as a unified interface for packet logging.
   After this patch, if the struct nf_loginfo is available, it explicitly
   selects the logger that is used.

7) Move ip and ip6 logging code from xt_LOG to the corresponding
   per-family loggers. Thus, x_tables and nf_tables share the same code
   for packet logging.

8) Add generic ARP packet logger, which is used by nf_tables. The
   format aims to be consistent with the output of xt_LOG.

9) Add generic bridge packet logger. Again, this is used by nf_tables
   and it routes the packets to the real family loggers. As a result,
   we get a consistent logging format for the bridge family. The ebt_log
   logging code has been intentionally left in place so as not to break
   backward compatibility, since its logging output differs from xt_LOG.

10) Update nft_log to explicitly request the required family logger when
    needed.

11) Finish nft_log so it supports the arp, ip, ip6, bridge and inet
    families, allowing selection between netlink and kernel ring buffer
    logging (a simplified sketch of the selection is appended after this
    list).

12) Several fixes for issues spotted by robots after the netfilter core
    logging changes.

13) Use IS_ENABLED() macros whenever possible in the netfilter tree,
    from Duan Jiong.

14) Removal of a couple of unnecessary branches before kfree(), from
    Fabian Frederick (see the short note after this list).
====================
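
To make points 4) and 6) above concrete, here is a small, self-contained
user-space model of the dispatch idea (illustrative only, not code from
this patchset): loggers sit in a fixed two-dimensional array indexed by
protocol family and logger type, and the packet-logging entry point uses
the type carried in the loginfo structure, when one is supplied, to select
the backend explicitly. The kernel version additionally falls back to the
per-netns default logger and protects the array with RCU; all names below
are made up for the example.

#include <stdio.h>

enum log_type { LOG_TYPE_LOG = 0, LOG_TYPE_ULOG, LOG_TYPE_MAX };
#define NUMPROTO 13			/* stands in for NFPROTO_NUMPROTO */

struct loginfo { int type; };		/* stands in for struct nf_loginfo */
typedef void (*logfn)(int pf, const char *prefix);

/* one slot per (family, type) pair instead of a per-family list */
static logfn loggers[NUMPROTO][LOG_TYPE_MAX];

static void ringbuf_logger(int pf, const char *prefix)
{
	printf("LOG  pf=%d %s\n", pf, prefix);	/* kernel ring buffer flavour */
}

static void netlink_logger(int pf, const char *prefix)
{
	printf("ULOG pf=%d %s\n", pf, prefix);	/* netlink (nfnetlink_log) flavour */
}

/* models nf_log_packet(): an explicit loginfo selects the logger type */
static void log_packet(int pf, const struct loginfo *li, const char *prefix)
{
	int type = li ? li->type : LOG_TYPE_LOG;
	logfn fn = loggers[pf][type];

	if (fn)
		fn(pf, prefix);
}

int main(void)
{
	struct loginfo li = { .type = LOG_TYPE_ULOG };

	loggers[2][LOG_TYPE_LOG]  = ringbuf_logger;	/* 2 ~ NFPROTO_IPV4 */
	loggers[2][LOG_TYPE_ULOG] = netlink_logger;

	log_packet(2, NULL, "no loginfo, default type");
	log_packet(2, &li, "loginfo explicitly selects netlink");
	return 0;
}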
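
The nf_log_buf helpers from point 5) (their declarations appear in the
nf_log.h hunk further down) follow a simple open/add/close pattern. A
minimal usage sketch, assuming the usual logger callback context where
skb, in and out are the callback arguments:

struct nf_log_buf *m = nf_log_buf_open();

nf_log_buf_add(m, "IN=%s OUT=%s ",
	       in ? in->name : "", out ? out->name : "");
nf_log_buf_add(m, "MARK=0x%x ", skb->mark);
nf_log_buf_close(m);	/* emits the assembled line and releases the buffer */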
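
For points 10) and 11), the choice between the two flavours in nft_log
comes down to which netlink attributes userspace provides. The following
is a rough, simplified sketch of the init-time logic, not the exact code
in this series: nf_logger_find_get() and the NF_LOG_TYPE_* values are the
ones added in the nf_log.h hunk below, while the function name and the
remaining details are assumed for illustration.

static int example_log_init(u8 family, const struct nlattr * const tb[],
			    struct nf_loginfo *li)
{
	if (tb[NFTA_LOG_GROUP]) {
		/* netlink (nfnetlink_log) delivery was requested */
		li->type = NF_LOG_TYPE_ULOG;
		li->u.ulog.group = ntohs(nla_get_be16(tb[NFTA_LOG_GROUP]));
	} else {
		/* otherwise log to the kernel ring buffer */
		li->type = NF_LOG_TYPE_LOG;
		if (tb[NFTA_LOG_LEVEL])
			li->u.log.level = ntohl(nla_get_be32(tb[NFTA_LOG_LEVEL]));
	}

	/* take a reference on the per-family logger of the selected type */
	return nf_logger_find_get(family, li->type);
}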
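
Point 14) is the usual cleanup of redundant NULL checks: kfree(NULL) is a
no-op, so the guarding branch can simply be dropped. Schematically (the
field name is a placeholder):

/* before */
if (priv->data)
	kfree(priv->data);

/* after */
kfree(priv->data);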

Signed-off-by: David S. Miller <davem@davemloft.net>
commit a8138f42d4, committed by David S. Miller, 2014-07-20 21:01:43 -07:00
49 changed files with 1698 additions and 2235 deletions


@ -18,7 +18,6 @@ struct nf_conntrack_ecache {
u16 ctmask; /* bitmask of ct events to be delivered */
u16 expmask; /* bitmask of expect events to be delivered */
u32 portid; /* netlink portid of destroyer */
struct timer_list timeout;
};
static inline struct nf_conntrack_ecache *
@ -216,8 +215,23 @@ void nf_conntrack_ecache_pernet_fini(struct net *net);
int nf_conntrack_ecache_init(void);
void nf_conntrack_ecache_fini(void);
#else /* CONFIG_NF_CONNTRACK_EVENTS */
static inline void nf_conntrack_ecache_delayed_work(struct net *net)
{
if (!delayed_work_pending(&net->ct.ecache_dwork)) {
schedule_delayed_work(&net->ct.ecache_dwork, HZ);
net->ct.ecache_dwork_pending = true;
}
}
static inline void nf_conntrack_ecache_work(struct net *net)
{
if (net->ct.ecache_dwork_pending) {
net->ct.ecache_dwork_pending = false;
mod_delayed_work(system_wq, &net->ct.ecache_dwork, 0);
}
}
#else /* CONFIG_NF_CONNTRACK_EVENTS */
static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
struct nf_conn *ct) {}
static inline int nf_conntrack_eventmask_report(unsigned int eventmask,
@ -255,6 +269,14 @@ static inline int nf_conntrack_ecache_init(void)
static inline void nf_conntrack_ecache_fini(void)
{
}
static inline void nf_conntrack_ecache_delayed_work(struct net *net)
{
}
static inline void nf_conntrack_ecache_work(struct net *net)
{
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
#endif /*_NF_CONNTRACK_ECACHE_H*/


@ -12,8 +12,11 @@
#define NF_LOG_UID 0x08 /* Log UID owning local socket */
#define NF_LOG_MASK 0x0f
#define NF_LOG_TYPE_LOG 0x01
#define NF_LOG_TYPE_ULOG 0x02
enum nf_log_type {
NF_LOG_TYPE_LOG = 0,
NF_LOG_TYPE_ULOG,
NF_LOG_TYPE_MAX
};
struct nf_loginfo {
u_int8_t type;
@ -40,10 +43,10 @@ typedef void nf_logfn(struct net *net,
const char *prefix);
struct nf_logger {
struct module *me;
nf_logfn *logfn;
char *name;
struct list_head list[NFPROTO_NUMPROTO];
char *name;
enum nf_log_type type;
nf_logfn *logfn;
struct module *me;
};
/* Function to register/unregister log function. */
@ -58,6 +61,13 @@ int nf_log_bind_pf(struct net *net, u_int8_t pf,
const struct nf_logger *logger);
void nf_log_unbind_pf(struct net *net, u_int8_t pf);
int nf_logger_find_get(int pf, enum nf_log_type type);
void nf_logger_put(int pf, enum nf_log_type type);
void nf_logger_request_module(int pf, enum nf_log_type type);
#define MODULE_ALIAS_NF_LOGGER(family, type) \
MODULE_ALIAS("nf-logger-" __stringify(family) "-" __stringify(type))
/* Calls the registered backend logging function */
__printf(8, 9)
void nf_log_packet(struct net *net,
@ -69,4 +79,24 @@ void nf_log_packet(struct net *net,
const struct nf_loginfo *li,
const char *fmt, ...);
struct nf_log_buf;
struct nf_log_buf *nf_log_buf_open(void);
__printf(2, 3) int nf_log_buf_add(struct nf_log_buf *m, const char *f, ...);
void nf_log_buf_close(struct nf_log_buf *m);
/* common logging functions */
int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
u8 proto, int fragment, unsigned int offset);
int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
u8 proto, int fragment, unsigned int offset,
unsigned int logflags);
void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk);
void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix);
#endif /* _NF_LOG_H */


@ -1,54 +0,0 @@
#define S_SIZE (1024 - (sizeof(unsigned int) + 1))
struct sbuff {
unsigned int count;
char buf[S_SIZE + 1];
};
static struct sbuff emergency, *emergency_ptr = &emergency;
static __printf(2, 3) int sb_add(struct sbuff *m, const char *f, ...)
{
va_list args;
int len;
if (likely(m->count < S_SIZE)) {
va_start(args, f);
len = vsnprintf(m->buf + m->count, S_SIZE - m->count, f, args);
va_end(args);
if (likely(m->count + len < S_SIZE)) {
m->count += len;
return 0;
}
}
m->count = S_SIZE;
printk_once(KERN_ERR KBUILD_MODNAME " please increase S_SIZE\n");
return -1;
}
static struct sbuff *sb_open(void)
{
struct sbuff *m = kmalloc(sizeof(*m), GFP_ATOMIC);
if (unlikely(!m)) {
local_bh_disable();
do {
m = xchg(&emergency_ptr, NULL);
} while (!m);
}
m->count = 0;
return m;
}
static void sb_close(struct sbuff *m)
{
m->buf[m->count] = 0;
printk("%s\n", m->buf);
if (likely(m != &emergency))
kfree(m);
else {
emergency_ptr = m;
local_bh_enable();
}
}


@ -4,6 +4,7 @@
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/seqlock.h>
@ -73,6 +74,10 @@ struct ct_pcpu {
struct netns_ct {
atomic_t count;
unsigned int expect_count;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
struct delayed_work ecache_dwork;
bool ecache_dwork_pending;
#endif
#ifdef CONFIG_SYSCTL
struct ctl_table_header *sysctl_header;
struct ctl_table_header *acct_sysctl_header;
@ -82,7 +87,6 @@ struct netns_ct {
#endif
char *slabname;
unsigned int sysctl_log_invalid; /* Log invalid packets */
unsigned int sysctl_events_retry_timeout;
int sysctl_events;
int sysctl_acct;
int sysctl_auto_assign_helper;


@ -697,6 +697,8 @@ enum nft_counter_attributes {
* @NFTA_LOG_PREFIX: prefix to prepend to log messages (NLA_STRING)
* @NFTA_LOG_SNAPLEN: length of payload to include in netlink message (NLA_U32)
* @NFTA_LOG_QTHRESHOLD: queue threshold (NLA_U32)
* @NFTA_LOG_LEVEL: log level (NLA_U32)
* @NFTA_LOG_FLAGS: logging flags (NLA_U32)
*/
enum nft_log_attributes {
NFTA_LOG_UNSPEC,
@ -704,6 +706,8 @@ enum nft_log_attributes {
NFTA_LOG_PREFIX,
NFTA_LOG_SNAPLEN,
NFTA_LOG_QTHRESHOLD,
NFTA_LOG_LEVEL,
NFTA_LOG_FLAGS,
__NFTA_LOG_MAX
};
#define NFTA_LOG_MAX (__NFTA_LOG_MAX - 1)


@ -14,6 +14,5 @@ header-y += ebt_nflog.h
header-y += ebt_pkttype.h
header-y += ebt_redirect.h
header-y += ebt_stp.h
header-y += ebt_ulog.h
header-y += ebt_vlan.h
header-y += ebtables.h


@ -1,38 +0,0 @@
#ifndef _EBT_ULOG_H
#define _EBT_ULOG_H
#include <linux/types.h>
#define EBT_ULOG_DEFAULT_NLGROUP 0
#define EBT_ULOG_DEFAULT_QTHRESHOLD 1
#define EBT_ULOG_MAXNLGROUPS 32 /* hardcoded netlink max */
#define EBT_ULOG_PREFIX_LEN 32
#define EBT_ULOG_MAX_QLEN 50
#define EBT_ULOG_WATCHER "ulog"
#define EBT_ULOG_VERSION 1
struct ebt_ulog_info {
__u32 nlgroup;
unsigned int cprange;
unsigned int qthreshold;
char prefix[EBT_ULOG_PREFIX_LEN];
};
typedef struct ebt_ulog_packet_msg {
int version;
char indev[IFNAMSIZ];
char outdev[IFNAMSIZ];
char physindev[IFNAMSIZ];
char physoutdev[IFNAMSIZ];
char prefix[EBT_ULOG_PREFIX_LEN];
struct timeval stamp;
unsigned long mark;
unsigned int hook;
size_t data_len;
/* The complete packet, including Ethernet header and perhaps
* the VLAN header is appended */
unsigned char data[0] __attribute__
((aligned (__alignof__(struct ebt_ulog_info))));
} ebt_ulog_packet_msg_t;
#endif /* _EBT_ULOG_H */


@ -5,7 +5,6 @@ header-y += ipt_ECN.h
header-y += ipt_LOG.h
header-y += ipt_REJECT.h
header-y += ipt_TTL.h
header-y += ipt_ULOG.h
header-y += ipt_ah.h
header-y += ipt_ecn.h
header-y += ipt_ttl.h


@ -1,49 +0,0 @@
/* Header file for IP tables userspace logging, Version 1.8
*
* (C) 2000-2002 by Harald Welte <laforge@gnumonks.org>
*
* Distributed under the terms of GNU GPL */
#ifndef _IPT_ULOG_H
#define _IPT_ULOG_H
#ifndef NETLINK_NFLOG
#define NETLINK_NFLOG 5
#endif
#define ULOG_DEFAULT_NLGROUP 1
#define ULOG_DEFAULT_QTHRESHOLD 1
#define ULOG_MAC_LEN 80
#define ULOG_PREFIX_LEN 32
#define ULOG_MAX_QLEN 50
/* Why 50? Well... there is a limit imposed by the slab cache 131000
* bytes. So the multipart netlink-message has to be < 131000 bytes.
* Assuming a standard ethernet-mtu of 1500, we could define this up
* to 80... but even 50 seems to be big enough. */
/* private data structure for each rule with a ULOG target */
struct ipt_ulog_info {
unsigned int nl_group;
size_t copy_range;
size_t qthreshold;
char prefix[ULOG_PREFIX_LEN];
};
/* Format of the ULOG packets passed through netlink */
typedef struct ulog_packet_msg {
unsigned long mark;
long timestamp_sec;
long timestamp_usec;
unsigned int hook;
char indev_name[IFNAMSIZ];
char outdev_name[IFNAMSIZ];
size_t data_len;
char prefix[ULOG_PREFIX_LEN];
unsigned char mac_len;
unsigned char mac[ULOG_MAC_LEN];
unsigned char payload[0];
} ulog_packet_msg_t;
#endif /*_IPT_ULOG_H*/


@ -14,6 +14,9 @@ config NFT_BRIDGE_META
help
Add support for bridge dedicated meta key.
config NF_LOG_BRIDGE
tristate "Bridge packet logging"
endif # NF_TABLES_BRIDGE
menuconfig BRIDGE_NF_EBTABLES
@ -202,22 +205,6 @@ config BRIDGE_EBT_LOG
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_ULOG
tristate "ebt: ulog support (OBSOLETE)"
help
This option enables the old bridge-specific "ebt_ulog" implementation
which has been obsoleted by the new "nfnetlink_log" code (see
CONFIG_NETFILTER_NETLINK_LOG).
This option adds the ulog watcher, that you can use in any rule
in any ebtables table. The packet is passed to a userspace
logging daemon using netlink multicast sockets. This differs
from the log watcher in the sense that the complete packet is
sent to userspace instead of a descriptive text and that
netlink multicast sockets are used instead of the syslog.
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_NFLOG
tristate "ebt: nflog support"
help


@ -5,6 +5,9 @@
obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o
obj-$(CONFIG_NFT_BRIDGE_META) += nft_meta_bridge.o
# packet logging
obj-$(CONFIG_NF_LOG_BRIDGE) += nf_log_bridge.o
obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
# tables


@ -186,6 +186,10 @@ ebt_log_tg(struct sk_buff *skb, const struct xt_action_param *par)
li.u.log.level = info->loglevel;
li.u.log.logflags = info->bitmask;
/* Remember that we have to use ebt_log_packet() not to break backward
* compatibility. We cannot use the default bridge packet logger via
* nf_log_packet() with NFT_LOG_TYPE_LOG here. --Pablo
*/
if (info->bitmask & EBT_LOG_NFLOG)
nf_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb,
par->in, par->out, &li, "%s", info->prefix);
@ -205,54 +209,13 @@ static struct xt_target ebt_log_tg_reg __read_mostly = {
.me = THIS_MODULE,
};
static struct nf_logger ebt_log_logger __read_mostly = {
.name = "ebt_log",
.logfn = &ebt_log_packet,
.me = THIS_MODULE,
};
static int __net_init ebt_log_net_init(struct net *net)
{
nf_log_set(net, NFPROTO_BRIDGE, &ebt_log_logger);
return 0;
}
static void __net_exit ebt_log_net_fini(struct net *net)
{
nf_log_unset(net, &ebt_log_logger);
}
static struct pernet_operations ebt_log_net_ops = {
.init = ebt_log_net_init,
.exit = ebt_log_net_fini,
};
static int __init ebt_log_init(void)
{
int ret;
ret = register_pernet_subsys(&ebt_log_net_ops);
if (ret < 0)
goto err_pernet;
ret = xt_register_target(&ebt_log_tg_reg);
if (ret < 0)
goto err_target;
nf_log_register(NFPROTO_BRIDGE, &ebt_log_logger);
return ret;
err_target:
unregister_pernet_subsys(&ebt_log_net_ops);
err_pernet:
return ret;
return xt_register_target(&ebt_log_tg_reg);
}
static void __exit ebt_log_fini(void)
{
unregister_pernet_subsys(&ebt_log_net_ops);
nf_log_unregister(&ebt_log_logger);
xt_unregister_target(&ebt_log_tg_reg);
}


@ -1,393 +0,0 @@
/*
* netfilter module for userspace bridged Ethernet frames logging daemons
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
* Harald Welte <laforge@netfilter.org>
*
* November, 2004
*
* Based on ipt_ULOG.c, which is
* (C) 2000-2002 by Harald Welte <laforge@netfilter.org>
*
* This module accepts two parameters:
*
* nlbufsiz:
* The parameter specifies how big the buffer for each netlink multicast
* group is. e.g. If you say nlbufsiz=8192, up to eight kb of packets will
* get accumulated in the kernel until they are sent to userspace. It is
* NOT possible to allocate more than 128kB, and it is strongly discouraged,
* because atomically allocating 128kB inside the network rx softirq is not
* reliable. Please also keep in mind that this buffer size is allocated for
* each nlgroup you are using, so the total kernel memory usage increases
* by that factor.
*
* flushtimeout:
* Specify, after how many hundredths of a second the queue should be
* flushed even if it is not full yet.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <net/netlink.h>
#include <linux/netdevice.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ulog.h>
#include <net/netfilter/nf_log.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include "../br_private.h"
static unsigned int nlbufsiz = NLMSG_GOODSIZE;
module_param(nlbufsiz, uint, 0600);
MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
"(defaults to 4096)");
static unsigned int flushtimeout = 10;
module_param(flushtimeout, uint, 0600);
MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths ofa second) "
"(defaults to 10)");
typedef struct {
unsigned int qlen; /* number of nlmsgs' in the skb */
struct nlmsghdr *lastnlh; /* netlink header of last msg in skb */
struct sk_buff *skb; /* the pre-allocated skb */
struct timer_list timer; /* the timer function */
spinlock_t lock; /* the per-queue lock */
} ebt_ulog_buff_t;
static int ebt_ulog_net_id __read_mostly;
struct ebt_ulog_net {
unsigned int nlgroup[EBT_ULOG_MAXNLGROUPS];
ebt_ulog_buff_t ulog_buffers[EBT_ULOG_MAXNLGROUPS];
struct sock *ebtulognl;
};
static struct ebt_ulog_net *ebt_ulog_pernet(struct net *net)
{
return net_generic(net, ebt_ulog_net_id);
}
/* send one ulog_buff_t to userspace */
static void ulog_send(struct ebt_ulog_net *ebt, unsigned int nlgroup)
{
ebt_ulog_buff_t *ub = &ebt->ulog_buffers[nlgroup];
del_timer(&ub->timer);
if (!ub->skb)
return;
/* last nlmsg needs NLMSG_DONE */
if (ub->qlen > 1)
ub->lastnlh->nlmsg_type = NLMSG_DONE;
NETLINK_CB(ub->skb).dst_group = nlgroup + 1;
netlink_broadcast(ebt->ebtulognl, ub->skb, 0, nlgroup + 1, GFP_ATOMIC);
ub->qlen = 0;
ub->skb = NULL;
}
/* timer function to flush queue in flushtimeout time */
static void ulog_timer(unsigned long data)
{
struct ebt_ulog_net *ebt = container_of((void *)data,
struct ebt_ulog_net,
nlgroup[*(unsigned int *)data]);
ebt_ulog_buff_t *ub = &ebt->ulog_buffers[*(unsigned int *)data];
spin_lock_bh(&ub->lock);
if (ub->skb)
ulog_send(ebt, *(unsigned int *)data);
spin_unlock_bh(&ub->lock);
}
static struct sk_buff *ulog_alloc_skb(unsigned int size)
{
struct sk_buff *skb;
unsigned int n;
n = max(size, nlbufsiz);
skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN);
if (!skb) {
if (n > size) {
/* try to allocate only as much as we need for
* current packet */
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
pr_debug("cannot even allocate buffer of size %ub\n",
size);
}
}
return skb;
}
static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct ebt_ulog_info *uloginfo,
const char *prefix)
{
ebt_ulog_packet_msg_t *pm;
size_t size, copy_len;
struct nlmsghdr *nlh;
struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
unsigned int group = uloginfo->nlgroup;
ebt_ulog_buff_t *ub = &ebt->ulog_buffers[group];
spinlock_t *lock = &ub->lock;
ktime_t kt;
if ((uloginfo->cprange == 0) ||
(uloginfo->cprange > skb->len + ETH_HLEN))
copy_len = skb->len + ETH_HLEN;
else
copy_len = uloginfo->cprange;
size = nlmsg_total_size(sizeof(*pm) + copy_len);
if (size > nlbufsiz) {
pr_debug("Size %Zd needed, but nlbufsiz=%d\n", size, nlbufsiz);
return;
}
spin_lock_bh(lock);
if (!ub->skb) {
if (!(ub->skb = ulog_alloc_skb(size)))
goto unlock;
} else if (size > skb_tailroom(ub->skb)) {
ulog_send(ebt, group);
if (!(ub->skb = ulog_alloc_skb(size)))
goto unlock;
}
nlh = nlmsg_put(ub->skb, 0, ub->qlen, 0,
size - NLMSG_ALIGN(sizeof(*nlh)), 0);
if (!nlh) {
kfree_skb(ub->skb);
ub->skb = NULL;
goto unlock;
}
ub->qlen++;
pm = nlmsg_data(nlh);
memset(pm, 0, sizeof(*pm));
/* Fill in the ulog data */
pm->version = EBT_ULOG_VERSION;
kt = ktime_get_real();
pm->stamp = ktime_to_timeval(kt);
if (ub->qlen == 1)
ub->skb->tstamp = kt;
pm->data_len = copy_len;
pm->mark = skb->mark;
pm->hook = hooknr;
if (uloginfo->prefix != NULL)
strcpy(pm->prefix, uloginfo->prefix);
if (in) {
strcpy(pm->physindev, in->name);
/* If in isn't a bridge, then physindev==indev */
if (br_port_exists(in))
/* rcu_read_lock()ed by nf_hook_slow */
strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
else
strcpy(pm->indev, in->name);
}
if (out) {
/* If out exists, then out is a bridge port */
strcpy(pm->physoutdev, out->name);
/* rcu_read_lock()ed by nf_hook_slow */
strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
}
if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0)
BUG();
if (ub->qlen > 1)
ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
ub->lastnlh = nlh;
if (ub->qlen >= uloginfo->qthreshold)
ulog_send(ebt, group);
else if (!timer_pending(&ub->timer)) {
ub->timer.expires = jiffies + flushtimeout * HZ / 100;
add_timer(&ub->timer);
}
unlock:
spin_unlock_bh(lock);
}
/* this function is registered with the netfilter core */
static void ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
const struct sk_buff *skb, const struct net_device *in,
const struct net_device *out, const struct nf_loginfo *li,
const char *prefix)
{
struct ebt_ulog_info loginfo;
if (!li || li->type != NF_LOG_TYPE_ULOG) {
loginfo.nlgroup = EBT_ULOG_DEFAULT_NLGROUP;
loginfo.cprange = 0;
loginfo.qthreshold = EBT_ULOG_DEFAULT_QTHRESHOLD;
loginfo.prefix[0] = '\0';
} else {
loginfo.nlgroup = li->u.ulog.group;
loginfo.cprange = li->u.ulog.copy_len;
loginfo.qthreshold = li->u.ulog.qthreshold;
strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
}
ebt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix);
}
static unsigned int
ebt_ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
struct net *net = dev_net(par->in ? par->in : par->out);
ebt_ulog_packet(net, par->hooknum, skb, par->in, par->out,
par->targinfo, NULL);
return EBT_CONTINUE;
}
static int ebt_ulog_tg_check(const struct xt_tgchk_param *par)
{
struct ebt_ulog_info *uloginfo = par->targinfo;
if (!par->net->xt.ebt_ulog_warn_deprecated) {
pr_info("ebt_ulog is deprecated and it will be removed soon, "
"use ebt_nflog instead\n");
par->net->xt.ebt_ulog_warn_deprecated = true;
}
if (uloginfo->nlgroup > 31)
return -EINVAL;
uloginfo->prefix[EBT_ULOG_PREFIX_LEN - 1] = '\0';
if (uloginfo->qthreshold > EBT_ULOG_MAX_QLEN)
uloginfo->qthreshold = EBT_ULOG_MAX_QLEN;
return 0;
}
static struct xt_target ebt_ulog_tg_reg __read_mostly = {
.name = "ulog",
.revision = 0,
.family = NFPROTO_BRIDGE,
.target = ebt_ulog_tg,
.checkentry = ebt_ulog_tg_check,
.targetsize = sizeof(struct ebt_ulog_info),
.me = THIS_MODULE,
};
static struct nf_logger ebt_ulog_logger __read_mostly = {
.name = "ebt_ulog",
.logfn = &ebt_log_packet,
.me = THIS_MODULE,
};
static int __net_init ebt_ulog_net_init(struct net *net)
{
int i;
struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
struct netlink_kernel_cfg cfg = {
.groups = EBT_ULOG_MAXNLGROUPS,
};
/* initialize ulog_buffers */
for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
ebt->nlgroup[i] = i;
setup_timer(&ebt->ulog_buffers[i].timer, ulog_timer,
(unsigned long)&ebt->nlgroup[i]);
spin_lock_init(&ebt->ulog_buffers[i].lock);
}
ebt->ebtulognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg);
if (!ebt->ebtulognl)
return -ENOMEM;
nf_log_set(net, NFPROTO_BRIDGE, &ebt_ulog_logger);
return 0;
}
static void __net_exit ebt_ulog_net_fini(struct net *net)
{
int i;
struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
nf_log_unset(net, &ebt_ulog_logger);
for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
ebt_ulog_buff_t *ub = &ebt->ulog_buffers[i];
del_timer(&ub->timer);
if (ub->skb) {
kfree_skb(ub->skb);
ub->skb = NULL;
}
}
netlink_kernel_release(ebt->ebtulognl);
}
static struct pernet_operations ebt_ulog_net_ops = {
.init = ebt_ulog_net_init,
.exit = ebt_ulog_net_fini,
.id = &ebt_ulog_net_id,
.size = sizeof(struct ebt_ulog_net),
};
static int __init ebt_ulog_init(void)
{
int ret;
if (nlbufsiz >= 128*1024) {
pr_warn("Netlink buffer has to be <= 128kB,"
"please try a smaller nlbufsiz parameter.\n");
return -EINVAL;
}
ret = register_pernet_subsys(&ebt_ulog_net_ops);
if (ret)
goto out_pernet;
ret = xt_register_target(&ebt_ulog_tg_reg);
if (ret)
goto out_target;
nf_log_register(NFPROTO_BRIDGE, &ebt_ulog_logger);
return 0;
out_target:
unregister_pernet_subsys(&ebt_ulog_net_ops);
out_pernet:
return ret;
}
static void __exit ebt_ulog_fini(void)
{
nf_log_unregister(&ebt_ulog_logger);
xt_unregister_target(&ebt_ulog_tg_reg);
unregister_pernet_subsys(&ebt_ulog_net_ops);
}
module_init(ebt_ulog_init);
module_exit(ebt_ulog_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
MODULE_DESCRIPTION("Ebtables: Packet logging to netlink using ULOG");


@ -0,0 +1,96 @@
/*
* (C) 2014 by Pablo Neira Ayuso <pablo@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_bridge.h>
#include <linux/ip.h>
#include <net/route.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_log.h>
static void nf_log_bridge_packet(struct net *net, u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix)
{
switch (eth_hdr(skb)->h_proto) {
case htons(ETH_P_IP):
nf_log_packet(net, NFPROTO_IPV4, hooknum, skb, in, out,
loginfo, "%s", prefix);
break;
case htons(ETH_P_IPV6):
nf_log_packet(net, NFPROTO_IPV6, hooknum, skb, in, out,
loginfo, "%s", prefix);
break;
case htons(ETH_P_ARP):
case htons(ETH_P_RARP):
nf_log_packet(net, NFPROTO_ARP, hooknum, skb, in, out,
loginfo, "%s", prefix);
break;
}
}
static struct nf_logger nf_bridge_logger __read_mostly = {
.name = "nf_log_bridge",
.type = NF_LOG_TYPE_LOG,
.logfn = nf_log_bridge_packet,
.me = THIS_MODULE,
};
static int __net_init nf_log_bridge_net_init(struct net *net)
{
nf_log_set(net, NFPROTO_BRIDGE, &nf_bridge_logger);
return 0;
}
static void __net_exit nf_log_bridge_net_exit(struct net *net)
{
nf_log_unset(net, &nf_bridge_logger);
}
static struct pernet_operations nf_log_bridge_net_ops = {
.init = nf_log_bridge_net_init,
.exit = nf_log_bridge_net_exit,
};
static int __init nf_log_bridge_init(void)
{
int ret;
/* Request to load the real packet loggers. */
nf_logger_request_module(NFPROTO_IPV4, NF_LOG_TYPE_LOG);
nf_logger_request_module(NFPROTO_IPV6, NF_LOG_TYPE_LOG);
nf_logger_request_module(NFPROTO_ARP, NF_LOG_TYPE_LOG);
ret = register_pernet_subsys(&nf_log_bridge_net_ops);
if (ret < 0)
return ret;
nf_log_register(NFPROTO_BRIDGE, &nf_bridge_logger);
return 0;
}
static void __exit nf_log_bridge_exit(void)
{
unregister_pernet_subsys(&nf_log_bridge_net_ops);
nf_log_unregister(&nf_bridge_logger);
}
module_init(nf_log_bridge_init);
module_exit(nf_log_bridge_exit);
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter bridge packet logging");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 0);


@ -36,6 +36,16 @@ config NF_CONNTRACK_PROC_COMPAT
If unsure, say Y.
config NF_LOG_ARP
tristate "ARP packet logging"
default m if NETFILTER_ADVANCED=n
select NF_LOG_COMMON
config NF_LOG_IPV4
tristate "IPv4 packet logging"
default m if NETFILTER_ADVANCED=n
select NF_LOG_COMMON
config NF_TABLES_IPV4
depends on NF_TABLES
tristate "IPv4 nf_tables support"
@ -159,25 +169,6 @@ config IP_NF_TARGET_SYNPROXY
To compile it as a module, choose M here. If unsure, say N.
config IP_NF_TARGET_ULOG
tristate "ULOG target support (obsolete)"
default m if NETFILTER_ADVANCED=n
---help---
This option enables the old IPv4-only "ipt_ULOG" implementation
which has been obsoleted by the new "nfnetlink_log" code (see
CONFIG_NETFILTER_NETLINK_LOG).
This option adds a `ULOG' target, which allows you to create rules in
any iptables table. The packet is passed to a userspace logging
daemon using netlink multicast sockets; unlike the LOG target
which can only be viewed through syslog.
The appropriate userspace logging daemon (ulogd) may be obtained from
<http://www.netfilter.org/projects/ulogd/index.html>
To compile it as a module, choose M here. If unsure, say N.
# NAT + specific targets: nf_conntrack
config NF_NAT_IPV4
tristate "IPv4 NAT"


@ -19,6 +19,10 @@ obj-$(CONFIG_NF_NAT_IPV4) += nf_nat_ipv4.o
# defrag
obj-$(CONFIG_NF_DEFRAG_IPV4) += nf_defrag_ipv4.o
# logging
obj-$(CONFIG_NF_LOG_ARP) += nf_log_arp.o
obj-$(CONFIG_NF_LOG_IPV4) += nf_log_ipv4.o
# NAT helpers (nf_conntrack)
obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o
obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o


@ -1,498 +0,0 @@
/*
* netfilter module for userspace packet logging daemons
*
* (C) 2000-2004 by Harald Welte <laforge@netfilter.org>
* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
* (C) 2005-2007 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This module accepts two parameters:
*
* nlbufsiz:
* The parameter specifies how big the buffer for each netlink multicast
* group is. e.g. If you say nlbufsiz=8192, up to eight kb of packets will
* get accumulated in the kernel until they are sent to userspace. It is
* NOT possible to allocate more than 128kB, and it is strongly discouraged,
* because atomically allocating 128kB inside the network rx softirq is not
* reliable. Please also keep in mind that this buffer size is allocated for
* each nlgroup you are using, so the total kernel memory usage increases
* by that factor.
*
* Actually you should use nlbufsiz a bit smaller than PAGE_SIZE, since
* nlbufsiz is used with alloc_skb, which adds another
* sizeof(struct skb_shared_info). Use NLMSG_GOODSIZE instead.
*
* flushtimeout:
* Specify, after how many hundredths of a second the queue should be
* flushed even if it is not full yet.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <net/netlink.h>
#include <linux/netdevice.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ipt_ULOG.h>
#include <net/netfilter/nf_log.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <linux/bitops.h>
#include <asm/unaligned.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
MODULE_DESCRIPTION("Xtables: packet logging to netlink using ULOG");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG);
#define ULOG_NL_EVENT 111 /* Harald's favorite number */
#define ULOG_MAXNLGROUPS 32 /* numer of nlgroups */
static unsigned int nlbufsiz = NLMSG_GOODSIZE;
module_param(nlbufsiz, uint, 0400);
MODULE_PARM_DESC(nlbufsiz, "netlink buffer size");
static unsigned int flushtimeout = 10;
module_param(flushtimeout, uint, 0600);
MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths of a second)");
static bool nflog = true;
module_param(nflog, bool, 0400);
MODULE_PARM_DESC(nflog, "register as internal netfilter logging module");
/* global data structures */
typedef struct {
unsigned int qlen; /* number of nlmsgs' in the skb */
struct nlmsghdr *lastnlh; /* netlink header of last msg in skb */
struct sk_buff *skb; /* the pre-allocated skb */
struct timer_list timer; /* the timer function */
} ulog_buff_t;
static int ulog_net_id __read_mostly;
struct ulog_net {
unsigned int nlgroup[ULOG_MAXNLGROUPS];
ulog_buff_t ulog_buffers[ULOG_MAXNLGROUPS];
struct sock *nflognl;
spinlock_t lock;
};
static struct ulog_net *ulog_pernet(struct net *net)
{
return net_generic(net, ulog_net_id);
}
/* send one ulog_buff_t to userspace */
static void ulog_send(struct ulog_net *ulog, unsigned int nlgroupnum)
{
ulog_buff_t *ub = &ulog->ulog_buffers[nlgroupnum];
pr_debug("ulog_send: timer is deleting\n");
del_timer(&ub->timer);
if (!ub->skb) {
pr_debug("ulog_send: nothing to send\n");
return;
}
/* last nlmsg needs NLMSG_DONE */
if (ub->qlen > 1)
ub->lastnlh->nlmsg_type = NLMSG_DONE;
NETLINK_CB(ub->skb).dst_group = nlgroupnum + 1;
pr_debug("throwing %d packets to netlink group %u\n",
ub->qlen, nlgroupnum + 1);
netlink_broadcast(ulog->nflognl, ub->skb, 0, nlgroupnum + 1,
GFP_ATOMIC);
ub->qlen = 0;
ub->skb = NULL;
ub->lastnlh = NULL;
}
/* timer function to flush queue in flushtimeout time */
static void ulog_timer(unsigned long data)
{
unsigned int groupnum = *((unsigned int *)data);
struct ulog_net *ulog = container_of((void *)data,
struct ulog_net,
nlgroup[groupnum]);
pr_debug("timer function called, calling ulog_send\n");
/* lock to protect against somebody modifying our structure
* from ipt_ulog_target at the same time */
spin_lock_bh(&ulog->lock);
ulog_send(ulog, groupnum);
spin_unlock_bh(&ulog->lock);
}
static struct sk_buff *ulog_alloc_skb(unsigned int size)
{
struct sk_buff *skb;
unsigned int n;
/* alloc skb which should be big enough for a whole
* multipart message. WARNING: has to be <= 131000
* due to slab allocator restrictions */
n = max(size, nlbufsiz);
skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN);
if (!skb) {
if (n > size) {
/* try to allocate only as much as we need for
* current packet */
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
pr_debug("cannot even allocate %ub\n", size);
}
}
return skb;
}
static void ipt_ulog_packet(struct net *net,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct ipt_ulog_info *loginfo,
const char *prefix)
{
ulog_buff_t *ub;
ulog_packet_msg_t *pm;
size_t size, copy_len;
struct nlmsghdr *nlh;
struct timeval tv;
struct ulog_net *ulog = ulog_pernet(net);
/* ffs == find first bit set, necessary because userspace
* is already shifting groupnumber, but we need unshifted.
* ffs() returns [1..32], we need [0..31] */
unsigned int groupnum = ffs(loginfo->nl_group) - 1;
/* calculate the size of the skb needed */
if (loginfo->copy_range == 0 || loginfo->copy_range > skb->len)
copy_len = skb->len;
else
copy_len = loginfo->copy_range;
size = nlmsg_total_size(sizeof(*pm) + copy_len);
ub = &ulog->ulog_buffers[groupnum];
spin_lock_bh(&ulog->lock);
if (!ub->skb) {
if (!(ub->skb = ulog_alloc_skb(size)))
goto alloc_failure;
} else if (ub->qlen >= loginfo->qthreshold ||
size > skb_tailroom(ub->skb)) {
/* either the queue len is too high or we don't have
* enough room in nlskb left. send it to userspace. */
ulog_send(ulog, groupnum);
if (!(ub->skb = ulog_alloc_skb(size)))
goto alloc_failure;
}
pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold);
nlh = nlmsg_put(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
sizeof(*pm)+copy_len, 0);
if (!nlh) {
pr_debug("error during nlmsg_put\n");
goto out_unlock;
}
ub->qlen++;
pm = nlmsg_data(nlh);
memset(pm, 0, sizeof(*pm));
/* We might not have a timestamp, get one */
if (skb->tstamp.tv64 == 0)
__net_timestamp((struct sk_buff *)skb);
/* copy hook, prefix, timestamp, payload, etc. */
pm->data_len = copy_len;
tv = ktime_to_timeval(skb->tstamp);
put_unaligned(tv.tv_sec, &pm->timestamp_sec);
put_unaligned(tv.tv_usec, &pm->timestamp_usec);
put_unaligned(skb->mark, &pm->mark);
pm->hook = hooknum;
if (prefix != NULL) {
strncpy(pm->prefix, prefix, sizeof(pm->prefix) - 1);
pm->prefix[sizeof(pm->prefix) - 1] = '\0';
}
else if (loginfo->prefix[0] != '\0')
strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
if (in && in->hard_header_len > 0 &&
skb->mac_header != skb->network_header &&
in->hard_header_len <= ULOG_MAC_LEN) {
memcpy(pm->mac, skb_mac_header(skb), in->hard_header_len);
pm->mac_len = in->hard_header_len;
} else
pm->mac_len = 0;
if (in)
strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
if (out)
strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
/* copy_len <= skb->len, so can't fail. */
if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
BUG();
/* check if we are building multi-part messages */
if (ub->qlen > 1)
ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
ub->lastnlh = nlh;
/* if timer isn't already running, start it */
if (!timer_pending(&ub->timer)) {
ub->timer.expires = jiffies + flushtimeout * HZ / 100;
add_timer(&ub->timer);
}
/* if threshold is reached, send message to userspace */
if (ub->qlen >= loginfo->qthreshold) {
if (loginfo->qthreshold > 1)
nlh->nlmsg_type = NLMSG_DONE;
ulog_send(ulog, groupnum);
}
out_unlock:
spin_unlock_bh(&ulog->lock);
return;
alloc_failure:
pr_debug("Error building netlink message\n");
spin_unlock_bh(&ulog->lock);
}
static unsigned int
ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
struct net *net = dev_net(par->in ? par->in : par->out);
ipt_ulog_packet(net, par->hooknum, skb, par->in, par->out,
par->targinfo, NULL);
return XT_CONTINUE;
}
static void ipt_logfn(struct net *net,
u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *li,
const char *prefix)
{
struct ipt_ulog_info loginfo;
if (!li || li->type != NF_LOG_TYPE_ULOG) {
loginfo.nl_group = ULOG_DEFAULT_NLGROUP;
loginfo.copy_range = 0;
loginfo.qthreshold = ULOG_DEFAULT_QTHRESHOLD;
loginfo.prefix[0] = '\0';
} else {
loginfo.nl_group = li->u.ulog.group;
loginfo.copy_range = li->u.ulog.copy_len;
loginfo.qthreshold = li->u.ulog.qthreshold;
strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
}
ipt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix);
}
static int ulog_tg_check(const struct xt_tgchk_param *par)
{
const struct ipt_ulog_info *loginfo = par->targinfo;
if (!par->net->xt.ulog_warn_deprecated) {
pr_info("ULOG is deprecated and it will be removed soon, "
"use NFLOG instead\n");
par->net->xt.ulog_warn_deprecated = true;
}
if (loginfo->prefix[sizeof(loginfo->prefix) - 1] != '\0') {
pr_debug("prefix not null-terminated\n");
return -EINVAL;
}
if (loginfo->qthreshold > ULOG_MAX_QLEN) {
pr_debug("queue threshold %Zu > MAX_QLEN\n",
loginfo->qthreshold);
return -EINVAL;
}
return 0;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_ulog_info {
compat_uint_t nl_group;
compat_size_t copy_range;
compat_size_t qthreshold;
char prefix[ULOG_PREFIX_LEN];
};
static void ulog_tg_compat_from_user(void *dst, const void *src)
{
const struct compat_ipt_ulog_info *cl = src;
struct ipt_ulog_info l = {
.nl_group = cl->nl_group,
.copy_range = cl->copy_range,
.qthreshold = cl->qthreshold,
};
memcpy(l.prefix, cl->prefix, sizeof(l.prefix));
memcpy(dst, &l, sizeof(l));
}
static int ulog_tg_compat_to_user(void __user *dst, const void *src)
{
const struct ipt_ulog_info *l = src;
struct compat_ipt_ulog_info cl = {
.nl_group = l->nl_group,
.copy_range = l->copy_range,
.qthreshold = l->qthreshold,
};
memcpy(cl.prefix, l->prefix, sizeof(cl.prefix));
return copy_to_user(dst, &cl, sizeof(cl)) ? -EFAULT : 0;
}
#endif /* CONFIG_COMPAT */
static struct xt_target ulog_tg_reg __read_mostly = {
.name = "ULOG",
.family = NFPROTO_IPV4,
.target = ulog_tg,
.targetsize = sizeof(struct ipt_ulog_info),
.checkentry = ulog_tg_check,
#ifdef CONFIG_COMPAT
.compatsize = sizeof(struct compat_ipt_ulog_info),
.compat_from_user = ulog_tg_compat_from_user,
.compat_to_user = ulog_tg_compat_to_user,
#endif
.me = THIS_MODULE,
};
static struct nf_logger ipt_ulog_logger __read_mostly = {
.name = "ipt_ULOG",
.logfn = ipt_logfn,
.me = THIS_MODULE,
};
static int __net_init ulog_tg_net_init(struct net *net)
{
int i;
struct ulog_net *ulog = ulog_pernet(net);
struct netlink_kernel_cfg cfg = {
.groups = ULOG_MAXNLGROUPS,
};
spin_lock_init(&ulog->lock);
/* initialize ulog_buffers */
for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
ulog->nlgroup[i] = i;
setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer,
(unsigned long)&ulog->nlgroup[i]);
}
ulog->nflognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg);
if (!ulog->nflognl)
return -ENOMEM;
if (nflog)
nf_log_set(net, NFPROTO_IPV4, &ipt_ulog_logger);
return 0;
}
static void __net_exit ulog_tg_net_exit(struct net *net)
{
ulog_buff_t *ub;
int i;
struct ulog_net *ulog = ulog_pernet(net);
if (nflog)
nf_log_unset(net, &ipt_ulog_logger);
netlink_kernel_release(ulog->nflognl);
/* remove pending timers and free allocated skb's */
for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
ub = &ulog->ulog_buffers[i];
pr_debug("timer is deleting\n");
del_timer(&ub->timer);
if (ub->skb) {
kfree_skb(ub->skb);
ub->skb = NULL;
}
}
}
static struct pernet_operations ulog_tg_net_ops = {
.init = ulog_tg_net_init,
.exit = ulog_tg_net_exit,
.id = &ulog_net_id,
.size = sizeof(struct ulog_net),
};
static int __init ulog_tg_init(void)
{
int ret;
pr_debug("init module\n");
if (nlbufsiz > 128*1024) {
pr_warn("Netlink buffer has to be <= 128kB\n");
return -EINVAL;
}
ret = register_pernet_subsys(&ulog_tg_net_ops);
if (ret)
goto out_pernet;
ret = xt_register_target(&ulog_tg_reg);
if (ret < 0)
goto out_target;
if (nflog)
nf_log_register(NFPROTO_IPV4, &ipt_ulog_logger);
return 0;
out_target:
unregister_pernet_subsys(&ulog_tg_net_ops);
out_pernet:
return ret;
}
static void __exit ulog_tg_exit(void)
{
pr_debug("cleanup_module\n");
if (nflog)
nf_log_unregister(&ipt_ulog_logger);
xt_unregister_target(&ulog_tg_reg);
unregister_pernet_subsys(&ulog_tg_net_ops);
}
module_init(ulog_tg_init);
module_exit(ulog_tg_exit);

View file

@ -314,7 +314,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
return -ENOENT;
}
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
@ -388,7 +388,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
.invert_tuple = ipv4_invert_tuple,
.print_tuple = ipv4_print_tuple,
.get_l4proto = ipv4_get_l4proto,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = ipv4_tuple_to_nlattr,
.nlattr_tuple_size = ipv4_nlattr_tuple_size,
.nlattr_to_tuple = ipv4_nlattr_to_tuple,


@ -226,7 +226,7 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
return icmp_error_message(net, tmpl, skb, ctinfo, hooknum);
}
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
@ -408,7 +408,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
.error = icmp_error,
.destroy = NULL,
.me = NULL,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = icmp_tuple_to_nlattr,
.nlattr_tuple_size = icmp_nlattr_tuple_size,
.nlattr_to_tuple = icmp_nlattr_to_tuple,


@ -17,7 +17,7 @@
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
#include <net/netfilter/nf_conntrack_zones.h>
@ -45,7 +45,7 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
{
u16 zone = NF_CT_DEFAULT_ZONE;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (skb->nfct)
zone = nf_ct_zone((struct nf_conn *)skb->nfct);
#endif
@ -74,8 +74,8 @@ static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
inet->nodefrag)
return NF_ACCEPT;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE)
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#if !IS_ENABLED(CONFIG_NF_NAT)
/* Previously seen (loopback)? Ignore. Do this before
fragment check. */
if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))


@ -0,0 +1,149 @@
/*
* (C) 2014 by Pablo Neira Ayuso <pablo@netfilter.org>
*
* Based on code from ebt_log from:
*
* Bart De Schuymer <bdschuym@pandora.be>
* Harald Welte <laforge@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <net/route.h>
#include <linux/netfilter.h>
#include <linux/netfilter/xt_LOG.h>
#include <net/netfilter/nf_log.h>
static struct nf_loginfo default_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
.level = 5,
.logflags = NF_LOG_MASK,
},
},
};
struct arppayload {
unsigned char mac_src[ETH_ALEN];
unsigned char ip_src[4];
unsigned char mac_dst[ETH_ALEN];
unsigned char ip_dst[4];
};
static void dump_arp_packet(struct nf_log_buf *m,
const struct nf_loginfo *info,
const struct sk_buff *skb, unsigned int nhoff)
{
const struct arphdr *ah;
struct arphdr _arph;
const struct arppayload *ap;
struct arppayload _arpp;
ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
if (ah == NULL) {
nf_log_buf_add(m, "TRUNCATED");
return;
}
nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d",
ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op));
/* If it's for Ethernet and the lengths are OK, then log the ARP
* payload.
*/
if (ah->ar_hrd != htons(1) ||
ah->ar_hln != ETH_ALEN ||
ah->ar_pln != sizeof(__be32))
return;
ap = skb_header_pointer(skb, sizeof(_arph), sizeof(_arpp), &_arpp);
if (ap == NULL) {
nf_log_buf_add(m, " INCOMPLETE [%Zu bytes]",
skb->len - sizeof(_arph));
return;
}
nf_log_buf_add(m, " MACSRC=%pM IPSRC=%pI4 MACDST=%pM IPDST=%pI4",
ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst);
}
void nf_log_arp_packet(struct net *net, u_int8_t pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix)
{
struct nf_log_buf *m;
/* FIXME: Disabled from containers until syslog ns is supported */
if (!net_eq(net, &init_net))
return;
m = nf_log_buf_open();
if (!loginfo)
loginfo = &default_loginfo;
nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
prefix);
dump_arp_packet(m, loginfo, skb, 0);
nf_log_buf_close(m);
}
static struct nf_logger nf_arp_logger __read_mostly = {
.name = "nf_log_arp",
.type = NF_LOG_TYPE_LOG,
.logfn = nf_log_arp_packet,
.me = THIS_MODULE,
};
static int __net_init nf_log_arp_net_init(struct net *net)
{
nf_log_set(net, NFPROTO_ARP, &nf_arp_logger);
return 0;
}
static void __net_exit nf_log_arp_net_exit(struct net *net)
{
nf_log_unset(net, &nf_arp_logger);
}
static struct pernet_operations nf_log_arp_net_ops = {
.init = nf_log_arp_net_init,
.exit = nf_log_arp_net_exit,
};
static int __init nf_log_arp_init(void)
{
int ret;
ret = register_pernet_subsys(&nf_log_arp_net_ops);
if (ret < 0)
return ret;
nf_log_register(NFPROTO_ARP, &nf_arp_logger);
return 0;
}
static void __exit nf_log_arp_exit(void)
{
unregister_pernet_subsys(&nf_log_arp_net_ops);
nf_log_unregister(&nf_arp_logger);
}
module_init(nf_log_arp_init);
module_exit(nf_log_arp_exit);
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter ARP packet logging");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NF_LOGGER(3, 0);


@ -0,0 +1,385 @@
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/route.h>
#include <linux/netfilter.h>
#include <linux/netfilter/xt_LOG.h>
#include <net/netfilter/nf_log.h>
static struct nf_loginfo default_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
.level = 5,
.logflags = NF_LOG_MASK,
},
},
};
/* One level of recursion won't kill us */
static void dump_ipv4_packet(struct nf_log_buf *m,
const struct nf_loginfo *info,
const struct sk_buff *skb, unsigned int iphoff)
{
struct iphdr _iph;
const struct iphdr *ih;
unsigned int logflags;
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
else
logflags = NF_LOG_MASK;
ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
if (ih == NULL) {
nf_log_buf_add(m, "TRUNCATED");
return;
}
/* Important fields:
* TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */
/* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */
nf_log_buf_add(m, "SRC=%pI4 DST=%pI4 ", &ih->saddr, &ih->daddr);
/* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
nf_log_buf_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK,
ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id));
/* Max length: 6 "CE DF MF " */
if (ntohs(ih->frag_off) & IP_CE)
nf_log_buf_add(m, "CE ");
if (ntohs(ih->frag_off) & IP_DF)
nf_log_buf_add(m, "DF ");
if (ntohs(ih->frag_off) & IP_MF)
nf_log_buf_add(m, "MF ");
/* Max length: 11 "FRAG:65535 " */
if (ntohs(ih->frag_off) & IP_OFFSET)
nf_log_buf_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
if ((logflags & XT_LOG_IPOPT) &&
ih->ihl * 4 > sizeof(struct iphdr)) {
const unsigned char *op;
unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
unsigned int i, optsize;
optsize = ih->ihl * 4 - sizeof(struct iphdr);
op = skb_header_pointer(skb, iphoff+sizeof(_iph),
optsize, _opt);
if (op == NULL) {
nf_log_buf_add(m, "TRUNCATED");
return;
}
/* Max length: 127 "OPT (" 15*4*2chars ") " */
nf_log_buf_add(m, "OPT (");
for (i = 0; i < optsize; i++)
nf_log_buf_add(m, "%02X", op[i]);
nf_log_buf_add(m, ") ");
}
switch (ih->protocol) {
case IPPROTO_TCP:
if (nf_log_dump_tcp_header(m, skb, ih->protocol,
ntohs(ih->frag_off) & IP_OFFSET,
iphoff+ih->ihl*4, logflags))
return;
break;
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
if (nf_log_dump_udp_header(m, skb, ih->protocol,
ntohs(ih->frag_off) & IP_OFFSET,
iphoff+ih->ihl*4))
return;
break;
case IPPROTO_ICMP: {
struct icmphdr _icmph;
const struct icmphdr *ich;
static const size_t required_len[NR_ICMP_TYPES+1]
= { [ICMP_ECHOREPLY] = 4,
[ICMP_DEST_UNREACH]
= 8 + sizeof(struct iphdr),
[ICMP_SOURCE_QUENCH]
= 8 + sizeof(struct iphdr),
[ICMP_REDIRECT]
= 8 + sizeof(struct iphdr),
[ICMP_ECHO] = 4,
[ICMP_TIME_EXCEEDED]
= 8 + sizeof(struct iphdr),
[ICMP_PARAMETERPROB]
= 8 + sizeof(struct iphdr),
[ICMP_TIMESTAMP] = 20,
[ICMP_TIMESTAMPREPLY] = 20,
[ICMP_ADDRESS] = 12,
[ICMP_ADDRESSREPLY] = 12 };
/* Max length: 11 "PROTO=ICMP " */
nf_log_buf_add(m, "PROTO=ICMP ");
if (ntohs(ih->frag_off) & IP_OFFSET)
break;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
ich = skb_header_pointer(skb, iphoff + ih->ihl * 4,
sizeof(_icmph), &_icmph);
if (ich == NULL) {
nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
skb->len - iphoff - ih->ihl*4);
break;
}
/* Max length: 18 "TYPE=255 CODE=255 " */
nf_log_buf_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code);
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
if (ich->type <= NR_ICMP_TYPES &&
required_len[ich->type] &&
skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) {
nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
skb->len - iphoff - ih->ihl*4);
break;
}
switch (ich->type) {
case ICMP_ECHOREPLY:
case ICMP_ECHO:
/* Max length: 19 "ID=65535 SEQ=65535 " */
nf_log_buf_add(m, "ID=%u SEQ=%u ",
ntohs(ich->un.echo.id),
ntohs(ich->un.echo.sequence));
break;
case ICMP_PARAMETERPROB:
/* Max length: 14 "PARAMETER=255 " */
nf_log_buf_add(m, "PARAMETER=%u ",
ntohl(ich->un.gateway) >> 24);
break;
case ICMP_REDIRECT:
/* Max length: 24 "GATEWAY=255.255.255.255 " */
nf_log_buf_add(m, "GATEWAY=%pI4 ", &ich->un.gateway);
/* Fall through */
case ICMP_DEST_UNREACH:
case ICMP_SOURCE_QUENCH:
case ICMP_TIME_EXCEEDED:
/* Max length: 3+maxlen */
if (!iphoff) { /* Only recurse once. */
nf_log_buf_add(m, "[");
dump_ipv4_packet(m, info, skb,
iphoff + ih->ihl*4+sizeof(_icmph));
nf_log_buf_add(m, "] ");
}
/* Max length: 10 "MTU=65535 " */
if (ich->type == ICMP_DEST_UNREACH &&
ich->code == ICMP_FRAG_NEEDED) {
nf_log_buf_add(m, "MTU=%u ",
ntohs(ich->un.frag.mtu));
}
}
break;
}
/* Max Length */
case IPPROTO_AH: {
struct ip_auth_hdr _ahdr;
const struct ip_auth_hdr *ah;
if (ntohs(ih->frag_off) & IP_OFFSET)
break;
/* Max length: 9 "PROTO=AH " */
nf_log_buf_add(m, "PROTO=AH ");
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
ah = skb_header_pointer(skb, iphoff+ih->ihl*4,
sizeof(_ahdr), &_ahdr);
if (ah == NULL) {
nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
skb->len - iphoff - ih->ihl*4);
break;
}
/* Length: 15 "SPI=0xF1234567 " */
nf_log_buf_add(m, "SPI=0x%x ", ntohl(ah->spi));
break;
}
case IPPROTO_ESP: {
struct ip_esp_hdr _esph;
const struct ip_esp_hdr *eh;
/* Max length: 10 "PROTO=ESP " */
nf_log_buf_add(m, "PROTO=ESP ");
if (ntohs(ih->frag_off) & IP_OFFSET)
break;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
eh = skb_header_pointer(skb, iphoff+ih->ihl*4,
sizeof(_esph), &_esph);
if (eh == NULL) {
nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
skb->len - iphoff - ih->ihl*4);
break;
}
/* Length: 15 "SPI=0xF1234567 " */
nf_log_buf_add(m, "SPI=0x%x ", ntohl(eh->spi));
break;
}
/* Max length: 10 "PROTO 255 " */
default:
nf_log_buf_add(m, "PROTO=%u ", ih->protocol);
}
/* Max length: 15 "UID=4294967295 " */
if ((logflags & XT_LOG_UID) && !iphoff)
nf_log_dump_sk_uid_gid(m, skb->sk);
/* Max length: 16 "MARK=0xFFFFFFFF " */
if (!iphoff && skb->mark)
nf_log_buf_add(m, "MARK=0x%x ", skb->mark);
/* Proto Max log string length */
/* IP: 40+46+6+11+127 = 230 */
/* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */
/* UDP: 10+max(25,20) = 35 */
/* UDPLITE: 14+max(25,20) = 39 */
/* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
/* ESP: 10+max(25)+15 = 50 */
/* AH: 9+max(25)+15 = 49 */
/* unknown: 10 */
/* (ICMP allows recursion one level deep) */
/* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */
/* maxlen = 230+ 91 + 230 + 252 = 803 */
}
static void dump_ipv4_mac_header(struct nf_log_buf *m,
const struct nf_loginfo *info,
const struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
unsigned int logflags = 0;
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
if (!(logflags & XT_LOG_MACDECODE))
goto fallback;
switch (dev->type) {
case ARPHRD_ETHER:
nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
ntohs(eth_hdr(skb)->h_proto));
return;
default:
break;
}
fallback:
nf_log_buf_add(m, "MAC=");
if (dev->hard_header_len &&
skb->mac_header != skb->network_header) {
const unsigned char *p = skb_mac_header(skb);
unsigned int i;
nf_log_buf_add(m, "%02x", *p++);
for (i = 1; i < dev->hard_header_len; i++, p++)
nf_log_buf_add(m, ":%02x", *p);
}
nf_log_buf_add(m, " ");
}
static void nf_log_ip_packet(struct net *net, u_int8_t pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix)
{
struct nf_log_buf *m;
/* FIXME: Disabled from containers until syslog ns is supported */
if (!net_eq(net, &init_net))
return;
m = nf_log_buf_open();
if (!loginfo)
loginfo = &default_loginfo;
nf_log_dump_packet_common(m, pf, hooknum, skb, in,
out, loginfo, prefix);
if (in != NULL)
dump_ipv4_mac_header(m, loginfo, skb);
dump_ipv4_packet(m, loginfo, skb, 0);
nf_log_buf_close(m);
}
static struct nf_logger nf_ip_logger __read_mostly = {
.name = "nf_log_ipv4",
.type = NF_LOG_TYPE_LOG,
.logfn = nf_log_ip_packet,
.me = THIS_MODULE,
};
static int __net_init nf_log_ipv4_net_init(struct net *net)
{
nf_log_set(net, NFPROTO_IPV4, &nf_ip_logger);
return 0;
}
static void __net_exit nf_log_ipv4_net_exit(struct net *net)
{
nf_log_unset(net, &nf_ip_logger);
}
static struct pernet_operations nf_log_ipv4_net_ops = {
.init = nf_log_ipv4_net_init,
.exit = nf_log_ipv4_net_exit,
};
static int __init nf_log_ipv4_init(void)
{
int ret;
ret = register_pernet_subsys(&nf_log_ipv4_net_ops);
if (ret < 0)
return ret;
nf_log_register(NFPROTO_IPV4, &nf_ip_logger);
return 0;
}
static void __exit nf_log_ipv4_exit(void)
{
unregister_pernet_subsys(&nf_log_ipv4_net_ops);
nf_log_unregister(&nf_ip_logger);
}
module_init(nf_log_ipv4_init);
module_exit(nf_log_ipv4_exit);
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("Netfilter IPv4 packet logging");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NF_LOGGER(AF_INET, 0);


@ -154,6 +154,7 @@ static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
htons(oldlen), htons(datalen), 1);
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
struct nf_nat_range *range)
{
@ -169,6 +170,7 @@ static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
return 0;
}
#endif
static const struct nf_nat_l3proto nf_nat_l3proto_ipv4 = {
.l3proto = NFPROTO_IPV4,
@ -177,7 +179,9 @@ static const struct nf_nat_l3proto nf_nat_l3proto_ipv4 = {
.manip_pkt = nf_nat_ipv4_manip_pkt,
.csum_update = nf_nat_ipv4_csum_update,
.csum_recalc = nf_nat_ipv4_csum_recalc,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.nlattr_to_range = nf_nat_ipv4_nlattr_to_range,
#endif
#ifdef CONFIG_XFRM
.decode_session = nf_nat_ipv4_decode_session,
#endif


@ -124,7 +124,7 @@ static const struct nf_nat_l4proto gre = {
.manip_pkt = gre_manip_pkt,
.in_range = nf_nat_l4proto_in_range,
.unique_tuple = gre_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
#endif
};


@ -77,7 +77,7 @@ const struct nf_nat_l4proto nf_nat_l4proto_icmp = {
.manip_pkt = icmp_manip_pkt,
.in_range = icmp_in_range,
.unique_tuple = icmp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
#endif
};


@ -55,6 +55,11 @@ config NFT_REJECT_IPV6
default NFT_REJECT
tristate
config NF_LOG_IPV6
tristate "IPv6 packet logging"
depends on NETFILTER_ADVANCED
select NF_LOG_COMMON
config IP6_NF_IPTABLES
tristate "IP6 tables support (required for filtering)"
depends on INET && IPV6

View file

@ -23,6 +23,9 @@ obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o
nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
# logging
obj-$(CONFIG_NF_LOG_IPV6) += nf_log_ipv6.o
# nf_tables
obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o

View file

@ -0,0 +1,417 @@
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/route.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/xt_LOG.h>
#include <net/netfilter/nf_log.h>
static struct nf_loginfo default_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
.level = 5,
.logflags = NF_LOG_MASK,
},
},
};
/* One level of recursion won't kill us */
static void dump_ipv6_packet(struct nf_log_buf *m,
const struct nf_loginfo *info,
const struct sk_buff *skb, unsigned int ip6hoff,
int recurse)
{
u_int8_t currenthdr;
int fragment;
struct ipv6hdr _ip6h;
const struct ipv6hdr *ih;
unsigned int ptr;
unsigned int hdrlen = 0;
unsigned int logflags;
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
else
logflags = NF_LOG_MASK;
ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
if (ih == NULL) {
nf_log_buf_add(m, "TRUNCATED");
return;
}
/* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
nf_log_buf_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);
/* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
nf_log_buf_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
(ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
ih->hop_limit,
(ntohl(*(__be32 *)ih) & 0x000fffff));
fragment = 0;
ptr = ip6hoff + sizeof(struct ipv6hdr);
currenthdr = ih->nexthdr;
while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
struct ipv6_opt_hdr _hdr;
const struct ipv6_opt_hdr *hp;
hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
if (hp == NULL) {
nf_log_buf_add(m, "TRUNCATED");
return;
}
/* Max length: 48 "OPT (...) " */
if (logflags & XT_LOG_IPOPT)
nf_log_buf_add(m, "OPT ( ");
switch (currenthdr) {
case IPPROTO_FRAGMENT: {
struct frag_hdr _fhdr;
const struct frag_hdr *fh;
nf_log_buf_add(m, "FRAG:");
fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
&_fhdr);
if (fh == NULL) {
nf_log_buf_add(m, "TRUNCATED ");
return;
}
/* Max length: 6 "65535 " */
nf_log_buf_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);
/* Max length: 11 "INCOMPLETE " */
if (fh->frag_off & htons(0x0001))
nf_log_buf_add(m, "INCOMPLETE ");
nf_log_buf_add(m, "ID:%08x ",
ntohl(fh->identification));
if (ntohs(fh->frag_off) & 0xFFF8)
fragment = 1;
hdrlen = 8;
break;
}
case IPPROTO_DSTOPTS:
case IPPROTO_ROUTING:
case IPPROTO_HOPOPTS:
if (fragment) {
if (logflags & XT_LOG_IPOPT)
nf_log_buf_add(m, ")");
return;
}
hdrlen = ipv6_optlen(hp);
break;
/* Max Length */
case IPPROTO_AH:
if (logflags & XT_LOG_IPOPT) {
struct ip_auth_hdr _ahdr;
const struct ip_auth_hdr *ah;
/* Max length: 3 "AH " */
nf_log_buf_add(m, "AH ");
if (fragment) {
nf_log_buf_add(m, ")");
return;
}
ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
&_ahdr);
if (ah == NULL) {
/*
* Max length: 26 "INCOMPLETE [65535
* bytes] )"
*/
nf_log_buf_add(m, "INCOMPLETE [%u bytes] )",
skb->len - ptr);
return;
}
/* Length: 15 "SPI=0xF1234567 */
nf_log_buf_add(m, "SPI=0x%x ", ntohl(ah->spi));
}
hdrlen = (hp->hdrlen+2)<<2;
break;
case IPPROTO_ESP:
if (logflags & XT_LOG_IPOPT) {
struct ip_esp_hdr _esph;
const struct ip_esp_hdr *eh;
/* Max length: 4 "ESP " */
nf_log_buf_add(m, "ESP ");
if (fragment) {
nf_log_buf_add(m, ")");
return;
}
/*
* Max length: 26 "INCOMPLETE [65535 bytes] )"
*/
eh = skb_header_pointer(skb, ptr, sizeof(_esph),
&_esph);
if (eh == NULL) {
nf_log_buf_add(m, "INCOMPLETE [%u bytes] )",
skb->len - ptr);
return;
}
/* Length: 16 "SPI=0xF1234567 )" */
nf_log_buf_add(m, "SPI=0x%x )",
ntohl(eh->spi));
}
return;
default:
/* Max length: 20 "Unknown Ext Hdr 255" */
nf_log_buf_add(m, "Unknown Ext Hdr %u", currenthdr);
return;
}
if (logflags & XT_LOG_IPOPT)
nf_log_buf_add(m, ") ");
currenthdr = hp->nexthdr;
ptr += hdrlen;
}
switch (currenthdr) {
case IPPROTO_TCP:
if (nf_log_dump_tcp_header(m, skb, currenthdr, fragment,
ptr, logflags))
return;
break;
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
if (nf_log_dump_udp_header(m, skb, currenthdr, fragment, ptr))
return;
break;
case IPPROTO_ICMPV6: {
struct icmp6hdr _icmp6h;
const struct icmp6hdr *ic;
/* Max length: 13 "PROTO=ICMPv6 " */
nf_log_buf_add(m, "PROTO=ICMPv6 ");
if (fragment)
break;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
if (ic == NULL) {
nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
skb->len - ptr);
return;
}
/* Max length: 18 "TYPE=255 CODE=255 " */
nf_log_buf_add(m, "TYPE=%u CODE=%u ",
ic->icmp6_type, ic->icmp6_code);
switch (ic->icmp6_type) {
case ICMPV6_ECHO_REQUEST:
case ICMPV6_ECHO_REPLY:
/* Max length: 19 "ID=65535 SEQ=65535 " */
nf_log_buf_add(m, "ID=%u SEQ=%u ",
ntohs(ic->icmp6_identifier),
ntohs(ic->icmp6_sequence));
break;
case ICMPV6_MGM_QUERY:
case ICMPV6_MGM_REPORT:
case ICMPV6_MGM_REDUCTION:
break;
case ICMPV6_PARAMPROB:
/* Max length: 17 "POINTER=ffffffff " */
nf_log_buf_add(m, "POINTER=%08x ",
ntohl(ic->icmp6_pointer));
/* Fall through */
case ICMPV6_DEST_UNREACH:
case ICMPV6_PKT_TOOBIG:
case ICMPV6_TIME_EXCEED:
/* Max length: 3+maxlen */
if (recurse) {
nf_log_buf_add(m, "[");
dump_ipv6_packet(m, info, skb,
ptr + sizeof(_icmp6h), 0);
nf_log_buf_add(m, "] ");
}
/* Max length: 10 "MTU=65535 " */
if (ic->icmp6_type == ICMPV6_PKT_TOOBIG) {
nf_log_buf_add(m, "MTU=%u ",
ntohl(ic->icmp6_mtu));
}
}
break;
}
/* Max length: 10 "PROTO=255 " */
default:
nf_log_buf_add(m, "PROTO=%u ", currenthdr);
}
/* Max length: 15 "UID=4294967295 " */
if ((logflags & XT_LOG_UID) && recurse)
nf_log_dump_sk_uid_gid(m, skb->sk);
/* Max length: 16 "MARK=0xFFFFFFFF " */
if (recurse && skb->mark)
nf_log_buf_add(m, "MARK=0x%x ", skb->mark);
}
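The extension-header loop above keeps advancing ptr by each header's own length until it reaches an upper-layer protocol. Below is a minimal user-space sketch of that traversal, covering only the generic option layout (length field in 8-octet units) plus the fixed-size fragment header; the sample packet bytes and constants are made up for illustration.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* toy next-header values, a subset of what dump_ipv6_packet() handles */
#define NH_HOPOPTS  0
#define NH_FRAGMENT 44
#define NH_TCP      6

/* generic option header: next header + length in 8-octet units (minus 1) */
struct opt_hdr { uint8_t nexthdr; uint8_t hdrlen; };

static int is_ext_hdr(uint8_t nh)
{
	return nh == NH_HOPOPTS || nh == NH_FRAGMENT;
}

static void walk(const uint8_t *pkt, size_t len, uint8_t first_nh)
{
	uint8_t nh = first_nh;
	size_t ptr = 0;

	while (is_ext_hdr(nh) && ptr + sizeof(struct opt_hdr) <= len) {
		const struct opt_hdr *hp = (const struct opt_hdr *)(pkt + ptr);
		size_t hdrlen;

		if (nh == NH_FRAGMENT)
			hdrlen = 8;                     /* fragment header is fixed */
		else
			hdrlen = (hp->hdrlen + 1) * 8;  /* ipv6_optlen() equivalent */

		printf("ext hdr %u, %zu bytes at offset %zu\n", nh, hdrlen, ptr);
		nh = hp->nexthdr;
		ptr += hdrlen;
	}
	printf("upper layer proto %u at offset %zu\n", nh, ptr);
}

int main(void)
{
	/* hop-by-hop (8 bytes) -> fragment (8 bytes) -> TCP; padding is dummy */
	uint8_t pkt[16] = { NH_FRAGMENT, 0, 0, 0, 0, 0, 0, 0,
			    NH_TCP,      0, 0, 0, 0, 0, 0, 0 };

	walk(pkt, sizeof(pkt), NH_HOPOPTS);
	return 0;
}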
static void dump_ipv6_mac_header(struct nf_log_buf *m,
const struct nf_loginfo *info,
const struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
unsigned int logflags = 0;
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
if (!(logflags & XT_LOG_MACDECODE))
goto fallback;
switch (dev->type) {
case ARPHRD_ETHER:
nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
ntohs(eth_hdr(skb)->h_proto));
return;
default:
break;
}
fallback:
nf_log_buf_add(m, "MAC=");
if (dev->hard_header_len &&
skb->mac_header != skb->network_header) {
const unsigned char *p = skb_mac_header(skb);
unsigned int len = dev->hard_header_len;
unsigned int i;
if (dev->type == ARPHRD_SIT) {
p -= ETH_HLEN;
if (p < skb->head)
p = NULL;
}
if (p != NULL) {
nf_log_buf_add(m, "%02x", *p++);
for (i = 1; i < len; i++)
nf_log_buf_add(m, ":%02x", *p++);
}
nf_log_buf_add(m, " ");
if (dev->type == ARPHRD_SIT) {
const struct iphdr *iph =
(struct iphdr *)skb_mac_header(skb);
nf_log_buf_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr,
&iph->daddr);
}
} else {
nf_log_buf_add(m, " ");
}
}
static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix)
{
struct nf_log_buf *m;
/* FIXME: Disabled from containers until syslog ns is supported */
if (!net_eq(net, &init_net))
return;
m = nf_log_buf_open();
if (!loginfo)
loginfo = &default_loginfo;
nf_log_dump_packet_common(m, pf, hooknum, skb, in, out,
loginfo, prefix);
if (in != NULL)
dump_ipv6_mac_header(m, loginfo, skb);
dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1);
nf_log_buf_close(m);
}
static struct nf_logger nf_ip6_logger __read_mostly = {
.name = "nf_log_ipv6",
.type = NF_LOG_TYPE_LOG,
.logfn = nf_log_ip6_packet,
.me = THIS_MODULE,
};
static int __net_init nf_log_ipv6_net_init(struct net *net)
{
nf_log_set(net, NFPROTO_IPV6, &nf_ip6_logger);
return 0;
}
static void __net_exit nf_log_ipv6_net_exit(struct net *net)
{
nf_log_unset(net, &nf_ip6_logger);
}
static struct pernet_operations nf_log_ipv6_net_ops = {
.init = nf_log_ipv6_net_init,
.exit = nf_log_ipv6_net_exit,
};
static int __init nf_log_ipv6_init(void)
{
int ret;
ret = register_pernet_subsys(&nf_log_ipv6_net_ops);
if (ret < 0)
return ret;
nf_log_register(NFPROTO_IPV6, &nf_ip6_logger);
return 0;
}
static void __exit nf_log_ipv6_exit(void)
{
unregister_pernet_subsys(&nf_log_ipv6_net_ops);
nf_log_unregister(&nf_ip6_logger);
}
module_init(nf_log_ipv6_init);
module_exit(nf_log_ipv6_exit);
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("Netfilter IPv4 packet logging");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NF_LOGGER(AF_INET6, 0);

View file

@ -158,6 +158,7 @@ static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
htons(oldlen), htons(datalen), 1);
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
struct nf_nat_range *range)
{
@ -175,6 +176,7 @@ static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
return 0;
}
#endif
static const struct nf_nat_l3proto nf_nat_l3proto_ipv6 = {
.l3proto = NFPROTO_IPV6,
@ -183,7 +185,9 @@ static const struct nf_nat_l3proto nf_nat_l3proto_ipv6 = {
.manip_pkt = nf_nat_ipv6_manip_pkt,
.csum_update = nf_nat_ipv6_csum_update,
.csum_recalc = nf_nat_ipv6_csum_recalc,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.nlattr_to_range = nf_nat_ipv6_nlattr_to_range,
#endif
#ifdef CONFIG_XFRM
.decode_session = nf_nat_ipv6_decode_session,
#endif

View file

@ -46,6 +46,9 @@ config NF_CONNTRACK
To compile it as a module, choose M here. If unsure, say N.
config NF_LOG_COMMON
tristate
if NF_CONNTRACK
config NF_CONNTRACK_MARK
@ -744,6 +747,7 @@ config NETFILTER_XT_TARGET_LED
config NETFILTER_XT_TARGET_LOG
tristate "LOG target support"
depends on NF_LOG_IPV4 && NF_LOG_IPV6
default m if NETFILTER_ADVANCED=n
help
This option adds a `LOG' target, which allows you to create rules in

View file

@ -47,6 +47,9 @@ obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o
nf_nat-y := nf_nat_core.o nf_nat_proto_unknown.o nf_nat_proto_common.o \
nf_nat_proto_udp.o nf_nat_proto_tcp.o nf_nat_helper.o
# generic transport layer logging
obj-$(CONFIG_NF_LOG_COMMON) += nf_log_common.o
obj-$(CONFIG_NF_NAT) += nf_nat.o
# NAT protocols (nf_nat)

View file

@ -1806,92 +1806,6 @@ static struct ctl_table vs_vars[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif
#if 0
{
.procname = "timeout_established",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_ESTABLISHED],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_synsent",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_SENT],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_synrecv",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_RECV],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_finwait",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_FIN_WAIT],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_timewait",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_TIME_WAIT],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_close",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_closewait",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE_WAIT],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_lastack",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_LAST_ACK],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_listen",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_LISTEN],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_synack",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_SYNACK],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_udp",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_UDP],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "timeout_icmp",
.data = &vs_timeout_table_dos.timeout[IP_VS_S_ICMP],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
#endif
{ }
};

View file

@ -886,8 +886,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark);
rcu_read_unlock();
if (!cp) {
if (param->pe_data)
kfree(param->pe_data);
kfree(param->pe_data);
IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
return;
}

View file

@ -352,40 +352,6 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
local_bh_enable();
}
static void death_by_event(unsigned long ul_conntrack)
{
struct nf_conn *ct = (void *)ul_conntrack;
struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
BUG_ON(ecache == NULL);
if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
/* bad luck, let's retry again */
ecache->timeout.expires = jiffies +
(prandom_u32() % net->ct.sysctl_events_retry_timeout);
add_timer(&ecache->timeout);
return;
}
/* we've got the event delivered, now it's dying */
set_bit(IPS_DYING_BIT, &ct->status);
nf_ct_put(ct);
}
static void nf_ct_dying_timeout(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
BUG_ON(ecache == NULL);
/* set a new timer to retry event delivery */
setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
ecache->timeout.expires = jiffies +
(prandom_u32() % net->ct.sysctl_events_retry_timeout);
add_timer(&ecache->timeout);
}
bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
struct nf_conn_tstamp *tstamp;
@ -394,15 +360,20 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
if (tstamp && tstamp->stop == 0)
tstamp->stop = ktime_to_ns(ktime_get_real());
if (!nf_ct_is_dying(ct) &&
unlikely(nf_conntrack_event_report(IPCT_DESTROY, ct,
portid, report) < 0)) {
if (nf_ct_is_dying(ct))
goto delete;
if (nf_conntrack_event_report(IPCT_DESTROY, ct,
portid, report) < 0) {
/* destroy event was not delivered */
nf_ct_delete_from_lists(ct);
nf_ct_dying_timeout(ct);
nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
return false;
}
nf_conntrack_ecache_work(nf_ct_net(ct));
set_bit(IPS_DYING_BIT, &ct->status);
delete:
nf_ct_delete_from_lists(ct);
nf_ct_put(ct);
return true;
@ -1464,26 +1435,6 @@ void nf_conntrack_flush_report(struct net *net, u32 portid, int report)
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
static void nf_ct_release_dying_list(struct net *net)
{
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct hlist_nulls_node *n;
int cpu;
for_each_possible_cpu(cpu) {
struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
spin_lock_bh(&pcpu->lock);
hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
/* never fails to remove them, no listeners at this point */
nf_ct_kill(ct);
}
spin_unlock_bh(&pcpu->lock);
}
}
static int untrack_refs(void)
{
int cnt = 0, cpu;
@ -1548,7 +1499,6 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
busy = 0;
list_for_each_entry(net, net_exit_list, exit_list) {
nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
nf_ct_release_dying_list(net);
if (atomic_read(&net->ct.count) != 0)
busy = 1;
}

View file

@ -29,6 +29,90 @@
static DEFINE_MUTEX(nf_ct_ecache_mutex);
#define ECACHE_RETRY_WAIT (HZ/10)
enum retry_state {
STATE_CONGESTED,
STATE_RESTART,
STATE_DONE,
};
static enum retry_state ecache_work_evict_list(struct ct_pcpu *pcpu)
{
struct nf_conn *refs[16];
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
unsigned int evicted = 0;
enum retry_state ret = STATE_DONE;
spin_lock(&pcpu->lock);
hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) {
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
if (nf_ct_is_dying(ct))
continue;
if (nf_conntrack_event(IPCT_DESTROY, ct)) {
ret = STATE_CONGESTED;
break;
}
/* we've got the event delivered, now it's dying */
set_bit(IPS_DYING_BIT, &ct->status);
refs[evicted] = ct;
if (++evicted >= ARRAY_SIZE(refs)) {
ret = STATE_RESTART;
break;
}
}
spin_unlock(&pcpu->lock);
/* can't _put while holding lock */
while (evicted)
nf_ct_put(refs[--evicted]);
return ret;
}
static void ecache_work(struct work_struct *work)
{
struct netns_ct *ctnet =
container_of(work, struct netns_ct, ecache_dwork.work);
int cpu, delay = -1;
struct ct_pcpu *pcpu;
local_bh_disable();
for_each_possible_cpu(cpu) {
enum retry_state ret;
pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu);
ret = ecache_work_evict_list(pcpu);
switch (ret) {
case STATE_CONGESTED:
delay = ECACHE_RETRY_WAIT;
goto out;
case STATE_RESTART:
delay = 0;
break;
case STATE_DONE:
break;
}
}
out:
local_bh_enable();
ctnet->ecache_dwork_pending = delay > 0;
if (delay >= 0)
schedule_delayed_work(&ctnet->ecache_dwork, delay);
}
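ecache_work() replaces the old per-conntrack retry timer: each run sweeps the per-CPU dying lists, and ecache_work_evict_list() stops either when the event listener is congested (back off for ECACHE_RETRY_WAIT) or after a batch of 16 entries (reschedule immediately). The user-space sketch below reproduces only that control flow; event delivery, locking and the per-CPU lists are stubbed out, and the names and numbers are illustrative.

#include <stdio.h>

enum retry_state { STATE_CONGESTED, STATE_RESTART, STATE_DONE };

#define BATCH 16

/* stand-in for nf_conntrack_event(IPCT_DESTROY, ct); a real listener may be
 * congested, in which case the kernel code returns STATE_CONGESTED and the
 * work is rescheduled after ECACHE_RETRY_WAIT. Here delivery always succeeds
 * so the batching path is what gets exercised. */
static int deliver_event(int id)
{
	(void)id;
	return 0;
}

/* sweep one dying list, mirroring ecache_work_evict_list(): stop early after
 * a full batch so a single run never holds the lock for too long. */
static enum retry_state evict_list(int *pending)
{
	int evicted = 0;

	while (*pending > 0) {
		if (deliver_event(*pending))
			return STATE_CONGESTED;
		(*pending)--;                   /* delivered: entry may now die */
		if (++evicted >= BATCH)
			return STATE_RESTART;   /* rerun the work immediately */
	}
	return STATE_DONE;
}

int main(void)
{
	int pending = 40;                       /* undelivered destroy events */
	int delay;

	/* mirrors ecache_work(): delay 0 on RESTART, a back-off on CONGESTED,
	 * and no reschedule at all on DONE. */
	do {
		switch (evict_list(&pending)) {
		case STATE_CONGESTED: delay = 100; break;
		case STATE_RESTART:   delay = 0;   break;
		default:              delay = -1;  break;
		}
		printf("pending=%d, reschedule delay=%d\n", pending, delay);
	} while (delay == 0);

	return 0;
}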
/* deliver cached events and clear cache entry - must be called with locally
* disabled softirqs */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
@ -157,7 +241,6 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
#define NF_CT_EVENTS_DEFAULT 1
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
static int nf_ct_events_retry_timeout __read_mostly = 15*HZ;
#ifdef CONFIG_SYSCTL
static struct ctl_table event_sysctl_table[] = {
@ -168,13 +251,6 @@ static struct ctl_table event_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "nf_conntrack_events_retry_timeout",
.data = &init_net.ct.sysctl_events_retry_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{}
};
#endif /* CONFIG_SYSCTL */
@ -196,7 +272,6 @@ static int nf_conntrack_event_init_sysctl(struct net *net)
goto out;
table[0].data = &net->ct.sysctl_events;
table[1].data = &net->ct.sysctl_events_retry_timeout;
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns)
@ -238,12 +313,13 @@ static void nf_conntrack_event_fini_sysctl(struct net *net)
int nf_conntrack_ecache_pernet_init(struct net *net)
{
net->ct.sysctl_events = nf_ct_events;
net->ct.sysctl_events_retry_timeout = nf_ct_events_retry_timeout;
INIT_DELAYED_WORK(&net->ct.ecache_dwork, ecache_work);
return nf_conntrack_event_init_sysctl(net);
}
void nf_conntrack_ecache_pernet_fini(struct net *net)
{
cancel_delayed_work_sync(&net->ct.ecache_dwork);
nf_conntrack_event_fini_sysctl(net);
}

View file

@ -745,8 +745,7 @@ static int ctnetlink_done(struct netlink_callback *cb)
{
if (cb->args[1])
nf_ct_put((struct nf_conn *)cb->args[1]);
if (cb->data)
kfree(cb->data);
kfree(cb->data);
return 0;
}

View file

@ -16,16 +16,22 @@
#define NF_LOG_PREFIXLEN 128
#define NFLOGGER_NAME_LEN 64
static struct list_head nf_loggers_l[NFPROTO_NUMPROTO] __read_mostly;
static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
static DEFINE_MUTEX(nf_log_mutex);
static struct nf_logger *__find_logger(int pf, const char *str_logger)
{
struct nf_logger *t;
struct nf_logger *log;
int i;
list_for_each_entry(t, &nf_loggers_l[pf], list[pf]) {
if (!strnicmp(str_logger, t->name, strlen(t->name)))
return t;
for (i = 0; i < NF_LOG_TYPE_MAX; i++) {
if (loggers[pf][i] == NULL)
continue;
log = rcu_dereference_protected(loggers[pf][i],
lockdep_is_held(&nf_log_mutex));
if (!strnicmp(str_logger, log->name, strlen(log->name)))
return log;
}
return NULL;
@ -73,17 +79,14 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
if (pf >= ARRAY_SIZE(init_net.nf.nf_loggers))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(logger->list); i++)
INIT_LIST_HEAD(&logger->list[i]);
mutex_lock(&nf_log_mutex);
if (pf == NFPROTO_UNSPEC) {
for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
list_add_tail(&(logger->list[i]), &(nf_loggers_l[i]));
rcu_assign_pointer(loggers[i][logger->type], logger);
} else {
/* register at end of list to honor first register win */
list_add_tail(&logger->list[pf], &nf_loggers_l[pf]);
rcu_assign_pointer(loggers[pf][logger->type], logger);
}
mutex_unlock(&nf_log_mutex);
@ -98,7 +101,7 @@ void nf_log_unregister(struct nf_logger *logger)
mutex_lock(&nf_log_mutex);
for (i = 0; i < NFPROTO_NUMPROTO; i++)
list_del(&logger->list[i]);
RCU_INIT_POINTER(loggers[i][logger->type], NULL);
mutex_unlock(&nf_log_mutex);
}
EXPORT_SYMBOL(nf_log_unregister);
@ -129,6 +132,48 @@ void nf_log_unbind_pf(struct net *net, u_int8_t pf)
}
EXPORT_SYMBOL(nf_log_unbind_pf);
void nf_logger_request_module(int pf, enum nf_log_type type)
{
if (loggers[pf][type] == NULL)
request_module("nf-logger-%u-%u", pf, type);
}
EXPORT_SYMBOL_GPL(nf_logger_request_module);
int nf_logger_find_get(int pf, enum nf_log_type type)
{
struct nf_logger *logger;
int ret = -ENOENT;
logger = loggers[pf][type];
if (logger == NULL)
request_module("nf-logger-%u-%u", pf, type);
rcu_read_lock();
logger = rcu_dereference(loggers[pf][type]);
if (logger == NULL)
goto out;
if (logger && try_module_get(logger->me))
ret = 0;
out:
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(nf_logger_find_get);
void nf_logger_put(int pf, enum nf_log_type type)
{
struct nf_logger *logger;
BUG_ON(loggers[pf][type] == NULL);
rcu_read_lock();
logger = rcu_dereference(loggers[pf][type]);
module_put(logger->me);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_logger_put);
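nf_logger_find_get() and nf_logger_put() let rule-level users (nft_log and xt_LOG, further down) pin the backend logger module for as long as a rule references it. The sketch below is a user-space reduction of that lookup-then-reference pattern; the refcount field stands in for try_module_get()/module_put(), the table layout is simplified, and none of these names are the kernel API.

#include <stdio.h>

#define FAMILIES  3
#define LOG_TYPES 2

struct toy_logger {
	const char *name;
	int refcnt;             /* stand-in for the owning module's refcount */
};

static struct toy_logger *registry[FAMILIES][LOG_TYPES];

static struct toy_logger ring = { "nf_log_ipv4", 0 };

/* like nf_logger_find_get(): -1 if no logger of that flavour is loaded */
static int logger_find_get(int family, int type)
{
	struct toy_logger *l = registry[family][type];

	if (!l)
		return -1;      /* the kernel would try request_module() first */
	l->refcnt++;            /* ~ try_module_get() */
	return 0;
}

/* like nf_logger_put() */
static void logger_put(int family, int type)
{
	struct toy_logger *l = registry[family][type];

	if (l)
		l->refcnt--;    /* ~ module_put() */
}

int main(void)
{
	registry[2][0] = &ring;                 /* family 2 ~ AF_INET, type 0 ~ LOG */

	if (logger_find_get(2, 0) == 0)         /* e.g. at rule insertion time */
		printf("pinned %s, refcnt=%d\n", ring.name, ring.refcnt);

	if (logger_find_get(2, 1) < 0)          /* no netlink logger registered */
		printf("type 1 not available\n");

	logger_put(2, 0);                       /* rule destroyed */
	printf("released, refcnt=%d\n", ring.refcnt);
	return 0;
}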
void nf_log_packet(struct net *net,
u_int8_t pf,
unsigned int hooknum,
@ -143,7 +188,11 @@ void nf_log_packet(struct net *net,
const struct nf_logger *logger;
rcu_read_lock();
logger = rcu_dereference(net->nf.nf_loggers[pf]);
if (loginfo != NULL)
logger = rcu_dereference(loggers[pf][loginfo->type]);
else
logger = rcu_dereference(net->nf.nf_loggers[pf]);
if (logger) {
va_start(args, fmt);
vsnprintf(prefix, sizeof(prefix), fmt, args);
@ -154,6 +203,63 @@ void nf_log_packet(struct net *net,
}
EXPORT_SYMBOL(nf_log_packet);
#define S_SIZE (1024 - (sizeof(unsigned int) + 1))
struct nf_log_buf {
unsigned int count;
char buf[S_SIZE + 1];
};
static struct nf_log_buf emergency, *emergency_ptr = &emergency;
__printf(2, 3) int nf_log_buf_add(struct nf_log_buf *m, const char *f, ...)
{
va_list args;
int len;
if (likely(m->count < S_SIZE)) {
va_start(args, f);
len = vsnprintf(m->buf + m->count, S_SIZE - m->count, f, args);
va_end(args);
if (likely(m->count + len < S_SIZE)) {
m->count += len;
return 0;
}
}
m->count = S_SIZE;
printk_once(KERN_ERR KBUILD_MODNAME " please increase S_SIZE\n");
return -1;
}
EXPORT_SYMBOL_GPL(nf_log_buf_add);
struct nf_log_buf *nf_log_buf_open(void)
{
struct nf_log_buf *m = kmalloc(sizeof(*m), GFP_ATOMIC);
if (unlikely(!m)) {
local_bh_disable();
do {
m = xchg(&emergency_ptr, NULL);
} while (!m);
}
m->count = 0;
return m;
}
EXPORT_SYMBOL_GPL(nf_log_buf_open);
void nf_log_buf_close(struct nf_log_buf *m)
{
m->buf[m->count] = 0;
printk("%s\n", m->buf);
if (likely(m != &emergency))
kfree(m);
else {
emergency_ptr = m;
local_bh_enable();
}
}
EXPORT_SYMBOL_GPL(nf_log_buf_close);
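These helpers move the old xt_log.h sbuff idea into the core: the whole log line is formatted into one bounded buffer and emitted with a single printk, so concurrent loggers cannot interleave output within a line. A rough user-space approximation of the append-with-cap behaviour follows; the emergency fallback buffer and bottom-half handling are deliberately left out, and S_SIZE is shrunk so the truncation is visible.

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>

#define S_SIZE 64               /* small on purpose so truncation shows up */

struct log_buf {
	unsigned int count;
	char buf[S_SIZE + 1];
};

static struct log_buf *buf_open(void)
{
	struct log_buf *m = malloc(sizeof(*m));

	if (m)
		m->count = 0;
	return m;
}

/* append, but never past S_SIZE: once full, further adds are ignored
 * (the kernel version also prints a one-time "please increase S_SIZE") */
static int buf_add(struct log_buf *m, const char *fmt, ...)
{
	va_list args;
	int len;

	if (m->count < S_SIZE) {
		va_start(args, fmt);
		len = vsnprintf(m->buf + m->count, S_SIZE - m->count, fmt, args);
		va_end(args);
		if (len >= 0 && m->count + (unsigned int)len < S_SIZE) {
			m->count += len;
			return 0;
		}
	}
	m->count = S_SIZE;
	return -1;
}

/* emit the whole line at once, like the single printk in nf_log_buf_close() */
static void buf_close(struct log_buf *m)
{
	m->buf[m->count] = '\0';
	printf("%s\n", m->buf);
	free(m);
}

int main(void)
{
	struct log_buf *m = buf_open();

	if (!m)
		return 1;
	buf_add(m, "IN=%s OUT=%s ", "eth0", "");
	buf_add(m, "SRC=%s DST=%s ", "192.0.2.1", "198.51.100.2");
	buf_add(m, "PROTO=%s SPT=%u DPT=%u ", "TCP", 12345u, 80u);
	buf_close(m);
	return 0;
}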
#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
@ -188,8 +294,7 @@ static int seq_show(struct seq_file *s, void *v)
{
loff_t *pos = v;
const struct nf_logger *logger;
struct nf_logger *t;
int ret;
int i, ret;
struct net *net = seq_file_net(s);
logger = rcu_dereference_protected(net->nf.nf_loggers[*pos],
@ -203,11 +308,16 @@ static int seq_show(struct seq_file *s, void *v)
if (ret < 0)
return ret;
list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) {
ret = seq_printf(s, "%s", t->name);
for (i = 0; i < NF_LOG_TYPE_MAX; i++) {
if (loggers[*pos][i] == NULL)
continue;
logger = rcu_dereference_protected(loggers[*pos][i],
lockdep_is_held(&nf_log_mutex));
ret = seq_printf(s, "%s", logger->name);
if (ret < 0)
return ret;
if (&t->list[*pos] != nf_loggers_l[*pos].prev) {
if (i == 0 && loggers[*pos][i + 1] != NULL) {
ret = seq_printf(s, ",");
if (ret < 0)
return ret;
@ -389,14 +499,5 @@ static struct pernet_operations nf_log_net_ops = {
int __init netfilter_log_init(void)
{
int i, ret;
ret = register_pernet_subsys(&nf_log_net_ops);
if (ret < 0)
return ret;
for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
INIT_LIST_HEAD(&(nf_loggers_l[i]));
return 0;
return register_pernet_subsys(&nf_log_net_ops);
}

View file

@ -0,0 +1,187 @@
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/route.h>
#include <linux/netfilter.h>
#include <linux/netfilter/xt_LOG.h>
#include <net/netfilter/nf_log.h>
int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
u8 proto, int fragment, unsigned int offset)
{
struct udphdr _udph;
const struct udphdr *uh;
if (proto == IPPROTO_UDP)
/* Max length: 10 "PROTO=UDP " */
nf_log_buf_add(m, "PROTO=UDP ");
else /* Max length: 14 "PROTO=UDPLITE " */
nf_log_buf_add(m, "PROTO=UDPLITE ");
if (fragment)
goto out;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
if (uh == NULL) {
nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
return 1;
}
/* Max length: 20 "SPT=65535 DPT=65535 " */
nf_log_buf_add(m, "SPT=%u DPT=%u LEN=%u ",
ntohs(uh->source), ntohs(uh->dest), ntohs(uh->len));
out:
return 0;
}
EXPORT_SYMBOL_GPL(nf_log_dump_udp_header);
int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
u8 proto, int fragment, unsigned int offset,
unsigned int logflags)
{
struct tcphdr _tcph;
const struct tcphdr *th;
/* Max length: 10 "PROTO=TCP " */
nf_log_buf_add(m, "PROTO=TCP ");
if (fragment)
return 0;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
if (th == NULL) {
nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
return 1;
}
/* Max length: 20 "SPT=65535 DPT=65535 " */
nf_log_buf_add(m, "SPT=%u DPT=%u ",
ntohs(th->source), ntohs(th->dest));
/* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
if (logflags & XT_LOG_TCPSEQ) {
nf_log_buf_add(m, "SEQ=%u ACK=%u ",
ntohl(th->seq), ntohl(th->ack_seq));
}
/* Max length: 13 "WINDOW=65535 " */
nf_log_buf_add(m, "WINDOW=%u ", ntohs(th->window));
/* Max length: 9 "RES=0x3C " */
nf_log_buf_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) &
TCP_RESERVED_BITS) >> 22));
/* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
if (th->cwr)
nf_log_buf_add(m, "CWR ");
if (th->ece)
nf_log_buf_add(m, "ECE ");
if (th->urg)
nf_log_buf_add(m, "URG ");
if (th->ack)
nf_log_buf_add(m, "ACK ");
if (th->psh)
nf_log_buf_add(m, "PSH ");
if (th->rst)
nf_log_buf_add(m, "RST ");
if (th->syn)
nf_log_buf_add(m, "SYN ");
if (th->fin)
nf_log_buf_add(m, "FIN ");
/* Max length: 11 "URGP=65535 " */
nf_log_buf_add(m, "URGP=%u ", ntohs(th->urg_ptr));
if ((logflags & XT_LOG_TCPOPT) && th->doff*4 > sizeof(struct tcphdr)) {
u_int8_t _opt[60 - sizeof(struct tcphdr)];
const u_int8_t *op;
unsigned int i;
unsigned int optsize = th->doff*4 - sizeof(struct tcphdr);
op = skb_header_pointer(skb, offset + sizeof(struct tcphdr),
optsize, _opt);
if (op == NULL) {
nf_log_buf_add(m, "OPT (TRUNCATED)");
return 1;
}
/* Max length: 127 "OPT (" 15*4*2chars ") " */
nf_log_buf_add(m, "OPT (");
for (i = 0; i < optsize; i++)
nf_log_buf_add(m, "%02X", op[i]);
nf_log_buf_add(m, ") ");
}
return 0;
}
EXPORT_SYMBOL_GPL(nf_log_dump_tcp_header);
void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk)
{
if (!sk || sk->sk_state == TCP_TIME_WAIT)
return;
read_lock_bh(&sk->sk_callback_lock);
if (sk->sk_socket && sk->sk_socket->file) {
const struct cred *cred = sk->sk_socket->file->f_cred;
nf_log_buf_add(m, "UID=%u GID=%u ",
from_kuid_munged(&init_user_ns, cred->fsuid),
from_kgid_munged(&init_user_ns, cred->fsgid));
}
read_unlock_bh(&sk->sk_callback_lock);
}
EXPORT_SYMBOL_GPL(nf_log_dump_sk_uid_gid);
void
nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo, const char *prefix)
{
nf_log_buf_add(m, KERN_SOH "%c%sIN=%s OUT=%s ",
'0' + loginfo->u.log.level, prefix,
in ? in->name : "",
out ? out->name : "");
#ifdef CONFIG_BRIDGE_NETFILTER
if (skb->nf_bridge) {
const struct net_device *physindev;
const struct net_device *physoutdev;
physindev = skb->nf_bridge->physindev;
if (physindev && in != physindev)
nf_log_buf_add(m, "PHYSIN=%s ", physindev->name);
physoutdev = skb->nf_bridge->physoutdev;
if (physoutdev && out != physoutdev)
nf_log_buf_add(m, "PHYSOUT=%s ", physoutdev->name);
}
#endif
}
EXPORT_SYMBOL_GPL(nf_log_dump_packet_common);
static int __init nf_log_common_init(void)
{
return 0;
}
static void __exit nf_log_common_exit(void) {}
module_init(nf_log_common_init);
module_exit(nf_log_common_exit);
MODULE_LICENSE("GPL");

View file

@ -710,7 +710,7 @@ static struct nf_ct_ext_type nat_extend __read_mostly = {
.flags = NF_CT_EXT_F_PREALLOC,
};
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

View file

@ -95,7 +95,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_unique_tuple);
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
struct nf_nat_range *range)
{

View file

@ -78,7 +78,7 @@ static const struct nf_nat_l4proto nf_nat_l4proto_dccp = {
.manip_pkt = dccp_manip_pkt,
.in_range = nf_nat_l4proto_in_range,
.unique_tuple = dccp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
#endif
};

View file

@ -59,7 +59,7 @@ static const struct nf_nat_l4proto nf_nat_l4proto_sctp = {
.manip_pkt = sctp_manip_pkt,
.in_range = nf_nat_l4proto_in_range,
.unique_tuple = sctp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
#endif
};

View file

@ -79,7 +79,7 @@ const struct nf_nat_l4proto nf_nat_l4proto_tcp = {
.manip_pkt = tcp_manip_pkt,
.in_range = nf_nat_l4proto_in_range,
.unique_tuple = tcp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
#endif
};

View file

@ -70,7 +70,7 @@ const struct nf_nat_l4proto nf_nat_l4proto_udp = {
.manip_pkt = udp_manip_pkt,
.in_range = nf_nat_l4proto_in_range,
.unique_tuple = udp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
#endif
};

View file

@ -69,7 +69,7 @@ static const struct nf_nat_l4proto nf_nat_l4proto_udplite = {
.manip_pkt = udplite_manip_pkt,
.in_range = nf_nat_l4proto_in_range,
.unique_tuple = udplite_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
#endif
};

View file

@ -773,6 +773,7 @@ nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
static struct nf_logger nfulnl_logger __read_mostly = {
.name = "nfnetlink_log",
.type = NF_LOG_TYPE_ULOG,
.logfn = &nfulnl_log_packet,
.me = THIS_MODULE,
};
@ -1105,6 +1106,9 @@ MODULE_DESCRIPTION("netfilter userspace logging");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ULOG);
MODULE_ALIAS_NF_LOGGER(AF_INET, 1);
MODULE_ALIAS_NF_LOGGER(AF_INET6, 1);
MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 1);
module_init(nfnetlink_log_init);
module_exit(nfnetlink_log_fini);

View file

@ -1,5 +1,6 @@
/*
* Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
* Copyright (c) 2012-2014 Pablo Neira Ayuso <pablo@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@ -41,6 +42,8 @@ static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
[NFTA_LOG_PREFIX] = { .type = NLA_STRING },
[NFTA_LOG_SNAPLEN] = { .type = NLA_U32 },
[NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 },
[NFTA_LOG_LEVEL] = { .type = NLA_U32 },
[NFTA_LOG_FLAGS] = { .type = NLA_U32 },
};
static int nft_log_init(const struct nft_ctx *ctx,
@ -50,6 +53,7 @@ static int nft_log_init(const struct nft_ctx *ctx,
struct nft_log *priv = nft_expr_priv(expr);
struct nf_loginfo *li = &priv->loginfo;
const struct nlattr *nla;
int ret;
nla = tb[NFTA_LOG_PREFIX];
if (nla != NULL) {
@ -57,30 +61,74 @@ static int nft_log_init(const struct nft_ctx *ctx,
if (priv->prefix == NULL)
return -ENOMEM;
nla_strlcpy(priv->prefix, nla, nla_len(nla) + 1);
} else
} else {
priv->prefix = (char *)nft_log_null_prefix;
li->type = NF_LOG_TYPE_ULOG;
if (tb[NFTA_LOG_GROUP] != NULL)
li->u.ulog.group = ntohs(nla_get_be16(tb[NFTA_LOG_GROUP]));
if (tb[NFTA_LOG_SNAPLEN] != NULL)
li->u.ulog.copy_len = ntohl(nla_get_be32(tb[NFTA_LOG_SNAPLEN]));
if (tb[NFTA_LOG_QTHRESHOLD] != NULL) {
li->u.ulog.qthreshold =
ntohs(nla_get_be16(tb[NFTA_LOG_QTHRESHOLD]));
}
return 0;
li->type = NF_LOG_TYPE_LOG;
if (tb[NFTA_LOG_LEVEL] != NULL &&
tb[NFTA_LOG_GROUP] != NULL)
return -EINVAL;
if (tb[NFTA_LOG_GROUP] != NULL)
li->type = NF_LOG_TYPE_ULOG;
switch (li->type) {
case NF_LOG_TYPE_LOG:
if (tb[NFTA_LOG_LEVEL] != NULL) {
li->u.log.level =
ntohl(nla_get_be32(tb[NFTA_LOG_LEVEL]));
} else {
li->u.log.level = 4;
}
if (tb[NFTA_LOG_FLAGS] != NULL) {
li->u.log.logflags =
ntohl(nla_get_be32(tb[NFTA_LOG_FLAGS]));
}
break;
case NF_LOG_TYPE_ULOG:
li->u.ulog.group = ntohs(nla_get_be16(tb[NFTA_LOG_GROUP]));
if (tb[NFTA_LOG_SNAPLEN] != NULL) {
li->u.ulog.copy_len =
ntohl(nla_get_be32(tb[NFTA_LOG_SNAPLEN]));
}
if (tb[NFTA_LOG_QTHRESHOLD] != NULL) {
li->u.ulog.qthreshold =
ntohs(nla_get_be16(tb[NFTA_LOG_QTHRESHOLD]));
}
break;
}
if (ctx->afi->family == NFPROTO_INET) {
ret = nf_logger_find_get(NFPROTO_IPV4, li->type);
if (ret < 0)
return ret;
ret = nf_logger_find_get(NFPROTO_IPV6, li->type);
if (ret < 0) {
nf_logger_put(NFPROTO_IPV4, li->type);
return ret;
}
return 0;
}
return nf_logger_find_get(ctx->afi->family, li->type);
}
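nft_log_init() now derives the logger flavour from the netlink attributes: NFTA_LOG_GROUP selects NF_LOG_TYPE_ULOG (nfnetlink_log), NFTA_LOG_LEVEL or no attribute at all selects NF_LOG_TYPE_LOG with a default level of 4, and supplying both is rejected. The user-space sketch below captures only that decision table; the attribute struct and helper names are invented for illustration and the loginfo layout is simplified.

#include <stdio.h>

enum log_type { TYPE_LOG, TYPE_ULOG };

struct log_attrs {             /* which optional attributes the rule carried */
	int has_level, level;  /* ~ NFTA_LOG_LEVEL */
	int has_group, group;  /* ~ NFTA_LOG_GROUP */
};

/* mirror of the nft_log_init() decision: group => ULOG, level and group are
 * mutually exclusive, otherwise kernel-ring logging at default level 4 */
static int pick_type(const struct log_attrs *a, enum log_type *type, int *level)
{
	if (a->has_level && a->has_group)
		return -1;                       /* -EINVAL in the kernel */

	if (a->has_group) {
		*type = TYPE_ULOG;
		*level = 0;                      /* unused for ULOG */
	} else {
		*type = TYPE_LOG;
		*level = a->has_level ? a->level : 4;
	}
	return 0;
}

static void show(const char *what, const struct log_attrs *a)
{
	enum log_type t;
	int level;

	if (pick_type(a, &t, &level) < 0)
		printf("%s: rejected (level and group are exclusive)\n", what);
	else
		printf("%s: %s, level=%d\n", what,
		       t == TYPE_ULOG ? "ULOG/netlink" : "LOG/ring buffer", level);
}

int main(void)
{
	struct log_attrs plain   = { 0 };
	struct log_attrs warn    = { .has_level = 1, .level = 4 };
	struct log_attrs nflog   = { .has_group = 1, .group = 2 };
	struct log_attrs invalid = { .has_level = 1, .level = 4,
				     .has_group = 1, .group = 2 };

	show("log",         &plain);
	show("log level 4", &warn);
	show("log group 2", &nflog);
	show("level+group", &invalid);
	return 0;
}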
static void nft_log_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_log *priv = nft_expr_priv(expr);
struct nf_loginfo *li = &priv->loginfo;
if (priv->prefix != nft_log_null_prefix)
kfree(priv->prefix);
if (ctx->afi->family == NFPROTO_INET) {
nf_logger_put(NFPROTO_IPV4, li->type);
nf_logger_put(NFPROTO_IPV6, li->type);
} else {
nf_logger_put(ctx->afi->family, li->type);
}
}
static int nft_log_dump(struct sk_buff *skb, const struct nft_expr *expr)
@ -91,17 +139,33 @@ static int nft_log_dump(struct sk_buff *skb, const struct nft_expr *expr)
if (priv->prefix != nft_log_null_prefix)
if (nla_put_string(skb, NFTA_LOG_PREFIX, priv->prefix))
goto nla_put_failure;
if (li->u.ulog.group)
switch (li->type) {
case NF_LOG_TYPE_LOG:
if (nla_put_be32(skb, NFTA_LOG_LEVEL, htonl(li->u.log.level)))
goto nla_put_failure;
if (li->u.log.logflags) {
if (nla_put_be32(skb, NFTA_LOG_FLAGS,
htonl(li->u.log.logflags)))
goto nla_put_failure;
}
break;
case NF_LOG_TYPE_ULOG:
if (nla_put_be16(skb, NFTA_LOG_GROUP, htons(li->u.ulog.group)))
goto nla_put_failure;
if (li->u.ulog.copy_len)
if (nla_put_be32(skb, NFTA_LOG_SNAPLEN,
htonl(li->u.ulog.copy_len)))
goto nla_put_failure;
if (li->u.ulog.qthreshold)
if (nla_put_be16(skb, NFTA_LOG_QTHRESHOLD,
htons(li->u.ulog.qthreshold)))
goto nla_put_failure;
if (li->u.ulog.copy_len) {
if (nla_put_be32(skb, NFTA_LOG_SNAPLEN,
htonl(li->u.ulog.copy_len)))
goto nla_put_failure;
}
if (li->u.ulog.qthreshold) {
if (nla_put_be16(skb, NFTA_LOG_QTHRESHOLD,
htons(li->u.ulog.qthreshold)))
goto nla_put_failure;
}
break;
}
return 0;
nla_put_failure:

View file

@ -711,28 +711,15 @@ void xt_free_table_info(struct xt_table_info *info)
{
int cpu;
for_each_possible_cpu(cpu) {
if (info->size <= PAGE_SIZE)
kfree(info->entries[cpu]);
else
vfree(info->entries[cpu]);
}
for_each_possible_cpu(cpu)
kvfree(info->entries[cpu]);
if (info->jumpstack != NULL) {
if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
for_each_possible_cpu(cpu)
vfree(info->jumpstack[cpu]);
} else {
for_each_possible_cpu(cpu)
kfree(info->jumpstack[cpu]);
}
for_each_possible_cpu(cpu)
kvfree(info->jumpstack[cpu]);
kvfree(info->jumpstack);
}
if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
vfree(info->jumpstack);
else
kfree(info->jumpstack);
free_percpu(info->stackptr);
kfree(info);

View file

@ -27,806 +27,6 @@
#include <linux/netfilter/xt_LOG.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/xt_log.h>
static struct nf_loginfo default_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
.level = 5,
.logflags = NF_LOG_MASK,
},
},
};
static int dump_udp_header(struct sbuff *m, const struct sk_buff *skb,
u8 proto, int fragment, unsigned int offset)
{
struct udphdr _udph;
const struct udphdr *uh;
if (proto == IPPROTO_UDP)
/* Max length: 10 "PROTO=UDP " */
sb_add(m, "PROTO=UDP ");
else /* Max length: 14 "PROTO=UDPLITE " */
sb_add(m, "PROTO=UDPLITE ");
if (fragment)
goto out;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
if (uh == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
return 1;
}
/* Max length: 20 "SPT=65535 DPT=65535 " */
sb_add(m, "SPT=%u DPT=%u LEN=%u ", ntohs(uh->source), ntohs(uh->dest),
ntohs(uh->len));
out:
return 0;
}
static int dump_tcp_header(struct sbuff *m, const struct sk_buff *skb,
u8 proto, int fragment, unsigned int offset,
unsigned int logflags)
{
struct tcphdr _tcph;
const struct tcphdr *th;
/* Max length: 10 "PROTO=TCP " */
sb_add(m, "PROTO=TCP ");
if (fragment)
return 0;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
if (th == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
return 1;
}
/* Max length: 20 "SPT=65535 DPT=65535 " */
sb_add(m, "SPT=%u DPT=%u ", ntohs(th->source), ntohs(th->dest));
/* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
if (logflags & XT_LOG_TCPSEQ)
sb_add(m, "SEQ=%u ACK=%u ", ntohl(th->seq), ntohl(th->ack_seq));
/* Max length: 13 "WINDOW=65535 " */
sb_add(m, "WINDOW=%u ", ntohs(th->window));
/* Max length: 9 "RES=0x3C " */
sb_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) &
TCP_RESERVED_BITS) >> 22));
/* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
if (th->cwr)
sb_add(m, "CWR ");
if (th->ece)
sb_add(m, "ECE ");
if (th->urg)
sb_add(m, "URG ");
if (th->ack)
sb_add(m, "ACK ");
if (th->psh)
sb_add(m, "PSH ");
if (th->rst)
sb_add(m, "RST ");
if (th->syn)
sb_add(m, "SYN ");
if (th->fin)
sb_add(m, "FIN ");
/* Max length: 11 "URGP=65535 " */
sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));
if ((logflags & XT_LOG_TCPOPT) && th->doff*4 > sizeof(struct tcphdr)) {
u_int8_t _opt[60 - sizeof(struct tcphdr)];
const u_int8_t *op;
unsigned int i;
unsigned int optsize = th->doff*4 - sizeof(struct tcphdr);
op = skb_header_pointer(skb, offset + sizeof(struct tcphdr),
optsize, _opt);
if (op == NULL) {
sb_add(m, "OPT (TRUNCATED)");
return 1;
}
/* Max length: 127 "OPT (" 15*4*2chars ") " */
sb_add(m, "OPT (");
for (i = 0; i < optsize; i++)
sb_add(m, "%02X", op[i]);
sb_add(m, ") ");
}
return 0;
}
static void dump_sk_uid_gid(struct sbuff *m, struct sock *sk)
{
if (!sk || sk->sk_state == TCP_TIME_WAIT)
return;
read_lock_bh(&sk->sk_callback_lock);
if (sk->sk_socket && sk->sk_socket->file) {
const struct cred *cred = sk->sk_socket->file->f_cred;
sb_add(m, "UID=%u GID=%u ",
from_kuid_munged(&init_user_ns, cred->fsuid),
from_kgid_munged(&init_user_ns, cred->fsgid));
}
read_unlock_bh(&sk->sk_callback_lock);
}
/* One level of recursion won't kill us */
static void dump_ipv4_packet(struct sbuff *m,
const struct nf_loginfo *info,
const struct sk_buff *skb,
unsigned int iphoff)
{
struct iphdr _iph;
const struct iphdr *ih;
unsigned int logflags;
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
else
logflags = NF_LOG_MASK;
ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
if (ih == NULL) {
sb_add(m, "TRUNCATED");
return;
}
/* Important fields:
* TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */
/* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */
sb_add(m, "SRC=%pI4 DST=%pI4 ",
&ih->saddr, &ih->daddr);
/* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
sb_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK,
ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id));
/* Max length: 6 "CE DF MF " */
if (ntohs(ih->frag_off) & IP_CE)
sb_add(m, "CE ");
if (ntohs(ih->frag_off) & IP_DF)
sb_add(m, "DF ");
if (ntohs(ih->frag_off) & IP_MF)
sb_add(m, "MF ");
/* Max length: 11 "FRAG:65535 " */
if (ntohs(ih->frag_off) & IP_OFFSET)
sb_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
if ((logflags & XT_LOG_IPOPT) &&
ih->ihl * 4 > sizeof(struct iphdr)) {
const unsigned char *op;
unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
unsigned int i, optsize;
optsize = ih->ihl * 4 - sizeof(struct iphdr);
op = skb_header_pointer(skb, iphoff+sizeof(_iph),
optsize, _opt);
if (op == NULL) {
sb_add(m, "TRUNCATED");
return;
}
/* Max length: 127 "OPT (" 15*4*2chars ") " */
sb_add(m, "OPT (");
for (i = 0; i < optsize; i++)
sb_add(m, "%02X", op[i]);
sb_add(m, ") ");
}
switch (ih->protocol) {
case IPPROTO_TCP:
if (dump_tcp_header(m, skb, ih->protocol,
ntohs(ih->frag_off) & IP_OFFSET,
iphoff+ih->ihl*4, logflags))
return;
break;
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
if (dump_udp_header(m, skb, ih->protocol,
ntohs(ih->frag_off) & IP_OFFSET,
iphoff+ih->ihl*4))
return;
break;
case IPPROTO_ICMP: {
struct icmphdr _icmph;
const struct icmphdr *ich;
static const size_t required_len[NR_ICMP_TYPES+1]
= { [ICMP_ECHOREPLY] = 4,
[ICMP_DEST_UNREACH]
= 8 + sizeof(struct iphdr),
[ICMP_SOURCE_QUENCH]
= 8 + sizeof(struct iphdr),
[ICMP_REDIRECT]
= 8 + sizeof(struct iphdr),
[ICMP_ECHO] = 4,
[ICMP_TIME_EXCEEDED]
= 8 + sizeof(struct iphdr),
[ICMP_PARAMETERPROB]
= 8 + sizeof(struct iphdr),
[ICMP_TIMESTAMP] = 20,
[ICMP_TIMESTAMPREPLY] = 20,
[ICMP_ADDRESS] = 12,
[ICMP_ADDRESSREPLY] = 12 };
/* Max length: 11 "PROTO=ICMP " */
sb_add(m, "PROTO=ICMP ");
if (ntohs(ih->frag_off) & IP_OFFSET)
break;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
ich = skb_header_pointer(skb, iphoff + ih->ihl * 4,
sizeof(_icmph), &_icmph);
if (ich == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] ",
skb->len - iphoff - ih->ihl*4);
break;
}
/* Max length: 18 "TYPE=255 CODE=255 " */
sb_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code);
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
if (ich->type <= NR_ICMP_TYPES &&
required_len[ich->type] &&
skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) {
sb_add(m, "INCOMPLETE [%u bytes] ",
skb->len - iphoff - ih->ihl*4);
break;
}
switch (ich->type) {
case ICMP_ECHOREPLY:
case ICMP_ECHO:
/* Max length: 19 "ID=65535 SEQ=65535 " */
sb_add(m, "ID=%u SEQ=%u ",
ntohs(ich->un.echo.id),
ntohs(ich->un.echo.sequence));
break;
case ICMP_PARAMETERPROB:
/* Max length: 14 "PARAMETER=255 " */
sb_add(m, "PARAMETER=%u ",
ntohl(ich->un.gateway) >> 24);
break;
case ICMP_REDIRECT:
/* Max length: 24 "GATEWAY=255.255.255.255 " */
sb_add(m, "GATEWAY=%pI4 ", &ich->un.gateway);
/* Fall through */
case ICMP_DEST_UNREACH:
case ICMP_SOURCE_QUENCH:
case ICMP_TIME_EXCEEDED:
/* Max length: 3+maxlen */
if (!iphoff) { /* Only recurse once. */
sb_add(m, "[");
dump_ipv4_packet(m, info, skb,
iphoff + ih->ihl*4+sizeof(_icmph));
sb_add(m, "] ");
}
/* Max length: 10 "MTU=65535 " */
if (ich->type == ICMP_DEST_UNREACH &&
ich->code == ICMP_FRAG_NEEDED)
sb_add(m, "MTU=%u ", ntohs(ich->un.frag.mtu));
}
break;
}
/* Max Length */
case IPPROTO_AH: {
struct ip_auth_hdr _ahdr;
const struct ip_auth_hdr *ah;
if (ntohs(ih->frag_off) & IP_OFFSET)
break;
/* Max length: 9 "PROTO=AH " */
sb_add(m, "PROTO=AH ");
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
ah = skb_header_pointer(skb, iphoff+ih->ihl*4,
sizeof(_ahdr), &_ahdr);
if (ah == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] ",
skb->len - iphoff - ih->ihl*4);
break;
}
/* Length: 15 "SPI=0xF1234567 " */
sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
break;
}
case IPPROTO_ESP: {
struct ip_esp_hdr _esph;
const struct ip_esp_hdr *eh;
/* Max length: 10 "PROTO=ESP " */
sb_add(m, "PROTO=ESP ");
if (ntohs(ih->frag_off) & IP_OFFSET)
break;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
eh = skb_header_pointer(skb, iphoff+ih->ihl*4,
sizeof(_esph), &_esph);
if (eh == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] ",
skb->len - iphoff - ih->ihl*4);
break;
}
/* Length: 15 "SPI=0xF1234567 " */
sb_add(m, "SPI=0x%x ", ntohl(eh->spi));
break;
}
/* Max length: 10 "PROTO 255 " */
default:
sb_add(m, "PROTO=%u ", ih->protocol);
}
/* Max length: 15 "UID=4294967295 " */
if ((logflags & XT_LOG_UID) && !iphoff)
dump_sk_uid_gid(m, skb->sk);
/* Max length: 16 "MARK=0xFFFFFFFF " */
if (!iphoff && skb->mark)
sb_add(m, "MARK=0x%x ", skb->mark);
/* Proto Max log string length */
/* IP: 40+46+6+11+127 = 230 */
/* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */
/* UDP: 10+max(25,20) = 35 */
/* UDPLITE: 14+max(25,20) = 39 */
/* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
/* ESP: 10+max(25)+15 = 50 */
/* AH: 9+max(25)+15 = 49 */
/* unknown: 10 */
/* (ICMP allows recursion one level deep) */
/* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */
/* maxlen = 230+ 91 + 230 + 252 = 803 */
}
static void dump_ipv4_mac_header(struct sbuff *m,
const struct nf_loginfo *info,
const struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
unsigned int logflags = 0;
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
if (!(logflags & XT_LOG_MACDECODE))
goto fallback;
switch (dev->type) {
case ARPHRD_ETHER:
sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
ntohs(eth_hdr(skb)->h_proto));
return;
default:
break;
}
fallback:
sb_add(m, "MAC=");
if (dev->hard_header_len &&
skb->mac_header != skb->network_header) {
const unsigned char *p = skb_mac_header(skb);
unsigned int i;
sb_add(m, "%02x", *p++);
for (i = 1; i < dev->hard_header_len; i++, p++)
sb_add(m, ":%02x", *p);
}
sb_add(m, " ");
}
static void
log_packet_common(struct sbuff *m,
u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix)
{
sb_add(m, KERN_SOH "%c%sIN=%s OUT=%s ",
'0' + loginfo->u.log.level, prefix,
in ? in->name : "",
out ? out->name : "");
#ifdef CONFIG_BRIDGE_NETFILTER
if (skb->nf_bridge) {
const struct net_device *physindev;
const struct net_device *physoutdev;
physindev = skb->nf_bridge->physindev;
if (physindev && in != physindev)
sb_add(m, "PHYSIN=%s ", physindev->name);
physoutdev = skb->nf_bridge->physoutdev;
if (physoutdev && out != physoutdev)
sb_add(m, "PHYSOUT=%s ", physoutdev->name);
}
#endif
}
static void
ipt_log_packet(struct net *net,
u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix)
{
struct sbuff *m;
/* FIXME: Disabled from containers until syslog ns is supported */
if (!net_eq(net, &init_net))
return;
m = sb_open();
if (!loginfo)
loginfo = &default_loginfo;
log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix);
if (in != NULL)
dump_ipv4_mac_header(m, loginfo, skb);
dump_ipv4_packet(m, loginfo, skb, 0);
sb_close(m);
}
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
/* One level of recursion won't kill us */
static void dump_ipv6_packet(struct sbuff *m,
const struct nf_loginfo *info,
const struct sk_buff *skb, unsigned int ip6hoff,
int recurse)
{
u_int8_t currenthdr;
int fragment;
struct ipv6hdr _ip6h;
const struct ipv6hdr *ih;
unsigned int ptr;
unsigned int hdrlen = 0;
unsigned int logflags;
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
else
logflags = NF_LOG_MASK;
ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
if (ih == NULL) {
sb_add(m, "TRUNCATED");
return;
}
/* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
sb_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);
/* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
sb_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
(ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
ih->hop_limit,
(ntohl(*(__be32 *)ih) & 0x000fffff));
fragment = 0;
ptr = ip6hoff + sizeof(struct ipv6hdr);
currenthdr = ih->nexthdr;
while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
struct ipv6_opt_hdr _hdr;
const struct ipv6_opt_hdr *hp;
hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
if (hp == NULL) {
sb_add(m, "TRUNCATED");
return;
}
/* Max length: 48 "OPT (...) " */
if (logflags & XT_LOG_IPOPT)
sb_add(m, "OPT ( ");
switch (currenthdr) {
case IPPROTO_FRAGMENT: {
struct frag_hdr _fhdr;
const struct frag_hdr *fh;
sb_add(m, "FRAG:");
fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
&_fhdr);
if (fh == NULL) {
sb_add(m, "TRUNCATED ");
return;
}
/* Max length: 6 "65535 " */
sb_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);
/* Max length: 11 "INCOMPLETE " */
if (fh->frag_off & htons(0x0001))
sb_add(m, "INCOMPLETE ");
sb_add(m, "ID:%08x ", ntohl(fh->identification));
if (ntohs(fh->frag_off) & 0xFFF8)
fragment = 1;
hdrlen = 8;
break;
}
case IPPROTO_DSTOPTS:
case IPPROTO_ROUTING:
case IPPROTO_HOPOPTS:
if (fragment) {
if (logflags & XT_LOG_IPOPT)
sb_add(m, ")");
return;
}
hdrlen = ipv6_optlen(hp);
break;
/* Max Length */
case IPPROTO_AH:
if (logflags & XT_LOG_IPOPT) {
struct ip_auth_hdr _ahdr;
const struct ip_auth_hdr *ah;
/* Max length: 3 "AH " */
sb_add(m, "AH ");
if (fragment) {
sb_add(m, ")");
return;
}
ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
&_ahdr);
if (ah == NULL) {
/*
* Max length: 26 "INCOMPLETE [65535
* bytes] )"
*/
sb_add(m, "INCOMPLETE [%u bytes] )",
skb->len - ptr);
return;
}
/* Length: 15 "SPI=0xF1234567 */
sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
}
hdrlen = (hp->hdrlen+2)<<2;
break;
case IPPROTO_ESP:
if (logflags & XT_LOG_IPOPT) {
struct ip_esp_hdr _esph;
const struct ip_esp_hdr *eh;
/* Max length: 4 "ESP " */
sb_add(m, "ESP ");
if (fragment) {
sb_add(m, ")");
return;
}
/*
* Max length: 26 "INCOMPLETE [65535 bytes] )"
*/
eh = skb_header_pointer(skb, ptr, sizeof(_esph),
&_esph);
if (eh == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] )",
skb->len - ptr);
return;
}
/* Length: 16 "SPI=0xF1234567 )" */
sb_add(m, "SPI=0x%x )", ntohl(eh->spi));
}
return;
default:
/* Max length: 20 "Unknown Ext Hdr 255" */
sb_add(m, "Unknown Ext Hdr %u", currenthdr);
return;
}
if (logflags & XT_LOG_IPOPT)
sb_add(m, ") ");
currenthdr = hp->nexthdr;
ptr += hdrlen;
}
switch (currenthdr) {
case IPPROTO_TCP:
if (dump_tcp_header(m, skb, currenthdr, fragment, ptr,
logflags))
return;
break;
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
if (dump_udp_header(m, skb, currenthdr, fragment, ptr))
return;
break;
case IPPROTO_ICMPV6: {
struct icmp6hdr _icmp6h;
const struct icmp6hdr *ic;
/* Max length: 13 "PROTO=ICMPv6 " */
sb_add(m, "PROTO=ICMPv6 ");
if (fragment)
break;
/* Max length: 25 "INCOMPLETE [65535 bytes] " */
ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
if (ic == NULL) {
sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
return;
}
/* Max length: 18 "TYPE=255 CODE=255 " */
sb_add(m, "TYPE=%u CODE=%u ", ic->icmp6_type, ic->icmp6_code);
switch (ic->icmp6_type) {
case ICMPV6_ECHO_REQUEST:
case ICMPV6_ECHO_REPLY:
/* Max length: 19 "ID=65535 SEQ=65535 " */
sb_add(m, "ID=%u SEQ=%u ",
ntohs(ic->icmp6_identifier),
ntohs(ic->icmp6_sequence));
break;
case ICMPV6_MGM_QUERY:
case ICMPV6_MGM_REPORT:
case ICMPV6_MGM_REDUCTION:
break;
case ICMPV6_PARAMPROB:
/* Max length: 17 "POINTER=ffffffff " */
sb_add(m, "POINTER=%08x ", ntohl(ic->icmp6_pointer));
/* Fall through */
case ICMPV6_DEST_UNREACH:
case ICMPV6_PKT_TOOBIG:
case ICMPV6_TIME_EXCEED:
/* Max length: 3+maxlen */
if (recurse) {
sb_add(m, "[");
dump_ipv6_packet(m, info, skb,
ptr + sizeof(_icmp6h), 0);
sb_add(m, "] ");
}
/* Max length: 10 "MTU=65535 " */
if (ic->icmp6_type == ICMPV6_PKT_TOOBIG)
sb_add(m, "MTU=%u ", ntohl(ic->icmp6_mtu));
}
break;
}
/* Max length: 10 "PROTO=255 " */
default:
sb_add(m, "PROTO=%u ", currenthdr);
}
/* Max length: 15 "UID=4294967295 " */
if ((logflags & XT_LOG_UID) && recurse)
dump_sk_uid_gid(m, skb->sk);
/* Max length: 16 "MARK=0xFFFFFFFF " */
if (recurse && skb->mark)
sb_add(m, "MARK=0x%x ", skb->mark);
}
static void dump_ipv6_mac_header(struct sbuff *m,
const struct nf_loginfo *info,
const struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
unsigned int logflags = 0;
if (info->type == NF_LOG_TYPE_LOG)
logflags = info->u.log.logflags;
if (!(logflags & XT_LOG_MACDECODE))
goto fallback;
switch (dev->type) {
case ARPHRD_ETHER:
sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
ntohs(eth_hdr(skb)->h_proto));
return;
default:
break;
}
fallback:
sb_add(m, "MAC=");
if (dev->hard_header_len &&
skb->mac_header != skb->network_header) {
const unsigned char *p = skb_mac_header(skb);
unsigned int len = dev->hard_header_len;
unsigned int i;
if (dev->type == ARPHRD_SIT) {
p -= ETH_HLEN;
if (p < skb->head)
p = NULL;
}
if (p != NULL) {
sb_add(m, "%02x", *p++);
for (i = 1; i < len; i++)
sb_add(m, ":%02x", *p++);
}
sb_add(m, " ");
if (dev->type == ARPHRD_SIT) {
const struct iphdr *iph =
(struct iphdr *)skb_mac_header(skb);
sb_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr,
&iph->daddr);
}
} else
sb_add(m, " ");
}
static void
ip6t_log_packet(struct net *net,
u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix)
{
struct sbuff *m;
/* FIXME: Disabled from containers until syslog ns is supported */
if (!net_eq(net, &init_net))
return;
m = sb_open();
if (!loginfo)
loginfo = &default_loginfo;
log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix);
if (in != NULL)
dump_ipv6_mac_header(m, loginfo, skb);
dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1);
sb_close(m);
}
#endif
static unsigned int
log_tg(struct sk_buff *skb, const struct xt_action_param *par)
@ -839,17 +39,8 @@ log_tg(struct sk_buff *skb, const struct xt_action_param *par)
li.u.log.level = loginfo->level;
li.u.log.logflags = loginfo->logflags;
if (par->family == NFPROTO_IPV4)
ipt_log_packet(net, NFPROTO_IPV4, par->hooknum, skb, par->in,
par->out, &li, loginfo->prefix);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
else if (par->family == NFPROTO_IPV6)
ip6t_log_packet(net, NFPROTO_IPV6, par->hooknum, skb, par->in,
par->out, &li, loginfo->prefix);
#endif
else
WARN_ON_ONCE(1);
nf_log_packet(net, par->family, par->hooknum, skb, par->in, par->out,
&li, "%s", loginfo->prefix);
return XT_CONTINUE;
}
@ -870,7 +61,12 @@ static int log_tg_check(const struct xt_tgchk_param *par)
return -EINVAL;
}
return 0;
return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
}
static void log_tg_destroy(const struct xt_tgdtor_param *par)
{
nf_logger_put(par->family, NF_LOG_TYPE_LOG);
}
static struct xt_target log_tg_regs[] __read_mostly = {
@ -880,6 +76,7 @@ static struct xt_target log_tg_regs[] __read_mostly = {
.target = log_tg,
.targetsize = sizeof(struct xt_log_info),
.checkentry = log_tg_check,
.destroy = log_tg_destroy,
.me = THIS_MODULE,
},
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
@ -889,78 +86,19 @@ static struct xt_target log_tg_regs[] __read_mostly = {
.target = log_tg,
.targetsize = sizeof(struct xt_log_info),
.checkentry = log_tg_check,
.destroy = log_tg_destroy,
.me = THIS_MODULE,
},
#endif
};
static struct nf_logger ipt_log_logger __read_mostly = {
.name = "ipt_LOG",
.logfn = &ipt_log_packet,
.me = THIS_MODULE,
};
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static struct nf_logger ip6t_log_logger __read_mostly = {
.name = "ip6t_LOG",
.logfn = &ip6t_log_packet,
.me = THIS_MODULE,
};
#endif
static int __net_init log_net_init(struct net *net)
{
nf_log_set(net, NFPROTO_IPV4, &ipt_log_logger);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
nf_log_set(net, NFPROTO_IPV6, &ip6t_log_logger);
#endif
return 0;
}
static void __net_exit log_net_exit(struct net *net)
{
nf_log_unset(net, &ipt_log_logger);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
nf_log_unset(net, &ip6t_log_logger);
#endif
}
static struct pernet_operations log_net_ops = {
.init = log_net_init,
.exit = log_net_exit,
};
static int __init log_tg_init(void)
{
int ret;
ret = register_pernet_subsys(&log_net_ops);
if (ret < 0)
goto err_pernet;
ret = xt_register_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
if (ret < 0)
goto err_target;
nf_log_register(NFPROTO_IPV4, &ipt_log_logger);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
nf_log_register(NFPROTO_IPV6, &ip6t_log_logger);
#endif
return 0;
err_target:
unregister_pernet_subsys(&log_net_ops);
err_pernet:
return ret;
return xt_register_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
}
static void __exit log_tg_exit(void)
{
unregister_pernet_subsys(&log_net_ops);
nf_log_unregister(&ipt_log_logger);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
nf_log_unregister(&ip6t_log_logger);
#endif
xt_unregister_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
}