ixgbe: add initial support for xdp redirect

There are optimizations we can add after the basic feature is
enabled. But, for now keep the patch simple.

Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: John Fastabend <john.fastabend@gmail.com>
Date: 2017-07-17 09:28:12 -07:00
Committer: David S. Miller
commit 6453073987
parent 6103aa96ec

@@ -2214,7 +2214,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
				     struct ixgbe_ring *rx_ring,
				     struct xdp_buff *xdp)
 {
-	int result = IXGBE_XDP_PASS;
+	int err, result = IXGBE_XDP_PASS;
 	struct bpf_prog *xdp_prog;
 	u32 act;
@@ -2231,6 +2231,13 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
 	case XDP_TX:
 		result = ixgbe_xmit_xdp_ring(adapter, xdp);
 		break;
+	case XDP_REDIRECT:
+		err = xdp_do_redirect(adapter->netdev, xdp);
+		if (!err)
+			result = IXGBE_XDP_TX;
+		else
+			result = IXGBE_XDP_CONSUMED;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		/* fallthrough */
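
The new XDP_REDIRECT case only fires when the attached BPF program returns that action. As an illustrative sketch (not part of this patch), a minimal program that redirects every frame could look like the following; the egress ifindex, program name, and header paths are assumptions for the example, using current libbpf headers rather than the 2017-era samples.

/* SPDX-License-Identifier: GPL-2.0 */
/* Illustrative sketch only, not from this patch: a minimal XDP program
 * whose bpf_redirect() return value makes the driver hit the new
 * XDP_REDIRECT case in ixgbe_run_xdp() above. The egress ifindex (3)
 * is an assumed placeholder.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_redirect_all(struct xdp_md *ctx)
{
	return bpf_redirect(3 /* assumed target ifindex */, 0);
}

char _license[] SEC("license") = "GPL";
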
@@ -9823,6 +9830,37 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp)
 	}
 }
 
+static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ixgbe_ring *ring;
+	int err;
+
+	if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
+		return -EINVAL;
+
+	/* During program transitions its possible adapter->xdp_prog is assigned
+	 * but ring has not been configured yet. In this case simply abort xmit.
+	 */
+	ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
+	if (unlikely(!ring))
+		return -EINVAL;
+
+	err = ixgbe_xmit_xdp_ring(adapter, xdp);
+	if (err != IXGBE_XDP_TX)
+		return -ENOMEM;
+
+	/* Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.
+	 */
+	wmb();
+
+	ring = adapter->xdp_ring[smp_processor_id()];
+	writel(ring->next_to_use, ring->tail);
+
+	return 0;
+}
+
 static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_open		= ixgbe_open,
 	.ndo_stop		= ixgbe_close,
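
The wmb()/writel() pair above publishes the new descriptors and rings the XDP ring's doorbell directly from the xmit callback. Conceptually, the core redirect path resolves the target device and then invokes that device's ndo_xdp_xmit with the same xdp_buff; a simplified, hedged sketch of that call shape (not the kernel's verbatim code) is shown below.

/* Hedged sketch, not kernel source: the shape of the call the core
 * redirect path makes into the ndo added by this patch, using the
 * 2017-era single-buffer signature shown above.
 */
static int sketch_xdp_forward(struct net_device *fwd, struct xdp_buff *xdp)
{
	if (unlikely(!fwd->netdev_ops->ndo_xdp_xmit))
		return -EOPNOTSUPP;

	return fwd->netdev_ops->ndo_xdp_xmit(fwd, xdp);
}
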
@@ -9869,6 +9907,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_udp_tunnel_del	= ixgbe_del_udp_tunnel_port,
 	.ndo_features_check	= ixgbe_features_check,
 	.ndo_xdp		= ixgbe_xdp,
+	.ndo_xdp_xmit		= ixgbe_xdp_xmit,
 };
 
 /**
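
To exercise the native path wired up by this patch, the program has to be attached in driver mode. A hedged userspace sketch using a current libbpf follows; the function names are today's libbpf API rather than the 2017 samples, and the object file name and interface name are assumptions for illustration.

#include <bpf/libbpf.h>
#include <linux/if_link.h>
#include <net/if.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex = if_nametoindex("eth0");	/* assumed ixgbe port name */

	/* assumed object built from the XDP program sketch above */
	obj = bpf_object__open_file("xdp_redirect_all.o", NULL);
	if (!obj || bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "xdp_redirect_all");
	if (!prog)
		return 1;

	/* XDP_FLAGS_DRV_MODE requests the native driver hook added for ixgbe */
	if (bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			   XDP_FLAGS_DRV_MODE, NULL))
		return 1;

	return 0;
}
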