xdp: split code for map vs non-map redirect
Author:     Jesper Dangaard Brouer <brouer@redhat.com>
AuthorDate: Mon, 3 Sep 2018 07:55:07 +0000 (09:55 +0200)
Commit:     Alexei Starovoitov <ast@kernel.org>
CommitDate: Fri, 7 Sep 2018 05:34:08 +0000 (22:34 -0700)
The compiler does an efficient job of inlining static C functions.
Perf top clearly shows that almost everything gets inlined into the
function xdp_do_redirect.

The function xdp_do_redirect ends up containing and interleaving the
map and non-map redirect code.  This is sub-optimal, as it would be
strange for an XDP program to use both types of redirect in the same
program.  The two use-cases are separate, and interleaving the code
just causes more instruction-cache pressure.
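
To illustrate the two separate use-cases (this sketch is not part of
the patch, and the devmap name tx_port is a hypothetical example): an
XDP program normally commits to one redirect style.  A minimal
map-based program could look like the following; the non-map variant
would instead call bpf_redirect(ifindex, 0).

  /* Illustrative sketch only -- not part of this commit.  The devmap
   * name tx_port and its size are hypothetical examples.
   */
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  struct {
          __uint(type, BPF_MAP_TYPE_DEVMAP);
          __uint(max_entries, 64);
          __type(key, __u32);
          __type(value, __u32);   /* egress ifindex, set from user space */
  } tx_port SEC(".maps");

  SEC("xdp")
  int xdp_redirect_map_prog(struct xdp_md *ctx)
  {
          /* Hand every packet to the device stored at map key 0;
           * bpf_redirect_map() returns XDP_REDIRECT on success.
           */
          return bpf_redirect_map(&tx_port, 0, 0);
  }

  char _license[] SEC("license") = "GPL";

User space would populate tx_port with the egress ifindex before
attaching the program; swapping the helper for bpf_redirect() is what
pushes execution onto the slower non-map path split out below.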

I would like to stress (again) that the non-map variant bpf_redirect
is very slow compared to the bpf_redirect_map variant, approximately
half the speed.  Measured with the i40e driver the difference is:

- map     redirect: 13,250,350 pps
- non-map redirect:  7,491,425 pps
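
For perspective (derived from the numbers above, not a separate
measurement), this corresponds to roughly:

- map     redirect: 1 / 13,250,350 pps ≈  75 ns per packet
- non-map redirect: 1 /  7,491,425 pps ≈ 133 ns per packet

i.e. around 58 ns of extra cost per packet on the non-map path.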

For this reason, the non-map variant of redirect has been named
xdp_do_redirect_slow.  This hopefully gives a hint, when using perf,
that this is not the optimal XDP redirect operating mode.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
net/core/filter.c

index d7dbe412cb9e584ce45550c343e8e2f29644b675..8cb242b4400f392ed85cfc2d413167848d00c36f 100644
@@ -3175,6 +3175,32 @@ static int __bpf_tx_xdp(struct net_device *dev,
        return 0;
 }
 
+static noinline int
+xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp,
+                    struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri)
+{
+       struct net_device *fwd;
+       u32 index = ri->ifindex;
+       int err;
+
+       fwd = dev_get_by_index_rcu(dev_net(dev), index);
+       ri->ifindex = 0;
+       if (unlikely(!fwd)) {
+               err = -EINVAL;
+               goto err;
+       }
+
+       err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
+       if (unlikely(err))
+               goto err;
+
+       _trace_xdp_redirect(dev, xdp_prog, index);
+       return 0;
+err:
+       _trace_xdp_redirect_err(dev, xdp_prog, index, err);
+       return err;
+}
+
 static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
                            struct bpf_map *map,
                            struct xdp_buff *xdp,
@@ -3269,9 +3295,9 @@ void bpf_clear_redirect_map(struct bpf_map *map)
 }
 
 static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
-                              struct bpf_prog *xdp_prog, struct bpf_map *map)
+                              struct bpf_prog *xdp_prog, struct bpf_map *map,
+                              struct bpf_redirect_info *ri)
 {
-       struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
        u32 index = ri->ifindex;
        void *fwd = NULL;
        int err;
@@ -3304,29 +3330,11 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 {
        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
        struct bpf_map *map = READ_ONCE(ri->map);
-       struct net_device *fwd;
-       u32 index = ri->ifindex;
-       int err;
 
        if (likely(map))
-               return xdp_do_redirect_map(dev, xdp, xdp_prog, map);
+               return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri);
 
-       fwd = dev_get_by_index_rcu(dev_net(dev), index);
-       ri->ifindex = 0;
-       if (unlikely(!fwd)) {
-               err = -EINVAL;
-               goto err;
-       }
-
-       err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
-       if (unlikely(err))
-               goto err;
-
-       _trace_xdp_redirect(dev, xdp_prog, index);
-       return 0;
-err:
-       _trace_xdp_redirect_err(dev, xdp_prog, index, err);
-       return err;
+       return xdp_do_redirect_slow(dev, xdp, xdp_prog, ri);
 }
 EXPORT_SYMBOL_GPL(xdp_do_redirect);