xdp: fix bug in devmap teardown code path [Linux 4.18]

This Linux kernel change, "xdp: fix bug in devmap teardown code path", is included in the Linux 4.18 release. It was authored by Jesper Dangaard Brouer <brouer@redhat.com> on Wed Aug 8 23:00:45 2018 +0200. The commit for this change in the Linux stable tree is 1bf9116 (patch).

xdp: fix bug in devmap teardown code path

Like cpumap teardown, the devmap teardown code also flushes remaining
xdp_frames, via bq_xmit_all(), in case a map entry is removed.  The code
can call xdp_return_frame_rx_napi() from the wrong context in case
ndo_xdp_xmit() fails.

Fixes: 389ab7f01af9 ("xdp: introduce xdp_return_frame_rx_napi")
Fixes: 735fc4054b3a ("xdp: change ndo_xdp_xmit API to support bulking")
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
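
To illustrate the fix, here is a minimal sketch of the pattern the patch introduces. The helper name bq_return_frame() is hypothetical; the actual patch open-codes this branch inside bq_xmit_all() in kernel/bpf/devmap.c. A caller that knows it runs under NAPI can use the faster xdp_return_frame_rx_napi(), while any other context must fall back to the generic xdp_return_frame():

 /* A minimal sketch, not taken verbatim from the patch: the helper
  * name bq_return_frame() is hypothetical. The real patch open-codes
  * this branch inside bq_xmit_all().
  */
 #include <linux/compiler.h>
 #include <net/xdp.h>

 static void bq_return_frame(struct xdp_frame *xdpf, bool in_napi_ctx)
 {
     if (likely(in_napi_ctx))
         /* RX path under NAPI protection, can return frames faster */
         xdp_return_frame_rx_napi(xdpf);
     else
         /* outside NAPI (e.g. map teardown): use the generic path */
         xdp_return_frame(xdpf);
 }

Passing the context down as an explicit flag, rather than trying to detect it at free time, keeps the common NAPI case a predictable fast path via likely().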

There are 14 lines of Linux source code added/deleted in this change (9 insertions, 5 deletions). The code changes to the Linux kernel are as follows.

 kernel/bpf/devmap.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index d361fc1..750d45e 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }

 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-              struct xdp_bulk_queue *bq, u32 flags)
+              struct xdp_bulk_queue *bq, u32 flags,
+              bool in_napi_ctx)
 {
    struct net_device *dev = obj->dev;
    int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
        struct xdp_frame *xdpf = bq->q[i];

        /* RX path under NAPI protection, can return frames faster */
-       xdp_return_frame_rx_napi(xdpf);
+       if (likely(in_napi_ctx))
+           xdp_return_frame_rx_napi(xdpf);
+       else
+           xdp_return_frame(xdpf);
        drops++;
    }
    goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
        __clear_bit(bit, bitmap);

        bq = this_cpu_ptr(dev->bulkq);
-       bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+       bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
    }
 }

@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
    struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

    if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-       bq_xmit_all(obj, bq, 0);
+       bq_xmit_all(obj, bq, 0, true);

    /* Ingress dev_rx will be the same for all xdp_frame's in
     * bulk_queue, because bq stored per-CPU and must be flushed
@@ -385,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
            __clear_bit(dev->bit, bitmap);

            bq = per_cpu_ptr(dev->bulkq, cpu);
-           bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+           bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
        }
    }
 }
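
For context on why the last hunk passes false: the teardown path is reached from the RCU callback that frees a removed map entry, which runs in softirq context but not under the RX NAPI poll of the target device. The sketch below is simplified from kernel/bpf/devmap.c as of Linux 4.18 and may differ in detail from the exact source:

 /* Simplified sketch of the teardown call chain: the RCU callback that
  * frees a removed devmap entry does not run under the device's RX NAPI
  * poll, so the bulk queue is drained with in_napi_ctx == false.
  */
 static void __dev_map_entry_free(struct rcu_head *rcu)
 {
     struct bpf_dtab_netdev *dev;

     dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
     dev_map_flush_old(dev);   /* ends up in bq_xmit_all(..., false) */
     free_percpu(dev->bulkq);
     dev_put(dev->dev);
     kfree(dev);
 }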
