tsnep: Prepare RX buffer for XDP support
author Gerhard Engleder <gerhard@engleder-embedded.com>
Mon, 16 Jan 2023 20:24:55 +0000 (21:24 +0100)
committer David S. Miller <davem@davemloft.net>
Wed, 18 Jan 2023 13:17:06 +0000 (13:17 +0000)
Always reserve XDP_PACKET_HEADROOM in front of the RX buffer. Similarly,
the DMA direction is always set to DMA_BIDIRECTIONAL. This eliminates the
need for RX queue reconfiguration during BPF program setup. The RX queue
is always prepared for XDP.
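
As a rough aid, the arithmetic behind the new offset can be checked in
isolation. The sketch below is standalone and assumes typical values
(NET_SKB_PAD = 32, XDP_PACKET_HEADROOM = 256, NET_IP_ALIGN = 2); the real
constants come from the kernel headers and vary by architecture and
configuration.

#include <stdio.h>

/* Assumed typical values; in the kernel these come from the relevant
 * headers and depend on architecture and configuration.
 */
#define NET_SKB_PAD          32
#define XDP_PACKET_HEADROOM  256
#define NET_IP_ALIGN         2
#define ALIGN(x, a)          (((x) + (a) - 1) & ~((a) - 1))

#define TSNEP_RX_OFFSET (((NET_SKB_PAD > XDP_PACKET_HEADROOM) ? \
                          NET_SKB_PAD : XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
#define TSNEP_HEADROOM  ALIGN(TSNEP_RX_OFFSET, 4)

int main(void)
{
        /* With the assumed values: offset = 256 + 2 = 258, headroom = 260 */
        printf("RX offset: %d, headroom: %d\n", TSNEP_RX_OFFSET, TSNEP_HEADROOM);
        return 0;
}

With these assumptions the reserved headroom already satisfies XDP, so
attaching a BPF program later does not change the buffer layout.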

No negative impact of DMA_BIDIRECTIONAL was measured.
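
The switch to DMA_BIDIRECTIONAL matters once XDP can modify an RX buffer
and hand it back to the hardware (e.g. XDP_TX): CPU writes then have to be
synced towards the device, which a DMA_FROM_DEVICE mapping does not cover.
A minimal, hypothetical sketch of that sync step, with illustrative names
not taken from this driver:

#include <linux/dma-mapping.h>

/* Hypothetical helper: flush CPU-written frame data back to the device
 * before the buffer is retransmitted. This is only valid because the
 * page pool maps pages with DMA_BIDIRECTIONAL.
 */
static void example_xdp_tx_sync(struct device *dmadev, dma_addr_t dma,
                                unsigned int offset, unsigned int len)
{
        dma_sync_single_range_for_device(dmadev, dma, offset, len,
                                         DMA_BIDIRECTIONAL);
}

Since the mapping direction and headroom are fixed at ring setup, enabling
or disabling an XDP program needs no ring teardown or remapping.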

Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/engleder/tsnep_main.c

index 0730cd45f9a37ca123b15a4ac01a0ec3ff97d44e..45e576e74510c8e480575487c4c24ee0fd354a49 100644
 #include <linux/etherdevice.h>
 #include <linux/phy.h>
 #include <linux/iopoll.h>
+#include <linux/bpf.h>
 
-#define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
-#define TSNEP_HEADROOM ALIGN(TSNEP_SKB_PAD, 4)
+#define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define TSNEP_HEADROOM ALIGN(TSNEP_RX_OFFSET, 4)
 #define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
                               SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
@@ -806,9 +807,9 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
        pp_params.pool_size = TSNEP_RING_SIZE;
        pp_params.nid = dev_to_node(dmadev);
        pp_params.dev = dmadev;
-       pp_params.dma_dir = DMA_FROM_DEVICE;
+       pp_params.dma_dir = DMA_BIDIRECTIONAL;
        pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
-       pp_params.offset = TSNEP_SKB_PAD;
+       pp_params.offset = TSNEP_RX_OFFSET;
        rx->page_pool = page_pool_create(&pp_params);
        if (IS_ERR(rx->page_pool)) {
                retval = PTR_ERR(rx->page_pool);
@@ -843,7 +844,7 @@ static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
        entry->page = page;
        entry->len = TSNEP_MAX_RX_BUF_SIZE;
        entry->dma = page_pool_get_dma_addr(entry->page);
-       entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);
+       entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET);
 }
 
 static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
@@ -947,14 +948,14 @@ static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
                return NULL;
 
        /* update pointers within the skb to store the data */
-       skb_reserve(skb, TSNEP_SKB_PAD + TSNEP_RX_INLINE_METADATA_SIZE);
+       skb_reserve(skb, TSNEP_RX_OFFSET + TSNEP_RX_INLINE_METADATA_SIZE);
        __skb_put(skb, length - ETH_FCS_LEN);
 
        if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
                struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
                struct tsnep_rx_inline *rx_inline =
                        (struct tsnep_rx_inline *)(page_address(page) +
-                                                  TSNEP_SKB_PAD);
+                                                  TSNEP_RX_OFFSET);
 
                skb_shinfo(skb)->tx_flags |=
                        SKBTX_HW_TSTAMP_NETDEV;
@@ -1014,11 +1015,11 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
                 */
                dma_rmb();
 
-               prefetch(page_address(entry->page) + TSNEP_SKB_PAD);
+               prefetch(page_address(entry->page) + TSNEP_RX_OFFSET);
                length = __le32_to_cpu(entry->desc_wb->properties) &
                         TSNEP_DESC_LENGTH_MASK;
-               dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD,
-                                             length, dma_dir);
+               dma_sync_single_range_for_cpu(dmadev, entry->dma,
+                                             TSNEP_RX_OFFSET, length, dma_dir);
 
                /* RX metadata with timestamps is in front of actual data,
                 * subtract metadata size to get length of actual data and