xsk: Use dma_need_sync instead of reimplementing it
Author:     Christoph Hellwig <hch@lst.de>
AuthorDate: Mon, 29 Jun 2020 13:03:59 +0000 (15:03 +0200)
Commit:     Daniel Borkmann <daniel@iogearbox.net>
CommitDate: Tue, 30 Jun 2020 13:44:03 +0000 (15:44 +0200)
Use the dma_need_sync helper instead of (not always entirely correctly)
poking into the dma-mapping internals.
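
For reference, dma_need_sync() reports whether dma_sync_single_for_{cpu,device}()
calls can ever be required for a given mapping, which is exactly the question
the removed helpers tried to answer by hand. A minimal sketch of the
mapping-time pattern this patch adopts; the struct and function names below are
made up for illustration, only the DMA API calls are real:

  #include <linux/dma-mapping.h>
  #include <linux/mm.h>

  struct pool_sketch {
          struct device *dev;
          bool dma_need_sync;
  };

  static int pool_map_page_sketch(struct pool_sketch *pool, struct page *page)
  {
          dma_addr_t dma;

          dma = dma_map_page(pool->dev, page, 0, PAGE_SIZE,
                             DMA_BIDIRECTIONAL);
          if (dma_mapping_error(pool->dev, dma))
                  return -ENOMEM;

          /* Ask the DMA layer instead of inspecting its internals:
           * if any one mapping may need syncing, the whole pool does. */
          if (dma_need_sync(pool->dev, dma))
                  pool->dma_need_sync = true;
          return 0;
  }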

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20200629130359.2690853-5-hch@lst.de
net/xdp/xsk_buff_pool.c

index 6733e2c59e4835fb8fffb6d8ef5948b0c41ebbd4..08b80669f6495591fe4e9255ca0955f8ef83f7de 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -2,9 +2,6 @@
 
 #include <net/xsk_buff_pool.h>
 #include <net/xdp_sock.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/swiotlb.h>
 
 #include "xsk_queue.h"
 
@@ -124,48 +121,6 @@ static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
        }
 }
 
-static bool __maybe_unused xp_check_swiotlb_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_SWIOTLB)
-       phys_addr_t paddr;
-       u32 i;
-
-       for (i = 0; i < pool->dma_pages_cnt; i++) {
-               paddr = dma_to_phys(pool->dev, pool->dma_pages[i]);
-               if (is_swiotlb_buffer(paddr))
-                       return false;
-       }
-#endif
-       return true;
-}
-
-static bool xp_check_cheap_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_HAS_DMA)
-       const struct dma_map_ops *ops = get_dma_ops(pool->dev);
-
-       if (ops) {
-               return !ops->sync_single_for_cpu &&
-                       !ops->sync_single_for_device;
-       }
-
-       if (!dma_is_direct(ops))
-               return false;
-
-       if (!xp_check_swiotlb_dma(pool))
-               return false;
-
-       if (!dev_is_dma_coherent(pool->dev)) {
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) ||               \
-       defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) ||        \
-       defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
-               return false;
-#endif
-       }
-#endif
-       return true;
-}
-
 int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
               unsigned long attrs, struct page **pages, u32 nr_pages)
 {
@@ -179,6 +134,7 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 
        pool->dev = dev;
        pool->dma_pages_cnt = nr_pages;
+       pool->dma_need_sync = false;
 
        for (i = 0; i < pool->dma_pages_cnt; i++) {
                dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
@@ -187,13 +143,13 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
                        xp_dma_unmap(pool, attrs);
                        return -ENOMEM;
                }
+               if (dma_need_sync(dev, dma))
+                       pool->dma_need_sync = true;
                pool->dma_pages[i] = dma;
        }
 
        if (pool->unaligned)
                xp_check_dma_contiguity(pool);
-
-       pool->dma_need_sync = !xp_check_cheap_dma(pool);
        return 0;
 }
 EXPORT_SYMBOL(xp_dma_map);
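
The payoff of caching the answer at map time is that the data path can branch
on a single bool instead of re-deriving it per buffer. A hedged sketch of such
a consumer (a hypothetical helper, not part of this diff; the real fast path
lives in the xsk headers), reusing the pool_sketch type from the sketch above:

  #include <linux/dma-mapping.h>

  struct pool_sketch {                    /* as in the sketch above */
          struct device *dev;
          bool dma_need_sync;
  };

  /* Hypothetical fast-path helper: skip the (possibly expensive)
   * sync call outright when mapping time established that no page
   * in the pool ever needs one. */
  static inline void pool_sync_for_cpu_sketch(struct pool_sketch *pool,
                                              dma_addr_t dma, u32 len)
  {
          if (!pool->dma_need_sync)
                  return;
          dma_sync_single_range_for_cpu(pool->dev, dma, 0, len,
                                        DMA_BIDIRECTIONAL);
  }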