net/mellanox: switch from 'pci_' to 'dma_' API
author    Christophe JAILLET <christophe.jaillet@wanadoo.fr>
          Sun, 22 Aug 2021 19:12:41 +0000 (21:12 +0200)
committer David S. Miller <davem@davemloft.net>
          Mon, 23 Aug 2021 11:02:28 +0000 (12:02 +0100)
The wrappers in include/linux/pci-dma-compat.h should go away.
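
For reference, the wrappers are trivial static inlines that just forward to
the generic DMA API on the pci_dev's embedded struct device, along these
lines (paraphrased, not quoted verbatim from the header):

static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
        return dma_set_mask(&dev->dev, mask);
}

static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                                         dma_addr_t *dma_handle)
{
        /* note the hard-coded GFP_ATOMIC */
        return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
}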

The patch has been generated with the Coccinelle script below.

It has been hand-modified to use 'dma_set_mask_and_coherent()' instead of
the 'pci_set_dma_mask()'/'pci_set_consistent_dma_mask()' pair when
applicable.
This is less verbose.
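
As a minimal sketch of that hand edit (the function name below is made up
for illustration; the real hunks are in the mlx4 and mlx5 main.c changes
further down):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_masks(struct pci_dev *pdev)
{
        /* Before, two calls were needed:
         *
         *      err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
         *      if (!err)
         *              err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
         *
         * After, a single call sets both the streaming and the coherent mask.
         */
        return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
}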

It has been compile-tested.

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)
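
The two allocation rules above intentionally leave the GFP flag as the
incomplete token 'GFP_': the right flag cannot be inferred mechanically and
has to be picked by hand at each converted call site. A hypothetical example
of that manual step (no such call site exists in this patch, which only
touches DMA directions and masks):

static void *example_alloc(struct pci_dev *pdev, size_t size,
                           dma_addr_t *dma_handle)
{
        /* was: return pci_zalloc_consistent(pdev, size, dma_handle);
         * The old wrapper always implied GFP_ATOMIC; after the conversion
         * the flag is explicit (GFP_KERNEL where the caller may sleep). */
        return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_KERNEL);
}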

Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx5/core/main.c

diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 442991d91c15e581695dbf3a8dcde47e1afe2c1b..7f6d3b82c29b297ca090002256bea1e3bf73ca4c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -991,7 +991,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
                 * expense of more costly truesize accounting
                 */
                priv->frag_info[0].frag_stride = PAGE_SIZE;
-               priv->dma_dir = PCI_DMA_BIDIRECTIONAL;
+               priv->dma_dir = DMA_BIDIRECTIONAL;
                priv->rx_headroom = XDP_PACKET_HEADROOM;
                i = 1;
        } else {
@@ -1021,7 +1021,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
                        buf_size += frag_size;
                        i++;
                }
-               priv->dma_dir = PCI_DMA_FROMDEVICE;
+               priv->dma_dir = DMA_FROM_DEVICE;
                priv->rx_headroom = 0;
        }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 31b74bddb7cd8f154ba097e8385f9d35ffdf9a92..c56b9dba4c71898b61e87fd32e5fa523c313e445 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -297,12 +297,12 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                        dma_unmap_single(priv->ddev,
                                         tx_info->map0_dma,
                                         tx_info->map0_byte_count,
-                                        PCI_DMA_TODEVICE);
+                                        DMA_TO_DEVICE);
                else
                        dma_unmap_page(priv->ddev,
                                       tx_info->map0_dma,
                                       tx_info->map0_byte_count,
-                                      PCI_DMA_TODEVICE);
+                                      DMA_TO_DEVICE);
                /* Optimize the common case when there are no wraparounds */
                if (likely((void *)tx_desc +
                           (tx_info->nr_txbb << LOG_TXBB_SIZE) <= end)) {
@@ -311,7 +311,7 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                                dma_unmap_page(priv->ddev,
                                        (dma_addr_t)be64_to_cpu(data->addr),
                                        be32_to_cpu(data->byte_count),
-                                       PCI_DMA_TODEVICE);
+                                       DMA_TO_DEVICE);
                        }
                } else {
                        if ((void *)data >= end)
@@ -325,7 +325,7 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                                dma_unmap_page(priv->ddev,
                                        (dma_addr_t)be64_to_cpu(data->addr),
                                        be32_to_cpu(data->byte_count),
-                                       PCI_DMA_TODEVICE);
+                                       DMA_TO_DEVICE);
                        }
                }
        }
@@ -831,7 +831,7 @@ static bool mlx4_en_build_dma_wqe(struct mlx4_en_priv *priv,
 
                dma = dma_map_single(ddev, skb->data +
                                     lso_header_size, byte_count,
-                                    PCI_DMA_TODEVICE);
+                                    DMA_TO_DEVICE);
                if (dma_mapping_error(ddev, dma))
                        goto tx_drop_unmap;
 
@@ -853,7 +853,7 @@ tx_drop_unmap:
                ++data;
                dma_unmap_page(ddev, (dma_addr_t)be64_to_cpu(data->addr),
                               be32_to_cpu(data->byte_count),
-                              PCI_DMA_TODEVICE);
+                              DMA_TO_DEVICE);
        }
 
        return false;
@@ -1170,7 +1170,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
        tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN);
 
        dma_sync_single_range_for_device(priv->ddev, dma, frame->page_offset,
-                                        length, PCI_DMA_TODEVICE);
+                                        length, DMA_TO_DEVICE);
 
        data->addr = cpu_to_be64(dma + frame->page_offset);
        dma_wmb();
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7267c6c6d2e2f60efc35a779a6419a096569435a..5a6b0fcaf7f8a59b3747a707862d0bdbdf471723 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3806,24 +3806,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
 
        pci_set_master(pdev);
 
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
                        goto err_release_regions;
                }
        }
-       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-       if (err) {
-               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (err) {
-                       dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
-                       goto err_release_regions;
-               }
-       }
 
        /* Allow large DMA segments, up to the firmware limit of 1 GB */
        dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 80cabf9b1787df5ef3d21a29312c8fe25c30da66..79482824c64ff29ca85b32ca77f115510bee4bde 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -252,28 +252,16 @@ static int set_dma_caps(struct pci_dev *pdev)
 {
        int err;
 
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
                        return err;
                }
        }
 
-       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-       if (err) {
-               dev_warn(&pdev->dev,
-                        "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (err) {
-                       dev_err(&pdev->dev,
-                               "Can't set consistent PCI DMA mask, aborting\n");
-                       return err;
-               }
-       }
-
        dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
        return err;
 }