1 | /* SPDX-License-Identifier: GPL-2.0 */ | |
2 | /* include this file if the platform implements the dma_ DMA Mapping API | |
3 | * and wants to provide the pci_ DMA Mapping API in terms of it */ | |
4 | ||
5 | #ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H | |
6 | #define _ASM_GENERIC_PCI_DMA_COMPAT_H | |
7 | ||
8 | #include <linux/dma-mapping.h> | |
9 | ||
10 | /* This defines the direction arg to the DMA mapping routines. */ | |
11 | #define PCI_DMA_BIDIRECTIONAL 0 | |
12 | #define PCI_DMA_TODEVICE 1 | |
13 | #define PCI_DMA_FROMDEVICE 2 | |
14 | #define PCI_DMA_NONE 3 | |
15 | ||
16 | static inline void * | |
17 | pci_alloc_consistent(struct pci_dev *hwdev, size_t size, | |
18 | dma_addr_t *dma_handle) | |
19 | { | |
20 | return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC); | |
21 | } | |
22 | ||
23 | static inline void * | |
24 | pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, | |
25 | dma_addr_t *dma_handle) | |
26 | { | |
27 | return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, | |
28 | size, dma_handle, GFP_ATOMIC); | |
29 | } | |
30 | ||
31 | static inline void | |
32 | pci_free_consistent(struct pci_dev *hwdev, size_t size, | |
33 | void *vaddr, dma_addr_t dma_handle) | |
34 | { | |
35 | dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle); | |
36 | } | |
37 | ||
38 | static inline dma_addr_t | |
39 | pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) | |
40 | { | |
41 | return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction); | |
42 | } | |
43 | ||
44 | static inline void | |
45 | pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, | |
46 | size_t size, int direction) | |
47 | { | |
48 | dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); | |
49 | } | |
50 | ||
51 | static inline dma_addr_t | |
52 | pci_map_page(struct pci_dev *hwdev, struct page *page, | |
53 | unsigned long offset, size_t size, int direction) | |
54 | { | |
55 | return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction); | |
56 | } | |
57 | ||
58 | static inline void | |
59 | pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, | |
60 | size_t size, int direction) | |
61 | { | |
62 | dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction); | |
63 | } | |
64 | ||
65 | static inline int | |
66 | pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, | |
67 | int nents, int direction) | |
68 | { | |
69 | return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); | |
70 | } | |
71 | ||
72 | static inline void | |
73 | pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, | |
74 | int nents, int direction) | |
75 | { | |
76 | dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); | |
77 | } | |
78 | ||
79 | static inline void | |
80 | pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, | |
81 | size_t size, int direction) | |
82 | { | |
83 | dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); | |
84 | } | |
85 | ||
86 | static inline void | |
87 | pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, | |
88 | size_t size, int direction) | |
89 | { | |
90 | dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); | |
91 | } | |
92 | ||
93 | static inline void | |
94 | pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, | |
95 | int nelems, int direction) | |
96 | { | |
97 | dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); | |
98 | } | |
99 | ||
100 | static inline void | |
101 | pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, | |
102 | int nelems, int direction) | |
103 | { | |
104 | dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); | |
105 | } | |
106 | ||
107 | static inline int | |
108 | pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) | |
109 | { | |
110 | return dma_mapping_error(&pdev->dev, dma_addr); | |
111 | } | |
112 | ||
113 | #ifdef CONFIG_PCI | |
114 | static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) | |
115 | { | |
116 | return dma_set_mask(&dev->dev, mask); | |
117 | } | |
118 | ||
119 | static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) | |
120 | { | |
121 | return dma_set_coherent_mask(&dev->dev, mask); | |
122 | } | |
123 | ||
124 | static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, | |
125 | unsigned int size) | |
126 | { | |
127 | return dma_set_max_seg_size(&dev->dev, size); | |
128 | } | |
129 | ||
130 | static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, | |
131 | unsigned long mask) | |
132 | { | |
133 | return dma_set_seg_boundary(&dev->dev, mask); | |
134 | } | |
135 | #else | |
136 | static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) | |
137 | { return -EIO; } | |
138 | static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) | |
139 | { return -EIO; } | |
140 | static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, | |
141 | unsigned int size) | |
142 | { return -EIO; } | |
143 | static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, | |
144 | unsigned long mask) | |
145 | { return -EIO; } | |
146 | #endif | |
147 | ||
148 | #endif |