#ifndef __SPARC64_PCI_H
#define __SPARC64_PCI_H

#ifdef __KERNEL__

#include <linux/fs.h>
#include <linux/mm.h>

/* Can be used to override the logic in pci_scan_bus for skipping
 * already-configured bus numbers - to be used for buggy BIOSes
 * or architectures with incomplete PCI setup by the loader.
 */
#define pcibios_assign_all_busses()	0
#define pcibios_scan_all_fns(a, b)	0

#define PCIBIOS_MIN_IO		0UL
#define PCIBIOS_MIN_MEM		0UL

#define PCI_IRQ_NONE		0xffffffff

static inline void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
static inline void pcibios_penalize_isa_irq(int irq, int active)
{
	/* We don't do dynamic PCI IRQ allocation */
}

/* Dynamic DMA mapping stuff.
 */

/* The PCI address space does not equal the physical memory
 * address space. The networking and block device layers use
 * this boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS	(0)

#include <asm/scatterlist.h>

struct pci_dev;

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be a valid struct pci_dev pointer for PCI devices.
 */
extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_handle was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
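
/* Illustrative sketch, not part of the original API: one way a driver
 * might pair the two consistent-mode calls above. The function name and
 * the PAGE_SIZE buffer length are assumptions for the example.
 */
static inline int example_consistent_cycle(struct pci_dev *pdev)
{
	void *cpu_buf;
	dma_addr_t bus_addr;

	cpu_buf = pci_alloc_consistent(pdev, PAGE_SIZE, &bus_addr);
	if (cpu_buf == NULL)
		return -ENOMEM;

	/* ... hand bus_addr to the device, access cpu_buf from the CPU ... */

	pci_free_consistent(pdev, PAGE_SIZE, cpu_buf, bus_addr);
	return 0;
}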

/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
 */
extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);

/* Unmap a single streaming mode DMA translation. The dma_addr and size
 * must match what was provided in a previous pci_map_single call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
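
/* Illustrative sketch, not part of the original API: a minimal streaming
 * cycle for one buffer. The function name is an assumption for the
 * example; real code would also test the returned address with
 * pci_dma_mapping_error(), defined later in this file.
 */
static inline void example_streaming_cycle(struct pci_dev *pdev, void *buf,
					   size_t len)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	/* ... the device owns the buffer; point it at "mapping" ... */

	pci_unmap_single(pdev, mapping, len, PCI_DMA_TODEVICE);

	/* CPU reads now see whatever the device wrote. */
}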

/* No highmem on sparc64, plus we have an IOMMU, so mapping pages is easy. */
#define pci_map_page(dev, page, off, size, dir) \
	pci_map_single(dev, (page_address(page) + (off)), size, dir)
#define pci_unmap_page(dev,addr,sz,dir) pci_unmap_single(dev,addr,sz,dir)

/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)		\
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)		\
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
	(((PTR)->LEN_NAME) = (VAL))
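
/* Illustrative sketch, not part of the original API: the state macros
 * above are meant to live in a driver-private descriptor so the unmap
 * parameters survive until completion time. The structure and field
 * names here are assumptions for the example.
 */
struct example_tx_desc {
	void *cpu_buf;
	DECLARE_PCI_UNMAP_ADDR(mapping)
	DECLARE_PCI_UNMAP_LEN(maplen)
};

static inline void example_desc_fill(struct example_tx_desc *desc,
				     dma_addr_t addr, __u32 len)
{
	pci_unmap_addr_set(desc, mapping, addr);
	pci_unmap_len_set(desc, maplen, len);
}

static inline void example_desc_unmap(struct pci_dev *pdev,
				      struct example_tx_desc *desc)
{
	pci_unmap_single(pdev,
			 pci_unmap_addr(desc, mapping),
			 pci_unmap_len(desc, maplen),
			 PCI_DMA_TODEVICE);
}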

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
 * above pci_map_single interface. Here the scatter-gather list
 * elements are each tagged with the appropriate dma address
 * and length. They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements.
 * (for example via virtual mapping capabilities)
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
		      int nents, int direction);

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
			 int nhwents, int direction);
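
/* Illustrative sketch, not part of the original API: mapping a
 * scatterlist and programming only the pairs actually used. The function
 * name and the device-programming step are assumptions for the example.
 */
static inline int example_map_ring(struct pci_dev *pdev,
				   struct scatterlist *sg, int nents)
{
	int i, count;

	count = pci_map_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
	if (count == 0)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		/* Program sg_dma_address(&sg[i]) and sg_dma_length(&sg[i])
		 * into the device's DMA descriptors here.
		 */
	}

	/* Note: unmap with the original nents, not the returned count. */
	pci_unmap_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
	return 0;
}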

/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so. At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_single_for_device, and then the
 * device again owns the buffer.
 */
extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
					size_t size, int direction);

static inline void
pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
			       size_t size, int direction)
{
	/* No flushing needed to sync cpu writes to the device. */
	BUG_ON(direction == PCI_DMA_NONE);
}
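
/* Illustrative sketch, not part of the original API: the ownership dance
 * for peeking at a live streaming mapping with the CPU. The function
 * name and the inspection step are assumptions for the example.
 */
static inline void example_peek_buffer(struct pci_dev *pdev,
				       dma_addr_t mapping, size_t len)
{
	pci_dma_sync_single_for_cpu(pdev, mapping, len, PCI_DMA_FROMDEVICE);

	/* ... the CPU may now read the buffer contents ... */

	pci_dma_sync_single_for_device(pdev, mapping, len, PCI_DMA_FROMDEVICE);
	/* The device owns the buffer again. */
}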

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);

static inline void
pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
			   int nelems, int direction)
{
	/* No flushing needed to sync cpu writes to the device. */
	BUG_ON(direction == PCI_DMA_NONE);
}

/* Return whether the given PCI device DMA address mask can
 * be supported properly. For example, if your device can
 * only drive the low 24 bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
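
/* Illustrative sketch, not part of the original API: a device that can
 * only drive 24 address lines would probe its mask like this before
 * doing any DMA. The function name is an assumption for the example.
 */
static inline int example_check_mask(struct pci_dev *pdev)
{
	if (!pci_dma_supported(pdev, 0x00ffffff))
		return -EIO;
	return 0;
}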

/* PCI IOMMU mapping bypass support. */

/* PCI 64-bit addressing works for all slots on all controller
 * types on sparc64. However, it requires that the device
 * can drive enough of the 64 bits.
 */
#define PCI64_REQUIRED_MASK	(~(dma64_addr_t)0)
#define PCI64_ADDR_BASE		0xfffc000000000000UL

/* Usage of the pci_dac_foo interfaces is only valid if this
 * test passes.
 */
#define pci_dac_dma_supported(pci_dev, mask) \
	((((mask) & PCI64_REQUIRED_MASK) == PCI64_REQUIRED_MASK) ? 1 : 0)

static inline dma64_addr_t
pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
{
	return (PCI64_ADDR_BASE +
		__pa(page_address(page)) + offset);
}

static inline struct page *
pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
	unsigned long paddr = (dma_addr & PAGE_MASK) - PCI64_ADDR_BASE;

	return virt_to_page(__va(paddr));
}

static inline unsigned long
pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
	return (dma_addr & ~PAGE_MASK);
}
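
/* Illustrative sketch, not part of the original API: a round trip through
 * the DAC helpers above. The function name and the use of a full 64-bit
 * mask are assumptions for the example.
 */
static inline int example_dac_round_trip(struct pci_dev *pdev,
					 struct page *page)
{
	dma64_addr_t dma_addr;

	if (!pci_dac_dma_supported(pdev, ~(u64)0))
		return -EIO;

	dma_addr = pci_dac_page_to_dma(pdev, page, 0, PCI_DMA_TODEVICE);

	/* ... later, recover the page and offset from the address ... */
	page = pci_dac_dma_to_page(pdev, dma_addr);
	(void) pci_dac_dma_to_offset(pdev, dma_addr);
	return 0;
}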

static inline void
pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
{
	/* DAC cycle addressing does not make use of the
	 * PCI controller's streaming cache, so nothing to do.
	 */
}

static inline void
pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
{
	/* DAC cycle addressing does not make use of the
	 * PCI controller's streaming cache, so nothing to do.
	 */
}

#define PCI_DMA_ERROR_CODE	(~(dma_addr_t)0x0)

static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
{
	return (dma_addr == PCI_DMA_ERROR_CODE);
}

#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	unsigned long cacheline_size;
	u8 byte;

	/* The PCI cache line size register is in units of 32-bit words;
	 * a value of zero means it was never set, so fall back to 1024 bytes.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	*strat = PCI_DMA_BURST_BOUNDARY;
	*strategy_parameter = cacheline_size;
}
#endif

/* Return the index of the PCI controller for the given PCI bus. */

extern int pci_domain_nr(struct pci_bus *bus);
static inline int pci_proc_domain(struct pci_bus *bus)
{
	return 1;
}

/* Platform support for /proc/bus/pci/X/Y mmap()s. */

#define HAVE_PCI_MMAP
#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
#define get_pci_unmapped_area get_fb_unmapped_area

extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state,
			       int write_combine);

/* Platform specific MWI support. */
#define HAVE_ARCH_PCI_MWI
extern int pcibios_prep_mwi(struct pci_dev *dev);

extern void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			struct resource *res);

extern void
pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			struct pci_bus_region *region);

extern struct resource *pcibios_select_root(struct pci_dev *, struct resource *);

static inline void pcibios_add_platform_entries(struct pci_dev *dev)
{
}

static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
	return PCI_IRQ_NONE;
}

#endif /* __KERNEL__ */

#endif /* __SPARC64_PCI_H */