arch/ppc64/kernel/dma.c — Linux 2.6.12-rc2 (from mirror_ubuntu-bionic-kernel.git, git.proxmox.com)
1 /*
2 * Copyright (C) 2004 IBM Corporation
3 *
4 * Implements the generic device dma API for ppc64. Handles
5 * the pci and vio busses
6 */
7
8 #include <linux/device.h>
9 #include <linux/dma-mapping.h>
10 /* Include the busses we support */
11 #include <linux/pci.h>
12 #include <asm/vio.h>
13 #include <asm/scatterlist.h>
14 #include <asm/bug.h>
15
16 static struct dma_mapping_ops *get_dma_ops(struct device *dev)
17 {
18 if (dev->bus == &pci_bus_type)
19 return &pci_dma_ops;
20 #ifdef CONFIG_IBMVIO
21 if (dev->bus == &vio_bus_type)
22 return &vio_dma_ops;
23 #endif
24 return NULL;
25 }
26
27 int dma_supported(struct device *dev, u64 mask)
28 {
29 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
30
31 if (dma_ops)
32 return dma_ops->dma_supported(dev, mask);
33 BUG();
34 return 0;
35 }
36 EXPORT_SYMBOL(dma_supported);
37
38 int dma_set_mask(struct device *dev, u64 dma_mask)
39 {
40 if (dev->bus == &pci_bus_type)
41 return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
42 #ifdef CONFIG_IBMVIO
43 if (dev->bus == &vio_bus_type)
44 return -EIO;
45 #endif /* CONFIG_IBMVIO */
46 BUG();
47 return 0;
48 }
49 EXPORT_SYMBOL(dma_set_mask);
50
51 void *dma_alloc_coherent(struct device *dev, size_t size,
52 dma_addr_t *dma_handle, unsigned int __nocast flag)
53 {
54 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
55
56 if (dma_ops)
57 return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
58 BUG();
59 return NULL;
60 }
61 EXPORT_SYMBOL(dma_alloc_coherent);
62
63 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
64 dma_addr_t dma_handle)
65 {
66 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
67
68 if (dma_ops)
69 dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
70 else
71 BUG();
72 }
73 EXPORT_SYMBOL(dma_free_coherent);
74
75 dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
76 enum dma_data_direction direction)
77 {
78 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
79
80 if (dma_ops)
81 return dma_ops->map_single(dev, cpu_addr, size, direction);
82 BUG();
83 return (dma_addr_t)0;
84 }
85 EXPORT_SYMBOL(dma_map_single);
86
87 void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
88 enum dma_data_direction direction)
89 {
90 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
91
92 if (dma_ops)
93 dma_ops->unmap_single(dev, dma_addr, size, direction);
94 else
95 BUG();
96 }
97 EXPORT_SYMBOL(dma_unmap_single);
98
99 dma_addr_t dma_map_page(struct device *dev, struct page *page,
100 unsigned long offset, size_t size,
101 enum dma_data_direction direction)
102 {
103 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
104
105 if (dma_ops)
106 return dma_ops->map_single(dev,
107 (page_address(page) + offset), size, direction);
108 BUG();
109 return (dma_addr_t)0;
110 }
111 EXPORT_SYMBOL(dma_map_page);
112
113 void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
114 enum dma_data_direction direction)
115 {
116 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
117
118 if (dma_ops)
119 dma_ops->unmap_single(dev, dma_address, size, direction);
120 else
121 BUG();
122 }
123 EXPORT_SYMBOL(dma_unmap_page);
124
125 int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
126 enum dma_data_direction direction)
127 {
128 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
129
130 if (dma_ops)
131 return dma_ops->map_sg(dev, sg, nents, direction);
132 BUG();
133 return 0;
134 }
135 EXPORT_SYMBOL(dma_map_sg);
136
137 void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
138 enum dma_data_direction direction)
139 {
140 struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
141
142 if (dma_ops)
143 dma_ops->unmap_sg(dev, sg, nhwentries, direction);
144 else
145 BUG();
146 }
147 EXPORT_SYMBOL(dma_unmap_sg);