/*
 * DMA region bookkeeping routines
 *
 * Copyright (C) 2002 Maas Digital LLC
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 */

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include "dma.h"

/* dma_prog_region */

void dma_prog_region_init(struct dma_prog_region *prog)
{
	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}

int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev)
{
	/* round up to page size */
	n_bytes = PAGE_ALIGN(n_bytes);

	prog->n_pages = n_bytes >> PAGE_SHIFT;

	prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
	if (!prog->kvirt) {
		printk(KERN_ERR "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
		dma_prog_region_free(prog);
		return -ENOMEM;
	}

	prog->dev = dev;

	return 0;
}

void dma_prog_region_free(struct dma_prog_region *prog)
{
	if (prog->kvirt) {
		pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT, prog->kvirt, prog->bus_addr);
	}

	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}
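
/*
 * Typical lifecycle (hypothetical caller sketch, not part of this file;
 * "pdev" is an assumed, already-enabled PCI device):
 *
 *	struct dma_prog_region prog;
 *
 *	dma_prog_region_init(&prog);
 *	if (dma_prog_region_alloc(&prog, 4 * PAGE_SIZE, pdev))
 *		return -ENOMEM;
 *	...use prog.kvirt and prog.bus_addr...
 *	dma_prog_region_free(&prog);
 */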

/* dma_region */

void dma_region_init(struct dma_region *dma)
{
	dma->kvirt = NULL;
	dma->dev = NULL;
	dma->n_pages = 0;
	dma->n_dma_pages = 0;
	dma->sglist = NULL;
}

int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction)
{
	unsigned int i;

	/* round up to page size */
	n_bytes = PAGE_ALIGN(n_bytes);

	dma->n_pages = n_bytes >> PAGE_SHIFT;

	dma->kvirt = vmalloc_32(n_bytes);
	if (!dma->kvirt) {
		printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
		goto err;
	}

	/* clear the RAM out, so no junk is handed to the user */
	memset(dma->kvirt, 0, n_bytes);

	/* allocate scatter/gather list */
	dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
	if (!dma->sglist) {
		printk(KERN_ERR "dma_region_alloc: vmalloc(sglist) failed\n");
		goto err;
	}

	/* just to be safe - this will become unnecessary once sglist->address goes away */
	memset(dma->sglist, 0, dma->n_pages * sizeof(*dma->sglist));

	/* fill scatter/gather list with pages */
	for (i = 0; i < dma->n_pages; i++) {
		unsigned long va = (unsigned long)dma->kvirt + (i << PAGE_SHIFT);

		dma->sglist[i].page = vmalloc_to_page((void *)va);
		dma->sglist[i].length = PAGE_SIZE;
	}

	/* map sglist to the IOMMU */
	dma->n_dma_pages = pci_map_sg(dev, dma->sglist, dma->n_pages, direction);

	if (dma->n_dma_pages == 0) {
		printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
		goto err;
	}

	dma->dev = dev;
	dma->direction = direction;

	return 0;

err:
	dma_region_free(dma);
	return -ENOMEM;
}
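
/*
 * Note: pci_map_sg() may coalesce physically contiguous pages into fewer
 * DMA mappings, so dma->n_dma_pages can be smaller than dma->n_pages.
 * Everything below that walks the mapped list therefore iterates over
 * n_dma_pages and uses sg_dma_len() rather than assuming one page per entry.
 */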

void dma_region_free(struct dma_region *dma)
{
	if (dma->n_dma_pages) {
		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages, dma->direction);
		dma->n_dma_pages = 0;
		dma->dev = NULL;
	}

	vfree(dma->sglist);
	dma->sglist = NULL;

	vfree(dma->kvirt);
	dma->kvirt = NULL;
	dma->n_pages = 0;
}
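
/*
 * Matching lifecycle sketch for a dma_region (hypothetical caller, not part
 * of this file; "pdev" is an assumed PCI device):
 *
 *	struct dma_region dma;
 *
 *	dma_region_init(&dma);
 *	if (dma_region_alloc(&dma, 64 * 1024, pdev, PCI_DMA_BIDIRECTIONAL))
 *		return -ENOMEM;
 *	...device and CPU share dma.kvirt...
 *	dma_region_free(&dma);
 */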

/* find the scatterlist index and remaining offset corresponding to a
   given offset from the beginning of the buffer */
static inline int dma_region_find(struct dma_region *dma, unsigned long offset, unsigned long *rem)
{
	int i;
	unsigned long off = offset;

	for (i = 0; i < dma->n_dma_pages; i++) {
		if (off < sg_dma_len(&dma->sglist[i])) {
			*rem = off;
			break;
		}

		off -= sg_dma_len(&dma->sglist[i]);
	}

	BUG_ON(i >= dma->n_dma_pages);

	return i;
}
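
/*
 * Worked example (assuming no entries were merged, so every mapped entry
 * covers one 4 KiB page): for offset 9000, the loop subtracts 4096 twice
 * (9000 -> 4904 -> 808) and stops at index 2 with *rem = 808; the bus
 * address of that byte is then sg_dma_address(&sglist[2]) + 808.
 */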

dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset)
{
	unsigned long rem = 0;

	struct scatterlist *sg = &dma->sglist[dma_region_find(dma, offset, &rem)];
	return sg_dma_address(sg) + rem;
}

void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsigned long len)
{
	int first, last;
	unsigned long rem;

	if (!len)
		len = 1;

	first = dma_region_find(dma, offset, &rem);
	last = dma_region_find(dma, offset + len - 1, &rem);

	pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
}

void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, unsigned long len)
{
	int first, last;
	unsigned long rem;

	if (!len)
		len = 1;

	first = dma_region_find(dma, offset, &rem);
	last = dma_region_find(dma, offset + len - 1, &rem);

	pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
}
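
/*
 * Usage note (hypothetical caller): call dma_region_sync_for_cpu() before
 * the CPU reads data the device has written, and
 * dma_region_sync_for_device() after the CPU writes data the device will
 * read.  With an assumed helper process_frame():
 *
 *	dma_region_sync_for_cpu(&dma, frame_off, frame_len);
 *	process_frame(dma.kvirt + frame_off);
 */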

#ifdef CONFIG_MMU

/* nopage() handler for mmap access */

static struct page *dma_region_pagefault(struct vm_area_struct *area,
					 unsigned long address, int *type)
{
	unsigned long offset;
	unsigned long kernel_virt_addr;
	struct page *ret = NOPAGE_SIGBUS;

	struct dma_region *dma = (struct dma_region *)area->vm_private_data;

	if (!dma->kvirt)
		goto out;

	if ((address < (unsigned long)area->vm_start) ||
	    (address > (unsigned long)area->vm_start + (dma->n_pages << PAGE_SHIFT)))
		goto out;

	if (type)
		*type = VM_FAULT_MINOR;
	offset = address - area->vm_start;
	kernel_virt_addr = (unsigned long)dma->kvirt + offset;
	ret = vmalloc_to_page((void *)kernel_virt_addr);
	get_page(ret);
out:
	return ret;
}

static struct vm_operations_struct dma_region_vm_ops = {
	.nopage = dma_region_pagefault,
};

int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
{
	unsigned long size;

	if (!dma->kvirt)
		return -EINVAL;

	/* the mapping must begin at the start of the region */
	if (vma->vm_pgoff != 0)
		return -EINVAL;

	/* check the length */
	size = vma->vm_end - vma->vm_start;
	if (size > (dma->n_pages << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_ops = &dma_region_vm_ops;
	vma->vm_private_data = dma;
	vma->vm_file = file;
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
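
/*
 * Wiring sketch (hypothetical, not part of this file): a character driver
 * would typically forward its own mmap handler here; "my_mmap" and
 * "my_fops" are assumed names:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct dma_region *dma = file->private_data;
 *		return dma_region_mmap(dma, file, vma);
 *	}
 *
 *	static struct file_operations my_fops = {
 *		.mmap = my_mmap,
 *	};
 */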

#else /* CONFIG_MMU */

int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
{
	return -EINVAL;
}

#endif /* CONFIG_MMU */