// SPDX-License-Identifier: GPL-2.0-only
/*
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *						-- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>

#include <asm/tlbflush.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>

/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE		(IOREMAP_TOP)
#define CONSISTENT_END		(CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)

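/*
 * Worked example (illustrative values; both knobs are platform
 * configuration): with IOREMAP_TOP at 0xff000000 and a
 * CONFIG_CONSISTENT_SIZE of 2MB, the region spans
 * 0xff000000..0xff200000, and CONSISTENT_OFFSET(0xff003000)
 * evaluates to page index 3 within that region.
 */
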
/*
 * Lock protecting allocations from the (2MB) uncached, DMA-consistent
 * mapping region.
 */
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct ppc_vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

static struct ppc_vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

static struct ppc_vm_region *
ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct ppc_vm_region *c, *new;

	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}

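/*
 * The list walk above is a first-fit scan of regions kept sorted by
 * address: each iteration tries the gap below region 'c'. For example
 * (addresses illustrative), with regions [BASE, BASE+0x1000) and
 * [BASE+0x3000, BASE+0x4000) already allocated, a 0x2000-byte request
 * lands in the hole at BASE+0x1000 and the new entry is linked in just
 * before the second region, keeping the list sorted.
 */
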
static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
	struct ppc_vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	struct ppc_vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#zx mask %#llx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);

	/* Might be useful if we ever have a real legacy DMA zone... */
	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = ppc_vm_region_alloc(&consistent_head, size,
			gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		struct page *end = page + (1 << order);

		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*dma_handle = phys_to_dma(dev, page_to_phys(page));

		do {
			SetPageReserved(page);
			map_kernel_page(vaddr, page_to_phys(page),
					pgprot_noncached(PAGE_KERNEL));
			page++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}

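/*
 * Drivers normally reach this through the generic DMA API rather than
 * by calling it directly, e.g. (hypothetical driver code):
 *
 *	dma_addr_t dma;
 *	void *cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *
 * On CONFIG_NOT_COHERENT_CACHE platforms this allocator runs and
 * cpu_addr is a non-cached mapping inside
 * [CONSISTENT_BASE, CONSISTENT_END).
 */
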
/*
 * Free a region previously allocated by arch_dma_alloc() above.
 */
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	struct ppc_vm_region *c;
	unsigned long flags, addr;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	addr = c->vm_start;
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
							       addr),
						    addr),
					 addr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, addr, ptep);
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);
				__free_reserved_page(page);
			}
		}
		addr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}

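/*
 * The matching release for the allocation example above, again via the
 * generic DMA API (hypothetical driver code):
 *
 *	dma_free_coherent(dev, SZ_4K, cpu_addr, dma);
 *
 * which unmaps the non-cached range and returns the pages.
 */
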
/*
 * make an area consistent.
 */
static void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start | end) & (L1_CACHE_BYTES - 1))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}

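/*
 * These cache operations back the streaming DMA API on non-coherent
 * parts. For example (hypothetical driver code):
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 * ends up cleaning (writing back) the cache lines covering buf before
 * the device reads them, while a DMA_FROM_DEVICE mapping invalidates
 * them so the CPU sees what the device wrote.
 */
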
#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */

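/*
 * Segmentation example (illustrative numbers, 4K pages): for a buffer
 * starting at offset 0x800 of its first page with size 0x2000, the
 * loop above syncs three segments: 0x800 bytes of page 0, all
 * 0x1000 bytes of page 1, and the remaining 0x800 bytes of page 2
 * (nr_segs = 1 + (0x2000 - 0x800 + 0xfff) / 0x1000 = 3).
 */
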
/*
 * __dma_sync_page makes memory consistent. Identical to __dma_sync, but
 * takes a physical address instead of a virtual address.
 */
static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned offset = paddr & ~PAGE_MASK;

#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, dir);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, dir);
#endif
}

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	__dma_sync_page(paddr, size, dir);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	__dma_sync_page(paddr, size, dir);
}

/*
 * Return the PFN for a given cpu virtual address returned by arch_dma_alloc.
 */
long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
		dma_addr_t dma_addr)
{
	/* This should always be populated, so we don't test every
	 * level. If that fails, we'll have a nice crash which
	 * will be as good as a BUG_ON()
	 */
	unsigned long cpu_addr = (unsigned long)vaddr;
	pgd_t *pgd = pgd_offset_k(cpu_addr);
	pud_t *pud = pud_offset(pgd, cpu_addr);
	pmd_t *pmd = pmd_offset(pud, cpu_addr);
	pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return 0;
	return pte_pfn(*ptep);
}
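
/*
 * The DMA core uses this when mapping a coherent buffer into user
 * space, e.g. (hypothetical driver code) dma_mmap_coherent(dev, vma,
 * cpu_addr, dma, size): the PFN of the uncached mapping must be looked
 * up through the kernel page tables, since the linear virt-to-pfn
 * arithmetic used on cache-coherent platforms does not apply here.
 */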