// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Port on Texas Instruments TMS320C6x architecture
 *
 *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
 *  Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com>
 *
 *  DMA uncached mapping support.
 *
 *  Using code pulled from ARM
 *  Copyright (C) 2000-2004 Russell King
 */
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dma-noncoherent.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/setup.h>

/*
 * DMA coherent memory management, can be redefined using the memdma=
 * kernel command line
 */

/* none by default */
static phys_addr_t dma_base;
static u32 dma_size;
static u32 dma_pages;

static unsigned long *dma_bitmap;

/* bitmap lock */
static DEFINE_SPINLOCK(dma_lock);
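
/*
 * Illustrative sketch (not part of this file): the memdma= option
 * mentioned above is parsed early in boot, outside this file, and the
 * result is fed to coherent_mem_init() below.  A minimal early_param
 * handler following the usual "memdma=size[,addr]" convention could
 * look like the following; the variable names here are hypothetical.
 *
 *	static int __init early_memdma(char *p)
 *	{
 *		if (!p || *p == 0)
 *			return 0;
 *
 *		dma_memory_size = memparse(p, &p);
 *		if (*p == ',')
 *			dma_memory_start = memparse(p, &p);
 *		return 0;
 *	}
 *	early_param("memdma", early_memdma);
 */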

/*
 * Return a DMA coherent and contiguous memory chunk from the DMA memory
 */
static inline u32 __alloc_dma_pages(int order)
{
	unsigned long flags;
	u32 pos;

	spin_lock_irqsave(&dma_lock, flags);
	pos = bitmap_find_free_region(dma_bitmap, dma_pages, order);
	spin_unlock_irqrestore(&dma_lock, flags);

	return dma_base + (pos << PAGE_SHIFT);
}
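
/*
 * Worked example (assuming the common 4 KiB PAGE_SIZE): a 6000-byte
 * request spans two pages, get_count_order(2) == 1, and the bitmap is
 * searched for a naturally aligned run of 2^1 = 2 free pages.  On
 * success the returned address is dma_base plus the byte offset of
 * that run within the region.
 */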

static void __free_dma_pages(u32 addr, int order)
{
	unsigned long flags;
	u32 pos = (addr - dma_base) >> PAGE_SHIFT;

	if (addr < dma_base || (pos + (1 << order)) >= dma_pages) {
		printk(KERN_ERR "%s: freeing outside range.\n", __func__);
		BUG();
	}

	spin_lock_irqsave(&dma_lock, flags);
	bitmap_release_region(dma_bitmap, pos, order);
	spin_unlock_irqrestore(&dma_lock, flags);
}

/*
 * Allocate DMA coherent memory space and return both the kernel
 * virtual and DMA address for that space.
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t gfp, unsigned long attrs)
{
	void *ret;
	u32 paddr;
	int order;

	if (!dma_size || !size)
		return NULL;

	order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);

	paddr = __alloc_dma_pages(order);

	if (handle)
		*handle = paddr;

	if (!paddr)
		return NULL;

	ret = phys_to_virt(paddr);
	memset(ret, 0, 1 << order);
	return ret;
}
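
/*
 * Usage sketch (illustrative, not part of this file): drivers reach
 * arch_dma_alloc() through the generic DMA API rather than calling it
 * directly.  The device pointer and size below are hypothetical.
 *
 *	dma_addr_t dma_handle;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... hand dma_handle to the device, access buf from the CPU ...
 *	dma_free_coherent(dev, SZ_4K, buf, dma_handle);
 */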

/*
 * Free DMA coherent memory as defined by the above mapping.
 */
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	int order;

	if (!dma_size || !size)
		return;

	order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);

	__free_dma_pages(virt_to_phys(vaddr), order);
}

/*
 * Initialise the coherent DMA memory allocator using the given uncached region.
 */
void __init coherent_mem_init(phys_addr_t start, u32 size)
{
	if (!size)
		return;

	printk(KERN_INFO
	       "Coherent memory (DMA) region start=0x%x size=0x%x\n",
	       start, size);

	dma_base = start;
	dma_size = size;

	/* allocate bitmap */
	dma_pages = dma_size >> PAGE_SHIFT;
	if (dma_size & (PAGE_SIZE - 1))
		++dma_pages;

	dma_bitmap = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
				    sizeof(long));
	if (!dma_bitmap)
		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
		      __func__, BITS_TO_LONGS(dma_pages) * sizeof(long),
		      sizeof(long));
}
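
/*
 * Worked example for the bitmap sizing above (assuming 4 KiB pages and
 * a 32-bit long): an 8 MiB memdma region gives dma_pages = 2048, so
 * the bitmap occupies BITS_TO_LONGS(2048) * sizeof(long) = 64 * 4 =
 * 256 bytes of memblock memory.
 */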

static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	switch (dir) {
	case DMA_FROM_DEVICE:
		L2_cache_block_invalidate(paddr, paddr + size);
		break;
	case DMA_TO_DEVICE:
		L2_cache_block_writeback(paddr, paddr + size);
		break;
	case DMA_BIDIRECTIONAL:
		L2_cache_block_writeback_invalidate(paddr, paddr + size);
		break;
	default:
		break;
	}
}

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	return c6x_dma_sync(dev, paddr, size, dir);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	return c6x_dma_sync(dev, paddr, size, dir);
}
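
/*
 * Usage sketch (illustrative, not part of this file): the sync hooks
 * above are invoked by the generic streaming DMA API.  For example,
 * mapping a buffer for a device-bound transfer writes dirty L2 lines
 * back before the device reads memory; the names below are
 * hypothetical driver locals.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// device DMA runs here; arch_sync_dma_for_device() already
 *	// performed the L2 writeback for DMA_TO_DEVICE
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */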