arch/openrisc/kernel/dma.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */

#include <linux/dma-noncoherent.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

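/*
 * Page-table walker callbacks for walk_page_range(): these toggle the
 * cache-inhibit (_PAGE_CI) bit on each PTE in the walked range, so the
 * pages behind a "coherent" buffer are accessed uncached by the CPU
 * (and made cacheable again when the buffer is freed).
 */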
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

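	/*
	 * Note: no dcache flush should be needed on this path.  While
	 * _PAGE_CI was set, CPU accesses to the page bypassed the cache,
	 * and page_set_nocache() flushed any earlier lines, so nothing
	 * stale can remain for this page.
	 */
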
	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls alloc_pages_exact, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cacheable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All currently known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
void *
arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp | __GFP_ZERO);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/*
		 * We need to iterate through the pages, clearing the dcache for
		 * them and setting the cache-inhibit bit.
		 */
		if (walk_page_range(va, va + size, &walk)) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return (void *)va;
}
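
/*
 * A minimal, hypothetical usage sketch (not part of this file): drivers
 * do not call arch_dma_alloc() directly; they call dma_alloc_coherent(),
 * which the generic dma-direct code routes here.  The names dev, buf and
 * bus below are illustrative only.
 *
 *	dma_addr_t bus;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 * CPU stores to buf are uncached (cache-inhibited PTEs), so the device
 * sees them immediately at bus address bus.  The buffer is released with:
 *
 *	dma_free_coherent(dev, PAGE_SIZE, buf, bus);
 */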

void
arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;
	struct mm_walk walk = {
		.pte_entry = page_clear_nocache,
		.mm = &init_mm
	};

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/* walk_page_range shouldn't be able to fail here */
		WARN_ON(walk_page_range(va, va + size, &walk));
	}

	free_pages_exact(vaddr, size);
}

void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush or invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}
}
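
/*
 * A hypothetical streaming-DMA sketch (not part of this file): mapping a
 * kernel buffer with dma_map_single() ends up in arch_sync_dma_for_device()
 * above, which flushes (DMA_TO_DEVICE) or invalidates (DMA_FROM_DEVICE)
 * the dcache lines covering the buffer.  dev, buf and len are illustrative
 * names only.
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 *
 * ... start the device transfer and wait for it to complete ...
 *
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */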