/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size)
{
	struct page *page = pfn_to_page(offset >> PAGE_SHIFT);

	/* In the simple case just return the existing linear address */
	if (!PageHighMem(page))
		return __va(offset);
	return NULL; /* fallback to ioremap_cache */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: either MEMREMAP_WB or MEMREMAP_WT
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable.
 *
 * MEMREMAP_WB - matches the default mapping for "System RAM" on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region
 * is RAM, memremap() will bypass establishing a new mapping and instead
 * return a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map "System RAM" with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size, "System RAM");
	void *addr = NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		flags &= ~MEMREMAP_WB;
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map.  Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in "System RAM"
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size);
		if (!addr)
			addr = ioremap_cache(offset, size);
	}

	/*
	 * If we don't have a mapping yet and more request flags are
	 * pending then we will be attempting to establish a new virtual
	 * address mapping.  Enforce that this mapping is not aliasing
	 * "System RAM".
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT)) {
		flags &= ~MEMREMAP_WT;
		addr = ioremap_wt(offset, size);
	}

	return addr;
}
EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
	/* pointers into the direct map (the RAM case) need no teardown */
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
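
/*
 * Example usage (illustrative only, not part of this file): prefer a
 * cached mapping of a firmware-reserved region, falling back to
 * write-through if the arch cannot satisfy MEMREMAP_WB.  The names
 * "phys" and "len" are hypothetical.
 *
 *	void *p = memremap(phys, len, MEMREMAP_WB | MEMREMAP_WT);
 *	if (!p)
 *		return -ENOMEM;
 *	// use p like ordinary memory, no readl()/writel() accessors
 *	memunmap(p);
 */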

static void devm_memremap_release(struct device *dev, void *res)
{
	/* "res" is the devres slot holding the remapped address */
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);
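
/*
 * Example usage (hypothetical driver code): the devm variant ties the
 * mapping lifetime to the device, so no explicit memunmap() is needed
 * on the error or remove paths.  "example_probe" and the platform
 * resource lookup are assumptions for illustration.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *		void *base;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		if (!res)
 *			return -ENODEV;
 *		base = devm_memremap(&pdev->dev, res->start,
 *				resource_size(res), MEMREMAP_WB);
 *		if (IS_ERR(base))
 *			return PTR_ERR(base);
 *		return 0;
 *	}
 */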

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
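
/*
 * Example usage (illustrative): tear a devm-managed mapping down early,
 * before the device is released; "base" is a hypothetical pointer
 * previously returned by devm_memremap(dev, ...).
 *
 *	devm_memunmap(dev, base);
 */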

pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags)
{
	return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
}
EXPORT_SYMBOL(phys_to_pfn_t);
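
/*
 * Example usage (illustrative): wrap a device physical address in a
 * pfn_t so flags like PFN_DEV travel with it; pfn_t_to_pfn() recovers
 * the raw pfn.  "phys" is a hypothetical dma_addr_t.
 *
 *	pfn_t pfn = phys_to_pfn_t(phys, PFN_DEV);
 *	unsigned long raw_pfn = pfn_t_to_pfn(pfn);
 */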

#ifdef CONFIG_ZONE_DEVICE
struct page_map {
	struct resource res;
};

static void devm_memremap_pages_release(struct device *dev, void *res)
{
	struct page_map *page_map = res;

	/* pages are dead and unused, undo the arch mapping */
	arch_remove_memory(page_map->res.start, resource_size(&page_map->res));
}

void *devm_memremap_pages(struct device *dev, struct resource *res)
{
	int is_ram = region_intersects(res->start, resource_size(res),
			"System RAM");
	struct page_map *page_map;
	int error, nid;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);

	memcpy(&page_map->res, res, sizeof(*res));

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = arch_add_memory(nid, res->start, resource_size(res), true);
	if (error) {
		devres_free(page_map);
		return ERR_PTR(error);
	}

	devres_add(dev, page_map);
	return __va(res->start);
}
EXPORT_SYMBOL(devm_memremap_pages);
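
/*
 * Example usage (hypothetical pmem-style driver): hand a reserved,
 * non-RAM region to devm_memremap_pages() so its pfns gain struct page
 * coverage, with teardown tied to driver detach.  "dev" and "res"
 * (describing the persistent memory range) are assumptions.
 *
 *	void *base = devm_memremap_pages(dev, res);
 *
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *	// pfn_to_page() is now valid for pfns inside "res"
 */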
#endif /* CONFIG_ZONE_DEVICE */