/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif

#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags)
{
	return true;
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
		return __va(offset);

	return NULL; /* fallback to arch_memremap_wb */
}

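/*
 * Usage sketch (hypothetical 'page' backed by pfn_valid(), non-highmem
 * System RAM): for such a range try_ram_remap() lets a MEMREMAP_WB
 * request resolve to the existing linear mapping, so no new page
 * tables are built and memunmap() on the result is a no-op:
 *
 *	phys_addr_t phys = page_to_phys(page);
 *	void *va = memremap(phys, PAGE_SIZE, MEMREMAP_WB);
 *
 *	WARN_ON(va && va != __va(phys));	(holds when the shortcut fires)
 */
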
/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *		  MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
 * uncached. Attempts to map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map.  Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size, flags);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping.  Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);

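/*
 * Usage sketch (hypothetical driver resource 'res'): multiple flags
 * request fallback in the order documented above, e.g. prefer a
 * write-back mapping but accept write-combine:
 *
 *	void *addr = memremap(res->start, resource_size(res),
 *			MEMREMAP_WB | MEMREMAP_WC);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	memunmap(addr);
 */
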
void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

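/*
 * Usage sketch (hypothetical probe path; 'dev' and 'res' assumed):
 * the devres-managed variant reports failure via ERR_PTR() rather
 * than NULL, and unmaps automatically on driver detach:
 *
 *	void *addr = devm_memremap(dev, res->start, resource_size(res),
 *			MEMREMAP_WB);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */
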
void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

struct page_map {
	struct resource res;
	struct percpu_ref *ref;
	struct dev_pagemap pgmap;
	struct vmem_altmap altmap;
};

static unsigned long order_at(struct resource *res, unsigned long pgoff)
{
	unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff;
	unsigned long nr_pages, mask;

	nr_pages = PHYS_PFN(resource_size(res));
	if (nr_pages == pgoff)
		return ULONG_MAX;

	/*
	 * What is the largest aligned power-of-2 range available from
	 * this resource pgoff to the end of the resource range,
	 * considering the alignment of the current pgoff?
	 */
	mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff);
	if (!mask)
		return ULONG_MAX;

	return find_first_bit(&mask, BITS_PER_LONG);
}

#define foreach_order_pgoff(res, order, pgoff) \
	for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \
			pgoff += 1UL << order, order = order_at((res), pgoff))

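/*
 * Worked example (illustrative numbers): a resource covering pfns
 * 0x100-0x15f (96 pages) is walked in maximal naturally-aligned
 * power-of-2 chunks.  At pgoff 0, pfn 0x100 is 64-page aligned and
 * 96 pages remain, so order_at() returns 6 (a 64-page chunk); at
 * pgoff 64 only 32 pages remain, capping the order at 5 even though
 * pfn 0x140 is 64-page aligned; at pgoff 96 the range is exhausted
 * and ULONG_MAX terminates the walk.  The radix tree thus needs only
 * two entries to cover the whole range.
 */
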
static void pgmap_radix_release(struct resource *res)
{
	unsigned long pgoff, order;

	mutex_lock(&pgmap_lock);
	foreach_order_pgoff(res, order, pgoff)
		radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
	mutex_unlock(&pgmap_lock);

	synchronize_rcu();
}

static unsigned long pfn_first(struct page_map *page_map)
{
	struct dev_pagemap *pgmap = &page_map->pgmap;
	const struct resource *res = &page_map->res;
	struct vmem_altmap *altmap = pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (altmap)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
	const struct resource *res = &page_map->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
	struct dev_pagemap *pgmap = &page_map->pgmap;
	unsigned long pfn;

	for_each_device_pfn(pfn, page_map)
		put_page(pfn_to_page(pfn));

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);

	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size);
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res);
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());

	page_map = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys));
	return page_map ? &page_map->pgmap : NULL;
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event). The expected order of events is that @ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	resource_size_t align_start, align_size, align_end;
	unsigned long pfn, pgoff, order;
	pgprot_t pgprot = PAGE_KERNEL;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	int error, nid, is_ram;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;

	foreach_order_pgoff(res, order, pgoff) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(res->start + PFN_PHYS(pgoff));
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = __radix_tree_insert(&pgmap_radix,
				PHYS_PFN(res->start) + pgoff, order, page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, false);
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
					align_start >> PAGE_SHIFT,
					align_size >> PAGE_SHIFT);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer. It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list. Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
		percpu_ref_get(ref);
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);

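/*
 * Usage sketch (hypothetical driver state 'drv' and release callback
 * 'drv_release'): the caller owns the percpu_ref lifecycle around the
 * devres release, per note 1/ above:
 *
 *	percpu_ref_init(&drv->ref, drv_release, 0, GFP_KERNEL);
 *	addr = devm_memremap_pages(dev, &drv->res, &drv->ref, NULL);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * At teardown, percpu_ref_kill(&drv->ref) must precede the devres
 * release of this mapping, and percpu_ref_exit(&drv->ref) must wait
 * until after devm_memremap_pages_release() has run.
 */
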
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

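/*
 * Illustrative arithmetic (hypothetical values): with ->reserve == 2
 * and ->free == 1024, vmem_altmap_offset() is 1026, i.e. the first
 * 1026 pfns of the range are set aside for the reservation and for
 * storing the memmap itself, so pfn_first() above starts handing out
 * device pages after them.  ->alloc counts how many of the 'free'
 * pfns the vmemmap code has actually consumed; vmem_altmap_free()
 * gives them back as the memmap is torn down.
 */
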
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	/*
	 * 'memmap_start' is the virtual address for the first "struct
	 * page" in this range of the vmemmap array.  In the case of
	 * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
	 * pointer arithmetic, so we can perform this to_vmem_altmap()
	 * conversion without concern for the initialization state of
	 * the struct page fields.
	 */
	struct page *page = (struct page *) memmap_start;
	struct dev_pagemap *pgmap;

	/*
	 * Unconditionally retrieve a dev_pagemap associated with the
	 * given physical address; this is only for use by
	 * arch_{add|remove}_memory() when setting up and tearing down
	 * the memmap.
	 */
	rcu_read_lock();
	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
	rcu_read_unlock();

	return pgmap ? pgmap->altmap : NULL;
}
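
/*
 * Illustrative identity (assumes CONFIG_SPARSEMEM_VMEMMAP): the
 * memmap is a flat virtual array there, so the page_to_pfn() above
 * reduces to pointer arithmetic, roughly
 *
 *	pfn == (struct page *)memmap_start - vmemmap;
 *
 * which is why no struct page fields need to be initialized for this
 * lookup to be safe.
 */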
#endif /* CONFIG_ZONE_DEVICE */