/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif

#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags)
{
	return true;
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
		return __va(offset);

	return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *		  MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture. This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility. Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but the mapping
 * is otherwise uncached. Attempts to map System RAM with this mapping type
 * will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map. Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size, flags);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping. Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
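
/*
 * Illustrative usage sketch (not part of this file): a caller that wants a
 * cacheable view of a non-I/O range, with write-combine as a fallback,
 * might do the following, where 'start', 'size' and 'buf' are hypothetical:
 *
 *	void *addr = memremap(start, size, MEMREMAP_WB | MEMREMAP_WC);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	memcpy(buf, addr, size);	(plain loads/stores, no __iomem accessors)
 *	memunmap(addr);
 *
 * The flags are attempted in the order documented above: WB (possibly
 * satisfied from the direct map), then WT, then WC.
 */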

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		    size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
				dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
			       devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
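
/*
 * Illustrative sketch (not part of this file): unlike memremap(), the devm_
 * variant reports failure with ERR_PTR() codes, so a probe routine might do
 * the following, 'res' being a hypothetical struct resource owned by the
 * caller:
 *
 *	addr = devm_memremap(dev, res->start, resource_size(res), MEMREMAP_WB);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * The mapping is then released automatically when the device is unbound, or
 * explicitly via devm_memunmap().
 */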

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

struct page_map {
	struct resource res;
	struct percpu_ref *ref;
	struct dev_pagemap pgmap;
	struct vmem_altmap altmap;
};

static unsigned long order_at(struct resource *res, unsigned long pgoff)
{
	unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff;
	unsigned long nr_pages, mask;

	nr_pages = PHYS_PFN(resource_size(res));
	if (nr_pages == pgoff)
		return ULONG_MAX;

	/*
	 * What is the largest aligned power-of-2 range available from
	 * this resource pgoff to the end of the resource range,
	 * considering the alignment of the current pgoff?
	 */
	mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff);
	if (!mask)
		return ULONG_MAX;

	return find_first_bit(&mask, BITS_PER_LONG);
}

#define foreach_order_pgoff(res, order, pgoff) \
	for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \
			pgoff += 1UL << order, order = order_at((res), pgoff))
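
/*
 * Worked example (illustrative, hypothetical values): for a resource that
 * starts at pfn 0x1000 and spans 0x140 pages, the walk proceeds as:
 *
 *	pgoff 0x000: mask = 0x1000 | rounddown_pow_of_two(0x140) = 0x1100,
 *	             order 8, i.e. a 256-page naturally aligned chunk
 *	pgoff 0x100: mask = 0x1100 | rounddown_pow_of_two(0x040) = 0x1140,
 *	             order 6, i.e. a 64-page chunk
 *	pgoff 0x140: pgoff == nr_pages, order_at() returns ULONG_MAX, loop ends
 *
 * i.e. foreach_order_pgoff() covers the range with the largest naturally
 * aligned power-of-2 chunks that fit, which become multi-order radix entries.
 */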

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
int device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);

	/*
	 * The page_fault() callback must migrate the page back to system
	 * memory so that the CPU can access it. This might fail for various
	 * reasons (device issue, device was unsafely unplugged, ...). When
	 * such error conditions happen, the callback must return
	 * VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of a regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do, in include/linux/memremap.h
	 */
	return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
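
/*
 * Illustrative sketch (not from this file) of the driver side: based on the
 * call above, a device driver's ->page_fault() callback has the shape
 *
 *	int my_devmem_fault(struct vm_area_struct *vma, unsigned long addr,
 *			    const struct page *page, unsigned int flags,
 *			    pmd_t *pmdp)
 *	{
 *		if (my_migrate_to_ram(vma, addr, page))
 *			return VM_FAULT_SIGBUS;
 *		return 0;
 *	}
 *
 * where my_migrate_to_ram() is a hypothetical stand-in for the driver's
 * migration logic (typically built on the migrate_vma() helpers); on any
 * failure the callback must return VM_FAULT_SIGBUS as described above.
 */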
#endif /* CONFIG_DEVICE_PRIVATE */

static void pgmap_radix_release(struct resource *res)
{
	unsigned long pgoff, order;

	mutex_lock(&pgmap_lock);
	foreach_order_pgoff(res, order, pgoff)
		radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
	mutex_unlock(&pgmap_lock);

	synchronize_rcu();
}

static unsigned long pfn_first(struct page_map *page_map)
{
	struct dev_pagemap *pgmap = &page_map->pgmap;
	const struct resource *res = &page_map->res;
	struct vmem_altmap *altmap = pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (altmap)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
	const struct resource *res = &page_map->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
	struct dev_pagemap *pgmap = &page_map->pgmap;
	unsigned long pfn;

	for_each_device_pfn(pfn, page_map)
		put_page(pfn_to_page(pfn));

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size);
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res);
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());

	page_map = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys));
	return page_map ? &page_map->pgmap : NULL;
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event). The expected order of events is that @ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	resource_size_t align_start, align_size, align_end;
	unsigned long pfn, pgoff, order;
	pgprot_t pgprot = PAGE_KERNEL;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	int error, nid, is_ram, i = 0;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;
	pgmap->type = MEMORY_DEVICE_HOST;
	pgmap->page_fault = NULL;
	pgmap->page_free = NULL;
	pgmap->data = NULL;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;

	foreach_order_pgoff(res, order, pgoff) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(res->start + PFN_PHYS(pgoff));
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = __radix_tree_insert(&pgmap_radix,
				PHYS_PFN(res->start) + pgoff, order, page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, false);
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
					align_start >> PAGE_SHIFT,
					align_size >> PAGE_SHIFT);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer. It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list. Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
		percpu_ref_get(ref);
		if (!(++i % 1024))
			cond_resched();
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
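
/*
 * Illustrative sketch (not part of this file): a typical consumer, e.g. a
 * pmem-style driver, pairs this with a live percpu_ref roughly as follows,
 * where 'drv', 'my_ref_release' and 'res' are hypothetical:
 *
 *	if (percpu_ref_init(&drv->ref, my_ref_release, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	addr = devm_memremap_pages(dev, res, &drv->ref, altmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * At teardown the driver calls percpu_ref_kill(&drv->ref), waits for the
 * last page reference to drop, and only then calls percpu_ref_exit(),
 * matching note 1/ in the kernel-doc above.
 */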

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}
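
/*
 * For example (hypothetical numbers): with altmap->reserve = 2 and
 * altmap->free = 1024, the first 1026 pfns of the device range are set
 * aside for the driver's reserved area and for memmap pages allocated out
 * of the device memory itself, so usable ZONE_DEVICE pages begin at
 * base_pfn + 1026; pfn_first() above skips exactly this many pfns.
 */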

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	/*
	 * 'memmap_start' is the virtual address for the first "struct
	 * page" in this range of the vmemmap array. In the case of
	 * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
	 * pointer arithmetic, so we can perform this to_vmem_altmap()
	 * conversion without concern for the initialization state of
	 * the struct page fields.
	 */
	struct page *page = (struct page *) memmap_start;
	struct dev_pagemap *pgmap;

	/*
	 * Unconditionally retrieve a dev_pagemap associated with the
	 * given physical address, this is only for use in the
	 * arch_{add|remove}_memory() for setting up and tearing down
	 * the memmap.
	 */
	rcu_read_lock();
	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
	rcu_read_unlock();

	return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_ZONE_DEVICE */

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
void put_zone_device_private_or_public_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If the refcount is now 1 the page is free, and the refcount is
	 * stable as nobody else holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		page->mapping = NULL;
		mem_cgroup_uncharge(page);

		page->pgmap->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(put_zone_device_private_or_public_page);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */