/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
        return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
        return (__force void *)ioremap_cache(offset, size);
}
#endif

#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
                                        unsigned long flags)
{
        return true;
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size,
                           unsigned long flags)
{
        unsigned long pfn = PHYS_PFN(offset);

        /* In the simple case just return the existing linear address */
        if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
            arch_memremap_can_ram_remap(offset, size, flags))
                return __va(offset);

        return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *                MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
 * uncached. Attempts to map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
        int is_ram = region_intersects(offset, size,
                                       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
        void *addr = NULL;

        if (!flags)
                return NULL;

        if (is_ram == REGION_MIXED) {
                WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
                                &offset, (unsigned long) size);
                return NULL;
        }

        /* Try all mapping types requested until one returns non-NULL */
        if (flags & MEMREMAP_WB) {
                /*
                 * MEMREMAP_WB is special in that it can be satisfied
                 * from the direct map.  Some archs depend on the
                 * capability of memremap() to autodetect cases where
                 * the requested range is potentially in System RAM.
                 */
                if (is_ram == REGION_INTERSECTS)
                        addr = try_ram_remap(offset, size, flags);
                if (!addr)
                        addr = arch_memremap_wb(offset, size);
        }

        /*
         * If we don't have a mapping yet and other request flags are
         * present then we will be attempting to establish a new virtual
         * address mapping.  Enforce that this mapping is not aliasing
         * System RAM.
         */
        if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
                WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
                                &offset, (unsigned long) size);
                return NULL;
        }

        if (!addr && (flags & MEMREMAP_WT))
                addr = ioremap_wt(offset, size);

        if (!addr && (flags & MEMREMAP_WC))
                addr = ioremap_wc(offset, size);

        return addr;
}
EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
        if (is_vmalloc_addr(addr))
                iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
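
/*
 * Illustrative sketch (not part of the original file): a minimal
 * memremap()/memunmap() usage pattern for a hypothetical non-I/O memory
 * range.  The offset and size below are invented for the example; a real
 * caller would take them from a firmware or platform description.  Passing
 * MEMREMAP_WB | MEMREMAP_WT asks for a write-back mapping first with a
 * write-through fallback, per the flag ordering documented above.
 */
#if 0	/* example only, not built */
static int example_clear_buffer(void)
{
        resource_size_t offset = 0x100000000ULL;	/* hypothetical */
        size_t size = 0x200000;				/* hypothetical */
        void *addr;

        addr = memremap(offset, size, MEMREMAP_WB | MEMREMAP_WT);
        if (!addr)
                return -ENOMEM;

        /* plain loads/stores are valid here; no readl()/writel() needed */
        memset(addr, 0, size);

        memunmap(addr);
        return 0;
}
#endif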

static void devm_memremap_release(struct device *dev, void *res)
{
        memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
        return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
                size_t size, unsigned long flags)
{
        void **ptr, *addr;

        ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
                        dev_to_node(dev));
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        addr = memremap(offset, size, flags);
        if (addr) {
                *ptr = addr;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
                return ERR_PTR(-ENXIO);
        }

        return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
        WARN_ON(devres_release(dev, devm_memremap_release,
                                devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
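
/*
 * Illustrative sketch (not part of the original file): the devres-managed
 * variant as it might appear in a driver probe path.  The platform device
 * and resource index are assumptions for the example.  Because the mapping
 * is device-managed, no explicit devm_memunmap() is needed on the error or
 * remove paths.
 */
#if 0	/* example only, not built */
static int example_probe(struct platform_device *pdev)
{
        struct resource *res;
        void *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        base = devm_memremap(&pdev->dev, res->start, resource_size(res),
                        MEMREMAP_WB);
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* 'base' stays valid for the lifetime of the device */
        return 0;
}
#endif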

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

struct page_map {
        struct resource res;
        struct percpu_ref *ref;
        struct dev_pagemap pgmap;
        struct vmem_altmap altmap;
};

static unsigned long order_at(struct resource *res, unsigned long pgoff)
{
        unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff;
        unsigned long nr_pages, mask;

        nr_pages = PHYS_PFN(resource_size(res));
        if (nr_pages == pgoff)
                return ULONG_MAX;

        /*
         * What is the largest aligned power-of-2 range available from
         * this resource pgoff to the end of the resource range,
         * considering the alignment of the current pgoff?
         */
        mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff);
        if (!mask)
                return ULONG_MAX;

        return find_first_bit(&mask, BITS_PER_LONG);
}

#define foreach_order_pgoff(res, order, pgoff) \
        for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \
                        pgoff += 1UL << order, order = order_at((res), pgoff))
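
/*
 * Worked example (added for illustration, values hypothetical): for a
 * resource starting at pfn 0x100 and spanning 0x180 pages, order_at()
 * returns order 8 at pgoff 0 (mask = 0x100 | rounddown_pow_of_two(0x180)
 * = 0x100), so foreach_order_pgoff() first covers 256 pages.  At pgoff
 * 0x100 the absolute pfn is 0x200 and 0x80 pages remain, giving order 7,
 * after which pgoff reaches nr_pages and the walk stops.  Note that the
 * start pfn's own alignment bounds the order: a range starting at pfn
 * 0x140 could never use more than an order-6 entry for its first chunk.
 */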

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
int device_private_entry_fault(struct vm_area_struct *vma,
                       unsigned long addr,
                       swp_entry_t entry,
                       unsigned int flags,
                       pmd_t *pmdp)
{
        struct page *page = device_private_entry_to_page(entry);

        /*
         * The page_fault() callback must migrate the page back to system
         * memory so that the CPU can access it. This might fail for various
         * reasons (device issue, device was unsafely unplugged, ...). When
         * such error conditions happen, the callback must return
         * VM_FAULT_SIGBUS.
         *
         * Note that because memory cgroup charges are accounted to the device
         * memory, this should never fail because of memory restrictions (but
         * allocation of a regular system page might still fail because we are
         * out of memory).
         *
         * There is a more in-depth description of what that callback can and
         * cannot do, in include/linux/memremap.h
         */
        return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */
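
/*
 * Illustrative sketch (not part of the original file): the shape of a
 * driver-side page_fault() callback as wired into dev_pagemap above.  The
 * example_migrate_back() helper is hypothetical; a real driver would use
 * its own migration machinery (e.g. migrate_vma()) to move the page back
 * to system memory before returning.
 */
#if 0	/* example only, not built */
static int example_devmem_fault(struct vm_area_struct *vma,
                                unsigned long addr,
                                const struct page *page,
                                unsigned int flags,
                                pmd_t *pmdp)
{
        /* hypothetical helper that migrates 'page' back to system memory */
        if (example_migrate_back(vma, addr, page))
                return VM_FAULT_SIGBUS;

        /* returning 0 lets the faulting access retry against the migrated page */
        return 0;
}
#endif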

static void pgmap_radix_release(struct resource *res)
{
        unsigned long pgoff, order;

        mutex_lock(&pgmap_lock);
        foreach_order_pgoff(res, order, pgoff)
                radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
        mutex_unlock(&pgmap_lock);

        synchronize_rcu();
}

static unsigned long pfn_first(struct page_map *page_map)
{
        struct dev_pagemap *pgmap = &page_map->pgmap;
        const struct resource *res = &page_map->res;
        struct vmem_altmap *altmap = pgmap->altmap;
        unsigned long pfn;

        pfn = res->start >> PAGE_SHIFT;
        if (altmap)
                pfn += vmem_altmap_offset(altmap);
        return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
        const struct resource *res = &page_map->res;

        return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
        for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(struct device *dev, void *data)
{
        struct page_map *page_map = data;
        struct resource *res = &page_map->res;
        resource_size_t align_start, align_size;
        struct dev_pagemap *pgmap = &page_map->pgmap;
        unsigned long pfn;

        for_each_device_pfn(pfn, page_map)
                put_page(pfn_to_page(pfn));

        if (percpu_ref_tryget_live(pgmap->ref)) {
                dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
                percpu_ref_put(pgmap->ref);
        }

        /* pages are dead and unused, undo the arch mapping */
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(resource_size(res), SECTION_SIZE);

        mem_hotplug_begin();
        arch_remove_memory(align_start, align_size);
        mem_hotplug_done();

        untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
        pgmap_radix_release(res);
        dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
                        "%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
        struct page_map *page_map;

        WARN_ON_ONCE(!rcu_read_lock_held());

        page_map = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys));
        return page_map ? &page_map->pgmap : NULL;
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event). The expected order of events is that @ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
                struct percpu_ref *ref, struct vmem_altmap *altmap)
{
        resource_size_t align_start, align_size, align_end;
        unsigned long pfn, pgoff, order;
        pgprot_t pgprot = PAGE_KERNEL;
        struct dev_pagemap *pgmap;
        struct page_map *page_map;
        int error, nid, is_ram, i = 0;

        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
                - align_start;
        is_ram = region_intersects(align_start, align_size,
                IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

        if (is_ram == REGION_MIXED) {
                WARN_ONCE(1, "%s attempted on mixed region %pr\n",
                                __func__, res);
                return ERR_PTR(-ENXIO);
        }

        if (is_ram == REGION_INTERSECTS)
                return __va(res->start);

        if (!ref)
                return ERR_PTR(-EINVAL);

        page_map = devres_alloc_node(devm_memremap_pages_release,
                        sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
        if (!page_map)
                return ERR_PTR(-ENOMEM);
        pgmap = &page_map->pgmap;

        memcpy(&page_map->res, res, sizeof(*res));

        pgmap->dev = dev;
        if (altmap) {
                memcpy(&page_map->altmap, altmap, sizeof(*altmap));
                pgmap->altmap = &page_map->altmap;
        }
        pgmap->ref = ref;
        pgmap->res = &page_map->res;
        pgmap->type = MEMORY_DEVICE_HOST;
        pgmap->page_fault = NULL;
        pgmap->page_free = NULL;
        pgmap->data = NULL;

        mutex_lock(&pgmap_lock);
        error = 0;
        align_end = align_start + align_size - 1;

        foreach_order_pgoff(res, order, pgoff) {
                struct dev_pagemap *dup;

                rcu_read_lock();
                dup = find_dev_pagemap(res->start + PFN_PHYS(pgoff));
                rcu_read_unlock();
                if (dup) {
                        dev_err(dev, "%s: %pr collides with mapping for %s\n",
                                        __func__, res, dev_name(dup->dev));
                        error = -EBUSY;
                        break;
                }
                error = __radix_tree_insert(&pgmap_radix,
                                PHYS_PFN(res->start) + pgoff, order, page_map);
                if (error) {
                        dev_err(dev, "%s: failed: %d\n", __func__, error);
                        break;
                }
        }
        mutex_unlock(&pgmap_lock);
        if (error)
                goto err_radix;

        nid = dev_to_node(dev);
        if (nid < 0)
                nid = numa_mem_id();

        error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
                        align_size);
        if (error)
                goto err_pfn_remap;

        mem_hotplug_begin();
        error = arch_add_memory(nid, align_start, align_size, false);
        if (!error)
                move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
                                        align_start >> PAGE_SHIFT,
                                        align_size >> PAGE_SHIFT);
        mem_hotplug_done();
        if (error)
                goto err_add_memory;

        for_each_device_pfn(pfn, page_map) {
                struct page *page = pfn_to_page(pfn);

                /*
                 * ZONE_DEVICE pages union ->lru with a ->pgmap back
                 * pointer. It is a bug if a ZONE_DEVICE page is ever
                 * freed or placed on a driver-private list. Seed the
                 * storage with LIST_POISON* values.
                 */
                list_del(&page->lru);
                page->pgmap = pgmap;
                percpu_ref_get(ref);
                if (!(++i % 1024))
                        cond_resched();
        }
        devres_add(dev, page_map);
        return __va(res->start);

 err_add_memory:
        untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
        pgmap_radix_release(res);
        devres_free(page_map);
        return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
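
/*
 * Illustrative sketch (not part of the original file): one way a caller
 * might satisfy note 1/ above.  The struct, the example_* names and the
 * completion-based wait are assumptions for the example; real users (e.g.
 * the pmem driver) follow a similar pattern with their request queue's
 * percpu_ref.  Devres actions run in reverse registration order, so the
 * "kill" action registered after devm_memremap_pages() runs before
 * devm_memremap_pages_release(), and the "wait + exit" action registered
 * before it runs after.
 */
#if 0	/* example only, not built */
struct example_dev {
        struct percpu_ref ref;
        struct completion cmp;
};

static void example_ref_release(struct percpu_ref *ref)
{
        struct example_dev *edev = container_of(ref, struct example_dev, ref);

        complete(&edev->cmp);	/* last page reference has been dropped */
}

static void example_ref_kill(void *data)
{
        percpu_ref_kill(data);	/* runs before devm_memremap_pages_release() */
}

static void example_ref_exit(void *data)
{
        struct example_dev *edev = data;

        wait_for_completion(&edev->cmp); /* after devm_memremap_pages_release() */
        percpu_ref_exit(&edev->ref);
}

static int example_attach(struct device *dev, struct example_dev *edev,
                struct resource *res)
{
        void *base;
        int rc;

        init_completion(&edev->cmp);
        rc = percpu_ref_init(&edev->ref, example_ref_release, 0, GFP_KERNEL);
        if (rc)
                return rc;

        /* registered first => released last: wait, then percpu_ref_exit() */
        rc = devm_add_action(dev, example_ref_exit, edev);
        if (rc) {
                percpu_ref_exit(&edev->ref);
                return rc;
        }

        /* @ref is live here, as note 1/ requires */
        base = devm_memremap_pages(dev, res, &edev->ref, NULL);
        if (IS_ERR(base)) {
                /* let the ref drop to zero so example_ref_exit() can finish */
                percpu_ref_kill(&edev->ref);
                return PTR_ERR(base);
        }

        /* registered last => released first: percpu_ref_kill() */
        return devm_add_action_or_reset(dev, example_ref_kill, &edev->ref);
}
#endif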

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
        /* number of pfns from base where pfn_to_page() is valid */
        return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
        altmap->alloc -= nr_pfns;
}

struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
        /*
         * 'memmap_start' is the virtual address for the first "struct
         * page" in this range of the vmemmap array.  In the case of
         * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
         * pointer arithmetic, so we can perform this to_vmem_altmap()
         * conversion without concern for the initialization state of
         * the struct page fields.
         */
        struct page *page = (struct page *) memmap_start;
        struct dev_pagemap *pgmap;

        /*
         * Unconditionally retrieve a dev_pagemap associated with the
         * given physical address; this is only for use in the
         * arch_{add|remove}_memory() paths for setting up and tearing
         * down the memmap.
         */
        rcu_read_lock();
        pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
        rcu_read_unlock();

        return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_ZONE_DEVICE */

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
void put_zone_device_private_or_public_page(struct page *page)
{
        int count = page_ref_dec_return(page);

        /*
         * If the refcount is 1 then the page is freed and the refcount is
         * stable as nobody holds a reference on the page.
         */
        if (count == 1) {
                /* Clear Active bit in case of parallel mark_page_accessed */
                __ClearPageActive(page);
                __ClearPageWaiters(page);

                page->mapping = NULL;
                mem_cgroup_uncharge(page);

                page->pgmap->page_free(page, page->pgmap->data);
        } else if (!count)
                __put_page(page);
}
EXPORT_SYMBOL(put_zone_device_private_or_public_page);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */