/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>

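/*
 * Maps physical PFN ranges to the struct dev_pagemap that owns them;
 * filled in by memremap_pages() and consulted by get_dev_pagemap().
 */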
static DEFINE_XARRAY(pgmap_array);

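/*
 * devmap_managed_key gates the put_page() slow path that routes the final
 * reference drop on a devmap managed page to pgmap->ops->page_free(); the
 * atomic counter tracks how many live pgmaps currently require it.
 */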
#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_managed_enable;

static void devmap_managed_enable_put(void)
{
	if (atomic_dec_and_test(&devmap_managed_enable))
		static_branch_disable(&devmap_managed_key);
}

static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	if (!pgmap->ops || !pgmap->ops->page_free) {
		WARN(1, "Missing page_free method\n");
		return -EINVAL;
	}

	if (atomic_inc_return(&devmap_managed_enable) == 1)
		static_branch_enable(&devmap_managed_key);
	return 0;
}
#else
static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	return -EINVAL;
}
static void devmap_managed_enable_put(void)
{
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

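/* Clear the array entries for @res and wait out in-flight RCU lookups. */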
static void pgmap_array_delete(struct resource *res)
{
	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}

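/*
 * First PFN of the range that has a usable struct page: the start of the
 * resource, skipping any pages the altmap sets aside for the memmap itself.
 */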
static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	return PHYS_PFN(pgmap->res.start) +
		vmem_altmap_offset(pgmap_altmap(pgmap));
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

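/* Advance to the next PFN, yielding the CPU periodically on long walks. */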
static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

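/* Walk every device PFN in @map, starting past any altmap reservation. */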
#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))

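/*
 * Kill and cleanup defer to the driver-supplied ->kill()/->cleanup()
 * callbacks when present, and otherwise operate on the internally
 * managed percpu_ref.
 */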
static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->kill)
		pgmap->ops->kill(pgmap);
	else
		percpu_ref_kill(pgmap->ref);
}

static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->cleanup) {
		pgmap->ops->cleanup(pgmap);
	} else {
		wait_for_completion(&pgmap->done);
		percpu_ref_exit(pgmap->ref);
	}
	/*
	 * Undo the pgmap ref assignment for the internal case as the
	 * caller may re-enable the same pgmap.
	 */
	if (pgmap->ref == &pgmap->internal_ref)
		pgmap->ref = NULL;
}

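/**
 * memunmap_pages - reverse the effects of memremap_pages()
 * @pgmap: the dev_pagemap describing the range to tear down
 *
 * Kills the page reference count, waits for every outstanding page
 * reference to be dropped, and then undoes the arch mapping.
 */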
void memunmap_pages(struct dev_pagemap *pgmap)
{
	struct resource *res = &pgmap->res;
	unsigned long pfn;
	int nid;

	dev_pagemap_kill(pgmap);
	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));
	dev_pagemap_cleanup(pgmap);

	/* pages are dead and unused, undo the arch mapping */
	nid = page_to_nid(pfn_to_page(PHYS_PFN(res->start)));

	mem_hotplug_begin();
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		pfn = PHYS_PFN(res->start);
		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
				PHYS_PFN(resource_size(res)), NULL);
	} else {
		arch_remove_memory(nid, res->start, resource_size(res),
				pgmap_altmap(pgmap));
		kasan_remove_zero_shadow(__va(res->start), resource_size(res));
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
	pgmap_array_delete(res);
	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
	devmap_managed_enable_put();
}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
	memunmap_pages(data);
}

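/* Release callback for the internally managed percpu_ref. */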
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap =
		container_of(ref, struct dev_pagemap, internal_ref);

	complete(&pgmap->done);
}

/*
 * Not device managed version of devm_memremap_pages(), undone by
 * memunmap_pages(). Please use devm_memremap_pages() if you have a struct
 * device available.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
	struct resource *res = &pgmap->res;
	struct dev_pagemap *conflict_pgmap;
	struct mhp_restrictions restrictions = {
		/*
		 * We do not want any optional features, only our own memmap.
		 */
		.altmap = pgmap_altmap(pgmap),
	};
	pgprot_t pgprot = PAGE_KERNEL;
	int error, is_ram;
	bool need_devmap_managed = true;

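	/* Validate the pgmap type and the callbacks that type requires. */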
	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
			WARN(1, "Device private memory not supported\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
			WARN(1, "Missing migrate_to_ram method\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_FS_DAX:
		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
			WARN(1, "File system DAX not supported\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_DEVDAX:
	case MEMORY_DEVICE_PCI_P2PDMA:
		need_devmap_managed = false;
		break;
	default:
		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
		break;
	}

	if (!pgmap->ref) {
		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
			return ERR_PTR(-EINVAL);

		init_completion(&pgmap->done);
		error = percpu_ref_init(&pgmap->internal_ref,
				dev_pagemap_percpu_release, 0, GFP_KERNEL);
		if (error)
			return ERR_PTR(error);
		pgmap->ref = &pgmap->internal_ref;
	} else {
		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
			WARN(1, "Missing reference count teardown definition\n");
			return ERR_PTR(-EINVAL);
		}
	}

	if (need_devmap_managed) {
		error = devmap_managed_enable_get(pgmap);
		if (error)
			return ERR_PTR(error);
	}

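	/*
	 * Refuse ranges whose first or last PFN already falls inside an
	 * existing dev_pagemap.
	 */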
	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		error = -ENOMEM;
		goto err_array;
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		error = -ENOMEM;
		goto err_array;
	}

	is_ram = region_intersects(res->start, resource_size(res),
			IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
				is_ram == REGION_MIXED ? "mixed" : "ram", res);
		error = -ENXIO;
		goto err_array;
	}

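	/* Publish the range so that get_dev_pagemap() lookups can find it. */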
	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
				PHYS_PFN(res->end), pgmap, GFP_KERNEL));
	if (error)
		goto err_array;

	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0,
			resource_size(res));
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory.
	 * Moreover the device memory is inaccessible from the CPU, so we
	 * do not want to create a linear mapping for it the way
	 * arch_add_memory() would.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		error = add_pages(nid, PHYS_PFN(res->start),
				PHYS_PFN(resource_size(res)), &restrictions);
	} else {
		error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, res->start, resource_size(res),
				&restrictions);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
				PHYS_PFN(resource_size(res)), restrictions.altmap);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				PHYS_PFN(res->start),
				PHYS_PFN(resource_size(res)), pgmap);
	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
	return __va(res->start);

 err_add_memory:
	kasan_remove_zero_shadow(__va(res->start), resource_size(res));
 err_kasan:
	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
 err_pfn_remap:
	pgmap_array_delete(res);
 err_array:
	dev_pagemap_kill(pgmap);
	dev_pagemap_cleanup(pgmap);
	devmap_managed_enable_put();
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(memremap_pages);

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref
 *    must be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	void *ret;

	ret = memremap_pages(pgmap, dev_to_node(dev));
	if (IS_ERR(ret))
		return ret;

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
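
/*
 * A minimal usage sketch (hypothetical caller; assumes a driver-owned
 * @pgmap whose res member has already been filled in):
 *
 *	pgmap->type = MEMORY_DEVICE_DEVDAX;
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * MEMORY_DEVICE_DEVDAX requires neither pgmap->ops nor an external
 * pgmap->ref, so the internally managed percpu_ref is used and teardown
 * happens automatically when @dev is unbound (or explicitly via
 * devm_memunmap_pages()).
 */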

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	if (altmap)
		return altmap->reserve + altmap->free;
	return 0;
}

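/* Undo an allocation of @nr_pfns pages of memmap space from @altmap. */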
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number for which to look up the dev_pagemap
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);

#ifdef CONFIG_DEV_PAGEMAP_OPS
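/*
 * Final put path for devmap managed pages, reached from put_page() once
 * devmap_managed_key is enabled.  Because such pages are born with an
 * elevated count, a refcount of 1 rather than 0 marks the page as free.
 */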
void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If refcount is 1 then page is freed and refcount is stable as nobody
	 * holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		mem_cgroup_uncharge(page);

		/*
		 * When a device_private page is freed, the page->mapping field
		 * may still contain a (stale) mapping value. For example, the
		 * lower bits of page->mapping may still identify the page as
		 * an anonymous page. Ultimately, this entire field is just
		 * stale and wrong, and it will cause errors if not cleared.
		 * One example is:
		 *
		 *  migrate_vma_pages()
		 *    migrate_vma_insert_page()
		 *      page_add_new_anon_rmap()
		 *        __page_set_anon_rmap()
		 *          ...checks page->mapping, via PageAnon(page) call,
		 *            and incorrectly concludes that the page is an
		 *            anonymous page. Therefore, it incorrectly,
		 *            silently fails to set up the new anon rmap.
		 *
		 * For other types of ZONE_DEVICE pages, migration is either
		 * handled differently or not done at all, so there is no need
		 * to clear page->mapping.
		 */
		if (is_device_private_page(page))
			page->mapping = NULL;

		page->pgmap->ops->page_free(page);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */