/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(pgmap_array);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

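/*
 * A pgmap with a page_free() callback needs put_page() to be routed
 * through __put_devmap_managed_page() below. That hook sits behind the
 * devmap_managed_key static branch so the common put_page() fast path
 * pays nothing while no such pgmap exists; the atomic counter tracks
 * how many active pgmaps currently require the hook.
 */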
#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_managed_enable;

static void devmap_managed_enable_put(void *data)
{
	if (atomic_dec_and_test(&devmap_managed_enable))
		static_branch_disable(&devmap_managed_key);
}

static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
{
	if (!pgmap->ops || !pgmap->ops->page_free) {
		WARN(1, "Missing page_free method\n");
		return -EINVAL;
	}

	if (atomic_inc_return(&devmap_managed_enable) == 1)
		static_branch_enable(&devmap_managed_key);
	return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL);
}
#else
static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
{
	return -EINVAL;
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

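/*
 * Clearing the range in pgmap_array only makes new lookups fail;
 * get_dev_pagemap() walks the array under rcu_read_lock(), so wait for
 * a grace period before the caller tears the pgmap down any further.
 */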
static void pgmap_array_delete(struct resource *res)
{
	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	return PHYS_PFN(pgmap->res.start) +
		vmem_altmap_offset(pgmap_altmap(pgmap));
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

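/*
 * Device pfn ranges can span terabytes, so walking them one pfn at a
 * time may run for a long while; drop in a cond_resched() every 1024
 * pfns to keep for_each_device_pfn() from hogging the CPU.
 */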
static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))

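/*
 * Reference-count teardown comes in two matched halves: kill stops new
 * references from being taken, cleanup waits for the count to drain.
 * Drivers may supply their own ops->kill()/ops->cleanup() pair;
 * otherwise the internal percpu_ref and completion are used.
 */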
static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->kill)
		pgmap->ops->kill(pgmap);
	else
		percpu_ref_kill(pgmap->ref);
}

static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->cleanup) {
		pgmap->ops->cleanup(pgmap);
	} else {
		wait_for_completion(&pgmap->done);
		percpu_ref_exit(pgmap->ref);
	}
	/*
	 * Undo the pgmap ref assignment for the internal case as the
	 * caller may re-enable the same pgmap.
	 */
	if (pgmap->ref == &pgmap->internal_ref)
		pgmap->ref = NULL;
}

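/*
 * devm-managed teardown: kill the reference count, drop the one
 * reference held on behalf of each device page, wait for the count to
 * drain, and only then unplug the memory and undo the arch mapping.
 */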
static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	unsigned long pfn;
	int nid;

	dev_pagemap_kill(pgmap);
	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));
	dev_pagemap_cleanup(pgmap);

	/* pages are dead and unused, undo the arch mapping */
	nid = page_to_nid(pfn_to_page(PHYS_PFN(res->start)));

	mem_hotplug_begin();
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		pfn = PHYS_PFN(res->start);
		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
				PHYS_PFN(resource_size(res)), NULL);
	} else {
		arch_remove_memory(nid, res->start, resource_size(res),
				pgmap_altmap(pgmap));
		kasan_remove_zero_shadow(__va(res->start), resource_size(res));
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
	pgmap_array_delete(res);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
		      "%s: failed to free all reserved pages\n", __func__);
}

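/*
 * Release callback for the internally-managed percpu_ref; fires once
 * the count drains to zero and lets dev_pagemap_cleanup()'s
 * wait_for_completion() proceed.
 */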
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap =
		container_of(ref, struct dev_pagemap, internal_ref);

	complete(&pgmap->done);
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res and type members of @pgmap must be initialized
 *    by the caller before passing it to this function.
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref
 *    must be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	struct resource *res = &pgmap->res;
	struct dev_pagemap *conflict_pgmap;
	struct mhp_restrictions restrictions = {
		/*
		 * We do not want any optional features, only our own memmap.
		 */
		.altmap = pgmap_altmap(pgmap),
	};
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram;
	bool need_devmap_managed = true;

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
			WARN(1, "Device private memory not supported\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
			WARN(1, "Missing migrate_to_ram method\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_FS_DAX:
		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
			WARN(1, "File system DAX not supported\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_DEVDAX:
	case MEMORY_DEVICE_PCI_P2PDMA:
		need_devmap_managed = false;
		break;
	default:
		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
		break;
	}

	if (!pgmap->ref) {
		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
			return ERR_PTR(-EINVAL);

		init_completion(&pgmap->done);
		error = percpu_ref_init(&pgmap->internal_ref,
				dev_pagemap_percpu_release, 0, GFP_KERNEL);
		if (error)
			return ERR_PTR(error);
		pgmap->ref = &pgmap->internal_ref;
	} else {
		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
			WARN(1, "Missing reference count teardown definition\n");
			return ERR_PTR(-EINVAL);
		}
	}

	if (need_devmap_managed) {
		error = devmap_managed_enable_get(dev, pgmap);
		if (error)
			return ERR_PTR(error);
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		error = -ENOMEM;
		goto err_array;
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		error = -ENOMEM;
		goto err_array;
	}

	is_ram = region_intersects(res->start, resource_size(res),
			IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
				is_ram == REGION_MIXED ? "mixed" : "ram", res);
		error = -ENXIO;
		goto err_array;
	}

	pgmap->dev = dev;

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
				PHYS_PFN(res->end), pgmap, GFP_KERNEL));
	if (error)
		goto err_array;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0,
			resource_size(res));
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory.
	 * Moreover, the device memory is inaccessible, so we do not want to
	 * create a linear mapping for the memory like arch_add_memory()
	 * would do.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		error = add_pages(nid, PHYS_PFN(res->start),
				PHYS_PFN(resource_size(res)), &restrictions);
	} else {
		error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, res->start, resource_size(res),
				&restrictions);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
				PHYS_PFN(resource_size(res)), restrictions.altmap);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				PHYS_PFN(res->start),
				PHYS_PFN(resource_size(res)), pgmap);
	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);

	return __va(res->start);

 err_add_memory:
	kasan_remove_zero_shadow(__va(res->start), resource_size(res));
 err_kasan:
	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
 err_pfn_remap:
	pgmap_array_delete(res);
 err_array:
	dev_pagemap_kill(pgmap);
	dev_pagemap_cleanup(pgmap);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);

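/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a driver fills in the res and type fields of an embedded dev_pagemap,
 * optionally supplies ops, and gets back a kernel virtual address
 * covering the newly created struct pages. The foo_* names and struct
 * foo (assumed to embed a struct dev_pagemap) are illustrative
 * assumptions; only devm_memremap_pages() and the dev_pagemap fields
 * used here are real.
 *
 *	static void foo_page_free(struct page *page)
 *	{
 *		(reclaim the device block backing this page)
 *	}
 *
 *	static const struct dev_pagemap_ops foo_pagemap_ops = {
 *		.page_free = foo_page_free,
 *	};
 *
 *	static int foo_probe(struct device *dev, struct resource *res)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		void *addr;
 *
 *		foo->pgmap.res = *res;
 *		foo->pgmap.type = MEMORY_DEVICE_FS_DAX;
 *		foo->pgmap.ops = &foo_pagemap_ops;
 *		addr = devm_memremap_pages(dev, &foo->pgmap);
 *		if (IS_ERR(addr))
 *			return PTR_ERR(addr);
 *		return 0;
 *	}
 *
 * Leaving pgmap->ref NULL, as above, opts into the internal percpu_ref;
 * supplying a ref instead requires both ops->kill and ops->cleanup.
 */
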
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

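/*
 * A vmem_altmap lets the memmap (the struct page array) for a device
 * range be allocated out of that same range rather than from main
 * memory. The first reserve + free pfns are set aside for it, which is
 * why pfn_first() above skips vmem_altmap_offset() pfns.
 */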
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	if (altmap)
		return altmap->reserve + altmap->free;
	return 0;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
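
/*
 * A sketch of the intended caller pattern (hypothetical loop, not part
 * of this file): pass the previous return value back in when scanning
 * consecutive pfns, so the common case stays a cheap range check and
 * the xarray lookup only runs when crossing into a different pgmap.
 *
 *	struct dev_pagemap *pgmap = NULL;
 *	unsigned long pfn;
 *
 *	for (pfn = start; pfn < end; pfn++) {
 *		pgmap = get_dev_pagemap(pfn, pgmap);
 *		if (!pgmap)
 *			break;
 *		(operate on pfn_to_page(pfn))
 *	}
 *	put_dev_pagemap(pgmap);
 */
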
#ifdef CONFIG_DEV_PAGEMAP_OPS
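/*
 * Called for pages whose pgmap supplies a page_free() callback, once the
 * devmap_managed_key static branch routes put_page() here. Note the
 * convention: a refcount of 1 means the device page is idle/free, so the
 * page is handed back to the driver when the count drops to 1, and only
 * a drop all the way to 0 takes the normal __put_page() path.
 */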
void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If refcount is 1 then page is freed and refcount is stable as nobody
	 * holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		mem_cgroup_uncharge(page);

		/*
		 * When a device_private page is freed, the page->mapping field
		 * may still contain a (stale) mapping value. For example, the
		 * lower bits of page->mapping may still identify the page as
		 * an anonymous page. Ultimately, this entire field is just
		 * stale and wrong, and it will cause errors if not cleared.
		 * One example is:
		 *
		 *  migrate_vma_pages()
		 *    migrate_vma_insert_page()
		 *      page_add_new_anon_rmap()
		 *        __page_set_anon_rmap()
		 *          ...checks page->mapping, via PageAnon(page) call,
		 *          and incorrectly concludes that the page is an
		 *          anonymous page. Therefore, it incorrectly,
		 *          silently fails to set up the new anon rmap.
		 *
		 * For other types of ZONE_DEVICE pages, migration is either
		 * handled differently or not done at all, so there is no need
		 * to clear page->mapping.
		 */
		if (is_device_private_page(page))
			page->mapping = NULL;

		page->pgmap->ops->page_free(page);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */