// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>
#include "internal.h"

static DEFINE_XARRAY(pgmap_array);

/*
 * The memremap() and memremap_pages() interfaces are alternately used
 * to map persistent memory namespaces. These interfaces place different
 * constraints on the alignment and size of the mapping (namespace).
 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 * only map subsections (2MB), and on at least one architecture (PowerPC)
 * the minimum mapping granularity of memremap_pages() is 16MB.
 *
 * The role of memremap_compat_align() is to communicate the minimum
 * arch supported alignment of a namespace such that it can freely
 * switch modes without violating the arch constraint. Namely, do not
 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 */
#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
unsigned long memremap_compat_align(void)
{
	return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif

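/*
 * Example (illustrative only, not part of the original file): a namespace
 * driver might use memremap_compat_align() to reject a layout that could not
 * later switch between memremap() and memremap_pages() modes.  The res and
 * size identifiers below are placeholders, not code from this file:
 *
 *	unsigned long align = memremap_compat_align();
 *
 *	if (!IS_ALIGNED(res->start, align) ||
 *	    !IS_ALIGNED(resource_size(res), align))
 *		return -EINVAL;
 */
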
#ifdef CONFIG_FS_DAX
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);

static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_dec(&devmap_managed_key);
}

static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_inc(&devmap_managed_key);
}
#else
static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
}
static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
}
#endif /* CONFIG_FS_DAX */

static void pgmap_array_delete(struct range *range)
{
	xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	unsigned long pfn = PHYS_PFN(range->start);

	if (range_id)
		return pfn;
	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
}

bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
	int i;

	for (i = 0; i < pgmap->nr_range; i++) {
		struct range *range = &pgmap->ranges[i];

		if (pfn >= PHYS_PFN(range->start) &&
		    pfn <= PHYS_PFN(range->end))
			return pfn >= pfn_first(pgmap, i);
	}

	return false;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
{
	const struct range *range = &pgmap->ranges[range_id];

	return (range->start + range_len(range)) >> PAGE_SHIFT;
}

static unsigned long pfn_len(struct dev_pagemap *pgmap, unsigned long range_id)
{
	return (pfn_end(pgmap, range_id) -
		pfn_first(pgmap, range_id)) >> pgmap->vmemmap_shift;
}

static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	struct page *first_page;

	/* make sure to access a memmap that was actually initialized */
	first_page = pfn_to_page(pfn_first(pgmap, range_id));

	/* pages are dead and unused, undo the arch mapping */
	mem_hotplug_begin();
	remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
				   PHYS_PFN(range_len(range)));
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		__remove_pages(PHYS_PFN(range->start),
			       PHYS_PFN(range_len(range)), NULL);
	} else {
		arch_remove_memory(range->start, range_len(range),
				pgmap_altmap(pgmap));
		kasan_remove_zero_shadow(__va(range->start), range_len(range));
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
	pgmap_array_delete(range);
}

void memunmap_pages(struct dev_pagemap *pgmap)
{
	int i;

	percpu_ref_kill(&pgmap->ref);
	for (i = 0; i < pgmap->nr_range; i++)
		percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i));
	wait_for_completion(&pgmap->done);

	for (i = 0; i < pgmap->nr_range; i++)
		pageunmap_range(pgmap, i);
	percpu_ref_exit(&pgmap->ref);

	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
	devmap_managed_enable_put(pgmap);
}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
	memunmap_pages(data);
}

static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);

	complete(&pgmap->done);
}

static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
		int range_id, int nid)
{
	const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
	struct range *range = &pgmap->ranges[range_id];
	struct dev_pagemap *conflict_pgmap;
	int error, is_ram;

	if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
				"altmap not supported for multiple ranges\n"))
		return -EINVAL;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return -ENOMEM;
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return -ENOMEM;
	}

	is_ram = region_intersects(range->start, range_len(range),
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
				is_ram == REGION_MIXED ? "mixed" : "ram",
				range->start, range->end);
		return -ENXIO;
	}

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
				PHYS_PFN(range->end), pgmap, GFP_KERNEL));
	if (error)
		return error;

	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
			range_len(range));
	if (error)
		goto err_pfn_remap;

	if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
		error = -EINVAL;
		goto err_pfn_remap;
	}

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory. More-
	 * over the device memory is un-accessible thus we do not want to
	 * create a linear mapping for the memory like arch_add_memory()
	 * would do.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (is_private) {
		error = add_pages(nid, PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), params);
	} else {
		error = kasan_add_zero_shadow(__va(range->start), range_len(range));
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, range->start, range_len(range),
					params);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), params->altmap,
				MIGRATE_MOVABLE);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), pgmap);
	percpu_ref_get_many(&pgmap->ref, pfn_len(pgmap, range_id));
	return 0;

err_add_memory:
	if (!is_private)
		kasan_remove_zero_shadow(__va(range->start), range_len(range));
err_kasan:
	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
err_pfn_remap:
	pgmap_array_delete(range);
	return error;
}

/*
 * Not device managed version of devm_memremap_pages(), undone by
 * memunmap_pages(). Please use devm_memremap_pages() if you have a struct
 * device available.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
	struct mhp_params params = {
		.altmap = pgmap_altmap(pgmap),
		.pgmap = pgmap,
		.pgprot = PAGE_KERNEL,
	};
	const int nr_range = pgmap->nr_range;
	int error, i;

	if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
		return ERR_PTR(-EINVAL);

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
			WARN(1, "Device private memory not supported\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
			WARN(1, "Missing migrate_to_ram method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops->page_free) {
			WARN(1, "Missing page_free method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->owner) {
			WARN(1, "Missing owner\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_COHERENT:
		if (!pgmap->ops->page_free) {
			WARN(1, "Missing page_free method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->owner) {
			WARN(1, "Missing owner\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_FS_DAX:
		if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
			WARN(1, "File system DAX not supported\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_GENERIC:
		break;
	case MEMORY_DEVICE_PCI_P2PDMA:
		params.pgprot = pgprot_noncached(params.pgprot);
		break;
	default:
		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
		break;
	}

	init_completion(&pgmap->done);
	error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
				GFP_KERNEL);
	if (error)
		return ERR_PTR(error);

	devmap_managed_enable_get(pgmap);

	/*
	 * Clear the pgmap nr_range as it will be incremented for each
	 * successfully processed range. This communicates how many
	 * regions to unwind in the abort case.
	 */
	pgmap->nr_range = 0;
	error = 0;
	for (i = 0; i < nr_range; i++) {
		error = pagemap_range(pgmap, &params, i, nid);
		if (error)
			break;
		pgmap->nr_range++;
	}

	if (i < nr_range) {
		memunmap_pages(pgmap);
		pgmap->nr_range = nr_range;
		return ERR_PTR(error);
	}

	return __va(pgmap->ranges[0].start);
}
EXPORT_SYMBOL_GPL(memremap_pages);

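/*
 * Example (illustrative sketch, not from this file): the minimum a
 * MEMORY_DEVICE_PRIVATE user must supply to pass the type checks in
 * memremap_pages() above.  The identifiers my_ops, my_page_free,
 * my_migrate_to_ram and my_pgmap are placeholders for a driver's own
 * definitions:
 *
 *	static const struct dev_pagemap_ops my_ops = {
 *		.page_free	= my_page_free,
 *		.migrate_to_ram	= my_migrate_to_ram,
 *	};
 *
 *	res = request_free_mem_region(&iomem_resource, size, "my-devmem");
 *	my_pgmap.type = MEMORY_DEVICE_PRIVATE;
 *	my_pgmap.range.start = res->start;
 *	my_pgmap.range.end = res->end;
 *	my_pgmap.nr_range = 1;
 *	my_pgmap.ops = &my_ops;
 *	my_pgmap.owner = THIS_MODULE;	(any unique cookie identifying the driver)
 *	addr = memremap_pages(&my_pgmap, numa_node_id());
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */
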
/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref must
 *    be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ range is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	void *ret;

	ret = memremap_pages(pgmap, dev_to_node(dev));
	if (IS_ERR(ret))
		return ret;

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);

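/*
 * Example (illustrative sketch, not from this file): typical device-managed
 * usage from a driver probe path, with res and pgmap standing in for the
 * driver's own resource and dev_pagemap storage:
 *
 *	pgmap->type = MEMORY_DEVICE_GENERIC;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	pgmap->nr_range = 1;
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * The mapping and its struct pages are then torn down via
 * devm_memremap_pages_release() when the device is unbound, or earlier by
 * calling devm_memunmap_pages(dev, pgmap).
 */
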
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	if (altmap)
		return altmap->reserve + altmap->free;
	return 0;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->range.start && phys <= pgmap->range.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(&pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);

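/*
 * Example (illustrative sketch, not from this file): the optional @pgmap
 * argument lets a pfn walker amortize the xarray lookup across consecutive
 * pfns, holding only one reference per pagemap.  pfn, end_pfn and
 * do_something() are placeholders:
 *
 *	struct dev_pagemap *pgmap = NULL;
 *
 *	for (; pfn < end_pfn; pfn++) {
 *		pgmap = get_dev_pagemap(pfn, pgmap);
 *		if (!pgmap)
 *			break;
 *		do_something(pfn_to_page(pfn));
 *	}
 *	if (pgmap)
 *		put_dev_pagemap(pgmap);
 */
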
void free_zone_device_page(struct page *page)
{
	if (WARN_ON_ONCE(!page->pgmap->ops || !page->pgmap->ops->page_free))
		return;

	mem_cgroup_uncharge(page_folio(page));

	/*
	 * Note: we don't expect anonymous compound pages yet. Once supported
	 * and we could PTE-map them similar to THP, we'd have to clear
	 * PG_anon_exclusive on all tail pages.
	 */
	VM_BUG_ON_PAGE(PageAnon(page) && PageCompound(page), page);
	if (PageAnon(page))
		__ClearPageAnonExclusive(page);

	/*
	 * When a device managed page is freed, the page->mapping field
	 * may still contain a (stale) mapping value. For example, the
	 * lower bits of page->mapping may still identify the page as an
	 * anonymous page. Ultimately, this entire field is just stale
	 * and wrong, and it will cause errors if not cleared. One
	 * example is:
	 *
	 *  migrate_vma_pages()
	 *    migrate_vma_insert_page()
	 *      page_add_new_anon_rmap()
	 *        __page_set_anon_rmap()
	 *          ...checks page->mapping, via PageAnon(page) call,
	 *            and incorrectly concludes that the page is an
	 *            anonymous page. Therefore, it incorrectly,
	 *            silently fails to set up the new anon rmap.
	 *
	 * For other types of ZONE_DEVICE pages, migration is either
	 * handled differently or not done at all, so there is no need
	 * to clear page->mapping.
	 */
	page->mapping = NULL;
	page->pgmap->ops->page_free(page);

	/*
	 * Reset the page count to 1 to prepare for handing out the page again.
	 */
	set_page_count(page, 1);
}

#ifdef CONFIG_FS_DAX
bool __put_devmap_managed_page_refs(struct page *page, int refs)
{
	if (page->pgmap->type != MEMORY_DEVICE_FS_DAX)
		return false;

	/*
	 * fsdax page refcounts are 1-based, rather than 0-based: if
	 * refcount is 1, then the page is free and the refcount is
	 * stable because nobody holds a reference on the page.
	 */
	if (page_ref_sub_return(page, refs) == 1)
		wake_up_var(&page->_refcount);
	return true;
}
EXPORT_SYMBOL(__put_devmap_managed_page_refs);
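
/*
 * Example (illustrative sketch, not from this file): the wake_up_var() above
 * pairs with filesystem code that waits for a DAX page to become idle before
 * invalidating the backing blocks, conceptually:
 *
 *	wait_var_event(&page->_refcount,
 *		       atomic_read(&page->_refcount) == 1);
 *
 * Real callers (e.g. the filesystem break-layouts paths) use interruptible
 * wait_var_event() variants and drop/retake locks around the sleep; this only
 * shows the shape of the handshake with the refcount rule described above.
 */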
#endif /* CONFIG_FS_DAX */