/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>
static DEFINE_XARRAY(pgmap_array);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_managed_enable;

static void devmap_managed_enable_put(void *data)
{
	if (atomic_dec_and_test(&devmap_managed_enable))
		static_branch_disable(&devmap_managed_key);
}
static int devmap_managed_enable_get(struct device *dev,
		struct dev_pagemap *pgmap)
{
	if (!pgmap->ops || !pgmap->ops->page_free) {
		WARN(1, "Missing page_free method\n");
		return -EINVAL;
	}

	if (atomic_inc_return(&devmap_managed_enable) == 1)
		static_branch_enable(&devmap_managed_key);
	return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL);
}
#else
static int devmap_managed_enable_get(struct device *dev,
		struct dev_pagemap *pgmap)
{
	return -EINVAL;
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
static void pgmap_array_delete(struct resource *res)
{
	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}
static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	return PHYS_PFN(pgmap->res.start) +
		vmem_altmap_offset(pgmap_altmap(pgmap));
}
static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}
static unsigned long pfn_next(unsigned long pfn)
{
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->kill)
		pgmap->ops->kill(pgmap);
	else
		percpu_ref_kill(pgmap->ref);
}
static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->cleanup) {
		pgmap->ops->cleanup(pgmap);
	} else {
		wait_for_completion(&pgmap->done);
		percpu_ref_exit(pgmap->ref);
	}
}
static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	unsigned long pfn;
	int nid;

	dev_pagemap_kill(pgmap);
	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));
	dev_pagemap_cleanup(pgmap);

	/* pages are dead and unused, undo the arch mapping */
	nid = page_to_nid(pfn_to_page(PHYS_PFN(res->start)));

	mem_hotplug_begin();
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		pfn = PHYS_PFN(res->start);
		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
				PHYS_PFN(resource_size(res)), NULL);
	} else {
		arch_remove_memory(nid, res->start, resource_size(res),
				pgmap_altmap(pgmap));
		kasan_remove_zero_shadow(__va(res->start), resource_size(res));
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
	pgmap_array_delete(res);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
		      "%s: failed to free all reserved pages\n", __func__);
}
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap =
		container_of(ref, struct dev_pagemap, internal_ref);

	complete(&pgmap->done);
}
/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref
 *    must be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	struct resource *res = &pgmap->res;
	struct dev_pagemap *conflict_pgmap;
	struct mhp_restrictions restrictions = {
		/*
		 * We do not want any optional features, only our own memmap.
		 */
		.altmap = pgmap_altmap(pgmap),
	};
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram;
	bool need_devmap_managed = true;
	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
			WARN(1, "Device private memory not supported\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
			WARN(1, "Missing migrate_to_ram method\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_FS_DAX:
		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
			WARN(1, "File system DAX not supported\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_DEVDAX:
	case MEMORY_DEVICE_PCI_P2PDMA:
		need_devmap_managed = false;
		break;
	default:
		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
		break;
	}
	if (!pgmap->ref) {
		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
			return ERR_PTR(-EINVAL);

		init_completion(&pgmap->done);
		error = percpu_ref_init(&pgmap->internal_ref,
				dev_pagemap_percpu_release, 0, GFP_KERNEL);
		if (error)
			return ERR_PTR(error);
		pgmap->ref = &pgmap->internal_ref;
	} else {
		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
			WARN(1, "Missing reference count teardown definition\n");
			return ERR_PTR(-EINVAL);
		}
	}
	if (need_devmap_managed) {
		error = devmap_managed_enable_get(dev, pgmap);
		if (error)
			return ERR_PTR(error);
	}
	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		error = -ENOMEM;
		goto err_array;
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		error = -ENOMEM;
		goto err_array;
	}
	is_ram = region_intersects(res->start, resource_size(res),
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
				is_ram == REGION_MIXED ? "mixed" : "ram", res);
		error = -ENXIO;
		goto err_array;
	}
	pgmap->dev = dev;

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
				PHYS_PFN(res->end), pgmap, GFP_KERNEL));
	if (error)
		goto err_array;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();
	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0,
			resource_size(res));
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory.
	 * Moreover, the device memory is inaccessible, so we do not want to
	 * create a linear mapping for it the way arch_add_memory() would.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		error = add_pages(nid, PHYS_PFN(res->start),
				PHYS_PFN(resource_size(res)), &restrictions);
	} else {
		error = kasan_add_zero_shadow(__va(res->start),
				resource_size(res));
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, res->start, resource_size(res),
				&restrictions);
	}
	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
				PHYS_PFN(resource_size(res)),
				restrictions.altmap);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;
	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				PHYS_PFN(res->start),
				PHYS_PFN(resource_size(res)), pgmap);
	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);

	return __va(res->start);

 err_add_memory:
	kasan_remove_zero_shadow(__va(res->start), resource_size(res));
 err_kasan:
	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
 err_pfn_remap:
	pgmap_array_delete(res);
 err_array:
	dev_pagemap_kill(pgmap);
	dev_pagemap_cleanup(pgmap);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	if (altmap)
		return altmap->reserve + altmap->free;
	return 0;
}
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}
/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number for which to look up the pgmap
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
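/*
 * Example (an illustrative sketch, not kernel code): callers translating a
 * run of pfns typically cache the pgmap across iterations so that repeated
 * lookups hit the fast path above.  Here pfns[] and npfns are assumed
 * locals of the hypothetical caller.
 *
 *	struct dev_pagemap *pgmap = NULL;
 *	unsigned long i;
 *
 *	for (i = 0; i < npfns; i++) {
 *		pgmap = get_dev_pagemap(pfns[i], pgmap);
 *		if (!pgmap)
 *			break;
 *		...use pfn_to_page(pfns[i]) while the reference is held...
 *	}
 *	if (pgmap)
 *		put_dev_pagemap(pgmap);
 */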
#ifdef CONFIG_DEV_PAGEMAP_OPS
void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If refcount is 1 then page is freed and refcount is stable as nobody
	 * holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		mem_cgroup_uncharge(page);

		page->pgmap->ops->page_free(page);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */