/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>
#include <linux/hmm.h>
static DEFINE_XARRAY(pgmap_array);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);
	struct hmm_devmem *devmem;

	devmem = container_of(page->pgmap, typeof(*devmem), pagemap);

	/*
	 * The page_fault() callback must migrate page back to system memory
	 * so that CPU can access it. This might fail for various reasons
	 * (device issue, device was unsafely unplugged, ...). When such
	 * error conditions happen, the callback must return VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do, in include/linux/memremap.h
	 */
	return devmem->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */
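
/*
 * Illustrative sketch only, not part of the original file: a driver's
 * page_fault() callback, as described in the comment above, migrates the
 * page back to system memory and returns VM_FAULT_SIGBUS when that
 * migration fails. The names my_devmem_fault() and my_migrate_to_ram()
 * are hypothetical placeholders for driver-specific code.
 *
 *	static vm_fault_t my_devmem_fault(struct vm_area_struct *vma,
 *					  unsigned long addr,
 *					  const struct page *page,
 *					  unsigned int flags,
 *					  pmd_t *pmdp)
 *	{
 *		if (my_migrate_to_ram(vma, addr, page))
 *			return VM_FAULT_SIGBUS;
 *		return 0;
 *	}
 */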

static void pgmap_array_delete(struct resource *res)
{
	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;
	struct vmem_altmap *altmap = &pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (pgmap->altmap_valid)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))

static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	resource_size_t align_start, align_size;
	unsigned long pfn;
	int nid;

	pgmap->kill(pgmap->ref);
	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	nid = page_to_nid(pfn_to_page(align_start >> PAGE_SHIFT));

	mem_hotplug_begin();
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		pfn = align_start >> PAGE_SHIFT;
		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
				align_size >> PAGE_SHIFT, NULL);
	} else {
		arch_remove_memory(nid, align_start, align_size,
				pgmap->altmap_valid ? &pgmap->altmap : NULL);
		kasan_remove_zero_shadow(__va(align_start), align_size);
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_array_delete(res);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
		      "%s: failed to free all reserved pages\n", __func__);
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case altmap_valid
 *    must be set to true
 *
 * 3/ pgmap->ref must be 'live' on entry and will be killed at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 *
 * An illustrative caller sketch, marked as such, follows the function body
 * below.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	resource_size_t align_start, align_size, align_end;
	struct vmem_altmap *altmap = pgmap->altmap_valid ?
			&pgmap->altmap : NULL;
	struct resource *res = &pgmap->res;
	struct dev_pagemap *conflict_pgmap;
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram;

	if (!pgmap->ref || !pgmap->kill)
		return ERR_PTR(-EINVAL);

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	align_end = align_start + align_size - 1;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return ERR_PTR(-ENOMEM);
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return ERR_PTR(-ENOMEM);
	}

	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
				is_ram == REGION_MIXED ? "mixed" : "ram", res);
		error = -ENXIO;
		goto err_array;
	}

	pgmap->dev = dev;

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
				PHYS_PFN(res->end), pgmap, GFP_KERNEL));
	if (error)
		goto err_array;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory. More-
	 * over the device memory is un-accessible thus we do not want to
	 * create a linear mapping for the memory like arch_add_memory()
	 * would do.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		error = add_pages(nid, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, NULL, false);
	} else {
		error = kasan_add_zero_shadow(__va(align_start), align_size);
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, align_start, align_size, altmap,
				false);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, altmap);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, pgmap);
	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);

	return __va(res->start);

 err_add_memory:
	kasan_remove_zero_shadow(__va(align_start), align_size);
 err_kasan:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
	pgmap_array_delete(res);
 err_array:
	pgmap->kill(pgmap->ref);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to lookup page_map
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_enable;

/*
 * Toggle the static key for ->page_free() callbacks when dev_pagemap
 * pages go idle.
 */
void dev_pagemap_get_ops(void)
{
	if (atomic_inc_return(&devmap_enable) == 1)
		static_branch_enable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_get_ops);

void dev_pagemap_put_ops(void)
{
	if (atomic_dec_and_test(&devmap_enable))
		static_branch_disable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_put_ops);

void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If refcount is 1 then page is freed and refcount is stable as nobody
	 * holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		mem_cgroup_uncharge(page);

		page->pgmap->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */