// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE		(1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/**
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then this function takes a reference on it and returns
 * it. Otherwise it allocates a new HMM struct, initializes it, associates it
 * with the mm, and returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
        struct hmm *hmm;

        lockdep_assert_held_exclusive(&mm->mmap_sem);

        /* Abuse the page_table_lock to also protect mm->hmm. */
        spin_lock(&mm->page_table_lock);
        hmm = mm->hmm;
        if (mm->hmm && kref_get_unless_zero(&mm->hmm->kref))
                goto out_unlock;
        spin_unlock(&mm->page_table_lock);

        hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
        if (!hmm)
                return NULL;
        init_waitqueue_head(&hmm->wq);
        INIT_LIST_HEAD(&hmm->mirrors);
        init_rwsem(&hmm->mirrors_sem);
        hmm->mmu_notifier.ops = NULL;
        INIT_LIST_HEAD(&hmm->ranges);
        mutex_init(&hmm->lock);
        kref_init(&hmm->kref);
        hmm->notifiers = 0;
        hmm->mm = mm;

        hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
        if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
                kfree(hmm);
                return NULL;
        }

        mmgrab(hmm->mm);

        /*
         * We hold the exclusive mmap_sem here so we know that mm->hmm is
         * still NULL or 0 kref, and is safe to update.
         */
        spin_lock(&mm->page_table_lock);
        mm->hmm = hmm;

out_unlock:
        spin_unlock(&mm->page_table_lock);
        return hmm;
}

static void hmm_free_rcu(struct rcu_head *rcu)
{
        struct hmm *hmm = container_of(rcu, struct hmm, rcu);

        mmdrop(hmm->mm);
        kfree(hmm);
}

static void hmm_free(struct kref *kref)
{
        struct hmm *hmm = container_of(kref, struct hmm, kref);

        spin_lock(&hmm->mm->page_table_lock);
        if (hmm->mm->hmm == hmm)
                hmm->mm->hmm = NULL;
        spin_unlock(&hmm->mm->page_table_lock);

        mmu_notifier_unregister_no_release(&hmm->mmu_notifier, hmm->mm);
        mmu_notifier_call_srcu(&hmm->rcu, hmm_free_rcu);
}

static inline void hmm_put(struct hmm *hmm)
{
        kref_put(&hmm->kref, hmm_free);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
        struct hmm_mirror *mirror;

        /* Bail out if hmm is in the process of being freed */
        if (!kref_get_unless_zero(&hmm->kref))
                return;

        /*
         * Since hmm_range_register() holds an mmget() reference,
         * hmm_release() cannot run as long as a range exists.
         */
        WARN_ON(!list_empty_careful(&hmm->ranges));

        down_write(&hmm->mirrors_sem);
        mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
                                          list);
        while (mirror) {
                list_del_init(&mirror->list);
                if (mirror->ops->release) {
                        /*
                         * Drop mirrors_sem so the release callback can wait
                         * on any pending work that might itself trigger a
                         * mmu_notifier callback and thus would deadlock with
                         * us.
                         */
                        up_write(&hmm->mirrors_sem);
                        mirror->ops->release(mirror);
                        down_write(&hmm->mirrors_sem);
                }
                mirror = list_first_entry_or_null(&hmm->mirrors,
                                                  struct hmm_mirror, list);
        }
        up_write(&hmm->mirrors_sem);

        hmm_put(hmm);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
                        const struct mmu_notifier_range *nrange)
{
        struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
        struct hmm_mirror *mirror;
        struct hmm_update update;
        struct hmm_range *range;
        int ret = 0;

        if (!kref_get_unless_zero(&hmm->kref))
                return 0;

        update.start = nrange->start;
        update.end = nrange->end;
        update.event = HMM_UPDATE_INVALIDATE;
        update.blockable = mmu_notifier_range_blockable(nrange);

        if (mmu_notifier_range_blockable(nrange))
                mutex_lock(&hmm->lock);
        else if (!mutex_trylock(&hmm->lock)) {
                ret = -EAGAIN;
                goto out;
        }
        hmm->notifiers++;
        list_for_each_entry(range, &hmm->ranges, list) {
                if (update.end < range->start || update.start >= range->end)
                        continue;

                range->valid = false;
        }
        mutex_unlock(&hmm->lock);

        if (mmu_notifier_range_blockable(nrange))
                down_read(&hmm->mirrors_sem);
        else if (!down_read_trylock(&hmm->mirrors_sem)) {
                ret = -EAGAIN;
                goto out;
        }
        list_for_each_entry(mirror, &hmm->mirrors, list) {
                int ret;

                ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
                if (!update.blockable && ret == -EAGAIN)
                        break;
        }
        up_read(&hmm->mirrors_sem);

out:
        hmm_put(hmm);
        return ret;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
                        const struct mmu_notifier_range *nrange)
{
        struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

        if (!kref_get_unless_zero(&hmm->kref))
                return;

        mutex_lock(&hmm->lock);
        hmm->notifiers--;
        if (!hmm->notifiers) {
                struct hmm_range *range;

                list_for_each_entry(range, &hmm->ranges, list) {
                        if (range->valid)
                                continue;
                        range->valid = true;
                }
                wake_up_all(&hmm->wq);
        }
        mutex_unlock(&hmm->lock);

        hmm_put(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
        .release                = hmm_release,
        .invalidate_range_start = hmm_invalidate_range_start,
        .invalidate_range_end   = hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
        lockdep_assert_held_exclusive(&mm->mmap_sem);

        /* Sanity check */
        if (!mm || !mirror || !mirror->ops)
                return -EINVAL;

        mirror->hmm = hmm_get_or_create(mm);
        if (!mirror->hmm)
                return -ENOMEM;

        down_write(&mirror->hmm->mirrors_sem);
        list_add(&mirror->list, &mirror->hmm->mirrors);
        up_write(&mirror->hmm->mirrors_sem);

        return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
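
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a driver built on hmm_mirror_register() supplies hmm_mirror_ops and
 * registers one mirror per mirrored process. All driver_* names below are
 * hypothetical.
 *
 *	static int driver_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *			const struct hmm_update *update)
 *	{
 *		struct driver_mirror *dm = container_of(mirror,
 *				struct driver_mirror, mirror);
 *
 *		// Invalidate the device page table for the range
 *		// [update->start, update->end). If update->blockable is
 *		// false, anything that might sleep must fail with -EAGAIN
 *		// instead of sleeping.
 *		return 0;
 *	}
 *
 *	static const struct hmm_mirror_ops driver_mirror_ops = {
 *		.sync_cpu_device_pagetables = driver_sync_cpu_device_pagetables,
 *	};
 *
 *	// Caller holds mmap_sem for write (see the lockdep assert above).
 *	down_write(&mm->mmap_sem);
 *	dm->mirror.ops = &driver_mirror_ops;
 *	ret = hmm_mirror_register(&dm->mirror, mm);
 *	up_write(&mm->mmap_sem);
 */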

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
        struct hmm *hmm = mirror->hmm;

        down_write(&hmm->mirrors_sem);
        list_del_init(&mirror->list);
        up_write(&hmm->mirrors_sem);
        hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
        struct hmm_range        *range;
        struct dev_pagemap      *pgmap;
        unsigned long           last;
        bool                    fault;
        bool                    block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
                            bool write_fault, uint64_t *pfn)
{
        unsigned int flags = FAULT_FLAG_REMOTE;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        vm_fault_t ret;

        flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
        flags |= write_fault ? FAULT_FLAG_WRITE : 0;
        ret = handle_mm_fault(vma, addr, flags);
        if (ret & VM_FAULT_RETRY)
                return -EAGAIN;
        if (ret & VM_FAULT_ERROR) {
                *pfn = range->values[HMM_PFN_ERROR];
                return -EFAULT;
        }

        return -EBUSY;
}

static int hmm_pfns_bad(unsigned long addr,
                        unsigned long end,
                        struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = range->pfns;
        unsigned long i;

        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, i++)
                pfns[i] = range->values[HMM_PFN_ERROR];

        return 0;
}

/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: write fault?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns
 * true, or whenever there is no page directory covering the virtual address
 * range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
                              bool fault, bool write_fault,
                              struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = range->pfns;
        unsigned long i, page_size;

        hmm_vma_walk->last = addr;
        page_size = hmm_range_page_size(range);
        i = (addr - range->start) >> range->page_shift;

        for (; addr < end; addr += page_size, i++) {
                pfns[i] = range->values[HMM_PFN_NONE];
                if (fault || write_fault) {
                        int ret;

                        ret = hmm_vma_do_fault(walk, addr, write_fault,
                                               &pfns[i]);
                        if (ret != -EBUSY)
                                return ret;
                }
        }

        return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                      uint64_t pfns, uint64_t cpu_flags,
                                      bool *fault, bool *write_fault)
{
        struct hmm_range *range = hmm_vma_walk->range;

        if (!hmm_vma_walk->fault)
                return;

        /*
         * Consider not only the individual per-page request but also the
         * default flags requested for the range. The API can be used in two
         * fashions: one where the HMM user coalesces multiple page faults
         * into one request and sets flags per pfn for those faults, and one
         * where the HMM user wants to pre-fault a range with specific flags.
         * For the latter it would be a waste to have the user pre-fill the
         * pfn array with default flags.
         */
        pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

        /* We aren't asked to do anything ... */
        if (!(pfns & range->flags[HMM_PFN_VALID]))
                return;
        /* If this is device memory then only fault if explicitly requested */
        if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
                /* Do we fault on device memory ? */
                if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
                        *write_fault = pfns & range->flags[HMM_PFN_WRITE];
                        *fault = true;
                }
                return;
        }

        /* If CPU page table is not valid then we need to fault */
        *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
        /* Need to write fault ? */
        if ((pfns & range->flags[HMM_PFN_WRITE]) &&
            !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
                *write_fault = true;
                *fault = true;
        }
}
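
/*
 * Illustrative sketch (editorial addition): the two fashions described in
 * the comment above map onto range setup as follows. To fault all pages in
 * a range with at least read permission, only the default flags are set:
 *
 *	range->default_flags = range->flags[HMM_PFN_VALID];
 *	range->pfn_flags_mask = 0;
 *
 * To additionally request write access for individual pages, the per-pfn
 * flag is let through the mask and set on the relevant entries:
 *
 *	range->default_flags = range->flags[HMM_PFN_VALID];
 *	range->pfn_flags_mask = range->flags[HMM_PFN_WRITE];
 *	range->pfns[i] = range->flags[HMM_PFN_WRITE];
 */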

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                 const uint64_t *pfns, unsigned long npages,
                                 uint64_t cpu_flags, bool *fault,
                                 bool *write_fault)
{
        unsigned long i;

        if (!hmm_vma_walk->fault) {
                *fault = *write_fault = false;
                return;
        }

        *fault = *write_fault = false;
        for (i = 0; i < npages; ++i) {
                hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
                                   fault, write_fault);
                if ((*write_fault))
                        return;
        }
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
                             struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        bool fault, write_fault;
        unsigned long i, npages;
        uint64_t *pfns;

        i = (addr - range->start) >> PAGE_SHIFT;
        npages = (end - addr) >> PAGE_SHIFT;
        pfns = &range->pfns[i];
        hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                             0, &fault, &write_fault);
        return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
        if (pmd_protnone(pmd))
                return 0;
        return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_WRITE] :
                                range->flags[HMM_PFN_VALID];
}

static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
        if (!pud_present(pud))
                return 0;
        return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_WRITE] :
                                range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
                              unsigned long addr,
                              unsigned long end,
                              uint64_t *pfns,
                              pmd_t pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long pfn, npages, i;
        bool fault, write_fault;
        uint64_t cpu_flags;

        npages = (end - addr) >> PAGE_SHIFT;
        cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
        hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
                             &fault, &write_fault);

        if (pmd_protnone(pmd) || fault || write_fault)
                return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

        pfn = pmd_pfn(pmd) + pte_index(addr);
        for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
                if (pmd_devmap(pmd)) {
                        hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
                                              hmm_vma_walk->pgmap);
                        if (unlikely(!hmm_vma_walk->pgmap))
                                return -EBUSY;
                }
                pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
        }
        if (hmm_vma_walk->pgmap) {
                put_dev_pagemap(hmm_vma_walk->pgmap);
                hmm_vma_walk->pgmap = NULL;
        }
        hmm_vma_walk->last = end;
        return 0;
#else
        /* If THP is not enabled then we should never reach this code! */
        return -EINVAL;
#endif
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
        if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
                return 0;
        return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_WRITE] :
                                range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, pmd_t *pmdp, pte_t *ptep,
                              uint64_t *pfn)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        bool fault, write_fault;
        uint64_t cpu_flags;
        pte_t pte = *ptep;
        uint64_t orig_pfn = *pfn;

        *pfn = range->values[HMM_PFN_NONE];
        fault = write_fault = false;

        if (pte_none(pte)) {
                hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
                                   &fault, &write_fault);
                if (fault || write_fault)
                        goto fault;
                return 0;
        }

        if (!pte_present(pte)) {
                swp_entry_t entry = pte_to_swp_entry(pte);

                if (!non_swap_entry(entry)) {
                        if (fault || write_fault)
                                goto fault;
                        return 0;
                }

                /*
                 * This is a special swap entry: ignore migration, use the
                 * device, and report anything else as an error.
                 */
                if (is_device_private_entry(entry)) {
                        cpu_flags = range->flags[HMM_PFN_VALID] |
                                    range->flags[HMM_PFN_DEVICE_PRIVATE];
                        cpu_flags |= is_write_device_private_entry(entry) ?
                                     range->flags[HMM_PFN_WRITE] : 0;
                        hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
                                           &fault, &write_fault);
                        if (fault || write_fault)
                                goto fault;
                        *pfn = hmm_device_entry_from_pfn(range,
                                                         swp_offset(entry));
                        *pfn |= cpu_flags;
                        return 0;
                }

                if (is_migration_entry(entry)) {
                        if (fault || write_fault) {
                                pte_unmap(ptep);
                                hmm_vma_walk->last = addr;
                                migration_entry_wait(vma->vm_mm,
                                                     pmdp, addr);
                                return -EBUSY;
                        }
                        return 0;
                }

                /* Report error for everything else */
                *pfn = range->values[HMM_PFN_ERROR];
                return -EFAULT;
        } else {
                cpu_flags = pte_to_hmm_pfn_flags(range, pte);
                hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
                                   &fault, &write_fault);
        }

        if (fault || write_fault)
                goto fault;

        if (pte_devmap(pte)) {
                hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
                                      hmm_vma_walk->pgmap);
                if (unlikely(!hmm_vma_walk->pgmap))
                        return -EBUSY;
        } else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
                *pfn = range->values[HMM_PFN_SPECIAL];
                return -EFAULT;
        }

        *pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
        return 0;

fault:
        if (hmm_vma_walk->pgmap) {
                put_dev_pagemap(hmm_vma_walk->pgmap);
                hmm_vma_walk->pgmap = NULL;
        }
        pte_unmap(ptep);
        /* Fault any virtual address we were asked to fault */
        return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
                            unsigned long start,
                            unsigned long end,
                            struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        uint64_t *pfns = range->pfns;
        unsigned long addr = start, i;
        pte_t *ptep;
        pmd_t pmd;


again:
        pmd = READ_ONCE(*pmdp);
        if (pmd_none(pmd))
                return hmm_vma_walk_hole(start, end, walk);

        if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
                return hmm_pfns_bad(start, end, walk);

        if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
                bool fault, write_fault;
                unsigned long npages;
                uint64_t *pfns;

                i = (addr - range->start) >> PAGE_SHIFT;
                npages = (end - addr) >> PAGE_SHIFT;
                pfns = &range->pfns[i];

                hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                                     0, &fault, &write_fault);
                if (fault || write_fault) {
                        hmm_vma_walk->last = addr;
                        pmd_migration_entry_wait(vma->vm_mm, pmdp);
                        return -EBUSY;
                }
                return 0;
        } else if (!pmd_present(pmd))
                return hmm_pfns_bad(start, end, walk);

        if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
                /*
                 * No need to take the pmd lock here: even if some other
                 * thread is splitting the huge pmd we will get that event
                 * through the mmu_notifier callback.
                 *
                 * So just read the pmd value, check again that it is a
                 * transparent huge or device mapping, and compute the
                 * corresponding pfn values.
                 */
                pmd = pmd_read_atomic(pmdp);
                barrier();
                if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
                        goto again;

                i = (addr - range->start) >> PAGE_SHIFT;
                return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
        }

        /*
         * We have handled all the valid cases above, i.e. either none,
         * migration, huge or transparent huge. At this point it is either a
         * valid pmd entry pointing to a pte directory or a bad pmd that will
         * not recover.
         */
        if (pmd_bad(pmd))
                return hmm_pfns_bad(start, end, walk);

        ptep = pte_offset_map(pmdp, addr);
        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
                int r;

                r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
                if (r) {
                        /* hmm_vma_handle_pte() did unmap pte directory */
                        hmm_vma_walk->last = addr;
                        return r;
                }
        }
        if (hmm_vma_walk->pgmap) {
                /*
                 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
                 * so that we can leverage the get_dev_pagemap() optimization
                 * which will not re-take a reference on a pgmap if we already
                 * have one.
                 */
                put_dev_pagemap(hmm_vma_walk->pgmap);
                hmm_vma_walk->pgmap = NULL;
        }
        pte_unmap(ptep - 1);

        hmm_vma_walk->last = addr;
        return 0;
}

static int hmm_vma_walk_pud(pud_t *pudp,
                            unsigned long start,
                            unsigned long end,
                            struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long addr = start, next;
        pmd_t *pmdp;
        pud_t pud;
        int ret;

again:
        pud = READ_ONCE(*pudp);
        if (pud_none(pud))
                return hmm_vma_walk_hole(start, end, walk);

        if (pud_huge(pud) && pud_devmap(pud)) {
                unsigned long i, npages, pfn;
                uint64_t *pfns, cpu_flags;
                bool fault, write_fault;

                if (!pud_present(pud))
                        return hmm_vma_walk_hole(start, end, walk);

                i = (addr - range->start) >> PAGE_SHIFT;
                npages = (end - addr) >> PAGE_SHIFT;
                pfns = &range->pfns[i];

                cpu_flags = pud_to_hmm_pfn_flags(range, pud);
                hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                                     cpu_flags, &fault, &write_fault);
                if (fault || write_fault)
                        return hmm_vma_walk_hole_(addr, end, fault,
                                                  write_fault, walk);

                pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
                for (i = 0; i < npages; ++i, ++pfn) {
                        hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
                                              hmm_vma_walk->pgmap);
                        if (unlikely(!hmm_vma_walk->pgmap))
                                return -EBUSY;
                        pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
                                  cpu_flags;
                }
                if (hmm_vma_walk->pgmap) {
                        put_dev_pagemap(hmm_vma_walk->pgmap);
                        hmm_vma_walk->pgmap = NULL;
                }
                hmm_vma_walk->last = end;
                return 0;
        }

        split_huge_pud(walk->vma, pudp, addr);
        if (pud_none(*pudp))
                goto again;

        pmdp = pmd_offset(pudp, addr);
        do {
                next = pmd_addr_end(addr, end);
                ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
                if (ret)
                        return ret;
        } while (pmdp++, addr = next, addr != end);

        return 0;
}

static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                      unsigned long start, unsigned long end,
                                      struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
        unsigned long addr = start, i, pfn, mask, size, pfn_inc;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        struct hstate *h = hstate_vma(vma);
        uint64_t orig_pfn, cpu_flags;
        bool fault, write_fault;
        spinlock_t *ptl;
        pte_t entry;
        int ret = 0;

        size = 1UL << huge_page_shift(h);
        mask = size - 1;
        if (range->page_shift != PAGE_SHIFT) {
                /* Make sure we are looking at a full page. */
                if (start & mask)
                        return -EINVAL;
                if (end < (start + size))
                        return -EINVAL;
                pfn_inc = size >> PAGE_SHIFT;
        } else {
                pfn_inc = 1;
                size = PAGE_SIZE;
        }


        ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
        entry = huge_ptep_get(pte);

        i = (start - range->start) >> range->page_shift;
        orig_pfn = range->pfns[i];
        range->pfns[i] = range->values[HMM_PFN_NONE];
        cpu_flags = pte_to_hmm_pfn_flags(range, entry);
        fault = write_fault = false;
        hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
                           &fault, &write_fault);
        if (fault || write_fault) {
                ret = -ENOENT;
                goto unlock;
        }

        pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
        for (; addr < end; addr += size, i++, pfn += pfn_inc)
                range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
                                 cpu_flags;
        hmm_vma_walk->last = end;

unlock:
        spin_unlock(ptl);

        if (ret == -ENOENT)
                return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

        return ret;
#else /* CONFIG_HUGETLB_PAGE */
        return -EINVAL;
#endif
}

static void hmm_pfns_clear(struct hmm_range *range,
                           uint64_t *pfns,
                           unsigned long addr,
                           unsigned long end)
{
        for (; addr < end; addr += PAGE_SIZE, pfns++)
                *pfns = range->values[HMM_PFN_NONE];
}

/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mirror: the mirror against which the range is registered
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * @page_shift: expected page shift for the range
 * Returns 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table, see include/linux/hmm.h
 */
int hmm_range_register(struct hmm_range *range,
                       struct hmm_mirror *mirror,
                       unsigned long start,
                       unsigned long end,
                       unsigned page_shift)
{
        unsigned long mask = ((1UL << page_shift) - 1UL);
        struct hmm *hmm = mirror->hmm;

        range->valid = false;
        range->hmm = NULL;

        if ((start & mask) || (end & mask))
                return -EINVAL;
        if (start >= end)
                return -EINVAL;

        range->page_shift = page_shift;
        range->start = start;
        range->end = end;

        /* Prevent hmm_release() from running while the range is valid */
        if (!mmget_not_zero(hmm->mm))
                return -EFAULT;

        /* Initialize range to track CPU page table updates. */
        mutex_lock(&hmm->lock);

        range->hmm = hmm;
        kref_get(&hmm->kref);
        list_add(&range->list, &hmm->ranges);

        /*
         * If there are any concurrent notifiers we have to wait for them for
         * the range to be valid (see hmm_range_wait_until_valid()).
         */
        if (!hmm->notifiers)
                range->valid = true;
        mutex_unlock(&hmm->lock);

        return 0;
}
EXPORT_SYMBOL(hmm_range_register);
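
/*
 * Illustrative sketch (editorial addition): after registering, a caller
 * typically waits for the range to become valid before snapshotting or
 * faulting it; DRIVER_TIMEOUT_MS is a hypothetical, driver-chosen timeout
 * in milliseconds:
 *
 *	ret = hmm_range_register(&range, mirror, start, end, PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	if (!hmm_range_wait_until_valid(&range, DRIVER_TIMEOUT_MS)) {
 *		// A concurrent invalidation is still in flight; retry
 *		// or give up.
 *	}
 */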

/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
        struct hmm *hmm = range->hmm;

        mutex_lock(&hmm->lock);
        list_del_init(&range->list);
        mutex_unlock(&hmm->lock);

        /* Drop reference taken by hmm_range_register() */
        mmput(hmm->mm);
        hmm_put(hmm);

        /*
         * The range is now invalid and the ref on the hmm is dropped, so
         * poison the pointer. Leave other fields in place, for the caller's
         * use.
         */
        range->valid = false;
        memset(&range->hmm, POISON_INUSE, sizeof(range->hmm));
}
EXPORT_SYMBOL(hmm_range_unregister);

/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 * @range: range
 * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          permission (for instance asking for write and range is read only),
 *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
 *          vma or it is illegal to access that range), number of valid pages
 *          in range->pfns[] (from range start address).
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by range struct. See include/linux/hmm.h for an example
 * of how to use it.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
        const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
        unsigned long start = range->start, end;
        struct hmm_vma_walk hmm_vma_walk;
        struct hmm *hmm = range->hmm;
        struct vm_area_struct *vma;
        struct mm_walk mm_walk;

        lockdep_assert_held(&hmm->mm->mmap_sem);
        do {
                /* If range is no longer valid force retry. */
                if (!range->valid)
                        return -EAGAIN;

                vma = find_vma(hmm->mm, start);
                if (vma == NULL || (vma->vm_flags & device_vma))
                        return -EFAULT;

                if (is_vm_hugetlb_page(vma)) {
                        if (huge_page_shift(hstate_vma(vma)) !=
                                    range->page_shift &&
                            range->page_shift != PAGE_SHIFT)
                                return -EINVAL;
                } else {
                        if (range->page_shift != PAGE_SHIFT)
                                return -EINVAL;
                }

                if (!(vma->vm_flags & VM_READ)) {
                        /*
                         * If the vma does not allow read access, then assume
                         * that it does not allow write access either. HMM
                         * does not support architectures that allow write
                         * without read.
                         */
                        hmm_pfns_clear(range, range->pfns,
                                       range->start, range->end);
                        return -EPERM;
                }

                range->vma = vma;
                hmm_vma_walk.pgmap = NULL;
                hmm_vma_walk.last = start;
                hmm_vma_walk.fault = false;
                hmm_vma_walk.range = range;
                mm_walk.private = &hmm_vma_walk;
                end = min(range->end, vma->vm_end);

                mm_walk.vma = vma;
                mm_walk.mm = vma->vm_mm;
                mm_walk.pte_entry = NULL;
                mm_walk.test_walk = NULL;
                mm_walk.hugetlb_entry = NULL;
                mm_walk.pud_entry = hmm_vma_walk_pud;
                mm_walk.pmd_entry = hmm_vma_walk_pmd;
                mm_walk.pte_hole = hmm_vma_walk_hole;
                mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

                walk_page_range(start, end, &mm_walk);
                start = end;
        } while (start < range->end);

        return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);
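
/*
 * Illustrative sketch (editorial addition), following the usage pattern
 * described in Documentation/vm/hmm.rst; driver_update_device_page_table()
 * and the driver update lock (take_lock()/release_lock()) are hypothetical
 * driver-side names:
 *
 *	hmm_range_register(&range, mirror, start, end, PAGE_SHIFT);
 *
 * again:
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_snapshot(&range);
 *	if (ret < 0) {
 *		up_read(&mm->mmap_sem);
 *		if (ret == -EAGAIN)
 *			goto again;
 *		hmm_range_unregister(&range);
 *		return ret;
 *	}
 *	take_lock(driver->update);
 *	if (!hmm_range_valid(&range)) {
 *		release_lock(driver->update);
 *		up_read(&mm->mmap_sem);
 *		goto again;
 *	}
 *	driver_update_device_page_table(driver, &range);
 *	release_lock(driver->update);
 *	up_read(&mm->mmap_sem);
 *	hmm_range_unregister(&range);
 */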

/*
 * hmm_range_fault() - try to fault some addresses in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *          mmap_sem)
 * Return: number of valid pages in range->pfns[] (from range start
 *          address). This may be zero. If the return value is negative,
 *          then one of the following values may be returned:
 *
 *           -EINVAL: invalid arguments or mm or virtual address is in an
 *                    invalid vma (for instance device file vma).
 *           -ENOMEM: Out of memory.
 *           -EPERM:  Invalid permission (for instance asking for write and
 *                    range is read only).
 *           -EAGAIN: If you need to retry and mmap_sem was dropped. This can
 *                    only happen if the block argument is false.
 *           -EBUSY:  If the range is being invalidated and you should wait
 *                    for the invalidation to finish.
 *           -EFAULT: Invalid (i.e. either no valid vma or it is illegal to
 *                    access that range), number of valid pages in
 *                    range->pfns[] (from range start address).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
        const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
        unsigned long start = range->start, end;
        struct hmm_vma_walk hmm_vma_walk;
        struct hmm *hmm = range->hmm;
        struct vm_area_struct *vma;
        struct mm_walk mm_walk;
        int ret;

        lockdep_assert_held(&hmm->mm->mmap_sem);

        do {
                /* If range is no longer valid force retry. */
                if (!range->valid) {
                        up_read(&hmm->mm->mmap_sem);
                        return -EAGAIN;
                }

                vma = find_vma(hmm->mm, start);
                if (vma == NULL || (vma->vm_flags & device_vma))
                        return -EFAULT;

                if (is_vm_hugetlb_page(vma)) {
                        if (huge_page_shift(hstate_vma(vma)) !=
                            range->page_shift &&
                            range->page_shift != PAGE_SHIFT)
                                return -EINVAL;
                } else {
                        if (range->page_shift != PAGE_SHIFT)
                                return -EINVAL;
                }

                if (!(vma->vm_flags & VM_READ)) {
                        /*
                         * If the vma does not allow read access, then assume
                         * that it does not allow write access either. HMM
                         * does not support architectures that allow write
                         * without read.
                         */
                        hmm_pfns_clear(range, range->pfns,
                                       range->start, range->end);
                        return -EPERM;
                }

                range->vma = vma;
                hmm_vma_walk.pgmap = NULL;
                hmm_vma_walk.last = start;
                hmm_vma_walk.fault = true;
                hmm_vma_walk.block = block;
                hmm_vma_walk.range = range;
                mm_walk.private = &hmm_vma_walk;
                end = min(range->end, vma->vm_end);

                mm_walk.vma = vma;
                mm_walk.mm = vma->vm_mm;
                mm_walk.pte_entry = NULL;
                mm_walk.test_walk = NULL;
                mm_walk.hugetlb_entry = NULL;
                mm_walk.pud_entry = hmm_vma_walk_pud;
                mm_walk.pmd_entry = hmm_vma_walk_pmd;
                mm_walk.pte_hole = hmm_vma_walk_hole;
                mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

                do {
                        ret = walk_page_range(start, end, &mm_walk);
                        start = hmm_vma_walk.last;

                        /* Keep trying while the range is valid. */
                } while (ret == -EBUSY && range->valid);

                if (ret) {
                        unsigned long i;

                        i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
                        hmm_pfns_clear(range, &range->pfns[i],
                                       hmm_vma_walk.last, range->end);
                        return ret;
                }
                start = end;

        } while (start < range->end);

        return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
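
/*
 * Illustrative sketch (editorial addition): hmm_range_fault() follows the
 * same register/retry pattern shown after hmm_range_snapshot() above,
 * except that missing pages are faulted in according to
 * range->default_flags and range->pfn_flags_mask. For example, to fault
 * every page with at least read permission:
 *
 *	range.default_flags = range.flags[HMM_PFN_VALID];
 *	range.pfn_flags_mask = 0;
 *	ret = hmm_range_fault(&range, true);
 *	if (ret < 0) {
 *		// -EBUSY and -EAGAIN mean retry; other negative values
 *		// are fatal for this range.
 *	}
 */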

/**
 * hmm_range_dma_map() - hmm_range_fault() and dma map page all in one.
 * @range: range being faulted
 * @device: device against which to dma map pages
 * @daddrs: dma address of mapped pages
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *          mmap_sem)
 * Return: number of pages mapped on success, -EAGAIN if mmap_sem has been
 *          dropped and you need to try again, some other error value
 *          otherwise
 *
 * Note same usage pattern as hmm_range_fault().
 */
long hmm_range_dma_map(struct hmm_range *range,
                       struct device *device,
                       dma_addr_t *daddrs,
                       bool block)
{
        unsigned long i, npages, mapped;
        long ret;

        ret = hmm_range_fault(range, block);
        if (ret <= 0)
                return ret ? ret : -EBUSY;

        npages = (range->end - range->start) >> PAGE_SHIFT;
        for (i = 0, mapped = 0; i < npages; ++i) {
                enum dma_data_direction dir = DMA_TO_DEVICE;
                struct page *page;

                /*
                 * FIXME need to update DMA API to provide invalid DMA address
                 * value instead of a function to test dma address value. This
                 * would remove a lot of dumb code duplicated across many
                 * architectures.
                 *
                 * For now setting it to 0 here is good enough as the pfns[]
                 * value is what is used to check what is valid and what isn't.
                 */
                daddrs[i] = 0;

                page = hmm_device_entry_to_page(range, range->pfns[i]);
                if (page == NULL)
                        continue;

                /* Check if range is being invalidated */
                if (!range->valid) {
                        ret = -EBUSY;
                        goto unmap;
                }

                /* If it is read and write then map bi-directional. */
                if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
                        dir = DMA_BIDIRECTIONAL;

                daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
                if (dma_mapping_error(device, daddrs[i])) {
                        ret = -EFAULT;
                        goto unmap;
                }

                mapped++;
        }

        return mapped;

unmap:
        for (npages = i, i = 0; (i < npages) && mapped; ++i) {
                enum dma_data_direction dir = DMA_TO_DEVICE;
                struct page *page;

                page = hmm_device_entry_to_page(range, range->pfns[i]);
                if (page == NULL)
                        continue;

                if (dma_mapping_error(device, daddrs[i]))
                        continue;

                /* If it is read and write then it was mapped bi-directional. */
                if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
                        dir = DMA_BIDIRECTIONAL;

                dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
                mapped--;
        }

        return ret;
}
EXPORT_SYMBOL(hmm_range_dma_map);
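
/*
 * Illustrative sketch (editorial addition): a caller pairs
 * hmm_range_dma_map() with hmm_range_dma_unmap() against the same daddrs
 * array, which is sized like range->pfns (one entry per page in the range);
 * was_written is a hypothetical driver-tracked flag:
 *
 *	npages = (range.end - range.start) >> PAGE_SHIFT;
 *	daddrs = kcalloc(npages, sizeof(*daddrs), GFP_KERNEL);
 *	...
 *	mapped = hmm_range_dma_map(&range, device, daddrs, true);
 *	if (mapped < 0)
 *		goto err;
 *	// Program the device with daddrs[], then later:
 *	hmm_range_dma_unmap(&range, NULL, device, daddrs, was_written);
 */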

/**
 * hmm_range_dma_unmap() - unmap range that was mapped with hmm_range_dma_map()
 * @range: range being unmapped
 * @vma: the vma against which the range applies (optional)
 * @device: device against which dma map was done
 * @daddrs: dma address of mapped pages
 * @dirty: dirty page if it had the write flag set
 * Return: number of pages unmapped on success, -EINVAL otherwise
 *
 * Note that the caller MUST abide by mmu notifiers, or use an HMM mirror and
 * abide by the sync_cpu_device_pagetables() callback, so that it is safe here
 * to call set_page_dirty(). The caller must also take appropriate locks to
 * avoid concurrent mmu notifier or sync_cpu_device_pagetables() progress.
 */
long hmm_range_dma_unmap(struct hmm_range *range,
                         struct vm_area_struct *vma,
                         struct device *device,
                         dma_addr_t *daddrs,
                         bool dirty)
{
        unsigned long i, npages;
        long cpages = 0;

        /* Sanity check. */
        if (range->end <= range->start)
                return -EINVAL;
        if (!daddrs)
                return -EINVAL;
        if (!range->pfns)
                return -EINVAL;

        npages = (range->end - range->start) >> PAGE_SHIFT;
        for (i = 0; i < npages; ++i) {
                enum dma_data_direction dir = DMA_TO_DEVICE;
                struct page *page;

                page = hmm_device_entry_to_page(range, range->pfns[i]);
                if (page == NULL)
                        continue;

                /* If it is read and write then it was mapped bi-directional. */
                if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
                        dir = DMA_BIDIRECTIONAL;

                        /*
                         * See comments in function description on why it is
                         * safe here to call set_page_dirty()
                         */
                        if (dirty)
                                set_page_dirty(page);
                }

                /* Unmap and clear pfns/dma address */
                dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
                range->pfns[i] = range->values[HMM_PFN_NONE];
                /* FIXME see comments in hmm_range_dma_map() */
                daddrs[i] = 0;
                cpages++;
        }

        return cpages;
}
EXPORT_SYMBOL(hmm_range_dma_unmap);
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
                                       unsigned long addr)
{
        struct page *page;

        page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
        if (!page)
                return NULL;
        lock_page(page);
        return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);


static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
        struct hmm_devmem *devmem;

        devmem = container_of(ref, struct hmm_devmem, ref);
        complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
        struct percpu_ref *ref = data;
        struct hmm_devmem *devmem;

        devmem = container_of(ref, struct hmm_devmem, ref);
        wait_for_completion(&devmem->completion);
        percpu_ref_exit(ref);
}

static void hmm_devmem_ref_kill(struct percpu_ref *ref)
{
        percpu_ref_kill(ref);
}

static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
                                   unsigned long addr,
                                   const struct page *page,
                                   unsigned int flags,
                                   pmd_t *pmdp)
{
        struct hmm_devmem *devmem = page->pgmap->data;

        return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
        struct hmm_devmem *devmem = data;

        page->mapping = NULL;

        devmem->ops->free(devmem, page);
}

/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Return: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical address big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by the hmm_devmem_ops struct.
 *
 * The device driver should call this function during device initialization
 * and is then responsible for memory management. HMM only provides helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
                                  struct device *device,
                                  unsigned long size)
{
        struct hmm_devmem *devmem;
        resource_size_t addr;
        void *result;
        int ret;

        dev_pagemap_get_ops();

        devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
        if (!devmem)
                return ERR_PTR(-ENOMEM);

        init_completion(&devmem->completion);
        devmem->pfn_first = -1UL;
        devmem->pfn_last = -1UL;
        devmem->resource = NULL;
        devmem->device = device;
        devmem->ops = ops;

        ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
                              0, GFP_KERNEL);
        if (ret)
                return ERR_PTR(ret);

        ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
        if (ret)
                return ERR_PTR(ret);

        size = ALIGN(size, PA_SECTION_SIZE);
        addr = min((unsigned long)iomem_resource.end,
                   (1UL << MAX_PHYSMEM_BITS) - 1);
        addr = addr - size + 1UL;

        /*
         * FIXME add a new helper to quickly walk resource tree and find free
         * range
         *
         * FIXME what about ioport_resource resource ?
         */
        for (; addr > size && addr >= iomem_resource.start; addr -= size) {
                ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
                if (ret != REGION_DISJOINT)
                        continue;

                devmem->resource = devm_request_mem_region(device, addr, size,
                                                           dev_name(device));
                if (!devmem->resource)
                        return ERR_PTR(-ENOMEM);
                break;
        }
        if (!devmem->resource)
                return ERR_PTR(-ERANGE);

        devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
        devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
        devmem->pfn_last = devmem->pfn_first +
                           (resource_size(devmem->resource) >> PAGE_SHIFT);
        devmem->page_fault = hmm_devmem_fault;

        devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
        devmem->pagemap.res = *devmem->resource;
        devmem->pagemap.page_free = hmm_devmem_free;
        devmem->pagemap.altmap_valid = false;
        devmem->pagemap.ref = &devmem->ref;
        devmem->pagemap.data = devmem;
        devmem->pagemap.kill = hmm_devmem_ref_kill;

        result = devm_memremap_pages(devmem->device, &devmem->pagemap);
        if (IS_ERR(result))
                return result;
        return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add);
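
/*
 * Illustrative sketch (editorial addition): a driver hotplugs its device
 * memory once at probe time; all driver_* names are hypothetical. The fault
 * callback migrates a device page back to system memory on CPU access, and
 * the free callback recycles the page in the driver's allocator:
 *
 *	static const struct hmm_devmem_ops driver_devmem_ops = {
 *		.free = driver_devmem_free,
 *		.fault = driver_devmem_fault,
 *	};
 *
 *	devmem = hmm_devmem_add(&driver_devmem_ops, &pdev->dev,
 *				DRIVER_MEMORY_SIZE);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 *	// Device pages span devmem->pfn_first .. devmem->pfn_last - 1.
 */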

struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
                                           struct device *device,
                                           struct resource *res)
{
        struct hmm_devmem *devmem;
        void *result;
        int ret;

        if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
                return ERR_PTR(-EINVAL);

        dev_pagemap_get_ops();

        devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
        if (!devmem)
                return ERR_PTR(-ENOMEM);

        init_completion(&devmem->completion);
        devmem->pfn_first = -1UL;
        devmem->pfn_last = -1UL;
        devmem->resource = res;
        devmem->device = device;
        devmem->ops = ops;

        ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
                              0, GFP_KERNEL);
        if (ret)
                return ERR_PTR(ret);

        ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
                                       &devmem->ref);
        if (ret)
                return ERR_PTR(ret);

        devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
        devmem->pfn_last = devmem->pfn_first +
                           (resource_size(devmem->resource) >> PAGE_SHIFT);
        devmem->page_fault = hmm_devmem_fault;

        devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
        devmem->pagemap.res = *devmem->resource;
        devmem->pagemap.page_free = hmm_devmem_free;
        devmem->pagemap.altmap_valid = false;
        devmem->pagemap.ref = &devmem->ref;
        devmem->pagemap.data = devmem;
        devmem->pagemap.kill = hmm_devmem_ref_kill;

        result = devm_memremap_pages(devmem->device, &devmem->pagemap);
        if (IS_ERR(result))
                return result;
        return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);

/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper
 * and it is not needed to make use of any HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;

static void hmm_device_release(struct device *device)
{
        struct hmm_device *hmm_device;

        hmm_device = container_of(device, struct hmm_device, device);
        spin_lock(&hmm_device_lock);
        clear_bit(hmm_device->minor, hmm_device_mask);
        spin_unlock(&hmm_device_lock);

        kfree(hmm_device);
}

struct hmm_device *hmm_device_new(void *drvdata)
{
        struct hmm_device *hmm_device;

        hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
        if (!hmm_device)
                return ERR_PTR(-ENOMEM);

        spin_lock(&hmm_device_lock);
        hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
        if (hmm_device->minor >= HMM_DEVICE_MAX) {
                spin_unlock(&hmm_device_lock);
                kfree(hmm_device);
                return ERR_PTR(-EBUSY);
        }
        set_bit(hmm_device->minor, hmm_device_mask);
        spin_unlock(&hmm_device_lock);

        dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
        hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
                                        hmm_device->minor);
        hmm_device->device.release = hmm_device_release;
        dev_set_drvdata(&hmm_device->device, drvdata);
        hmm_device->device.class = hmm_device_class;
        device_initialize(&hmm_device->device);

        return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);

void hmm_device_put(struct hmm_device *hmm_device)
{
        put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);

static int __init hmm_init(void)
{
        int ret;

        ret = alloc_chrdev_region(&hmm_device_devt, 0,
                                  HMM_DEVICE_MAX,
                                  "hmm_device");
        if (ret)
                return ret;

        hmm_device_class = class_create(THIS_MODULE, "hmm_device");
        if (IS_ERR(hmm_device_class)) {
                unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
                return PTR_ERR(hmm_device_class);
        }
        return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */