/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>

struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_device *dev;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct list_head linear;
	bool has_linear;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct interval_tree_node it;
	struct list_head link;
	struct drm_i915_gem_object *obj;
	struct work_struct work;
	bool active;
	bool is_linear;
};

static void __cancel_userptr__worker(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	if (obj->pages != NULL) {
		struct drm_i915_private *dev_priv = to_i915(dev);
		struct i915_vma *vma, *tmp;
		bool was_interruptible;

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
			int ret = i915_vma_unbind(vma);
			WARN_ON(ret && ret != -EIO);
		}
		WARN_ON(i915_gem_object_put_pages(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
}

static unsigned long cancel_userptr(struct i915_mmu_object *mo)
{
	unsigned long end = mo->obj->userptr.ptr + mo->obj->base.size;

	/* The mmu_object is released late when destroying the
	 * GEM object so it is entirely possible to gain a
	 * reference on an object in the process of being freed
	 * since our serialisation is via the spinlock and not
	 * the struct_mutex - and consequently use it after it
	 * is freed and then double free it.
	 */
	if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) {
		schedule_work(&mo->work);
		/* only schedule one work packet to avoid the refleak */
		mo->active = false;
	}

	return end;
}
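
/*
 * A minimal sketch of the kref_get_unless_zero() idiom used above, for
 * taking a reference on an object found via a lookup structure that is
 * guarded only by a spinlock. The types here are hypothetical, purely
 * for illustration:
 *
 *	spin_lock(&cache->lock);
 *	obj = lookup(cache, key);
 *	if (obj && !kref_get_unless_zero(&obj->refcount))
 *		obj = NULL;	(refcount already zero: object is dying)
 *	spin_unlock(&cache->lock);
 *
 * If the refcount has already dropped to zero the object is mid-teardown
 * and must be treated as if the lookup had failed; a plain kref_get()
 * there would resurrect a dying object and lead to a double free.
 */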

static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	if (mn->has_linear) {
		list_for_each_entry(mo, &mn->linear, link) {
			if (mo->it.last < start || mo->it.start > end)
				continue;

			cancel_userptr(mo);
		}
	} else {
		struct interval_tree_node *it;

		it = interval_tree_iter_first(&mn->objects, start, end);
		while (it) {
			mo = container_of(it, struct i915_mmu_object, it);
			start = cancel_userptr(mo);
			it = interval_tree_iter_next(it, start, end);
		}
	}
	spin_unlock(&mn->lock);
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};
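
/*
 * For reference, the generic interval tree iterators used above visit
 * every node whose [start, last] range overlaps the query, with both
 * bounds inclusive (hence the end-- on entry to the handler). A minimal
 * sketch, assuming a hypothetical struct foo embedding a struct
 * interval_tree_node named "it":
 *
 *	struct interval_tree_node *it;
 *
 *	for (it = interval_tree_iter_first(&root, start, last);
 *	     it != NULL;
 *	     it = interval_tree_iter_next(it, start, last)) {
 *		struct foo *f = container_of(it, struct foo, it);
 *		(f->it overlaps [start, last] here)
 *	}
 */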

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int ret;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT;
	INIT_LIST_HEAD(&mn->linear);
	mn->has_linear = false;

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mn->mn, mm);
	if (ret) {
		kfree(mn);
		return ERR_PTR(ret);
	}

	return mn;
}

static int
i915_mmu_notifier_add(struct drm_device *dev,
		      struct i915_mmu_notifier *mn,
		      struct i915_mmu_object *mo)
{
	struct interval_tree_node *it;
	int ret = 0;

	/* By this point we have already done a lot of expensive setup that
	 * we do not want to repeat just because the caller (e.g. X) has a
	 * signal pending (and partly because of that expensive setup, X
	 * using an interrupt timer is likely to get stuck in an EINTR loop).
	 */
	mutex_lock(&dev->struct_mutex);

	/* Make sure we drop the final active reference (and thereby
	 * remove the objects from the interval tree) before we do
	 * the check for overlapping objects.
	 */
	i915_gem_retire_requests(dev);

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects,
				      mo->it.start, mo->it.last);
	if (it) {
		struct drm_i915_gem_object *obj;

		/* We only need to check the first object in the range as it
		 * either has cancelled gup work queued and we need to
		 * return back to the user to give time for the gup-workers
		 * to flush their object references upon which the object will
		 * be removed from the interval-tree, or the range is
		 * still in use by another client and the overlap is invalid.
		 *
		 * If we do have an overlap, we cannot use the interval tree
		 * for fast range invalidation.
		 */

		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!obj->userptr.workers)
			mn->has_linear = mo->is_linear = true;
		else
			ret = -EAGAIN;
	} else
		interval_tree_insert(&mo->it, &mn->objects);

	if (ret == 0)
		list_add(&mo->link, &mn->linear);

	spin_unlock(&mn->lock);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
{
	struct i915_mmu_object *mo;

	list_for_each_entry(mo, &mn->linear, link)
		if (mo->is_linear)
			return true;

	return false;
}

static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
		      struct i915_mmu_object *mo)
{
	spin_lock(&mn->lock);
	list_del(&mo->link);
	if (mo->is_linear)
		mn->has_linear = i915_mmu_notifier_has_linear(mn);
	else
		interval_tree_remove(&mo->it, &mn->objects);
	spin_unlock(&mn->lock);
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	i915_mmu_notifier_del(mo->mn, mo);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}

static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&to_i915(mm->dev)->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&to_i915(mm->dev)->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}
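
/*
 * i915_mmu_notifier_find() is an instance of double-checked locking: the
 * unlocked read of mm->mn catches the common case where the notifier
 * already exists, and the test is repeated under dev_priv->mm_lock before
 * creating a new one, so two racing callers cannot both register a
 * notifier for the same mm. Schematically:
 *
 *	if (mm->mn)			(fast path, no locks held)
 *		return mm->mn;
 *	lock();
 *	if (mm->mn == NULL)		(re-check under the lock)
 *		mm->mn = create();
 *	unlock();
 *	return mm->mn;
 */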

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;
	int ret;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = mo->it.start + obj->base.size - 1;
	mo->obj = obj;
	INIT_WORK(&mo->work, __cancel_userptr__worker);

	ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
	if (ret) {
		kfree(mo);
		return ret;
	}

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}
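
/*
 * dev_priv->mm_structs is a fixed-size kernel hashtable (initialised with
 * hash_init() in i915_gem_init_userptr() below), keyed by the mm_struct
 * pointer. hash_for_each_possible() walks only the bucket that the key
 * hashes into, so the mm->mm == real comparison is still needed to reject
 * entries that merely collided into the same bucket.
 */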

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->dev = obj->base.dev;

		mm->mm = current->mm;
		atomic_inc(&current->mm->mm_count);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&to_i915(mm->dev)->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	schedule_work(&mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}
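
/*
 * kref_put_mutex() above acquires to_i915(dev)->mm_lock only if the
 * refcount is about to drop to zero, and then invokes the release
 * function with that mutex held - which is why __i915_mm_struct_free()
 * must unlock it after unhashing the entry. The general shape of the
 * pattern, sketched with a hypothetical object type:
 *
 *	static void release(struct kref *kref)
 *	{
 *		struct obj *o = container_of(kref, struct obj, kref);
 *		hash_del(&o->node);	(still under lookup_lock)
 *		mutex_unlock(&lookup_lock);
 *		kfree(o);
 *	}
 *
 *	kref_put_mutex(&o->kref, release, &lookup_lock);
 *
 * This closes the race between the final put and a concurrent lookup
 * that could otherwise find a zero-referenced entry in the table.
 */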

struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}
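
/*
 * The two branches above differ in segment layout: sg_alloc_table_from_pages()
 * coalesces physically contiguous pages into longer scatterlist segments,
 * whereas the swiotlb path keeps strictly one page per segment, as swiotlb
 * bounce buffering cannot be relied upon to map the larger coalesced
 * segments.
 */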

static int
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
			     struct page **pvec, int num_pages)
{
	int ret;

	ret = st_set_pages(&obj->pages, pvec, num_pages);
	if (ret)
		return ret;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		sg_free_table(obj->pages);
		kfree(obj->pages);
		obj->pages = NULL;
	}

	return ret;
}

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value || !work_pending(&obj->userptr.mmu_object->work))
		obj->userptr.mmu_object->active = value;
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}

static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kmalloc(npages*sizeof(struct page *),
		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (pvec == NULL)
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;

		down_read(&mm->mmap_sem);
		while (pinned < npages) {
			ret = get_user_pages(work->task, mm,
					     obj->userptr.ptr + pinned * PAGE_SIZE,
					     npages - pinned,
					     !obj->userptr.read_only, 0,
					     pvec + pinned, NULL);
			if (ret < 0)
				break;

			pinned += ret;
		}
		up_read(&mm->mmap_sem);
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work == &work->work) {
		if (pinned == npages) {
			ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
			if (ret == 0) {
				list_add_tail(&obj->global_list,
					      &to_i915(dev)->mm.unbound_list);
				obj->get_page.sg = obj->pages->sgl;
				obj->get_page.last = 0;
				pinned = 0;
			}
		}
		obj->userptr.work = ERR_PTR(ret);
		if (ret)
			__i915_gem_userptr_set_active(obj, false);
	}

	obj->userptr.workers--;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}
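
/*
 * Note the pinning loop above: get_user_pages() may legitimately return
 * after pinning only some of the requested pages, so it is called in a
 * loop, advancing by the number of pages pinned so far, until the whole
 * range is resolved or an error (ret < 0) is reported.
 */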

static int
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
				      bool *active)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
		return -EAGAIN;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	obj->userptr.work = &work->work;
	obj->userptr.workers++;

	work->obj = obj;
	drm_gem_object_reference(&obj->base);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	schedule_work(&work->work);

	*active = true;
	return -EAGAIN;
}

static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;
	bool active;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */
	if (IS_ERR(obj->userptr.work)) {
		/* active flag will have been dropped already by the worker */
		ret = PTR_ERR(obj->userptr.work);
		obj->userptr.work = NULL;
		return ret;
	}
	if (obj->userptr.work)
		/* active flag should still be held for the pending work */
		return -EAGAIN;

	/* Let the mmu-notifier know that we have begun and need cancellation */
	ret = __i915_gem_userptr_set_active(obj, true);
	if (ret)
		return ret;

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm->mm == current->mm) {
		pvec = kmalloc(num_pages*sizeof(struct page *),
			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
		if (pvec == NULL) {
			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
			if (pvec == NULL) {
				__i915_gem_userptr_set_active(obj, false);
				return -ENOMEM;
			}
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}

	active = false;
	if (pinned < 0)
		ret = pinned, pinned = 0;
	else if (pinned < num_pages)
		ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
	else
		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
	if (ret) {
		__i915_gem_userptr_set_active(obj, active);
		release_pages(pvec, pinned, 0);
	}
	drm_free_large(pvec);
	return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	i915_gem_gtt_finish_object(obj);

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
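
/*
 * A minimal sketch of how userspace drives this ioctl (not part of this
 * file; error handling elided, and both the pointer and size must be
 * page-aligned, hence posix_memalign and a page-multiple size):
 *
 *	struct drm_i915_gem_userptr arg;
 *	void *ptr;
 *
 *	posix_memalign(&ptr, 4096, size);
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.user_ptr = (uintptr_t)ptr;
 *	arg.user_size = size;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg);
 *
 * On success arg.handle is an ordinary GEM handle usable with execbuffer,
 * but, per the restrictions above, not with mmap, pwrite or pread, and it
 * cannot back a framebuffer.
 */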

int
i915_gem_init_userptr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);
	return 0;
}