/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>

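/*
 * One i915_mm_struct is kept per process address space that owns userptr
 * objects. It is looked up in dev_priv->mm_structs, shared via its kref by
 * every userptr object created from that mm, and it owns the optional
 * i915_mmu_notifier. The final unreference is deferred to a worker so the
 * mm can be released without holding struct_mutex.
 */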
struct i915_mm_struct {
        struct mm_struct *mm;
        struct drm_i915_private *i915;
        struct i915_mmu_notifier *mn;
        struct hlist_node node;
        struct kref kref;
        struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

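/*
 * The i915_mmu_notifier hooks into the process address space: it keeps an
 * interval tree (under @lock) of the userptr ranges currently tracked as
 * active, plus a dedicated workqueue on which those objects are cancelled
 * when their range is invalidated. Each userptr object contributes one
 * i915_mmu_object node to that tree whilst it is attached.
 */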
struct i915_mmu_notifier {
        spinlock_t lock;
        struct hlist_node node;
        struct mmu_notifier mn;
        struct rb_root objects;
        struct workqueue_struct *wq;
};

struct i915_mmu_object {
        struct i915_mmu_notifier *mn;
        struct drm_i915_gem_object *obj;
        struct interval_tree_node it;
        struct list_head link;
        struct work_struct work;
        bool attached;
};

static void wait_rendering(struct drm_i915_gem_object *obj)
{
        unsigned long active = __I915_BO_ACTIVE(obj);
        int idx;

        for_each_active(active, idx)
                i915_gem_active_wait_unlocked(&obj->last_read[idx],
                                              0, NULL, NULL);
}

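/*
 * Worker run from mn->wq when an invalidation hits an object: wait for any
 * outstanding rendering, then take struct_mutex to unbind the object and
 * release its pages, forcing the next use to repeat the get_user_pages.
 */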
static void cancel_userptr(struct work_struct *work)
{
        struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
        struct drm_i915_gem_object *obj = mo->obj;
        struct drm_device *dev = obj->base.dev;

        wait_rendering(obj);

        mutex_lock(&dev->struct_mutex);
        /* Cancel any active worker and force us to re-evaluate gup */
        obj->userptr.work = NULL;

        if (obj->pages != NULL) {
                /* We are inside a kthread context and can't be interrupted */
                WARN_ON(i915_gem_object_unbind(obj));
                WARN_ON(i915_gem_object_put_pages(obj));
        }

        i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);
}

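/*
 * Insert or remove an object's range in the notifier's interval tree;
 * callers are expected to hold mn->lock.
 */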
static void add_object(struct i915_mmu_object *mo)
{
        if (mo->attached)
                return;

        interval_tree_insert(&mo->it, &mo->mn->objects);
        mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
        if (!mo->attached)
                return;

        interval_tree_remove(&mo->it, &mo->mn->objects);
        mo->attached = false;
}

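/*
 * mmu_notifier callback: a range of the process address space is about to
 * be invalidated. Queue cancel_userptr() for every tracked object that
 * overlaps the range, detach those objects from the interval tree, and
 * flush the workqueue so the GPU has released the pages before the
 * invalidation proceeds.
 */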
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                                                       struct mm_struct *mm,
                                                       unsigned long start,
                                                       unsigned long end)
{
        struct i915_mmu_notifier *mn =
                container_of(_mn, struct i915_mmu_notifier, mn);
        struct i915_mmu_object *mo;
        struct interval_tree_node *it;
        LIST_HEAD(cancelled);

        if (RB_EMPTY_ROOT(&mn->objects))
                return;

        /* interval ranges are inclusive, but invalidate range is exclusive */
        end--;

        spin_lock(&mn->lock);
        it = interval_tree_iter_first(&mn->objects, start, end);
        while (it) {
                /* The mmu_object is released late when destroying the
                 * GEM object so it is entirely possible to gain a
                 * reference on an object in the process of being freed
                 * since our serialisation is via the spinlock and not
                 * the struct_mutex - and consequently use it after it
                 * is freed and then double free it. To prevent that
                 * use-after-free we only acquire a reference on the
                 * object if it is not in the process of being destroyed.
                 */
                mo = container_of(it, struct i915_mmu_object, it);
                if (kref_get_unless_zero(&mo->obj->base.refcount))
                        queue_work(mn->wq, &mo->work);

                list_add(&mo->link, &cancelled);
                it = interval_tree_iter_next(it, start, end);
        }
        list_for_each_entry(mo, &cancelled, link)
                del_object(mo);
        spin_unlock(&mn->lock);

        flush_workqueue(mn->wq);
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
        .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

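/*
 * Allocate a notifier with its own unbound workqueue and register it with
 * the core MMU notifier machinery. The caller holds mmap_sem for write,
 * hence the __mmu_notifier_register() variant.
 */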
static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
        struct i915_mmu_notifier *mn;
        int ret;

        mn = kmalloc(sizeof(*mn), GFP_KERNEL);
        if (mn == NULL)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT;
        mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
        if (mn->wq == NULL) {
                kfree(mn);
                return ERR_PTR(-ENOMEM);
        }

        /* Protected by mmap_sem (write-lock) */
        ret = __mmu_notifier_register(&mn->mn, mm);
        if (ret) {
                destroy_workqueue(mn->wq);
                kfree(mn);
                return ERR_PTR(ret);
        }

        return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
        struct i915_mmu_object *mo;

        mo = obj->userptr.mmu_object;
        if (mo == NULL)
                return;

        spin_lock(&mo->mn->lock);
        del_object(mo);
        spin_unlock(&mo->mn->lock);
        kfree(mo);

        obj->userptr.mmu_object = NULL;
}

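/*
 * Return the notifier for this mm, creating and registering it on first
 * use. Creation is serialised by taking mmap_sem (write) and then
 * dev_priv->mm_lock, matching the locking expected by
 * i915_mmu_notifier_create().
 */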
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
        struct i915_mmu_notifier *mn = mm->mn;

        if (mn)
                return mn;

        down_write(&mm->mm->mmap_sem);
        mutex_lock(&mm->i915->mm_lock);
        if ((mn = mm->mn) == NULL) {
                mn = i915_mmu_notifier_create(mm->mm);
                if (!IS_ERR(mn))
                        mm->mn = mn;
        }
        mutex_unlock(&mm->i915->mm_lock);
        up_write(&mm->mm->mmap_sem);

        return mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        struct i915_mmu_notifier *mn;
        struct i915_mmu_object *mo;

        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

        if (WARN_ON(obj->userptr.mm == NULL))
                return -EINVAL;

        mn = i915_mmu_notifier_find(obj->userptr.mm);
        if (IS_ERR(mn))
                return PTR_ERR(mn);

        mo = kzalloc(sizeof(*mo), GFP_KERNEL);
        if (mo == NULL)
                return -ENOMEM;

        mo->mn = mn;
        mo->obj = obj;
        mo->it.start = obj->userptr.ptr;
        mo->it.last = obj->userptr.ptr + obj->base.size - 1;
        INIT_WORK(&mo->work, cancel_userptr);

        obj->userptr.mmu_object = mo;
        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
        if (mn == NULL)
                return;

        mmu_notifier_unregister(&mn->mn, mm);
        destroy_workqueue(mn->wq);
        kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
                return -ENODEV;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
}

#endif

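/* Look up the i915_mm_struct tracking @real; caller holds dev_priv->mm_lock. */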
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
        struct i915_mm_struct *mm;

        /* Protected by dev_priv->mm_lock */
        hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
                if (mm->mm == real)
                        return mm;

        return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_mm_struct *mm;
        int ret = 0;

        /* During release of the GEM object we hold the struct_mutex. This
         * precludes us from calling mmput() at that time as that may be
         * the last reference and so call exit_mmap(). exit_mmap() will
         * attempt to reap the vma, and if we were holding a GTT mmap
         * would then call drm_gem_vm_close() and attempt to reacquire
         * the struct mutex. So in order to avoid that recursion, we have
         * to defer releasing the mm reference until after we drop the
         * struct_mutex, i.e. we need to schedule a worker to do the clean
         * up.
         */
        mutex_lock(&dev_priv->mm_lock);
        mm = __i915_mm_struct_find(dev_priv, current->mm);
        if (mm == NULL) {
                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
                if (mm == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                kref_init(&mm->kref);
                mm->i915 = to_i915(obj->base.dev);

                mm->mm = current->mm;
                atomic_inc(&current->mm->mm_count);

                mm->mn = NULL;

                /* Protected by dev_priv->mm_lock */
                hash_add(dev_priv->mm_structs,
                         &mm->node, (unsigned long)mm->mm);
        } else
                kref_get(&mm->kref);

        obj->userptr.mm = mm;
out:
        mutex_unlock(&dev_priv->mm_lock);
        return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
        struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
        i915_mmu_notifier_free(mm->mn, mm->mm);
        mmdrop(mm->mm);
        kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
        struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

        /* Protected by dev_priv->mm_lock */
        hash_del(&mm->node);
        mutex_unlock(&mm->i915->mm_lock);

        INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
        schedule_work(&mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mm == NULL)
                return;

        kref_put_mutex(&obj->userptr.mm->kref,
                       __i915_mm_struct_free,
                       &to_i915(obj->base.dev)->mm_lock);
        obj->userptr.mm = NULL;
}

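/*
 * A deferred get_user_pages request: the pages are pinned from a worker so
 * that mmap_sem can be taken without inverting the mmap_sem -> struct_mutex
 * lock ordering (see __i915_gem_userptr_get_pages_schedule()).
 */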
struct get_pages_work {
        struct work_struct work;
        struct drm_i915_gem_object *obj;
        struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

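/*
 * Build the sg_table for the pinned pages. When swiotlb is active we keep
 * one page per scatterlist entry instead of coalescing contiguous pages.
 */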
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
        struct scatterlist *sg;
        int ret, n;

        *st = kmalloc(sizeof(**st), GFP_KERNEL);
        if (*st == NULL)
                return -ENOMEM;

        if (swiotlb_active()) {
                ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
                if (ret)
                        goto err;

                for_each_sg((*st)->sgl, sg, num_pages, n)
                        sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
        } else {
                ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
                                                0, num_pages << PAGE_SHIFT,
                                                GFP_KERNEL);
                if (ret)
                        goto err;
        }

        return 0;

err:
        kfree(*st);
        *st = NULL;
        return ret;
}

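/*
 * Install the pinned pages as the object's backing store and prepare them
 * for use with the GTT; on failure the sg_table is released again.
 */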
static int
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
                             struct page **pvec, int num_pages)
{
        int ret;

        ret = st_set_pages(&obj->pages, pvec, num_pages);
        if (ret)
                return ret;

        ret = i915_gem_gtt_prepare_object(obj);
        if (ret) {
                sg_free_table(obj->pages);
                kfree(obj->pages);
                obj->pages = NULL;
        }

        return ret;
}

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
                              bool value)
{
        int ret = 0;

        /* During mm_invalidate_range we need to cancel any userptr that
         * overlaps the range being invalidated. Doing so requires the
         * struct_mutex, and that risks recursion. In order to cause
         * recursion, the user must alias the userptr address space with
         * a GTT mmapping (possible with a MAP_FIXED) - then when we have
         * to invalidate that mmapping, mm_invalidate_range is called with
         * the userptr address *and* the struct_mutex held. To prevent that
         * we set a flag under the i915_mmu_notifier spinlock to indicate
         * whether this object is valid.
         */
#if defined(CONFIG_MMU_NOTIFIER)
        if (obj->userptr.mmu_object == NULL)
                return 0;

        spin_lock(&obj->userptr.mmu_object->mn->lock);
        /* In order to serialise get_pages with an outstanding
         * cancel_userptr, we must drop the struct_mutex and try again.
         */
        if (!value)
                del_object(obj->userptr.mmu_object);
        else if (!work_pending(&obj->userptr.mmu_object->work))
                add_object(obj->userptr.mmu_object);
        else
                ret = -EAGAIN;
        spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

        return ret;
}

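/*
 * Slow path: pin the remaining pages with get_user_pages_remote() under the
 * target mm's mmap_sem, then, unless the request has been cancelled or
 * superseded in the meantime, install them as the object's backing store.
 * The outcome (or error) is reported back through obj->userptr.work.
 */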
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
        struct get_pages_work *work = container_of(_work, typeof(*work), work);
        struct drm_i915_gem_object *obj = work->obj;
        struct drm_device *dev = obj->base.dev;
        const int npages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;

        ret = -ENOMEM;
        pinned = 0;

        pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;
                unsigned int flags = 0;

                if (!obj->userptr.read_only)
                        flags |= FOLL_WRITE;

                ret = -EFAULT;
                if (atomic_inc_not_zero(&mm->mm_users)) {
                        down_read(&mm->mmap_sem);
                        while (pinned < npages) {
                                ret = get_user_pages_remote
                                        (work->task, mm,
                                         obj->userptr.ptr + pinned * PAGE_SIZE,
                                         npages - pinned,
                                         flags,
                                         pvec + pinned, NULL);
                                if (ret < 0)
                                        break;

                                pinned += ret;
                        }
                        up_read(&mm->mmap_sem);
                        mmput(mm);
                }
        }

        mutex_lock(&dev->struct_mutex);
        if (obj->userptr.work == &work->work) {
                if (pinned == npages) {
                        ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
                        if (ret == 0) {
                                list_add_tail(&obj->global_list,
                                              &to_i915(dev)->mm.unbound_list);
                                obj->get_page.sg = obj->pages->sgl;
                                obj->get_page.last = 0;
                                pinned = 0;
                        }
                }
                obj->userptr.work = ERR_PTR(ret);
        }

        obj->userptr.workers--;
        i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);

        release_pages(pvec, pinned, 0);
        drm_free_large(pvec);

        put_task_struct(work->task);
        kfree(work);
}

static int
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
                                      bool *active)
{
        struct get_pages_work *work;

        /* Spawn a worker so that we can acquire the
         * user pages without holding our mutex. Access
         * to the user pages requires mmap_sem, and we have
         * a strict lock ordering of mmap_sem, struct_mutex -
         * we already hold struct_mutex here and so cannot
         * call gup without encountering a lock inversion.
         *
         * Userspace will keep on repeating the operation
         * (thanks to EAGAIN) until either we hit the fast
         * path or the worker completes. If the worker is
         * cancelled or superseded, the task is still run
         * but the results ignored. (This leads to
         * complications that we may have a stray object
         * refcount that we need to be wary of when
         * checking for existing objects during creation.)
         * If the worker encounters an error, it reports
         * that error back to this function through
         * obj->userptr.work = ERR_PTR.
         */
        if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
                return -EAGAIN;

        work = kmalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL)
                return -ENOMEM;

        obj->userptr.work = &work->work;
        obj->userptr.workers++;

        work->obj = i915_gem_object_get(obj);

        work->task = current;
        get_task_struct(work->task);

        INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
        schedule_work(&work->work);

        *active = true;
        return -EAGAIN;
}

static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
        const int num_pages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;
        bool active;

        /* If userspace should engineer that these pages are replaced in
         * the vma between us binding this page into the GTT and completion
         * of rendering... Their loss. If they change the mapping of their
         * pages they need to create a new bo to point to the new vma.
         *
         * However, that still leaves open the possibility of the vma
         * being copied upon fork. Which falls under the same userspace
         * synchronisation issue as a regular bo, except that this time
         * the process may not be expecting that a particular piece of
         * memory is tied to the GPU.
         *
         * Fortunately, we can hook into the mmu_notifier in order to
         * discard the page references prior to anything nasty happening
         * to the vma (discard or cloning) which should prevent the more
         * egregious cases from causing harm.
         */

        if (obj->userptr.work) {
                /* active flag should still be held for the pending work */
                if (IS_ERR(obj->userptr.work))
                        return PTR_ERR(obj->userptr.work);
                else
                        return -EAGAIN;
        }

        /* Let the mmu-notifier know that we have begun and need cancellation */
        ret = __i915_gem_userptr_set_active(obj, true);
        if (ret)
                return ret;

        pvec = NULL;
        pinned = 0;
        if (obj->userptr.mm->mm == current->mm) {
                pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
                                      GFP_TEMPORARY);
                if (pvec == NULL) {
                        __i915_gem_userptr_set_active(obj, false);
                        return -ENOMEM;
                }

                pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
                                               !obj->userptr.read_only, pvec);
        }

        active = false;
        if (pinned < 0)
                ret = pinned, pinned = 0;
        else if (pinned < num_pages)
                ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
        else
                ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
        if (ret) {
                __i915_gem_userptr_set_active(obj, active);
                release_pages(pvec, pinned, 0);
        }
        drm_free_large(pvec);
        return ret;
}

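/*
 * Drop the object's backing store: tear down the DMA mapping, write back
 * dirty pages and release the page references taken by get_pages.
 */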
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
        struct sgt_iter sgt_iter;
        struct page *page;

        BUG_ON(obj->userptr.work != NULL);
        __i915_gem_userptr_set_active(obj, false);

        if (obj->madv != I915_MADV_WILLNEED)
                obj->dirty = 0;

        i915_gem_gtt_finish_object(obj);

        for_each_sgt_page(page, sgt_iter, obj->pages) {
                if (obj->dirty)
                        set_page_dirty(page);

                mark_page_accessed(page);
                put_page(page);
        }
        obj->dirty = 0;

        sg_free_table(obj->pages);
        kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
        i915_gem_userptr_release__mmu_notifier(obj);
        i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mmu_object)
                return 0;

        return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
        .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
        .get_pages = i915_gem_userptr_get_pages,
        .put_pages = i915_gem_userptr_put_pages,
        .dmabuf_export = i915_gem_userptr_dmabuf_export,
        .release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_i915_gem_userptr *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
                /* We cannot support coherent userptr objects on hw without
                 * LLC and broken snooping.
                 */
                return -ENODEV;
        }

        if (args->flags & ~(I915_USERPTR_READ_ONLY |
                            I915_USERPTR_UNSYNCHRONIZED))
                return -EINVAL;

        if (offset_in_page(args->user_ptr | args->user_size))
                return -EINVAL;

        if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
                       (char __user *)(unsigned long)args->user_ptr, args->user_size))
                return -EFAULT;

        if (args->flags & I915_USERPTR_READ_ONLY) {
                /* On almost all of the current hw, we cannot tell the GPU that a
                 * page is readonly, so this is just a placeholder in the uAPI.
                 */
                return -ENODEV;
        }

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return -ENOMEM;

        drm_gem_private_object_init(dev, &obj->base, args->user_size);
        i915_gem_object_init(obj, &i915_gem_userptr_ops);
        obj->cache_level = I915_CACHE_LLC;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;

        obj->userptr.ptr = args->user_ptr;
        obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

        /* And keep a pointer to the current->mm for resolving the user pages
         * at binding. This means that we need to hook into the mmu_notifier
         * in order to detect if the mmu is destroyed.
         */
        ret = i915_gem_userptr_init__mm_struct(obj);
        if (ret == 0)
                ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
        if (ret == 0)
                ret = drm_gem_handle_create(file, &obj->base, &handle);

        /* drop reference from allocate - handle holds it now */
        i915_gem_object_put_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}

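/*
 * Illustrative userspace usage of the userptr ioctl above (a minimal sketch,
 * not part of the driver): it assumes an open DRM fd, a page-aligned pointer
 * "ptr" and a "size" that is a multiple of the page size, and uses libdrm's
 * drmIoctl() wrapper; error handling is elided:
 *
 *      struct drm_i915_gem_userptr arg = { 0 };
 *
 *      arg.user_ptr = (__u64)(uintptr_t)ptr;
 *      arg.user_size = size;
 *      arg.flags = 0;
 *      if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *              handle = arg.handle;
 *
 * The resulting GEM handle can be used for execbuffer like any other bo, but
 * the CPU access ioctls (mmap, pread, pwrite) are rejected; access the memory
 * directly through ptr instead, as described above.
 */
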
void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
        mutex_init(&dev_priv->mm_lock);
        hash_init(dev_priv->mm_structs);
}