/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

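/*
 * One i915_mm_struct is kept per client mm_struct that owns at least one
 * userptr object. It is looked up by mm in dev_priv->mm_structs, shared
 * via a kref between all userptr objects created by that process, and
 * carries the optional i915_mmu_notifier plus a work item used to defer
 * the final notifier teardown and mmdrop() to a worker.
 */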
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root_cached objects;
	struct workqueue_struct *wq;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
	struct list_head link;
	struct work_struct work;
	bool attached;
};

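/*
 * Workqueue callback queued by the mmu notifier when an invalidation
 * overlaps a userptr object: cancel any in-flight get_pages worker,
 * otherwise wait for the GPU, unbind the object and release its backing
 * pages, then drop the reference taken when the work was queued.
 */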
static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct work_struct *active;

	/* Cancel any active worker and force us to re-evaluate gup */
	mutex_lock(&obj->mm.lock);
	active = fetch_and_zero(&obj->userptr.work);
	mutex_unlock(&obj->mm.lock);
	if (active)
		goto out;

	i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);

	mutex_lock(&obj->base.dev->struct_mutex);

	/* We are inside a kthread context and can't be interrupted */
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	WARN_ONCE(i915_gem_object_has_pages(obj),
		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
		  obj->bind_count,
		  atomic_read(&obj->mm.pages_pin_count),
		  obj->pin_global);

	mutex_unlock(&obj->base.dev->struct_mutex);

out:
	i915_gem_object_put(obj);
}

static void add_object(struct i915_mmu_object *mo)
{
	if (mo->attached)
		return;

	interval_tree_insert(&mo->it, &mo->mn->objects);
	mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
	if (!mo->attached)
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	mo->attached = false;
}

static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						      const struct mmu_notifier_range *range)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;
	struct interval_tree_node *it;
	LIST_HEAD(cancelled);
	unsigned long end;

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end = range->end - 1;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, range->start, end);
	while (it) {
		if (!range->blockable) {
			spin_unlock(&mn->lock);
			return -EAGAIN;
		}
		/* The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		mo = container_of(it, struct i915_mmu_object, it);
		if (kref_get_unless_zero(&mo->obj->base.refcount))
			queue_work(mn->wq, &mo->work);

		list_add(&mo->link, &cancelled);
		it = interval_tree_iter_next(it, range->start, end);
	}
	list_for_each_entry(mo, &cancelled, link)
		del_object(mo);
	spin_unlock(&mn->lock);

	if (!list_empty(&cancelled))
		flush_workqueue(mn->wq);

	return 0;
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT_CACHED;
	mn->wq = alloc_workqueue("i915-userptr-release",
				 WQ_UNBOUND | WQ_MEM_RECLAIM,
				 0);
	if (mn->wq == NULL) {
		kfree(mn);
		return ERR_PTR(-ENOMEM);
	}

	return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}

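/*
 * Look up the i915_mmu_notifier for this mm, creating and registering one
 * on first use. The notifier is allocated optimistically outside the locks;
 * registration is then attempted under mmap_sem (write) and mm_lock, and if
 * another thread won the race the speculative allocation is discarded and
 * the notifier that was installed is returned instead.
 */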
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int err = 0;

	mn = mm->mn;
	if (mn)
		return mn;

	mn = i915_mmu_notifier_create(mm->mm);
	if (IS_ERR(mn))
		err = PTR_ERR(mn);

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if (mm->mn == NULL && !err) {
		/* Protected by mmap_sem (write-lock) */
		err = __mmu_notifier_register(&mn->mn, mm->mm);
		if (!err) {
			/* Protected by mm_lock */
			mm->mn = fetch_and_zero(&mn);
		}
	} else if (mm->mn) {
		/*
		 * Someone else raced and successfully installed the mmu
		 * notifier, we can cancel our own errors.
		 */
		err = 0;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	if (mn && !IS_ERR(mn)) {
		destroy_workqueue(mn->wq);
		kfree(mn);
	}

	return err ? ERR_PTR(err) : mm->mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	INIT_WORK(&mo->work, cancel_userptr);

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	destroy_workqueue(mn->wq);
	kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		mmgrab(current->mm);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	queue_work(mm->i915->mm.userptr_wq, &mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}

struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
			       struct page **pvec, int num_pages)
{
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_page_sizes;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

alloc_table:
	ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
					  0, num_pages << PAGE_SHIFT,
					  max_segment,
					  GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ERR_PTR(ret);
	}

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		kfree(st);
		return ERR_PTR(ret);
	}

	sg_page_sizes = i915_sg_page_sizes(st->sgl);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return st;
}

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value)
		del_object(obj->userptr.mmu_object);
	else if (!work_pending(&obj->userptr.mmu_object->work))
		add_object(obj->userptr.mmu_object);
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}

static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;

		if (!i915_gem_object_is_readonly(obj))
			flags |= FOLL_WRITE;

		ret = -EFAULT;
		if (mmget_not_zero(mm)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 flags,
					 pvec + pinned, NULL, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&obj->mm.lock);
	if (obj->userptr.work == &work->work) {
		struct sg_table *pages = ERR_PTR(ret);

		if (pinned == npages) {
			pages = __i915_gem_userptr_alloc_pages(obj, pvec,
							       npages);
			if (!IS_ERR(pages)) {
				pinned = 0;
				pages = NULL;
			}
		}

		obj->userptr.work = ERR_CAST(pages);
		if (IS_ERR(pages))
			__i915_gem_userptr_set_active(obj, false);
	}
	mutex_unlock(&obj->mm.lock);

	release_pages(pvec, pinned);
	kvfree(pvec);

	i915_gem_object_put(obj);
	put_task_struct(work->task);
	kfree(work);
}

static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return ERR_PTR(-ENOMEM);

	obj->userptr.work = &work->work;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

	return ERR_PTR(-EAGAIN);
}

static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct mm_struct *mm = obj->userptr.mm->mm;
	struct page **pvec;
	struct sg_table *pages;
	bool active;
	int pinned;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	if (obj->userptr.work) {
		/* active flag should still be held for the pending work */
		if (IS_ERR(obj->userptr.work))
			return PTR_ERR(obj->userptr.work);
		else
			return -EAGAIN;
	}

	pvec = NULL;
	pinned = 0;

	if (mm == current->mm) {
		pvec = kvmalloc_array(num_pages, sizeof(struct page *),
				      GFP_KERNEL |
				      __GFP_NORETRY |
				      __GFP_NOWARN);
		if (pvec) /* defer to worker if malloc fails */
			pinned = __get_user_pages_fast(obj->userptr.ptr,
						       num_pages,
						       !i915_gem_object_is_readonly(obj),
						       pvec);
	}

	active = false;
	if (pinned < 0) {
		pages = ERR_PTR(pinned);
		pinned = 0;
	} else if (pinned < num_pages) {
		pages = __i915_gem_userptr_get_pages_schedule(obj);
		active = pages == ERR_PTR(-EAGAIN);
	} else {
		pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
		active = !IS_ERR(pages);
	}
	if (active)
		__i915_gem_userptr_set_active(obj, true);

	if (IS_ERR(pages))
		release_pages(pvec, pinned);
	kvfree(pvec);

	return PTR_ERR_OR_ZERO(pages);
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->mm.madv != I915_MADV_WILLNEED)
		obj->mm.dirty = false;

	i915_gem_gtt_finish_pages(obj, pages);

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 *
 * A minimal userspace usage sketch is included after this function for
 * illustration.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		struct i915_hw_ppgtt *ppgtt;

		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		ppgtt = dev_priv->kernel_context->ppgtt;
		if (!ppgtt || !ppgtt->vm.has_read_only)
			return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
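
/*
 * A minimal userspace sketch, for illustration only (it is not part of this
 * file): creating a userptr object from an anonymous, page-aligned
 * allocation via DRM_IOCTL_I915_GEM_USERPTR. Both the pointer and the size
 * must be page aligned; error handling and the subsequent execbuffer usage
 * are omitted. "fd" is assumed to be an open render node and
 * use_gem_handle() is a hypothetical helper.
 *
 *	struct drm_i915_gem_userptr arg = {};
 *	void *ptr;
 *
 *	posix_memalign(&ptr, 4096, size);
 *	arg.user_ptr = (uintptr_t)ptr;
 *	arg.user_size = size;
 *	arg.flags = 0;
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		use_gem_handle(arg.handle);
 */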

int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);

	dev_priv->mm.userptr_wq =
		alloc_workqueue("i915-userptr-acquire",
				WQ_HIGHPRI | WQ_UNBOUND,
				0);
	if (!dev_priv->mm.userptr_wq)
		return -ENOMEM;

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->mm.userptr_wq);
}