/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_flush_free_objects(struct drm_i915_private *i915);

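/*
 * Report whether CPU writes to this object need a clflush before the GPU or
 * display can rely on them: not if a flush is already pending (cache_dirty),
 * yes if the object is not coherent for CPU writes, and always for objects
 * pinned for scanout.
 */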
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	return obj->pin_display;
}

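/*
 * Reserve a temporary node in the mappable (CPU-visible) range of the global
 * GTT; the pread/pwrite slow paths use it to bind one page at a time through
 * the aperture.
 */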
static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	might_sleep();

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_backoff(error),
					       I915_RESET_TIMEOUT);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}

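/*
 * GET_APERTURE ioctl: report the total GGTT size and how much of it is not
 * currently consumed by pinned VMAs.
 */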
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	pinned = ggtt->base.reserved;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

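/*
 * Replace the object's shmem backing store with a single contiguous DMA
 * buffer: copy each page across, flush it, and describe the allocation with
 * a one-entry sg_table.
 */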
static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	drm_dma_handle_t *phys;
	struct sg_table *st;
	struct scatterlist *sg;
	char *vaddr;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return ERR_PTR(-EINVAL);

	/* Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
			     obj->base.size,
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return ERR_PTR(-ENOMEM);

	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			st = ERR_CAST(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		st = ERR_PTR(-ENOMEM);
		goto err_phys;
	}

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		st = ERR_PTR(-ENOMEM);
		goto err_phys;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = phys->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;
	return st;

err_phys:
	drm_pci_free(obj->base.dev, phys);
	return st;
}

static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

static const struct drm_i915_gem_object_ops i915_gem_object_ops;

int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

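/*
 * Wait on a single fence, boosting the GPU frequency (waitboost) for i915
 * requests so the stalling client is served sooner.
 */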
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout,
			   struct intel_rps_client *rps)
{
	struct drm_i915_gem_request *rq;

	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (!dma_fence_is_i915(fence))
		return dma_fence_wait_timeout(fence,
					      flags & I915_WAIT_INTERRUPTIBLE,
					      timeout);

	rq = to_request(fence);
	if (i915_gem_request_completed(rq))
		goto out;

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (rps) {
		if (INTEL_GEN(rq->i915) >= 6)
			gen6_rps_boost(rq, rps);
		else
			rps = NULL;
	}

	timeout = i915_wait_request(rq, flags, timeout);

out:
	if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
		i915_gem_request_retire_upto(rq);

	return timeout;
}

static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
				 unsigned int flags,
				 long timeout,
				 struct intel_rps_client *rps)
{
	unsigned int seq = __read_seqcount_begin(&resv->seq);
	struct dma_fence *excl;
	bool prune_fences = false;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout,
							     rps);
			if (timeout < 0)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);

		prune_fences = count && timeout >= 0;
	} else {
		excl = reservation_object_get_excl_rcu(resv);
	}

	if (excl && timeout >= 0) {
		timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
		prune_fences = timeout >= 0;
	}

	dma_fence_put(excl);

	/* Opportunistically prune the fences iff we know they have *all* been
	 * signaled and that the reservation object has not been changed (i.e.
	 * no new fences have been added).
	 */
	if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
		if (reservation_object_trylock(resv)) {
			if (!__read_seqcount_retry(&resv->seq, seq))
				reservation_object_add_excl_fence(resv, NULL);
			reservation_object_unlock(resv);
		}
	}

	return timeout;
}

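/* Propagate the requested scheduler priority to the request behind an i915 fence. */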
static void __fence_set_priority(struct dma_fence *fence, int prio)
{
	struct drm_i915_gem_request *rq;
	struct intel_engine_cs *engine;

	if (!dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;
	if (!engine->schedule)
		return;

	engine->schedule(rq, prio);
}

static void fence_set_priority(struct dma_fence *fence, int prio)
{
	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			__fence_set_priority(array->fences[i], prio);
	} else {
		__fence_set_priority(fence, prio);
	}
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      int prio)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			fence_set_priority(shared[i], prio);
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		fence_set_priority(excl, prio);
		dma_fence_put(excl);
	}
	return 0;
}

/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 * @rps: client (user process) to charge for any waitboosting
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout,
		     struct intel_rps_client *rps)
{
	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	timeout = i915_gem_object_wait_reservation(obj->resv,
						   flags, timeout,
						   rps);
	return timeout < 0 ? timeout : 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}

void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
{
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       args->size, &args->handle);
}

static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       args->size, &args->handle);
}

static inline enum fb_op_origin
fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
{
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}

static void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	if (!(obj->base.write_domain & flush_domains))
		return;

	/* No actual flushing is required for the GTT write domain. Writes
	 * to it "immediately" go to main memory as far as we know, so there's
	 * no chipset flush. It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour).
	 */
	wmb();

	switch (obj->base.write_domain) {
	case I915_GEM_DOMAIN_GTT:
		if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
			if (intel_runtime_pm_get_if_in_use(dev_priv)) {
				spin_lock_irq(&dev_priv->uncore.lock);
				POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
				spin_unlock_irq(&dev_priv->uncore.lock);
				intel_runtime_pm_put(dev_priv);
			}
		}

		intel_fb_obj_flush(obj,
				   fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
		break;

	case I915_GEM_DOMAIN_CPU:
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
		break;

	case I915_GEM_DOMAIN_RENDER:
		if (gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
		break;
	}

	obj->base.write_domain = 0;
}

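/*
 * Copy helpers that walk the buffer one 64-byte cacheline at a time, XORing
 * the GPU offset with 64 to undo (or apply) bit-17 swizzling.
 */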
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!obj->cache_dirty &&
	    !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush = CLFLUSH_BEFORE;

out:
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (!obj->cache_dirty) {
		*needs_clflush |= CLFLUSH_AFTER;

		/*
		 * Same trick applies to invalidate partially written
		 * cachelines read before writing.
		 */
		if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
			*needs_clflush |= CLFLUSH_BEFORE;
	}

out:
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->mm.dirty = true;
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int offset, int length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
	else
		ret = __copy_to_user(user_data, vaddr + offset, length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
	    bool page_do_bit17_swizzling, bool needs_clflush)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush)
			drm_clflush_virt_range(vaddr + offset, length);
		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return 0;

	return shmem_pread_slow(page, offset, length, user_data,
				page_do_bit17_swizzling, needs_clflush);
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	char __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int needs_clflush;
	unsigned int idx, offset;
	int ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	mutex_unlock(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pread(page, offset, length, user_data,
				  page_to_phys(page) & obj_do_bit17_swizzling,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

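/*
 * Read through an atomic WC mapping of the aperture; if the atomic copy
 * faults, retry with a regular (sleeping) mapping and copy_to_user().
 */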
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = (void __force *)
			io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data, vaddr + offset, length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
	struct i915_vma *vma;
	void __user *user_data;
	u64 remain, offset;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

	mutex_unlock(&i915->drm.struct_mutex);

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	mutex_lock(&i915->drm.struct_mutex);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = (void __force *)
			io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user(vaddr + offset, user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	mutex_unlock(&i915->drm.struct_mutex);

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->mappable, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_fb_obj_flush(obj, ORIGIN_CPU);

	mutex_lock(&i915->drm.struct_mutex);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return ret;
}

static int
shmem_pwrite_slow(struct page *page, int offset, int length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, offset, user_data,
						length);
	else
		ret = __copy_from_user(vaddr + offset, user_data, length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool page_do_bit17_swizzling,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush_before)
			drm_clflush_virt_range(vaddr + offset, len);
		ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
		if (needs_clflush_after)
			drm_clflush_virt_range(vaddr + offset, len);

		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return ret;

	return shmem_pwrite_slow(page, offset, len, user_data,
				 page_do_bit17_swizzling,
				 needs_clflush_before,
				 needs_clflush_after);
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	void __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	mutex_unlock(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire page.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pwrite(page, offset, length, user_data,
				   page_to_phys(page) & obj_do_bit17_swizzling,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

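/*
 * Move the object's idle GGTT VMAs to the tail of the inactive list and the
 * object itself to the tail of the bound/unbound list, keeping the LRU in
 * access order.
 */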
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915;
	struct list_head *list;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!i915_vma_is_ggtt(vma))
			break;

		if (i915_vma_is_active(vma))
			continue;

		if (!drm_mm_node_allocated(&vma->node))
			continue;

		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	}

	i915 = to_i915(obj->base.dev);
	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
	list_move_tail(&obj->global_link, list);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int err;

	/* Only handle setting domains to types used by the CPU. */
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   (write_domain ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (err)
		goto out;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out_unpin;

	if (read_domains & I915_GEM_DOMAIN_WC)
		err = i915_gem_object_set_to_wc_domain(obj, write_domain);
	else if (read_domains & I915_GEM_DOMAIN_GTT)
		err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
	else
		err = i915_gem_object_set_to_cpu_domain(obj, write_domain);

	/* And bump the LRU for this access */
	i915_gem_object_bump_inactive_ggtt(obj);

	mutex_unlock(&dev->struct_mutex);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj,
					fb_write_origin(obj, write_domain));

out_unpin:
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		i915_gem_object_put(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem)) {
			i915_gem_object_put(obj);
			return -EINTR;
		}
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);

		/* This may race, but that's ok, it only gets set */
		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
	}
	i915_gem_object_put(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

4cc69075
CW
1734/**
1735 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1736 *
1737 * A history of the GTT mmap interface:
1738 *
1739 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
1740 * aligned and suitable for fencing, and still fit into the available
1741 * mappable space left by the pinned display objects. A classic problem
1742 * we called the page-fault-of-doom where we would ping-pong between
1743 * two objects that could not fit inside the GTT and so the memcpy
1744 * would page one object in at the expense of the other between every
1745 * single byte.
1746 *
1747 * 1 - Objects can be any size, and have any compatible fencing (X Y, or none
1748 * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1749 * object is too large for the available space (or simply too large
1750 * for the mappable aperture!), a view is created instead and faulted
1751 * into userspace. (This view is aligned and sized appropriately for
1752 * fenced access.)
1753 *
e22d8e3c
CW
1754 * 2 - Recognise WC as a separate cache domain so that we can flush the
1755 * delayed writes via GTT before performing direct access via WC.
1756 *
4cc69075
CW
1757 * Restrictions:
1758 *
1759 * * snoopable objects cannot be accessed via the GTT. It can cause machine
1760 * hangs on some architectures, corruption on others. An attempt to service
1761 * a GTT page fault from a snoopable object will generate a SIGBUS.
1762 *
1763 * * the object must be able to fit into RAM (physical memory, though not
1764 * limited to the mappable aperture).
1765 *
1766 *
1767 * Caveats:
1768 *
1769 * * a new GTT page fault will synchronize rendering from the GPU and flush
1770 * all data to system memory. Subsequent access will not be synchronized.
1771 *
1772 * * all mappings are revoked on runtime device suspend.
1773 *
1774 * * there are only 8, 16 or 32 fence registers to share between all users
1775 * (older machines require a fence register for display and blitter access
1776 * as well). Contention of the fence registers will cause the previous users
1777 * to be unmapped and any new access will generate new page faults.
1778 *
1779 * * running out of memory while servicing a fault may generate a SIGBUS,
1780 * rather than the expected SIGSEGV.
1781 */
1782int i915_gem_mmap_gtt_version(void)
1783{
e22d8e3c 1784 return 2;
4cc69075
CW
1785}
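/*
 * Illustrative userspace sketch (not part of this file): querying the
 * feature level reported above through I915_PARAM_MMAP_GTT_VERSION.
 * Assumes libdrm (<xf86drm.h>) and <drm/i915_drm.h>; the helper name is
 * hypothetical.
 */
static int example_query_mmap_gtt_version(int fd)
{
	int version = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_MMAP_GTT_VERSION,
		.value = &version,
	};

	/* Older kernels lack the parameter; treat that as version 0 */
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;

	return version;
}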
1786
2d4281bb
CW
1787static inline struct i915_ggtt_view
1788compute_partial_view(struct drm_i915_gem_object *obj,
2d4281bb
CW
1789 pgoff_t page_offset,
1790 unsigned int chunk)
1791{
1792 struct i915_ggtt_view view;
1793
1794 if (i915_gem_object_is_tiled(obj))
1795 chunk = roundup(chunk, tile_row_pages(obj));
1796
2d4281bb 1797 view.type = I915_GGTT_VIEW_PARTIAL;
8bab1193
CW
1798 view.partial.offset = rounddown(page_offset, chunk);
1799 view.partial.size =
2d4281bb 1800 min_t(unsigned int, chunk,
8bab1193 1801 (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
2d4281bb
CW
1802
1803 /* If the partial covers the entire object, just create a normal VMA. */
1804 if (chunk >= obj->base.size >> PAGE_SHIFT)
1805 view.type = I915_GGTT_VIEW_NORMAL;
1806
1807 return view;
1808}
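/*
 * Worked example (illustrative only, assuming 4 KiB pages): an untiled
 * 8 MiB object spans 2048 pages and the caller passes MIN_CHUNK_PAGES = 256.
 * A fault at page_offset 1000 gives partial.offset = rounddown(1000, 256) =
 * 768 and partial.size = min(256, 2048 - 768) = 256, i.e. a 1 MiB view
 * covering pages 768-1023. Only if chunk >= 2048 would the normal (full)
 * view be used instead.
 */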
1809
de151cf6
JB
1810/**
1811 * i915_gem_fault - fault a page into the GTT
d9072a3e 1812 * @vmf: fault info
de151cf6
JB
1813 *
1814 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1815 * from userspace. The fault handler takes care of binding the object to
1816 * the GTT (if needed), allocating and programming a fence register (again,
1817 * only if needed based on whether the old reg is still valid or the object
1818 * is tiled) and inserting a new PTE into the faulting process.
1819 *
1820 * Note that the faulting process may involve evicting existing objects
1821 * from the GTT and/or fence registers to make room. So performance may
1822 * suffer if the GTT working set is large or there are few fence registers
1823 * left.
4cc69075
CW
1824 *
1825 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1826 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
de151cf6 1827 */
11bac800 1828int i915_gem_fault(struct vm_fault *vmf)
de151cf6 1829{
03af84fe 1830#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
11bac800 1831 struct vm_area_struct *area = vmf->vma;
058d88c4 1832 struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
05394f39 1833 struct drm_device *dev = obj->base.dev;
72e96d64
JL
1834 struct drm_i915_private *dev_priv = to_i915(dev);
1835 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b8f9096d 1836 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
058d88c4 1837 struct i915_vma *vma;
de151cf6 1838 pgoff_t page_offset;
82118877 1839 unsigned int flags;
b8f9096d 1840 int ret;
f65c9168 1841
de151cf6 1842 /* We don't use vmf->pgoff since that has the fake offset */
1a29d85e 1843 page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
de151cf6 1844
db53a302
CW
1845 trace_i915_gem_object_fault(obj, page_offset, true, write);
1846
6e4930f6 1847 /* Try to flush the object off the GPU first without holding the lock.
b8f9096d 1848 * Upon acquiring the lock, we will perform our sanity checks and then
6e4930f6
CW
1849 * repeat the flush holding the lock in the normal manner to catch cases
1850 * where we are gazumped.
1851 */
e95433c7
CW
1852 ret = i915_gem_object_wait(obj,
1853 I915_WAIT_INTERRUPTIBLE,
1854 MAX_SCHEDULE_TIMEOUT,
1855 NULL);
6e4930f6 1856 if (ret)
b8f9096d
CW
1857 goto err;
1858
40e62d5d
CW
1859 ret = i915_gem_object_pin_pages(obj);
1860 if (ret)
1861 goto err;
1862
b8f9096d
CW
1863 intel_runtime_pm_get(dev_priv);
1864
1865 ret = i915_mutex_lock_interruptible(dev);
1866 if (ret)
1867 goto err_rpm;
6e4930f6 1868
eb119bd6 1869 /* Access to snoopable pages through the GTT is incoherent. */
0031fb96 1870 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
ddeff6ee 1871 ret = -EFAULT;
b8f9096d 1872 goto err_unlock;
eb119bd6
CW
1873 }
1874
82118877
CW
1875 /* If the object is smaller than a couple of partial vma, it is
1876 * not worth only creating a single partial vma - we may as well
1877 * clear enough space for the full object.
1878 */
1879 flags = PIN_MAPPABLE;
1880 if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1881 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1882
a61007a8 1883 /* Now pin it into the GTT as needed */
82118877 1884 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
a61007a8 1885 if (IS_ERR(vma)) {
a61007a8 1886 /* Use a partial view if it is bigger than available space */
2d4281bb 1887 struct i915_ggtt_view view =
8201c1fa 1888 compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
aa136d9d 1889
50349247
CW
1890 /* Userspace is now writing through an untracked VMA, abandon
1891 * all hope that the hardware is able to track future writes.
1892 */
1893 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1894
a61007a8
CW
1895 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1896 }
058d88c4
CW
1897 if (IS_ERR(vma)) {
1898 ret = PTR_ERR(vma);
b8f9096d 1899 goto err_unlock;
058d88c4 1900 }
4a684a41 1901
c9839303
CW
1902 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1903 if (ret)
b8f9096d 1904 goto err_unpin;
74898d7e 1905
49ef5294 1906 ret = i915_vma_get_fence(vma);
d9e86c0e 1907 if (ret)
b8f9096d 1908 goto err_unpin;
7d1c4804 1909
275f039d 1910 /* Mark as being mmapped into userspace for later revocation */
9c870d03 1911 assert_rpm_wakelock_held(dev_priv);
275f039d
CW
1912 if (list_empty(&obj->userfault_link))
1913 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
275f039d 1914
b90b91d8 1915 /* Finally, remap it using the new GTT offset */
c58305af 1916 ret = remap_io_mapping(area,
8bab1193 1917 area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
c58305af
CW
1918 (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
1919 min_t(u64, vma->size, area->vm_end - area->vm_start),
1920 &ggtt->mappable);
a61007a8 1921
b8f9096d 1922err_unpin:
058d88c4 1923 __i915_vma_unpin(vma);
b8f9096d 1924err_unlock:
de151cf6 1925 mutex_unlock(&dev->struct_mutex);
b8f9096d
CW
1926err_rpm:
1927 intel_runtime_pm_put(dev_priv);
40e62d5d 1928 i915_gem_object_unpin_pages(obj);
b8f9096d 1929err:
de151cf6 1930 switch (ret) {
d9bc7e9f 1931 case -EIO:
2232f031
DV
1932 /*
1933 * We eat errors when the gpu is terminally wedged to avoid
1934 * userspace unduly crashing (gl has no provisions for mmaps to
1935 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1936 * and so needs to be reported.
1937 */
1938 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
f65c9168
PZ
1939 ret = VM_FAULT_SIGBUS;
1940 break;
1941 }
045e769a 1942 case -EAGAIN:
571c608d
DV
1943 /*
1944 * EAGAIN means the gpu is hung and we'll wait for the error
1945 * handler to reset everything when re-faulting in
1946 * i915_mutex_lock_interruptible.
d9bc7e9f 1947 */
c715089f
CW
1948 case 0:
1949 case -ERESTARTSYS:
bed636ab 1950 case -EINTR:
e79e0fe3
DR
1951 case -EBUSY:
1952 /*
1953 * EBUSY is ok: this just means that another thread
1954 * already did the job.
1955 */
f65c9168
PZ
1956 ret = VM_FAULT_NOPAGE;
1957 break;
de151cf6 1958 case -ENOMEM:
f65c9168
PZ
1959 ret = VM_FAULT_OOM;
1960 break;
a7c2e1aa 1961 case -ENOSPC:
45d67817 1962 case -EFAULT:
f65c9168
PZ
1963 ret = VM_FAULT_SIGBUS;
1964 break;
de151cf6 1965 default:
a7c2e1aa 1966 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
f65c9168
PZ
1967 ret = VM_FAULT_SIGBUS;
1968 break;
de151cf6 1969 }
f65c9168 1970 return ret;
de151cf6
JB
1971}
1972
901782b2
CW
1973/**
1974 * i915_gem_release_mmap - remove physical page mappings
1975 * @obj: obj in question
1976 *
af901ca1 1977 * Preserve the reservation of the mmapping with the DRM core code, but
901782b2
CW
1978 * relinquish ownership of the pages back to the system.
1979 *
1980 * It is vital that we remove the page mapping if we have mapped a tiled
1981 * object through the GTT and then lose the fence register due to
1982 * resource pressure. Similarly if the object has been moved out of the
1983 * aperture, then pages mapped into userspace must be revoked. Removing the
1984 * mapping will then trigger a page fault on the next user access, allowing
1985 * fixup by i915_gem_fault().
1986 */
d05ca301 1987void
05394f39 1988i915_gem_release_mmap(struct drm_i915_gem_object *obj)
901782b2 1989{
275f039d 1990 struct drm_i915_private *i915 = to_i915(obj->base.dev);
275f039d 1991
349f2ccf
CW
1992 /* Serialisation between user GTT access and our code depends upon
1993 * revoking the CPU's PTE whilst the mutex is held. The next user
1994 * pagefault then has to wait until we release the mutex.
9c870d03
CW
1995 *
1996 * Note that RPM complicates somewhat by adding an additional
1997 * requirement that operations to the GGTT be made holding the RPM
1998 * wakeref.
349f2ccf 1999 */
275f039d 2000 lockdep_assert_held(&i915->drm.struct_mutex);
9c870d03 2001 intel_runtime_pm_get(i915);
349f2ccf 2002
3594a3e2 2003 if (list_empty(&obj->userfault_link))
9c870d03 2004 goto out;
901782b2 2005
3594a3e2 2006 list_del_init(&obj->userfault_link);
6796cb16
DH
2007 drm_vma_node_unmap(&obj->base.vma_node,
2008 obj->base.dev->anon_inode->i_mapping);
349f2ccf
CW
2009
2010 /* Ensure that the CPU's PTEs are revoked and there are no outstanding
2011 * memory transactions from userspace before we return. The TLB
2012 * flushing implied by changing the PTEs above *should* be
2013 * sufficient; an extra barrier here just provides us with a bit
2014 * of paranoid documentation about our requirement to serialise
2015 * memory writes before touching registers / GSM.
2016 */
2017 wmb();
9c870d03
CW
2018
2019out:
2020 intel_runtime_pm_put(i915);
901782b2
CW
2021}
2022
7c108fd8 2023void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
eedd10f4 2024{
3594a3e2 2025 struct drm_i915_gem_object *obj, *on;
7c108fd8 2026 int i;
eedd10f4 2027
3594a3e2
CW
2028 /*
2029 * Only called during RPM suspend. All users of the userfault_list
2030 * must be holding an RPM wakeref to ensure that this can not
2031 * run concurrently with themselves (and use the struct_mutex for
2032 * protection between themselves).
2033 */
275f039d 2034
3594a3e2
CW
2035 list_for_each_entry_safe(obj, on,
2036 &dev_priv->mm.userfault_list, userfault_link) {
2037 list_del_init(&obj->userfault_link);
275f039d
CW
2038 drm_vma_node_unmap(&obj->base.vma_node,
2039 obj->base.dev->anon_inode->i_mapping);
275f039d 2040 }
7c108fd8
CW
2041
2042 /* The fence will be lost when the device powers down. If any were
2043 * in use by hardware (i.e. they are pinned), we should not be powering
2044 * down! All other fences will be reacquired by the user upon waking.
2045 */
2046 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2047 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2048
e0ec3ec6
CW
2049 /* Ideally we want to assert that the fence register is not
2050 * live at this point (i.e. that no piece of code will be
2051 * trying to write through fence + GTT, as that not only violates
2052 * our tracking of activity and associated locking/barriers,
2053 * but is also illegal given that the hw is powered down).
2054 *
2055 * Previously we used reg->pin_count as a "liveness" indicator.
2056 * That is not sufficient, and we need a more fine-grained
2057 * tool if we want to have a sanity check here.
2058 */
7c108fd8
CW
2059
2060 if (!reg->vma)
2061 continue;
2062
2063 GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
2064 reg->dirty = true;
2065 }
eedd10f4
CW
2066}
2067
d8cb5086
CW
2068static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2069{
fac5e23e 2070 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
f3f6184c 2071 int err;
da494d7c 2072
f3f6184c 2073 err = drm_gem_create_mmap_offset(&obj->base);
b42a13d9 2074 if (likely(!err))
f3f6184c 2075 return 0;
d8cb5086 2076
b42a13d9
CW
2077 /* Attempt to reap some mmap space from dead objects */
2078 do {
2079 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
2080 if (err)
2081 break;
f3f6184c 2082
b42a13d9 2083 i915_gem_drain_freed_objects(dev_priv);
f3f6184c 2084 err = drm_gem_create_mmap_offset(&obj->base);
b42a13d9
CW
2085 if (!err)
2086 break;
2087
2088 } while (flush_delayed_work(&dev_priv->gt.retire_work));
da494d7c 2089
f3f6184c 2090 return err;
d8cb5086
CW
2091}
2092
2093static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2094{
d8cb5086
CW
2095 drm_gem_free_mmap_offset(&obj->base);
2096}
2097
da6b51d0 2098int
ff72145b
DA
2099i915_gem_mmap_gtt(struct drm_file *file,
2100 struct drm_device *dev,
da6b51d0 2101 uint32_t handle,
ff72145b 2102 uint64_t *offset)
de151cf6 2103{
05394f39 2104 struct drm_i915_gem_object *obj;
de151cf6
JB
2105 int ret;
2106
03ac0642 2107 obj = i915_gem_object_lookup(file, handle);
f3f6184c
CW
2108 if (!obj)
2109 return -ENOENT;
ab18282d 2110
d8cb5086 2111 ret = i915_gem_object_create_mmap_offset(obj);
f3f6184c
CW
2112 if (ret == 0)
2113 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
de151cf6 2114
f0cd5182 2115 i915_gem_object_put(obj);
1d7cfea1 2116 return ret;
de151cf6
JB
2117}
2118
ff72145b
DA
2119/**
2120 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2121 * @dev: DRM device
2122 * @data: GTT mapping ioctl data
2123 * @file: GEM object info
2124 *
2125 * Simply returns the fake offset to userspace so it can mmap it.
2126 * The mmap call will end up in drm_gem_mmap(), which will set things
2127 * up so we can get faults in the handler above.
2128 *
2129 * The fault handler will take care of binding the object into the GTT
2130 * (since it may have been evicted to make room for something), allocating
2131 * a fence register, and mapping the appropriate aperture address into
2132 * userspace.
2133 */
2134int
2135i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2136 struct drm_file *file)
2137{
2138 struct drm_i915_gem_mmap_gtt *args = data;
2139
da6b51d0 2140 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
ff72145b
DA
2141}
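/*
 * Illustrative userspace sketch (not part of this file): the two-step GTT
 * mmap flow described above - fetch the fake offset, then mmap() the DRM fd
 * itself so the fault handler above can populate the PTEs on first access.
 * Assumes libdrm (<xf86drm.h>), <drm/i915_drm.h>, <sys/mman.h> and a 64-bit
 * off_t (_FILE_OFFSET_BITS=64); the helper name is hypothetical.
 */
static void *example_gem_gtt_mmap(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
	void *ptr;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return NULL;

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, arg.offset);
	return ptr == MAP_FAILED ? NULL : ptr;
}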
2142
225067ee
DV
2143/* Immediately discard the backing storage */
2144static void
2145i915_gem_object_truncate(struct drm_i915_gem_object *obj)
e5281ccd 2146{
4d6294bf 2147 i915_gem_object_free_mmap_offset(obj);
1286ff73 2148
4d6294bf
CW
2149 if (obj->base.filp == NULL)
2150 return;
e5281ccd 2151
225067ee
DV
2152 /* Our goal here is to return as much of the memory as
2153 * is possible back to the system as we are called from OOM.
2154 * To do this we must instruct the shmfs to drop all of its
2155 * backing pages, *now*.
2156 */
5537252b 2157 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
a4f5ea64 2158 obj->mm.madv = __I915_MADV_PURGED;
4e5462ee 2159 obj->mm.pages = ERR_PTR(-EFAULT);
225067ee 2160}
e5281ccd 2161
5537252b 2162/* Try to discard unwanted pages */
03ac84f1 2163void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
225067ee 2164{
5537252b
CW
2165 struct address_space *mapping;
2166
1233e2db
CW
2167 lockdep_assert_held(&obj->mm.lock);
2168 GEM_BUG_ON(obj->mm.pages);
2169
a4f5ea64 2170 switch (obj->mm.madv) {
5537252b
CW
2171 case I915_MADV_DONTNEED:
2172 i915_gem_object_truncate(obj);
2173 case __I915_MADV_PURGED:
2174 return;
2175 }
2176
2177 if (obj->base.filp == NULL)
2178 return;
2179
93c76a3d 2180	mapping = obj->base.filp->f_mapping;
5537252b 2181 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
e5281ccd
CW
2182}
2183
5cdf5881 2184static void
03ac84f1
CW
2185i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2186 struct sg_table *pages)
673a394b 2187{
85d1225e
DG
2188 struct sgt_iter sgt_iter;
2189 struct page *page;
1286ff73 2190
e5facdf9 2191 __i915_gem_object_release_shmem(obj, pages, true);
673a394b 2192
03ac84f1 2193 i915_gem_gtt_finish_pages(obj, pages);
e2273302 2194
6dacfd2f 2195 if (i915_gem_object_needs_bit17_swizzle(obj))
03ac84f1 2196 i915_gem_object_save_bit_17_swizzle(obj, pages);
280b713b 2197
03ac84f1 2198 for_each_sgt_page(page, sgt_iter, pages) {
a4f5ea64 2199 if (obj->mm.dirty)
9da3da66 2200 set_page_dirty(page);
3ef94daa 2201
a4f5ea64 2202 if (obj->mm.madv == I915_MADV_WILLNEED)
9da3da66 2203 mark_page_accessed(page);
3ef94daa 2204
09cbfeaf 2205 put_page(page);
3ef94daa 2206 }
a4f5ea64 2207 obj->mm.dirty = false;
673a394b 2208
03ac84f1
CW
2209 sg_free_table(pages);
2210 kfree(pages);
37e680a1 2211}
6c085a72 2212
96d77634
CW
2213static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
2214{
2215 struct radix_tree_iter iter;
2216 void **slot;
2217
a4f5ea64
CW
2218 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
2219 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
96d77634
CW
2220}
2221
548625ee
CW
2222void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2223 enum i915_mm_subclass subclass)
37e680a1 2224{
03ac84f1 2225 struct sg_table *pages;
37e680a1 2226
a4f5ea64 2227 if (i915_gem_object_has_pinned_pages(obj))
03ac84f1 2228 return;
a5570178 2229
15717de2 2230 GEM_BUG_ON(obj->bind_count);
1233e2db
CW
2231 if (!READ_ONCE(obj->mm.pages))
2232 return;
2233
2234 /* May be called by shrinker from within get_pages() (on another bo) */
548625ee 2235 mutex_lock_nested(&obj->mm.lock, subclass);
1233e2db
CW
2236 if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
2237 goto unlock;
3e123027 2238
a2165e31
CW
2239 /* ->put_pages might need to allocate memory for the bit17 swizzle
2240 * array, hence protect them from being reaped by removing them from gtt
2241 * lists early. */
03ac84f1
CW
2242 pages = fetch_and_zero(&obj->mm.pages);
2243 GEM_BUG_ON(!pages);
a2165e31 2244
a4f5ea64 2245 if (obj->mm.mapping) {
4b30cb23
CW
2246 void *ptr;
2247
0ce81788 2248 ptr = page_mask_bits(obj->mm.mapping);
4b30cb23
CW
2249 if (is_vmalloc_addr(ptr))
2250 vunmap(ptr);
fb8621d3 2251 else
4b30cb23
CW
2252 kunmap(kmap_to_page(ptr));
2253
a4f5ea64 2254 obj->mm.mapping = NULL;
0a798eb9
CW
2255 }
2256
96d77634
CW
2257 __i915_gem_object_reset_page_iter(obj);
2258
4e5462ee
CW
2259 if (!IS_ERR(pages))
2260 obj->ops->put_pages(obj, pages);
2261
1233e2db
CW
2262unlock:
2263 mutex_unlock(&obj->mm.lock);
6c085a72
CW
2264}
2265
935a2f77 2266static bool i915_sg_trim(struct sg_table *orig_st)
0c40ce13
TU
2267{
2268 struct sg_table new_st;
2269 struct scatterlist *sg, *new_sg;
2270 unsigned int i;
2271
2272 if (orig_st->nents == orig_st->orig_nents)
935a2f77 2273 return false;
0c40ce13 2274
8bfc478f 2275 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
935a2f77 2276 return false;
0c40ce13
TU
2277
2278 new_sg = new_st.sgl;
2279 for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
2280 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
2281 /* called before being DMA mapped, no need to copy sg->dma_* */
2282 new_sg = sg_next(new_sg);
2283 }
c2dc6cc9 2284 GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
0c40ce13
TU
2285
2286 sg_free_table(orig_st);
2287
2288 *orig_st = new_st;
935a2f77 2289 return true;
0c40ce13
TU
2290}
2291
03ac84f1 2292static struct sg_table *
6c085a72 2293i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
e5281ccd 2294{
fac5e23e 2295 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
d766ef53
CW
2296 const unsigned long page_count = obj->base.size / PAGE_SIZE;
2297 unsigned long i;
e5281ccd 2298 struct address_space *mapping;
9da3da66
CW
2299 struct sg_table *st;
2300 struct scatterlist *sg;
85d1225e 2301 struct sgt_iter sgt_iter;
e5281ccd 2302 struct page *page;
90797e6d 2303 unsigned long last_pfn = 0; /* suppress gcc warning */
4ff340f0 2304 unsigned int max_segment;
4846bf0c 2305 gfp_t noreclaim;
e2273302 2306 int ret;
e5281ccd 2307
6c085a72
CW
2308 /* Assert that the object is not currently in any GPU domain. As it
2309 * wasn't in the GTT, there shouldn't be any way it could have been in
2310 * a GPU cache
2311 */
03ac84f1
CW
2312 GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2313 GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
6c085a72 2314
7453c549 2315 max_segment = swiotlb_max_segment();
871dfbd6 2316 if (!max_segment)
4ff340f0 2317 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
871dfbd6 2318
9da3da66
CW
2319 st = kmalloc(sizeof(*st), GFP_KERNEL);
2320 if (st == NULL)
03ac84f1 2321 return ERR_PTR(-ENOMEM);
9da3da66 2322
d766ef53 2323rebuild_st:
9da3da66 2324 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
9da3da66 2325 kfree(st);
03ac84f1 2326 return ERR_PTR(-ENOMEM);
9da3da66 2327 }
e5281ccd 2328
9da3da66
CW
2329 /* Get the list of pages out of our struct file. They'll be pinned
2330 * at this point until we release them.
2331 *
2332 * Fail silently without starting the shrinker
2333 */
93c76a3d 2334 mapping = obj->base.filp->f_mapping;
0f6ab55d 2335 noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
4846bf0c
CW
2336 noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
2337
90797e6d
ID
2338 sg = st->sgl;
2339 st->nents = 0;
2340 for (i = 0; i < page_count; i++) {
4846bf0c
CW
2341 const unsigned int shrink[] = {
2342 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
2343 0,
2344 }, *s = shrink;
2345 gfp_t gfp = noreclaim;
2346
2347 do {
6c085a72 2348 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
4846bf0c
CW
2349 if (likely(!IS_ERR(page)))
2350 break;
2351
2352 if (!*s) {
2353 ret = PTR_ERR(page);
2354 goto err_sg;
2355 }
2356
2357 i915_gem_shrink(dev_priv, 2 * page_count, *s++);
2358 cond_resched();
24f8e00a 2359
6c085a72
CW
2360 /* We've tried hard to allocate the memory by reaping
2361 * our own buffer; now let the real VM do its job and
2362 * go down in flames if truly OOM.
24f8e00a
CW
2363 *
2364 * However, since graphics tend to be disposable,
2365 * defer the oom here by reporting the ENOMEM back
2366 * to userspace.
6c085a72 2367 */
4846bf0c
CW
2368 if (!*s) {
2369 /* reclaim and warn, but no oom */
2370 gfp = mapping_gfp_mask(mapping);
eaf41801
CW
2371
2372 /* Our bo are always dirty and so we require
2373 * kswapd to reclaim our pages (direct reclaim
2374 * does not effectively begin pageout of our
2375 * buffers on its own). However, direct reclaim
2376 * only waits for kswapd when under allocation
2377 * congestion. So as a result __GFP_RECLAIM is
2378 * unreliable and fails to actually reclaim our
2379 * dirty pages -- unless you try over and over
2380 * again with !__GFP_NORETRY. However, we still
2381 * want to fail this allocation rather than
2382 * trigger the out-of-memory killer and for
dbb32956 2383 * this we want __GFP_RETRY_MAYFAIL.
eaf41801 2384 */
dbb32956 2385 gfp |= __GFP_RETRY_MAYFAIL;
e2273302 2386 }
4846bf0c
CW
2387 } while (1);
2388
871dfbd6
CW
2389 if (!i ||
2390 sg->length >= max_segment ||
2391 page_to_pfn(page) != last_pfn + 1) {
90797e6d
ID
2392 if (i)
2393 sg = sg_next(sg);
2394 st->nents++;
2395 sg_set_page(sg, page, PAGE_SIZE, 0);
2396 } else {
2397 sg->length += PAGE_SIZE;
2398 }
2399 last_pfn = page_to_pfn(page);
3bbbe706
DV
2400
2401 /* Check that the i965g/gm workaround works. */
2402 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
e5281ccd 2403 }
871dfbd6 2404 if (sg) /* loop terminated early; short sg table */
426729dc 2405 sg_mark_end(sg);
74ce6b6c 2406
0c40ce13
TU
2407 /* Trim unused sg entries to avoid wasting memory. */
2408 i915_sg_trim(st);
2409
03ac84f1 2410 ret = i915_gem_gtt_prepare_pages(obj, st);
d766ef53
CW
2411 if (ret) {
2412 /* DMA remapping failed? One possible cause is that
2413 * it could not reserve enough large entries; asking
2414 * for PAGE_SIZE chunks instead may be helpful.
2415 */
2416 if (max_segment > PAGE_SIZE) {
2417 for_each_sgt_page(page, sgt_iter, st)
2418 put_page(page);
2419 sg_free_table(st);
2420
2421 max_segment = PAGE_SIZE;
2422 goto rebuild_st;
2423 } else {
2424 dev_warn(&dev_priv->drm.pdev->dev,
2425 "Failed to DMA remap %lu pages\n",
2426 page_count);
2427 goto err_pages;
2428 }
2429 }
e2273302 2430
6dacfd2f 2431 if (i915_gem_object_needs_bit17_swizzle(obj))
03ac84f1 2432 i915_gem_object_do_bit_17_swizzle(obj, st);
e5281ccd 2433
03ac84f1 2434 return st;
e5281ccd 2435
b17993b7 2436err_sg:
90797e6d 2437 sg_mark_end(sg);
b17993b7 2438err_pages:
85d1225e
DG
2439 for_each_sgt_page(page, sgt_iter, st)
2440 put_page(page);
9da3da66
CW
2441 sg_free_table(st);
2442 kfree(st);
0820baf3
CW
2443
2444 /* shmemfs first checks if there is enough memory to allocate the page
2445 * and reports ENOSPC should there be insufficient, along with the usual
2446 * ENOMEM for a genuine allocation failure.
2447 *
2448 * We use ENOSPC in our driver to mean that we have run out of aperture
2449 * space and so want to translate the error from shmemfs back to our
2450 * usual understanding of ENOMEM.
2451 */
e2273302
ID
2452 if (ret == -ENOSPC)
2453 ret = -ENOMEM;
2454
03ac84f1
CW
2455 return ERR_PTR(ret);
2456}
2457
2458void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
2459 struct sg_table *pages)
2460{
1233e2db 2461 lockdep_assert_held(&obj->mm.lock);
03ac84f1
CW
2462
2463 obj->mm.get_page.sg_pos = pages->sgl;
2464 obj->mm.get_page.sg_idx = 0;
2465
2466 obj->mm.pages = pages;
2c3a3f44
CW
2467
2468 if (i915_gem_object_is_tiled(obj) &&
2469 to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
2470 GEM_BUG_ON(obj->mm.quirked);
2471 __i915_gem_object_pin_pages(obj);
2472 obj->mm.quirked = true;
2473 }
03ac84f1
CW
2474}
2475
2476static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2477{
2478 struct sg_table *pages;
2479
2c3a3f44
CW
2480 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2481
03ac84f1
CW
2482 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
2483 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2484 return -EFAULT;
2485 }
2486
2487 pages = obj->ops->get_pages(obj);
2488 if (unlikely(IS_ERR(pages)))
2489 return PTR_ERR(pages);
2490
2491 __i915_gem_object_set_pages(obj, pages);
2492 return 0;
673a394b
EA
2493}
2494
37e680a1 2495/* Ensure that the associated pages are gathered from the backing storage
1233e2db 2496 * and pinned into our object. i915_gem_object_pin_pages() may be called
37e680a1 2497 * multiple times before they are released by a single call to
1233e2db 2498 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
37e680a1
CW
2499 * either as a result of memory pressure (reaping pages under the shrinker)
2500 * or as the object is itself released.
2501 */
a4f5ea64 2502int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
37e680a1 2503{
03ac84f1 2504 int err;
37e680a1 2505
1233e2db
CW
2506 err = mutex_lock_interruptible(&obj->mm.lock);
2507 if (err)
2508 return err;
4c7d62c6 2509
4e5462ee 2510 if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
2c3a3f44
CW
2511 err = ____i915_gem_object_get_pages(obj);
2512 if (err)
2513 goto unlock;
37e680a1 2514
2c3a3f44
CW
2515 smp_mb__before_atomic();
2516 }
2517 atomic_inc(&obj->mm.pages_pin_count);
ee286370 2518
1233e2db
CW
2519unlock:
2520 mutex_unlock(&obj->mm.lock);
03ac84f1 2521 return err;
673a394b
EA
2522}
2523
dd6034c6 2524/* The 'mapping' part of i915_gem_object_pin_map() below */
d31d7cb1
CW
2525static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2526 enum i915_map_type type)
dd6034c6
DG
2527{
2528 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
a4f5ea64 2529 struct sg_table *sgt = obj->mm.pages;
85d1225e
DG
2530 struct sgt_iter sgt_iter;
2531 struct page *page;
b338fa47
DG
2532 struct page *stack_pages[32];
2533 struct page **pages = stack_pages;
dd6034c6 2534 unsigned long i = 0;
d31d7cb1 2535 pgprot_t pgprot;
dd6034c6
DG
2536 void *addr;
2537
2538 /* A single page can always be kmapped */
d31d7cb1 2539 if (n_pages == 1 && type == I915_MAP_WB)
dd6034c6
DG
2540 return kmap(sg_page(sgt->sgl));
2541
b338fa47
DG
2542 if (n_pages > ARRAY_SIZE(stack_pages)) {
2543 /* Too big for stack -- allocate temporary array instead */
2098105e 2544 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_TEMPORARY);
b338fa47
DG
2545 if (!pages)
2546 return NULL;
2547 }
dd6034c6 2548
85d1225e
DG
2549 for_each_sgt_page(page, sgt_iter, sgt)
2550 pages[i++] = page;
dd6034c6
DG
2551
2552 /* Check that we have the expected number of pages */
2553 GEM_BUG_ON(i != n_pages);
2554
d31d7cb1 2555 switch (type) {
3b24e7e8
CW
2556 default:
2557 MISSING_CASE(type);
2558 /* fallthrough to use PAGE_KERNEL anyway */
d31d7cb1
CW
2559 case I915_MAP_WB:
2560 pgprot = PAGE_KERNEL;
2561 break;
2562 case I915_MAP_WC:
2563 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2564 break;
2565 }
2566 addr = vmap(pages, n_pages, 0, pgprot);
dd6034c6 2567
b338fa47 2568 if (pages != stack_pages)
2098105e 2569 kvfree(pages);
dd6034c6
DG
2570
2571 return addr;
2572}
2573
2574/* get, pin, and map the pages of the object into kernel space */
d31d7cb1
CW
2575void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2576 enum i915_map_type type)
0a798eb9 2577{
d31d7cb1
CW
2578 enum i915_map_type has_type;
2579 bool pinned;
2580 void *ptr;
0a798eb9
CW
2581 int ret;
2582
d31d7cb1 2583 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
0a798eb9 2584
1233e2db 2585 ret = mutex_lock_interruptible(&obj->mm.lock);
0a798eb9
CW
2586 if (ret)
2587 return ERR_PTR(ret);
2588
3b24e7e8
CW
2589 pinned = !(type & I915_MAP_OVERRIDE);
2590 type &= ~I915_MAP_OVERRIDE;
2591
1233e2db 2592 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
4e5462ee 2593 if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
2c3a3f44
CW
2594 ret = ____i915_gem_object_get_pages(obj);
2595 if (ret)
2596 goto err_unlock;
1233e2db 2597
2c3a3f44
CW
2598 smp_mb__before_atomic();
2599 }
2600 atomic_inc(&obj->mm.pages_pin_count);
1233e2db
CW
2601 pinned = false;
2602 }
2603 GEM_BUG_ON(!obj->mm.pages);
0a798eb9 2604
0ce81788 2605 ptr = page_unpack_bits(obj->mm.mapping, &has_type);
d31d7cb1
CW
2606 if (ptr && has_type != type) {
2607 if (pinned) {
2608 ret = -EBUSY;
1233e2db 2609 goto err_unpin;
0a798eb9 2610 }
d31d7cb1
CW
2611
2612 if (is_vmalloc_addr(ptr))
2613 vunmap(ptr);
2614 else
2615 kunmap(kmap_to_page(ptr));
2616
a4f5ea64 2617 ptr = obj->mm.mapping = NULL;
0a798eb9
CW
2618 }
2619
d31d7cb1
CW
2620 if (!ptr) {
2621 ptr = i915_gem_object_map(obj, type);
2622 if (!ptr) {
2623 ret = -ENOMEM;
1233e2db 2624 goto err_unpin;
d31d7cb1
CW
2625 }
2626
0ce81788 2627 obj->mm.mapping = page_pack_bits(ptr, type);
d31d7cb1
CW
2628 }
2629
1233e2db
CW
2630out_unlock:
2631 mutex_unlock(&obj->mm.lock);
d31d7cb1
CW
2632 return ptr;
2633
1233e2db
CW
2634err_unpin:
2635 atomic_dec(&obj->mm.pages_pin_count);
2636err_unlock:
2637 ptr = ERR_PTR(ret);
2638 goto out_unlock;
0a798eb9
CW
2639}
2640
7c55e2c5
CW
2641static int
2642i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2643 const struct drm_i915_gem_pwrite *arg)
2644{
2645 struct address_space *mapping = obj->base.filp->f_mapping;
2646 char __user *user_data = u64_to_user_ptr(arg->data_ptr);
2647 u64 remain, offset;
2648 unsigned int pg;
2649
2650 /* Before we instantiate/pin the backing store for our use, we
2651 * can prepopulate the shmemfs filp efficiently using a write into
2652 * the pagecache. We avoid the penalty of instantiating all the
2653 * pages, important if the user is just writing to a few and never
2654 * uses the object on the GPU, and using a direct write into shmemfs
2655 * allows it to avoid the cost of retrieving a page (either swapin
2656 * or clearing-before-use) before it is overwritten.
2657 */
2658 if (READ_ONCE(obj->mm.pages))
2659 return -ENODEV;
2660
2661 /* Before the pages are instantiated the object is treated as being
2662 * in the CPU domain. The pages will be clflushed as required before
2663 * use, and we can freely write into the pages directly. If userspace
2664 * races pwrite with any other operation, corruption will ensue -
2665 * that is userspace's prerogative!
2666 */
2667
2668 remain = arg->size;
2669 offset = arg->offset;
2670 pg = offset_in_page(offset);
2671
2672 do {
2673 unsigned int len, unwritten;
2674 struct page *page;
2675 void *data, *vaddr;
2676 int err;
2677
2678 len = PAGE_SIZE - pg;
2679 if (len > remain)
2680 len = remain;
2681
2682 err = pagecache_write_begin(obj->base.filp, mapping,
2683 offset, len, 0,
2684 &page, &data);
2685 if (err < 0)
2686 return err;
2687
2688 vaddr = kmap(page);
2689 unwritten = copy_from_user(vaddr + pg, user_data, len);
2690 kunmap(page);
2691
2692 err = pagecache_write_end(obj->base.filp, mapping,
2693 offset, len, len - unwritten,
2694 page, data);
2695 if (err < 0)
2696 return err;
2697
2698 if (unwritten)
2699 return -EFAULT;
2700
2701 remain -= len;
2702 user_data += len;
2703 offset += len;
2704 pg = 0;
2705 } while (remain);
2706
2707 return 0;
2708}
2709
77b25a97
CW
2710static bool ban_context(const struct i915_gem_context *ctx,
2711 unsigned int score)
be62acb4 2712{
6095868a 2713 return (i915_gem_context_is_bannable(ctx) &&
77b25a97 2714 score >= CONTEXT_SCORE_BAN_THRESHOLD);
be62acb4
MK
2715}
2716
e5e1fc47 2717static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
aa60c664 2718{
77b25a97
CW
2719 unsigned int score;
2720 bool banned;
b083a087 2721
77b25a97 2722 atomic_inc(&ctx->guilty_count);
b083a087 2723
77b25a97
CW
2724 score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
2725 banned = ban_context(ctx, score);
2726 DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
2727 ctx->name, score, yesno(banned));
2728 if (!banned)
b083a087
MK
2729 return;
2730
77b25a97
CW
2731 i915_gem_context_set_banned(ctx);
2732 if (!IS_ERR_OR_NULL(ctx->file_priv)) {
2733 atomic_inc(&ctx->file_priv->context_bans);
2734 DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
2735 ctx->name, atomic_read(&ctx->file_priv->context_bans));
2736 }
e5e1fc47
MK
2737}
2738
2739static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
2740{
77b25a97 2741 atomic_inc(&ctx->active_count);
aa60c664
MK
2742}
2743
8d9fc7fd 2744struct drm_i915_gem_request *
0bc40be8 2745i915_gem_find_active_request(struct intel_engine_cs *engine)
9375e446 2746{
754c9fd5
CW
2747 struct drm_i915_gem_request *request, *active = NULL;
2748 unsigned long flags;
4db080f9 2749
f69a02c9
CW
2750 /* We are called by the error capture and reset at a random
2751 * point in time. In particular, note that neither is crucially
2752 * ordered with an interrupt. After a hang, the GPU is dead and we
2753 * assume that no more writes can happen (we waited long enough for
2754 * all writes that were in transaction to be flushed) - adding an
2755 * extra delay for a recent interrupt is pointless. Hence, we do
2756 * not need an engine->irq_seqno_barrier() before the seqno reads.
2757 */
754c9fd5 2758 spin_lock_irqsave(&engine->timeline->lock, flags);
73cb9701 2759 list_for_each_entry(request, &engine->timeline->requests, link) {
754c9fd5
CW
2760 if (__i915_gem_request_completed(request,
2761 request->global_seqno))
4db080f9 2762 continue;
aa60c664 2763
36193acd 2764 GEM_BUG_ON(request->engine != engine);
c00122f3
CW
2765 GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2766 &request->fence.flags));
754c9fd5
CW
2767
2768 active = request;
2769 break;
4db080f9 2770 }
754c9fd5 2771 spin_unlock_irqrestore(&engine->timeline->lock, flags);
b6b0fac0 2772
754c9fd5 2773 return active;
b6b0fac0
MK
2774}
2775
bf2f0436
MK
2776static bool engine_stalled(struct intel_engine_cs *engine)
2777{
2778 if (!engine->hangcheck.stalled)
2779 return false;
2780
2781 /* Check for possible seqno movement after hang declaration */
2782 if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
2783 DRM_DEBUG_DRIVER("%s pardoned\n", engine->name);
2784 return false;
2785 }
2786
2787 return true;
2788}
2789
a1ef70e1
MT
2790/*
2791 * Ensure irq handler finishes, and not run again.
2792 * Also return the active request so that we only search for it once.
2793 */
2794struct drm_i915_gem_request *
2795i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
2796{
2797 struct drm_i915_gem_request *request = NULL;
2798
2799 /* Prevent the signaler thread from updating the request
2800 * state (by calling dma_fence_signal) as we are processing
2801 * the reset. The write from the GPU of the seqno is
2802 * asynchronous and the signaler thread may see a different
2803 * value to us and declare the request complete, even though
2804 * the reset routine have picked that request as the active
2805 * (incomplete) request. This conflict is not handled
2806 * gracefully!
2807 */
2808 kthread_park(engine->breadcrumbs.signaler);
2809
2810 /* Prevent request submission to the hardware until we have
2811 * completed the reset in i915_gem_reset_finish(). If a request
2812 * is completed by one engine, it may then queue a request
2813 * to a second via its engine->irq_tasklet *just* as we are
2814 * calling engine->init_hw() and also writing the ELSP.
2815 * Turning off the engine->irq_tasklet until the reset is over
2816 * prevents the race.
2817 */
2818 tasklet_kill(&engine->irq_tasklet);
2819 tasklet_disable(&engine->irq_tasklet);
2820
2821 if (engine->irq_seqno_barrier)
2822 engine->irq_seqno_barrier(engine);
2823
d1d1ebf4
CW
2824 request = i915_gem_find_active_request(engine);
2825 if (request && request->fence.error == -EIO)
2826 request = ERR_PTR(-EIO); /* Previous reset failed! */
a1ef70e1
MT
2827
2828 return request;
2829}
2830
0e178aef 2831int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
4c965543
CW
2832{
2833 struct intel_engine_cs *engine;
a1ef70e1 2834 struct drm_i915_gem_request *request;
4c965543 2835 enum intel_engine_id id;
0e178aef 2836 int err = 0;
4c965543 2837
0e178aef 2838 for_each_engine(engine, dev_priv, id) {
a1ef70e1
MT
2839 request = i915_gem_reset_prepare_engine(engine);
2840 if (IS_ERR(request)) {
2841 err = PTR_ERR(request);
2842 continue;
0e178aef 2843 }
c64992e0
MT
2844
2845 engine->hangcheck.active_request = request;
0e178aef
CW
2846 }
2847
4c965543 2848 i915_gem_revoke_fences(dev_priv);
0e178aef
CW
2849
2850 return err;
4c965543
CW
2851}
2852
36193acd 2853static void skip_request(struct drm_i915_gem_request *request)
821ed7df
CW
2854{
2855 void *vaddr = request->ring->vaddr;
2856 u32 head;
2857
2858 /* As this request likely depends on state from the lost
2859 * context, clear out all the user operations leaving the
2860 * breadcrumb at the end (so we get the fence notifications).
2861 */
2862 head = request->head;
2863 if (request->postfix < head) {
2864 memset(vaddr + head, 0, request->ring->size - head);
2865 head = 0;
2866 }
2867 memset(vaddr + head, 0, request->postfix - head);
c0d5f32c
CW
2868
2869 dma_fence_set_error(&request->fence, -EIO);
821ed7df
CW
2870}
2871
36193acd
MK
2872static void engine_skip_context(struct drm_i915_gem_request *request)
2873{
2874 struct intel_engine_cs *engine = request->engine;
2875 struct i915_gem_context *hung_ctx = request->ctx;
2876 struct intel_timeline *timeline;
2877 unsigned long flags;
2878
2879 timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
2880
2881 spin_lock_irqsave(&engine->timeline->lock, flags);
2882 spin_lock(&timeline->lock);
2883
2884 list_for_each_entry_continue(request, &engine->timeline->requests, link)
2885 if (request->ctx == hung_ctx)
2886 skip_request(request);
2887
2888 list_for_each_entry(request, &timeline->requests, link)
2889 skip_request(request);
2890
2891 spin_unlock(&timeline->lock);
2892 spin_unlock_irqrestore(&engine->timeline->lock, flags);
2893}
2894
d1d1ebf4
CW
2895/* Returns the request if it was guilty of the hang */
2896static struct drm_i915_gem_request *
2897i915_gem_reset_request(struct intel_engine_cs *engine,
2898 struct drm_i915_gem_request *request)
61da5362 2899{
71895a08
MK
2900 /* The guilty request will get skipped on a hung engine.
2901 *
2902 * Users of client default contexts do not rely on logical
2903 * state preserved between batches so it is safe to execute
2904 * queued requests following the hang. Non default contexts
2905 * rely on preserved state, so skipping a batch loses the
2906 * evolution of the state and it needs to be considered corrupted.
2907 * Executing more queued batches on top of corrupted state is
2908 * risky. But we take the risk by trying to advance through
2909 * the queued requests in order to make the client behaviour
2910 * more predictable around resets, by not throwing away random
2911 * amount of batches it has prepared for execution. Sophisticated
2912 * clients can use gem_reset_stats_ioctl and dma fence status
2913 * (exported via sync_file info ioctl on explicit fences) to observe
2914 * when it loses the context state and should rebuild accordingly.
2915 *
2916 * The context ban, and ultimately the client ban, mechanism are safety
2917 * valves if client submission ends up resulting in nothing more than
2918 * subsequent hangs.
2919 */
2920
d1d1ebf4 2921 if (engine_stalled(engine)) {
61da5362
MK
2922 i915_gem_context_mark_guilty(request->ctx);
2923 skip_request(request);
d1d1ebf4
CW
2924
2925 /* If this context is now banned, skip all pending requests. */
2926 if (i915_gem_context_is_banned(request->ctx))
2927 engine_skip_context(request);
61da5362 2928 } else {
d1d1ebf4
CW
2929 /*
2930 * Since this is not the hung engine, it may have advanced
2931 * since the hang declaration. Double check by refinding
2932 * the active request at the time of the reset.
2933 */
2934 request = i915_gem_find_active_request(engine);
2935 if (request) {
2936 i915_gem_context_mark_innocent(request->ctx);
2937 dma_fence_set_error(&request->fence, -EAGAIN);
2938
2939 /* Rewind the engine to replay the incomplete rq */
2940 spin_lock_irq(&engine->timeline->lock);
2941 request = list_prev_entry(request, link);
2942 if (&request->link == &engine->timeline->requests)
2943 request = NULL;
2944 spin_unlock_irq(&engine->timeline->lock);
2945 }
61da5362
MK
2946 }
2947
d1d1ebf4 2948 return request;
61da5362
MK
2949}
2950
a1ef70e1
MT
2951void i915_gem_reset_engine(struct intel_engine_cs *engine,
2952 struct drm_i915_gem_request *request)
b6b0fac0 2953{
ed454f2c
CW
2954 engine->irq_posted = 0;
2955
d1d1ebf4
CW
2956 if (request)
2957 request = i915_gem_reset_request(engine, request);
2958
2959 if (request) {
c0dcb203
CW
2960 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
2961 engine->name, request->global_seqno);
c0dcb203 2962 }
821ed7df
CW
2963
2964 /* Setup the CS to resume from the breadcrumb of the hung request */
2965 engine->reset_hw(engine, request);
4db080f9 2966}
aa60c664 2967
d8027093 2968void i915_gem_reset(struct drm_i915_private *dev_priv)
4db080f9 2969{
821ed7df 2970 struct intel_engine_cs *engine;
3b3f1650 2971 enum intel_engine_id id;
608c1a52 2972
4c7d62c6
CW
2973 lockdep_assert_held(&dev_priv->drm.struct_mutex);
2974
821ed7df
CW
2975 i915_gem_retire_requests(dev_priv);
2976
2ae55738
CW
2977 for_each_engine(engine, dev_priv, id) {
2978 struct i915_gem_context *ctx;
2979
c64992e0 2980 i915_gem_reset_engine(engine, engine->hangcheck.active_request);
2ae55738
CW
2981 ctx = fetch_and_zero(&engine->last_retired_context);
2982 if (ctx)
2983 engine->context_unpin(engine, ctx);
2984 }
821ed7df 2985
4362f4f6 2986 i915_gem_restore_fences(dev_priv);
f2a91d1a
CW
2987
2988 if (dev_priv->gt.awake) {
2989 intel_sanitize_gt_powersave(dev_priv);
2990 intel_enable_gt_powersave(dev_priv);
2991 if (INTEL_GEN(dev_priv) >= 6)
2992 gen6_rps_busy(dev_priv);
2993 }
821ed7df
CW
2994}
2995
a1ef70e1
MT
2996void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
2997{
2998 tasklet_enable(&engine->irq_tasklet);
2999 kthread_unpark(engine->breadcrumbs.signaler);
3000}
3001
d8027093
CW
3002void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
3003{
1f7b847d
CW
3004 struct intel_engine_cs *engine;
3005 enum intel_engine_id id;
3006
d8027093 3007 lockdep_assert_held(&dev_priv->drm.struct_mutex);
1f7b847d 3008
fe3288b5 3009 for_each_engine(engine, dev_priv, id) {
c64992e0 3010 engine->hangcheck.active_request = NULL;
a1ef70e1 3011 i915_gem_reset_finish_engine(engine);
fe3288b5 3012 }
d8027093
CW
3013}
3014
821ed7df
CW
3015static void nop_submit_request(struct drm_i915_gem_request *request)
3016{
bf2eac3b 3017 GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
3cd9442f 3018 dma_fence_set_error(&request->fence, -EIO);
3dcf93f7
CW
3019 i915_gem_request_submit(request);
3020 intel_engine_init_global_seqno(request->engine, request->global_seqno);
821ed7df
CW
3021}
3022
2a20d6f8 3023static void engine_set_wedged(struct intel_engine_cs *engine)
821ed7df 3024{
3cd9442f
CW
3025 struct drm_i915_gem_request *request;
3026 unsigned long flags;
3027
20e4933c
CW
3028 /* We need to be sure that no thread is running the old callback as
3029 * we install the nop handler (otherwise we would submit a request
3030 * to hardware that will never complete). In order to prevent this
3031 * race, we wait until the machine is idle before making the swap
3032 * (using stop_machine()).
3033 */
821ed7df 3034 engine->submit_request = nop_submit_request;
70c2a24d 3035
3cd9442f
CW
3036 /* Mark all executing requests as skipped */
3037 spin_lock_irqsave(&engine->timeline->lock, flags);
3038 list_for_each_entry(request, &engine->timeline->requests, link)
36703e79
CW
3039 if (!i915_gem_request_completed(request))
3040 dma_fence_set_error(&request->fence, -EIO);
3cd9442f
CW
3041 spin_unlock_irqrestore(&engine->timeline->lock, flags);
3042
dcb4c12a
OM
3043 /*
3044 * Clear out the execlists queue before freeing the requests, as those
3045 * are the ones that keep the context and ringbuffer backing objects
3046 * pinned in place.
3047 */
dcb4c12a 3048
7de1691a 3049 if (i915.enable_execlists) {
77f0d0e9 3050 struct execlist_port *port = engine->execlist_port;
663f71e7 3051 unsigned long flags;
77f0d0e9 3052 unsigned int n;
663f71e7
CW
3053
3054 spin_lock_irqsave(&engine->timeline->lock, flags);
3055
77f0d0e9
CW
3056 for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
3057 i915_gem_request_put(port_request(&port[n]));
70c2a24d 3058 memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
20311bd3
CW
3059 engine->execlist_queue = RB_ROOT;
3060 engine->execlist_first = NULL;
663f71e7
CW
3061
3062 spin_unlock_irqrestore(&engine->timeline->lock, flags);
4ee056f4
CW
3063
3064 /* The port is checked prior to scheduling a tasklet, but
3065 * just in case we have suspended the tasklet to do the
3066 * wedging make sure that when it wakes, it decides there
3067 * is no work to do by clearing the irq_posted bit.
3068 */
3069 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
dcb4c12a 3070 }
5e32d748
CW
3071
3072 /* Mark all pending requests as complete so that any concurrent
3073 * (lockless) lookup doesn't try and wait upon the request as we
3074 * reset it.
3075 */
3076 intel_engine_init_global_seqno(engine,
3077 intel_engine_last_submit(engine));
673a394b
EA
3078}
3079
20e4933c 3080static int __i915_gem_set_wedged_BKL(void *data)
673a394b 3081{
20e4933c 3082 struct drm_i915_private *i915 = data;
e2f80391 3083 struct intel_engine_cs *engine;
3b3f1650 3084 enum intel_engine_id id;
673a394b 3085
20e4933c 3086 for_each_engine(engine, i915, id)
2a20d6f8 3087 engine_set_wedged(engine);
20e4933c 3088
3d7adbbf
CW
3089 set_bit(I915_WEDGED, &i915->gpu_error.flags);
3090 wake_up_all(&i915->gpu_error.reset_queue);
3091
20e4933c
CW
3092 return 0;
3093}
3094
3095void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
3096{
20e4933c 3097 stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
673a394b
EA
3098}
3099
2e8f9d32
CW
3100bool i915_gem_unset_wedged(struct drm_i915_private *i915)
3101{
3102 struct i915_gem_timeline *tl;
3103 int i;
3104
3105 lockdep_assert_held(&i915->drm.struct_mutex);
3106 if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
3107 return true;
3108
3109 /* Before unwedging, make sure that all pending operations
3110 * are flushed and errored out - we may have requests waiting upon
3111 * third party fences. We marked all inflight requests as EIO, and
3112 * every execbuf since returned EIO; for consistency we want all
3113 * the currently pending requests to also be marked as EIO, which
3114 * is done inside our nop_submit_request - and so we must wait.
3115 *
3116 * No more can be submitted until we reset the wedged bit.
3117 */
3118 list_for_each_entry(tl, &i915->gt.timelines, link) {
3119 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3120 struct drm_i915_gem_request *rq;
3121
3122 rq = i915_gem_active_peek(&tl->engine[i].last_request,
3123 &i915->drm.struct_mutex);
3124 if (!rq)
3125 continue;
3126
3127 /* We can't use our normal waiter as we want to
3128 * avoid recursively trying to handle the current
3129 * reset. The basic dma_fence_default_wait() installs
3130 * a callback for dma_fence_signal(), which is
3131 * triggered by our nop handler (indirectly, the
3132 * callback enables the signaler thread which is
3133 * woken by the nop_submit_request() advancing the seqno
3134 * and when the seqno passes the fence, the signaler
3135 * then signals the fence waking us up).
3136 */
3137 if (dma_fence_default_wait(&rq->fence, true,
3138 MAX_SCHEDULE_TIMEOUT) < 0)
3139 return false;
3140 }
3141 }
3142
3143 /* Undo nop_submit_request. We prevent all new i915 requests from
3144 * being queued (by disallowing execbuf whilst wedged) so having
3145 * waited for all active requests above, we know the system is idle
3146 * and do not have to worry about a thread being inside
3147 * engine->submit_request() as we swap over. So unlike installing
3148 * the nop_submit_request on reset, we can do this from normal
3149 * context and do not require stop_machine().
3150 */
3151 intel_engines_reset_default_submission(i915);
36703e79 3152 i915_gem_contexts_lost(i915);
2e8f9d32
CW
3153
3154 smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
3155 clear_bit(I915_WEDGED, &i915->gpu_error.flags);
3156
3157 return true;
3158}
3159
75ef9da2 3160static void
673a394b
EA
3161i915_gem_retire_work_handler(struct work_struct *work)
3162{
b29c19b6 3163 struct drm_i915_private *dev_priv =
67d97da3 3164 container_of(work, typeof(*dev_priv), gt.retire_work.work);
91c8a326 3165 struct drm_device *dev = &dev_priv->drm;
673a394b 3166
891b48cf 3167 /* Come back later if the device is busy... */
b29c19b6 3168 if (mutex_trylock(&dev->struct_mutex)) {
67d97da3 3169 i915_gem_retire_requests(dev_priv);
b29c19b6 3170 mutex_unlock(&dev->struct_mutex);
673a394b 3171 }
67d97da3
CW
3172
3173 /* Keep the retire handler running until we are finally idle.
3174 * We do not need to do this test under locking as in the worst-case
3175 * we queue the retire worker once too often.
3176 */
c9615613
CW
3177 if (READ_ONCE(dev_priv->gt.awake)) {
3178 i915_queue_hangcheck(dev_priv);
67d97da3
CW
3179 queue_delayed_work(dev_priv->wq,
3180 &dev_priv->gt.retire_work,
bcb45086 3181 round_jiffies_up_relative(HZ));
c9615613 3182 }
b29c19b6 3183}
0a58705b 3184
b29c19b6
CW
3185static void
3186i915_gem_idle_work_handler(struct work_struct *work)
3187{
3188 struct drm_i915_private *dev_priv =
67d97da3 3189 container_of(work, typeof(*dev_priv), gt.idle_work.work);
91c8a326 3190 struct drm_device *dev = &dev_priv->drm;
67d97da3
CW
3191 bool rearm_hangcheck;
3192
3193 if (!READ_ONCE(dev_priv->gt.awake))
3194 return;
3195
0cb5670b
ID
3196 /*
3197 * Wait for last execlists context complete, but bail out in case a
3198 * new request is submitted.
3199 */
8490ae20 3200 wait_for(intel_engines_are_idle(dev_priv), 10);
28176ef4 3201 if (READ_ONCE(dev_priv->gt.active_requests))
67d97da3
CW
3202 return;
3203
3204 rearm_hangcheck =
3205 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
3206
3207 if (!mutex_trylock(&dev->struct_mutex)) {
3208 /* Currently busy, come back later */
3209 mod_delayed_work(dev_priv->wq,
3210 &dev_priv->gt.idle_work,
3211 msecs_to_jiffies(50));
3212 goto out_rearm;
3213 }
3214
93c97dc1
ID
3215 /*
3216 * New request retired after this work handler started; extend active
3217 * period until next instance of the work.
3218 */
3219 if (work_pending(work))
3220 goto out_unlock;
3221
28176ef4 3222 if (dev_priv->gt.active_requests)
67d97da3 3223 goto out_unlock;
b29c19b6 3224
05425249 3225 if (wait_for(intel_engines_are_idle(dev_priv), 10))
0cb5670b
ID
3226 DRM_ERROR("Timeout waiting for engines to idle\n");
3227
6c067579 3228 intel_engines_mark_idle(dev_priv);
47979480 3229 i915_gem_timelines_mark_idle(dev_priv);
35c94185 3230
67d97da3
CW
3231 GEM_BUG_ON(!dev_priv->gt.awake);
3232 dev_priv->gt.awake = false;
3233 rearm_hangcheck = false;
30ecad77 3234
67d97da3
CW
3235 if (INTEL_GEN(dev_priv) >= 6)
3236 gen6_rps_idle(dev_priv);
3237 intel_runtime_pm_put(dev_priv);
3238out_unlock:
3239 mutex_unlock(&dev->struct_mutex);
b29c19b6 3240
67d97da3
CW
3241out_rearm:
3242 if (rearm_hangcheck) {
3243 GEM_BUG_ON(!dev_priv->gt.awake);
3244 i915_queue_hangcheck(dev_priv);
35c94185 3245 }
673a394b
EA
3246}
3247
b1f788c6
CW
3248void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
3249{
d1b48c1e 3250 struct drm_i915_private *i915 = to_i915(gem->dev);
b1f788c6
CW
3251 struct drm_i915_gem_object *obj = to_intel_bo(gem);
3252 struct drm_i915_file_private *fpriv = file->driver_priv;
d1b48c1e 3253 struct i915_lut_handle *lut, *ln;
b1f788c6 3254
d1b48c1e
CW
3255 mutex_lock(&i915->drm.struct_mutex);
3256
3257 list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
3258 struct i915_gem_context *ctx = lut->ctx;
3259 struct i915_vma *vma;
3260
3261 if (ctx->file_priv != fpriv)
3262 continue;
3263
3264 vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
3265
3266 if (!i915_vma_is_ggtt(vma))
b1f788c6 3267 i915_vma_close(vma);
f8a7fde4 3268
d1b48c1e
CW
3269 list_del(&lut->obj_link);
3270 list_del(&lut->ctx_link);
4ff4b44c 3271
d1b48c1e
CW
3272 kmem_cache_free(i915->luts, lut);
3273 __i915_gem_object_release_unless_active(obj);
f8a7fde4 3274 }
d1b48c1e
CW
3275
3276 mutex_unlock(&i915->drm.struct_mutex);
b1f788c6
CW
3277}
3278
e95433c7
CW
3279static unsigned long to_wait_timeout(s64 timeout_ns)
3280{
3281 if (timeout_ns < 0)
3282 return MAX_SCHEDULE_TIMEOUT;
3283
3284 if (timeout_ns == 0)
3285 return 0;
3286
3287 return nsecs_to_jiffies_timeout(timeout_ns);
3288}
3289
23ba4fd0
BW
3290/**
3291 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
14bb2c11
TU
3292 * @dev: drm device pointer
3293 * @data: ioctl data blob
3294 * @file: drm file pointer
23ba4fd0
BW
3295 *
3296 * Returns 0 if successful, else an error is returned with the remaining time in
3297 * the timeout parameter.
3298 * -ETIME: object is still busy after timeout
3299 * -ERESTARTSYS: signal interrupted the wait
3301 * -ENOENT: object doesn't exist
3301 * Also possible, but rare:
b8050148 3302 * -EAGAIN: incomplete, restart syscall
23ba4fd0
BW
3303 * -ENOMEM: damn
3304 * -ENODEV: Internal IRQ fail
3305 * -E?: The add request failed
3306 *
3307 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3308 * non-zero timeout parameter the wait ioctl will wait for the given number of
3309 * nanoseconds on an object becoming unbusy. Since the wait itself does so
3310 * without holding struct_mutex the object may become re-busied before this
3311 * function completes. A similar but shorter race condition exists in the busy
3312 * ioctl.
3313 */
3314int
3315i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3316{
3317 struct drm_i915_gem_wait *args = data;
3318 struct drm_i915_gem_object *obj;
e95433c7
CW
3319 ktime_t start;
3320 long ret;
23ba4fd0 3321
11b5d511
DV
3322 if (args->flags != 0)
3323 return -EINVAL;
3324
03ac0642 3325 obj = i915_gem_object_lookup(file, args->bo_handle);
033d549b 3326 if (!obj)
23ba4fd0 3327 return -ENOENT;
23ba4fd0 3328
e95433c7
CW
3329 start = ktime_get();
3330
3331 ret = i915_gem_object_wait(obj,
3332 I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
3333 to_wait_timeout(args->timeout_ns),
3334 to_rps_client(file));
3335
3336 if (args->timeout_ns > 0) {
3337 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
3338 if (args->timeout_ns < 0)
3339 args->timeout_ns = 0;
c1d2061b
CW
3340
3341 /*
3342 * Apparently ktime isn't accurate enough and occasionally has a
3343 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
3344 * things up to make the test happy. We allow up to 1 jiffy.
3345 *
3346 * This is a regression from the timespec->ktime conversion.
3347 */
3348 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
3349 args->timeout_ns = 0;
b8050148
CW
3350
3351 /* Asked to wait beyond the jiffie/scheduler precision? */
3352 if (ret == -ETIME && args->timeout_ns)
3353 ret = -EAGAIN;
b4716185
CW
3354 }
3355
f0cd5182 3356 i915_gem_object_put(obj);
ff865885 3357 return ret;
23ba4fd0
BW
3358}
3359
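/*
 * Userspace reaches the wait above through DRM_IOCTL_I915_GEM_WAIT. A
 * minimal sketch, illustrative only and not part of the driver, where
 * 'fd' is an open i915 device, 'handle' is a GEM handle owned by the
 * caller, drmIoctl() comes from libdrm, and the budget chosen here is
 * 100ms (100 * 1000 * 1000 ns):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 100 * 1000 * 1000,
 *	};
 *	int err = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * On success the object went idle within the budget; on -ETIME it is
 * still busy and the unused portion of the budget (possibly zero) is
 * written back to wait.timeout_ns, as described above.
 */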
73cb9701 3360static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
4df2faf4 3361{
73cb9701 3362 int ret, i;
4df2faf4 3363
73cb9701
CW
3364 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3365 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
3366 if (ret)
3367 return ret;
3368 }
62e63007 3369
73cb9701
CW
3370 return 0;
3371}
3372
25112b64
CW
3373static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms)
3374{
3375 return wait_for(intel_engine_is_idle(engine), timeout_ms);
3376}
3377
3378static int wait_for_engines(struct drm_i915_private *i915)
3379{
3380 struct intel_engine_cs *engine;
3381 enum intel_engine_id id;
3382
3383 for_each_engine(engine, i915, id) {
3384 if (GEM_WARN_ON(wait_for_engine(engine, 50))) {
3385 i915_gem_set_wedged(i915);
3386 return -EIO;
3387 }
3388
3389 GEM_BUG_ON(intel_engine_get_seqno(engine) !=
3390 intel_engine_last_submit(engine));
3391 }
3392
3393 return 0;
3394}
3395
73cb9701
CW
3396int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
3397{
73cb9701
CW
3398 int ret;
3399
863e9fde
CW
3400 /* If the device is asleep, we have no requests outstanding */
3401 if (!READ_ONCE(i915->gt.awake))
3402 return 0;
3403
9caa34aa
CW
3404 if (flags & I915_WAIT_LOCKED) {
3405 struct i915_gem_timeline *tl;
3406
3407 lockdep_assert_held(&i915->drm.struct_mutex);
3408
3409 list_for_each_entry(tl, &i915->gt.timelines, link) {
3410 ret = wait_for_timeline(tl, flags);
3411 if (ret)
3412 return ret;
3413 }
72022a70
CW
3414
3415 i915_gem_retire_requests(i915);
3416 GEM_BUG_ON(i915->gt.active_requests);
25112b64
CW
3417
3418 ret = wait_for_engines(i915);
9caa34aa
CW
3419 } else {
3420 ret = wait_for_timeline(&i915->gt.global_timeline, flags);
1ec14ad3 3421 }
4df2faf4 3422
25112b64 3423 return ret;
4df2faf4
DV
3424}
3425
5a97bcc6
CW
3426static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
3427{
e27ab73d
CW
3428 /*
3429 * We manually flush the CPU domain so that we can override and
3430 * force the flush for the display, and perform it asynchronously.
3431 */
3432 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3433 if (obj->cache_dirty)
3434 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
5a97bcc6
CW
3435 obj->base.write_domain = 0;
3436}
3437
3438void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
3439{
3440 if (!READ_ONCE(obj->pin_display))
3441 return;
3442
3443 mutex_lock(&obj->base.dev->struct_mutex);
3444 __i915_gem_object_flush_for_display(obj);
3445 mutex_unlock(&obj->base.dev->struct_mutex);
3446}
3447
e22d8e3c
CW
3448/**
3449 * Moves a single object to the WC read, and possibly write domain.
3450 * @obj: object to act on
3451 * @write: ask for write access or read only
3452 *
3453 * This function returns when the move is complete, including waiting on
3454 * flushes to occur.
3455 */
3456int
3457i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
3458{
3459 int ret;
3460
3461 lockdep_assert_held(&obj->base.dev->struct_mutex);
3462
3463 ret = i915_gem_object_wait(obj,
3464 I915_WAIT_INTERRUPTIBLE |
3465 I915_WAIT_LOCKED |
3466 (write ? I915_WAIT_ALL : 0),
3467 MAX_SCHEDULE_TIMEOUT,
3468 NULL);
3469 if (ret)
3470 return ret;
3471
3472 if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
3473 return 0;
3474
3475 /* Flush and acquire obj->pages so that we are coherent through
3476 * direct access in memory with previous cached writes through
3477 * shmemfs and that our cache domain tracking remains valid.
3478 * For example, if the obj->filp was moved to swap without us
3479 * being notified and releasing the pages, we would mistakenly
3480 * continue to assume that the obj remained out of the CPU cached
3481 * domain.
3482 */
3483 ret = i915_gem_object_pin_pages(obj);
3484 if (ret)
3485 return ret;
3486
3487 flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
3488
3489 /* Serialise direct access to this object with the barriers for
3490 * coherent writes from the GPU, by effectively invalidating the
3491 * WC domain upon first access.
3492 */
3493 if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
3494 mb();
3495
3496 /* It should now be out of any other write domains, and we can update
3497 * the domain values for our changes.
3498 */
3499 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
3500 obj->base.read_domains |= I915_GEM_DOMAIN_WC;
3501 if (write) {
3502 obj->base.read_domains = I915_GEM_DOMAIN_WC;
3503 obj->base.write_domain = I915_GEM_DOMAIN_WC;
3504 obj->mm.dirty = true;
3505 }
3506
3507 i915_gem_object_unpin_pages(obj);
3508 return 0;
3509}
3510
2ef7eeaa
EA
3511/**
3512 * Moves a single object to the GTT read, and possibly write domain.
14bb2c11
TU
3513 * @obj: object to act on
3514 * @write: ask for write access or read only
2ef7eeaa
EA
3515 *
3516 * This function returns when the move is complete, including waiting on
3517 * flushes to occur.
3518 */
79e53945 3519int
2021746e 3520i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2ef7eeaa 3521{
e47c68e9 3522 int ret;
2ef7eeaa 3523
e95433c7 3524 lockdep_assert_held(&obj->base.dev->struct_mutex);
4c7d62c6 3525
e95433c7
CW
3526 ret = i915_gem_object_wait(obj,
3527 I915_WAIT_INTERRUPTIBLE |
3528 I915_WAIT_LOCKED |
3529 (write ? I915_WAIT_ALL : 0),
3530 MAX_SCHEDULE_TIMEOUT,
3531 NULL);
88241785
CW
3532 if (ret)
3533 return ret;
3534
c13d87ea
CW
3535 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3536 return 0;
3537
43566ded
CW
3538 /* Flush and acquire obj->pages so that we are coherent through
3539 * direct access in memory with previous cached writes through
3540 * shmemfs and that our cache domain tracking remains valid.
3541 * For example, if the obj->filp was moved to swap without us
3542 * being notified and releasing the pages, we would mistakenly
3543 * continue to assume that the obj remained out of the CPU cached
3544 * domain.
3545 */
a4f5ea64 3546 ret = i915_gem_object_pin_pages(obj);
43566ded
CW
3547 if (ret)
3548 return ret;
3549
ef74921b 3550 flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
1c5d22f7 3551
d0a57789
CW
3552 /* Serialise direct access to this object with the barriers for
3553 * coherent writes from the GPU, by effectively invalidating the
3554 * GTT domain upon first access.
3555 */
3556 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3557 mb();
3558
e47c68e9
EA
3559 /* It should now be out of any other write domains, and we can update
3560 * the domain values for our changes.
3561 */
40e62d5d 3562 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
05394f39 3563 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
e47c68e9 3564 if (write) {
05394f39
CW
3565 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3566 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
a4f5ea64 3567 obj->mm.dirty = true;
2ef7eeaa
EA
3568 }
3569
a4f5ea64 3570 i915_gem_object_unpin_pages(obj);
e47c68e9
EA
3571 return 0;
3572}
3573
ef55f92a
CW
3574/**
3575 * Changes the cache-level of an object across all VMA.
14bb2c11
TU
3576 * @obj: object to act on
3577 * @cache_level: new cache level to set for the object
ef55f92a
CW
3578 *
3579 * After this function returns, the object will be in the new cache-level
3580 * across all GTT and the contents of the backing storage will be coherent
3581 * with respect to the new cache-level. In order to keep the backing storage
3582 * coherent for all users, we only allow a single cache level to be set
3583 * globally on the object and prevent it from being changed whilst the
3584 * hardware is reading from the object. That is, if the object is currently
3585 * on the scanout it will be set to uncached (or equivalent display
3586 * cache coherency) and all non-MOCS GPU access will also be uncached so
3587 * that all direct access to the scanout remains coherent.
3588 */
e4ffd173
CW
3589int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3590 enum i915_cache_level cache_level)
3591{
aa653a68 3592 struct i915_vma *vma;
a6a7cc4b 3593 int ret;
e4ffd173 3594
4c7d62c6
CW
3595 lockdep_assert_held(&obj->base.dev->struct_mutex);
3596
e4ffd173 3597 if (obj->cache_level == cache_level)
a6a7cc4b 3598 return 0;
e4ffd173 3599
ef55f92a
CW
3600 /* Inspect the list of currently bound VMA and unbind any that would
3601 * be invalid given the new cache-level. This is principally to
3602 * catch the issue of the CS prefetch crossing page boundaries and
3603 * reading an invalid PTE on older architectures.
3604 */
aa653a68
CW
3605restart:
3606 list_for_each_entry(vma, &obj->vma_list, obj_link) {
ef55f92a
CW
3607 if (!drm_mm_node_allocated(&vma->node))
3608 continue;
3609
20dfbde4 3610 if (i915_vma_is_pinned(vma)) {
ef55f92a
CW
3611 DRM_DEBUG("can not change the cache level of pinned objects\n");
3612 return -EBUSY;
3613 }
3614
aa653a68
CW
3615 if (i915_gem_valid_gtt_space(vma, cache_level))
3616 continue;
3617
3618 ret = i915_vma_unbind(vma);
3619 if (ret)
3620 return ret;
3621
3622 /* As unbinding may affect other elements in the
3623 * obj->vma_list (due to side-effects from retiring
3624 * an active vma), play safe and restart the iterator.
3625 */
3626 goto restart;
42d6ab48
CW
3627 }
3628
ef55f92a
CW
3629 /* We can reuse the existing drm_mm nodes but need to change the
3630 * cache-level on the PTE. We could simply unbind them all and
3631 * rebind with the correct cache-level on next use. However since
3632 * we already have a valid slot, dma mapping, pages etc, we may as well
3633 * rewrite the PTE in the belief that doing so tramples upon less
3634 * state and so involves less work.
3635 */
15717de2 3636 if (obj->bind_count) {
ef55f92a
CW
3637 /* Before we change the PTE, the GPU must not be accessing it.
3638 * If we wait upon the object, we know that all the bound
3639 * VMA are no longer active.
3640 */
e95433c7
CW
3641 ret = i915_gem_object_wait(obj,
3642 I915_WAIT_INTERRUPTIBLE |
3643 I915_WAIT_LOCKED |
3644 I915_WAIT_ALL,
3645 MAX_SCHEDULE_TIMEOUT,
3646 NULL);
e4ffd173
CW
3647 if (ret)
3648 return ret;
3649
0031fb96
TU
3650 if (!HAS_LLC(to_i915(obj->base.dev)) &&
3651 cache_level != I915_CACHE_NONE) {
ef55f92a
CW
3652 /* Access to snoopable pages through the GTT is
3653 * incoherent and on some machines causes a hard
3654 * lockup. Relinquish the CPU mmapping to force
3655 * userspace to refault in the pages and we can
3656 * then double check if the GTT mapping is still
3657 * valid for that pointer access.
3658 */
3659 i915_gem_release_mmap(obj);
3660
3661 /* As we no longer need a fence for GTT access,
3662 * we can relinquish it now (and so prevent having
3663 * to steal a fence from someone else on the next
3664 * fence request). Note GPU activity would have
3665 * dropped the fence as all snoopable access is
3666 * supposed to be linear.
3667 */
49ef5294
CW
3668 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3669 ret = i915_vma_put_fence(vma);
3670 if (ret)
3671 return ret;
3672 }
ef55f92a
CW
3673 } else {
3674 /* We either have incoherent backing store and
3675 * so no GTT access or the architecture is fully
3676 * coherent. In such cases, existing GTT mmaps
3677 * ignore the cache bit in the PTE and we can
3678 * rewrite it without confusing the GPU or having
3679 * to force userspace to fault back in its mmaps.
3680 */
e4ffd173
CW
3681 }
3682
1c7f4bca 3683 list_for_each_entry(vma, &obj->vma_list, obj_link) {
ef55f92a
CW
3684 if (!drm_mm_node_allocated(&vma->node))
3685 continue;
3686
3687 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3688 if (ret)
3689 return ret;
3690 }
e4ffd173
CW
3691 }
3692
1c7f4bca 3693 list_for_each_entry(vma, &obj->vma_list, obj_link)
2c22569b 3694 vma->node.color = cache_level;
b8f55be6 3695 i915_gem_object_set_cache_coherency(obj, cache_level);
e27ab73d 3696 obj->cache_dirty = true; /* Always invalidate stale cachelines */
2c22569b 3697
e4ffd173
CW
3698 return 0;
3699}
3700
199adf40
BW
3701int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3702 struct drm_file *file)
e6994aee 3703{
199adf40 3704 struct drm_i915_gem_caching *args = data;
e6994aee 3705 struct drm_i915_gem_object *obj;
fbbd37b3 3706 int err = 0;
e6994aee 3707
fbbd37b3
CW
3708 rcu_read_lock();
3709 obj = i915_gem_object_lookup_rcu(file, args->handle);
3710 if (!obj) {
3711 err = -ENOENT;
3712 goto out;
3713 }
e6994aee 3714
651d794f
CW
3715 switch (obj->cache_level) {
3716 case I915_CACHE_LLC:
3717 case I915_CACHE_L3_LLC:
3718 args->caching = I915_CACHING_CACHED;
3719 break;
3720
4257d3ba
CW
3721 case I915_CACHE_WT:
3722 args->caching = I915_CACHING_DISPLAY;
3723 break;
3724
651d794f
CW
3725 default:
3726 args->caching = I915_CACHING_NONE;
3727 break;
3728 }
fbbd37b3
CW
3729out:
3730 rcu_read_unlock();
3731 return err;
e6994aee
CW
3732}
3733
199adf40
BW
3734int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3735 struct drm_file *file)
e6994aee 3736{
9c870d03 3737 struct drm_i915_private *i915 = to_i915(dev);
199adf40 3738 struct drm_i915_gem_caching *args = data;
e6994aee
CW
3739 struct drm_i915_gem_object *obj;
3740 enum i915_cache_level level;
d65415df 3741 int ret = 0;
e6994aee 3742
199adf40
BW
3743 switch (args->caching) {
3744 case I915_CACHING_NONE:
e6994aee
CW
3745 level = I915_CACHE_NONE;
3746 break;
199adf40 3747 case I915_CACHING_CACHED:
e5756c10
ID
3748 /*
3749 * Due to a HW issue on BXT A stepping, GPU stores via a
3750 * snooped mapping may leave stale data in a corresponding CPU
3751 * cacheline, whereas normally such cachelines would get
3752 * invalidated.
3753 */
9c870d03 3754 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
e5756c10
ID
3755 return -ENODEV;
3756
e6994aee
CW
3757 level = I915_CACHE_LLC;
3758 break;
4257d3ba 3759 case I915_CACHING_DISPLAY:
9c870d03 3760 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
4257d3ba 3761 break;
e6994aee
CW
3762 default:
3763 return -EINVAL;
3764 }
3765
d65415df
CW
3766 obj = i915_gem_object_lookup(file, args->handle);
3767 if (!obj)
3768 return -ENOENT;
3769
3770 if (obj->cache_level == level)
3771 goto out;
3772
3773 ret = i915_gem_object_wait(obj,
3774 I915_WAIT_INTERRUPTIBLE,
3775 MAX_SCHEDULE_TIMEOUT,
3776 to_rps_client(file));
3bc2913e 3777 if (ret)
d65415df 3778 goto out;
3bc2913e 3779
d65415df
CW
3780 ret = i915_mutex_lock_interruptible(dev);
3781 if (ret)
3782 goto out;
e6994aee
CW
3783
3784 ret = i915_gem_object_set_cache_level(obj, level);
e6994aee 3785 mutex_unlock(&dev->struct_mutex);
d65415df
CW
3786
3787out:
3788 i915_gem_object_put(obj);
e6994aee
CW
3789 return ret;
3790}
3791
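/*
 * Userspace selects a cache level with DRM_IOCTL_I915_GEM_SET_CACHING
 * (and reads it back with the GET variant above). A minimal sketch,
 * illustrative only, assuming a valid 'fd' and GEM 'handle' and using
 * libdrm's drmIoctl():
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	int err = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *
 * As handled above, I915_CACHING_CACHED is rejected with -ENODEV on
 * platforms with neither LLC nor snooping, and I915_CACHING_DISPLAY
 * maps to write-through or uncached depending on hardware support.
 */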
b9241ea3 3792/*
2da3b9b9
CW
3793 * Prepare buffer for display plane (scanout, cursors, etc).
3794 * Can be called from an uninterruptible phase (modesetting) and allows
3795 * any flushes to be pipelined (for pageflips).
b9241ea3 3796 */
058d88c4 3797struct i915_vma *
2da3b9b9
CW
3798i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3799 u32 alignment,
e6617330 3800 const struct i915_ggtt_view *view)
b9241ea3 3801{
058d88c4 3802 struct i915_vma *vma;
b9241ea3
ZW
3803 int ret;
3804
4c7d62c6
CW
3805 lockdep_assert_held(&obj->base.dev->struct_mutex);
3806
cc98b413
CW
3807 /* Mark the pin_display early so that we account for the
3808 * display coherency whilst setting up the cache domains.
3809 */
8a0c39b1 3810 obj->pin_display++;
cc98b413 3811
a7ef0640
EA
3812 /* The display engine is not coherent with the LLC cache on gen6. As
3813 * a result, we make sure that the pinning that is about to occur is
3814 * done with uncached PTEs. This is the lowest common denominator for all
3815 * chipsets.
3816 *
3817 * However for gen6+, we could do better by using the GFDT bit instead
3818 * of uncaching, which would allow us to flush all the LLC-cached data
3819 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3820 */
651d794f 3821 ret = i915_gem_object_set_cache_level(obj,
8652744b
TU
3822 HAS_WT(to_i915(obj->base.dev)) ?
3823 I915_CACHE_WT : I915_CACHE_NONE);
058d88c4
CW
3824 if (ret) {
3825 vma = ERR_PTR(ret);
cc98b413 3826 goto err_unpin_display;
058d88c4 3827 }
a7ef0640 3828
2da3b9b9
CW
3829 /* As the user may map the buffer once pinned in the display plane
3830 * (e.g. libkms for the bootup splash), we have to ensure that we
2efb813d
CW
3831 * always use map_and_fenceable for all scanout buffers. However,
3832 * it may simply be too big to fit into mappable, in which case
3833 * put it anyway and hope that userspace can cope (but always first
3834 * try to preserve the existing ABI).
2da3b9b9 3835 */
2efb813d 3836 vma = ERR_PTR(-ENOSPC);
47a8e3f6 3837 if (!view || view->type == I915_GGTT_VIEW_NORMAL)
2efb813d
CW
3838 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3839 PIN_MAPPABLE | PIN_NONBLOCK);
767a222e
CW
3840 if (IS_ERR(vma)) {
3841 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3842 unsigned int flags;
3843
3844 /* Valleyview is definitely limited to scanning out the first
3845 * 512MiB. Let's presume this behaviour was inherited from the
3846 * g4x display engine and that all earlier gen are similarly
3847 * limited. Testing suggests that it is a little more
3848 * complicated than this. For example, Cherryview appears quite
3849 * happy to scanout from anywhere within its global aperture.
3850 */
3851 flags = 0;
3852 if (HAS_GMCH_DISPLAY(i915))
3853 flags = PIN_MAPPABLE;
3854 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
3855 }
058d88c4 3856 if (IS_ERR(vma))
cc98b413 3857 goto err_unpin_display;
2da3b9b9 3858
d8923dcf
CW
3859 vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3860
a6a7cc4b 3861 /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
5a97bcc6 3862 __i915_gem_object_flush_for_display(obj);
d59b21ec 3863 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
b118c1e3 3864
2da3b9b9
CW
3865 /* It should now be out of any other write domains, and we can update
3866 * the domain values for our changes.
3867 */
05394f39 3868 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
b9241ea3 3869
058d88c4 3870 return vma;
cc98b413
CW
3871
3872err_unpin_display:
8a0c39b1 3873 obj->pin_display--;
058d88c4 3874 return vma;
cc98b413
CW
3875}
3876
3877void
058d88c4 3878i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
cc98b413 3879{
49d73912 3880 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
4c7d62c6 3881
058d88c4 3882 if (WARN_ON(vma->obj->pin_display == 0))
8a0c39b1
TU
3883 return;
3884
d8923dcf 3885 if (--vma->obj->pin_display == 0)
f51455d4 3886 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
e6617330 3887
383d5823 3888 /* Bump the LRU to try and avoid premature eviction whilst flipping */
befedbb7 3889 i915_gem_object_bump_inactive_ggtt(vma->obj);
383d5823 3890
058d88c4 3891 i915_vma_unpin(vma);
b9241ea3
ZW
3892}
3893
e47c68e9
EA
3894/**
3895 * Moves a single object to the CPU read, and possibly write domain.
14bb2c11
TU
3896 * @obj: object to act on
3897 * @write: requesting write or read-only access
e47c68e9
EA
3898 *
3899 * This function returns when the move is complete, including waiting on
3900 * flushes to occur.
3901 */
dabdfe02 3902int
919926ae 3903i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
e47c68e9 3904{
e47c68e9
EA
3905 int ret;
3906
e95433c7 3907 lockdep_assert_held(&obj->base.dev->struct_mutex);
4c7d62c6 3908
e95433c7
CW
3909 ret = i915_gem_object_wait(obj,
3910 I915_WAIT_INTERRUPTIBLE |
3911 I915_WAIT_LOCKED |
3912 (write ? I915_WAIT_ALL : 0),
3913 MAX_SCHEDULE_TIMEOUT,
3914 NULL);
88241785
CW
3915 if (ret)
3916 return ret;
3917
ef74921b 3918 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
2ef7eeaa 3919
e47c68e9 3920 /* Flush the CPU cache if it's still invalid. */
05394f39 3921 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
57822dc6 3922 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
05394f39 3923 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
2ef7eeaa
EA
3924 }
3925
3926 /* It should now be out of any other write domains, and we can update
3927 * the domain values for our changes.
3928 */
e27ab73d 3929 GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
e47c68e9
EA
3930
3931 /* If we're writing through the CPU, then the GPU read domains will
3932 * need to be invalidated at next use.
3933 */
e27ab73d
CW
3934 if (write)
3935 __start_cpu_write(obj);
2ef7eeaa
EA
3936
3937 return 0;
3938}
3939
673a394b
EA
3940/* Throttle our rendering by waiting until the ring has completed our requests
3941 * emitted over 20 msec ago.
3942 *
b962442e
EA
3943 * Note that if we were to use the current jiffies each time around the loop,
3944 * we wouldn't escape the function with any frames outstanding if the time to
3945 * render a frame was over 20ms.
3946 *
673a394b
EA
3947 * This should get us reasonable parallelism between CPU and GPU but also
3948 * relatively low latency when blocking on a particular request to finish.
3949 */
40a5f0de 3950static int
f787a5f5 3951i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
40a5f0de 3952{
fac5e23e 3953 struct drm_i915_private *dev_priv = to_i915(dev);
f787a5f5 3954 struct drm_i915_file_private *file_priv = file->driver_priv;
d0bc54f2 3955 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
54fb2411 3956 struct drm_i915_gem_request *request, *target = NULL;
e95433c7 3957 long ret;
93533c29 3958
f4457ae7
CW
3959 /* ABI: return -EIO if already wedged */
3960 if (i915_terminally_wedged(&dev_priv->gpu_error))
3961 return -EIO;
e110e8d6 3962
1c25595f 3963 spin_lock(&file_priv->mm.lock);
c8659efa 3964 list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
b962442e
EA
3965 if (time_after_eq(request->emitted_jiffies, recent_enough))
3966 break;
40a5f0de 3967
c8659efa
CW
3968 if (target) {
3969 list_del(&target->client_link);
3970 target->file_priv = NULL;
3971 }
fcfa423c 3972
54fb2411 3973 target = request;
b962442e 3974 }
ff865885 3975 if (target)
e8a261ea 3976 i915_gem_request_get(target);
1c25595f 3977 spin_unlock(&file_priv->mm.lock);
40a5f0de 3978
54fb2411 3979 if (target == NULL)
f787a5f5 3980 return 0;
2bc43b5c 3981
e95433c7
CW
3982 ret = i915_wait_request(target,
3983 I915_WAIT_INTERRUPTIBLE,
3984 MAX_SCHEDULE_TIMEOUT);
e8a261ea 3985 i915_gem_request_put(target);
ff865885 3986
e95433c7 3987 return ret < 0 ? ret : 0;
40a5f0de
EA
3988}
3989
058d88c4 3990struct i915_vma *
ec7adb6e
JL
3991i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3992 const struct i915_ggtt_view *view,
91b2db6f 3993 u64 size,
2ffffd0f
CW
3994 u64 alignment,
3995 u64 flags)
ec7adb6e 3996{
ad16d2ed
CW
3997 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3998 struct i915_address_space *vm = &dev_priv->ggtt.base;
59bfa124
CW
3999 struct i915_vma *vma;
4000 int ret;
72e96d64 4001
4c7d62c6
CW
4002 lockdep_assert_held(&obj->base.dev->struct_mutex);
4003
718659a6 4004 vma = i915_vma_instance(obj, vm, view);
e0216b76 4005 if (unlikely(IS_ERR(vma)))
058d88c4 4006 return vma;
59bfa124
CW
4007
4008 if (i915_vma_misplaced(vma, size, alignment, flags)) {
4009 if (flags & PIN_NONBLOCK &&
4010 (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
058d88c4 4011 return ERR_PTR(-ENOSPC);
59bfa124 4012
ad16d2ed 4013 if (flags & PIN_MAPPABLE) {
ad16d2ed
CW
4014 /* If the required space is larger than the available
4015 * aperture, we will not be able to find a slot for the
4016 * object and unbinding the object now will be in
4017 * vain. Worse, doing so may cause us to ping-pong
4018 * the object in and out of the Global GTT and
4019 * waste a lot of cycles under the mutex.
4020 */
944397f0 4021 if (vma->fence_size > dev_priv->ggtt.mappable_end)
ad16d2ed
CW
4022 return ERR_PTR(-E2BIG);
4023
4024 /* If NONBLOCK is set the caller is optimistically
4025 * trying to cache the full object within the mappable
4026 * aperture, and *must* have a fallback in place for
4027 * situations where we cannot bind the object. We
4028 * can be a little more lax here and use the fallback
4029 * more often to avoid costly migrations of ourselves
4030 * and other objects within the aperture.
4031 *
4032 * Half-the-aperture is used as a simple heuristic.
4033 * More interesting would be to search for a free
4034 * block prior to making the commitment to unbind.
4035 * That caters for the self-harm case, and with a
4036 * little more heuristics (e.g. NOFAULT, NOEVICT)
4037 * we could try to minimise harm to others.
4038 */
4039 if (flags & PIN_NONBLOCK &&
944397f0 4040 vma->fence_size > dev_priv->ggtt.mappable_end / 2)
ad16d2ed
CW
4041 return ERR_PTR(-ENOSPC);
4042 }
4043
59bfa124
CW
4044 WARN(i915_vma_is_pinned(vma),
4045 "bo is already pinned in ggtt with incorrect alignment:"
05a20d09
CW
4046 " offset=%08x, req.alignment=%llx,"
4047 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
4048 i915_ggtt_offset(vma), alignment,
59bfa124 4049 !!(flags & PIN_MAPPABLE),
05a20d09 4050 i915_vma_is_map_and_fenceable(vma));
59bfa124
CW
4051 ret = i915_vma_unbind(vma);
4052 if (ret)
058d88c4 4053 return ERR_PTR(ret);
59bfa124
CW
4054 }
4055
058d88c4
CW
4056 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
4057 if (ret)
4058 return ERR_PTR(ret);
ec7adb6e 4059
058d88c4 4060 return vma;
673a394b
EA
4061}
4062
edf6b76f 4063static __always_inline unsigned int __busy_read_flag(unsigned int id)
3fdc13c7
CW
4064{
4065 /* Note that we could alias engines in the execbuf API, but
4066 * that would be very unwise as it prevents userspace from exercising
4067 * fine control over engine selection. Ahem.
4068 *
4069 * This should be something like EXEC_MAX_ENGINE instead of
4070 * I915_NUM_ENGINES.
4071 */
4072 BUILD_BUG_ON(I915_NUM_ENGINES > 16);
4073 return 0x10000 << id;
4074}
4075
4076static __always_inline unsigned int __busy_write_id(unsigned int id)
4077{
70cb472c
CW
4078 /* The uABI guarantees an active writer is also amongst the read
4079 * engines. This would be true if we accessed the activity tracking
4080 * under the lock, but as we perform the lookup of the object and
4082 * its activity locklessly, we cannot guarantee that the last_write
4082 * being active implies that we have set the same engine flag from
4083 * last_read - hence we always set both read and write busy for
4084 * last_write.
4085 */
4086 return id | __busy_read_flag(id);
3fdc13c7
CW
4087}
4088
edf6b76f 4089static __always_inline unsigned int
d07f0e59 4090__busy_set_if_active(const struct dma_fence *fence,
3fdc13c7
CW
4091 unsigned int (*flag)(unsigned int id))
4092{
d07f0e59 4093 struct drm_i915_gem_request *rq;
3fdc13c7 4094
d07f0e59
CW
4095 /* We have to check the current hw status of the fence as the uABI
4096 * guarantees forward progress. We could rely on the idle worker
4097 * to eventually flush us, but to minimise latency just ask the
4098 * hardware.
1255501d 4099 *
d07f0e59 4100 * Note we only report on the status of native fences.
1255501d 4101 */
d07f0e59
CW
4102 if (!dma_fence_is_i915(fence))
4103 return 0;
4104
4105 /* opencode to_request() in order to avoid const warnings */
4106 rq = container_of(fence, struct drm_i915_gem_request, fence);
4107 if (i915_gem_request_completed(rq))
4108 return 0;
4109
1d39f281 4110 return flag(rq->engine->uabi_id);
3fdc13c7
CW
4111}
4112
edf6b76f 4113static __always_inline unsigned int
d07f0e59 4114busy_check_reader(const struct dma_fence *fence)
3fdc13c7 4115{
d07f0e59 4116 return __busy_set_if_active(fence, __busy_read_flag);
3fdc13c7
CW
4117}
4118
edf6b76f 4119static __always_inline unsigned int
d07f0e59 4120busy_check_writer(const struct dma_fence *fence)
3fdc13c7 4121{
d07f0e59
CW
4122 if (!fence)
4123 return 0;
4124
4125 return __busy_set_if_active(fence, __busy_write_id);
3fdc13c7
CW
4126}
4127
673a394b
EA
4128int
4129i915_gem_busy_ioctl(struct drm_device *dev, void *data,
05394f39 4130 struct drm_file *file)
673a394b
EA
4131{
4132 struct drm_i915_gem_busy *args = data;
05394f39 4133 struct drm_i915_gem_object *obj;
d07f0e59
CW
4134 struct reservation_object_list *list;
4135 unsigned int seq;
fbbd37b3 4136 int err;
673a394b 4137
d07f0e59 4138 err = -ENOENT;
fbbd37b3
CW
4139 rcu_read_lock();
4140 obj = i915_gem_object_lookup_rcu(file, args->handle);
d07f0e59 4141 if (!obj)
fbbd37b3 4142 goto out;
d1b851fc 4143
d07f0e59
CW
4144 /* A discrepancy here is that we do not report the status of
4145 * non-i915 fences, i.e. even though we may report the object as idle,
4146 * a call to set-domain may still stall waiting for foreign rendering.
4147 * This also means that wait-ioctl may report an object as busy,
4148 * where busy-ioctl considers it idle.
4149 *
4150 * We trade the ability to warn of foreign fences to report on which
4151 * i915 engines are active for the object.
4152 *
4153 * Alternatively, we can trade that extra information on read/write
4154 * activity with
4155 * args->busy =
4156 * !reservation_object_test_signaled_rcu(obj->resv, true);
4157 * to report the overall busyness. This is what the wait-ioctl does.
4158 *
4159 */
4160retry:
4161 seq = raw_read_seqcount(&obj->resv->seq);
426960be 4162
d07f0e59
CW
4163 /* Translate the exclusive fence to the READ *and* WRITE engine */
4164 args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
3fdc13c7 4165
d07f0e59
CW
4166 /* Translate shared fences to READ set of engines */
4167 list = rcu_dereference(obj->resv->fence);
4168 if (list) {
4169 unsigned int shared_count = list->shared_count, i;
3fdc13c7 4170
d07f0e59
CW
4171 for (i = 0; i < shared_count; ++i) {
4172 struct dma_fence *fence =
4173 rcu_dereference(list->shared[i]);
4174
4175 args->busy |= busy_check_reader(fence);
4176 }
426960be 4177 }
673a394b 4178
d07f0e59
CW
4179 if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
4180 goto retry;
4181
4182 err = 0;
fbbd37b3
CW
4183out:
4184 rcu_read_unlock();
4185 return err;
673a394b
EA
4186}
4187
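/*
 * The busy word returned above can be decoded by userspace following
 * the __busy_read_flag()/__busy_write_id() encoding: the low 16 bits
 * name the engine that last wrote to the object, and each bit from
 * 0x10000 upwards flags an engine with an outstanding read. A minimal
 * sketch, illustrative only, assuming a valid 'fd' and GEM 'handle':
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	unsigned int writer  = busy.busy & 0xffff;	(last writer, if any)
 *	unsigned int readers = busy.busy >> 16;		(one read flag per engine)
 *
 * busy.busy == 0 means the object is idle; note the writer, when
 * present, is also reported amongst the readers, per the comment above.
 */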
4188int
4189i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4190 struct drm_file *file_priv)
4191{
0206e353 4192 return i915_gem_ring_throttle(dev, file_priv);
673a394b
EA
4193}
4194
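/*
 * The throttle ioctl carries no payload; a client issues it purely to
 * rate-limit itself against its own outstanding rendering, e.g.
 * (illustrative only, using libdrm's drmIoctl()):
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
 *
 * which blocks until the requests this file emitted more than
 * DRM_I915_THROTTLE_JIFFIES (roughly 20ms) ago have completed, as
 * implemented by i915_gem_ring_throttle() above.
 */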
3ef94daa
CW
4195int
4196i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4197 struct drm_file *file_priv)
4198{
fac5e23e 4199 struct drm_i915_private *dev_priv = to_i915(dev);
3ef94daa 4200 struct drm_i915_gem_madvise *args = data;
05394f39 4201 struct drm_i915_gem_object *obj;
1233e2db 4202 int err;
3ef94daa
CW
4203
4204 switch (args->madv) {
4205 case I915_MADV_DONTNEED:
4206 case I915_MADV_WILLNEED:
4207 break;
4208 default:
4209 return -EINVAL;
4210 }
4211
03ac0642 4212 obj = i915_gem_object_lookup(file_priv, args->handle);
1233e2db
CW
4213 if (!obj)
4214 return -ENOENT;
4215
4216 err = mutex_lock_interruptible(&obj->mm.lock);
4217 if (err)
4218 goto out;
3ef94daa 4219
a4f5ea64 4220 if (obj->mm.pages &&
3e510a8e 4221 i915_gem_object_is_tiled(obj) &&
656bfa3a 4222 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
bc0629a7
CW
4223 if (obj->mm.madv == I915_MADV_WILLNEED) {
4224 GEM_BUG_ON(!obj->mm.quirked);
a4f5ea64 4225 __i915_gem_object_unpin_pages(obj);
bc0629a7
CW
4226 obj->mm.quirked = false;
4227 }
4228 if (args->madv == I915_MADV_WILLNEED) {
2c3a3f44 4229 GEM_BUG_ON(obj->mm.quirked);
a4f5ea64 4230 __i915_gem_object_pin_pages(obj);
bc0629a7
CW
4231 obj->mm.quirked = true;
4232 }
656bfa3a
DV
4233 }
4234
a4f5ea64
CW
4235 if (obj->mm.madv != __I915_MADV_PURGED)
4236 obj->mm.madv = args->madv;
3ef94daa 4237
6c085a72 4238 /* if the object is no longer attached, discard its backing storage */
a4f5ea64 4239 if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
2d7ef395
CW
4240 i915_gem_object_truncate(obj);
4241
a4f5ea64 4242 args->retained = obj->mm.madv != __I915_MADV_PURGED;
1233e2db 4243 mutex_unlock(&obj->mm.lock);
bb6baf76 4244
1233e2db 4245out:
f8c417cd 4246 i915_gem_object_put(obj);
1233e2db 4247 return err;
3ef94daa
CW
4248}
4249
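/*
 * Userspace marks idle objects purgeable through
 * DRM_IOCTL_I915_GEM_MADVISE. A minimal sketch, illustrative only,
 * assuming a valid 'fd' and GEM 'handle':
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 * Before reusing the object, switching back to I915_MADV_WILLNEED and
 * checking madv.retained tells the caller whether the shrinker purged
 * the backing pages in the meantime.
 */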
5b8c8aec
CW
4250static void
4251frontbuffer_retire(struct i915_gem_active *active,
4252 struct drm_i915_gem_request *request)
4253{
4254 struct drm_i915_gem_object *obj =
4255 container_of(active, typeof(*obj), frontbuffer_write);
4256
d59b21ec 4257 intel_fb_obj_flush(obj, ORIGIN_CS);
5b8c8aec
CW
4258}
4259
37e680a1
CW
4260void i915_gem_object_init(struct drm_i915_gem_object *obj,
4261 const struct drm_i915_gem_object_ops *ops)
0327d6ba 4262{
1233e2db
CW
4263 mutex_init(&obj->mm.lock);
4264
56cea323 4265 INIT_LIST_HEAD(&obj->global_link);
275f039d 4266 INIT_LIST_HEAD(&obj->userfault_link);
2f633156 4267 INIT_LIST_HEAD(&obj->vma_list);
d1b48c1e 4268 INIT_LIST_HEAD(&obj->lut_list);
8d9d5744 4269 INIT_LIST_HEAD(&obj->batch_pool_link);
0327d6ba 4270
37e680a1
CW
4271 obj->ops = ops;
4272
d07f0e59
CW
4273 reservation_object_init(&obj->__builtin_resv);
4274 obj->resv = &obj->__builtin_resv;
4275
50349247 4276 obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
5b8c8aec 4277 init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
a4f5ea64
CW
4278
4279 obj->mm.madv = I915_MADV_WILLNEED;
4280 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
4281 mutex_init(&obj->mm.get_page.lock);
0327d6ba 4282
f19ec8cb 4283 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
0327d6ba
CW
4284}
4285
37e680a1 4286static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3599a91c
TU
4287 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
4288 I915_GEM_OBJECT_IS_SHRINKABLE,
7c55e2c5 4289
37e680a1
CW
4290 .get_pages = i915_gem_object_get_pages_gtt,
4291 .put_pages = i915_gem_object_put_pages_gtt,
7c55e2c5
CW
4292
4293 .pwrite = i915_gem_object_pwrite_gtt,
37e680a1
CW
4294};
4295
b4bcbe2a 4296struct drm_i915_gem_object *
12d79d78 4297i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
ac52bc56 4298{
c397b908 4299 struct drm_i915_gem_object *obj;
5949eac4 4300 struct address_space *mapping;
b8f55be6 4301 unsigned int cache_level;
1a240d4d 4302 gfp_t mask;
fe3db79b 4303 int ret;
ac52bc56 4304
b4bcbe2a
CW
4305 /* There is a prevalence of the assumption that we fit the object's
4306 * page count inside a 32bit _signed_ variable. Let's document this and
4307 * catch if we ever need to fix it. In the meantime, if you do spot
4308 * such a local variable, please consider fixing!
4309 */
7a3ee5de 4310 if (size >> PAGE_SHIFT > INT_MAX)
b4bcbe2a
CW
4311 return ERR_PTR(-E2BIG);
4312
4313 if (overflows_type(size, obj->base.size))
4314 return ERR_PTR(-E2BIG);
4315
187685cb 4316 obj = i915_gem_object_alloc(dev_priv);
c397b908 4317 if (obj == NULL)
fe3db79b 4318 return ERR_PTR(-ENOMEM);
673a394b 4319
12d79d78 4320 ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size);
fe3db79b
CW
4321 if (ret)
4322 goto fail;
673a394b 4323
bed1ea95 4324 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
c0f86832 4325 if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
bed1ea95
CW
4326 /* 965gm cannot relocate objects above 4GiB. */
4327 mask &= ~__GFP_HIGHMEM;
4328 mask |= __GFP_DMA32;
4329 }
4330
93c76a3d 4331 mapping = obj->base.filp->f_mapping;
bed1ea95 4332 mapping_set_gfp_mask(mapping, mask);
4846bf0c 4333 GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
5949eac4 4334
37e680a1 4335 i915_gem_object_init(obj, &i915_gem_object_ops);
73aa808f 4336
c397b908
DV
4337 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4338 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
673a394b 4339
b8f55be6 4340 if (HAS_LLC(dev_priv))
3d29b842 4341 /* On some devices, we can have the GPU use the LLC (the CPU
a1871112
EA
4342 * cache) for about a 10% performance improvement
4343 * compared to uncached. Graphics requests other than
4344 * display scanout are coherent with the CPU in
4345 * accessing this cache. This means in this mode we
4346 * don't need to clflush on the CPU side, and on the
4347 * GPU side we only need to flush internal caches to
4348 * get data visible to the CPU.
4349 *
4350 * However, we maintain the display planes as UC, and so
4351 * need to rebind when first used as such.
4352 */
b8f55be6
CW
4353 cache_level = I915_CACHE_LLC;
4354 else
4355 cache_level = I915_CACHE_NONE;
a1871112 4356
b8f55be6 4357 i915_gem_object_set_cache_coherency(obj, cache_level);
e27ab73d 4358
d861e338
DV
4359 trace_i915_gem_object_create(obj);
4360
05394f39 4361 return obj;
fe3db79b
CW
4362
4363fail:
4364 i915_gem_object_free(obj);
fe3db79b 4365 return ERR_PTR(ret);
c397b908
DV
4366}
4367
340fbd8c
CW
4368static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4369{
4370 /* If we are the last user of the backing storage (be it shmemfs
4371 * pages or stolen etc), we know that the pages are going to be
4372 * immediately released. In this case, we can then skip copying
4373 * back the contents from the GPU.
4374 */
4375
a4f5ea64 4376 if (obj->mm.madv != I915_MADV_WILLNEED)
340fbd8c
CW
4377 return false;
4378
4379 if (obj->base.filp == NULL)
4380 return true;
4381
4382 /* At first glance, this looks racy, but then again so would be
4383 * userspace racing mmap against close. However, the first external
4384 * reference to the filp can only be obtained through the
4385 * i915_gem_mmap_ioctl() which safeguards us against the user
4386 * acquiring such a reference whilst we are in the middle of
4387 * freeing the object.
4388 */
4389 return atomic_long_read(&obj->base.filp->f_count) == 1;
4390}
4391
fbbd37b3
CW
4392static void __i915_gem_free_objects(struct drm_i915_private *i915,
4393 struct llist_node *freed)
673a394b 4394{
fbbd37b3 4395 struct drm_i915_gem_object *obj, *on;
673a394b 4396
fbbd37b3
CW
4397 mutex_lock(&i915->drm.struct_mutex);
4398 intel_runtime_pm_get(i915);
4399 llist_for_each_entry(obj, freed, freed) {
4400 struct i915_vma *vma, *vn;
4401
4402 trace_i915_gem_object_destroy(obj);
4403
4404 GEM_BUG_ON(i915_gem_object_is_active(obj));
4405 list_for_each_entry_safe(vma, vn,
4406 &obj->vma_list, obj_link) {
fbbd37b3
CW
4407 GEM_BUG_ON(i915_vma_is_active(vma));
4408 vma->flags &= ~I915_VMA_PIN_MASK;
4409 i915_vma_close(vma);
4410 }
db6c2b41
CW
4411 GEM_BUG_ON(!list_empty(&obj->vma_list));
4412 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
fbbd37b3 4413
56cea323 4414 list_del(&obj->global_link);
fbbd37b3
CW
4415 }
4416 intel_runtime_pm_put(i915);
4417 mutex_unlock(&i915->drm.struct_mutex);
4418
f2be9d68
CW
4419 cond_resched();
4420
fbbd37b3
CW
4421 llist_for_each_entry_safe(obj, on, freed, freed) {
4422 GEM_BUG_ON(obj->bind_count);
4423 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
4424
4425 if (obj->ops->release)
4426 obj->ops->release(obj);
f65c9168 4427
fbbd37b3
CW
4428 if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
4429 atomic_set(&obj->mm.pages_pin_count, 0);
548625ee 4430 __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
fbbd37b3
CW
4431 GEM_BUG_ON(obj->mm.pages);
4432
4433 if (obj->base.import_attach)
4434 drm_prime_gem_destroy(&obj->base, NULL);
4435
d07f0e59 4436 reservation_object_fini(&obj->__builtin_resv);
fbbd37b3
CW
4437 drm_gem_object_release(&obj->base);
4438 i915_gem_info_remove_obj(i915, obj->base.size);
4439
4440 kfree(obj->bit_17);
4441 i915_gem_object_free(obj);
4442 }
4443}
4444
4445static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
4446{
4447 struct llist_node *freed;
4448
4449 freed = llist_del_all(&i915->mm.free_list);
4450 if (unlikely(freed))
4451 __i915_gem_free_objects(i915, freed);
4452}
4453
4454static void __i915_gem_free_work(struct work_struct *work)
4455{
4456 struct drm_i915_private *i915 =
4457 container_of(work, struct drm_i915_private, mm.free_work);
4458 struct llist_node *freed;
26e12f89 4459
b1f788c6
CW
4460 /* All file-owned VMA should have been released by this point through
4461 * i915_gem_close_object(), or earlier by i915_gem_context_close().
4462 * However, the object may also be bound into the global GTT (e.g.
4463 * older GPUs without per-process support, or for direct access through
4464 * the GTT either for the user or for scanout). Those VMA still need to
4465 * unbound now.
4466 */
1488fc08 4467
5ad08be7 4468 while ((freed = llist_del_all(&i915->mm.free_list))) {
fbbd37b3 4469 __i915_gem_free_objects(i915, freed);
5ad08be7
CW
4470 if (need_resched())
4471 break;
4472 }
fbbd37b3 4473}
a071fa00 4474
fbbd37b3
CW
4475static void __i915_gem_free_object_rcu(struct rcu_head *head)
4476{
4477 struct drm_i915_gem_object *obj =
4478 container_of(head, typeof(*obj), rcu);
4479 struct drm_i915_private *i915 = to_i915(obj->base.dev);
4480
4481 /* We can't simply use call_rcu() from i915_gem_free_object()
4482 * as we need to block whilst unbinding, and the call_rcu
4483 * task may be called from softirq context. So we take a
4484 * detour through a worker.
4485 */
4486 if (llist_add(&obj->freed, &i915->mm.free_list))
4487 schedule_work(&i915->mm.free_work);
4488}
656bfa3a 4489
fbbd37b3
CW
4490void i915_gem_free_object(struct drm_gem_object *gem_obj)
4491{
4492 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
a4f5ea64 4493
bc0629a7
CW
4494 if (obj->mm.quirked)
4495 __i915_gem_object_unpin_pages(obj);
4496
340fbd8c 4497 if (discard_backing_storage(obj))
a4f5ea64 4498 obj->mm.madv = I915_MADV_DONTNEED;
de151cf6 4499
fbbd37b3
CW
4500 /* Before we free the object, make sure any pure RCU-only
4501 * read-side critical sections are complete, e.g.
4502 * i915_gem_busy_ioctl(). For the corresponding synchronized
4503 * lookup see i915_gem_object_lookup_rcu().
4504 */
4505 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
673a394b
EA
4506}
4507
f8a7fde4
CW
4508void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
4509{
4510 lockdep_assert_held(&obj->base.dev->struct_mutex);
4511
d1b48c1e
CW
4512 if (!i915_gem_object_has_active_reference(obj) &&
4513 i915_gem_object_is_active(obj))
f8a7fde4
CW
4514 i915_gem_object_set_active_reference(obj);
4515 else
4516 i915_gem_object_put(obj);
4517}
4518
3033acab
CW
4519static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
4520{
4521 struct intel_engine_cs *engine;
4522 enum intel_engine_id id;
4523
4524 for_each_engine(engine, dev_priv, id)
f131e356
CW
4525 GEM_BUG_ON(engine->last_retired_context &&
4526 !i915_gem_context_is_kernel(engine->last_retired_context));
3033acab
CW
4527}
4528
24145517
CW
4529void i915_gem_sanitize(struct drm_i915_private *i915)
4530{
4531 /*
4532 * If we inherit context state from the BIOS or earlier occupants
4533 * of the GPU, the GPU may be in an inconsistent state when we
4534 * try to take over. The only way to remove the earlier state
4535 * is by resetting. However, resetting on earlier gen is tricky as
4536 * it may impact the display and we are uncertain about the stability
ea117b8d 4537 * of the reset, so this could be applied to even earlier gen.
24145517 4538 */
ea117b8d 4539 if (INTEL_GEN(i915) >= 5) {
24145517
CW
4540 int reset = intel_gpu_reset(i915, ALL_ENGINES);
4541 WARN_ON(reset && reset != -ENODEV);
4542 }
4543}
4544
bf9e8429 4545int i915_gem_suspend(struct drm_i915_private *dev_priv)
29105ccc 4546{
bf9e8429 4547 struct drm_device *dev = &dev_priv->drm;
dcff85c8 4548 int ret;
28dfe52a 4549
c998e8a0 4550 intel_runtime_pm_get(dev_priv);
54b4f68f
CW
4551 intel_suspend_gt_powersave(dev_priv);
4552
45c5f202 4553 mutex_lock(&dev->struct_mutex);
5ab57c70
CW
4554
4555 /* We have to flush all the executing contexts to main memory so
4556 * that they can be saved in the hibernation image. To ensure the last
4557 * context image is coherent, we have to switch away from it. That
4558 * leaves the dev_priv->kernel_context still active when
4559 * we actually suspend, and its image in memory may not match the GPU
4560 * state. Fortunately, the kernel_context is disposable and we do
4561 * not rely on its state.
4562 */
4563 ret = i915_gem_switch_to_kernel_context(dev_priv);
4564 if (ret)
c998e8a0 4565 goto err_unlock;
5ab57c70 4566
22dd3bb9
CW
4567 ret = i915_gem_wait_for_idle(dev_priv,
4568 I915_WAIT_INTERRUPTIBLE |
4569 I915_WAIT_LOCKED);
f7403347 4570 if (ret)
c998e8a0 4571 goto err_unlock;
f7403347 4572
3033acab 4573 assert_kernel_context_is_current(dev_priv);
829a0af2 4574 i915_gem_contexts_lost(dev_priv);
45c5f202
CW
4575 mutex_unlock(&dev->struct_mutex);
4576
63987bfe
SAK
4577 intel_guc_suspend(dev_priv);
4578
737b1506 4579 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
67d97da3 4580 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
bdeb9785
CW
4581
4582 /* As the idle_work is rearming if it detects a race, play safe and
4583 * repeat the flush until it is definitely idle.
4584 */
4585 while (flush_delayed_work(&dev_priv->gt.idle_work))
4586 ;
4587
bdcf120b
CW
4588 /* Assert that we successfully flushed all the work and
4589 * reset the GPU back to its idle, low power state.
4590 */
67d97da3 4591 WARN_ON(dev_priv->gt.awake);
05425249 4592 WARN_ON(!intel_engines_are_idle(dev_priv));
bdcf120b 4593
1c777c5d
ID
4594 /*
4595 * Neither the BIOS, ourselves nor any other kernel
4596 * expects the system to be in execlists mode on startup,
4597 * so we need to reset the GPU back to legacy mode. And the only
4598 * known way to disable logical contexts is through a GPU reset.
4599 *
4600 * So in order to leave the system in a known default configuration,
4601 * always reset the GPU upon unload and suspend. Afterwards we then
4602 * clean up the GEM state tracking, flushing off the requests and
4603 * leaving the system in a known idle state.
4604 *
4605 * Note that it is of the utmost importance that the GPU is idle and
4606 * all stray writes are flushed *before* we dismantle the backing
4607 * storage for the pinned objects.
4608 *
4609 * However, since we are uncertain that resetting the GPU on older
4610 * machines is a good idea, we don't - just in case it leaves the
4611 * machine in an unusable condition.
4612 */
24145517 4613 i915_gem_sanitize(dev_priv);
c998e8a0 4614 goto out_rpm_put;
1c777c5d 4615
c998e8a0 4616err_unlock:
45c5f202 4617 mutex_unlock(&dev->struct_mutex);
c998e8a0
CW
4618out_rpm_put:
4619 intel_runtime_pm_put(dev_priv);
45c5f202 4620 return ret;
673a394b
EA
4621}
4622
bf9e8429 4623void i915_gem_resume(struct drm_i915_private *dev_priv)
5ab57c70 4624{
bf9e8429 4625 struct drm_device *dev = &dev_priv->drm;
5ab57c70 4626
31ab49ab
ID
4627 WARN_ON(dev_priv->gt.awake);
4628
5ab57c70 4629 mutex_lock(&dev->struct_mutex);
275a991c 4630 i915_gem_restore_gtt_mappings(dev_priv);
5ab57c70
CW
4631
4632 /* As we didn't flush the kernel context before suspend, we cannot
4633 * guarantee that the context image is complete. So let's just reset
4634 * it and start again.
4635 */
821ed7df 4636 dev_priv->gt.resume(dev_priv);
5ab57c70
CW
4637
4638 mutex_unlock(&dev->struct_mutex);
4639}
4640
c6be607a 4641void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
f691e2f4 4642{
c6be607a 4643 if (INTEL_GEN(dev_priv) < 5 ||
f691e2f4
DV
4644 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4645 return;
4646
4647 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4648 DISP_TILE_SURFACE_SWIZZLING);
4649
5db94019 4650 if (IS_GEN5(dev_priv))
11782b02
DV
4651 return;
4652
f691e2f4 4653 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
5db94019 4654 if (IS_GEN6(dev_priv))
6b26c86d 4655 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
5db94019 4656 else if (IS_GEN7(dev_priv))
6b26c86d 4657 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
5db94019 4658 else if (IS_GEN8(dev_priv))
31a5336e 4659 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
8782e26c
BW
4660 else
4661 BUG();
f691e2f4 4662}
e21af88d 4663
50a0bc90 4664static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
81e7f200 4665{
81e7f200
VS
4666 I915_WRITE(RING_CTL(base), 0);
4667 I915_WRITE(RING_HEAD(base), 0);
4668 I915_WRITE(RING_TAIL(base), 0);
4669 I915_WRITE(RING_START(base), 0);
4670}
4671
50a0bc90 4672static void init_unused_rings(struct drm_i915_private *dev_priv)
81e7f200 4673{
50a0bc90
TU
4674 if (IS_I830(dev_priv)) {
4675 init_unused_ring(dev_priv, PRB1_BASE);
4676 init_unused_ring(dev_priv, SRB0_BASE);
4677 init_unused_ring(dev_priv, SRB1_BASE);
4678 init_unused_ring(dev_priv, SRB2_BASE);
4679 init_unused_ring(dev_priv, SRB3_BASE);
4680 } else if (IS_GEN2(dev_priv)) {
4681 init_unused_ring(dev_priv, SRB0_BASE);
4682 init_unused_ring(dev_priv, SRB1_BASE);
4683 } else if (IS_GEN3(dev_priv)) {
4684 init_unused_ring(dev_priv, PRB1_BASE);
4685 init_unused_ring(dev_priv, PRB2_BASE);
81e7f200
VS
4686 }
4687}
4688
20a8a74a 4689static int __i915_gem_restart_engines(void *data)
4fc7c971 4690{
20a8a74a 4691 struct drm_i915_private *i915 = data;
e2f80391 4692 struct intel_engine_cs *engine;
3b3f1650 4693 enum intel_engine_id id;
20a8a74a
CW
4694 int err;
4695
4696 for_each_engine(engine, i915, id) {
4697 err = engine->init_hw(engine);
4698 if (err)
4699 return err;
4700 }
4701
4702 return 0;
4703}
4704
4705int i915_gem_init_hw(struct drm_i915_private *dev_priv)
4706{
d200cda6 4707 int ret;
4fc7c971 4708
de867c20
CW
4709 dev_priv->gt.last_init_time = ktime_get();
4710
5e4f5189
CW
4711 /* Double layer security blanket, see i915_gem_init() */
4712 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4713
0031fb96 4714 if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
05e21cc4 4715 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4fc7c971 4716
772c2a51 4717 if (IS_HASWELL(dev_priv))
50a0bc90 4718 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
0bf21347 4719 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
9435373e 4720
6e266956 4721 if (HAS_PCH_NOP(dev_priv)) {
fd6b8f43 4722 if (IS_IVYBRIDGE(dev_priv)) {
6ba844b0
DV
4723 u32 temp = I915_READ(GEN7_MSG_CTL);
4724 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4725 I915_WRITE(GEN7_MSG_CTL, temp);
c6be607a 4726 } else if (INTEL_GEN(dev_priv) >= 7) {
6ba844b0
DV
4727 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4728 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4729 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4730 }
88a2b2a3
BW
4731 }
4732
c6be607a 4733 i915_gem_init_swizzling(dev_priv);
4fc7c971 4734
d5abdfda
DV
4735 /*
4736 * At least 830 can leave some of the unused rings
4737 * "active" (ie. head != tail) after resume which
4738 * will prevent c3 entry. Makes sure all unused rings
4739 * are totally idle.
4740 */
50a0bc90 4741 init_unused_rings(dev_priv);
d5abdfda 4742
ed54c1a1 4743 BUG_ON(!dev_priv->kernel_context);
90638cc1 4744
c6be607a 4745 ret = i915_ppgtt_init_hw(dev_priv);
4ad2fd88
JH
4746 if (ret) {
4747 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4748 goto out;
4749 }
4750
4751 /* Need to do basic initialisation of all rings first: */
20a8a74a
CW
4752 ret = __i915_gem_restart_engines(dev_priv);
4753 if (ret)
4754 goto out;
99433931 4755
bf9e8429 4756 intel_mocs_init_l3cc_table(dev_priv);
0ccdacf6 4757
b8991403
OM
4758 /* We can't enable contexts until all firmware is loaded */
4759 ret = intel_uc_init_hw(dev_priv);
4760 if (ret)
4761 goto out;
33a732f4 4762
5e4f5189
CW
4763out:
4764 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2fa48d8d 4765 return ret;
8187a2b7
ZN
4766}
4767
39df9190
CW
4768bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4769{
4770 if (INTEL_INFO(dev_priv)->gen < 6)
4771 return false;
4772
4773 /* TODO: make semaphores and Execlists play nicely together */
4774 if (i915.enable_execlists)
4775 return false;
4776
4777 if (value >= 0)
4778 return value;
4779
39df9190 4780 /* Enable semaphores on SNB when IO remapping is off */
80debff8 4781 if (IS_GEN6(dev_priv) && intel_vtd_active())
39df9190 4782 return false;
39df9190
CW
4783
4784 return true;
4785}
4786
bf9e8429 4787int i915_gem_init(struct drm_i915_private *dev_priv)
1070a42b 4788{
1070a42b
CW
4789 int ret;
4790
bf9e8429 4791 mutex_lock(&dev_priv->drm.struct_mutex);
d62b4892 4792
94312828 4793 dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
57822dc6 4794
a83014d3 4795 if (!i915.enable_execlists) {
821ed7df 4796 dev_priv->gt.resume = intel_legacy_submission_resume;
7e37f889 4797 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
454afebd 4798 } else {
821ed7df 4799 dev_priv->gt.resume = intel_lr_context_resume;
117897f4 4800 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
a83014d3
OM
4801 }
4802
5e4f5189
CW
4803 /* This is just a security blanket to placate dragons.
4804 * On some systems, we very sporadically observe that the first TLBs
4805 * used by the CS may be stale, despite us poking the TLB reset. If
4806 * we hold the forcewake during initialisation these problems
4807 * just magically go away.
4808 */
4809 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4810
8a2421bd
CW
4811 ret = i915_gem_init_userptr(dev_priv);
4812 if (ret)
4813 goto out_unlock;
f6b9d5ca
CW
4814
4815 ret = i915_gem_init_ggtt(dev_priv);
4816 if (ret)
4817 goto out_unlock;
d62b4892 4818
829a0af2 4819 ret = i915_gem_contexts_init(dev_priv);
7bcc3777
JN
4820 if (ret)
4821 goto out_unlock;
2fa48d8d 4822
bf9e8429 4823 ret = intel_engines_init(dev_priv);
35a57ffb 4824 if (ret)
7bcc3777 4825 goto out_unlock;
2fa48d8d 4826
bf9e8429 4827 ret = i915_gem_init_hw(dev_priv);
60990320 4828 if (ret == -EIO) {
7e21d648 4829 /* Allow engine initialisation to fail by marking the GPU as
60990320
CW
4830 * wedged. But we only want to do this where the GPU is angry;
4831 * for all other failures, such as an allocation failure, bail.
4832 */
4833 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
821ed7df 4834 i915_gem_set_wedged(dev_priv);
60990320 4835 ret = 0;
1070a42b 4836 }
7bcc3777
JN
4837
4838out_unlock:
5e4f5189 4839 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
bf9e8429 4840 mutex_unlock(&dev_priv->drm.struct_mutex);
1070a42b 4841
60990320 4842 return ret;
1070a42b
CW
4843}
4844
24145517
CW
4845void i915_gem_init_mmio(struct drm_i915_private *i915)
4846{
4847 i915_gem_sanitize(i915);
4848}
4849
8187a2b7 4850void
cb15d9f8 4851i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
8187a2b7 4852{
e2f80391 4853 struct intel_engine_cs *engine;
3b3f1650 4854 enum intel_engine_id id;
8187a2b7 4855
3b3f1650 4856 for_each_engine(engine, dev_priv, id)
117897f4 4857 dev_priv->gt.cleanup_engine(engine);
8187a2b7
ZN
4858}
4859
40ae4e16
ID
4860void
4861i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4862{
49ef5294 4863 int i;
40ae4e16
ID
4864
4865 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4866 !IS_CHERRYVIEW(dev_priv))
4867 dev_priv->num_fence_regs = 32;
73f67aa8
JN
4868 else if (INTEL_INFO(dev_priv)->gen >= 4 ||
4869 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
4870 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
40ae4e16
ID
4871 dev_priv->num_fence_regs = 16;
4872 else
4873 dev_priv->num_fence_regs = 8;
4874
c033666a 4875 if (intel_vgpu_active(dev_priv))
40ae4e16
ID
4876 dev_priv->num_fence_regs =
4877 I915_READ(vgtif_reg(avail_rs.fence_num));
4878
4879 /* Initialize fence registers to zero */
49ef5294
CW
4880 for (i = 0; i < dev_priv->num_fence_regs; i++) {
4881 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
4882
4883 fence->i915 = dev_priv;
4884 fence->id = i;
4885 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
4886 }
4362f4f6 4887 i915_gem_restore_fences(dev_priv);
40ae4e16 4888
4362f4f6 4889 i915_gem_detect_bit_6_swizzle(dev_priv);
40ae4e16
ID
4890}
4891
73cb9701 4892int
cb15d9f8 4893i915_gem_load_init(struct drm_i915_private *dev_priv)
673a394b 4894{
a933568e 4895 int err = -ENOMEM;
42dcedd4 4896
a933568e
TU
4897 dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
4898 if (!dev_priv->objects)
73cb9701 4899 goto err_out;
73cb9701 4900
a933568e
TU
4901 dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
4902 if (!dev_priv->vmas)
73cb9701 4903 goto err_objects;
73cb9701 4904
d1b48c1e
CW
4905 dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
4906 if (!dev_priv->luts)
4907 goto err_vmas;
4908
a933568e
TU
4909 dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
4910 SLAB_HWCACHE_ALIGN |
4911 SLAB_RECLAIM_ACCOUNT |
5f0d5a3a 4912 SLAB_TYPESAFE_BY_RCU);
a933568e 4913 if (!dev_priv->requests)
d1b48c1e 4914 goto err_luts;
73cb9701 4915
52e54209
CW
4916 dev_priv->dependencies = KMEM_CACHE(i915_dependency,
4917 SLAB_HWCACHE_ALIGN |
4918 SLAB_RECLAIM_ACCOUNT);
4919 if (!dev_priv->dependencies)
4920 goto err_requests;
4921
c5cf9a91
CW
4922 dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
4923 if (!dev_priv->priorities)
4924 goto err_dependencies;
4925
73cb9701
CW
4926 mutex_lock(&dev_priv->drm.struct_mutex);
4927 INIT_LIST_HEAD(&dev_priv->gt.timelines);
bb89485e 4928 err = i915_gem_timeline_init__global(dev_priv);
73cb9701
CW
4929 mutex_unlock(&dev_priv->drm.struct_mutex);
4930 if (err)
c5cf9a91 4931 goto err_priorities;
673a394b 4932
fbbd37b3
CW
4933 INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
4934 init_llist_head(&dev_priv->mm.free_list);
6c085a72
CW
4935 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4936 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
a09ba7fa 4937 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
275f039d 4938 INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
67d97da3 4939 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
673a394b 4940 i915_gem_retire_work_handler);
67d97da3 4941 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
b29c19b6 4942 i915_gem_idle_work_handler);
1f15b76f 4943 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
1f83fee0 4944 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
31169714 4945
6f633402
JL
4946 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
4947
b5add959 4948 spin_lock_init(&dev_priv->fb_tracking.lock);
73cb9701
CW
4949
4950 return 0;
4951
c5cf9a91
CW
4952err_priorities:
4953 kmem_cache_destroy(dev_priv->priorities);
52e54209
CW
4954err_dependencies:
4955 kmem_cache_destroy(dev_priv->dependencies);
73cb9701
CW
4956err_requests:
4957 kmem_cache_destroy(dev_priv->requests);
d1b48c1e
CW
4958err_luts:
4959 kmem_cache_destroy(dev_priv->luts);
73cb9701
CW
4960err_vmas:
4961 kmem_cache_destroy(dev_priv->vmas);
4962err_objects:
4963 kmem_cache_destroy(dev_priv->objects);
4964err_out:
4965 return err;
673a394b 4966}
71acb5eb 4967
cb15d9f8 4968void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
d64aa096 4969{
c4d4c1c6 4970 i915_gem_drain_freed_objects(dev_priv);
7d5d59e5 4971 WARN_ON(!llist_empty(&dev_priv->mm.free_list));
c4d4c1c6 4972 WARN_ON(dev_priv->mm.object_count);
7d5d59e5 4973
ea84aa77
MA
4974 mutex_lock(&dev_priv->drm.struct_mutex);
4975 i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
4976 WARN_ON(!list_empty(&dev_priv->gt.timelines));
4977 mutex_unlock(&dev_priv->drm.struct_mutex);
4978
c5cf9a91 4979 kmem_cache_destroy(dev_priv->priorities);
52e54209 4980 kmem_cache_destroy(dev_priv->dependencies);
d64aa096 4981 kmem_cache_destroy(dev_priv->requests);
d1b48c1e 4982 kmem_cache_destroy(dev_priv->luts);
d64aa096
ID
4983 kmem_cache_destroy(dev_priv->vmas);
4984 kmem_cache_destroy(dev_priv->objects);
0eafec6d
CW
4985
 4986 /* And ensure that our TYPESAFE_BY_RCU slabs are truly destroyed */
4987 rcu_barrier();
d64aa096
ID
4988}
4989
6a800eab
CW
4990int i915_gem_freeze(struct drm_i915_private *dev_priv)
4991{
d0aa301a
CW
4992 /* Discard all purgeable objects, let userspace recover those as
4993 * required after resuming.
4994 */
6a800eab 4995 i915_gem_shrink_all(dev_priv);
6a800eab 4996
6a800eab
CW
4997 return 0;
4998}
4999
461fb99c
CW
5000int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
5001{
5002 struct drm_i915_gem_object *obj;
7aab2d53
CW
5003 struct list_head *phases[] = {
5004 &dev_priv->mm.unbound_list,
5005 &dev_priv->mm.bound_list,
5006 NULL
5007 }, **p;
461fb99c
CW
5008
5009 /* Called just before we write the hibernation image.
5010 *
 5011 * We need to update the domain tracking to reflect that the CPU
 5012 * will be accessing all the pages when creating and restoring from
 5013 * the hibernation image, and so upon restoration those pages will
 5014 * be in the CPU domain.
5015 *
5016 * To make sure the hibernation image contains the latest state,
5017 * we update that state just before writing out the image.
7aab2d53
CW
5018 *
 5019 * To try to reduce the size of the hibernation image, we manually
d0aa301a 5020 * shrink the objects as well; see i915_gem_freeze().
461fb99c
CW
5021 */
5022
6a800eab 5023 i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
17b93c40 5024 i915_gem_drain_freed_objects(dev_priv);
461fb99c 5025
d0aa301a 5026 mutex_lock(&dev_priv->drm.struct_mutex);
7aab2d53 5027 for (p = phases; *p; p++) {
e27ab73d
CW
5028 list_for_each_entry(obj, *p, global_link)
5029 __start_cpu_write(obj);
461fb99c 5030 }
6a800eab 5031 mutex_unlock(&dev_priv->drm.struct_mutex);
461fb99c
CW
5032
5033 return 0;
5034}
5035
f787a5f5 5036void i915_gem_release(struct drm_device *dev, struct drm_file *file)
b962442e 5037{
f787a5f5 5038 struct drm_i915_file_private *file_priv = file->driver_priv;
15f7bbc7 5039 struct drm_i915_gem_request *request;
b962442e
EA
5040
5041 /* Clean up our request list when the client is going away, so that
5042 * later retire_requests won't dereference our soon-to-be-gone
5043 * file_priv.
5044 */
1c25595f 5045 spin_lock(&file_priv->mm.lock);
c8659efa 5046 list_for_each_entry(request, &file_priv->mm.request_list, client_link)
f787a5f5 5047 request->file_priv = NULL;
1c25595f 5048 spin_unlock(&file_priv->mm.lock);
b29c19b6
CW
5049}
5050
829a0af2 5051int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
b29c19b6
CW
5052{
5053 struct drm_i915_file_private *file_priv;
e422b888 5054 int ret;
b29c19b6 5055
c4c29d7b 5056 DRM_DEBUG("\n");
b29c19b6
CW
5057
5058 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5059 if (!file_priv)
5060 return -ENOMEM;
5061
5062 file->driver_priv = file_priv;
829a0af2 5063 file_priv->dev_priv = i915;
ab0e7ff9 5064 file_priv->file = file;
b29c19b6
CW
5065
5066 spin_lock_init(&file_priv->mm.lock);
5067 INIT_LIST_HEAD(&file_priv->mm.request_list);
b29c19b6 5068
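	/* -1 means no video (BSD) engine has been chosen yet; on parts
	 * with more than one such engine, one is picked when the client
	 * first submits.
	 */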
c80ff16e 5069 file_priv->bsd_engine = -1;
de1add36 5070
829a0af2 5071 ret = i915_gem_context_open(i915, file);
e422b888
BW
5072 if (ret)
5073 kfree(file_priv);
b29c19b6 5074
e422b888 5075 return ret;
b29c19b6
CW
5076}
5077
b680c37a
DV
5078/**
5079 * i915_gem_track_fb - update frontbuffer tracking
d9072a3e
GT
5080 * @old: current GEM buffer for the frontbuffer slots
5081 * @new: new GEM buffer for the frontbuffer slots
5082 * @frontbuffer_bits: bitmask of frontbuffer slots
b680c37a
DV
5083 *
5084 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5085 * from @old and setting them in @new. Both @old and @new can be NULL.
5086 */
a071fa00
DV
5087void i915_gem_track_fb(struct drm_i915_gem_object *old,
5088 struct drm_i915_gem_object *new,
5089 unsigned frontbuffer_bits)
5090{
faf5bf0a
CW
 5091 /* Control of individual bits within the mask is guarded by
5092 * the owning plane->mutex, i.e. we can never see concurrent
5093 * manipulation of individual bits. But since the bitfield as a whole
5094 * is updated using RMW, we need to use atomics in order to update
5095 * the bits.
5096 */
5097 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
5098 sizeof(atomic_t) * BITS_PER_BYTE);
5099
a071fa00 5100 if (old) {
faf5bf0a
CW
5101 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
5102 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
a071fa00
DV
5103 }
5104
5105 if (new) {
faf5bf0a
CW
5106 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
5107 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
a071fa00
DV
5108 }
5109}
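
/* Illustrative sketch only (not an actual call site): when a plane is
 * flipped from one framebuffer to another, the caller transfers that
 * plane's bit(s) in the frontbuffer mask, e.g.
 *
 *	i915_gem_track_fb(old_fb_obj, new_fb_obj, plane_frontbuffer_bits);
 *
 * where old_fb_obj, new_fb_obj and plane_frontbuffer_bits are placeholder
 * names for the plane's previous object, its new object and its slot bits.
 */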
5110
ea70299d
DG
5111/* Allocate a new GEM object and fill it with the supplied data */
5112struct drm_i915_gem_object *
12d79d78 5113i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
ea70299d
DG
5114 const void *data, size_t size)
5115{
5116 struct drm_i915_gem_object *obj;
be062fa4
CW
5117 struct file *file;
5118 size_t offset;
5119 int err;
ea70299d 5120
12d79d78 5121 obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
fe3db79b 5122 if (IS_ERR(obj))
ea70299d
DG
5123 return obj;
5124
ce8ff099 5125 GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
ea70299d 5126
be062fa4
CW
5127 file = obj->base.filp;
5128 offset = 0;
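	/* Copy the supplied data into the object's shmem backing store one
	 * page at a time, going through the pagecache write helpers so the
	 * pages end up correctly marked up-to-date and dirty.
	 */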
5129 do {
5130 unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
5131 struct page *page;
5132 void *pgdata, *vaddr;
ea70299d 5133
be062fa4
CW
5134 err = pagecache_write_begin(file, file->f_mapping,
5135 offset, len, 0,
5136 &page, &pgdata);
5137 if (err < 0)
5138 goto fail;
ea70299d 5139
be062fa4
CW
5140 vaddr = kmap(page);
5141 memcpy(vaddr, data, len);
5142 kunmap(page);
5143
5144 err = pagecache_write_end(file, file->f_mapping,
5145 offset, len, len,
5146 page, pgdata);
5147 if (err < 0)
5148 goto fail;
5149
5150 size -= len;
5151 data += len;
5152 offset += len;
5153 } while (size);
ea70299d
DG
5154
5155 return obj;
5156
5157fail:
f8c417cd 5158 i915_gem_object_put(obj);
be062fa4 5159 return ERR_PTR(err);
ea70299d 5160}
96d77634
CW
5161
5162struct scatterlist *
5163i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
5164 unsigned int n,
5165 unsigned int *offset)
5166{
a4f5ea64 5167 struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
96d77634
CW
5168 struct scatterlist *sg;
5169 unsigned int idx, count;
5170
5171 might_sleep();
5172 GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
a4f5ea64 5173 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
96d77634
CW
5174
5175 /* As we iterate forward through the sg, we record each entry in a
5176 * radixtree for quick repeated (backwards) lookups. If we have seen
5177 * this index previously, we will have an entry for it.
5178 *
5179 * Initial lookup is O(N), but this is amortized to O(1) for
5180 * sequential page access (where each new request is consecutive
5181 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
5182 * i.e. O(1) with a large constant!
5183 */
5184 if (n < READ_ONCE(iter->sg_idx))
5185 goto lookup;
5186
5187 mutex_lock(&iter->lock);
5188
 5189 /* We prefer to reuse the last sg so that repeated lookups of this
 5190 * (or the subsequent) sg are fast - comparing against the last
5191 * sg is faster than going through the radixtree.
5192 */
5193
5194 sg = iter->sg_pos;
5195 idx = iter->sg_idx;
5196 count = __sg_page_count(sg);
5197
5198 while (idx + count <= n) {
5199 unsigned long exception, i;
5200 int ret;
5201
5202 /* If we cannot allocate and insert this entry, or the
5203 * individual pages from this range, cancel updating the
5204 * sg_idx so that on this lookup we are forced to linearly
5205 * scan onwards, but on future lookups we will try the
5206 * insertion again (in which case we need to be careful of
5207 * the error return reporting that we have already inserted
5208 * this index).
5209 */
5210 ret = radix_tree_insert(&iter->radix, idx, sg);
5211 if (ret && ret != -EEXIST)
5212 goto scan;
5213
5214 exception =
5215 RADIX_TREE_EXCEPTIONAL_ENTRY |
5216 idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
5217 for (i = 1; i < count; i++) {
5218 ret = radix_tree_insert(&iter->radix, idx + i,
5219 (void *)exception);
5220 if (ret && ret != -EEXIST)
5221 goto scan;
5222 }
5223
5224 idx += count;
5225 sg = ____sg_next(sg);
5226 count = __sg_page_count(sg);
5227 }
5228
5229scan:
5230 iter->sg_pos = sg;
5231 iter->sg_idx = idx;
5232
5233 mutex_unlock(&iter->lock);
5234
5235 if (unlikely(n < idx)) /* insertion completed by another thread */
5236 goto lookup;
5237
5238 /* In case we failed to insert the entry into the radixtree, we need
5239 * to look beyond the current sg.
5240 */
5241 while (idx + count <= n) {
5242 idx += count;
5243 sg = ____sg_next(sg);
5244 count = __sg_page_count(sg);
5245 }
5246
5247 *offset = n - idx;
5248 return sg;
5249
5250lookup:
5251 rcu_read_lock();
5252
5253 sg = radix_tree_lookup(&iter->radix, n);
5254 GEM_BUG_ON(!sg);
5255
 5256 /* If this index is in the middle of a multi-page sg entry,
5257 * the radixtree will contain an exceptional entry that points
5258 * to the start of that range. We will return the pointer to
5259 * the base page and the offset of this page within the
5260 * sg entry's range.
5261 */
5262 *offset = 0;
5263 if (unlikely(radix_tree_exception(sg))) {
5264 unsigned long base =
5265 (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
5266
5267 sg = radix_tree_lookup(&iter->radix, base);
5268 GEM_BUG_ON(!sg);
5269
5270 *offset = n - base;
5271 }
5272
5273 rcu_read_unlock();
5274
5275 return sg;
5276}
5277
5278struct page *
5279i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
5280{
5281 struct scatterlist *sg;
5282 unsigned int offset;
5283
5284 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
5285
5286 sg = i915_gem_object_get_sg(obj, n, &offset);
5287 return nth_page(sg_page(sg), offset);
5288}
5289
5290/* Like i915_gem_object_get_page(), but mark the returned page dirty */
5291struct page *
5292i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
5293 unsigned int n)
5294{
5295 struct page *page;
5296
5297 page = i915_gem_object_get_page(obj, n);
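	/* If the object is already tracked as dirty in its entirety, all of
	 * its backing pages will be written back when they are released, so
	 * there is no need to dirty this page individually.
	 */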
a4f5ea64 5298 if (!obj->mm.dirty)
96d77634
CW
5299 set_page_dirty(page);
5300
5301 return page;
5302}
5303
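/* Return the DMA (bus) address of the n'th page of the object by locating
 * the scatterlist entry that covers it and adding the page offset within
 * that entry. The object's pages must already be pinned.
 */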
5304dma_addr_t
5305i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
5306 unsigned long n)
5307{
5308 struct scatterlist *sg;
5309 unsigned int offset;
5310
5311 sg = i915_gem_object_get_sg(obj, n, &offset);
5312 return sg_dma_address(sg) + (offset << PAGE_SHIFT);
5313}
935a2f77 5314
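/* Convert a shmem-backed object into one backed by a single contiguous
 * physical allocation (i915_gem_phys_ops), preserving its current
 * contents; only useful for legacy hardware that cannot scatter-gather.
 */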
8eeb7906
CW
5315int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
5316{
5317 struct sg_table *pages;
5318 int err;
5319
5320 if (align > obj->base.size)
5321 return -EINVAL;
5322
5323 if (obj->ops == &i915_gem_phys_ops)
5324 return 0;
5325
5326 if (obj->ops != &i915_gem_object_ops)
5327 return -EINVAL;
5328
5329 err = i915_gem_object_unbind(obj);
5330 if (err)
5331 return err;
5332
5333 mutex_lock(&obj->mm.lock);
5334
5335 if (obj->mm.madv != I915_MADV_WILLNEED) {
5336 err = -EFAULT;
5337 goto err_unlock;
5338 }
5339
5340 if (obj->mm.quirked) {
5341 err = -EFAULT;
5342 goto err_unlock;
5343 }
5344
5345 if (obj->mm.mapping) {
5346 err = -EBUSY;
5347 goto err_unlock;
5348 }
5349
5350 pages = obj->mm.pages;
5351 obj->ops = &i915_gem_phys_ops;
5352
8fb6a5df 5353 err = ____i915_gem_object_get_pages(obj);
8eeb7906
CW
5354 if (err)
5355 goto err_xfer;
5356
5357 /* Perma-pin (until release) the physical set of pages */
5358 __i915_gem_object_pin_pages(obj);
5359
5360 if (!IS_ERR_OR_NULL(pages))
5361 i915_gem_object_ops.put_pages(obj, pages);
5362 mutex_unlock(&obj->mm.lock);
5363 return 0;
5364
5365err_xfer:
5366 obj->ops = &i915_gem_object_ops;
5367 obj->mm.pages = pages;
5368err_unlock:
5369 mutex_unlock(&obj->mm.lock);
5370 return err;
5371}
5372
935a2f77
CW
5373#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
5374#include "selftests/scatterlist.c"
66d9cb5d 5375#include "selftests/mock_gem_device.c"
44653988 5376#include "selftests/huge_gem_object.c"
8335fd65 5377#include "selftests/i915_gem_object.c"
17059450 5378#include "selftests/i915_gem_coherency.c"
935a2f77 5379#endif