/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
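
/*
 * Worked example for the dumb-buffer pitch above (illustrative numbers, not
 * from the original source): a 1366x768 XRGB8888 buffer has bpp = 32, so the
 * byte width is 1366 * 4 = 5464, which ALIGN(..., 64) rounds up to a pitch
 * of 5504 bytes, giving size = 5504 * 768 bytes.
 */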

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
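
/*
 * Sketch of the bit17 swizzle the two helpers above undo (an explanatory
 * note, not part of the original source): on affected chipsets the memory
 * controller swaps the two 64-byte halves of each 128-byte span whenever
 * bit 17 of the physical address is set. Copying at most one cacheline at
 * a time and XORing the offset with 64 reverses that swap, e.g. bytes the
 * CPU expects at offsets 0..63 are fetched from 64..127 and vice versa.
 */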

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

next_page:
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}
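
/*
 * Note on the fast/slow dance above (a summary, not in the original
 * source): the fastpath copies via kmap_atomic with pagefaults disabled
 * while struct_mutex is held, so it must not fault. When it does, the
 * mutex is dropped, the user buffer is prefaulted once, and the copy is
 * retried with the sleeping kmap/__copy_to_user variants before the
 * mutex is retaken.
 */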

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}
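
/*
 * Why the fence is dropped first (an explanatory note, not in the original
 * source): writes through a fenced GTT mapping would be detiled by the
 * hardware, whereas pwrite is defined as a plain linear copy from the CPU's
 * point of view, so i915_gem_object_put_fence() above makes sure the
 * aperture window is unfenced before any bytes land. An -EFAULT from the
 * atomic copy simply punts the whole operation to the shmem slowpath in
 * the caller.
 */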

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

next_page:
		set_page_dirty(page);
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}
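
/*
 * Worked example for partial_cacheline_write above (illustrative, not in
 * the original source): with a 64-byte clflush size, a 48-byte write at
 * page offset 32 gives (32 | 48) & 63 != 0, so the destination lines are
 * flushed before the copy to avoid mixing stale bytes into the untouched
 * parts of those cachelines; a 128-byte write at offset 64 yields 0 and
 * skips the pre-flush entirely.
 */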

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);

	return ret;
}
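
/*
 * Background on the "outstanding lazy request" check above (a summary, not
 * in the original source): commands can be emitted to a ring before an
 * explicit request is attached; the seqno for such a batch only reaches
 * the hardware once i915_add_request() runs. Waiting on that seqno would
 * otherwise never complete, so callers flush the lazy request first.
 */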

static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_ring_buffer *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: duh!
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
			struct timespec *timeout,
			struct drm_i915_file_private *file_priv)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
	struct timespec before, now;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	int ret;

	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;

	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	getrawmonotonic(&now);
	trace_i915_gem_request_wait_end(ring, seqno);

	if (!irq_test_in_progress)
		ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}

	return ret;
}
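
/*
 * Illustration of the reset_counter handshake __wait_seqno relies on (a
 * sketch, not part of the original source): a caller samples the counter
 * while still holding the lock that made its seqno valid, e.g.
 *
 *	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 *	mutex_unlock(&dev->struct_mutex);
 *	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
 *
 * If a GPU reset completes between the sample and the wait, the counter
 * comparison in the loop above turns the stale wait into -EAGAIN instead
 * of sleeping on a seqno that will never signal.
 */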

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL, NULL);
}

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *ring)
{
	i915_gem_retire_requests_ring(ring);

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqnos, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	obj->last_write_seqno = 0;
	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;

	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct drm_i915_file_private *file_priv,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = obj->ring;
	unsigned reset_counter;
	u32 seqno;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj,
							  file->driver_priv,
							  !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_display)
		i915_gem_object_flush_cpu_write_domain(obj, true);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
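
/*
 * Contrast between the two mmap flavours (an explanatory note, not part of
 * the original source): the mmap ioctl above returns a CPU pointer into
 * the shmem backing store directly, whereas the mmap_gtt path further down
 * only hands back a fake offset that the client passes to mmap(2) on the
 * drm fd, faulting pages in through the aperture via i915_gem_fault().
 */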

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	intel_runtime_pm_get(dev_priv);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon reacquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
	if (ret)
		goto unlock;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EINVAL;
		goto unlock;
	}

	/* Now bind it into the GTT if needed */
	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
	if (ret)
		goto unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unpin;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unpin;

	obj->fault_mappable = true;

	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
	pfn >>= PAGE_SHIFT;
	pfn += page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unpin:
	i915_gem_object_ggtt_unpin(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
		/* If this -EIO is due to a gpu hang, give the reset code a
		 * chance to clean up the mess. Otherwise return the proper
		 * SIGBUS. */
		if (i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}

	intel_runtime_pm_put(dev_priv);
	return ret;
}

void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
	struct i915_vma *vma;

	/*
	 * Only the global gtt is relevant for gtt memory mappings, so restrict
	 * list traversal to objects bound into the global address space. Note
	 * that the active list should be empty, but better safe than sorry.
	 */
	WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
	list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
		i915_gem_release_mmap(vma->obj);
	list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
		i915_gem_release_mmap(vma->obj);
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
	obj->fault_mappable = false;
}

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;
}
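
/*
 * Worked example for i915_gem_get_gtt_size (illustrative numbers, not from
 * the original source): a 1.5 MiB tiled object on gen3 starts from the
 * 1 MiB minimum and doubles until it covers the object, ending at a 2 MiB
 * fence region; on gen4+ the same object needs no rounding and occupies
 * exactly 1.5 MiB of GTT space.
 */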

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced)
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	if (drm_vma_node_has_offset(&obj->base.vma_node))
		return 0;

	dev_priv->mm.shrinker_no_lock_stealing = true;

	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	/* Badly fragmented mmap space? The only way we can recover
	 * space is by destroying unwanted objects. We can't randomly release
	 * mmap_offsets as userspace expects them to be persistent for the
	 * lifetime of the objects. The closest we can get is to release the
	 * offsets on purgeable objects by truncating them and marking them
	 * purged, which prevents userspace from ever using those objects again.
	 */
	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	i915_gem_shrink_all(dev_priv);
	ret = drm_gem_create_mmap_offset(&obj->base);
out:
	dev_priv->mm.shrinker_no_lock_stealing = false;

	return ret;
}
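
/*
 * Summary of the fallback ladder above (not in the original source): try
 * the offset allocation outright, retry after purging roughly enough
 * purgeable pages to cover this object, and finally retry after evicting
 * everything the shrinker can reach. Lock stealing is disabled throughout
 * so a recursing shrinker cannot deadlock on struct_mutex.
 */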
1595
1596static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1597{
d8cb5086
CW
1598 drm_gem_free_mmap_offset(&obj->base);
1599}
1600
de151cf6 1601int
ff72145b
DA
1602i915_gem_mmap_gtt(struct drm_file *file,
1603 struct drm_device *dev,
1604 uint32_t handle,
1605 uint64_t *offset)
de151cf6 1606{
da761a6e 1607 struct drm_i915_private *dev_priv = dev->dev_private;
05394f39 1608 struct drm_i915_gem_object *obj;
de151cf6
JB
1609 int ret;
1610
76c1dec1 1611 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 1612 if (ret)
76c1dec1 1613 return ret;
de151cf6 1614
ff72145b 1615 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
c8725226 1616 if (&obj->base == NULL) {
1d7cfea1
CW
1617 ret = -ENOENT;
1618 goto unlock;
1619 }
de151cf6 1620
5d4545ae 1621 if (obj->base.size > dev_priv->gtt.mappable_end) {
da761a6e 1622 ret = -E2BIG;
ff56b0bc 1623 goto out;
da761a6e
CW
1624 }
1625
05394f39 1626 if (obj->madv != I915_MADV_WILLNEED) {
bd9b6a4e 1627 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
8c99e57d 1628 ret = -EFAULT;
1d7cfea1 1629 goto out;
ab18282d
CW
1630 }
1631
d8cb5086
CW
1632 ret = i915_gem_object_create_mmap_offset(obj);
1633 if (ret)
1634 goto out;
de151cf6 1635
0de23977 1636 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
de151cf6 1637
1d7cfea1 1638out:
05394f39 1639 drm_gem_object_unreference(&obj->base);
1d7cfea1 1640unlock:
de151cf6 1641 mutex_unlock(&dev->struct_mutex);
1d7cfea1 1642 return ret;
de151cf6
JB
1643}
1644
ff72145b
DA
1645/**
1646 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1647 * @dev: DRM device
1648 * @data: GTT mapping ioctl data
1649 * @file: GEM object info
1650 *
1651 * Simply returns the fake offset to userspace so it can mmap it.
1652 * The mmap call will end up in drm_gem_mmap(), which will set things
1653 * up so we can get faults in the handler above.
1654 *
1655 * The fault handler will take care of binding the object into the GTT
1656 * (since it may have been evicted to make room for something), allocating
1657 * a fence register, and mapping the appropriate aperture address into
1658 * userspace.
1659 */
1660int
1661i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1662 struct drm_file *file)
1663{
1664 struct drm_i915_gem_mmap_gtt *args = data;
1665
ff72145b
DA
1666 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1667}
1668
225067ee
DV
1669/* Immediately discard the backing storage */
1670static void
1671i915_gem_object_truncate(struct drm_i915_gem_object *obj)
e5281ccd 1672{
e5281ccd 1673 struct inode *inode;
e5281ccd 1674
4d6294bf 1675 i915_gem_object_free_mmap_offset(obj);
1286ff73 1676
4d6294bf
CW
1677 if (obj->base.filp == NULL)
1678 return;
e5281ccd 1679
225067ee
DV
1680 /* Our goal here is to return as much of the memory as
1681 * is possible back to the system as we are called from OOM.
1682 * To do this we must instruct the shmfs to drop all of its
1683 * backing pages, *now*.
1684 */
496ad9aa 1685 inode = file_inode(obj->base.filp);
225067ee 1686 shmem_truncate_range(inode, 0, (loff_t)-1);
e5281ccd 1687
225067ee
DV
1688 obj->madv = __I915_MADV_PURGED;
1689}
e5281ccd 1690
225067ee
DV
1691static inline int
1692i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1693{
1694 return obj->madv == I915_MADV_DONTNEED;
e5281ccd
CW
1695}
1696
5cdf5881 1697static void
05394f39 1698i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
673a394b 1699{
90797e6d
ID
1700 struct sg_page_iter sg_iter;
1701 int ret;
1286ff73 1702
05394f39 1703 BUG_ON(obj->madv == __I915_MADV_PURGED);
673a394b 1704
6c085a72
CW
1705 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1706 if (ret) {
1707 /* In the event of a disaster, abandon all caches and
1708 * hope for the best.
1709 */
1710 WARN_ON(ret != -EIO);
2c22569b 1711 i915_gem_clflush_object(obj, true);
6c085a72
CW
1712 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1713 }
1714
6dacfd2f 1715 if (i915_gem_object_needs_bit17_swizzle(obj))
280b713b
EA
1716 i915_gem_object_save_bit_17_swizzle(obj);
1717
05394f39
CW
1718 if (obj->madv == I915_MADV_DONTNEED)
1719 obj->dirty = 0;
3ef94daa 1720
90797e6d 1721 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
2db76d7c 1722 struct page *page = sg_page_iter_page(&sg_iter);
9da3da66 1723
05394f39 1724 if (obj->dirty)
9da3da66 1725 set_page_dirty(page);
3ef94daa 1726
05394f39 1727 if (obj->madv == I915_MADV_WILLNEED)
9da3da66 1728 mark_page_accessed(page);
3ef94daa 1729
9da3da66 1730 page_cache_release(page);
3ef94daa 1731 }
05394f39 1732 obj->dirty = 0;
673a394b 1733
9da3da66
CW
1734 sg_free_table(obj->pages);
1735 kfree(obj->pages);
37e680a1 1736}
6c085a72 1737
dd624afd 1738int
37e680a1
CW
1739i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1740{
1741 const struct drm_i915_gem_object_ops *ops = obj->ops;
1742
2f745ad3 1743 if (obj->pages == NULL)
37e680a1
CW
1744 return 0;
1745
a5570178
CW
1746 if (obj->pages_pin_count)
1747 return -EBUSY;
1748
9843877d 1749 BUG_ON(i915_gem_obj_bound_any(obj));
3e123027 1750
a2165e31
CW
1751 /* ->put_pages might need to allocate memory for the bit17 swizzle
1752 * array, hence protect them from being reaped by removing them from gtt
1753 * lists early. */
35c20a60 1754 list_del(&obj->global_list);
a2165e31 1755
37e680a1 1756 ops->put_pages(obj);
05394f39 1757 obj->pages = NULL;
37e680a1 1758
6c085a72
CW
1759 if (i915_gem_object_is_purgeable(obj))
1760 i915_gem_object_truncate(obj);
1761
1762 return 0;
1763}
1764
d9973b43 1765static unsigned long
93927ca5
DV
1766__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1767 bool purgeable_only)
6c085a72 1768{
57094f82 1769 struct list_head still_bound_list;
6c085a72 1770 struct drm_i915_gem_object *obj, *next;
d9973b43 1771 unsigned long count = 0;
6c085a72
CW
1772
1773 list_for_each_entry_safe(obj, next,
1774 &dev_priv->mm.unbound_list,
35c20a60 1775 global_list) {
93927ca5 1776 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
37e680a1 1777 i915_gem_object_put_pages(obj) == 0) {
6c085a72
CW
1778 count += obj->base.size >> PAGE_SHIFT;
1779 if (count >= target)
1780 return count;
1781 }
1782 }
1783
57094f82
CW
1784 /*
1785 * As we may completely rewrite the bound list whilst unbinding
1786 * (due to retiring requests) we have to strictly process only
1787 * one element of the list at a time, and recheck the list
1788 * on every iteration.
1789 */
1790 INIT_LIST_HEAD(&still_bound_list);
1791 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
07fe0b12 1792 struct i915_vma *vma, *v;
80dcfdbd 1793
57094f82
CW
1794 obj = list_first_entry(&dev_priv->mm.bound_list,
1795 typeof(*obj), global_list);
1796 list_move_tail(&obj->global_list, &still_bound_list);
1797
80dcfdbd
BW
1798 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1799 continue;
1800
57094f82
CW
1801 /*
1802 * Hold a reference whilst we unbind this object, as we may
1803 * end up waiting for and retiring requests. This might
1804 * release the final reference (held by the active list)
1805 * and result in the object being freed from under us.
1807 *
1808 * Note 1: Shrinking the bound list is special since only active
1809 * (and hence bound) objects can contain such limbo objects, so
1810 * we don't need special tricks for shrinking the unbound list.
1811 * The only other place where we have to be careful with active
1812 * objects suddenly disappearing due to retiring requests is the
1813 * eviction code.
1814 *
1815 * Note 2: Even though the bound list doesn't hold a reference
1816 * to the object we can safely grab one here: The final object
1817 * unreferencing and the bound_list are both protected by the
1818 * dev->struct_mutex and so we won't ever be able to observe an
1819 * object on the bound_list with a reference count equal to 0.
1820 */
1821 drm_gem_object_reference(&obj->base);
1822
07fe0b12
BW
1823 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1824 if (i915_vma_unbind(vma))
1825 break;
80dcfdbd 1826
57094f82 1827 if (i915_gem_object_put_pages(obj) == 0)
6c085a72 1828 count += obj->base.size >> PAGE_SHIFT;
57094f82
CW
1829
1830 drm_gem_object_unreference(&obj->base);
6c085a72 1831 }
57094f82 1832 list_splice(&still_bound_list, &dev_priv->mm.bound_list);
6c085a72
CW
1833
1834 return count;
1835}
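
/* The shrink above is thus two-pass: unbound objects first, where
 * dropping pages is cheap, then bound objects, each of which must have
 * every VMA unbound (possibly waiting on the GPU) before its pages can
 * be reaped. still_bound_list keeps partially-processed entries safe
 * while requests retire and rewrite the bound list underneath us.
 */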
1836
d9973b43 1837static unsigned long
93927ca5
DV
1838i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1839{
1840 return __i915_gem_shrink(dev_priv, target, true);
1841}
1842
d9973b43 1843static unsigned long
6c085a72
CW
1844i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1845{
1846 struct drm_i915_gem_object *obj, *next;
7dc19d5a 1847 long freed = 0;
6c085a72
CW
1848
1849 i915_gem_evict_everything(dev_priv->dev);
1850
35c20a60 1851 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
7dc19d5a 1852 global_list) {
d9973b43 1853 if (i915_gem_object_put_pages(obj) == 0)
7dc19d5a 1854 freed += obj->base.size >> PAGE_SHIFT;
7dc19d5a
DC
1855 }
1856 return freed;
225067ee
DV
1857}
1858
37e680a1 1859static int
6c085a72 1860i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
e5281ccd 1861{
6c085a72 1862 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
e5281ccd
CW
1863 int page_count, i;
1864 struct address_space *mapping;
9da3da66
CW
1865 struct sg_table *st;
1866 struct scatterlist *sg;
90797e6d 1867 struct sg_page_iter sg_iter;
e5281ccd 1868 struct page *page;
90797e6d 1869 unsigned long last_pfn = 0; /* suppress gcc warning */
6c085a72 1870 gfp_t gfp;
e5281ccd 1871
6c085a72
CW
1872 /* Assert that the object is not currently in any GPU domain. As it
1873 * wasn't in the GTT, there shouldn't be any way it could have been in
1874 * a GPU cache.
1875 */
1876 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1877 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1878
9da3da66
CW
1879 st = kmalloc(sizeof(*st), GFP_KERNEL);
1880 if (st == NULL)
1881 return -ENOMEM;
1882
05394f39 1883 page_count = obj->base.size / PAGE_SIZE;
9da3da66 1884 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
9da3da66 1885 kfree(st);
e5281ccd 1886 return -ENOMEM;
9da3da66 1887 }
e5281ccd 1888
9da3da66
CW
1889 /* Get the list of pages out of our struct file. They'll be pinned
1890 * at this point until we release them.
1891 *
1892 * Fail silently without starting the shrinker
1893 */
496ad9aa 1894 mapping = file_inode(obj->base.filp)->i_mapping;
6c085a72 1895 gfp = mapping_gfp_mask(mapping);
caf49191 1896 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
6c085a72 1897 gfp &= ~(__GFP_IO | __GFP_WAIT);
90797e6d
ID
1898 sg = st->sgl;
1899 st->nents = 0;
1900 for (i = 0; i < page_count; i++) {
6c085a72
CW
1901 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1902 if (IS_ERR(page)) {
1903 i915_gem_purge(dev_priv, page_count);
1904 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1905 }
1906 if (IS_ERR(page)) {
1907 /* We've tried hard to allocate the memory by reaping
1908 * our own buffer, now let the real VM do its job and
1909 * go down in flames if truly OOM.
1910 */
caf49191 1911 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
6c085a72
CW
1912 gfp |= __GFP_IO | __GFP_WAIT;
1913
1914 i915_gem_shrink_all(dev_priv);
1915 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1916 if (IS_ERR(page))
1917 goto err_pages;
1918
caf49191 1919 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
6c085a72
CW
1920 gfp &= ~(__GFP_IO | __GFP_WAIT);
1921 }
426729dc
KRW
1922#ifdef CONFIG_SWIOTLB
1923 if (swiotlb_nr_tbl()) {
1924 st->nents++;
1925 sg_set_page(sg, page, PAGE_SIZE, 0);
1926 sg = sg_next(sg);
1927 continue;
1928 }
1929#endif
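		/* Coalesce runs of physically contiguous pages into single
		 * scatterlist entries: extend the current entry while each
		 * new page directly follows the previous pfn, otherwise
		 * start a new entry.
		 */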
90797e6d
ID
1930 if (!i || page_to_pfn(page) != last_pfn + 1) {
1931 if (i)
1932 sg = sg_next(sg);
1933 st->nents++;
1934 sg_set_page(sg, page, PAGE_SIZE, 0);
1935 } else {
1936 sg->length += PAGE_SIZE;
1937 }
1938 last_pfn = page_to_pfn(page);
3bbbe706
DV
1939
1940 /* Check that the i965g/gm workaround works. */
1941 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
e5281ccd 1942 }
426729dc
KRW
1943#ifdef CONFIG_SWIOTLB
1944 if (!swiotlb_nr_tbl())
1945#endif
1946 sg_mark_end(sg);
74ce6b6c
CW
1947 obj->pages = st;
1948
6dacfd2f 1949 if (i915_gem_object_needs_bit17_swizzle(obj))
e5281ccd
CW
1950 i915_gem_object_do_bit_17_swizzle(obj);
1951
1952 return 0;
1953
1954err_pages:
90797e6d
ID
1955 sg_mark_end(sg);
1956 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2db76d7c 1957 page_cache_release(sg_page_iter_page(&sg_iter));
9da3da66
CW
1958 sg_free_table(st);
1959 kfree(st);
e5281ccd 1960 return PTR_ERR(page);
673a394b
EA
1961}
1962
37e680a1
CW
1963/* Ensure that the associated pages are gathered from the backing storage
1964 * and pinned into our object. i915_gem_object_get_pages() may be called
1965 * multiple times before they are released by a single call to
1966 * i915_gem_object_put_pages() - once the pages are no longer referenced
1967 * either as a result of memory pressure (reaping pages under the shrinker)
1968 * or as the object is itself released.
1969 */
1970int
1971i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1972{
1973 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1974 const struct drm_i915_gem_object_ops *ops = obj->ops;
1975 int ret;
1976
2f745ad3 1977 if (obj->pages)
37e680a1
CW
1978 return 0;
1979
43e28f09 1980 if (obj->madv != I915_MADV_WILLNEED) {
bd9b6a4e 1981 DRM_DEBUG("Attempting to obtain a purgeable object\n");
8c99e57d 1982 return -EFAULT;
43e28f09
CW
1983 }
1984
a5570178
CW
1985 BUG_ON(obj->pages_pin_count);
1986
37e680a1
CW
1987 ret = ops->get_pages(obj);
1988 if (ret)
1989 return ret;
1990
35c20a60 1991 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
37e680a1 1992 return 0;
673a394b
EA
1993}
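
/* Callers that need the backing store to stay resident pair the above
 * with i915_gem_object_pin_pages()/i915_gem_object_unpin_pages(); a
 * non-zero obj->pages_pin_count makes i915_gem_object_put_pages()
 * bail with -EBUSY until the pages are unpinned.
 */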
1994
e2d05a8b 1995static void
05394f39 1996i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
9d773091 1997 struct intel_ring_buffer *ring)
673a394b 1998{
05394f39 1999 struct drm_device *dev = obj->base.dev;
69dc4987 2000 struct drm_i915_private *dev_priv = dev->dev_private;
9d773091 2001 u32 seqno = intel_ring_get_seqno(ring);
617dbe27 2002
852835f3 2003 BUG_ON(ring == NULL);
02978ff5
CW
2004 if (obj->ring != ring && obj->last_write_seqno) {
2005 /* Keep the seqno relative to the current ring */
2006 obj->last_write_seqno = seqno;
2007 }
05394f39 2008 obj->ring = ring;
673a394b
EA
2009
2010 /* Add a reference if we're newly entering the active list. */
05394f39
CW
2011 if (!obj->active) {
2012 drm_gem_object_reference(&obj->base);
2013 obj->active = 1;
673a394b 2014 }
e35a41de 2015
05394f39 2016 list_move_tail(&obj->ring_list, &ring->active_list);
caea7476 2017
0201f1ec 2018 obj->last_read_seqno = seqno;
caea7476 2019
7dd49065 2020 if (obj->fenced_gpu_access) {
caea7476 2021 obj->last_fenced_seqno = seqno;
caea7476 2022
7dd49065
CW
2023 /* Bump MRU to take account of the delayed flush */
2024 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2025 struct drm_i915_fence_reg *reg;
2026
2027 reg = &dev_priv->fence_regs[obj->fence_reg];
2028 list_move_tail(&reg->lru_list,
2029 &dev_priv->mm.fence_list);
2030 }
caea7476
CW
2031 }
2032}
2033
e2d05a8b
BW
2034void i915_vma_move_to_active(struct i915_vma *vma,
2035 struct intel_ring_buffer *ring)
2036{
2037 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2038 return i915_gem_object_move_to_active(vma->obj, ring);
2039}
2040
caea7476 2041static void
caea7476 2042i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
ce44b0ea 2043{
ca191b13 2044 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
feb822cf
BW
2045 struct i915_address_space *vm;
2046 struct i915_vma *vma;
ce44b0ea 2047
65ce3027 2048 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
05394f39 2049 BUG_ON(!obj->active);
caea7476 2050
feb822cf
BW
2051 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2052 vma = i915_gem_obj_to_vma(obj, vm);
2053 if (vma && !list_empty(&vma->mm_list))
2054 list_move_tail(&vma->mm_list, &vm->inactive_list);
2055 }
caea7476 2056
65ce3027 2057 list_del_init(&obj->ring_list);
caea7476
CW
2058 obj->ring = NULL;
2059
65ce3027
CW
2060 obj->last_read_seqno = 0;
2061 obj->last_write_seqno = 0;
2062 obj->base.write_domain = 0;
2063
2064 obj->last_fenced_seqno = 0;
caea7476 2065 obj->fenced_gpu_access = false;
caea7476
CW
2066
2067 obj->active = 0;
2068 drm_gem_object_unreference(&obj->base);
2069
2070 WARN_ON(i915_verify_lists(dev));
ce44b0ea 2071}
673a394b 2072
9d773091 2073static int
fca26bb4 2074i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
53d227f2 2075{
9d773091
CW
2076 struct drm_i915_private *dev_priv = dev->dev_private;
2077 struct intel_ring_buffer *ring;
2078 int ret, i, j;
53d227f2 2079
107f27a5 2080 /* Carefully retire all requests without writing to the rings */
9d773091 2081 for_each_ring(ring, dev_priv, i) {
107f27a5
CW
2082 ret = intel_ring_idle(ring);
2083 if (ret)
2084 return ret;
9d773091 2085 }
9d773091 2086 i915_gem_retire_requests(dev);
107f27a5
CW
2087
2088 /* Finally reset hw state */
9d773091 2089 for_each_ring(ring, dev_priv, i) {
fca26bb4 2090 intel_ring_init_seqno(ring, seqno);
498d2ac1 2091
9d773091
CW
2092 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2093 ring->sync_seqno[j] = 0;
2094 }
53d227f2 2095
9d773091 2096 return 0;
53d227f2
DV
2097}
2098
fca26bb4
MK
2099int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2100{
2101 struct drm_i915_private *dev_priv = dev->dev_private;
2102 int ret;
2103
2104 if (seqno == 0)
2105 return -EINVAL;
2106
2107 /* The seqno in the HWS page needs to be set to less than what we
2108 * will inject into the ring
2109 */
2110 ret = i915_gem_init_seqno(dev, seqno - 1);
2111 if (ret)
2112 return ret;
2113
2114 /* Carefully set the last_seqno value so that wrap
2115 * detection still works
2116 */
2117 dev_priv->next_seqno = seqno;
2118 dev_priv->last_seqno = seqno - 1;
2119 if (dev_priv->last_seqno == 0)
2120 dev_priv->last_seqno--;
2121
2122 return 0;
2123}
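
/* Wrap detection works because seqno comparisons use signed 32-bit
 * arithmetic, roughly (a sketch of the helper in i915_drv.h):
 *
 *	static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
 *	{
 *		return (s32)(seq1 - seq2) >= 0;
 *	}
 *
 * hence last_seqno must trail the value injected into the ring, and is
 * nudged away from the reserved value 0 above.
 */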
2124
9d773091
CW
2125int
2126i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
53d227f2 2127{
9d773091
CW
2128 struct drm_i915_private *dev_priv = dev->dev_private;
2129
2130 /* reserve 0 for non-seqno */
2131 if (dev_priv->next_seqno == 0) {
fca26bb4 2132 int ret = i915_gem_init_seqno(dev, 0);
9d773091
CW
2133 if (ret)
2134 return ret;
53d227f2 2135
9d773091
CW
2136 dev_priv->next_seqno = 1;
2137 }
53d227f2 2138
f72b3435 2139 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
9d773091 2140 return 0;
53d227f2
DV
2141}
2142
0025c077
MK
2143int __i915_add_request(struct intel_ring_buffer *ring,
2144 struct drm_file *file,
7d736f4f 2145 struct drm_i915_gem_object *obj,
0025c077 2146 u32 *out_seqno)
673a394b 2147{
db53a302 2148 drm_i915_private_t *dev_priv = ring->dev->dev_private;
acb868d3 2149 struct drm_i915_gem_request *request;
7d736f4f 2150 u32 request_ring_position, request_start;
3cce469c
CW
2151 int ret;
2152
7d736f4f 2153 request_start = intel_ring_get_tail(ring);
cc889e0f
DV
2154 /*
2155 * Emit any outstanding flushes - execbuf can fail to emit the flush
2156 * after having emitted the batchbuffer command. Hence we need to fix
2157 * things up similar to emitting the lazy request. The difference here
2158 * is that the flush _must_ happen before the next request, no matter
2159 * what.
2160 */
a7b9761d
CW
2161 ret = intel_ring_flush_all_caches(ring);
2162 if (ret)
2163 return ret;
cc889e0f 2164
3c0e234c
CW
2165 request = ring->preallocated_lazy_request;
2166 if (WARN_ON(request == NULL))
acb868d3 2167 return -ENOMEM;
cc889e0f 2168
a71d8d94
CW
2169 /* Record the position of the start of the request so that
2170 * should we detect the updated seqno part-way through the
2171 * GPU processing the request, we never over-estimate the
2172 * position of the head.
2173 */
2174 request_ring_position = intel_ring_get_tail(ring);
2175
9d773091 2176 ret = ring->add_request(ring);
3c0e234c 2177 if (ret)
3bb73aba 2178 return ret;
673a394b 2179
9d773091 2180 request->seqno = intel_ring_get_seqno(ring);
852835f3 2181 request->ring = ring;
7d736f4f 2182 request->head = request_start;
a71d8d94 2183 request->tail = request_ring_position;
7d736f4f
MK
2184
2185 /* Whilst this request exists, batch_obj will be on the
2186 * active_list, and so will hold the active reference. Only when this
2187 * request is retired will the batch_obj be moved onto the
2188 * inactive_list and lose its active reference. Hence we do not need
2189 * to explicitly hold another reference here.
2190 */
9a7e0c2a 2191 request->batch_obj = obj;
0e50e96b 2192
9a7e0c2a
CW
2193 /* Hold a reference to the current context so that we can inspect
2194 * it later in case a hangcheck error event fires.
2195 */
2196 request->ctx = ring->last_context;
0e50e96b
MK
2197 if (request->ctx)
2198 i915_gem_context_reference(request->ctx);
2199
673a394b 2200 request->emitted_jiffies = jiffies;
852835f3 2201 list_add_tail(&request->list, &ring->request_list);
3bb73aba 2202 request->file_priv = NULL;
852835f3 2203
db53a302
CW
2204 if (file) {
2205 struct drm_i915_file_private *file_priv = file->driver_priv;
2206
1c25595f 2207 spin_lock(&file_priv->mm.lock);
f787a5f5 2208 request->file_priv = file_priv;
b962442e 2209 list_add_tail(&request->client_list,
f787a5f5 2210 &file_priv->mm.request_list);
1c25595f 2211 spin_unlock(&file_priv->mm.lock);
b962442e 2212 }
673a394b 2213
9d773091 2214 trace_i915_gem_request_add(ring, request->seqno);
1823521d 2215 ring->outstanding_lazy_seqno = 0;
3c0e234c 2216 ring->preallocated_lazy_request = NULL;
db53a302 2217
db1b76ca 2218 if (!dev_priv->ums.mm_suspended) {
10cd45b6
MK
2219 i915_queue_hangcheck(ring->dev);
2220
f62a0076
CW
2221 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2222 queue_delayed_work(dev_priv->wq,
2223 &dev_priv->mm.retire_work,
2224 round_jiffies_up_relative(HZ));
2225 intel_mark_busy(dev_priv->dev);
f65d9421 2226 }
cc889e0f 2227
acb868d3 2228 if (out_seqno)
9d773091 2229 *out_seqno = request->seqno;
3cce469c 2230 return 0;
673a394b
EA
2231}
2232
f787a5f5
CW
2233static inline void
2234i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
673a394b 2235{
1c25595f 2236 struct drm_i915_file_private *file_priv = request->file_priv;
673a394b 2237
1c25595f
CW
2238 if (!file_priv)
2239 return;
1c5d22f7 2240
1c25595f 2241 spin_lock(&file_priv->mm.lock);
b29c19b6
CW
2242 list_del(&request->client_list);
2243 request->file_priv = NULL;
1c25595f 2244 spin_unlock(&file_priv->mm.lock);
673a394b 2245}
673a394b 2246
939fd762 2247static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
44e2c070 2248 const struct i915_hw_context *ctx)
be62acb4 2249{
44e2c070 2250 unsigned long elapsed;
be62acb4 2251
44e2c070
MK
2252 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2253
2254 if (ctx->hang_stats.banned)
be62acb4
MK
2255 return true;
2256
2257 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
ccc7bed0 2258 if (!i915_gem_context_is_default(ctx)) {
3fac8978 2259 DRM_DEBUG("context hanging too fast, banning!\n");
ccc7bed0
VS
2260 return true;
2261 } else if (dev_priv->gpu_error.stop_rings == 0) {
2262 DRM_ERROR("gpu hanging too fast, banning!\n");
2263 return true;
3fac8978 2264 }
be62acb4
MK
2265 }
2266
2267 return false;
2268}
2269
939fd762
MK
2270static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2271 struct i915_hw_context *ctx,
b6b0fac0 2272 const bool guilty)
aa60c664 2273{
44e2c070
MK
2274 struct i915_ctx_hang_stats *hs;
2275
2276 if (WARN_ON(!ctx))
2277 return;
aa60c664 2278
44e2c070
MK
2279 hs = &ctx->hang_stats;
2280
2281 if (guilty) {
939fd762 2282 hs->banned = i915_context_is_banned(dev_priv, ctx);
44e2c070
MK
2283 hs->batch_active++;
2284 hs->guilty_ts = get_seconds();
2285 } else {
2286 hs->batch_pending++;
aa60c664
MK
2287 }
2288}
2289
0e50e96b
MK
2290static void i915_gem_free_request(struct drm_i915_gem_request *request)
2291{
2292 list_del(&request->list);
2293 i915_gem_request_remove_from_client(request);
2294
2295 if (request->ctx)
2296 i915_gem_context_unreference(request->ctx);
2297
2298 kfree(request);
2299}
2300
8d9fc7fd
CW
2301struct drm_i915_gem_request *
2302i915_gem_find_active_request(struct intel_ring_buffer *ring)
9375e446 2303{
4db080f9 2304 struct drm_i915_gem_request *request;
8d9fc7fd
CW
2305 u32 completed_seqno;
2306
2307 completed_seqno = ring->get_seqno(ring, false);
4db080f9
CW
2308
2309 list_for_each_entry(request, &ring->request_list, list) {
2310 if (i915_seqno_passed(completed_seqno, request->seqno))
2311 continue;
aa60c664 2312
b6b0fac0 2313 return request;
4db080f9 2314 }
b6b0fac0
MK
2315
2316 return NULL;
2317}
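
/* By construction, the first request whose seqno the GPU has not yet
 * passed is the one the ring was executing when it hung; every later
 * request on the list was merely pending and is treated as innocent.
 */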
2318
2319static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2320 struct intel_ring_buffer *ring)
2321{
2322 struct drm_i915_gem_request *request;
2323 bool ring_hung;
2324
8d9fc7fd 2325 request = i915_gem_find_active_request(ring);
b6b0fac0
MK
2326
2327 if (request == NULL)
2328 return;
2329
2330 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2331
939fd762 2332 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
b6b0fac0
MK
2333
2334 list_for_each_entry_continue(request, &ring->request_list, list)
939fd762 2335 i915_set_reset_status(dev_priv, request->ctx, false);
4db080f9 2336}
aa60c664 2337
4db080f9
CW
2338static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2339 struct intel_ring_buffer *ring)
2340{
dfaae392 2341 while (!list_empty(&ring->active_list)) {
05394f39 2342 struct drm_i915_gem_object *obj;
9375e446 2343
05394f39
CW
2344 obj = list_first_entry(&ring->active_list,
2345 struct drm_i915_gem_object,
2346 ring_list);
9375e446 2347
05394f39 2348 i915_gem_object_move_to_inactive(obj);
673a394b 2349 }
1d62beea
BW
2350
2351 /*
2352 * We must free the requests after all the corresponding objects have
2353 * been moved off active lists, which is the same order the normal
2354 * retire_requests function uses. This is important if objects hold
2355 * implicit references on things like e.g. ppgtt address spaces through
2356 * the request.
2357 */
2358 while (!list_empty(&ring->request_list)) {
2359 struct drm_i915_gem_request *request;
2360
2361 request = list_first_entry(&ring->request_list,
2362 struct drm_i915_gem_request,
2363 list);
2364
2365 i915_gem_free_request(request);
2366 }
673a394b
EA
2367}
2368
19b2dbde 2369void i915_gem_restore_fences(struct drm_device *dev)
312817a3
CW
2370{
2371 struct drm_i915_private *dev_priv = dev->dev_private;
2372 int i;
2373
4b9de737 2374 for (i = 0; i < dev_priv->num_fence_regs; i++) {
312817a3 2375 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
7d2cb39c 2376
94a335db
DV
2377 /*
2378 * Commit delayed tiling changes if we have an object still
2379 * attached to the fence, otherwise just clear the fence.
2380 */
2381 if (reg->obj) {
2382 i915_gem_object_update_fence(reg->obj, reg,
2383 reg->obj->tiling_mode);
2384 } else {
2385 i915_gem_write_fence(dev, i, NULL);
2386 }
312817a3
CW
2387 }
2388}
2389
069efc1d 2390void i915_gem_reset(struct drm_device *dev)
673a394b 2391{
77f01230 2392 struct drm_i915_private *dev_priv = dev->dev_private;
b4519513 2393 struct intel_ring_buffer *ring;
1ec14ad3 2394 int i;
673a394b 2395
4db080f9
CW
2396 /*
2397 * Before we free the objects from the requests, we need to inspect
2398 * them for finding the guilty party. As the requests only borrow
2399 * their reference to the objects, the inspection must be done first.
2400 */
2401 for_each_ring(ring, dev_priv, i)
2402 i915_gem_reset_ring_status(dev_priv, ring);
2403
b4519513 2404 for_each_ring(ring, dev_priv, i)
4db080f9 2405 i915_gem_reset_ring_cleanup(dev_priv, ring);
dfaae392 2406
3d57e5bd
BW
2407 i915_gem_cleanup_ringbuffer(dev);
2408
acce9ffa
BW
2409 i915_gem_context_reset(dev);
2410
19b2dbde 2411 i915_gem_restore_fences(dev);
673a394b
EA
2412}
2413
2414/**
2415 * This function clears the request list as sequence numbers are passed.
2416 */
a71d8d94 2417void
db53a302 2418i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
673a394b 2419{
673a394b
EA
2420 uint32_t seqno;
2421
db53a302 2422 if (list_empty(&ring->request_list))
6c0594a3
KW
2423 return;
2424
db53a302 2425 WARN_ON(i915_verify_lists(ring->dev));
673a394b 2426
b2eadbc8 2427 seqno = ring->get_seqno(ring, true);
1ec14ad3 2428
e9103038
CW
2429 /* Move any buffers on the active list that are no longer referenced
2430 * by the ringbuffer to the flushing/inactive lists as appropriate,
2431 * before we free the context associated with the requests.
2432 */
2433 while (!list_empty(&ring->active_list)) {
2434 struct drm_i915_gem_object *obj;
2435
2436 obj = list_first_entry(&ring->active_list,
2437 struct drm_i915_gem_object,
2438 ring_list);
2439
2440 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2441 break;
2442
2443 i915_gem_object_move_to_inactive(obj);
2444 }
2445
852835f3 2447 while (!list_empty(&ring->request_list)) {
673a394b 2448 struct drm_i915_gem_request *request;
673a394b 2449
852835f3 2450 request = list_first_entry(&ring->request_list,
673a394b
EA
2451 struct drm_i915_gem_request,
2452 list);
673a394b 2453
dfaae392 2454 if (!i915_seqno_passed(seqno, request->seqno))
b84d5f0c
CW
2455 break;
2456
db53a302 2457 trace_i915_gem_request_retire(ring, request->seqno);
a71d8d94
CW
2458 /* We know the GPU must have read the request to have
2459 * sent us the seqno + interrupt, so use the position
2460 * of tail of the request to update the last known position
2461 * of the GPU head.
2462 */
2463 ring->last_retired_head = request->tail;
b84d5f0c 2464
0e50e96b 2465 i915_gem_free_request(request);
b84d5f0c 2466 }
673a394b 2467
db53a302
CW
2468 if (unlikely(ring->trace_irq_seqno &&
2469 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1ec14ad3 2470 ring->irq_put(ring);
db53a302 2471 ring->trace_irq_seqno = 0;
9d34e5db 2472 }
23bc5982 2473
db53a302 2474 WARN_ON(i915_verify_lists(ring->dev));
673a394b
EA
2475}
2476
b29c19b6 2477bool
b09a1fec
CW
2478i915_gem_retire_requests(struct drm_device *dev)
2479{
2480 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 2481 struct intel_ring_buffer *ring;
b29c19b6 2482 bool idle = true;
1ec14ad3 2483 int i;
b09a1fec 2484
b29c19b6 2485 for_each_ring(ring, dev_priv, i) {
b4519513 2486 i915_gem_retire_requests_ring(ring);
b29c19b6
CW
2487 idle &= list_empty(&ring->request_list);
2488 }
2489
2490 if (idle)
2491 mod_delayed_work(dev_priv->wq,
2492 &dev_priv->mm.idle_work,
2493 msecs_to_jiffies(100));
2494
2495 return idle;
b09a1fec
CW
2496}
2497
75ef9da2 2498static void
673a394b
EA
2499i915_gem_retire_work_handler(struct work_struct *work)
2500{
b29c19b6
CW
2501 struct drm_i915_private *dev_priv =
2502 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2503 struct drm_device *dev = dev_priv->dev;
0a58705b 2504 bool idle;
673a394b 2505
891b48cf 2506 /* Come back later if the device is busy... */
b29c19b6
CW
2507 idle = false;
2508 if (mutex_trylock(&dev->struct_mutex)) {
2509 idle = i915_gem_retire_requests(dev);
2510 mutex_unlock(&dev->struct_mutex);
673a394b 2511 }
b29c19b6 2512 if (!idle)
bcb45086
CW
2513 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2514 round_jiffies_up_relative(HZ));
b29c19b6 2515}
0a58705b 2516
b29c19b6
CW
2517static void
2518i915_gem_idle_work_handler(struct work_struct *work)
2519{
2520 struct drm_i915_private *dev_priv =
2521 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2522
2523 intel_mark_idle(dev_priv->dev);
673a394b
EA
2524}
2525
30dfebf3
DV
2526/**
2527 * Ensures that an object will eventually get non-busy by flushing any required
2528 * write domains, emitting any outstanding lazy request and retiring any
2529 * completed requests.
2530 */
2531static int
2532i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2533{
2534 int ret;
2535
2536 if (obj->active) {
0201f1ec 2537 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
30dfebf3
DV
2538 if (ret)
2539 return ret;
2540
30dfebf3
DV
2541 i915_gem_retire_requests_ring(obj->ring);
2542 }
2543
2544 return 0;
2545}
2546
23ba4fd0
BW
2547/**
2548 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2549 * @DRM_IOCTL_ARGS: standard ioctl arguments
2550 *
2551 * Returns 0 if successful, else an error is returned with the remaining time in
2552 * the timeout parameter.
2553 * -ETIME: object is still busy after timeout
2554 * -ERESTARTSYS: signal interrupted the wait
2555 * -ENOENT: object doesn't exist
2556 * Also possible, but rare:
2557 * -EAGAIN: GPU wedged
2558 * -ENOMEM: damn
2559 * -ENODEV: Internal IRQ fail
2560 * -E?: The add request failed
2561 *
2562 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2563 * non-zero timeout parameter the wait ioctl will wait for the given number of
2564 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2565 * without holding struct_mutex the object may become re-busied before this
2566 * function completes. A similar but shorter race condition exists in the busy
2567 * ioctl.
2568 */
2569int
2570i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2571{
f69061be 2572 drm_i915_private_t *dev_priv = dev->dev_private;
23ba4fd0
BW
2573 struct drm_i915_gem_wait *args = data;
2574 struct drm_i915_gem_object *obj;
2575 struct intel_ring_buffer *ring = NULL;
eac1f14f 2576 struct timespec timeout_stack, *timeout = NULL;
f69061be 2577 unsigned reset_counter;
23ba4fd0
BW
2578 u32 seqno = 0;
2579 int ret = 0;
2580
eac1f14f
BW
2581 if (args->timeout_ns >= 0) {
2582 timeout_stack = ns_to_timespec(args->timeout_ns);
2583 timeout = &timeout_stack;
2584 }
23ba4fd0
BW
2585
2586 ret = i915_mutex_lock_interruptible(dev);
2587 if (ret)
2588 return ret;
2589
2590 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2591 if (&obj->base == NULL) {
2592 mutex_unlock(&dev->struct_mutex);
2593 return -ENOENT;
2594 }
2595
30dfebf3
DV
2596 /* Need to make sure the object gets inactive eventually. */
2597 ret = i915_gem_object_flush_active(obj);
23ba4fd0
BW
2598 if (ret)
2599 goto out;
2600
2601 if (obj->active) {
0201f1ec 2602 seqno = obj->last_read_seqno;
23ba4fd0
BW
2603 ring = obj->ring;
2604 }
2605
2606 if (seqno == 0)
2607 goto out;
2608
23ba4fd0
BW
2609 /* Do this after OLR check to make sure we make forward progress polling
2610 * on this IOCTL with a 0 timeout (like busy ioctl)
2611 */
2612 if (!args->timeout_ns) {
2613 ret = -ETIME;
2614 goto out;
2615 }
2616
2617 drm_gem_object_unreference(&obj->base);
f69061be 2618 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
23ba4fd0
BW
2619 mutex_unlock(&dev->struct_mutex);
2620
b29c19b6 2621 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
4f42f4ef 2622 if (timeout)
eac1f14f 2623 args->timeout_ns = timespec_to_ns(timeout);
23ba4fd0
BW
2624 return ret;
2625
2626out:
2627 drm_gem_object_unreference(&obj->base);
2628 mutex_unlock(&dev->struct_mutex);
2629 return ret;
2630}
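
/* A hedged userspace sketch of the wait ioctl (assuming libdrm's
 * drmIoctl() and the uapi struct fields bo_handle/flags/timeout_ns):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 0,	// poll: behaves like the busy ioctl
 *	};
 *	bool busy = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) != 0 &&
 *		    errno == ETIME;
 *
 * A negative timeout_ns waits indefinitely; on a timed wait the
 * remaining time is written back to wait.timeout_ns.
 */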
2631
5816d648
BW
2632/**
2633 * i915_gem_object_sync - sync an object to a ring.
2634 *
2635 * @obj: object which may be in use on another ring.
2636 * @to: ring we wish to use the object on. May be NULL.
2637 *
2638 * This code is meant to abstract object synchronization with the GPU.
2639 * Calling with NULL implies synchronizing the object with the CPU
2640 * rather than a particular GPU ring.
2641 *
2642 * Returns 0 if successful, else propagates up the lower layer error.
2643 */
2911a35b
BW
2644int
2645i915_gem_object_sync(struct drm_i915_gem_object *obj,
2646 struct intel_ring_buffer *to)
2647{
2648 struct intel_ring_buffer *from = obj->ring;
2649 u32 seqno;
2650 int ret, idx;
2651
2652 if (from == NULL || to == from)
2653 return 0;
2654
5816d648 2655 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
0201f1ec 2656 return i915_gem_object_wait_rendering(obj, false);
2911a35b
BW
2657
2658 idx = intel_ring_sync_index(from, to);
2659
0201f1ec 2660 seqno = obj->last_read_seqno;
2911a35b
BW
2661 if (seqno <= from->sync_seqno[idx])
2662 return 0;
2663
b4aca010
BW
2664 ret = i915_gem_check_olr(obj->ring, seqno);
2665 if (ret)
2666 return ret;
2911a35b 2667
b52b89da 2668 trace_i915_gem_ring_sync_to(from, to, seqno);
1500f7ea 2669 ret = to->sync_to(to, from, seqno);
e3a5a225 2670 if (!ret)
7b01e260
MK
2671 /* We use last_read_seqno because sync_to()
2672 * might have just caused seqno wrap under
2673 * the radar.
2674 */
2675 from->sync_seqno[idx] = obj->last_read_seqno;
2911a35b 2676
e3a5a225 2677 return ret;
2911a35b
BW
2678}
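
/* Execbuffer is the main consumer of the above, calling it for every
 * object in a batch before submission; with semaphores unavailable it
 * degrades to a CPU-side wait via i915_gem_object_wait_rendering().
 */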
2679
b5ffc9bc
CW
2680static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2681{
2682 u32 old_write_domain, old_read_domains;
2683
b5ffc9bc
CW
2684 /* Force a pagefault for domain tracking on next user access */
2685 i915_gem_release_mmap(obj);
2686
b97c3d9c
KP
2687 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2688 return;
2689
97c809fd
CW
2690 /* Wait for any direct GTT access to complete */
2691 mb();
2692
b5ffc9bc
CW
2693 old_read_domains = obj->base.read_domains;
2694 old_write_domain = obj->base.write_domain;
2695
2696 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2697 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2698
2699 trace_i915_gem_object_change_domain(obj,
2700 old_read_domains,
2701 old_write_domain);
2702}
2703
07fe0b12 2704int i915_vma_unbind(struct i915_vma *vma)
673a394b 2705{
07fe0b12 2706 struct drm_i915_gem_object *obj = vma->obj;
7bddb01f 2707 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
43e28f09 2708 int ret;
673a394b 2709
07fe0b12 2710 if (list_empty(&vma->vma_link))
673a394b
EA
2711 return 0;
2712
0ff501cb
DV
2713 if (!drm_mm_node_allocated(&vma->node)) {
2714 i915_gem_vma_destroy(vma);
0ff501cb
DV
2715 return 0;
2716 }
433544bd 2717
d7f46fc4 2718 if (vma->pin_count)
31d8d651 2719 return -EBUSY;
673a394b 2720
c4670ad0
CW
2721 BUG_ON(obj->pages == NULL);
2722
a8198eea 2723 ret = i915_gem_object_finish_gpu(obj);
1488fc08 2724 if (ret)
a8198eea
CW
2725 return ret;
2726 /* Continue on if we fail due to EIO, the GPU is hung so we
2727 * should be safe and we need to cleanup or else we might
2728 * cause memory corruption through use-after-free.
2729 */
2730
b5ffc9bc 2731 i915_gem_object_finish_gtt(obj);
5323fd04 2732
96b47b65 2733 /* release the fence reg _after_ flushing */
d9e86c0e 2734 ret = i915_gem_object_put_fence(obj);
1488fc08 2735 if (ret)
d9e86c0e 2736 return ret;
96b47b65 2737
07fe0b12 2738 trace_i915_vma_unbind(vma);
db53a302 2739
6f65e29a
BW
2740 vma->unbind_vma(vma);
2741
74163907 2742 i915_gem_gtt_finish_object(obj);
7bddb01f 2743
64bf9303 2744 list_del_init(&vma->mm_list);
75e9e915 2745 /* Avoid an unnecessary call to unbind on rebind. */
5cacaac7
BW
2746 if (i915_is_ggtt(vma->vm))
2747 obj->map_and_fenceable = true;
673a394b 2748
2f633156
BW
2749 drm_mm_remove_node(&vma->node);
2750 i915_gem_vma_destroy(vma);
2751
2752 /* Since the unbound list is global, only move to that list if
b93dab6e 2753 * no more VMAs exist. */
2f633156
BW
2754 if (list_empty(&obj->vma_list))
2755 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
673a394b 2756
70903c3b
CW
2757 /* And finally, now that the object is completely decoupled from this vma,
2758 * we can drop its hold on the backing storage and allow it to be
2759 * reaped by the shrinker.
2760 */
2761 i915_gem_object_unpin_pages(obj);
2762
88241785 2763 return 0;
54cf91dc
CW
2764}
2765
b2da9fe5 2766int i915_gpu_idle(struct drm_device *dev)
4df2faf4
DV
2767{
2768 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 2769 struct intel_ring_buffer *ring;
1ec14ad3 2770 int ret, i;
4df2faf4 2771
4df2faf4 2772 /* Flush everything onto the inactive list. */
b4519513 2773 for_each_ring(ring, dev_priv, i) {
41bde553 2774 ret = i915_switch_context(ring, NULL, ring->default_context);
b6c7488d
BW
2775 if (ret)
2776 return ret;
2777
3e960501 2778 ret = intel_ring_idle(ring);
1ec14ad3
CW
2779 if (ret)
2780 return ret;
2781 }
4df2faf4 2782
8a1a49f9 2783 return 0;
4df2faf4
DV
2784}
2785
9ce079e4
CW
2786static void i965_write_fence_reg(struct drm_device *dev, int reg,
2787 struct drm_i915_gem_object *obj)
de151cf6 2788{
de151cf6 2789 drm_i915_private_t *dev_priv = dev->dev_private;
56c844e5
ID
2790 int fence_reg;
2791 int fence_pitch_shift;
de151cf6 2792
56c844e5
ID
2793 if (INTEL_INFO(dev)->gen >= 6) {
2794 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2795 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2796 } else {
2797 fence_reg = FENCE_REG_965_0;
2798 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2799 }
2800
d18b9619
CW
2801 fence_reg += reg * 8;
2802
2803 /* To w/a incoherency with non-atomic 64-bit register updates,
2804 * we split the 64-bit update into two 32-bit writes. In order
2805 * for a partial fence not to be evaluated between writes, we
2806 * precede the update with write to turn off the fence register,
2807 * and only enable the fence as the last step.
2808 *
2809 * For extra levels of paranoia, we make sure each step lands
2810 * before applying the next step.
2811 */
2812 I915_WRITE(fence_reg, 0);
2813 POSTING_READ(fence_reg);
2814
9ce079e4 2815 if (obj) {
f343c5f6 2816 u32 size = i915_gem_obj_ggtt_size(obj);
d18b9619 2817 uint64_t val;
de151cf6 2818
f343c5f6 2819 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
9ce079e4 2820 0xfffff000) << 32;
f343c5f6 2821 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
56c844e5 2822 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
9ce079e4
CW
2823 if (obj->tiling_mode == I915_TILING_Y)
2824 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2825 val |= I965_FENCE_REG_VALID;
c6642782 2826
d18b9619
CW
2827 I915_WRITE(fence_reg + 4, val >> 32);
2828 POSTING_READ(fence_reg + 4);
2829
2830 I915_WRITE(fence_reg + 0, val);
2831 POSTING_READ(fence_reg);
2832 } else {
2833 I915_WRITE(fence_reg + 4, 0);
2834 POSTING_READ(fence_reg + 4);
2835 }
de151cf6
JB
2836}
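
/* A worked example of the register layout above, for a hypothetical
 * 1MiB Y-tiled object at GGTT offset 0x100000 with a 512-byte stride
 * on gen6+:
 *
 *	end bits:  (0x100000 + 0x100000 - 4096) & 0xfffff000 = 0x1ff000
 *	val     =  (u64)0x1ff000 << 32 | 0x100000
 *	        |  ((512 / 128) - 1) << SANDYBRIDGE_FENCE_PITCH_SHIFT
 *	        |  1 << I965_FENCE_TILING_Y_SHIFT | I965_FENCE_REG_VALID
 */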
2837
9ce079e4
CW
2838static void i915_write_fence_reg(struct drm_device *dev, int reg,
2839 struct drm_i915_gem_object *obj)
de151cf6 2840{
de151cf6 2841 drm_i915_private_t *dev_priv = dev->dev_private;
9ce079e4 2842 u32 val;
de151cf6 2843
9ce079e4 2844 if (obj) {
f343c5f6 2845 u32 size = i915_gem_obj_ggtt_size(obj);
9ce079e4
CW
2846 int pitch_val;
2847 int tile_width;
c6642782 2848
f343c5f6 2849 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
9ce079e4 2850 (size & -size) != size ||
f343c5f6
BW
2851 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2852 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2853 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
c6642782 2854
9ce079e4
CW
2855 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2856 tile_width = 128;
2857 else
2858 tile_width = 512;
2859
2860 /* Note: pitch better be a power of two tile widths */
2861 pitch_val = obj->stride / tile_width;
2862 pitch_val = ffs(pitch_val) - 1;
2863
f343c5f6 2864 val = i915_gem_obj_ggtt_offset(obj);
9ce079e4
CW
2865 if (obj->tiling_mode == I915_TILING_Y)
2866 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2867 val |= I915_FENCE_SIZE_BITS(size);
2868 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2869 val |= I830_FENCE_REG_VALID;
2870 } else
2871 val = 0;
2872
2873 if (reg < 8)
2874 reg = FENCE_REG_830_0 + reg * 4;
2875 else
2876 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2877
2878 I915_WRITE(reg, val);
2879 POSTING_READ(reg);
de151cf6
JB
2880}
2881
9ce079e4
CW
2882static void i830_write_fence_reg(struct drm_device *dev, int reg,
2883 struct drm_i915_gem_object *obj)
de151cf6 2884{
de151cf6 2885 drm_i915_private_t *dev_priv = dev->dev_private;
de151cf6 2886 uint32_t val;
de151cf6 2887
9ce079e4 2888 if (obj) {
f343c5f6 2889 u32 size = i915_gem_obj_ggtt_size(obj);
9ce079e4 2890 uint32_t pitch_val;
de151cf6 2891
f343c5f6 2892 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
9ce079e4 2893 (size & -size) != size ||
f343c5f6
BW
2894 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2895 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2896 i915_gem_obj_ggtt_offset(obj), size);
e76a16de 2897
9ce079e4
CW
2898 pitch_val = obj->stride / 128;
2899 pitch_val = ffs(pitch_val) - 1;
de151cf6 2900
f343c5f6 2901 val = i915_gem_obj_ggtt_offset(obj);
9ce079e4
CW
2902 if (obj->tiling_mode == I915_TILING_Y)
2903 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2904 val |= I830_FENCE_SIZE_BITS(size);
2905 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2906 val |= I830_FENCE_REG_VALID;
2907 } else
2908 val = 0;
c6642782 2909
9ce079e4
CW
2910 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2911 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2912}
2913
d0a57789
CW
2914 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2915{
2916 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2917}
2918
9ce079e4
CW
2919static void i915_gem_write_fence(struct drm_device *dev, int reg,
2920 struct drm_i915_gem_object *obj)
2921{
d0a57789
CW
2922 struct drm_i915_private *dev_priv = dev->dev_private;
2923
2924 /* Ensure that all CPU reads are completed before installing a fence
2925 * and all writes before removing the fence.
2926 */
2927 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2928 mb();
2929
94a335db
DV
2930 WARN(obj && (!obj->stride || !obj->tiling_mode),
2931 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2932 obj->stride, obj->tiling_mode);
2933
9ce079e4 2934 switch (INTEL_INFO(dev)->gen) {
5ab31333 2935 case 8:
9ce079e4 2936 case 7:
56c844e5 2937 case 6:
9ce079e4
CW
2938 case 5:
2939 case 4: i965_write_fence_reg(dev, reg, obj); break;
2940 case 3: i915_write_fence_reg(dev, reg, obj); break;
2941 case 2: i830_write_fence_reg(dev, reg, obj); break;
7dbf9d6e 2942 default: BUG();
9ce079e4 2943 }
d0a57789
CW
2944
2945 /* And similarly be paranoid that no direct access to this region
2946 * is reordered to before the fence is installed.
2947 */
2948 if (i915_gem_object_needs_mb(obj))
2949 mb();
de151cf6
JB
2950}
2951
61050808
CW
2952static inline int fence_number(struct drm_i915_private *dev_priv,
2953 struct drm_i915_fence_reg *fence)
2954{
2955 return fence - dev_priv->fence_regs;
2956}
2957
2958static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2959 struct drm_i915_fence_reg *fence,
2960 bool enable)
2961{
2dc8aae0 2962 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
46a0b638
CW
2963 int reg = fence_number(dev_priv, fence);
2964
2965 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
61050808
CW
2966
2967 if (enable) {
46a0b638 2968 obj->fence_reg = reg;
61050808
CW
2969 fence->obj = obj;
2970 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2971 } else {
2972 obj->fence_reg = I915_FENCE_REG_NONE;
2973 fence->obj = NULL;
2974 list_del_init(&fence->lru_list);
2975 }
94a335db 2976 obj->fence_dirty = false;
61050808
CW
2977}
2978
d9e86c0e 2979static int
d0a57789 2980i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
d9e86c0e 2981{
1c293ea3 2982 if (obj->last_fenced_seqno) {
86d5bc37 2983 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
18991845
CW
2984 if (ret)
2985 return ret;
d9e86c0e
CW
2986
2987 obj->last_fenced_seqno = 0;
d9e86c0e
CW
2988 }
2989
86d5bc37 2990 obj->fenced_gpu_access = false;
d9e86c0e
CW
2991 return 0;
2992}
2993
2994int
2995i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2996{
61050808 2997 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
f9c513e9 2998 struct drm_i915_fence_reg *fence;
d9e86c0e
CW
2999 int ret;
3000
d0a57789 3001 ret = i915_gem_object_wait_fence(obj);
d9e86c0e
CW
3002 if (ret)
3003 return ret;
3004
61050808
CW
3005 if (obj->fence_reg == I915_FENCE_REG_NONE)
3006 return 0;
d9e86c0e 3007
f9c513e9
CW
3008 fence = &dev_priv->fence_regs[obj->fence_reg];
3009
61050808 3010 i915_gem_object_fence_lost(obj);
f9c513e9 3011 i915_gem_object_update_fence(obj, fence, false);
d9e86c0e
CW
3012
3013 return 0;
3014}
3015
3016static struct drm_i915_fence_reg *
a360bb1a 3017i915_find_fence_reg(struct drm_device *dev)
ae3db24a 3018{
ae3db24a 3019 struct drm_i915_private *dev_priv = dev->dev_private;
8fe301ad 3020 struct drm_i915_fence_reg *reg, *avail;
d9e86c0e 3021 int i;
ae3db24a
DV
3022
3023 /* First try to find a free reg */
d9e86c0e 3024 avail = NULL;
ae3db24a
DV
3025 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3026 reg = &dev_priv->fence_regs[i];
3027 if (!reg->obj)
d9e86c0e 3028 return reg;
ae3db24a 3029
1690e1eb 3030 if (!reg->pin_count)
d9e86c0e 3031 avail = reg;
ae3db24a
DV
3032 }
3033
d9e86c0e 3034 if (avail == NULL)
5dce5b93 3035 goto deadlock;
ae3db24a
DV
3036
3037 /* None available, try to steal one or wait for a user to finish */
d9e86c0e 3038 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
1690e1eb 3039 if (reg->pin_count)
ae3db24a
DV
3040 continue;
3041
8fe301ad 3042 return reg;
ae3db24a
DV
3043 }
3044
5dce5b93
CW
3045deadlock:
3046 /* Wait for completion of pending flips which consume fences */
3047 if (intel_has_pending_fb_unpin(dev))
3048 return ERR_PTR(-EAGAIN);
3049
3050 return ERR_PTR(-EDEADLK);
ae3db24a
DV
3051}
3052
de151cf6 3053/**
9a5a53b3 3054 * i915_gem_object_get_fence - set up fencing for an object
de151cf6
JB
3055 * @obj: object to map through a fence reg
3056 *
3057 * When mapping objects through the GTT, userspace wants to be able to write
3058 * to them without having to worry about swizzling if the object is tiled.
de151cf6
JB
3059 * This function walks the fence regs looking for a free one for @obj,
3060 * stealing one if it can't find any.
3061 *
3062 * It then sets up the reg based on the object's properties: address, pitch
3063 * and tiling format.
9a5a53b3
CW
3064 *
3065 * For an untiled surface, this removes any existing fence.
de151cf6 3066 */
8c4b8c3f 3067int
06d98131 3068i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
de151cf6 3069{
05394f39 3070 struct drm_device *dev = obj->base.dev;
79e53945 3071 struct drm_i915_private *dev_priv = dev->dev_private;
14415745 3072 bool enable = obj->tiling_mode != I915_TILING_NONE;
d9e86c0e 3073 struct drm_i915_fence_reg *reg;
ae3db24a 3074 int ret;
de151cf6 3075
14415745
CW
3076 /* Have we updated the tiling parameters upon the object and so
3077 * will need to serialise the write to the associated fence register?
3078 */
5d82e3e6 3079 if (obj->fence_dirty) {
d0a57789 3080 ret = i915_gem_object_wait_fence(obj);
14415745
CW
3081 if (ret)
3082 return ret;
3083 }
9a5a53b3 3084
d9e86c0e 3085 /* Just update our place in the LRU if our fence is getting reused. */
05394f39
CW
3086 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3087 reg = &dev_priv->fence_regs[obj->fence_reg];
5d82e3e6 3088 if (!obj->fence_dirty) {
14415745
CW
3089 list_move_tail(&reg->lru_list,
3090 &dev_priv->mm.fence_list);
3091 return 0;
3092 }
3093 } else if (enable) {
3094 reg = i915_find_fence_reg(dev);
5dce5b93
CW
3095 if (IS_ERR(reg))
3096 return PTR_ERR(reg);
d9e86c0e 3097
14415745
CW
3098 if (reg->obj) {
3099 struct drm_i915_gem_object *old = reg->obj;
3100
d0a57789 3101 ret = i915_gem_object_wait_fence(old);
29c5a587
CW
3102 if (ret)
3103 return ret;
3104
14415745 3105 i915_gem_object_fence_lost(old);
29c5a587 3106 }
14415745 3107 } else
a09ba7fa 3108 return 0;
a09ba7fa 3109
14415745 3110 i915_gem_object_update_fence(obj, reg, enable);
14415745 3111
9ce079e4 3112 return 0;
de151cf6
JB
3113}
3114
42d6ab48
CW
3115static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3116 struct drm_mm_node *gtt_space,
3117 unsigned long cache_level)
3118{
3119 struct drm_mm_node *other;
3120
3121 /* On non-LLC machines we have to be careful when putting differing
3122 * types of snoopable memory together to avoid the prefetcher
4239ca77 3123 * crossing memory domains and dying.
42d6ab48
CW
3124 */
3125 if (HAS_LLC(dev))
3126 return true;
3127
c6cfb325 3128 if (!drm_mm_node_allocated(gtt_space))
42d6ab48
CW
3129 return true;
3130
3131 if (list_empty(&gtt_space->node_list))
3132 return true;
3133
3134 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3135 if (other->allocated && !other->hole_follows && other->color != cache_level)
3136 return false;
3137
3138 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3139 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3140 return false;
3141
3142 return true;
3143}
3144
3145static void i915_gem_verify_gtt(struct drm_device *dev)
3146{
3147#if WATCH_GTT
3148 struct drm_i915_private *dev_priv = dev->dev_private;
3149 struct drm_i915_gem_object *obj;
3150 int err = 0;
3151
35c20a60 3152 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
42d6ab48
CW
3153 if (obj->gtt_space == NULL) {
3154 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3155 err++;
3156 continue;
3157 }
3158
3159 if (obj->cache_level != obj->gtt_space->color) {
3160 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
f343c5f6
BW
3161 i915_gem_obj_ggtt_offset(obj),
3162 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
42d6ab48
CW
3163 obj->cache_level,
3164 obj->gtt_space->color);
3165 err++;
3166 continue;
3167 }
3168
3169 if (!i915_gem_valid_gtt_space(dev,
3170 obj->gtt_space,
3171 obj->cache_level)) {
3172 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
f343c5f6
BW
3173 i915_gem_obj_ggtt_offset(obj),
3174 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
42d6ab48
CW
3175 obj->cache_level);
3176 err++;
3177 continue;
3178 }
3179 }
3180
3181 WARN_ON(err);
3182#endif
3183}
3184
673a394b
EA
3185/**
3186 * Finds free space in the GTT aperture and binds the object there.
3187 */
262de145 3188static struct i915_vma *
07fe0b12
BW
3189i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3190 struct i915_address_space *vm,
3191 unsigned alignment,
1ec9e26d 3192 unsigned flags)
673a394b 3193{
05394f39 3194 struct drm_device *dev = obj->base.dev;
673a394b 3195 drm_i915_private_t *dev_priv = dev->dev_private;
5e783301 3196 u32 size, fence_size, fence_alignment, unfenced_alignment;
07fe0b12 3197 size_t gtt_max =
1ec9e26d 3198 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
2f633156 3199 struct i915_vma *vma;
07f73f69 3200 int ret;
673a394b 3201
e28f8711
CW
3202 fence_size = i915_gem_get_gtt_size(dev,
3203 obj->base.size,
3204 obj->tiling_mode);
3205 fence_alignment = i915_gem_get_gtt_alignment(dev,
3206 obj->base.size,
d865110c 3207 obj->tiling_mode, true);
e28f8711 3208 unfenced_alignment =
d865110c 3209 i915_gem_get_gtt_alignment(dev,
1ec9e26d
DV
3210 obj->base.size,
3211 obj->tiling_mode, false);
a00b10c3 3212
673a394b 3213 if (alignment == 0)
1ec9e26d 3214 alignment = flags & PIN_MAPPABLE ? fence_alignment :
5e783301 3215 unfenced_alignment;
1ec9e26d 3216 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
bd9b6a4e 3217 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
262de145 3218 return ERR_PTR(-EINVAL);
673a394b
EA
3219 }
3220
1ec9e26d 3221 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
a00b10c3 3222
654fc607
CW
3223 /* If the object is bigger than the entire aperture, reject it early
3224 * before evicting everything in a vain attempt to find space.
3225 */
0a9ae0d7 3226 if (obj->base.size > gtt_max) {
bd9b6a4e 3227 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
a36689cb 3228 obj->base.size,
1ec9e26d 3229 flags & PIN_MAPPABLE ? "mappable" : "total",
0a9ae0d7 3230 gtt_max);
262de145 3231 return ERR_PTR(-E2BIG);
654fc607
CW
3232 }
3233
37e680a1 3234 ret = i915_gem_object_get_pages(obj);
6c085a72 3235 if (ret)
262de145 3236 return ERR_PTR(ret);
6c085a72 3237
fbdda6fb
CW
3238 i915_gem_object_pin_pages(obj);
3239
accfef2e 3240 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
262de145 3241 if (IS_ERR(vma))
bc6bc15b 3242 goto err_unpin;
2f633156 3243
0a9ae0d7 3244search_free:
07fe0b12 3245 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
0a9ae0d7 3246 size, alignment,
31e5d7c6
DH
3247 obj->cache_level, 0, gtt_max,
3248 DRM_MM_SEARCH_DEFAULT);
dc9dd7a2 3249 if (ret) {
f6cd1f15 3250 ret = i915_gem_evict_something(dev, vm, size, alignment,
1ec9e26d 3251 obj->cache_level, flags);
dc9dd7a2
CW
3252 if (ret == 0)
3253 goto search_free;
9731129c 3254
bc6bc15b 3255 goto err_free_vma;
673a394b 3256 }
2f633156 3257 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
c6cfb325 3258 obj->cache_level))) {
2f633156 3259 ret = -EINVAL;
bc6bc15b 3260 goto err_remove_node;
673a394b
EA
3261 }
3262
74163907 3263 ret = i915_gem_gtt_prepare_object(obj);
2f633156 3264 if (ret)
bc6bc15b 3265 goto err_remove_node;
673a394b 3266
35c20a60 3267 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
ca191b13 3268 list_add_tail(&vma->mm_list, &vm->inactive_list);
bf1a1092 3269
4bd561b3
BW
3270 if (i915_is_ggtt(vm)) {
3271 bool mappable, fenceable;
a00b10c3 3272
49987099
DV
3273 fenceable = (vma->node.size == fence_size &&
3274 (vma->node.start & (fence_alignment - 1)) == 0);
4bd561b3 3275
49987099
DV
3276 mappable = (vma->node.start + obj->base.size <=
3277 dev_priv->gtt.mappable_end);
a00b10c3 3278
5cacaac7 3279 obj->map_and_fenceable = mappable && fenceable;
4bd561b3 3280 }
75e9e915 3281
1ec9e26d 3282 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
75e9e915 3283
1ec9e26d 3284 trace_i915_vma_bind(vma, flags);
8ea99c92
DV
3285 vma->bind_vma(vma, obj->cache_level,
3286 flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
3287
42d6ab48 3288 i915_gem_verify_gtt(dev);
262de145 3289 return vma;
2f633156 3290
bc6bc15b 3291err_remove_node:
6286ef9b 3292 drm_mm_remove_node(&vma->node);
bc6bc15b 3293err_free_vma:
2f633156 3294 i915_gem_vma_destroy(vma);
262de145 3295 vma = ERR_PTR(ret);
bc6bc15b 3296err_unpin:
2f633156 3297 i915_gem_object_unpin_pages(obj);
262de145 3298 return vma;
673a394b
EA
3299}
3300
000433b6 3301bool
2c22569b
CW
3302i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3303 bool force)
673a394b 3304{
673a394b
EA
3305 /* If we don't have a page list set up, then we're not pinned
3306 * to GPU, and we can ignore the cache flush because it'll happen
3307 * again at bind time.
3308 */
05394f39 3309 if (obj->pages == NULL)
000433b6 3310 return false;
673a394b 3311
769ce464
ID
3312 /*
3313 * Stolen memory is always coherent with the GPU as it is explicitly
3314 * marked as wc by the system, or the system is cache-coherent.
3315 */
3316 if (obj->stolen)
000433b6 3317 return false;
769ce464 3318
9c23f7fc
CW
3319 /* If the GPU is snooping the contents of the CPU cache,
3320 * we do not need to manually clear the CPU cache lines. However,
3321 * the caches are only snooped when the render cache is
3322 * flushed/invalidated. As we always have to emit invalidations
3323 * and flushes when moving into and out of the RENDER domain, correct
3324 * snooping behaviour occurs naturally as the result of our domain
3325 * tracking.
3326 */
2c22569b 3327 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
000433b6 3328 return false;
9c23f7fc 3329
1c5d22f7 3330 trace_i915_gem_object_clflush(obj);
9da3da66 3331 drm_clflush_sg(obj->pages);
000433b6
CW
3332
3333 return true;
e47c68e9
EA
3334}
3335
3336/** Flushes the GTT write domain for the object if it's dirty. */
3337static void
05394f39 3338i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
e47c68e9 3339{
1c5d22f7
CW
3340 uint32_t old_write_domain;
3341
05394f39 3342 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
e47c68e9
EA
3343 return;
3344
63256ec5 3345 /* No actual flushing is required for the GTT write domain. Writes
e47c68e9
EA
3346 * to it immediately go to main memory as far as we know, so there's
3347 * no chipset flush. It also doesn't land in render cache.
63256ec5
CW
3348 *
3349 * However, we do have to enforce the order so that all writes through
3350 * the GTT land before any writes to the device, such as updates to
3351 * the GATT itself.
e47c68e9 3352 */
63256ec5
CW
3353 wmb();
3354
05394f39
CW
3355 old_write_domain = obj->base.write_domain;
3356 obj->base.write_domain = 0;
1c5d22f7
CW
3357
3358 trace_i915_gem_object_change_domain(obj,
05394f39 3359 obj->base.read_domains,
1c5d22f7 3360 old_write_domain);
e47c68e9
EA
3361}
3362
3363/** Flushes the CPU write domain for the object if it's dirty. */
3364static void
2c22569b
CW
3365i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3366 bool force)
e47c68e9 3367{
1c5d22f7 3368 uint32_t old_write_domain;
e47c68e9 3369
05394f39 3370 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
e47c68e9
EA
3371 return;
3372
000433b6
CW
3373 if (i915_gem_clflush_object(obj, force))
3374 i915_gem_chipset_flush(obj->base.dev);
3375
05394f39
CW
3376 old_write_domain = obj->base.write_domain;
3377 obj->base.write_domain = 0;
1c5d22f7
CW
3378
3379 trace_i915_gem_object_change_domain(obj,
05394f39 3380 obj->base.read_domains,
1c5d22f7 3381 old_write_domain);
e47c68e9
EA
3382}
3383
2ef7eeaa
EA
3384/**
3385 * Moves a single object to the GTT read, and possibly write domain.
3386 *
3387 * This function returns when the move is complete, including waiting on
3388 * flushes to occur.
3389 */
79e53945 3390int
2021746e 3391i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2ef7eeaa 3392{
8325a09d 3393 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
1c5d22f7 3394 uint32_t old_write_domain, old_read_domains;
e47c68e9 3395 int ret;
2ef7eeaa 3396
02354392 3397 /* Not valid to be called on unbound objects. */
9843877d 3398 if (!i915_gem_obj_bound_any(obj))
02354392
EA
3399 return -EINVAL;
3400
8d7e3de1
CW
3401 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3402 return 0;
3403
0201f1ec 3404 ret = i915_gem_object_wait_rendering(obj, !write);
88241785
CW
3405 if (ret)
3406 return ret;
3407
2c22569b 3408 i915_gem_object_flush_cpu_write_domain(obj, false);
1c5d22f7 3409
d0a57789
CW
3410 /* Serialise direct access to this object with the barriers for
3411 * coherent writes from the GPU, by effectively invalidating the
3412 * GTT domain upon first access.
3413 */
3414 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3415 mb();
3416
05394f39
CW
3417 old_write_domain = obj->base.write_domain;
3418 old_read_domains = obj->base.read_domains;
1c5d22f7 3419
e47c68e9
EA
3420 /* It should now be out of any other write domains, and we can update
3421 * the domain values for our changes.
3422 */
05394f39
CW
3423 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3424 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
e47c68e9 3425 if (write) {
05394f39
CW
3426 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3427 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3428 obj->dirty = 1;
2ef7eeaa
EA
3429 }
3430
1c5d22f7
CW
3431 trace_i915_gem_object_change_domain(obj,
3432 old_read_domains,
3433 old_write_domain);
3434
8325a09d 3435 /* And bump the LRU for this access */
ca191b13 3436 if (i915_gem_object_is_inactive(obj)) {
5c2abbea 3437 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
ca191b13
BW
3438 if (vma)
3439 list_move_tail(&vma->mm_list,
3440 &dev_priv->gtt.base.inactive_list);
3441
3442 }
8325a09d 3443
e47c68e9
EA
3444 return 0;
3445}
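/* Editor's illustrative sketch (hypothetical caller, not from this
 * file): preparing an object for CPU writes through the GTT aperture.
 * Assumes dev->struct_mutex is held, as it is for every caller here.
 */
static int __sketch_prep_gtt_write(struct drm_i915_gem_object *obj)
{
	int ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;
	/* write_domain is now I915_GEM_DOMAIN_GTT and obj->dirty is set,
	 * so writes via the mappable aperture are coherent from here on. */
	return 0;
}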
3446
e4ffd173
CW
3447int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3448 enum i915_cache_level cache_level)
3449{
7bddb01f 3450 struct drm_device *dev = obj->base.dev;
3089c6f2 3451 struct i915_vma *vma;
e4ffd173
CW
3452 int ret;
3453
3454 if (obj->cache_level == cache_level)
3455 return 0;
3456
d7f46fc4 3457 if (i915_gem_obj_is_pinned(obj)) {
e4ffd173
CW
3458 DRM_DEBUG("cannot change the cache level of pinned objects\n");
3459 return -EBUSY;
3460 }
3461
3089c6f2
BW
3462 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3463 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
07fe0b12 3464 ret = i915_vma_unbind(vma);
3089c6f2
BW
3465 if (ret)
3466 return ret;
3467
3468 break;
3469 }
42d6ab48
CW
3470 }
3471
3089c6f2 3472 if (i915_gem_obj_bound_any(obj)) {
e4ffd173
CW
3473 ret = i915_gem_object_finish_gpu(obj);
3474 if (ret)
3475 return ret;
3476
3477 i915_gem_object_finish_gtt(obj);
3478
3479 /* Before SandyBridge, you could not use tiling or fence
3480 * registers with snooped memory, so relinquish any fences
3481 * currently pointing to our region in the aperture.
3482 */
42d6ab48 3483 if (INTEL_INFO(dev)->gen < 6) {
e4ffd173
CW
3484 ret = i915_gem_object_put_fence(obj);
3485 if (ret)
3486 return ret;
3487 }
3488
6f65e29a 3489 list_for_each_entry(vma, &obj->vma_list, vma_link)
8ea99c92
DV
3490 if (drm_mm_node_allocated(&vma->node))
3491 vma->bind_vma(vma, cache_level,
3492 obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
e4ffd173
CW
3493 }
3494
2c22569b
CW
3495 list_for_each_entry(vma, &obj->vma_list, vma_link)
3496 vma->node.color = cache_level;
3497 obj->cache_level = cache_level;
3498
3499 if (cpu_write_needs_clflush(obj)) {
e4ffd173
CW
3500 u32 old_read_domains, old_write_domain;
3501
3502 /* If we're coming from LLC cached, then we haven't
3503 * actually been tracking whether the data is in the
3504 * CPU cache or not, since we only allow one bit set
3505 * in obj->write_domain and have been skipping the clflushes.
3506 * Just set it to the CPU cache for now.
3507 */
3508 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
e4ffd173
CW
3509
3510 old_read_domains = obj->base.read_domains;
3511 old_write_domain = obj->base.write_domain;
3512
3513 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3514 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3515
3516 trace_i915_gem_object_change_domain(obj,
3517 old_read_domains,
3518 old_write_domain);
3519 }
3520
42d6ab48 3521 i915_gem_verify_gtt(dev);
e4ffd173
CW
3522 return 0;
3523}
3524
199adf40
BW
3525int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3526 struct drm_file *file)
e6994aee 3527{
199adf40 3528 struct drm_i915_gem_caching *args = data;
e6994aee
CW
3529 struct drm_i915_gem_object *obj;
3530 int ret;
3531
3532 ret = i915_mutex_lock_interruptible(dev);
3533 if (ret)
3534 return ret;
3535
3536 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3537 if (&obj->base == NULL) {
3538 ret = -ENOENT;
3539 goto unlock;
3540 }
3541
651d794f
CW
3542 switch (obj->cache_level) {
3543 case I915_CACHE_LLC:
3544 case I915_CACHE_L3_LLC:
3545 args->caching = I915_CACHING_CACHED;
3546 break;
3547
4257d3ba
CW
3548 case I915_CACHE_WT:
3549 args->caching = I915_CACHING_DISPLAY;
3550 break;
3551
651d794f
CW
3552 default:
3553 args->caching = I915_CACHING_NONE;
3554 break;
3555 }
e6994aee
CW
3556
3557 drm_gem_object_unreference(&obj->base);
3558unlock:
3559 mutex_unlock(&dev->struct_mutex);
3560 return ret;
3561}
3562
199adf40
BW
3563int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3564 struct drm_file *file)
e6994aee 3565{
199adf40 3566 struct drm_i915_gem_caching *args = data;
e6994aee
CW
3567 struct drm_i915_gem_object *obj;
3568 enum i915_cache_level level;
3569 int ret;
3570
199adf40
BW
3571 switch (args->caching) {
3572 case I915_CACHING_NONE:
e6994aee
CW
3573 level = I915_CACHE_NONE;
3574 break;
199adf40 3575 case I915_CACHING_CACHED:
e6994aee
CW
3576 level = I915_CACHE_LLC;
3577 break;
4257d3ba
CW
3578 case I915_CACHING_DISPLAY:
3579 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3580 break;
e6994aee
CW
3581 default:
3582 return -EINVAL;
3583 }
3584
3bc2913e
BW
3585 ret = i915_mutex_lock_interruptible(dev);
3586 if (ret)
3587 return ret;
3588
e6994aee
CW
3589 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3590 if (&obj->base == NULL) {
3591 ret = -ENOENT;
3592 goto unlock;
3593 }
3594
3595 ret = i915_gem_object_set_cache_level(obj, level);
3596
3597 drm_gem_object_unreference(&obj->base);
3598unlock:
3599 mutex_unlock(&dev->struct_mutex);
3600 return ret;
3601}
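/* Editor's illustrative sketch of the matching userspace side (compiled
 * separately against the uapi header; the struct, the ioctl number and
 * the I915_CACHING_* values come from <drm/i915_drm.h>, the helper name
 * is made up):
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int example_set_caching(int fd, uint32_t handle, uint32_t mode)
 *	{
 *		struct drm_i915_gem_caching arg = {
 *			.handle = handle,
 *			.caching = mode, // e.g. I915_CACHING_CACHED -> I915_CACHE_LLC
 *		};
 *		return ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *	}
 */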
3602
cc98b413
CW
3603static bool is_pin_display(struct drm_i915_gem_object *obj)
3604{
3605 /* There are 3 sources that pin objects:
3606 * 1. The display engine (scanouts, sprites, cursors);
3607 * 2. Reservations for execbuffer;
3608 * 3. The user.
3609 *
3610 * We can ignore reservations as we hold the struct_mutex and
3611 * are only called outside of the reservation path. The user
3612 * can only increment pin_count once, and so if after
3613 * subtracting the potential reference by the user, any pin_count
3614 * remains, it must be due to another use by the display engine.
3615 */
d7f46fc4 3616 return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
cc98b413
CW
3617}
3618
b9241ea3 3619/*
2da3b9b9
CW
3620 * Prepare buffer for display plane (scanout, cursors, etc).
3621 * Can be called from an uninterruptible phase (modesetting) and allows
3622 * any flushes to be pipelined (for pageflips).
b9241ea3
ZW
3623 */
3624int
2da3b9b9
CW
3625i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3626 u32 alignment,
919926ae 3627 struct intel_ring_buffer *pipelined)
b9241ea3 3628{
2da3b9b9 3629 u32 old_read_domains, old_write_domain;
b9241ea3
ZW
3630 int ret;
3631
0be73284 3632 if (pipelined != obj->ring) {
2911a35b
BW
3633 ret = i915_gem_object_sync(obj, pipelined);
3634 if (ret)
b9241ea3
ZW
3635 return ret;
3636 }
3637
cc98b413
CW
3638 /* Mark the pin_display early so that we account for the
3639 * display coherency whilst setting up the cache domains.
3640 */
3641 obj->pin_display = true;
3642
a7ef0640
EA
3643 /* The display engine is not coherent with the LLC cache on gen6. As
3644 * a result, we make sure that the pinning that is about to occur is
3645 * done with uncached PTEs. This is the lowest common denominator for all
3646 * chipsets.
3647 *
3648 * However for gen6+, we could do better by using the GFDT bit instead
3649 * of uncaching, which would allow us to flush all the LLC-cached data
3650 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3651 */
651d794f
CW
3652 ret = i915_gem_object_set_cache_level(obj,
3653 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
a7ef0640 3654 if (ret)
cc98b413 3655 goto err_unpin_display;
a7ef0640 3656
2da3b9b9
CW
3657 /* As the user may map the buffer once pinned in the display plane
3658 * (e.g. libkms for the bootup splash), we have to ensure that we
3659 * always use map_and_fenceable for all scanout buffers.
3660 */
1ec9e26d 3661 ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
2da3b9b9 3662 if (ret)
cc98b413 3663 goto err_unpin_display;
2da3b9b9 3664
2c22569b 3665 i915_gem_object_flush_cpu_write_domain(obj, true);
b118c1e3 3666
2da3b9b9 3667 old_write_domain = obj->base.write_domain;
05394f39 3668 old_read_domains = obj->base.read_domains;
2da3b9b9
CW
3669
3670 /* It should now be out of any other write domains, and we can update
3671 * the domain values for our changes.
3672 */
e5f1d962 3673 obj->base.write_domain = 0;
05394f39 3674 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
b9241ea3
ZW
3675
3676 trace_i915_gem_object_change_domain(obj,
3677 old_read_domains,
2da3b9b9 3678 old_write_domain);
b9241ea3
ZW
3679
3680 return 0;
cc98b413
CW
3681
3682err_unpin_display:
3683 obj->pin_display = is_pin_display(obj);
3684 return ret;
3685}
3686
3687void
3688i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3689{
d7f46fc4 3690 i915_gem_object_ggtt_unpin(obj);
cc98b413 3691 obj->pin_display = is_pin_display(obj);
b9241ea3
ZW
3692}
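/* Editor's illustrative sketch (hypothetical display-side caller): the
 * two helpers above are used as a pair, the object staying pinned for
 * as long as it is being scanned out. The "fb_obj" name and the 256KiB
 * alignment are examples only, not values taken from this file.
 *
 *	ret = i915_gem_object_pin_to_display_plane(fb_obj, 256 * 1024, ring);
 *	if (ret == 0) {
 *		// ... program the plane to scan out of fb_obj ...
 *		// ... and once the plane is torn down again:
 *		i915_gem_object_unpin_from_display_plane(fb_obj);
 *	}
 */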
3693
85345517 3694int
a8198eea 3695i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
85345517 3696{
88241785
CW
3697 int ret;
3698
a8198eea 3699 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
85345517
CW
3700 return 0;
3701
0201f1ec 3702 ret = i915_gem_object_wait_rendering(obj, false);
c501ae7f
CW
3703 if (ret)
3704 return ret;
3705
a8198eea
CW
3706 /* Ensure that we invalidate the GPU's caches and TLBs. */
3707 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
c501ae7f 3708 return 0;
85345517
CW
3709}
3710
e47c68e9
EA
3711/**
3712 * Moves a single object to the CPU read, and possibly write domain.
3713 *
3714 * This function returns when the move is complete, including waiting on
3715 * flushes to occur.
3716 */
dabdfe02 3717int
919926ae 3718i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
e47c68e9 3719{
1c5d22f7 3720 uint32_t old_write_domain, old_read_domains;
e47c68e9
EA
3721 int ret;
3722
8d7e3de1
CW
3723 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3724 return 0;
3725
0201f1ec 3726 ret = i915_gem_object_wait_rendering(obj, !write);
88241785
CW
3727 if (ret)
3728 return ret;
3729
e47c68e9 3730 i915_gem_object_flush_gtt_write_domain(obj);
2ef7eeaa 3731
05394f39
CW
3732 old_write_domain = obj->base.write_domain;
3733 old_read_domains = obj->base.read_domains;
1c5d22f7 3734
e47c68e9 3735 /* Flush the CPU cache if it's still invalid. */
05394f39 3736 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2c22569b 3737 i915_gem_clflush_object(obj, false);
2ef7eeaa 3738
05394f39 3739 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
2ef7eeaa
EA
3740 }
3741
3742 /* It should now be out of any other write domains, and we can update
3743 * the domain values for our changes.
3744 */
05394f39 3745 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
e47c68e9
EA
3746
3747 /* If we're writing through the CPU, then the GPU read domains will
3748 * need to be invalidated at next use.
3749 */
3750 if (write) {
05394f39
CW
3751 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3752 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
e47c68e9 3753 }
2ef7eeaa 3754
1c5d22f7
CW
3755 trace_i915_gem_object_change_domain(obj,
3756 old_read_domains,
3757 old_write_domain);
3758
2ef7eeaa
EA
3759 return 0;
3760}
3761
673a394b
EA
3762/* Throttle our rendering by waiting until the ring has completed our requests
3763 * emitted over 20 msec ago.
3764 *
b962442e
EA
3765 * Note that if we were to use the current jiffies each time around the loop,
3766 * we wouldn't escape the function with any frames outstanding if the time to
3767 * render a frame was over 20ms.
3768 *
673a394b
EA
3769 * This should get us reasonable parallelism between CPU and GPU but also
3770 * relatively low latency when blocking on a particular request to finish.
3771 */
40a5f0de 3772static int
f787a5f5 3773i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
40a5f0de 3774{
f787a5f5
CW
3775 struct drm_i915_private *dev_priv = dev->dev_private;
3776 struct drm_i915_file_private *file_priv = file->driver_priv;
b962442e 3777 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
f787a5f5
CW
3778 struct drm_i915_gem_request *request;
3779 struct intel_ring_buffer *ring = NULL;
f69061be 3780 unsigned reset_counter;
f787a5f5
CW
3781 u32 seqno = 0;
3782 int ret;
93533c29 3783
308887aa
DV
3784 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3785 if (ret)
3786 return ret;
3787
3788 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3789 if (ret)
3790 return ret;
e110e8d6 3791
1c25595f 3792 spin_lock(&file_priv->mm.lock);
f787a5f5 3793 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
b962442e
EA
3794 if (time_after_eq(request->emitted_jiffies, recent_enough))
3795 break;
40a5f0de 3796
f787a5f5
CW
3797 ring = request->ring;
3798 seqno = request->seqno;
b962442e 3799 }
f69061be 3800 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1c25595f 3801 spin_unlock(&file_priv->mm.lock);
40a5f0de 3802
f787a5f5
CW
3803 if (seqno == 0)
3804 return 0;
2bc43b5c 3805
b29c19b6 3806 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
f787a5f5
CW
3807 if (ret == 0)
3808 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
40a5f0de
EA
3809
3810 return ret;
3811}
3812
673a394b 3813int
05394f39 3814i915_gem_object_pin(struct drm_i915_gem_object *obj,
c37e2204 3815 struct i915_address_space *vm,
05394f39 3816 uint32_t alignment,
1ec9e26d 3817 unsigned flags)
673a394b 3818{
07fe0b12 3819 struct i915_vma *vma;
673a394b
EA
3820 int ret;
3821
bf3d149b 3822 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
1ec9e26d 3823 return -EINVAL;
07fe0b12
BW
3824
3825 vma = i915_gem_obj_to_vma(obj, vm);
07fe0b12 3826 if (vma) {
d7f46fc4
BW
3827 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3828 return -EBUSY;
3829
07fe0b12
BW
3830 if ((alignment &&
3831 vma->node.start & (alignment - 1)) ||
1ec9e26d 3832 (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
d7f46fc4 3833 WARN(vma->pin_count,
ae7d49d8 3834 "bo is already pinned with incorrect alignment:"
f343c5f6 3835 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
75e9e915 3836 " obj->map_and_fenceable=%d\n",
07fe0b12 3837 i915_gem_obj_offset(obj, vm), alignment,
1ec9e26d 3838 flags & PIN_MAPPABLE,
05394f39 3839 obj->map_and_fenceable);
07fe0b12 3840 ret = i915_vma_unbind(vma);
ac0c6b5a
CW
3841 if (ret)
3842 return ret;
8ea99c92
DV
3843
3844 vma = NULL;
ac0c6b5a
CW
3845 }
3846 }
3847
8ea99c92 3848 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
262de145
DV
3849 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
3850 if (IS_ERR(vma))
3851 return PTR_ERR(vma);
22c344e9 3852 }
76446cac 3853
8ea99c92
DV
3854 if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
3855 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
74898d7e 3856
8ea99c92 3857 vma->pin_count++;
1ec9e26d
DV
3858 if (flags & PIN_MAPPABLE)
3859 obj->pin_mappable |= true;
673a394b
EA
3860
3861 return 0;
3862}
3863
3864void
d7f46fc4 3865i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
673a394b 3866{
d7f46fc4 3867 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
673a394b 3868
d7f46fc4
BW
3869 BUG_ON(!vma);
3870 BUG_ON(vma->pin_count == 0);
3871 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3872
3873 if (--vma->pin_count == 0)
6299f992 3874 obj->pin_mappable = false;
673a394b
EA
3875}
3876
3877int
3878i915_gem_pin_ioctl(struct drm_device *dev, void *data,
05394f39 3879 struct drm_file *file)
673a394b
EA
3880{
3881 struct drm_i915_gem_pin *args = data;
05394f39 3882 struct drm_i915_gem_object *obj;
673a394b
EA
3883 int ret;
3884
02f6bccc
DV
3885 if (INTEL_INFO(dev)->gen >= 6)
3886 return -ENODEV;
3887
1d7cfea1
CW
3888 ret = i915_mutex_lock_interruptible(dev);
3889 if (ret)
3890 return ret;
673a394b 3891
05394f39 3892 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3893 if (&obj->base == NULL) {
1d7cfea1
CW
3894 ret = -ENOENT;
3895 goto unlock;
673a394b 3896 }
673a394b 3897
05394f39 3898 if (obj->madv != I915_MADV_WILLNEED) {
bd9b6a4e 3899 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
8c99e57d 3900 ret = -EFAULT;
1d7cfea1 3901 goto out;
3ef94daa
CW
3902 }
3903
05394f39 3904 if (obj->pin_filp != NULL && obj->pin_filp != file) {
bd9b6a4e 3905 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
79e53945 3906 args->handle);
1d7cfea1
CW
3907 ret = -EINVAL;
3908 goto out;
79e53945
JB
3909 }
3910
aa5f8021
DV
3911 if (obj->user_pin_count == ULONG_MAX) {
3912 ret = -EBUSY;
3913 goto out;
3914 }
3915
93be8788 3916 if (obj->user_pin_count == 0) {
1ec9e26d 3917 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
1d7cfea1
CW
3918 if (ret)
3919 goto out;
673a394b
EA
3920 }
3921
93be8788
CW
3922 obj->user_pin_count++;
3923 obj->pin_filp = file;
3924
f343c5f6 3925 args->offset = i915_gem_obj_ggtt_offset(obj);
1d7cfea1 3926out:
05394f39 3927 drm_gem_object_unreference(&obj->base);
1d7cfea1 3928unlock:
673a394b 3929 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3930 return ret;
673a394b
EA
3931}
3932
3933int
3934i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
05394f39 3935 struct drm_file *file)
673a394b
EA
3936{
3937 struct drm_i915_gem_pin *args = data;
05394f39 3938 struct drm_i915_gem_object *obj;
76c1dec1 3939 int ret;
673a394b 3940
1d7cfea1
CW
3941 ret = i915_mutex_lock_interruptible(dev);
3942 if (ret)
3943 return ret;
673a394b 3944
05394f39 3945 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3946 if (&obj->base == NULL) {
1d7cfea1
CW
3947 ret = -ENOENT;
3948 goto unlock;
673a394b 3949 }
76c1dec1 3950
05394f39 3951 if (obj->pin_filp != file) {
bd9b6a4e 3952 DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
79e53945 3953 args->handle);
1d7cfea1
CW
3954 ret = -EINVAL;
3955 goto out;
79e53945 3956 }
05394f39
CW
3957 obj->user_pin_count--;
3958 if (obj->user_pin_count == 0) {
3959 obj->pin_filp = NULL;
d7f46fc4 3960 i915_gem_object_ggtt_unpin(obj);
79e53945 3961 }
673a394b 3962
1d7cfea1 3963out:
05394f39 3964 drm_gem_object_unreference(&obj->base);
1d7cfea1 3965unlock:
673a394b 3966 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3967 return ret;
673a394b
EA
3968}
3969
3970int
3971i915_gem_busy_ioctl(struct drm_device *dev, void *data,
05394f39 3972 struct drm_file *file)
673a394b
EA
3973{
3974 struct drm_i915_gem_busy *args = data;
05394f39 3975 struct drm_i915_gem_object *obj;
30dbf0c0
CW
3976 int ret;
3977
76c1dec1 3978 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 3979 if (ret)
76c1dec1 3980 return ret;
673a394b 3981
05394f39 3982 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3983 if (&obj->base == NULL) {
1d7cfea1
CW
3984 ret = -ENOENT;
3985 goto unlock;
673a394b 3986 }
d1b851fc 3987
0be555b6
CW
3988 /* Count all active objects as busy, even if they are currently not used
3989 * by the gpu. Users of this interface expect objects to eventually
3990 * become non-busy without any further actions, therefore emit any
3991 * necessary flushes here.
c4de0a5d 3992 */
30dfebf3 3993 ret = i915_gem_object_flush_active(obj);
0be555b6 3994
30dfebf3 3995 args->busy = obj->active;
e9808edd
CW
3996 if (obj->ring) {
3997 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3998 args->busy |= intel_ring_flag(obj->ring) << 16;
3999 }
673a394b 4000
05394f39 4001 drm_gem_object_unreference(&obj->base);
1d7cfea1 4002unlock:
673a394b 4003 mutex_unlock(&dev->struct_mutex);
1d7cfea1 4004 return ret;
673a394b
EA
4005}
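/* Editor's illustrative sketch of decoding the result in userspace
 * (uapi names from <drm/i915_drm.h>; the packing matches the code
 * above -- bit 0 is obj->active, bits 16+ identify the last ring):
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0) {
 *		int active = busy.busy & 0x1;
 *		unsigned int ring_flag = busy.busy >> 16;
 *	}
 */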
4006
4007int
4008i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4009 struct drm_file *file_priv)
4010{
0206e353 4011 return i915_gem_ring_throttle(dev, file_priv);
673a394b
EA
4012}
4013
3ef94daa
CW
4014int
4015i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4016 struct drm_file *file_priv)
4017{
4018 struct drm_i915_gem_madvise *args = data;
05394f39 4019 struct drm_i915_gem_object *obj;
76c1dec1 4020 int ret;
3ef94daa
CW
4021
4022 switch (args->madv) {
4023 case I915_MADV_DONTNEED:
4024 case I915_MADV_WILLNEED:
4025 break;
4026 default:
4027 return -EINVAL;
4028 }
4029
1d7cfea1
CW
4030 ret = i915_mutex_lock_interruptible(dev);
4031 if (ret)
4032 return ret;
4033
05394f39 4034 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
c8725226 4035 if (&obj->base == NULL) {
1d7cfea1
CW
4036 ret = -ENOENT;
4037 goto unlock;
3ef94daa 4038 }
3ef94daa 4039
d7f46fc4 4040 if (i915_gem_obj_is_pinned(obj)) {
1d7cfea1
CW
4041 ret = -EINVAL;
4042 goto out;
3ef94daa
CW
4043 }
4044
05394f39
CW
4045 if (obj->madv != __I915_MADV_PURGED)
4046 obj->madv = args->madv;
3ef94daa 4047
6c085a72
CW
4048 /* if the object is no longer attached, discard its backing storage */
4049 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
2d7ef395
CW
4050 i915_gem_object_truncate(obj);
4051
05394f39 4052 args->retained = obj->madv != __I915_MADV_PURGED;
bb6baf76 4053
1d7cfea1 4054out:
05394f39 4055 drm_gem_object_unreference(&obj->base);
1d7cfea1 4056unlock:
3ef94daa 4057 mutex_unlock(&dev->struct_mutex);
1d7cfea1 4058 return ret;
3ef94daa
CW
4059}
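/* Editor's illustrative sketch of the userspace contract (uapi names
 * from <drm/i915_drm.h>): a buffer cache marks idle objects purgeable
 * and, on reuse, checks "retained" to learn whether the pages survived.
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED, // may be reclaimed under pressure
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	// ... later, when taking the buffer back out of the cache:
 *	madv.madv = I915_MADV_WILLNEED;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		reupload_contents(); // hypothetical helper; storage was purged
 */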
4060
37e680a1
CW
4061void i915_gem_object_init(struct drm_i915_gem_object *obj,
4062 const struct drm_i915_gem_object_ops *ops)
0327d6ba 4063{
35c20a60 4064 INIT_LIST_HEAD(&obj->global_list);
0327d6ba 4065 INIT_LIST_HEAD(&obj->ring_list);
b25cb2f8 4066 INIT_LIST_HEAD(&obj->obj_exec_link);
2f633156 4067 INIT_LIST_HEAD(&obj->vma_list);
0327d6ba 4068
37e680a1
CW
4069 obj->ops = ops;
4070
0327d6ba
CW
4071 obj->fence_reg = I915_FENCE_REG_NONE;
4072 obj->madv = I915_MADV_WILLNEED;
4073 /* Avoid an unnecessary call to unbind on the first bind. */
4074 obj->map_and_fenceable = true;
4075
4076 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4077}
4078
37e680a1
CW
4079static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4080 .get_pages = i915_gem_object_get_pages_gtt,
4081 .put_pages = i915_gem_object_put_pages_gtt,
4082};
4083
05394f39
CW
4084struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4085 size_t size)
ac52bc56 4086{
c397b908 4087 struct drm_i915_gem_object *obj;
5949eac4 4088 struct address_space *mapping;
1a240d4d 4089 gfp_t mask;
ac52bc56 4090
42dcedd4 4091 obj = i915_gem_object_alloc(dev);
c397b908
DV
4092 if (obj == NULL)
4093 return NULL;
673a394b 4094
c397b908 4095 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
42dcedd4 4096 i915_gem_object_free(obj);
c397b908
DV
4097 return NULL;
4098 }
673a394b 4099
bed1ea95
CW
4100 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4101 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4102 /* 965gm cannot relocate objects above 4GiB. */
4103 mask &= ~__GFP_HIGHMEM;
4104 mask |= __GFP_DMA32;
4105 }
4106
496ad9aa 4107 mapping = file_inode(obj->base.filp)->i_mapping;
bed1ea95 4108 mapping_set_gfp_mask(mapping, mask);
5949eac4 4109
37e680a1 4110 i915_gem_object_init(obj, &i915_gem_object_ops);
73aa808f 4111
c397b908
DV
4112 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4113 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
673a394b 4114
3d29b842
ED
4115 if (HAS_LLC(dev)) {
4116 /* On some devices, we can have the GPU use the LLC (the CPU
a1871112
EA
4117 * cache) for about a 10% performance improvement
4118 * compared to uncached. Graphics requests other than
4119 * display scanout are coherent with the CPU in
4120 * accessing this cache. This means in this mode we
4121 * don't need to clflush on the CPU side, and on the
4122 * GPU side we only need to flush internal caches to
4123 * get data visible to the CPU.
4124 *
4125 * However, we maintain the display planes as UC, and so
4126 * need to rebind when first used as such.
4127 */
4128 obj->cache_level = I915_CACHE_LLC;
4129 } else
4130 obj->cache_level = I915_CACHE_NONE;
4131
d861e338
DV
4132 trace_i915_gem_object_create(obj);
4133
05394f39 4134 return obj;
c397b908
DV
4135}
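/* Editor's illustrative sketch (hypothetical caller, mirroring the
 * driver's create-ioctl path): allocate an object and publish it to
 * userspace via a handle, which then owns the only reference.
 *
 *	obj = i915_gem_alloc_object(dev, PAGE_ALIGN(args->size));
 *	if (obj == NULL)
 *		return -ENOMEM;
 *	ret = drm_gem_handle_create(file, &obj->base, &handle);
 *	drm_gem_object_unreference_unlocked(&obj->base); // handle holds it
 *	if (ret)
 *		return ret;
 */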
4136
1488fc08 4137void i915_gem_free_object(struct drm_gem_object *gem_obj)
673a394b 4138{
1488fc08 4139 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
05394f39 4140 struct drm_device *dev = obj->base.dev;
be72615b 4141 drm_i915_private_t *dev_priv = dev->dev_private;
07fe0b12 4142 struct i915_vma *vma, *next;
673a394b 4143
f65c9168
PZ
4144 intel_runtime_pm_get(dev_priv);
4145
26e12f89
CW
4146 trace_i915_gem_object_destroy(obj);
4147
1488fc08
CW
4148 if (obj->phys_obj)
4149 i915_gem_detach_phys_object(dev, obj);
4150
07fe0b12 4151 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
d7f46fc4
BW
4152 int ret;
4153
4154 vma->pin_count = 0;
4155 ret = i915_vma_unbind(vma);
07fe0b12
BW
4156 if (WARN_ON(ret == -ERESTARTSYS)) {
4157 bool was_interruptible;
1488fc08 4158
07fe0b12
BW
4159 was_interruptible = dev_priv->mm.interruptible;
4160 dev_priv->mm.interruptible = false;
1488fc08 4161
07fe0b12 4162 WARN_ON(i915_vma_unbind(vma));
1488fc08 4163
07fe0b12
BW
4164 dev_priv->mm.interruptible = was_interruptible;
4165 }
1488fc08
CW
4166 }
4167
1d64ae71
BW
4168 /* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
4169 * before progressing. */
4170 if (obj->stolen)
4171 i915_gem_object_unpin_pages(obj);
4172
401c29f6
BW
4173 if (WARN_ON(obj->pages_pin_count))
4174 obj->pages_pin_count = 0;
37e680a1 4175 i915_gem_object_put_pages(obj);
d8cb5086 4176 i915_gem_object_free_mmap_offset(obj);
0104fdbb 4177 i915_gem_object_release_stolen(obj);
de151cf6 4178
9da3da66
CW
4179 BUG_ON(obj->pages);
4180
2f745ad3
CW
4181 if (obj->base.import_attach)
4182 drm_prime_gem_destroy(&obj->base, NULL);
de151cf6 4183
05394f39
CW
4184 drm_gem_object_release(&obj->base);
4185 i915_gem_info_remove_obj(dev_priv, obj->base.size);
c397b908 4186
05394f39 4187 kfree(obj->bit_17);
42dcedd4 4188 i915_gem_object_free(obj);
f65c9168
PZ
4189
4190 intel_runtime_pm_put(dev_priv);
673a394b
EA
4191}
4192
e656a6cb 4193struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
2f633156 4194 struct i915_address_space *vm)
e656a6cb
DV
4195{
4196 struct i915_vma *vma;
4197 list_for_each_entry(vma, &obj->vma_list, vma_link)
4198 if (vma->vm == vm)
4199 return vma;
4200
4201 return NULL;
4202}
4203
2f633156
BW
4204void i915_gem_vma_destroy(struct i915_vma *vma)
4205{
4206 WARN_ON(vma->node.allocated);
aaa05667
CW
4207
4208 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4209 if (!list_empty(&vma->exec_list))
4210 return;
4211
8b9c2b94 4212 list_del(&vma->vma_link);
b93dab6e 4213
2f633156
BW
4214 kfree(vma);
4215}
4216
29105ccc 4217int
45c5f202 4218i915_gem_suspend(struct drm_device *dev)
29105ccc
CW
4219{
4220 drm_i915_private_t *dev_priv = dev->dev_private;
45c5f202 4221 int ret = 0;
28dfe52a 4222
45c5f202 4223 mutex_lock(&dev->struct_mutex);
f7403347 4224 if (dev_priv->ums.mm_suspended)
45c5f202 4225 goto err;
28dfe52a 4226
b2da9fe5 4227 ret = i915_gpu_idle(dev);
f7403347 4228 if (ret)
45c5f202 4229 goto err;
f7403347 4230
b2da9fe5 4231 i915_gem_retire_requests(dev);
673a394b 4232
29105ccc 4233 /* Under UMS, be paranoid and evict. */
a39d7efc 4234 if (!drm_core_check_feature(dev, DRIVER_MODESET))
6c085a72 4235 i915_gem_evict_everything(dev);
29105ccc 4236
29105ccc 4237 i915_kernel_lost_context(dev);
6dbe2772 4238 i915_gem_cleanup_ringbuffer(dev);
29105ccc 4239
45c5f202
CW
4240 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4241 * We need to replace this with a semaphore, or something.
4242 * And not confound ums.mm_suspended!
4243 */
4244 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4245 DRIVER_MODESET);
4246 mutex_unlock(&dev->struct_mutex);
4247
4248 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
29105ccc 4249 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
b29c19b6 4250 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
29105ccc 4251
673a394b 4252 return 0;
45c5f202
CW
4253
4254err:
4255 mutex_unlock(&dev->struct_mutex);
4256 return ret;
673a394b
EA
4257}
4258
c3787e2e 4259int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
b9524a1e 4260{
c3787e2e 4261 struct drm_device *dev = ring->dev;
b9524a1e 4262 drm_i915_private_t *dev_priv = dev->dev_private;
35a85ac6
BW
4263 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4264 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
c3787e2e 4265 int i, ret;
b9524a1e 4266
040d2baa 4267 if (!HAS_L3_DPF(dev) || !remap_info)
c3787e2e 4268 return 0;
b9524a1e 4269
c3787e2e
BW
4270 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4271 if (ret)
4272 return ret;
b9524a1e 4273
c3787e2e
BW
4274 /*
4275 * Note: We do not worry about the concurrent register cacheline hang
4276 * here because no other code should access these registers other than
4277 * at initialization time.
4278 */
b9524a1e 4279 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
c3787e2e
BW
4280 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4281 intel_ring_emit(ring, reg_base + i);
4282 intel_ring_emit(ring, remap_info[i/4]);
b9524a1e
BW
4283 }
4284
c3787e2e 4285 intel_ring_advance(ring);
b9524a1e 4286
c3787e2e 4287 return ret;
b9524a1e
BW
4288}
4289
f691e2f4
DV
4290void i915_gem_init_swizzling(struct drm_device *dev)
4291{
4292 drm_i915_private_t *dev_priv = dev->dev_private;
4293
11782b02 4294 if (INTEL_INFO(dev)->gen < 5 ||
f691e2f4
DV
4295 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4296 return;
4297
4298 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4299 DISP_TILE_SURFACE_SWIZZLING);
4300
11782b02
DV
4301 if (IS_GEN5(dev))
4302 return;
4303
f691e2f4
DV
4304 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4305 if (IS_GEN6(dev))
6b26c86d 4306 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
8782e26c 4307 else if (IS_GEN7(dev))
6b26c86d 4308 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
31a5336e
BW
4309 else if (IS_GEN8(dev))
4310 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
8782e26c
BW
4311 else
4312 BUG();
f691e2f4 4313}
e21af88d 4314
67b1b571
CW
4315static bool
4316intel_enable_blt(struct drm_device *dev)
4317{
4318 if (!HAS_BLT(dev))
4319 return false;
4320
4321 /* The blitter was dysfunctional on early prototypes */
4322 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4323 DRM_INFO("BLT not supported on this pre-production hardware;"
4324 " graphics performance will be degraded.\n");
4325 return false;
4326 }
4327
4328 return true;
4329}
4330
4fc7c971 4331static int i915_gem_init_rings(struct drm_device *dev)
8187a2b7 4332{
4fc7c971 4333 struct drm_i915_private *dev_priv = dev->dev_private;
8187a2b7 4334 int ret;
68f95ba9 4335
5c1143bb 4336 ret = intel_init_render_ring_buffer(dev);
68f95ba9 4337 if (ret)
b6913e4b 4338 return ret;
68f95ba9
CW
4339
4340 if (HAS_BSD(dev)) {
5c1143bb 4341 ret = intel_init_bsd_ring_buffer(dev);
68f95ba9
CW
4342 if (ret)
4343 goto cleanup_render_ring;
d1b851fc 4344 }
68f95ba9 4345
67b1b571 4346 if (intel_enable_blt(dev)) {
549f7365
CW
4347 ret = intel_init_blt_ring_buffer(dev);
4348 if (ret)
4349 goto cleanup_bsd_ring;
4350 }
4351
9a8a2213
BW
4352 if (HAS_VEBOX(dev)) {
4353 ret = intel_init_vebox_ring_buffer(dev);
4354 if (ret)
4355 goto cleanup_blt_ring;
4356 }
4357
4358
99433931 4359 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4fc7c971 4360 if (ret)
9a8a2213 4361 goto cleanup_vebox_ring;
4fc7c971
BW
4362
4363 return 0;
4364
9a8a2213
BW
4365cleanup_vebox_ring:
4366 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4fc7c971
BW
4367cleanup_blt_ring:
4368 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4369cleanup_bsd_ring:
4370 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4371cleanup_render_ring:
4372 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4373
4374 return ret;
4375}
4376
4377int
4378i915_gem_init_hw(struct drm_device *dev)
4379{
4380 drm_i915_private_t *dev_priv = dev->dev_private;
35a85ac6 4381 int ret, i;
4fc7c971
BW
4382
4383 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4384 return -EIO;
4385
59124506 4386 if (dev_priv->ellc_size)
05e21cc4 4387 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4fc7c971 4388
0bf21347
VS
4389 if (IS_HASWELL(dev))
4390 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4391 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
9435373e 4392
88a2b2a3 4393 if (HAS_PCH_NOP(dev)) {
6ba844b0
DV
4394 if (IS_IVYBRIDGE(dev)) {
4395 u32 temp = I915_READ(GEN7_MSG_CTL);
4396 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4397 I915_WRITE(GEN7_MSG_CTL, temp);
4398 } else if (INTEL_INFO(dev)->gen >= 7) {
4399 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4400 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4401 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4402 }
88a2b2a3
BW
4403 }
4404
4fc7c971
BW
4405 i915_gem_init_swizzling(dev);
4406
4407 ret = i915_gem_init_rings(dev);
99433931
MK
4408 if (ret)
4409 return ret;
4410
c3787e2e
BW
4411 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4412 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4413
254f965c 4414 /*
2fa48d8d
BW
4415 * XXX: Contexts should only be initialized once. Doing a switch to the
4416 * default context switch however is something we'd like to do after
4417 * reset or thaw (the latter may not actually be necessary for HW, but
4418 * goes with our code better). Context switching requires rings (for
4419 * the do_switch), but before enabling PPGTT. So don't move this.
254f965c 4420 */
2fa48d8d 4421 ret = i915_gem_context_enable(dev_priv);
8245be31 4422 if (ret) {
2fa48d8d
BW
4423 DRM_ERROR("Context enable failed %d\n", ret);
4424 goto err_out;
b7c36d25 4425 }
e21af88d 4426
68f95ba9 4427 return 0;
2fa48d8d
BW
4428
4429err_out:
4430 i915_gem_cleanup_ringbuffer(dev);
4431 return ret;
8187a2b7
ZN
4432}
4433
1070a42b
CW
4434int i915_gem_init(struct drm_device *dev)
4435{
4436 struct drm_i915_private *dev_priv = dev->dev_private;
1070a42b
CW
4437 int ret;
4438
1070a42b 4439 mutex_lock(&dev->struct_mutex);
d62b4892
JB
4440
4441 if (IS_VALLEYVIEW(dev)) {
4442 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4443 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4444 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4445 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4446 }
4447
d7e5008f 4448 i915_gem_init_global_gtt(dev);
d62b4892 4449
2fa48d8d 4450 ret = i915_gem_context_init(dev);
e3848694
MK
4451 if (ret) {
4452 mutex_unlock(&dev->struct_mutex);
2fa48d8d 4453 return ret;
e3848694 4454 }
2fa48d8d 4455
1070a42b
CW
4456 ret = i915_gem_init_hw(dev);
4457 mutex_unlock(&dev->struct_mutex);
4458 if (ret) {
bdf4fd7e 4459 WARN_ON(dev_priv->mm.aliasing_ppgtt);
2fa48d8d 4460 i915_gem_context_fini(dev);
c39538a8 4461 drm_mm_takedown(&dev_priv->gtt.base.mm);
1070a42b
CW
4462 return ret;
4463 }
4464
53ca26ca
DV
4465 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4466 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4467 dev_priv->dri1.allow_batchbuffer = 1;
1070a42b
CW
4468 return 0;
4469}
4470
8187a2b7
ZN
4471void
4472i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4473{
4474 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 4475 struct intel_ring_buffer *ring;
1ec14ad3 4476 int i;
8187a2b7 4477
b4519513
CW
4478 for_each_ring(ring, dev_priv, i)
4479 intel_cleanup_ring_buffer(ring);
8187a2b7
ZN
4480}
4481
673a394b
EA
4482int
4483i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4484 struct drm_file *file_priv)
4485{
db1b76ca 4486 struct drm_i915_private *dev_priv = dev->dev_private;
b4519513 4487 int ret;
673a394b 4488
79e53945
JB
4489 if (drm_core_check_feature(dev, DRIVER_MODESET))
4490 return 0;
4491
1f83fee0 4492 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
673a394b 4493 DRM_ERROR("Reenabling wedged hardware, good luck\n");
1f83fee0 4494 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
673a394b
EA
4495 }
4496
673a394b 4497 mutex_lock(&dev->struct_mutex);
db1b76ca 4498 dev_priv->ums.mm_suspended = 0;
9bb2d6f9 4499
f691e2f4 4500 ret = i915_gem_init_hw(dev);
d816f6ac
WF
4501 if (ret != 0) {
4502 mutex_unlock(&dev->struct_mutex);
9bb2d6f9 4503 return ret;
d816f6ac 4504 }
9bb2d6f9 4505
5cef07e1 4506 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
673a394b 4507 mutex_unlock(&dev->struct_mutex);
dbb19d30 4508
5f35308b
CW
4509 ret = drm_irq_install(dev);
4510 if (ret)
4511 goto cleanup_ringbuffer;
dbb19d30 4512
673a394b 4513 return 0;
5f35308b
CW
4514
4515cleanup_ringbuffer:
4516 mutex_lock(&dev->struct_mutex);
4517 i915_gem_cleanup_ringbuffer(dev);
db1b76ca 4518 dev_priv->ums.mm_suspended = 1;
5f35308b
CW
4519 mutex_unlock(&dev->struct_mutex);
4520
4521 return ret;
673a394b
EA
4522}
4523
4524int
4525i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4526 struct drm_file *file_priv)
4527{
79e53945
JB
4528 if (drm_core_check_feature(dev, DRIVER_MODESET))
4529 return 0;
4530
dbb19d30 4531 drm_irq_uninstall(dev);
db1b76ca 4532
45c5f202 4533 return i915_gem_suspend(dev);
673a394b
EA
4534}
4535
4536void
4537i915_gem_lastclose(struct drm_device *dev)
4538{
4539 int ret;
673a394b 4540
e806b495
EA
4541 if (drm_core_check_feature(dev, DRIVER_MODESET))
4542 return;
4543
45c5f202 4544 ret = i915_gem_suspend(dev);
6dbe2772
KP
4545 if (ret)
4546 DRM_ERROR("failed to idle hardware: %d\n", ret);
673a394b
EA
4547}
4548
64193406
CW
4549static void
4550init_ring_lists(struct intel_ring_buffer *ring)
4551{
4552 INIT_LIST_HEAD(&ring->active_list);
4553 INIT_LIST_HEAD(&ring->request_list);
64193406
CW
4554}
4555
7e0d96bc
BW
4556void i915_init_vm(struct drm_i915_private *dev_priv,
4557 struct i915_address_space *vm)
fc8c067e 4558{
7e0d96bc
BW
4559 if (!i915_is_ggtt(vm))
4560 drm_mm_init(&vm->mm, vm->start, vm->total);
fc8c067e
BW
4561 vm->dev = dev_priv->dev;
4562 INIT_LIST_HEAD(&vm->active_list);
4563 INIT_LIST_HEAD(&vm->inactive_list);
4564 INIT_LIST_HEAD(&vm->global_link);
f72d21ed 4565 list_add_tail(&vm->global_link, &dev_priv->vm_list);
fc8c067e
BW
4566}
4567
673a394b
EA
4568void
4569i915_gem_load(struct drm_device *dev)
4570{
4571 drm_i915_private_t *dev_priv = dev->dev_private;
42dcedd4
CW
4572 int i;
4573
4574 dev_priv->slab =
4575 kmem_cache_create("i915_gem_object",
4576 sizeof(struct drm_i915_gem_object), 0,
4577 SLAB_HWCACHE_ALIGN,
4578 NULL);
673a394b 4579
fc8c067e
BW
4580 INIT_LIST_HEAD(&dev_priv->vm_list);
4581 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4582
a33afea5 4583 INIT_LIST_HEAD(&dev_priv->context_list);
6c085a72
CW
4584 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4585 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
a09ba7fa 4586 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
1ec14ad3
CW
4587 for (i = 0; i < I915_NUM_RINGS; i++)
4588 init_ring_lists(&dev_priv->ring[i]);
4b9de737 4589 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
007cc8ac 4590 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
673a394b
EA
4591 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4592 i915_gem_retire_work_handler);
b29c19b6
CW
4593 INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4594 i915_gem_idle_work_handler);
1f83fee0 4595 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
31169714 4596
94400120
DA
4597 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4598 if (IS_GEN3(dev)) {
50743298
DV
4599 I915_WRITE(MI_ARB_STATE,
4600 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
94400120
DA
4601 }
4602
72bfa19c
CW
4603 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4604
de151cf6 4605 /* Old X drivers will take 0-2 for front, back, depth buffers */
b397c836
EA
4606 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4607 dev_priv->fence_reg_start = 3;
de151cf6 4608
42b5aeab
VS
4609 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4610 dev_priv->num_fence_regs = 32;
4611 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
de151cf6
JB
4612 dev_priv->num_fence_regs = 16;
4613 else
4614 dev_priv->num_fence_regs = 8;
4615
b5aa8a0f 4616 /* Initialize fence registers to zero */
19b2dbde
CW
4617 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4618 i915_gem_restore_fences(dev);
10ed13e4 4619
673a394b 4620 i915_gem_detect_bit_6_swizzle(dev);
6b95a207 4621 init_waitqueue_head(&dev_priv->pending_flip_queue);
17250b71 4622
ce453d81
CW
4623 dev_priv->mm.interruptible = true;
4624
7dc19d5a
DC
4625 dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
4626 dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
17250b71
CW
4627 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4628 register_shrinker(&dev_priv->mm.inactive_shrinker);
673a394b 4629}
71acb5eb
DA
4630
4631/*
4632 * Create a physically contiguous memory object for this object
4633 * e.g. for cursor + overlay regs
4634 */
995b6762
CW
4635static int i915_gem_init_phys_object(struct drm_device *dev,
4636 int id, int size, int align)
71acb5eb
DA
4637{
4638 drm_i915_private_t *dev_priv = dev->dev_private;
4639 struct drm_i915_gem_phys_object *phys_obj;
4640 int ret;
4641
4642 if (dev_priv->mm.phys_objs[id - 1] || !size)
4643 return 0;
4644
b14c5679 4645 phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
71acb5eb
DA
4646 if (!phys_obj)
4647 return -ENOMEM;
4648
4649 phys_obj->id = id;
4650
6eeefaf3 4651 phys_obj->handle = drm_pci_alloc(dev, size, align);
71acb5eb
DA
4652 if (!phys_obj->handle) {
4653 ret = -ENOMEM;
4654 goto kfree_obj;
4655 }
4656#ifdef CONFIG_X86
4657 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4658#endif
4659
4660 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4661
4662 return 0;
4663kfree_obj:
9a298b2a 4664 kfree(phys_obj);
71acb5eb
DA
4665 return ret;
4666}
4667
995b6762 4668static void i915_gem_free_phys_object(struct drm_device *dev, int id)
71acb5eb
DA
4669{
4670 drm_i915_private_t *dev_priv = dev->dev_private;
4671 struct drm_i915_gem_phys_object *phys_obj;
4672
4673 if (!dev_priv->mm.phys_objs[id - 1])
4674 return;
4675
4676 phys_obj = dev_priv->mm.phys_objs[id - 1];
4677 if (phys_obj->cur_obj) {
4678 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4679 }
4680
4681#ifdef CONFIG_X86
4682 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4683#endif
4684 drm_pci_free(dev, phys_obj->handle);
4685 kfree(phys_obj);
4686 dev_priv->mm.phys_objs[id - 1] = NULL;
4687}
4688
4689void i915_gem_free_all_phys_object(struct drm_device *dev)
4690{
4691 int i;
4692
260883c8 4693 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
71acb5eb
DA
4694 i915_gem_free_phys_object(dev, i);
4695}
4696
4697void i915_gem_detach_phys_object(struct drm_device *dev,
05394f39 4698 struct drm_i915_gem_object *obj)
71acb5eb 4699{
496ad9aa 4700 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
e5281ccd 4701 char *vaddr;
71acb5eb 4702 int i;
71acb5eb
DA
4703 int page_count;
4704
05394f39 4705 if (!obj->phys_obj)
71acb5eb 4706 return;
05394f39 4707 vaddr = obj->phys_obj->handle->vaddr;
71acb5eb 4708
05394f39 4709 page_count = obj->base.size / PAGE_SIZE;
71acb5eb 4710 for (i = 0; i < page_count; i++) {
5949eac4 4711 struct page *page = shmem_read_mapping_page(mapping, i);
e5281ccd
CW
4712 if (!IS_ERR(page)) {
4713 char *dst = kmap_atomic(page);
4714 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4715 kunmap_atomic(dst);
4716
4717 drm_clflush_pages(&page, 1);
4718
4719 set_page_dirty(page);
4720 mark_page_accessed(page);
4721 page_cache_release(page);
4722 }
71acb5eb 4723 }
e76e9aeb 4724 i915_gem_chipset_flush(dev);
d78b47b9 4725
05394f39
CW
4726 obj->phys_obj->cur_obj = NULL;
4727 obj->phys_obj = NULL;
71acb5eb
DA
4728}
4729
4730int
4731i915_gem_attach_phys_object(struct drm_device *dev,
05394f39 4732 struct drm_i915_gem_object *obj,
6eeefaf3
CW
4733 int id,
4734 int align)
71acb5eb 4735{
496ad9aa 4736 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
71acb5eb 4737 drm_i915_private_t *dev_priv = dev->dev_private;
71acb5eb
DA
4738 int ret = 0;
4739 int page_count;
4740 int i;
4741
4742 if (id > I915_MAX_PHYS_OBJECT)
4743 return -EINVAL;
4744
05394f39
CW
4745 if (obj->phys_obj) {
4746 if (obj->phys_obj->id == id)
71acb5eb
DA
4747 return 0;
4748 i915_gem_detach_phys_object(dev, obj);
4749 }
4750
71acb5eb
DA
4751 /* create a new object */
4752 if (!dev_priv->mm.phys_objs[id - 1]) {
4753 ret = i915_gem_init_phys_object(dev, id,
05394f39 4754 obj->base.size, align);
71acb5eb 4755 if (ret) {
05394f39
CW
4756 DRM_ERROR("failed to init phys object %d size: %zu\n",
4757 id, obj->base.size);
e5281ccd 4758 return ret;
71acb5eb
DA
4759 }
4760 }
4761
4762 /* bind to the object */
05394f39
CW
4763 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4764 obj->phys_obj->cur_obj = obj;
71acb5eb 4765
05394f39 4766 page_count = obj->base.size / PAGE_SIZE;
71acb5eb
DA
4767
4768 for (i = 0; i < page_count; i++) {
e5281ccd
CW
4769 struct page *page;
4770 char *dst, *src;
4771
5949eac4 4772 page = shmem_read_mapping_page(mapping, i);
e5281ccd
CW
4773 if (IS_ERR(page))
4774 return PTR_ERR(page);
71acb5eb 4775
ff75b9bc 4776 src = kmap_atomic(page);
05394f39 4777 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
71acb5eb 4778 memcpy(dst, src, PAGE_SIZE);
3e4d3af5 4779 kunmap_atomic(src);
71acb5eb 4780
e5281ccd
CW
4781 mark_page_accessed(page);
4782 page_cache_release(page);
4783 }
d78b47b9 4784
71acb5eb 4785 return 0;
71acb5eb
DA
4786}
4787
4788static int
05394f39
CW
4789i915_gem_phys_pwrite(struct drm_device *dev,
4790 struct drm_i915_gem_object *obj,
71acb5eb
DA
4791 struct drm_i915_gem_pwrite *args,
4792 struct drm_file *file_priv)
4793{
05394f39 4794 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
2bb4629a 4795 char __user *user_data = to_user_ptr(args->data_ptr);
71acb5eb 4796
b47b30cc
CW
4797 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4798 unsigned long unwritten;
4799
4800 /* The physical object once assigned is fixed for the lifetime
4801 * of the obj, so we can safely drop the lock and continue
4802 * to access vaddr.
4803 */
4804 mutex_unlock(&dev->struct_mutex);
4805 unwritten = copy_from_user(vaddr, user_data, args->size);
4806 mutex_lock(&dev->struct_mutex);
4807 if (unwritten)
4808 return -EFAULT;
4809 }
71acb5eb 4810
e76e9aeb 4811 i915_gem_chipset_flush(dev);
71acb5eb
DA
4812 return 0;
4813}
b962442e 4814
f787a5f5 4815void i915_gem_release(struct drm_device *dev, struct drm_file *file)
b962442e 4816{
f787a5f5 4817 struct drm_i915_file_private *file_priv = file->driver_priv;
b962442e 4818
b29c19b6
CW
4819 cancel_delayed_work_sync(&file_priv->mm.idle_work);
4820
b962442e
EA
4821 /* Clean up our request list when the client is going away, so that
4822 * later retire_requests won't dereference our soon-to-be-gone
4823 * file_priv.
4824 */
1c25595f 4825 spin_lock(&file_priv->mm.lock);
f787a5f5
CW
4826 while (!list_empty(&file_priv->mm.request_list)) {
4827 struct drm_i915_gem_request *request;
4828
4829 request = list_first_entry(&file_priv->mm.request_list,
4830 struct drm_i915_gem_request,
4831 client_list);
4832 list_del(&request->client_list);
4833 request->file_priv = NULL;
4834 }
1c25595f 4835 spin_unlock(&file_priv->mm.lock);
b962442e 4836}
31169714 4837
b29c19b6
CW
4838static void
4839i915_gem_file_idle_work_handler(struct work_struct *work)
4840{
4841 struct drm_i915_file_private *file_priv =
4842 container_of(work, typeof(*file_priv), mm.idle_work.work);
4843
4844 atomic_set(&file_priv->rps_wait_boost, false);
4845}
4846
4847int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4848{
4849 struct drm_i915_file_private *file_priv;
e422b888 4850 int ret;
b29c19b6
CW
4851
4852 DRM_DEBUG_DRIVER("\n");
4853
4854 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4855 if (!file_priv)
4856 return -ENOMEM;
4857
4858 file->driver_priv = file_priv;
4859 file_priv->dev_priv = dev->dev_private;
4860
4861 spin_lock_init(&file_priv->mm.lock);
4862 INIT_LIST_HEAD(&file_priv->mm.request_list);
4863 INIT_DELAYED_WORK(&file_priv->mm.idle_work,
4864 i915_gem_file_idle_work_handler);
4865
e422b888
BW
4866 ret = i915_gem_context_open(dev, file);
4867 if (ret)
4868 kfree(file_priv);
b29c19b6 4869
e422b888 4870 return ret;
b29c19b6
CW
4871}
4872
5774506f
CW
4873static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4874{
4875 if (!mutex_is_locked(mutex))
4876 return false;
4877
4878#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4879 return mutex->owner == task;
4880#else
4881 /* Since UP may be pre-empted, we cannot assume that we own the lock */
4882 return false;
4883#endif
4884}
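/* Editor's note: both shrinker entry points below rely on the helper
 * above for the same guarded-entry pattern -- try the lock, and if it
 * is already held by the current task (the shrinker re-entered from an
 * allocation made under struct_mutex), proceed without taking it:
 *
 *	bool unlock = true;
 *	if (!mutex_trylock(&dev->struct_mutex)) {
 *		if (!mutex_is_locked_by(&dev->struct_mutex, current))
 *			return 0; // or SHRINK_STOP in the scan path
 *		if (dev_priv->mm.shrinker_no_lock_stealing)
 *			return 0;
 *		unlock = false; // caller already owns it; don't unlock later
 *	}
 */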
4885
7dc19d5a
DC
4886static unsigned long
4887i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
31169714 4888{
17250b71
CW
4889 struct drm_i915_private *dev_priv =
4890 container_of(shrinker,
4891 struct drm_i915_private,
4892 mm.inactive_shrinker);
4893 struct drm_device *dev = dev_priv->dev;
6c085a72 4894 struct drm_i915_gem_object *obj;
5774506f 4895 bool unlock = true;
7dc19d5a 4896 unsigned long count;
17250b71 4897
5774506f
CW
4898 if (!mutex_trylock(&dev->struct_mutex)) {
4899 if (!mutex_is_locked_by(&dev->struct_mutex, current))
d3227046 4900 return 0;
5774506f 4901
677feac2 4902 if (dev_priv->mm.shrinker_no_lock_stealing)
d3227046 4903 return 0;
677feac2 4904
5774506f
CW
4905 unlock = false;
4906 }
31169714 4907
7dc19d5a 4908 count = 0;
35c20a60 4909 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
a5570178 4910 if (obj->pages_pin_count == 0)
7dc19d5a 4911 count += obj->base.size >> PAGE_SHIFT;
fcb4a578
BW
4912
4913 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4914 if (obj->active)
4915 continue;
4916
d7f46fc4 4917 if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
7dc19d5a 4918 count += obj->base.size >> PAGE_SHIFT;
fcb4a578 4919 }
17250b71 4920
5774506f
CW
4921 if (unlock)
4922 mutex_unlock(&dev->struct_mutex);
d9973b43 4923
7dc19d5a 4924 return count;
31169714 4925}
a70a3148
BW
4926
4927/* All the new VM stuff */
4928unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
4929 struct i915_address_space *vm)
4930{
4931 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4932 struct i915_vma *vma;
4933
6f425321
BW
4934 if (!dev_priv->mm.aliasing_ppgtt ||
4935 vm == &dev_priv->mm.aliasing_ppgtt->base)
a70a3148
BW
4936 vm = &dev_priv->gtt.base;
4937
4938 BUG_ON(list_empty(&o->vma_list));
4939 list_for_each_entry(vma, &o->vma_list, vma_link) {
4940 if (vma->vm == vm)
4941 return vma->node.start;
4942
4943 }
4944 return -1;
4945}
4946
4947bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4948 struct i915_address_space *vm)
4949{
4950 struct i915_vma *vma;
4951
4952 list_for_each_entry(vma, &o->vma_list, vma_link)
8b9c2b94 4953 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
a70a3148
BW
4954 return true;
4955
4956 return false;
4957}
4958
4959bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4960{
5a1d5eb0 4961 struct i915_vma *vma;
a70a3148 4962
5a1d5eb0
CW
4963 list_for_each_entry(vma, &o->vma_list, vma_link)
4964 if (drm_mm_node_allocated(&vma->node))
a70a3148
BW
4965 return true;
4966
4967 return false;
4968}
4969
4970unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
4971 struct i915_address_space *vm)
4972{
4973 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4974 struct i915_vma *vma;
4975
6f425321
BW
4976 if (!dev_priv->mm.aliasing_ppgtt ||
4977 vm == &dev_priv->mm.aliasing_ppgtt->base)
a70a3148
BW
4978 vm = &dev_priv->gtt.base;
4979
4980 BUG_ON(list_empty(&o->vma_list));
4981
4982 list_for_each_entry(vma, &o->vma_list, vma_link)
4983 if (vma->vm == vm)
4984 return vma->node.size;
4985
4986 return 0;
4987}
4988
7dc19d5a
DC
4989static unsigned long
4990i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
4991{
4992 struct drm_i915_private *dev_priv =
4993 container_of(shrinker,
4994 struct drm_i915_private,
4995 mm.inactive_shrinker);
4996 struct drm_device *dev = dev_priv->dev;
7dc19d5a
DC
4997 unsigned long freed;
4998 bool unlock = true;
4999
5000 if (!mutex_trylock(&dev->struct_mutex)) {
5001 if (!mutex_is_locked_by(&dev->struct_mutex, current))
d3227046 5002 return SHRINK_STOP;
7dc19d5a
DC
5003
5004 if (dev_priv->mm.shrinker_no_lock_stealing)
d3227046 5005 return SHRINK_STOP;
7dc19d5a
DC
5006
5007 unlock = false;
5008 }
5009
d9973b43
CW
5010 freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
5011 if (freed < sc->nr_to_scan)
5012 freed += __i915_gem_shrink(dev_priv,
5013 sc->nr_to_scan - freed,
5014 false);
5015 if (freed < sc->nr_to_scan)
7dc19d5a
DC
5016 freed += i915_gem_shrink_all(dev_priv);
5017
5018 if (unlock)
5019 mutex_unlock(&dev->struct_mutex);
d9973b43 5020
7dc19d5a
DC
5021 return freed;
5022}
5c2abbea
BW
5023
5024struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5025{
5026 struct i915_vma *vma;
5027
5028 if (WARN_ON(list_empty(&obj->vma_list)))
5029 return NULL;
5030
5031 vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
6e164c33 5032 if (vma->vm != obj_to_ggtt(obj))
5c2abbea
BW
5033 return NULL;
5034
5035 return vma;
5036}