drivers/gpu/drm/i915/i915_gem.c (at commit "drm/i915: Consolidate binding parameters into flags")

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

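/*
 * Worked example for the swizzle arithmetic above, assuming bit-17 swizzling
 * is active (an illustration derived from the helpers, not upstream text):
 * the GPU's view swaps the two 64-byte halves of every 128-byte span, so a
 * CPU copy targeting gpu_offset 0..63 must actually touch bytes 64..127,
 * which is what gpu_offset ^ 64 computes. Copies are chunked per 64-byte
 * cacheline (ALIGN(gpu_offset + 1, 64)) so a single copy never straddles
 * that swap boundary.
 */
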
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

next_page:
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

next_page:
		set_page_dirty(page);
		mark_page_accessed(page);

		if (ret)
			goto out;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);

	return ret;
}

static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_ring_buffer *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: duh!
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
			struct timespec *timeout,
			struct drm_i915_file_private *file_priv)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
	struct timespec before, now;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	int ret;

	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;

	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	getrawmonotonic(&now);
	trace_i915_gem_request_wait_end(ring, seqno);

	if (!irq_test_in_progress)
		ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}

	return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL, NULL);
}

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *ring)
{
	i915_gem_retire_requests_ring(ring);

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqno, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	obj->last_write_seqno = 0;
	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;

	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct drm_i915_file_private *file_priv,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = obj->ring;
	unsigned reset_counter;
	u32 seqno;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj,
							   file->driver_priv,
							   !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_display)
		i915_gem_object_flush_cpu_write_domain(obj, true);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	intel_runtime_pm_get(dev_priv);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon reacquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
	if (ret)
		goto unlock;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EINVAL;
		goto unlock;
	}

	/* Now bind it into the GTT if needed */
	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
	if (ret)
		goto unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unpin;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unpin;

	obj->fault_mappable = true;

	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
	pfn >>= PAGE_SHIFT;
	pfn += page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unpin:
	i915_gem_object_ggtt_unpin(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
		/* If this -EIO is due to a gpu hang, give the reset code a
		 * chance to clean up the mess. Otherwise return the proper
		 * SIGBUS. */
		if (i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}

	intel_runtime_pm_put(dev_priv);
	return ret;
}

void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
	struct i915_vma *vma;

	/*
	 * Only the global gtt is relevant for gtt memory mappings, so restrict
	 * list traversal to objects bound into the global address space. Note
	 * that the active list should be empty, but better safe than sorry.
	 */
	WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
	list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
		i915_gem_release_mmap(vma->obj);
	list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
		i915_gem_release_mmap(vma->obj);
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
	obj->fault_mappable = false;
}

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced)
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

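/*
 * Worked example for the two helpers above (an illustration derived from the
 * code, not upstream text): on gen3 the minimum fence region is 1 MiB, so a
 * 1.5 MiB tiled object is rounded up to a 2 MiB gtt_size and, when a fence
 * may be needed, must also be bound at a 2 MiB aligned GTT offset. On gen4+
 * or for untiled objects no power-of-two rounding applies and plain 4 KiB
 * page alignment suffices.
 */
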
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	if (drm_vma_node_has_offset(&obj->base.vma_node))
		return 0;

	dev_priv->mm.shrinker_no_lock_stealing = true;

	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	/* Badly fragmented mmap space? The only way we can recover
	 * space is by destroying unwanted objects. We can't randomly release
	 * mmap_offsets as userspace expects them to be persistent for the
	 * lifetime of the objects. The closest we can do is to release the
	 * offsets on purgeable objects by truncating it and marking it purged,
	 * which prevents userspace from ever using that object again.
	 */
	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	i915_gem_shrink_all(dev_priv);
	ret = drm_gem_create_mmap_offset(&obj->base);
out:
	dev_priv->mm.shrinker_no_lock_stealing = false;

	return ret;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->base.size > dev_priv->gtt.mappable_end) {
		ret = -E2BIG;
		goto out;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}

225067ee
DV
1669/* Immediately discard the backing storage */
1670static void
1671i915_gem_object_truncate(struct drm_i915_gem_object *obj)
e5281ccd 1672{
e5281ccd 1673 struct inode *inode;
e5281ccd 1674
4d6294bf 1675 i915_gem_object_free_mmap_offset(obj);
1286ff73 1676
4d6294bf
CW
1677 if (obj->base.filp == NULL)
1678 return;
e5281ccd 1679
225067ee
DV
1680 /* Our goal here is to return as much of the memory as
1681 * is possible back to the system as we are called from OOM.
1682 * To do this we must instruct the shmfs to drop all of its
1683 * backing pages, *now*.
1684 */
496ad9aa 1685 inode = file_inode(obj->base.filp);
225067ee 1686 shmem_truncate_range(inode, 0, (loff_t)-1);
e5281ccd 1687
225067ee
DV
1688 obj->madv = __I915_MADV_PURGED;
1689}
e5281ccd 1690
225067ee
DV
1691static inline int
1692i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1693{
1694 return obj->madv == I915_MADV_DONTNEED;
e5281ccd
CW
1695}
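
The madv state tested here is set from userspace via the madvise ioctl; a hedged sketch of that side (struct drm_i915_gem_madvise, the I915_MADV_* values and DRM_IOCTL_I915_GEM_MADVISE come from i915_drm.h, drmIoctl() from libdrm; bo_madvise() is an illustrative name):

#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Mark an idle, cached buffer as purgeable (I915_MADV_DONTNEED) or claim
 * it back (I915_MADV_WILLNEED).  Returns true if the backing pages were
 * retained; false means they were truncated as in
 * i915_gem_object_truncate() above and the contents are lost.
 */
static bool bo_madvise(int fd, uint32_t handle, uint32_t madv)
{
	struct drm_i915_gem_madvise arg = {
		.handle = handle,
		.madv = madv,
		.retained = 1,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
		return false;

	return arg.retained;
}
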
1696
5cdf5881 1697static void
05394f39 1698i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
673a394b 1699{
90797e6d
ID
1700 struct sg_page_iter sg_iter;
1701 int ret;
1286ff73 1702
05394f39 1703 BUG_ON(obj->madv == __I915_MADV_PURGED);
673a394b 1704
6c085a72
CW
1705 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1706 if (ret) {
1707 /* In the event of a disaster, abandon all caches and
1708 * hope for the best.
1709 */
1710 WARN_ON(ret != -EIO);
2c22569b 1711 i915_gem_clflush_object(obj, true);
6c085a72
CW
1712 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1713 }
1714
6dacfd2f 1715 if (i915_gem_object_needs_bit17_swizzle(obj))
280b713b
EA
1716 i915_gem_object_save_bit_17_swizzle(obj);
1717
05394f39
CW
1718 if (obj->madv == I915_MADV_DONTNEED)
1719 obj->dirty = 0;
3ef94daa 1720
90797e6d 1721 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
2db76d7c 1722 struct page *page = sg_page_iter_page(&sg_iter);
9da3da66 1723
05394f39 1724 if (obj->dirty)
9da3da66 1725 set_page_dirty(page);
3ef94daa 1726
05394f39 1727 if (obj->madv == I915_MADV_WILLNEED)
9da3da66 1728 mark_page_accessed(page);
3ef94daa 1729
9da3da66 1730 page_cache_release(page);
3ef94daa 1731 }
05394f39 1732 obj->dirty = 0;
673a394b 1733
9da3da66
CW
1734 sg_free_table(obj->pages);
1735 kfree(obj->pages);
37e680a1 1736}
6c085a72 1737
dd624afd 1738int
37e680a1
CW
1739i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1740{
1741 const struct drm_i915_gem_object_ops *ops = obj->ops;
1742
2f745ad3 1743 if (obj->pages == NULL)
37e680a1
CW
1744 return 0;
1745
a5570178
CW
1746 if (obj->pages_pin_count)
1747 return -EBUSY;
1748
9843877d 1749 BUG_ON(i915_gem_obj_bound_any(obj));
3e123027 1750
a2165e31
CW
1751 /* ->put_pages might need to allocate memory for the bit17 swizzle
1752 * array, hence protect them from being reaped by removing them from gtt
1753 * lists early. */
35c20a60 1754 list_del(&obj->global_list);
a2165e31 1755
37e680a1 1756 ops->put_pages(obj);
05394f39 1757 obj->pages = NULL;
37e680a1 1758
6c085a72
CW
1759 if (i915_gem_object_is_purgeable(obj))
1760 i915_gem_object_truncate(obj);
1761
1762 return 0;
1763}
1764
d9973b43 1765static unsigned long
93927ca5
DV
1766__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1767 bool purgeable_only)
6c085a72 1768{
57094f82 1769 struct list_head still_bound_list;
6c085a72 1770 struct drm_i915_gem_object *obj, *next;
d9973b43 1771 unsigned long count = 0;
6c085a72
CW
1772
1773 list_for_each_entry_safe(obj, next,
1774 &dev_priv->mm.unbound_list,
35c20a60 1775 global_list) {
93927ca5 1776 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
37e680a1 1777 i915_gem_object_put_pages(obj) == 0) {
6c085a72
CW
1778 count += obj->base.size >> PAGE_SHIFT;
1779 if (count >= target)
1780 return count;
1781 }
1782 }
1783
57094f82
CW
1784 /*
1785 * As we may completely rewrite the bound list whilst unbinding
1786 * (due to retiring requests) we have to strictly process only
 1787 * one element of the list at a time, and recheck the list
1788 * on every iteration.
1789 */
1790 INIT_LIST_HEAD(&still_bound_list);
1791 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
07fe0b12 1792 struct i915_vma *vma, *v;
80dcfdbd 1793
57094f82
CW
1794 obj = list_first_entry(&dev_priv->mm.bound_list,
1795 typeof(*obj), global_list);
1796 list_move_tail(&obj->global_list, &still_bound_list);
1797
80dcfdbd
BW
1798 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1799 continue;
1800
57094f82
CW
1801 /*
1802 * Hold a reference whilst we unbind this object, as we may
1803 * end up waiting for and retiring requests. This might
1804 * release the final reference (held by the active list)
 1805 * and result in the object being freed from under us.
1807 *
1808 * Note 1: Shrinking the bound list is special since only active
 1809 * (and hence bound) objects can contain such limbo objects, so
1810 * we don't need special tricks for shrinking the unbound list.
1811 * The only other place where we have to be careful with active
1812 * objects suddenly disappearing due to retiring requests is the
1813 * eviction code.
1814 *
1815 * Note 2: Even though the bound list doesn't hold a reference
1816 * to the object we can safely grab one here: The final object
1817 * unreferencing and the bound_list are both protected by the
1818 * dev->struct_mutex and so we won't ever be able to observe an
 1819 * object on the bound_list with a reference count of 0.
1820 */
1821 drm_gem_object_reference(&obj->base);
1822
07fe0b12
BW
1823 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1824 if (i915_vma_unbind(vma))
1825 break;
80dcfdbd 1826
57094f82 1827 if (i915_gem_object_put_pages(obj) == 0)
6c085a72 1828 count += obj->base.size >> PAGE_SHIFT;
57094f82
CW
1829
1830 drm_gem_object_unreference(&obj->base);
6c085a72 1831 }
57094f82 1832 list_splice(&still_bound_list, &dev_priv->mm.bound_list);
6c085a72
CW
1833
1834 return count;
1835}
1836
d9973b43 1837static unsigned long
93927ca5
DV
1838i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1839{
1840 return __i915_gem_shrink(dev_priv, target, true);
1841}
1842
d9973b43 1843static unsigned long
6c085a72
CW
1844i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1845{
1846 struct drm_i915_gem_object *obj, *next;
7dc19d5a 1847 long freed = 0;
6c085a72
CW
1848
1849 i915_gem_evict_everything(dev_priv->dev);
1850
35c20a60 1851 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
7dc19d5a 1852 global_list) {
d9973b43 1853 if (i915_gem_object_put_pages(obj) == 0)
7dc19d5a 1854 freed += obj->base.size >> PAGE_SHIFT;
7dc19d5a
DC
1855 }
1856 return freed;
225067ee
DV
1857}
1858
37e680a1 1859static int
6c085a72 1860i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
e5281ccd 1861{
6c085a72 1862 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
e5281ccd
CW
1863 int page_count, i;
1864 struct address_space *mapping;
9da3da66
CW
1865 struct sg_table *st;
1866 struct scatterlist *sg;
90797e6d 1867 struct sg_page_iter sg_iter;
e5281ccd 1868 struct page *page;
90797e6d 1869 unsigned long last_pfn = 0; /* suppress gcc warning */
6c085a72 1870 gfp_t gfp;
e5281ccd 1871
6c085a72
CW
1872 /* Assert that the object is not currently in any GPU domain. As it
1873 * wasn't in the GTT, there shouldn't be any way it could have been in
1874 * a GPU cache
1875 */
1876 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1877 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1878
9da3da66
CW
1879 st = kmalloc(sizeof(*st), GFP_KERNEL);
1880 if (st == NULL)
1881 return -ENOMEM;
1882
05394f39 1883 page_count = obj->base.size / PAGE_SIZE;
9da3da66 1884 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
9da3da66 1885 kfree(st);
e5281ccd 1886 return -ENOMEM;
9da3da66 1887 }
e5281ccd 1888
9da3da66
CW
1889 /* Get the list of pages out of our struct file. They'll be pinned
1890 * at this point until we release them.
1891 *
1892 * Fail silently without starting the shrinker
1893 */
496ad9aa 1894 mapping = file_inode(obj->base.filp)->i_mapping;
6c085a72 1895 gfp = mapping_gfp_mask(mapping);
caf49191 1896 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
6c085a72 1897 gfp &= ~(__GFP_IO | __GFP_WAIT);
90797e6d
ID
1898 sg = st->sgl;
1899 st->nents = 0;
1900 for (i = 0; i < page_count; i++) {
6c085a72
CW
1901 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1902 if (IS_ERR(page)) {
1903 i915_gem_purge(dev_priv, page_count);
1904 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1905 }
1906 if (IS_ERR(page)) {
1907 /* We've tried hard to allocate the memory by reaping
1908 * our own buffer, now let the real VM do its job and
1909 * go down in flames if truly OOM.
1910 */
caf49191 1911 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
6c085a72
CW
1912 gfp |= __GFP_IO | __GFP_WAIT;
1913
1914 i915_gem_shrink_all(dev_priv);
1915 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1916 if (IS_ERR(page))
1917 goto err_pages;
1918
caf49191 1919 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
6c085a72
CW
1920 gfp &= ~(__GFP_IO | __GFP_WAIT);
1921 }
426729dc
KRW
1922#ifdef CONFIG_SWIOTLB
1923 if (swiotlb_nr_tbl()) {
1924 st->nents++;
1925 sg_set_page(sg, page, PAGE_SIZE, 0);
1926 sg = sg_next(sg);
1927 continue;
1928 }
1929#endif
90797e6d
ID
1930 if (!i || page_to_pfn(page) != last_pfn + 1) {
1931 if (i)
1932 sg = sg_next(sg);
1933 st->nents++;
1934 sg_set_page(sg, page, PAGE_SIZE, 0);
1935 } else {
1936 sg->length += PAGE_SIZE;
1937 }
1938 last_pfn = page_to_pfn(page);
3bbbe706
DV
1939
1940 /* Check that the i965g/gm workaround works. */
1941 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
e5281ccd 1942 }
426729dc
KRW
1943#ifdef CONFIG_SWIOTLB
1944 if (!swiotlb_nr_tbl())
1945#endif
1946 sg_mark_end(sg);
74ce6b6c
CW
1947 obj->pages = st;
1948
6dacfd2f 1949 if (i915_gem_object_needs_bit17_swizzle(obj))
e5281ccd
CW
1950 i915_gem_object_do_bit_17_swizzle(obj);
1951
1952 return 0;
1953
1954err_pages:
90797e6d
ID
1955 sg_mark_end(sg);
1956 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2db76d7c 1957 page_cache_release(sg_page_iter_page(&sg_iter));
9da3da66
CW
1958 sg_free_table(st);
1959 kfree(st);
e5281ccd 1960 return PTR_ERR(page);
673a394b
EA
1961}
1962
37e680a1
CW
1963/* Ensure that the associated pages are gathered from the backing storage
1964 * and pinned into our object. i915_gem_object_get_pages() may be called
1965 * multiple times before they are released by a single call to
1966 * i915_gem_object_put_pages() - once the pages are no longer referenced
1967 * either as a result of memory pressure (reaping pages under the shrinker)
1968 * or as the object is itself released.
1969 */
1970int
1971i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1972{
1973 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1974 const struct drm_i915_gem_object_ops *ops = obj->ops;
1975 int ret;
1976
2f745ad3 1977 if (obj->pages)
37e680a1
CW
1978 return 0;
1979
43e28f09 1980 if (obj->madv != I915_MADV_WILLNEED) {
bd9b6a4e 1981 DRM_DEBUG("Attempting to obtain a purgeable object\n");
8c99e57d 1982 return -EFAULT;
43e28f09
CW
1983 }
1984
a5570178
CW
1985 BUG_ON(obj->pages_pin_count);
1986
37e680a1
CW
1987 ret = ops->get_pages(obj);
1988 if (ret)
1989 return ret;
1990
35c20a60 1991 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
37e680a1 1992 return 0;
673a394b
EA
1993}
1994
e2d05a8b 1995static void
05394f39 1996i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
9d773091 1997 struct intel_ring_buffer *ring)
673a394b 1998{
05394f39 1999 struct drm_device *dev = obj->base.dev;
69dc4987 2000 struct drm_i915_private *dev_priv = dev->dev_private;
9d773091 2001 u32 seqno = intel_ring_get_seqno(ring);
617dbe27 2002
852835f3 2003 BUG_ON(ring == NULL);
02978ff5
CW
2004 if (obj->ring != ring && obj->last_write_seqno) {
2005 /* Keep the seqno relative to the current ring */
2006 obj->last_write_seqno = seqno;
2007 }
05394f39 2008 obj->ring = ring;
673a394b
EA
2009
2010 /* Add a reference if we're newly entering the active list. */
05394f39
CW
2011 if (!obj->active) {
2012 drm_gem_object_reference(&obj->base);
2013 obj->active = 1;
673a394b 2014 }
e35a41de 2015
05394f39 2016 list_move_tail(&obj->ring_list, &ring->active_list);
caea7476 2017
0201f1ec 2018 obj->last_read_seqno = seqno;
caea7476 2019
7dd49065 2020 if (obj->fenced_gpu_access) {
caea7476 2021 obj->last_fenced_seqno = seqno;
caea7476 2022
7dd49065
CW
2023 /* Bump MRU to take account of the delayed flush */
2024 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2025 struct drm_i915_fence_reg *reg;
2026
2027 reg = &dev_priv->fence_regs[obj->fence_reg];
2028 list_move_tail(&reg->lru_list,
2029 &dev_priv->mm.fence_list);
2030 }
caea7476
CW
2031 }
2032}
2033
e2d05a8b
BW
2034void i915_vma_move_to_active(struct i915_vma *vma,
2035 struct intel_ring_buffer *ring)
2036{
2037 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2038 return i915_gem_object_move_to_active(vma->obj, ring);
2039}
2040
caea7476 2041static void
caea7476 2042i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
ce44b0ea 2043{
ca191b13 2044 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
feb822cf
BW
2045 struct i915_address_space *vm;
2046 struct i915_vma *vma;
ce44b0ea 2047
65ce3027 2048 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
05394f39 2049 BUG_ON(!obj->active);
caea7476 2050
feb822cf
BW
2051 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2052 vma = i915_gem_obj_to_vma(obj, vm);
2053 if (vma && !list_empty(&vma->mm_list))
2054 list_move_tail(&vma->mm_list, &vm->inactive_list);
2055 }
caea7476 2056
65ce3027 2057 list_del_init(&obj->ring_list);
caea7476
CW
2058 obj->ring = NULL;
2059
65ce3027
CW
2060 obj->last_read_seqno = 0;
2061 obj->last_write_seqno = 0;
2062 obj->base.write_domain = 0;
2063
2064 obj->last_fenced_seqno = 0;
caea7476 2065 obj->fenced_gpu_access = false;
caea7476
CW
2066
2067 obj->active = 0;
2068 drm_gem_object_unreference(&obj->base);
2069
2070 WARN_ON(i915_verify_lists(dev));
ce44b0ea 2071}
673a394b 2072
9d773091 2073static int
fca26bb4 2074i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
53d227f2 2075{
9d773091
CW
2076 struct drm_i915_private *dev_priv = dev->dev_private;
2077 struct intel_ring_buffer *ring;
2078 int ret, i, j;
53d227f2 2079
107f27a5 2080 /* Carefully retire all requests without writing to the rings */
9d773091 2081 for_each_ring(ring, dev_priv, i) {
107f27a5
CW
2082 ret = intel_ring_idle(ring);
2083 if (ret)
2084 return ret;
9d773091 2085 }
9d773091 2086 i915_gem_retire_requests(dev);
107f27a5
CW
2087
2088 /* Finally reset hw state */
9d773091 2089 for_each_ring(ring, dev_priv, i) {
fca26bb4 2090 intel_ring_init_seqno(ring, seqno);
498d2ac1 2091
9d773091
CW
2092 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
2093 ring->sync_seqno[j] = 0;
2094 }
53d227f2 2095
9d773091 2096 return 0;
53d227f2
DV
2097}
2098
fca26bb4
MK
2099int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2100{
2101 struct drm_i915_private *dev_priv = dev->dev_private;
2102 int ret;
2103
2104 if (seqno == 0)
2105 return -EINVAL;
2106
 2107 /* The HWS page needs to be set to a value less than what we
 2108 * will inject into the ring
2109 */
2110 ret = i915_gem_init_seqno(dev, seqno - 1);
2111 if (ret)
2112 return ret;
2113
2114 /* Carefully set the last_seqno value so that wrap
2115 * detection still works
2116 */
2117 dev_priv->next_seqno = seqno;
2118 dev_priv->last_seqno = seqno - 1;
2119 if (dev_priv->last_seqno == 0)
2120 dev_priv->last_seqno--;
2121
2122 return 0;
2123}
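
The wrap handling above only works because seqnos are always compared with modular 32-bit arithmetic rather than a plain '>='; a small standalone sketch of that comparison (mirroring the i915_seqno_passed() helper in i915_drv.h; the example values are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* seq1 "has passed" seq2 if their signed distance is non-negative; this
 * remains correct across the u32 wrap as long as the two values are less
 * than 2^31 apart.
 */
static inline bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

/* e.g. seqno_passed(0x00000002, 0xfffffffe) is true: 2 lies just after
 * the wrap.  Priming the HWS page with seqno - 1 keeps this property for
 * the first request emitted at the new value.
 */
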
2124
9d773091
CW
2125int
2126i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
53d227f2 2127{
9d773091
CW
2128 struct drm_i915_private *dev_priv = dev->dev_private;
2129
2130 /* reserve 0 for non-seqno */
2131 if (dev_priv->next_seqno == 0) {
fca26bb4 2132 int ret = i915_gem_init_seqno(dev, 0);
9d773091
CW
2133 if (ret)
2134 return ret;
53d227f2 2135
9d773091
CW
2136 dev_priv->next_seqno = 1;
2137 }
53d227f2 2138
f72b3435 2139 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
9d773091 2140 return 0;
53d227f2
DV
2141}
2142
0025c077
MK
2143int __i915_add_request(struct intel_ring_buffer *ring,
2144 struct drm_file *file,
7d736f4f 2145 struct drm_i915_gem_object *obj,
0025c077 2146 u32 *out_seqno)
673a394b 2147{
db53a302 2148 drm_i915_private_t *dev_priv = ring->dev->dev_private;
acb868d3 2149 struct drm_i915_gem_request *request;
7d736f4f 2150 u32 request_ring_position, request_start;
673a394b 2151 int was_empty;
3cce469c
CW
2152 int ret;
2153
7d736f4f 2154 request_start = intel_ring_get_tail(ring);
cc889e0f
DV
2155 /*
2156 * Emit any outstanding flushes - execbuf can fail to emit the flush
2157 * after having emitted the batchbuffer command. Hence we need to fix
2158 * things up similar to emitting the lazy request. The difference here
2159 * is that the flush _must_ happen before the next request, no matter
2160 * what.
2161 */
a7b9761d
CW
2162 ret = intel_ring_flush_all_caches(ring);
2163 if (ret)
2164 return ret;
cc889e0f 2165
3c0e234c
CW
2166 request = ring->preallocated_lazy_request;
2167 if (WARN_ON(request == NULL))
acb868d3 2168 return -ENOMEM;
cc889e0f 2169
a71d8d94
CW
2170 /* Record the position of the start of the request so that
2171 * should we detect the updated seqno part-way through the
2172 * GPU processing the request, we never over-estimate the
2173 * position of the head.
2174 */
2175 request_ring_position = intel_ring_get_tail(ring);
2176
9d773091 2177 ret = ring->add_request(ring);
3c0e234c 2178 if (ret)
3bb73aba 2179 return ret;
673a394b 2180
9d773091 2181 request->seqno = intel_ring_get_seqno(ring);
852835f3 2182 request->ring = ring;
7d736f4f 2183 request->head = request_start;
a71d8d94 2184 request->tail = request_ring_position;
7d736f4f
MK
2185
2186 /* Whilst this request exists, batch_obj will be on the
2187 * active_list, and so will hold the active reference. Only when this
 2188 * request is retired will the batch_obj be moved onto the
2189 * inactive_list and lose its active reference. Hence we do not need
2190 * to explicitly hold another reference here.
2191 */
9a7e0c2a 2192 request->batch_obj = obj;
0e50e96b 2193
9a7e0c2a
CW
2194 /* Hold a reference to the current context so that we can inspect
2195 * it later in case a hangcheck error event fires.
2196 */
2197 request->ctx = ring->last_context;
0e50e96b
MK
2198 if (request->ctx)
2199 i915_gem_context_reference(request->ctx);
2200
673a394b 2201 request->emitted_jiffies = jiffies;
852835f3
ZN
2202 was_empty = list_empty(&ring->request_list);
2203 list_add_tail(&request->list, &ring->request_list);
3bb73aba 2204 request->file_priv = NULL;
852835f3 2205
db53a302
CW
2206 if (file) {
2207 struct drm_i915_file_private *file_priv = file->driver_priv;
2208
1c25595f 2209 spin_lock(&file_priv->mm.lock);
f787a5f5 2210 request->file_priv = file_priv;
b962442e 2211 list_add_tail(&request->client_list,
f787a5f5 2212 &file_priv->mm.request_list);
1c25595f 2213 spin_unlock(&file_priv->mm.lock);
b962442e 2214 }
673a394b 2215
9d773091 2216 trace_i915_gem_request_add(ring, request->seqno);
1823521d 2217 ring->outstanding_lazy_seqno = 0;
3c0e234c 2218 ring->preallocated_lazy_request = NULL;
db53a302 2219
db1b76ca 2220 if (!dev_priv->ums.mm_suspended) {
10cd45b6
MK
2221 i915_queue_hangcheck(ring->dev);
2222
f047e395 2223 if (was_empty) {
b29c19b6 2224 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
b3b079db 2225 queue_delayed_work(dev_priv->wq,
bcb45086
CW
2226 &dev_priv->mm.retire_work,
2227 round_jiffies_up_relative(HZ));
f047e395
CW
2228 intel_mark_busy(dev_priv->dev);
2229 }
f65d9421 2230 }
cc889e0f 2231
acb868d3 2232 if (out_seqno)
9d773091 2233 *out_seqno = request->seqno;
3cce469c 2234 return 0;
673a394b
EA
2235}
2236
f787a5f5
CW
2237static inline void
2238i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
673a394b 2239{
1c25595f 2240 struct drm_i915_file_private *file_priv = request->file_priv;
673a394b 2241
1c25595f
CW
2242 if (!file_priv)
2243 return;
1c5d22f7 2244
1c25595f 2245 spin_lock(&file_priv->mm.lock);
b29c19b6
CW
2246 list_del(&request->client_list);
2247 request->file_priv = NULL;
1c25595f 2248 spin_unlock(&file_priv->mm.lock);
673a394b 2249}
673a394b 2250
939fd762 2251static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
44e2c070 2252 const struct i915_hw_context *ctx)
be62acb4 2253{
44e2c070 2254 unsigned long elapsed;
be62acb4 2255
44e2c070
MK
2256 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2257
2258 if (ctx->hang_stats.banned)
be62acb4
MK
2259 return true;
2260
2261 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
3fac8978
MK
2262 if (dev_priv->gpu_error.stop_rings == 0 &&
2263 i915_gem_context_is_default(ctx)) {
2264 DRM_ERROR("gpu hanging too fast, banning!\n");
2265 } else {
2266 DRM_DEBUG("context hanging too fast, banning!\n");
2267 }
2268
be62acb4
MK
2269 return true;
2270 }
2271
2272 return false;
2273}
2274
939fd762
MK
2275static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2276 struct i915_hw_context *ctx,
b6b0fac0 2277 const bool guilty)
aa60c664 2278{
44e2c070
MK
2279 struct i915_ctx_hang_stats *hs;
2280
2281 if (WARN_ON(!ctx))
2282 return;
aa60c664 2283
44e2c070
MK
2284 hs = &ctx->hang_stats;
2285
2286 if (guilty) {
939fd762 2287 hs->banned = i915_context_is_banned(dev_priv, ctx);
44e2c070
MK
2288 hs->batch_active++;
2289 hs->guilty_ts = get_seconds();
2290 } else {
2291 hs->batch_pending++;
aa60c664
MK
2292 }
2293}
2294
0e50e96b
MK
2295static void i915_gem_free_request(struct drm_i915_gem_request *request)
2296{
2297 list_del(&request->list);
2298 i915_gem_request_remove_from_client(request);
2299
2300 if (request->ctx)
2301 i915_gem_context_unreference(request->ctx);
2302
2303 kfree(request);
2304}
2305
b6b0fac0
MK
2306static struct drm_i915_gem_request *
2307i915_gem_find_first_non_complete(struct intel_ring_buffer *ring)
9375e446 2308{
4db080f9 2309 struct drm_i915_gem_request *request;
b6b0fac0 2310 const u32 completed_seqno = ring->get_seqno(ring, false);
4db080f9
CW
2311
2312 list_for_each_entry(request, &ring->request_list, list) {
2313 if (i915_seqno_passed(completed_seqno, request->seqno))
2314 continue;
aa60c664 2315
b6b0fac0 2316 return request;
4db080f9 2317 }
b6b0fac0
MK
2318
2319 return NULL;
2320}
2321
2322static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2323 struct intel_ring_buffer *ring)
2324{
2325 struct drm_i915_gem_request *request;
2326 bool ring_hung;
2327
2328 request = i915_gem_find_first_non_complete(ring);
2329
2330 if (request == NULL)
2331 return;
2332
2333 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2334
939fd762 2335 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
b6b0fac0
MK
2336
2337 list_for_each_entry_continue(request, &ring->request_list, list)
939fd762 2338 i915_set_reset_status(dev_priv, request->ctx, false);
4db080f9 2339}
aa60c664 2340
4db080f9
CW
2341static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2342 struct intel_ring_buffer *ring)
2343{
dfaae392 2344 while (!list_empty(&ring->active_list)) {
05394f39 2345 struct drm_i915_gem_object *obj;
9375e446 2346
05394f39
CW
2347 obj = list_first_entry(&ring->active_list,
2348 struct drm_i915_gem_object,
2349 ring_list);
9375e446 2350
05394f39 2351 i915_gem_object_move_to_inactive(obj);
673a394b 2352 }
1d62beea
BW
2353
2354 /*
2355 * We must free the requests after all the corresponding objects have
2356 * been moved off active lists. Which is the same order as the normal
2357 * retire_requests function does. This is important if object hold
2358 * implicit references on things like e.g. ppgtt address spaces through
2359 * the request.
2360 */
2361 while (!list_empty(&ring->request_list)) {
2362 struct drm_i915_gem_request *request;
2363
2364 request = list_first_entry(&ring->request_list,
2365 struct drm_i915_gem_request,
2366 list);
2367
2368 i915_gem_free_request(request);
2369 }
673a394b
EA
2370}
2371
19b2dbde 2372void i915_gem_restore_fences(struct drm_device *dev)
312817a3
CW
2373{
2374 struct drm_i915_private *dev_priv = dev->dev_private;
2375 int i;
2376
4b9de737 2377 for (i = 0; i < dev_priv->num_fence_regs; i++) {
312817a3 2378 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
7d2cb39c 2379
94a335db
DV
2380 /*
2381 * Commit delayed tiling changes if we have an object still
2382 * attached to the fence, otherwise just clear the fence.
2383 */
2384 if (reg->obj) {
2385 i915_gem_object_update_fence(reg->obj, reg,
2386 reg->obj->tiling_mode);
2387 } else {
2388 i915_gem_write_fence(dev, i, NULL);
2389 }
312817a3
CW
2390 }
2391}
2392
069efc1d 2393void i915_gem_reset(struct drm_device *dev)
673a394b 2394{
77f01230 2395 struct drm_i915_private *dev_priv = dev->dev_private;
b4519513 2396 struct intel_ring_buffer *ring;
1ec14ad3 2397 int i;
673a394b 2398
4db080f9
CW
2399 /*
2400 * Before we free the objects from the requests, we need to inspect
 2401 * them to find the guilty party. As the requests only borrow
2402 * their reference to the objects, the inspection must be done first.
2403 */
2404 for_each_ring(ring, dev_priv, i)
2405 i915_gem_reset_ring_status(dev_priv, ring);
2406
b4519513 2407 for_each_ring(ring, dev_priv, i)
4db080f9 2408 i915_gem_reset_ring_cleanup(dev_priv, ring);
dfaae392 2409
3d57e5bd
BW
2410 i915_gem_cleanup_ringbuffer(dev);
2411
acce9ffa
BW
2412 i915_gem_context_reset(dev);
2413
19b2dbde 2414 i915_gem_restore_fences(dev);
673a394b
EA
2415}
2416
2417/**
2418 * This function clears the request list as sequence numbers are passed.
2419 */
a71d8d94 2420void
db53a302 2421i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
673a394b 2422{
673a394b
EA
2423 uint32_t seqno;
2424
db53a302 2425 if (list_empty(&ring->request_list))
6c0594a3
KW
2426 return;
2427
db53a302 2428 WARN_ON(i915_verify_lists(ring->dev));
673a394b 2429
b2eadbc8 2430 seqno = ring->get_seqno(ring, true);
1ec14ad3 2431
e9103038
CW
2432 /* Move any buffers on the active list that are no longer referenced
2433 * by the ringbuffer to the flushing/inactive lists as appropriate,
2434 * before we free the context associated with the requests.
2435 */
2436 while (!list_empty(&ring->active_list)) {
2437 struct drm_i915_gem_object *obj;
2438
2439 obj = list_first_entry(&ring->active_list,
2440 struct drm_i915_gem_object,
2441 ring_list);
2442
2443 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2444 break;
2445
2446 i915_gem_object_move_to_inactive(obj);
2447 }
2448
2449
852835f3 2450 while (!list_empty(&ring->request_list)) {
673a394b 2451 struct drm_i915_gem_request *request;
673a394b 2452
852835f3 2453 request = list_first_entry(&ring->request_list,
673a394b
EA
2454 struct drm_i915_gem_request,
2455 list);
673a394b 2456
dfaae392 2457 if (!i915_seqno_passed(seqno, request->seqno))
b84d5f0c
CW
2458 break;
2459
db53a302 2460 trace_i915_gem_request_retire(ring, request->seqno);
a71d8d94
CW
2461 /* We know the GPU must have read the request to have
2462 * sent us the seqno + interrupt, so use the position
 2463 * of the tail of the request to update the last known position
2464 * of the GPU head.
2465 */
2466 ring->last_retired_head = request->tail;
b84d5f0c 2467
0e50e96b 2468 i915_gem_free_request(request);
b84d5f0c 2469 }
673a394b 2470
db53a302
CW
2471 if (unlikely(ring->trace_irq_seqno &&
2472 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1ec14ad3 2473 ring->irq_put(ring);
db53a302 2474 ring->trace_irq_seqno = 0;
9d34e5db 2475 }
23bc5982 2476
db53a302 2477 WARN_ON(i915_verify_lists(ring->dev));
673a394b
EA
2478}
2479
b29c19b6 2480bool
b09a1fec
CW
2481i915_gem_retire_requests(struct drm_device *dev)
2482{
2483 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 2484 struct intel_ring_buffer *ring;
b29c19b6 2485 bool idle = true;
1ec14ad3 2486 int i;
b09a1fec 2487
b29c19b6 2488 for_each_ring(ring, dev_priv, i) {
b4519513 2489 i915_gem_retire_requests_ring(ring);
b29c19b6
CW
2490 idle &= list_empty(&ring->request_list);
2491 }
2492
2493 if (idle)
2494 mod_delayed_work(dev_priv->wq,
2495 &dev_priv->mm.idle_work,
2496 msecs_to_jiffies(100));
2497
2498 return idle;
b09a1fec
CW
2499}
2500
75ef9da2 2501static void
673a394b
EA
2502i915_gem_retire_work_handler(struct work_struct *work)
2503{
b29c19b6
CW
2504 struct drm_i915_private *dev_priv =
2505 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2506 struct drm_device *dev = dev_priv->dev;
0a58705b 2507 bool idle;
673a394b 2508
891b48cf 2509 /* Come back later if the device is busy... */
b29c19b6
CW
2510 idle = false;
2511 if (mutex_trylock(&dev->struct_mutex)) {
2512 idle = i915_gem_retire_requests(dev);
2513 mutex_unlock(&dev->struct_mutex);
673a394b 2514 }
b29c19b6 2515 if (!idle)
bcb45086
CW
2516 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2517 round_jiffies_up_relative(HZ));
b29c19b6 2518}
0a58705b 2519
b29c19b6
CW
2520static void
2521i915_gem_idle_work_handler(struct work_struct *work)
2522{
2523 struct drm_i915_private *dev_priv =
2524 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2525
2526 intel_mark_idle(dev_priv->dev);
673a394b
EA
2527}
2528
30dfebf3
DV
2529/**
2530 * Ensures that an object will eventually get non-busy by flushing any required
 2531 * write domains, emitting any outstanding lazy request and retiring
2532 * completed requests.
2533 */
2534static int
2535i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2536{
2537 int ret;
2538
2539 if (obj->active) {
0201f1ec 2540 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
30dfebf3
DV
2541 if (ret)
2542 return ret;
2543
30dfebf3
DV
2544 i915_gem_retire_requests_ring(obj->ring);
2545 }
2546
2547 return 0;
2548}
2549
23ba4fd0
BW
2550/**
2551 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2552 * @DRM_IOCTL_ARGS: standard ioctl arguments
2553 *
2554 * Returns 0 if successful, else an error is returned with the remaining time in
2555 * the timeout parameter.
2556 * -ETIME: object is still busy after timeout
2557 * -ERESTARTSYS: signal interrupted the wait
 2558 * -ENOENT: object doesn't exist
2559 * Also possible, but rare:
2560 * -EAGAIN: GPU wedged
2561 * -ENOMEM: damn
2562 * -ENODEV: Internal IRQ fail
2563 * -E?: The add request failed
2564 *
2565 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2566 * non-zero timeout parameter the wait ioctl will wait for the given number of
2567 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2568 * without holding struct_mutex the object may become re-busied before this
 2569 * function completes. A similar but shorter race condition exists in the busy
2570 * ioctl
2571 */
2572int
2573i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2574{
f69061be 2575 drm_i915_private_t *dev_priv = dev->dev_private;
23ba4fd0
BW
2576 struct drm_i915_gem_wait *args = data;
2577 struct drm_i915_gem_object *obj;
2578 struct intel_ring_buffer *ring = NULL;
eac1f14f 2579 struct timespec timeout_stack, *timeout = NULL;
f69061be 2580 unsigned reset_counter;
23ba4fd0
BW
2581 u32 seqno = 0;
2582 int ret = 0;
2583
eac1f14f
BW
2584 if (args->timeout_ns >= 0) {
2585 timeout_stack = ns_to_timespec(args->timeout_ns);
2586 timeout = &timeout_stack;
2587 }
23ba4fd0
BW
2588
2589 ret = i915_mutex_lock_interruptible(dev);
2590 if (ret)
2591 return ret;
2592
2593 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2594 if (&obj->base == NULL) {
2595 mutex_unlock(&dev->struct_mutex);
2596 return -ENOENT;
2597 }
2598
30dfebf3
DV
2599 /* Need to make sure the object gets inactive eventually. */
2600 ret = i915_gem_object_flush_active(obj);
23ba4fd0
BW
2601 if (ret)
2602 goto out;
2603
2604 if (obj->active) {
0201f1ec 2605 seqno = obj->last_read_seqno;
23ba4fd0
BW
2606 ring = obj->ring;
2607 }
2608
2609 if (seqno == 0)
2610 goto out;
2611
23ba4fd0
BW
2612 /* Do this after OLR check to make sure we make forward progress polling
2613 * on this IOCTL with a 0 timeout (like busy ioctl)
2614 */
2615 if (!args->timeout_ns) {
2616 ret = -ETIME;
2617 goto out;
2618 }
2619
2620 drm_gem_object_unreference(&obj->base);
f69061be 2621 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
23ba4fd0
BW
2622 mutex_unlock(&dev->struct_mutex);
2623
b29c19b6 2624 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
4f42f4ef 2625 if (timeout)
eac1f14f 2626 args->timeout_ns = timespec_to_ns(timeout);
23ba4fd0
BW
2627 return ret;
2628
2629out:
2630 drm_gem_object_unreference(&obj->base);
2631 mutex_unlock(&dev->struct_mutex);
2632 return ret;
2633}
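
As a point of reference, a hedged userspace sketch of driving this ioctl (struct drm_i915_gem_wait and DRM_IOCTL_I915_GEM_WAIT from i915_drm.h, drmIoctl() from libdrm; bo_wait() and its arguments are illustrative):

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Wait for a buffer to become idle.  timeout_ns == 0 degenerates into a
 * non-blocking busy check, as the comment above notes; on return the
 * kernel has updated timeout_ns with the time remaining.
 */
static int bo_wait(int fd, uint32_t bo_handle, int64_t timeout_ns)
{
	struct drm_i915_gem_wait wait = {
		.bo_handle = bo_handle,
		.timeout_ns = timeout_ns,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
		return -errno;	/* -ETIME if still busy after the timeout */

	return 0;
}
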
2634
5816d648
BW
2635/**
2636 * i915_gem_object_sync - sync an object to a ring.
2637 *
2638 * @obj: object which may be in use on another ring.
2639 * @to: ring we wish to use the object on. May be NULL.
2640 *
2641 * This code is meant to abstract object synchronization with the GPU.
2642 * Calling with NULL implies synchronizing the object with the CPU
2643 * rather than a particular GPU ring.
2644 *
2645 * Returns 0 if successful, else propagates up the lower layer error.
2646 */
2911a35b
BW
2647int
2648i915_gem_object_sync(struct drm_i915_gem_object *obj,
2649 struct intel_ring_buffer *to)
2650{
2651 struct intel_ring_buffer *from = obj->ring;
2652 u32 seqno;
2653 int ret, idx;
2654
2655 if (from == NULL || to == from)
2656 return 0;
2657
5816d648 2658 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
0201f1ec 2659 return i915_gem_object_wait_rendering(obj, false);
2911a35b
BW
2660
2661 idx = intel_ring_sync_index(from, to);
2662
0201f1ec 2663 seqno = obj->last_read_seqno;
2911a35b
BW
2664 if (seqno <= from->sync_seqno[idx])
2665 return 0;
2666
b4aca010
BW
2667 ret = i915_gem_check_olr(obj->ring, seqno);
2668 if (ret)
2669 return ret;
2911a35b 2670
b52b89da 2671 trace_i915_gem_ring_sync_to(from, to, seqno);
1500f7ea 2672 ret = to->sync_to(to, from, seqno);
e3a5a225 2673 if (!ret)
7b01e260
MK
2674 /* We use last_read_seqno because sync_to()
2675 * might have just caused seqno wrap under
2676 * the radar.
2677 */
2678 from->sync_seqno[idx] = obj->last_read_seqno;
2911a35b 2679
e3a5a225 2680 return ret;
2911a35b
BW
2681}
2682
b5ffc9bc
CW
2683static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2684{
2685 u32 old_write_domain, old_read_domains;
2686
b5ffc9bc
CW
2687 /* Force a pagefault for domain tracking on next user access */
2688 i915_gem_release_mmap(obj);
2689
b97c3d9c
KP
2690 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2691 return;
2692
97c809fd
CW
2693 /* Wait for any direct GTT access to complete */
2694 mb();
2695
b5ffc9bc
CW
2696 old_read_domains = obj->base.read_domains;
2697 old_write_domain = obj->base.write_domain;
2698
2699 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2700 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2701
2702 trace_i915_gem_object_change_domain(obj,
2703 old_read_domains,
2704 old_write_domain);
2705}
2706
07fe0b12 2707int i915_vma_unbind(struct i915_vma *vma)
673a394b 2708{
07fe0b12 2709 struct drm_i915_gem_object *obj = vma->obj;
7bddb01f 2710 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
43e28f09 2711 int ret;
673a394b 2712
07fe0b12 2713 if (list_empty(&vma->vma_link))
673a394b
EA
2714 return 0;
2715
0ff501cb
DV
2716 if (!drm_mm_node_allocated(&vma->node)) {
2717 i915_gem_vma_destroy(vma);
0ff501cb
DV
2718 return 0;
2719 }
433544bd 2720
d7f46fc4 2721 if (vma->pin_count)
31d8d651 2722 return -EBUSY;
673a394b 2723
c4670ad0
CW
2724 BUG_ON(obj->pages == NULL);
2725
a8198eea 2726 ret = i915_gem_object_finish_gpu(obj);
1488fc08 2727 if (ret)
a8198eea
CW
2728 return ret;
2729 /* Continue on if we fail due to EIO, the GPU is hung so we
 2730 * should be safe and we need to clean up or else we might
2731 * cause memory corruption through use-after-free.
2732 */
2733
b5ffc9bc 2734 i915_gem_object_finish_gtt(obj);
5323fd04 2735
96b47b65 2736 /* release the fence reg _after_ flushing */
d9e86c0e 2737 ret = i915_gem_object_put_fence(obj);
1488fc08 2738 if (ret)
d9e86c0e 2739 return ret;
96b47b65 2740
07fe0b12 2741 trace_i915_vma_unbind(vma);
db53a302 2742
6f65e29a
BW
2743 vma->unbind_vma(vma);
2744
74163907 2745 i915_gem_gtt_finish_object(obj);
7bddb01f 2746
ca191b13 2747 list_del(&vma->mm_list);
75e9e915 2748 /* Avoid an unnecessary call to unbind on rebind. */
5cacaac7
BW
2749 if (i915_is_ggtt(vma->vm))
2750 obj->map_and_fenceable = true;
673a394b 2751
2f633156
BW
2752 drm_mm_remove_node(&vma->node);
2753 i915_gem_vma_destroy(vma);
2754
2755 /* Since the unbound list is global, only move to that list if
b93dab6e 2756 * no more VMAs exist. */
2f633156
BW
2757 if (list_empty(&obj->vma_list))
2758 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
673a394b 2759
70903c3b
CW
 2760 /* And finally, now that the object is completely decoupled from this vma,
2761 * we can drop its hold on the backing storage and allow it to be
2762 * reaped by the shrinker.
2763 */
2764 i915_gem_object_unpin_pages(obj);
2765
88241785 2766 return 0;
54cf91dc
CW
2767}
2768
07fe0b12
BW
2769/**
2770 * Unbinds an object from the global GTT aperture.
2771 */
2772int
2773i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2774{
2775 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2776 struct i915_address_space *ggtt = &dev_priv->gtt.base;
2777
58e73e15 2778 if (!i915_gem_obj_ggtt_bound(obj))
07fe0b12
BW
2779 return 0;
2780
d7f46fc4 2781 if (i915_gem_obj_to_ggtt(obj)->pin_count)
07fe0b12
BW
2782 return -EBUSY;
2783
2784 BUG_ON(obj->pages == NULL);
2785
2786 return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2787}
2788
b2da9fe5 2789int i915_gpu_idle(struct drm_device *dev)
4df2faf4
DV
2790{
2791 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 2792 struct intel_ring_buffer *ring;
1ec14ad3 2793 int ret, i;
4df2faf4 2794
4df2faf4 2795 /* Flush everything onto the inactive list. */
b4519513 2796 for_each_ring(ring, dev_priv, i) {
41bde553 2797 ret = i915_switch_context(ring, NULL, ring->default_context);
b6c7488d
BW
2798 if (ret)
2799 return ret;
2800
3e960501 2801 ret = intel_ring_idle(ring);
1ec14ad3
CW
2802 if (ret)
2803 return ret;
2804 }
4df2faf4 2805
8a1a49f9 2806 return 0;
4df2faf4
DV
2807}
2808
9ce079e4
CW
2809static void i965_write_fence_reg(struct drm_device *dev, int reg,
2810 struct drm_i915_gem_object *obj)
de151cf6 2811{
de151cf6 2812 drm_i915_private_t *dev_priv = dev->dev_private;
56c844e5
ID
2813 int fence_reg;
2814 int fence_pitch_shift;
de151cf6 2815
56c844e5
ID
2816 if (INTEL_INFO(dev)->gen >= 6) {
2817 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2818 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2819 } else {
2820 fence_reg = FENCE_REG_965_0;
2821 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2822 }
2823
d18b9619
CW
2824 fence_reg += reg * 8;
2825
2826 /* To w/a incoherency with non-atomic 64-bit register updates,
2827 * we split the 64-bit update into two 32-bit writes. In order
2828 * for a partial fence not to be evaluated between writes, we
 2829 * precede the update with a write to turn off the fence register,
2830 * and only enable the fence as the last step.
2831 *
2832 * For extra levels of paranoia, we make sure each step lands
2833 * before applying the next step.
2834 */
2835 I915_WRITE(fence_reg, 0);
2836 POSTING_READ(fence_reg);
2837
9ce079e4 2838 if (obj) {
f343c5f6 2839 u32 size = i915_gem_obj_ggtt_size(obj);
d18b9619 2840 uint64_t val;
de151cf6 2841
f343c5f6 2842 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
9ce079e4 2843 0xfffff000) << 32;
f343c5f6 2844 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
56c844e5 2845 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
9ce079e4
CW
2846 if (obj->tiling_mode == I915_TILING_Y)
2847 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2848 val |= I965_FENCE_REG_VALID;
c6642782 2849
d18b9619
CW
2850 I915_WRITE(fence_reg + 4, val >> 32);
2851 POSTING_READ(fence_reg + 4);
2852
2853 I915_WRITE(fence_reg + 0, val);
2854 POSTING_READ(fence_reg);
2855 } else {
2856 I915_WRITE(fence_reg + 4, 0);
2857 POSTING_READ(fence_reg + 4);
2858 }
de151cf6
JB
2859}
2860
9ce079e4
CW
2861static void i915_write_fence_reg(struct drm_device *dev, int reg,
2862 struct drm_i915_gem_object *obj)
de151cf6 2863{
de151cf6 2864 drm_i915_private_t *dev_priv = dev->dev_private;
9ce079e4 2865 u32 val;
de151cf6 2866
9ce079e4 2867 if (obj) {
f343c5f6 2868 u32 size = i915_gem_obj_ggtt_size(obj);
9ce079e4
CW
2869 int pitch_val;
2870 int tile_width;
c6642782 2871
f343c5f6 2872 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
9ce079e4 2873 (size & -size) != size ||
f343c5f6
BW
2874 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2875 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2876 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
c6642782 2877
9ce079e4
CW
2878 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2879 tile_width = 128;
2880 else
2881 tile_width = 512;
2882
2883 /* Note: pitch better be a power of two tile widths */
2884 pitch_val = obj->stride / tile_width;
2885 pitch_val = ffs(pitch_val) - 1;
2886
f343c5f6 2887 val = i915_gem_obj_ggtt_offset(obj);
9ce079e4
CW
2888 if (obj->tiling_mode == I915_TILING_Y)
2889 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2890 val |= I915_FENCE_SIZE_BITS(size);
2891 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2892 val |= I830_FENCE_REG_VALID;
2893 } else
2894 val = 0;
2895
2896 if (reg < 8)
2897 reg = FENCE_REG_830_0 + reg * 4;
2898 else
2899 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2900
2901 I915_WRITE(reg, val);
2902 POSTING_READ(reg);
de151cf6
JB
2903}
2904
9ce079e4
CW
2905static void i830_write_fence_reg(struct drm_device *dev, int reg,
2906 struct drm_i915_gem_object *obj)
de151cf6 2907{
de151cf6 2908 drm_i915_private_t *dev_priv = dev->dev_private;
de151cf6 2909 uint32_t val;
de151cf6 2910
9ce079e4 2911 if (obj) {
f343c5f6 2912 u32 size = i915_gem_obj_ggtt_size(obj);
9ce079e4 2913 uint32_t pitch_val;
de151cf6 2914
f343c5f6 2915 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
9ce079e4 2916 (size & -size) != size ||
f343c5f6
BW
2917 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2918 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2919 i915_gem_obj_ggtt_offset(obj), size);
e76a16de 2920
9ce079e4
CW
2921 pitch_val = obj->stride / 128;
2922 pitch_val = ffs(pitch_val) - 1;
de151cf6 2923
f343c5f6 2924 val = i915_gem_obj_ggtt_offset(obj);
9ce079e4
CW
2925 if (obj->tiling_mode == I915_TILING_Y)
2926 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2927 val |= I830_FENCE_SIZE_BITS(size);
2928 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2929 val |= I830_FENCE_REG_VALID;
2930 } else
2931 val = 0;
c6642782 2932
9ce079e4
CW
2933 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2934 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2935}
2936
d0a57789
CW
2937inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2938{
2939 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2940}
2941
9ce079e4
CW
2942static void i915_gem_write_fence(struct drm_device *dev, int reg,
2943 struct drm_i915_gem_object *obj)
2944{
d0a57789
CW
2945 struct drm_i915_private *dev_priv = dev->dev_private;
2946
2947 /* Ensure that all CPU reads are completed before installing a fence
2948 * and all writes before removing the fence.
2949 */
2950 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2951 mb();
2952
94a335db
DV
2953 WARN(obj && (!obj->stride || !obj->tiling_mode),
2954 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2955 obj->stride, obj->tiling_mode);
2956
9ce079e4 2957 switch (INTEL_INFO(dev)->gen) {
5ab31333 2958 case 8:
9ce079e4 2959 case 7:
56c844e5 2960 case 6:
9ce079e4
CW
2961 case 5:
2962 case 4: i965_write_fence_reg(dev, reg, obj); break;
2963 case 3: i915_write_fence_reg(dev, reg, obj); break;
2964 case 2: i830_write_fence_reg(dev, reg, obj); break;
7dbf9d6e 2965 default: BUG();
9ce079e4 2966 }
d0a57789
CW
2967
2968 /* And similarly be paranoid that no direct access to this region
2969 * is reordered to before the fence is installed.
2970 */
2971 if (i915_gem_object_needs_mb(obj))
2972 mb();
de151cf6
JB
2973}
2974
61050808
CW
2975static inline int fence_number(struct drm_i915_private *dev_priv,
2976 struct drm_i915_fence_reg *fence)
2977{
2978 return fence - dev_priv->fence_regs;
2979}
2980
2981static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2982 struct drm_i915_fence_reg *fence,
2983 bool enable)
2984{
2dc8aae0 2985 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
46a0b638
CW
2986 int reg = fence_number(dev_priv, fence);
2987
2988 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
61050808
CW
2989
2990 if (enable) {
46a0b638 2991 obj->fence_reg = reg;
61050808
CW
2992 fence->obj = obj;
2993 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2994 } else {
2995 obj->fence_reg = I915_FENCE_REG_NONE;
2996 fence->obj = NULL;
2997 list_del_init(&fence->lru_list);
2998 }
94a335db 2999 obj->fence_dirty = false;
61050808
CW
3000}
3001
d9e86c0e 3002static int
d0a57789 3003i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
d9e86c0e 3004{
1c293ea3 3005 if (obj->last_fenced_seqno) {
86d5bc37 3006 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
18991845
CW
3007 if (ret)
3008 return ret;
d9e86c0e
CW
3009
3010 obj->last_fenced_seqno = 0;
d9e86c0e
CW
3011 }
3012
86d5bc37 3013 obj->fenced_gpu_access = false;
d9e86c0e
CW
3014 return 0;
3015}
3016
3017int
3018i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3019{
61050808 3020 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
f9c513e9 3021 struct drm_i915_fence_reg *fence;
d9e86c0e
CW
3022 int ret;
3023
d0a57789 3024 ret = i915_gem_object_wait_fence(obj);
d9e86c0e
CW
3025 if (ret)
3026 return ret;
3027
61050808
CW
3028 if (obj->fence_reg == I915_FENCE_REG_NONE)
3029 return 0;
d9e86c0e 3030
f9c513e9
CW
3031 fence = &dev_priv->fence_regs[obj->fence_reg];
3032
61050808 3033 i915_gem_object_fence_lost(obj);
f9c513e9 3034 i915_gem_object_update_fence(obj, fence, false);
d9e86c0e
CW
3035
3036 return 0;
3037}
3038
3039static struct drm_i915_fence_reg *
a360bb1a 3040i915_find_fence_reg(struct drm_device *dev)
ae3db24a 3041{
ae3db24a 3042 struct drm_i915_private *dev_priv = dev->dev_private;
8fe301ad 3043 struct drm_i915_fence_reg *reg, *avail;
d9e86c0e 3044 int i;
ae3db24a
DV
3045
3046 /* First try to find a free reg */
d9e86c0e 3047 avail = NULL;
ae3db24a
DV
3048 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3049 reg = &dev_priv->fence_regs[i];
3050 if (!reg->obj)
d9e86c0e 3051 return reg;
ae3db24a 3052
1690e1eb 3053 if (!reg->pin_count)
d9e86c0e 3054 avail = reg;
ae3db24a
DV
3055 }
3056
d9e86c0e 3057 if (avail == NULL)
5dce5b93 3058 goto deadlock;
ae3db24a
DV
3059
3060 /* None available, try to steal one or wait for a user to finish */
d9e86c0e 3061 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
1690e1eb 3062 if (reg->pin_count)
ae3db24a
DV
3063 continue;
3064
8fe301ad 3065 return reg;
ae3db24a
DV
3066 }
3067
5dce5b93
CW
3068deadlock:
3069 /* Wait for completion of pending flips which consume fences */
3070 if (intel_has_pending_fb_unpin(dev))
3071 return ERR_PTR(-EAGAIN);
3072
3073 return ERR_PTR(-EDEADLK);
ae3db24a
DV
3074}
3075
de151cf6 3076/**
9a5a53b3 3077 * i915_gem_object_get_fence - set up fencing for an object
de151cf6
JB
3078 * @obj: object to map through a fence reg
3079 *
3080 * When mapping objects through the GTT, userspace wants to be able to write
3081 * to them without having to worry about swizzling if the object is tiled.
de151cf6
JB
3082 * This function walks the fence regs looking for a free one for @obj,
3083 * stealing one if it can't find any.
3084 *
3085 * It then sets up the reg based on the object's properties: address, pitch
3086 * and tiling format.
9a5a53b3
CW
3087 *
3088 * For an untiled surface, this removes any existing fence.
de151cf6 3089 */
8c4b8c3f 3090int
06d98131 3091i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
de151cf6 3092{
05394f39 3093 struct drm_device *dev = obj->base.dev;
79e53945 3094 struct drm_i915_private *dev_priv = dev->dev_private;
14415745 3095 bool enable = obj->tiling_mode != I915_TILING_NONE;
d9e86c0e 3096 struct drm_i915_fence_reg *reg;
ae3db24a 3097 int ret;
de151cf6 3098
14415745
CW
3099 /* Have we updated the tiling parameters upon the object and so
3100 * will need to serialise the write to the associated fence register?
3101 */
5d82e3e6 3102 if (obj->fence_dirty) {
d0a57789 3103 ret = i915_gem_object_wait_fence(obj);
14415745
CW
3104 if (ret)
3105 return ret;
3106 }
9a5a53b3 3107
d9e86c0e 3108 /* Just update our place in the LRU if our fence is getting reused. */
05394f39
CW
3109 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3110 reg = &dev_priv->fence_regs[obj->fence_reg];
5d82e3e6 3111 if (!obj->fence_dirty) {
14415745
CW
3112 list_move_tail(&reg->lru_list,
3113 &dev_priv->mm.fence_list);
3114 return 0;
3115 }
3116 } else if (enable) {
3117 reg = i915_find_fence_reg(dev);
5dce5b93
CW
3118 if (IS_ERR(reg))
3119 return PTR_ERR(reg);
d9e86c0e 3120
14415745
CW
3121 if (reg->obj) {
3122 struct drm_i915_gem_object *old = reg->obj;
3123
d0a57789 3124 ret = i915_gem_object_wait_fence(old);
29c5a587
CW
3125 if (ret)
3126 return ret;
3127
14415745 3128 i915_gem_object_fence_lost(old);
29c5a587 3129 }
14415745 3130 } else
a09ba7fa 3131 return 0;
a09ba7fa 3132
14415745 3133 i915_gem_object_update_fence(obj, reg, enable);
14415745 3134
9ce079e4 3135 return 0;
de151cf6
JB
3136}
3137
42d6ab48
CW
3138static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3139 struct drm_mm_node *gtt_space,
3140 unsigned long cache_level)
3141{
3142 struct drm_mm_node *other;
3143
3144 /* On non-LLC machines we have to be careful when putting differing
3145 * types of snoopable memory together to avoid the prefetcher
4239ca77 3146 * crossing memory domains and dying.
42d6ab48
CW
3147 */
3148 if (HAS_LLC(dev))
3149 return true;
3150
c6cfb325 3151 if (!drm_mm_node_allocated(gtt_space))
42d6ab48
CW
3152 return true;
3153
3154 if (list_empty(&gtt_space->node_list))
3155 return true;
3156
3157 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3158 if (other->allocated && !other->hole_follows && other->color != cache_level)
3159 return false;
3160
3161 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3162 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3163 return false;
3164
3165 return true;
3166}
3167
3168static void i915_gem_verify_gtt(struct drm_device *dev)
3169{
3170#if WATCH_GTT
3171 struct drm_i915_private *dev_priv = dev->dev_private;
3172 struct drm_i915_gem_object *obj;
3173 int err = 0;
3174
35c20a60 3175 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
42d6ab48
CW
3176 if (obj->gtt_space == NULL) {
3177 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3178 err++;
3179 continue;
3180 }
3181
3182 if (obj->cache_level != obj->gtt_space->color) {
3183 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
f343c5f6
BW
3184 i915_gem_obj_ggtt_offset(obj),
3185 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
42d6ab48
CW
3186 obj->cache_level,
3187 obj->gtt_space->color);
3188 err++;
3189 continue;
3190 }
3191
3192 if (!i915_gem_valid_gtt_space(dev,
3193 obj->gtt_space,
3194 obj->cache_level)) {
3195 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
f343c5f6
BW
3196 i915_gem_obj_ggtt_offset(obj),
3197 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
42d6ab48
CW
3198 obj->cache_level);
3199 err++;
3200 continue;
3201 }
3202 }
3203
3204 WARN_ON(err);
3205#endif
3206}
3207
673a394b
EA
3208/**
3209 * Finds free space in the GTT aperture and binds the object there.
3210 */
3211static int
07fe0b12
BW
3212i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3213 struct i915_address_space *vm,
3214 unsigned alignment,
1ec9e26d 3215 unsigned flags)
673a394b 3216{
05394f39 3217 struct drm_device *dev = obj->base.dev;
673a394b 3218 drm_i915_private_t *dev_priv = dev->dev_private;
5e783301 3219 u32 size, fence_size, fence_alignment, unfenced_alignment;
07fe0b12 3220 size_t gtt_max =
1ec9e26d 3221 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
2f633156 3222 struct i915_vma *vma;
07f73f69 3223 int ret;
673a394b 3224
e28f8711
CW
3225 fence_size = i915_gem_get_gtt_size(dev,
3226 obj->base.size,
3227 obj->tiling_mode);
3228 fence_alignment = i915_gem_get_gtt_alignment(dev,
3229 obj->base.size,
d865110c 3230 obj->tiling_mode, true);
e28f8711 3231 unfenced_alignment =
d865110c 3232 i915_gem_get_gtt_alignment(dev,
1ec9e26d
DV
3233 obj->base.size,
3234 obj->tiling_mode, false);
a00b10c3 3235
673a394b 3236 if (alignment == 0)
1ec9e26d 3237 alignment = flags & PIN_MAPPABLE ? fence_alignment :
5e783301 3238 unfenced_alignment;
1ec9e26d 3239 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
bd9b6a4e 3240 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
673a394b
EA
3241 return -EINVAL;
3242 }
3243
1ec9e26d 3244 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
a00b10c3 3245
654fc607
CW
3246 /* If the object is bigger than the entire aperture, reject it early
3247 * before evicting everything in a vain attempt to find space.
3248 */
0a9ae0d7 3249 if (obj->base.size > gtt_max) {
bd9b6a4e 3250 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
a36689cb 3251 obj->base.size,
1ec9e26d 3252 flags & PIN_MAPPABLE ? "mappable" : "total",
0a9ae0d7 3253 gtt_max);
654fc607
CW
3254 return -E2BIG;
3255 }
3256
37e680a1 3257 ret = i915_gem_object_get_pages(obj);
6c085a72
CW
3258 if (ret)
3259 return ret;
3260
fbdda6fb
CW
3261 i915_gem_object_pin_pages(obj);
3262
accfef2e 3263 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
db473b36 3264 if (IS_ERR(vma)) {
bc6bc15b
DV
3265 ret = PTR_ERR(vma);
3266 goto err_unpin;
2f633156
BW
3267 }
3268
0a9ae0d7 3269search_free:
07fe0b12 3270 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
0a9ae0d7 3271 size, alignment,
31e5d7c6
DH
3272 obj->cache_level, 0, gtt_max,
3273 DRM_MM_SEARCH_DEFAULT);
dc9dd7a2 3274 if (ret) {
f6cd1f15 3275 ret = i915_gem_evict_something(dev, vm, size, alignment,
1ec9e26d 3276 obj->cache_level, flags);
dc9dd7a2
CW
3277 if (ret == 0)
3278 goto search_free;
9731129c 3279
bc6bc15b 3280 goto err_free_vma;
673a394b 3281 }
2f633156 3282 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
c6cfb325 3283 obj->cache_level))) {
2f633156 3284 ret = -EINVAL;
bc6bc15b 3285 goto err_remove_node;
673a394b
EA
3286 }
3287
74163907 3288 ret = i915_gem_gtt_prepare_object(obj);
2f633156 3289 if (ret)
bc6bc15b 3290 goto err_remove_node;
673a394b 3291
35c20a60 3292 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
ca191b13 3293 list_add_tail(&vma->mm_list, &vm->inactive_list);
bf1a1092 3294
4bd561b3
BW
3295 if (i915_is_ggtt(vm)) {
3296 bool mappable, fenceable;
a00b10c3 3297
49987099
DV
3298 fenceable = (vma->node.size == fence_size &&
3299 (vma->node.start & (fence_alignment - 1)) == 0);
4bd561b3 3300
49987099
DV
3301 mappable = (vma->node.start + obj->base.size <=
3302 dev_priv->gtt.mappable_end);
a00b10c3 3303
5cacaac7 3304 obj->map_and_fenceable = mappable && fenceable;
4bd561b3 3305 }
75e9e915 3306
1ec9e26d 3307 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
75e9e915 3308
1ec9e26d 3309 trace_i915_vma_bind(vma, flags);
42d6ab48 3310 i915_gem_verify_gtt(dev);
673a394b 3311 return 0;
2f633156 3312
bc6bc15b 3313err_remove_node:
6286ef9b 3314 drm_mm_remove_node(&vma->node);
bc6bc15b 3315err_free_vma:
2f633156 3316 i915_gem_vma_destroy(vma);
bc6bc15b 3317err_unpin:
2f633156 3318 i915_gem_object_unpin_pages(obj);
2f633156 3319 return ret;
673a394b
EA
3320}
3321
000433b6 3322bool
2c22569b
CW
3323i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3324 bool force)
673a394b 3325{
673a394b
EA
3326 /* If we don't have a page list set up, then we're not pinned
3327 * to GPU, and we can ignore the cache flush because it'll happen
3328 * again at bind time.
3329 */
05394f39 3330 if (obj->pages == NULL)
000433b6 3331 return false;
673a394b 3332
769ce464
ID
3333 /*
3334 * Stolen memory is always coherent with the GPU as it is explicitly
3335 * marked as wc by the system, or the system is cache-coherent.
3336 */
3337 if (obj->stolen)
000433b6 3338 return false;
769ce464 3339
9c23f7fc
CW
3340 /* If the GPU is snooping the contents of the CPU cache,
3341 * we do not need to manually clear the CPU cache lines. However,
3342 * the caches are only snooped when the render cache is
3343 * flushed/invalidated. As we always have to emit invalidations
3344 * and flushes when moving into and out of the RENDER domain, correct
3345 * snooping behaviour occurs naturally as the result of our domain
3346 * tracking.
3347 */
2c22569b 3348 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
000433b6 3349 return false;
9c23f7fc 3350
1c5d22f7 3351 trace_i915_gem_object_clflush(obj);
9da3da66 3352 drm_clflush_sg(obj->pages);
000433b6
CW
3353
3354 return true;
e47c68e9
EA
3355}
3356
3357/** Flushes the GTT write domain for the object if it's dirty. */
3358static void
05394f39 3359i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
e47c68e9 3360{
1c5d22f7
CW
3361 uint32_t old_write_domain;
3362
05394f39 3363 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
e47c68e9
EA
3364 return;
3365
63256ec5 3366 /* No actual flushing is required for the GTT write domain. Writes
e47c68e9
EA
3367 * to it immediately go to main memory as far as we know, so there's
3368 * no chipset flush. It also doesn't land in render cache.
63256ec5
CW
3369 *
3370 * However, we do have to enforce the order so that all writes through
3371 * the GTT land before any writes to the device, such as updates to
3372 * the GATT itself.
e47c68e9 3373 */
63256ec5
CW
3374 wmb();
3375
05394f39
CW
3376 old_write_domain = obj->base.write_domain;
3377 obj->base.write_domain = 0;
1c5d22f7
CW
3378
3379 trace_i915_gem_object_change_domain(obj,
05394f39 3380 obj->base.read_domains,
1c5d22f7 3381 old_write_domain);
e47c68e9
EA
3382}
3383
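/*
 * [Editor's note] The ordering rule documented above, shown as a hedged
 * userspace analogue rather than driver code: publish all prior
 * (write-combined) stores before the single store the device acts on. A C11
 * release fence plays the role of wmb(); 'payload' and 'doorbell' are
 * hypothetical.
 */
#include <stdatomic.h>
#include <stdint.h>

static uint32_t payload[16];
static _Atomic uint32_t doorbell;

static void sketch_publish_then_signal(void)
{
	for (int i = 0; i < 16; i++)
		payload[i] = i;				/* the "GTT writes"        */

	atomic_thread_fence(memory_order_release);	/* analogue of wmb() above */

	/* the write the device observes, e.g. a GATT/PTE update */
	atomic_store_explicit(&doorbell, 1, memory_order_relaxed);
}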
3384/** Flushes the CPU write domain for the object if it's dirty. */
3385static void
2c22569b
CW
3386i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3387 bool force)
e47c68e9 3388{
1c5d22f7 3389 uint32_t old_write_domain;
e47c68e9 3390
05394f39 3391 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
e47c68e9
EA
3392 return;
3393
000433b6
CW
3394 if (i915_gem_clflush_object(obj, force))
3395 i915_gem_chipset_flush(obj->base.dev);
3396
05394f39
CW
3397 old_write_domain = obj->base.write_domain;
3398 obj->base.write_domain = 0;
1c5d22f7
CW
3399
3400 trace_i915_gem_object_change_domain(obj,
05394f39 3401 obj->base.read_domains,
1c5d22f7 3402 old_write_domain);
e47c68e9
EA
3403}
3404
2ef7eeaa
EA
3405/**
3406 * Moves a single object to the GTT read, and possibly write domain.
3407 *
3408 * This function returns when the move is complete, including waiting on
3409 * flushes to occur.
3410 */
79e53945 3411int
2021746e 3412i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2ef7eeaa 3413{
8325a09d 3414 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
1c5d22f7 3415 uint32_t old_write_domain, old_read_domains;
e47c68e9 3416 int ret;
2ef7eeaa 3417
02354392 3418 /* Not valid to be called on unbound objects. */
9843877d 3419 if (!i915_gem_obj_bound_any(obj))
02354392
EA
3420 return -EINVAL;
3421
8d7e3de1
CW
3422 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3423 return 0;
3424
0201f1ec 3425 ret = i915_gem_object_wait_rendering(obj, !write);
88241785
CW
3426 if (ret)
3427 return ret;
3428
2c22569b 3429 i915_gem_object_flush_cpu_write_domain(obj, false);
1c5d22f7 3430
d0a57789
CW
3431 /* Serialise direct access to this object with the barriers for
3432 * coherent writes from the GPU, by effectively invalidating the
3433 * GTT domain upon first access.
3434 */
3435 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3436 mb();
3437
05394f39
CW
3438 old_write_domain = obj->base.write_domain;
3439 old_read_domains = obj->base.read_domains;
1c5d22f7 3440
e47c68e9
EA
3441 /* It should now be out of any other write domains, and we can update
3442 * the domain values for our changes.
3443 */
05394f39
CW
3444 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3445 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
e47c68e9 3446 if (write) {
05394f39
CW
3447 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3448 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3449 obj->dirty = 1;
2ef7eeaa
EA
3450 }
3451
1c5d22f7
CW
3452 trace_i915_gem_object_change_domain(obj,
3453 old_read_domains,
3454 old_write_domain);
3455
8325a09d 3456 /* And bump the LRU for this access */
ca191b13 3457 if (i915_gem_object_is_inactive(obj)) {
5c2abbea 3458 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
ca191b13
BW
3459 if (vma)
3460 list_move_tail(&vma->mm_list,
3461 &dev_priv->gtt.base.inactive_list);
3462
3463 }
8325a09d 3464
e47c68e9
EA
3465 return 0;
3466}
3467
e4ffd173
CW
3468int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3469 enum i915_cache_level cache_level)
3470{
7bddb01f 3471 struct drm_device *dev = obj->base.dev;
3089c6f2 3472 struct i915_vma *vma;
e4ffd173
CW
3473 int ret;
3474
3475 if (obj->cache_level == cache_level)
3476 return 0;
3477
d7f46fc4 3478 if (i915_gem_obj_is_pinned(obj)) {
e4ffd173
CW
 3479 DRM_DEBUG("cannot change the cache level of pinned objects\n");
3480 return -EBUSY;
3481 }
3482
3089c6f2
BW
3483 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3484 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
07fe0b12 3485 ret = i915_vma_unbind(vma);
3089c6f2
BW
3486 if (ret)
3487 return ret;
3488
3489 break;
3490 }
42d6ab48
CW
3491 }
3492
3089c6f2 3493 if (i915_gem_obj_bound_any(obj)) {
e4ffd173
CW
3494 ret = i915_gem_object_finish_gpu(obj);
3495 if (ret)
3496 return ret;
3497
3498 i915_gem_object_finish_gtt(obj);
3499
3500 /* Before SandyBridge, you could not use tiling or fence
3501 * registers with snooped memory, so relinquish any fences
3502 * currently pointing to our region in the aperture.
3503 */
42d6ab48 3504 if (INTEL_INFO(dev)->gen < 6) {
e4ffd173
CW
3505 ret = i915_gem_object_put_fence(obj);
3506 if (ret)
3507 return ret;
3508 }
3509
6f65e29a
BW
3510 list_for_each_entry(vma, &obj->vma_list, vma_link)
3511 vma->bind_vma(vma, cache_level, 0);
e4ffd173
CW
3512 }
3513
2c22569b
CW
3514 list_for_each_entry(vma, &obj->vma_list, vma_link)
3515 vma->node.color = cache_level;
3516 obj->cache_level = cache_level;
3517
3518 if (cpu_write_needs_clflush(obj)) {
e4ffd173
CW
3519 u32 old_read_domains, old_write_domain;
3520
3521 /* If we're coming from LLC cached, then we haven't
3522 * actually been tracking whether the data is in the
3523 * CPU cache or not, since we only allow one bit set
3524 * in obj->write_domain and have been skipping the clflushes.
3525 * Just set it to the CPU cache for now.
3526 */
3527 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
e4ffd173
CW
3528
3529 old_read_domains = obj->base.read_domains;
3530 old_write_domain = obj->base.write_domain;
3531
3532 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3533 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3534
3535 trace_i915_gem_object_change_domain(obj,
3536 old_read_domains,
3537 old_write_domain);
3538 }
3539
42d6ab48 3540 i915_gem_verify_gtt(dev);
e4ffd173
CW
3541 return 0;
3542}
3543

199adf40
BW
3544int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3545 struct drm_file *file)
e6994aee 3546{
199adf40 3547 struct drm_i915_gem_caching *args = data;
e6994aee
CW
3548 struct drm_i915_gem_object *obj;
3549 int ret;
3550
3551 ret = i915_mutex_lock_interruptible(dev);
3552 if (ret)
3553 return ret;
3554
3555 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3556 if (&obj->base == NULL) {
3557 ret = -ENOENT;
3558 goto unlock;
3559 }
3560
651d794f
CW
3561 switch (obj->cache_level) {
3562 case I915_CACHE_LLC:
3563 case I915_CACHE_L3_LLC:
3564 args->caching = I915_CACHING_CACHED;
3565 break;
3566
4257d3ba
CW
3567 case I915_CACHE_WT:
3568 args->caching = I915_CACHING_DISPLAY;
3569 break;
3570
651d794f
CW
3571 default:
3572 args->caching = I915_CACHING_NONE;
3573 break;
3574 }
e6994aee
CW
3575
3576 drm_gem_object_unreference(&obj->base);
3577unlock:
3578 mutex_unlock(&dev->struct_mutex);
3579 return ret;
3580}
3581
199adf40
BW
3582int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3583 struct drm_file *file)
e6994aee 3584{
199adf40 3585 struct drm_i915_gem_caching *args = data;
e6994aee
CW
3586 struct drm_i915_gem_object *obj;
3587 enum i915_cache_level level;
3588 int ret;
3589
199adf40
BW
3590 switch (args->caching) {
3591 case I915_CACHING_NONE:
e6994aee
CW
3592 level = I915_CACHE_NONE;
3593 break;
199adf40 3594 case I915_CACHING_CACHED:
e6994aee
CW
3595 level = I915_CACHE_LLC;
3596 break;
4257d3ba
CW
3597 case I915_CACHING_DISPLAY:
3598 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3599 break;
e6994aee
CW
3600 default:
3601 return -EINVAL;
3602 }
3603
3bc2913e
BW
3604 ret = i915_mutex_lock_interruptible(dev);
3605 if (ret)
3606 return ret;
3607
e6994aee
CW
3608 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3609 if (&obj->base == NULL) {
3610 ret = -ENOENT;
3611 goto unlock;
3612 }
3613
3614 ret = i915_gem_object_set_cache_level(obj, level);
3615
3616 drm_gem_object_unreference(&obj->base);
3617unlock:
3618 mutex_unlock(&dev->struct_mutex);
3619 return ret;
3620}
3621
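/*
 * [Editor's note] A hedged userspace sketch of exercising the two caching
 * ioctls above through libdrm; it assumes an open i915 DRM fd and a valid GEM
 * handle, with error handling trimmed. Struct and ioctl names are taken from
 * the uapi header (i915_drm.h) shipped with this kernel.
 */
#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int sketch_set_then_query_caching(int fd, uint32_t handle)
{
	struct drm_i915_gem_caching arg = {
		.handle = handle,
		.caching = I915_CACHING_CACHED,	/* maps to I915_CACHE_LLC above */
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
		return -errno;

	/* Read it back: the kernel reports CACHED, DISPLAY (WT) or NONE. */
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg))
		return -errno;

	return (int)arg.caching;
}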
cc98b413
CW
3622static bool is_pin_display(struct drm_i915_gem_object *obj)
3623{
3624 /* There are 3 sources that pin objects:
3625 * 1. The display engine (scanouts, sprites, cursors);
3626 * 2. Reservations for execbuffer;
3627 * 3. The user.
3628 *
3629 * We can ignore reservations as we hold the struct_mutex and
3630 * are only called outside of the reservation path. The user
3631 * can only increment pin_count once, and so if after
3632 * subtracting the potential reference by the user, any pin_count
3633 * remains, it must be due to another use by the display engine.
3634 */
d7f46fc4 3635 return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
cc98b413
CW
3636}
3637
b9241ea3 3638/*
2da3b9b9
CW
3639 * Prepare buffer for display plane (scanout, cursors, etc).
3640 * Can be called from an uninterruptible phase (modesetting) and allows
3641 * any flushes to be pipelined (for pageflips).
b9241ea3
ZW
3642 */
3643int
2da3b9b9
CW
3644i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3645 u32 alignment,
919926ae 3646 struct intel_ring_buffer *pipelined)
b9241ea3 3647{
2da3b9b9 3648 u32 old_read_domains, old_write_domain;
b9241ea3
ZW
3649 int ret;
3650
0be73284 3651 if (pipelined != obj->ring) {
2911a35b
BW
3652 ret = i915_gem_object_sync(obj, pipelined);
3653 if (ret)
b9241ea3
ZW
3654 return ret;
3655 }
3656
cc98b413
CW
3657 /* Mark the pin_display early so that we account for the
3658 * display coherency whilst setting up the cache domains.
3659 */
3660 obj->pin_display = true;
3661
a7ef0640
EA
3662 /* The display engine is not coherent with the LLC cache on gen6. As
3663 * a result, we make sure that the pinning that is about to occur is
 3664 * done with uncached PTEs. This is the lowest common denominator for all
3665 * chipsets.
3666 *
3667 * However for gen6+, we could do better by using the GFDT bit instead
3668 * of uncaching, which would allow us to flush all the LLC-cached data
3669 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3670 */
651d794f
CW
3671 ret = i915_gem_object_set_cache_level(obj,
3672 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
a7ef0640 3673 if (ret)
cc98b413 3674 goto err_unpin_display;
a7ef0640 3675
2da3b9b9
CW
3676 /* As the user may map the buffer once pinned in the display plane
3677 * (e.g. libkms for the bootup splash), we have to ensure that we
3678 * always use map_and_fenceable for all scanout buffers.
3679 */
1ec9e26d 3680 ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
2da3b9b9 3681 if (ret)
cc98b413 3682 goto err_unpin_display;
2da3b9b9 3683
2c22569b 3684 i915_gem_object_flush_cpu_write_domain(obj, true);
b118c1e3 3685
2da3b9b9 3686 old_write_domain = obj->base.write_domain;
05394f39 3687 old_read_domains = obj->base.read_domains;
2da3b9b9
CW
3688
3689 /* It should now be out of any other write domains, and we can update
3690 * the domain values for our changes.
3691 */
e5f1d962 3692 obj->base.write_domain = 0;
05394f39 3693 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
b9241ea3
ZW
3694
3695 trace_i915_gem_object_change_domain(obj,
3696 old_read_domains,
2da3b9b9 3697 old_write_domain);
b9241ea3
ZW
3698
3699 return 0;
cc98b413
CW
3700
3701err_unpin_display:
3702 obj->pin_display = is_pin_display(obj);
3703 return ret;
3704}
3705
3706void
3707i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3708{
d7f46fc4 3709 i915_gem_object_ggtt_unpin(obj);
cc98b413 3710 obj->pin_display = is_pin_display(obj);
b9241ea3
ZW
3711}
3712
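/*
 * [Editor's note] The pin_display accounting used by the two functions above,
 * restated as a hedged standalone sketch; the sketch_* names are hypothetical
 * and mirror vma->pin_count, obj->user_pin_count and obj->pin_display.
 */
#include <stdbool.h>

struct sketch_scanout_bo {
	int pin_count;		/* GGTT pins held on the buffer       */
	int user_pin_count;	/* pins attributable to the pin ioctl */
	bool pin_display;	/* "some pin belongs to the display"  */
};

static void sketch_pin_to_display(struct sketch_scanout_bo *bo)
{
	bo->pin_display = true;	/* account before pinning, as above */
	bo->pin_count++;	/* the PIN_MAPPABLE GGTT pin        */
}

static void sketch_unpin_from_display(struct sketch_scanout_bo *bo)
{
	bo->pin_count--;
	/* Whatever pins remain beyond the user's are display pins. */
	bo->pin_display = (bo->pin_count - !!bo->user_pin_count) != 0;
}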
85345517 3713int
a8198eea 3714i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
85345517 3715{
88241785
CW
3716 int ret;
3717
a8198eea 3718 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
85345517
CW
3719 return 0;
3720
0201f1ec 3721 ret = i915_gem_object_wait_rendering(obj, false);
c501ae7f
CW
3722 if (ret)
3723 return ret;
3724
a8198eea
CW
3725 /* Ensure that we invalidate the GPU's caches and TLBs. */
3726 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
c501ae7f 3727 return 0;
85345517
CW
3728}
3729
e47c68e9
EA
3730/**
3731 * Moves a single object to the CPU read, and possibly write domain.
3732 *
3733 * This function returns when the move is complete, including waiting on
3734 * flushes to occur.
3735 */
dabdfe02 3736int
919926ae 3737i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
e47c68e9 3738{
1c5d22f7 3739 uint32_t old_write_domain, old_read_domains;
e47c68e9
EA
3740 int ret;
3741
8d7e3de1
CW
3742 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3743 return 0;
3744
0201f1ec 3745 ret = i915_gem_object_wait_rendering(obj, !write);
88241785
CW
3746 if (ret)
3747 return ret;
3748
e47c68e9 3749 i915_gem_object_flush_gtt_write_domain(obj);
2ef7eeaa 3750
05394f39
CW
3751 old_write_domain = obj->base.write_domain;
3752 old_read_domains = obj->base.read_domains;
1c5d22f7 3753
e47c68e9 3754 /* Flush the CPU cache if it's still invalid. */
05394f39 3755 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2c22569b 3756 i915_gem_clflush_object(obj, false);
2ef7eeaa 3757
05394f39 3758 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
2ef7eeaa
EA
3759 }
3760
3761 /* It should now be out of any other write domains, and we can update
3762 * the domain values for our changes.
3763 */
05394f39 3764 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
e47c68e9
EA
3765
3766 /* If we're writing through the CPU, then the GPU read domains will
3767 * need to be invalidated at next use.
3768 */
3769 if (write) {
05394f39
CW
3770 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3771 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
e47c68e9 3772 }
2ef7eeaa 3773
1c5d22f7
CW
3774 trace_i915_gem_object_change_domain(obj,
3775 old_read_domains,
3776 old_write_domain);
3777
2ef7eeaa
EA
3778 return 0;
3779}
3780
673a394b
EA
3781/* Throttle our rendering by waiting until the ring has completed our requests
3782 * emitted over 20 msec ago.
3783 *
b962442e
EA
3784 * Note that if we were to use the current jiffies each time around the loop,
3785 * we wouldn't escape the function with any frames outstanding if the time to
3786 * render a frame was over 20ms.
3787 *
673a394b
EA
3788 * This should get us reasonable parallelism between CPU and GPU but also
3789 * relatively low latency when blocking on a particular request to finish.
3790 */
40a5f0de 3791static int
f787a5f5 3792i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
40a5f0de 3793{
f787a5f5
CW
3794 struct drm_i915_private *dev_priv = dev->dev_private;
3795 struct drm_i915_file_private *file_priv = file->driver_priv;
b962442e 3796 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
f787a5f5
CW
3797 struct drm_i915_gem_request *request;
3798 struct intel_ring_buffer *ring = NULL;
f69061be 3799 unsigned reset_counter;
f787a5f5
CW
3800 u32 seqno = 0;
3801 int ret;
93533c29 3802
308887aa
DV
3803 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3804 if (ret)
3805 return ret;
3806
3807 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3808 if (ret)
3809 return ret;
e110e8d6 3810
1c25595f 3811 spin_lock(&file_priv->mm.lock);
f787a5f5 3812 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
b962442e
EA
3813 if (time_after_eq(request->emitted_jiffies, recent_enough))
3814 break;
40a5f0de 3815
f787a5f5
CW
3816 ring = request->ring;
3817 seqno = request->seqno;
b962442e 3818 }
f69061be 3819 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1c25595f 3820 spin_unlock(&file_priv->mm.lock);
40a5f0de 3821
f787a5f5
CW
3822 if (seqno == 0)
3823 return 0;
2bc43b5c 3824
b29c19b6 3825 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
f787a5f5
CW
3826 if (ret == 0)
3827 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
40a5f0de
EA
3828
3829 return ret;
3830}
3831
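/*
 * [Editor's note] The 20 ms throttle window above, reduced to a hedged
 * standalone sketch: walk the requests oldest-first and wait on the newest
 * one that is already older than 20 ms. Types are hypothetical; jiffies are
 * replaced by a monotonic millisecond clock.
 */
#include <stdint.h>
#include <time.h>

struct sketch_request { uint64_t emitted_ms; uint32_t seqno; };

static uint64_t sketch_now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

/* Returns the seqno to block on, or 0 if nothing is older than 20 ms. */
static uint32_t sketch_throttle_target(const struct sketch_request *reqs, int n)
{
	uint64_t recent_enough = sketch_now_ms() - 20;
	uint32_t seqno = 0;

	for (int i = 0; i < n; i++) {
		if (reqs[i].emitted_ms >= recent_enough)
			break;		/* emitted within the last 20 ms           */
		seqno = reqs[i].seqno;	/* newest request older than 20 ms so far  */
	}
	return seqno;
}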
673a394b 3832int
05394f39 3833i915_gem_object_pin(struct drm_i915_gem_object *obj,
c37e2204 3834 struct i915_address_space *vm,
05394f39 3835 uint32_t alignment,
1ec9e26d 3836 unsigned flags)
673a394b 3837{
07fe0b12 3838 struct i915_vma *vma;
673a394b
EA
3839 int ret;
3840
1ec9e26d
DV
3841 if (WARN_ON(flags & PIN_MAPPABLE && !i915_is_ggtt(vm)))
3842 return -EINVAL;
07fe0b12
BW
3843
3844 vma = i915_gem_obj_to_vma(obj, vm);
07fe0b12 3845 if (vma) {
d7f46fc4
BW
3846 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3847 return -EBUSY;
3848
07fe0b12
BW
3849 if ((alignment &&
3850 vma->node.start & (alignment - 1)) ||
1ec9e26d 3851 (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
d7f46fc4 3852 WARN(vma->pin_count,
ae7d49d8 3853 "bo is already pinned with incorrect alignment:"
f343c5f6 3854 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
75e9e915 3855 " obj->map_and_fenceable=%d\n",
07fe0b12 3856 i915_gem_obj_offset(obj, vm), alignment,
1ec9e26d 3857 flags & PIN_MAPPABLE,
05394f39 3858 obj->map_and_fenceable);
07fe0b12 3859 ret = i915_vma_unbind(vma);
ac0c6b5a
CW
3860 if (ret)
3861 return ret;
3862 }
3863 }
3864
07fe0b12 3865 if (!i915_gem_obj_bound(obj, vm)) {
1ec9e26d 3866 ret = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
9731129c 3867 if (ret)
673a394b 3868 return ret;
8742267a 3869
22c344e9 3870 }
76446cac 3871
6f65e29a
BW
3872 vma = i915_gem_obj_to_vma(obj, vm);
3873
1ec9e26d
DV
3874 vma->bind_vma(vma, obj->cache_level,
3875 flags & PIN_MAPPABLE ? GLOBAL_BIND : 0);
74898d7e 3876
d7f46fc4 3877 i915_gem_obj_to_vma(obj, vm)->pin_count++;
1ec9e26d
DV
3878 if (flags & PIN_MAPPABLE)
 3879 obj->pin_mappable = true;
673a394b
EA
3880
3881 return 0;
3882}
3883
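/*
 * [Editor's note] The "reuse or rebind" decision in i915_gem_object_pin()
 * above as a hedged standalone predicate: an existing VMA is only kept if it
 * meets the requested alignment and, when PIN_MAPPABLE is requested, already
 * sits in a mappable/fenceable spot. sketch_* names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

struct sketch_vma {
	uint64_t start;		/* vma->node.start        */
	bool map_and_fenceable;	/* obj->map_and_fenceable */
};

static bool sketch_vma_satisfies_pin(const struct sketch_vma *vma,
				     uint32_t alignment, bool need_mappable)
{
	if (alignment && (vma->start & (alignment - 1)))
		return false;		/* misaligned: unbind and bind again       */
	if (need_mappable && !vma->map_and_fenceable)
		return false;		/* must be reachable through the aperture  */
	return true;
}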
3884void
d7f46fc4 3885i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
673a394b 3886{
d7f46fc4 3887 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
673a394b 3888
d7f46fc4
BW
3889 BUG_ON(!vma);
3890 BUG_ON(vma->pin_count == 0);
3891 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3892
3893 if (--vma->pin_count == 0)
6299f992 3894 obj->pin_mappable = false;
673a394b
EA
3895}
3896
3897int
3898i915_gem_pin_ioctl(struct drm_device *dev, void *data,
05394f39 3899 struct drm_file *file)
673a394b
EA
3900{
3901 struct drm_i915_gem_pin *args = data;
05394f39 3902 struct drm_i915_gem_object *obj;
673a394b
EA
3903 int ret;
3904
02f6bccc
DV
3905 if (INTEL_INFO(dev)->gen >= 6)
3906 return -ENODEV;
3907
1d7cfea1
CW
3908 ret = i915_mutex_lock_interruptible(dev);
3909 if (ret)
3910 return ret;
673a394b 3911
05394f39 3912 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3913 if (&obj->base == NULL) {
1d7cfea1
CW
3914 ret = -ENOENT;
3915 goto unlock;
673a394b 3916 }
673a394b 3917
05394f39 3918 if (obj->madv != I915_MADV_WILLNEED) {
bd9b6a4e 3919 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
8c99e57d 3920 ret = -EFAULT;
1d7cfea1 3921 goto out;
3ef94daa
CW
3922 }
3923
05394f39 3924 if (obj->pin_filp != NULL && obj->pin_filp != file) {
bd9b6a4e 3925 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
79e53945 3926 args->handle);
1d7cfea1
CW
3927 ret = -EINVAL;
3928 goto out;
79e53945
JB
3929 }
3930
aa5f8021
DV
3931 if (obj->user_pin_count == ULONG_MAX) {
3932 ret = -EBUSY;
3933 goto out;
3934 }
3935
93be8788 3936 if (obj->user_pin_count == 0) {
1ec9e26d 3937 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
1d7cfea1
CW
3938 if (ret)
3939 goto out;
673a394b
EA
3940 }
3941
93be8788
CW
3942 obj->user_pin_count++;
3943 obj->pin_filp = file;
3944
f343c5f6 3945 args->offset = i915_gem_obj_ggtt_offset(obj);
1d7cfea1 3946out:
05394f39 3947 drm_gem_object_unreference(&obj->base);
1d7cfea1 3948unlock:
673a394b 3949 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3950 return ret;
673a394b
EA
3951}
3952
3953int
3954i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
05394f39 3955 struct drm_file *file)
673a394b
EA
3956{
3957 struct drm_i915_gem_pin *args = data;
05394f39 3958 struct drm_i915_gem_object *obj;
76c1dec1 3959 int ret;
673a394b 3960
1d7cfea1
CW
3961 ret = i915_mutex_lock_interruptible(dev);
3962 if (ret)
3963 return ret;
673a394b 3964
05394f39 3965 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3966 if (&obj->base == NULL) {
1d7cfea1
CW
3967 ret = -ENOENT;
3968 goto unlock;
673a394b 3969 }
76c1dec1 3970
05394f39 3971 if (obj->pin_filp != file) {
bd9b6a4e 3972 DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
79e53945 3973 args->handle);
1d7cfea1
CW
3974 ret = -EINVAL;
3975 goto out;
79e53945 3976 }
05394f39
CW
3977 obj->user_pin_count--;
3978 if (obj->user_pin_count == 0) {
3979 obj->pin_filp = NULL;
d7f46fc4 3980 i915_gem_object_ggtt_unpin(obj);
79e53945 3981 }
673a394b 3982
1d7cfea1 3983out:
05394f39 3984 drm_gem_object_unreference(&obj->base);
1d7cfea1 3985unlock:
673a394b 3986 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3987 return ret;
673a394b
EA
3988}
3989
3990int
3991i915_gem_busy_ioctl(struct drm_device *dev, void *data,
05394f39 3992 struct drm_file *file)
673a394b
EA
3993{
3994 struct drm_i915_gem_busy *args = data;
05394f39 3995 struct drm_i915_gem_object *obj;
30dbf0c0
CW
3996 int ret;
3997
76c1dec1 3998 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 3999 if (ret)
76c1dec1 4000 return ret;
673a394b 4001
05394f39 4002 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 4003 if (&obj->base == NULL) {
1d7cfea1
CW
4004 ret = -ENOENT;
4005 goto unlock;
673a394b 4006 }
d1b851fc 4007
0be555b6
CW
4008 /* Count all active objects as busy, even if they are currently not used
4009 * by the gpu. Users of this interface expect objects to eventually
4010 * become non-busy without any further actions, therefore emit any
4011 * necessary flushes here.
c4de0a5d 4012 */
30dfebf3 4013 ret = i915_gem_object_flush_active(obj);
0be555b6 4014
30dfebf3 4015 args->busy = obj->active;
e9808edd
CW
4016 if (obj->ring) {
4017 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4018 args->busy |= intel_ring_flag(obj->ring) << 16;
4019 }
673a394b 4020
05394f39 4021 drm_gem_object_unreference(&obj->base);
1d7cfea1 4022unlock:
673a394b 4023 mutex_unlock(&dev->struct_mutex);
1d7cfea1 4024 return ret;
673a394b
EA
4025}
4026
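/*
 * [Editor's note] A hedged userspace sketch of decoding the busy ioctl result
 * produced above: bit 0 reports activity, and the ring that last touched the
 * buffer is packed into the upper 16 bits. Uses libdrm; error handling is
 * trimmed and failures are treated as "idle" for brevity.
 */
#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static bool sketch_bo_busy(int fd, uint32_t handle, unsigned int *ring_flag)
{
	struct drm_i915_gem_busy arg = { .handle = handle };

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &arg))
		return false;

	if (ring_flag)
		*ring_flag = arg.busy >> 16;	/* intel_ring_flag() of the last ring */

	return (arg.busy & 1) != 0;		/* obj->active */
}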
4027int
4028i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4029 struct drm_file *file_priv)
4030{
0206e353 4031 return i915_gem_ring_throttle(dev, file_priv);
673a394b
EA
4032}
4033
3ef94daa
CW
4034int
4035i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4036 struct drm_file *file_priv)
4037{
4038 struct drm_i915_gem_madvise *args = data;
05394f39 4039 struct drm_i915_gem_object *obj;
76c1dec1 4040 int ret;
3ef94daa
CW
4041
4042 switch (args->madv) {
4043 case I915_MADV_DONTNEED:
4044 case I915_MADV_WILLNEED:
4045 break;
4046 default:
4047 return -EINVAL;
4048 }
4049
1d7cfea1
CW
4050 ret = i915_mutex_lock_interruptible(dev);
4051 if (ret)
4052 return ret;
4053
05394f39 4054 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
c8725226 4055 if (&obj->base == NULL) {
1d7cfea1
CW
4056 ret = -ENOENT;
4057 goto unlock;
3ef94daa 4058 }
3ef94daa 4059
d7f46fc4 4060 if (i915_gem_obj_is_pinned(obj)) {
1d7cfea1
CW
4061 ret = -EINVAL;
4062 goto out;
3ef94daa
CW
4063 }
4064
05394f39
CW
4065 if (obj->madv != __I915_MADV_PURGED)
4066 obj->madv = args->madv;
3ef94daa 4067
6c085a72
CW
4068 /* if the object is no longer attached, discard its backing storage */
4069 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
2d7ef395
CW
4070 i915_gem_object_truncate(obj);
4071
05394f39 4072 args->retained = obj->madv != __I915_MADV_PURGED;
bb6baf76 4073
1d7cfea1 4074out:
05394f39 4075 drm_gem_object_unreference(&obj->base);
1d7cfea1 4076unlock:
3ef94daa 4077 mutex_unlock(&dev->struct_mutex);
1d7cfea1 4078 return ret;
3ef94daa
CW
4079}
4080
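/*
 * [Editor's note] The usual userspace pairing for the madvise ioctl above, as
 * a hedged libdrm sketch: a buffer cache marks idle buffers purgeable and
 * checks 'retained' when taking one back; if the kernel already purged the
 * backing storage, the buffer must be treated as freshly allocated.
 */
#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static void sketch_bo_mark_purgeable(int fd, uint32_t handle)
{
	struct drm_i915_gem_madvise arg = {
		.handle = handle,
		.madv = I915_MADV_DONTNEED,
	};

	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
}

/* Returns false if the pages were purged while the buffer sat in the cache. */
static bool sketch_bo_reclaim(int fd, uint32_t handle)
{
	struct drm_i915_gem_madvise arg = {
		.handle = handle,
		.madv = I915_MADV_WILLNEED,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
		return false;

	return arg.retained != 0;
}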
37e680a1
CW
4081void i915_gem_object_init(struct drm_i915_gem_object *obj,
4082 const struct drm_i915_gem_object_ops *ops)
0327d6ba 4083{
35c20a60 4084 INIT_LIST_HEAD(&obj->global_list);
0327d6ba 4085 INIT_LIST_HEAD(&obj->ring_list);
b25cb2f8 4086 INIT_LIST_HEAD(&obj->obj_exec_link);
2f633156 4087 INIT_LIST_HEAD(&obj->vma_list);
0327d6ba 4088
37e680a1
CW
4089 obj->ops = ops;
4090
0327d6ba
CW
4091 obj->fence_reg = I915_FENCE_REG_NONE;
4092 obj->madv = I915_MADV_WILLNEED;
4093 /* Avoid an unnecessary call to unbind on the first bind. */
4094 obj->map_and_fenceable = true;
4095
4096 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4097}
4098
37e680a1
CW
4099static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4100 .get_pages = i915_gem_object_get_pages_gtt,
4101 .put_pages = i915_gem_object_put_pages_gtt,
4102};
4103
05394f39
CW
4104struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4105 size_t size)
ac52bc56 4106{
c397b908 4107 struct drm_i915_gem_object *obj;
5949eac4 4108 struct address_space *mapping;
1a240d4d 4109 gfp_t mask;
ac52bc56 4110
42dcedd4 4111 obj = i915_gem_object_alloc(dev);
c397b908
DV
4112 if (obj == NULL)
4113 return NULL;
673a394b 4114
c397b908 4115 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
42dcedd4 4116 i915_gem_object_free(obj);
c397b908
DV
4117 return NULL;
4118 }
673a394b 4119
bed1ea95
CW
4120 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4121 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4122 /* 965gm cannot relocate objects above 4GiB. */
4123 mask &= ~__GFP_HIGHMEM;
4124 mask |= __GFP_DMA32;
4125 }
4126
496ad9aa 4127 mapping = file_inode(obj->base.filp)->i_mapping;
bed1ea95 4128 mapping_set_gfp_mask(mapping, mask);
5949eac4 4129
37e680a1 4130 i915_gem_object_init(obj, &i915_gem_object_ops);
73aa808f 4131
c397b908
DV
4132 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4133 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
673a394b 4134
3d29b842
ED
4135 if (HAS_LLC(dev)) {
4136 /* On some devices, we can have the GPU use the LLC (the CPU
a1871112
EA
4137 * cache) for about a 10% performance improvement
4138 * compared to uncached. Graphics requests other than
4139 * display scanout are coherent with the CPU in
4140 * accessing this cache. This means in this mode we
4141 * don't need to clflush on the CPU side, and on the
4142 * GPU side we only need to flush internal caches to
4143 * get data visible to the CPU.
4144 *
4145 * However, we maintain the display planes as UC, and so
4146 * need to rebind when first used as such.
4147 */
4148 obj->cache_level = I915_CACHE_LLC;
4149 } else
4150 obj->cache_level = I915_CACHE_NONE;
4151
d861e338
DV
4152 trace_i915_gem_object_create(obj);
4153
05394f39 4154 return obj;
c397b908
DV
4155}
4156
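/*
 * [Editor's note] The gfp-mask adjustment above as a hedged standalone
 * sketch: 965G/GM-class parts cannot relocate objects above 4 GiB, so their
 * shmem allocations drop the highmem bit and force the 32-bit DMA zone. The
 * SK_GFP_* constants below are hypothetical stand-ins for the kernel flags.
 */
#include <stdbool.h>

#define SK_GFP_DMA32       (1u << 0)
#define SK_GFP_HIGHMEM     (1u << 1)
#define SK_GFP_RECLAIMABLE (1u << 2)
#define SK_GFP_USER        (1u << 3)
#define SK_GFP_HIGHUSER    (SK_GFP_USER | SK_GFP_HIGHMEM)

static unsigned int sketch_gem_gfp_mask(bool is_crestline_or_broadwater)
{
	unsigned int mask = SK_GFP_HIGHUSER | SK_GFP_RECLAIMABLE;

	if (is_crestline_or_broadwater) {
		mask &= ~SK_GFP_HIGHMEM;	/* no pages above 4 GiB   */
		mask |= SK_GFP_DMA32;		/* stay in the DMA32 zone */
	}
	return mask;
}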
1488fc08 4157void i915_gem_free_object(struct drm_gem_object *gem_obj)
673a394b 4158{
1488fc08 4159 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
05394f39 4160 struct drm_device *dev = obj->base.dev;
be72615b 4161 drm_i915_private_t *dev_priv = dev->dev_private;
07fe0b12 4162 struct i915_vma *vma, *next;
673a394b 4163
f65c9168
PZ
4164 intel_runtime_pm_get(dev_priv);
4165
26e12f89
CW
4166 trace_i915_gem_object_destroy(obj);
4167
1488fc08
CW
4168 if (obj->phys_obj)
4169 i915_gem_detach_phys_object(dev, obj);
4170
07fe0b12 4171 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
d7f46fc4
BW
4172 int ret;
4173
4174 vma->pin_count = 0;
4175 ret = i915_vma_unbind(vma);
07fe0b12
BW
4176 if (WARN_ON(ret == -ERESTARTSYS)) {
4177 bool was_interruptible;
1488fc08 4178
07fe0b12
BW
4179 was_interruptible = dev_priv->mm.interruptible;
4180 dev_priv->mm.interruptible = false;
1488fc08 4181
07fe0b12 4182 WARN_ON(i915_vma_unbind(vma));
1488fc08 4183
07fe0b12
BW
4184 dev_priv->mm.interruptible = was_interruptible;
4185 }
1488fc08
CW
4186 }
4187
1d64ae71
BW
4188 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4189 * before progressing. */
4190 if (obj->stolen)
4191 i915_gem_object_unpin_pages(obj);
4192
401c29f6
BW
4193 if (WARN_ON(obj->pages_pin_count))
4194 obj->pages_pin_count = 0;
37e680a1 4195 i915_gem_object_put_pages(obj);
d8cb5086 4196 i915_gem_object_free_mmap_offset(obj);
0104fdbb 4197 i915_gem_object_release_stolen(obj);
de151cf6 4198
9da3da66
CW
4199 BUG_ON(obj->pages);
4200
2f745ad3
CW
4201 if (obj->base.import_attach)
4202 drm_prime_gem_destroy(&obj->base, NULL);
de151cf6 4203
05394f39
CW
4204 drm_gem_object_release(&obj->base);
4205 i915_gem_info_remove_obj(dev_priv, obj->base.size);
c397b908 4206
05394f39 4207 kfree(obj->bit_17);
42dcedd4 4208 i915_gem_object_free(obj);
f65c9168
PZ
4209
4210 intel_runtime_pm_put(dev_priv);
673a394b
EA
4211}
4212
e656a6cb 4213struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
2f633156 4214 struct i915_address_space *vm)
e656a6cb
DV
4215{
4216 struct i915_vma *vma;
4217 list_for_each_entry(vma, &obj->vma_list, vma_link)
4218 if (vma->vm == vm)
4219 return vma;
4220
4221 return NULL;
4222}
4223
2f633156
BW
4224void i915_gem_vma_destroy(struct i915_vma *vma)
4225{
4226 WARN_ON(vma->node.allocated);
aaa05667
CW
4227
4228 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4229 if (!list_empty(&vma->exec_list))
4230 return;
4231
8b9c2b94 4232 list_del(&vma->vma_link);
b93dab6e 4233
2f633156
BW
4234 kfree(vma);
4235}
4236
29105ccc 4237int
45c5f202 4238i915_gem_suspend(struct drm_device *dev)
29105ccc
CW
4239{
4240 drm_i915_private_t *dev_priv = dev->dev_private;
45c5f202 4241 int ret = 0;
28dfe52a 4242
45c5f202 4243 mutex_lock(&dev->struct_mutex);
f7403347 4244 if (dev_priv->ums.mm_suspended)
45c5f202 4245 goto err;
28dfe52a 4246
b2da9fe5 4247 ret = i915_gpu_idle(dev);
f7403347 4248 if (ret)
45c5f202 4249 goto err;
f7403347 4250
b2da9fe5 4251 i915_gem_retire_requests(dev);
673a394b 4252
29105ccc 4253 /* Under UMS, be paranoid and evict. */
a39d7efc 4254 if (!drm_core_check_feature(dev, DRIVER_MODESET))
6c085a72 4255 i915_gem_evict_everything(dev);
29105ccc 4256
29105ccc 4257 i915_kernel_lost_context(dev);
6dbe2772 4258 i915_gem_cleanup_ringbuffer(dev);
29105ccc 4259
45c5f202
CW
4260 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4261 * We need to replace this with a semaphore, or something.
4262 * And not confound ums.mm_suspended!
4263 */
4264 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4265 DRIVER_MODESET);
4266 mutex_unlock(&dev->struct_mutex);
4267
4268 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
29105ccc 4269 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
b29c19b6 4270 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
29105ccc 4271
673a394b 4272 return 0;
45c5f202
CW
4273
4274err:
4275 mutex_unlock(&dev->struct_mutex);
4276 return ret;
673a394b
EA
4277}
4278
c3787e2e 4279int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
b9524a1e 4280{
c3787e2e 4281 struct drm_device *dev = ring->dev;
b9524a1e 4282 drm_i915_private_t *dev_priv = dev->dev_private;
35a85ac6
BW
4283 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4284 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
c3787e2e 4285 int i, ret;
b9524a1e 4286
040d2baa 4287 if (!HAS_L3_DPF(dev) || !remap_info)
c3787e2e 4288 return 0;
b9524a1e 4289
c3787e2e
BW
4290 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4291 if (ret)
4292 return ret;
b9524a1e 4293
c3787e2e
BW
4294 /*
4295 * Note: We do not worry about the concurrent register cacheline hang
4296 * here because no other code should access these registers other than
4297 * at initialization time.
4298 */
b9524a1e 4299 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
c3787e2e
BW
4300 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4301 intel_ring_emit(ring, reg_base + i);
4302 intel_ring_emit(ring, remap_info[i/4]);
b9524a1e
BW
4303 }
4304
c3787e2e 4305 intel_ring_advance(ring);
b9524a1e 4306
c3787e2e 4307 return ret;
b9524a1e
BW
4308}
4309
f691e2f4
DV
4310void i915_gem_init_swizzling(struct drm_device *dev)
4311{
4312 drm_i915_private_t *dev_priv = dev->dev_private;
4313
11782b02 4314 if (INTEL_INFO(dev)->gen < 5 ||
f691e2f4
DV
4315 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4316 return;
4317
4318 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4319 DISP_TILE_SURFACE_SWIZZLING);
4320
11782b02
DV
4321 if (IS_GEN5(dev))
4322 return;
4323
f691e2f4
DV
4324 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4325 if (IS_GEN6(dev))
6b26c86d 4326 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
8782e26c 4327 else if (IS_GEN7(dev))
6b26c86d 4328 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
31a5336e
BW
4329 else if (IS_GEN8(dev))
4330 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
8782e26c
BW
4331 else
4332 BUG();
f691e2f4 4333}
e21af88d 4334
67b1b571
CW
4335static bool
4336intel_enable_blt(struct drm_device *dev)
4337{
4338 if (!HAS_BLT(dev))
4339 return false;
4340
4341 /* The blitter was dysfunctional on early prototypes */
4342 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4343 DRM_INFO("BLT not supported on this pre-production hardware;"
4344 " graphics performance will be degraded.\n");
4345 return false;
4346 }
4347
4348 return true;
4349}
4350
4fc7c971 4351static int i915_gem_init_rings(struct drm_device *dev)
8187a2b7 4352{
4fc7c971 4353 struct drm_i915_private *dev_priv = dev->dev_private;
8187a2b7 4354 int ret;
68f95ba9 4355
5c1143bb 4356 ret = intel_init_render_ring_buffer(dev);
68f95ba9 4357 if (ret)
b6913e4b 4358 return ret;
68f95ba9
CW
4359
4360 if (HAS_BSD(dev)) {
5c1143bb 4361 ret = intel_init_bsd_ring_buffer(dev);
68f95ba9
CW
4362 if (ret)
4363 goto cleanup_render_ring;
d1b851fc 4364 }
68f95ba9 4365
67b1b571 4366 if (intel_enable_blt(dev)) {
549f7365
CW
4367 ret = intel_init_blt_ring_buffer(dev);
4368 if (ret)
4369 goto cleanup_bsd_ring;
4370 }
4371
9a8a2213
BW
4372 if (HAS_VEBOX(dev)) {
4373 ret = intel_init_vebox_ring_buffer(dev);
4374 if (ret)
4375 goto cleanup_blt_ring;
4376 }
4377
4378
99433931 4379 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4fc7c971 4380 if (ret)
9a8a2213 4381 goto cleanup_vebox_ring;
4fc7c971
BW
4382
4383 return 0;
4384
9a8a2213
BW
4385cleanup_vebox_ring:
4386 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4fc7c971
BW
4387cleanup_blt_ring:
4388 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4389cleanup_bsd_ring:
4390 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4391cleanup_render_ring:
4392 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4393
4394 return ret;
4395}
4396
4397int
4398i915_gem_init_hw(struct drm_device *dev)
4399{
4400 drm_i915_private_t *dev_priv = dev->dev_private;
35a85ac6 4401 int ret, i;
4fc7c971
BW
4402
4403 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4404 return -EIO;
4405
59124506 4406 if (dev_priv->ellc_size)
05e21cc4 4407 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4fc7c971 4408
0bf21347
VS
4409 if (IS_HASWELL(dev))
4410 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4411 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
9435373e 4412
88a2b2a3 4413 if (HAS_PCH_NOP(dev)) {
6ba844b0
DV
4414 if (IS_IVYBRIDGE(dev)) {
4415 u32 temp = I915_READ(GEN7_MSG_CTL);
4416 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4417 I915_WRITE(GEN7_MSG_CTL, temp);
4418 } else if (INTEL_INFO(dev)->gen >= 7) {
4419 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4420 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4421 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4422 }
88a2b2a3
BW
4423 }
4424
4fc7c971
BW
4425 i915_gem_init_swizzling(dev);
4426
4427 ret = i915_gem_init_rings(dev);
99433931
MK
4428 if (ret)
4429 return ret;
4430
c3787e2e
BW
4431 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4432 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4433
254f965c 4434 /*
2fa48d8d
BW
 4435 * XXX: Contexts should only be initialized once. Switching to the
 4436 * default context, however, is something we'd like to do after
4437 * reset or thaw (the latter may not actually be necessary for HW, but
4438 * goes with our code better). Context switching requires rings (for
4439 * the do_switch), but before enabling PPGTT. So don't move this.
254f965c 4440 */
2fa48d8d 4441 ret = i915_gem_context_enable(dev_priv);
8245be31 4442 if (ret) {
2fa48d8d
BW
4443 DRM_ERROR("Context enable failed %d\n", ret);
4444 goto err_out;
b7c36d25 4445 }
e21af88d 4446
68f95ba9 4447 return 0;
2fa48d8d
BW
4448
4449err_out:
4450 i915_gem_cleanup_ringbuffer(dev);
4451 return ret;
8187a2b7
ZN
4452}
4453
1070a42b
CW
4454int i915_gem_init(struct drm_device *dev)
4455{
4456 struct drm_i915_private *dev_priv = dev->dev_private;
1070a42b
CW
4457 int ret;
4458
1070a42b 4459 mutex_lock(&dev->struct_mutex);
d62b4892
JB
4460
4461 if (IS_VALLEYVIEW(dev)) {
4462 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4463 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4464 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4465 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4466 }
4467
d7e5008f 4468 i915_gem_init_global_gtt(dev);
d62b4892 4469
2fa48d8d 4470 ret = i915_gem_context_init(dev);
e3848694
MK
4471 if (ret) {
4472 mutex_unlock(&dev->struct_mutex);
2fa48d8d 4473 return ret;
e3848694 4474 }
2fa48d8d 4475
1070a42b
CW
4476 ret = i915_gem_init_hw(dev);
4477 mutex_unlock(&dev->struct_mutex);
4478 if (ret) {
bdf4fd7e 4479 WARN_ON(dev_priv->mm.aliasing_ppgtt);
2fa48d8d 4480 i915_gem_context_fini(dev);
c39538a8 4481 drm_mm_takedown(&dev_priv->gtt.base.mm);
1070a42b
CW
4482 return ret;
4483 }
4484
53ca26ca
DV
4485 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4486 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4487 dev_priv->dri1.allow_batchbuffer = 1;
1070a42b
CW
4488 return 0;
4489}
4490
8187a2b7
ZN
4491void
4492i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4493{
4494 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 4495 struct intel_ring_buffer *ring;
1ec14ad3 4496 int i;
8187a2b7 4497
b4519513
CW
4498 for_each_ring(ring, dev_priv, i)
4499 intel_cleanup_ring_buffer(ring);
8187a2b7
ZN
4500}
4501
673a394b
EA
4502int
4503i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4504 struct drm_file *file_priv)
4505{
db1b76ca 4506 struct drm_i915_private *dev_priv = dev->dev_private;
b4519513 4507 int ret;
673a394b 4508
79e53945
JB
4509 if (drm_core_check_feature(dev, DRIVER_MODESET))
4510 return 0;
4511
1f83fee0 4512 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
673a394b 4513 DRM_ERROR("Reenabling wedged hardware, good luck\n");
1f83fee0 4514 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
673a394b
EA
4515 }
4516
673a394b 4517 mutex_lock(&dev->struct_mutex);
db1b76ca 4518 dev_priv->ums.mm_suspended = 0;
9bb2d6f9 4519
f691e2f4 4520 ret = i915_gem_init_hw(dev);
d816f6ac
WF
4521 if (ret != 0) {
4522 mutex_unlock(&dev->struct_mutex);
9bb2d6f9 4523 return ret;
d816f6ac 4524 }
9bb2d6f9 4525
5cef07e1 4526 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
673a394b 4527 mutex_unlock(&dev->struct_mutex);
dbb19d30 4528
5f35308b
CW
4529 ret = drm_irq_install(dev);
4530 if (ret)
4531 goto cleanup_ringbuffer;
dbb19d30 4532
673a394b 4533 return 0;
5f35308b
CW
4534
4535cleanup_ringbuffer:
4536 mutex_lock(&dev->struct_mutex);
4537 i915_gem_cleanup_ringbuffer(dev);
db1b76ca 4538 dev_priv->ums.mm_suspended = 1;
5f35308b
CW
4539 mutex_unlock(&dev->struct_mutex);
4540
4541 return ret;
673a394b
EA
4542}
4543
4544int
4545i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4546 struct drm_file *file_priv)
4547{
79e53945
JB
4548 if (drm_core_check_feature(dev, DRIVER_MODESET))
4549 return 0;
4550
dbb19d30 4551 drm_irq_uninstall(dev);
db1b76ca 4552
45c5f202 4553 return i915_gem_suspend(dev);
673a394b
EA
4554}
4555
4556void
4557i915_gem_lastclose(struct drm_device *dev)
4558{
4559 int ret;
673a394b 4560
e806b495
EA
4561 if (drm_core_check_feature(dev, DRIVER_MODESET))
4562 return;
4563
45c5f202 4564 ret = i915_gem_suspend(dev);
6dbe2772
KP
4565 if (ret)
4566 DRM_ERROR("failed to idle hardware: %d\n", ret);
673a394b
EA
4567}
4568
64193406
CW
4569static void
4570init_ring_lists(struct intel_ring_buffer *ring)
4571{
4572 INIT_LIST_HEAD(&ring->active_list);
4573 INIT_LIST_HEAD(&ring->request_list);
64193406
CW
4574}
4575
7e0d96bc
BW
4576void i915_init_vm(struct drm_i915_private *dev_priv,
4577 struct i915_address_space *vm)
fc8c067e 4578{
7e0d96bc
BW
4579 if (!i915_is_ggtt(vm))
4580 drm_mm_init(&vm->mm, vm->start, vm->total);
fc8c067e
BW
4581 vm->dev = dev_priv->dev;
4582 INIT_LIST_HEAD(&vm->active_list);
4583 INIT_LIST_HEAD(&vm->inactive_list);
4584 INIT_LIST_HEAD(&vm->global_link);
f72d21ed 4585 list_add_tail(&vm->global_link, &dev_priv->vm_list);
fc8c067e
BW
4586}
4587
673a394b
EA
4588void
4589i915_gem_load(struct drm_device *dev)
4590{
4591 drm_i915_private_t *dev_priv = dev->dev_private;
42dcedd4
CW
4592 int i;
4593
4594 dev_priv->slab =
4595 kmem_cache_create("i915_gem_object",
4596 sizeof(struct drm_i915_gem_object), 0,
4597 SLAB_HWCACHE_ALIGN,
4598 NULL);
673a394b 4599
fc8c067e
BW
4600 INIT_LIST_HEAD(&dev_priv->vm_list);
4601 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4602
a33afea5 4603 INIT_LIST_HEAD(&dev_priv->context_list);
6c085a72
CW
4604 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4605 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
a09ba7fa 4606 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
1ec14ad3
CW
4607 for (i = 0; i < I915_NUM_RINGS; i++)
4608 init_ring_lists(&dev_priv->ring[i]);
4b9de737 4609 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
007cc8ac 4610 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
673a394b
EA
4611 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4612 i915_gem_retire_work_handler);
b29c19b6
CW
4613 INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4614 i915_gem_idle_work_handler);
1f83fee0 4615 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
31169714 4616
94400120
DA
4617 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4618 if (IS_GEN3(dev)) {
50743298
DV
4619 I915_WRITE(MI_ARB_STATE,
4620 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
94400120
DA
4621 }
4622
72bfa19c
CW
4623 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4624
de151cf6 4625 /* Old X drivers will take 0-2 for front, back, depth buffers */
b397c836
EA
4626 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4627 dev_priv->fence_reg_start = 3;
de151cf6 4628
42b5aeab
VS
4629 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4630 dev_priv->num_fence_regs = 32;
4631 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
de151cf6
JB
4632 dev_priv->num_fence_regs = 16;
4633 else
4634 dev_priv->num_fence_regs = 8;
4635
b5aa8a0f 4636 /* Initialize fence registers to zero */
19b2dbde
CW
4637 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4638 i915_gem_restore_fences(dev);
10ed13e4 4639
673a394b 4640 i915_gem_detect_bit_6_swizzle(dev);
6b95a207 4641 init_waitqueue_head(&dev_priv->pending_flip_queue);
17250b71 4642
ce453d81
CW
4643 dev_priv->mm.interruptible = true;
4644
7dc19d5a
DC
4645 dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
4646 dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
17250b71
CW
4647 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4648 register_shrinker(&dev_priv->mm.inactive_shrinker);
673a394b 4649}
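/*
 * [Editor's note] The fence-register sizing above as a hedged standalone
 * helper: 32 fences on gen7+ (except Valleyview), 16 on gen4+/945/G33, and 8
 * otherwise. The parameters are hypothetical stand-ins for the platform
 * checks used by the driver.
 */
#include <stdbool.h>

static int sketch_num_fence_regs(int gen, bool is_valleyview,
				 bool is_945_or_g33)
{
	if (gen >= 7 && !is_valleyview)
		return 32;
	if (gen >= 4 || is_945_or_g33)
		return 16;
	return 8;
}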
71acb5eb
DA
4650
4651/*
4652 * Create a physically contiguous memory object for this object
4653 * e.g. for cursor + overlay regs
4654 */
995b6762
CW
4655static int i915_gem_init_phys_object(struct drm_device *dev,
4656 int id, int size, int align)
71acb5eb
DA
4657{
4658 drm_i915_private_t *dev_priv = dev->dev_private;
4659 struct drm_i915_gem_phys_object *phys_obj;
4660 int ret;
4661
4662 if (dev_priv->mm.phys_objs[id - 1] || !size)
4663 return 0;
4664
b14c5679 4665 phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
71acb5eb
DA
4666 if (!phys_obj)
4667 return -ENOMEM;
4668
4669 phys_obj->id = id;
4670
6eeefaf3 4671 phys_obj->handle = drm_pci_alloc(dev, size, align);
71acb5eb
DA
4672 if (!phys_obj->handle) {
4673 ret = -ENOMEM;
4674 goto kfree_obj;
4675 }
4676#ifdef CONFIG_X86
4677 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4678#endif
4679
4680 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4681
4682 return 0;
4683kfree_obj:
9a298b2a 4684 kfree(phys_obj);
71acb5eb
DA
4685 return ret;
4686}
4687
995b6762 4688static void i915_gem_free_phys_object(struct drm_device *dev, int id)
71acb5eb
DA
4689{
4690 drm_i915_private_t *dev_priv = dev->dev_private;
4691 struct drm_i915_gem_phys_object *phys_obj;
4692
4693 if (!dev_priv->mm.phys_objs[id - 1])
4694 return;
4695
4696 phys_obj = dev_priv->mm.phys_objs[id - 1];
4697 if (phys_obj->cur_obj) {
4698 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4699 }
4700
4701#ifdef CONFIG_X86
4702 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4703#endif
4704 drm_pci_free(dev, phys_obj->handle);
4705 kfree(phys_obj);
4706 dev_priv->mm.phys_objs[id - 1] = NULL;
4707}
4708
4709void i915_gem_free_all_phys_object(struct drm_device *dev)
4710{
4711 int i;
4712
260883c8 4713 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
71acb5eb
DA
4714 i915_gem_free_phys_object(dev, i);
4715}
4716
4717void i915_gem_detach_phys_object(struct drm_device *dev,
05394f39 4718 struct drm_i915_gem_object *obj)
71acb5eb 4719{
496ad9aa 4720 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
e5281ccd 4721 char *vaddr;
71acb5eb 4722 int i;
71acb5eb
DA
4723 int page_count;
4724
05394f39 4725 if (!obj->phys_obj)
71acb5eb 4726 return;
05394f39 4727 vaddr = obj->phys_obj->handle->vaddr;
71acb5eb 4728
05394f39 4729 page_count = obj->base.size / PAGE_SIZE;
71acb5eb 4730 for (i = 0; i < page_count; i++) {
5949eac4 4731 struct page *page = shmem_read_mapping_page(mapping, i);
e5281ccd
CW
4732 if (!IS_ERR(page)) {
4733 char *dst = kmap_atomic(page);
4734 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4735 kunmap_atomic(dst);
4736
4737 drm_clflush_pages(&page, 1);
4738
4739 set_page_dirty(page);
4740 mark_page_accessed(page);
4741 page_cache_release(page);
4742 }
71acb5eb 4743 }
e76e9aeb 4744 i915_gem_chipset_flush(dev);
d78b47b9 4745
05394f39
CW
4746 obj->phys_obj->cur_obj = NULL;
4747 obj->phys_obj = NULL;
71acb5eb
DA
4748}
4749
4750int
4751i915_gem_attach_phys_object(struct drm_device *dev,
05394f39 4752 struct drm_i915_gem_object *obj,
6eeefaf3
CW
4753 int id,
4754 int align)
71acb5eb 4755{
496ad9aa 4756 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
71acb5eb 4757 drm_i915_private_t *dev_priv = dev->dev_private;
71acb5eb
DA
4758 int ret = 0;
4759 int page_count;
4760 int i;
4761
4762 if (id > I915_MAX_PHYS_OBJECT)
4763 return -EINVAL;
4764
05394f39
CW
4765 if (obj->phys_obj) {
4766 if (obj->phys_obj->id == id)
71acb5eb
DA
4767 return 0;
4768 i915_gem_detach_phys_object(dev, obj);
4769 }
4770
71acb5eb
DA
4771 /* create a new object */
4772 if (!dev_priv->mm.phys_objs[id - 1]) {
4773 ret = i915_gem_init_phys_object(dev, id,
05394f39 4774 obj->base.size, align);
71acb5eb 4775 if (ret) {
05394f39
CW
4776 DRM_ERROR("failed to init phys object %d size: %zu\n",
4777 id, obj->base.size);
e5281ccd 4778 return ret;
71acb5eb
DA
4779 }
4780 }
4781
4782 /* bind to the object */
05394f39
CW
4783 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4784 obj->phys_obj->cur_obj = obj;
71acb5eb 4785
05394f39 4786 page_count = obj->base.size / PAGE_SIZE;
71acb5eb
DA
4787
4788 for (i = 0; i < page_count; i++) {
e5281ccd
CW
4789 struct page *page;
4790 char *dst, *src;
4791
5949eac4 4792 page = shmem_read_mapping_page(mapping, i);
e5281ccd
CW
4793 if (IS_ERR(page))
4794 return PTR_ERR(page);
71acb5eb 4795
ff75b9bc 4796 src = kmap_atomic(page);
05394f39 4797 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
71acb5eb 4798 memcpy(dst, src, PAGE_SIZE);
3e4d3af5 4799 kunmap_atomic(src);
71acb5eb 4800
e5281ccd
CW
4801 mark_page_accessed(page);
4802 page_cache_release(page);
4803 }
d78b47b9 4804
71acb5eb 4805 return 0;
71acb5eb
DA
4806}
4807
4808static int
05394f39
CW
4809i915_gem_phys_pwrite(struct drm_device *dev,
4810 struct drm_i915_gem_object *obj,
71acb5eb
DA
4811 struct drm_i915_gem_pwrite *args,
4812 struct drm_file *file_priv)
4813{
05394f39 4814 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
2bb4629a 4815 char __user *user_data = to_user_ptr(args->data_ptr);
71acb5eb 4816
b47b30cc
CW
4817 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4818 unsigned long unwritten;
4819
4820 /* The physical object once assigned is fixed for the lifetime
4821 * of the obj, so we can safely drop the lock and continue
4822 * to access vaddr.
4823 */
4824 mutex_unlock(&dev->struct_mutex);
4825 unwritten = copy_from_user(vaddr, user_data, args->size);
4826 mutex_lock(&dev->struct_mutex);
4827 if (unwritten)
4828 return -EFAULT;
4829 }
71acb5eb 4830
e76e9aeb 4831 i915_gem_chipset_flush(dev);
71acb5eb
DA
4832 return 0;
4833}
b962442e 4834
f787a5f5 4835void i915_gem_release(struct drm_device *dev, struct drm_file *file)
b962442e 4836{
f787a5f5 4837 struct drm_i915_file_private *file_priv = file->driver_priv;
b962442e 4838
b29c19b6
CW
4839 cancel_delayed_work_sync(&file_priv->mm.idle_work);
4840
b962442e
EA
4841 /* Clean up our request list when the client is going away, so that
4842 * later retire_requests won't dereference our soon-to-be-gone
4843 * file_priv.
4844 */
1c25595f 4845 spin_lock(&file_priv->mm.lock);
f787a5f5
CW
4846 while (!list_empty(&file_priv->mm.request_list)) {
4847 struct drm_i915_gem_request *request;
4848
4849 request = list_first_entry(&file_priv->mm.request_list,
4850 struct drm_i915_gem_request,
4851 client_list);
4852 list_del(&request->client_list);
4853 request->file_priv = NULL;
4854 }
1c25595f 4855 spin_unlock(&file_priv->mm.lock);
b962442e 4856}
31169714 4857
b29c19b6
CW
4858static void
4859i915_gem_file_idle_work_handler(struct work_struct *work)
4860{
4861 struct drm_i915_file_private *file_priv =
4862 container_of(work, typeof(*file_priv), mm.idle_work.work);
4863
4864 atomic_set(&file_priv->rps_wait_boost, false);
4865}
4866
4867int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4868{
4869 struct drm_i915_file_private *file_priv;
e422b888 4870 int ret;
b29c19b6
CW
4871
4872 DRM_DEBUG_DRIVER("\n");
4873
4874 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4875 if (!file_priv)
4876 return -ENOMEM;
4877
4878 file->driver_priv = file_priv;
4879 file_priv->dev_priv = dev->dev_private;
4880
4881 spin_lock_init(&file_priv->mm.lock);
4882 INIT_LIST_HEAD(&file_priv->mm.request_list);
4883 INIT_DELAYED_WORK(&file_priv->mm.idle_work,
4884 i915_gem_file_idle_work_handler);
4885
e422b888
BW
4886 ret = i915_gem_context_open(dev, file);
4887 if (ret)
4888 kfree(file_priv);
b29c19b6 4889
e422b888 4890 return ret;
b29c19b6
CW
4891}
4892
5774506f
CW
4893static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4894{
4895 if (!mutex_is_locked(mutex))
4896 return false;
4897
4898#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4899 return mutex->owner == task;
4900#else
4901 /* Since UP may be pre-empted, we cannot assume that we own the lock */
4902 return false;
4903#endif
4904}
4905
7dc19d5a
DC
4906static unsigned long
4907i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
31169714 4908{
17250b71
CW
4909 struct drm_i915_private *dev_priv =
4910 container_of(shrinker,
4911 struct drm_i915_private,
4912 mm.inactive_shrinker);
4913 struct drm_device *dev = dev_priv->dev;
6c085a72 4914 struct drm_i915_gem_object *obj;
5774506f 4915 bool unlock = true;
7dc19d5a 4916 unsigned long count;
17250b71 4917
5774506f
CW
4918 if (!mutex_trylock(&dev->struct_mutex)) {
4919 if (!mutex_is_locked_by(&dev->struct_mutex, current))
d3227046 4920 return 0;
5774506f 4921
677feac2 4922 if (dev_priv->mm.shrinker_no_lock_stealing)
d3227046 4923 return 0;
677feac2 4924
5774506f
CW
4925 unlock = false;
4926 }
31169714 4927
7dc19d5a 4928 count = 0;
35c20a60 4929 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
a5570178 4930 if (obj->pages_pin_count == 0)
7dc19d5a 4931 count += obj->base.size >> PAGE_SHIFT;
fcb4a578
BW
4932
4933 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4934 if (obj->active)
4935 continue;
4936
d7f46fc4 4937 if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
7dc19d5a 4938 count += obj->base.size >> PAGE_SHIFT;
fcb4a578 4939 }
17250b71 4940
5774506f
CW
4941 if (unlock)
4942 mutex_unlock(&dev->struct_mutex);
d9973b43 4943
7dc19d5a 4944 return count;
31169714 4945}
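/*
 * [Editor's note] What the shrinker 'count' callback above reports, restated
 * as a hedged standalone sketch: unbound objects count whenever their pages
 * are not pinned; bound objects only count when idle and unpinned. sketch_*
 * names are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>

struct sketch_obj {
	size_t npages;
	bool bound;		/* on the bound_list            */
	bool active;		/* still referenced by the GPU  */
	bool pinned;		/* i915_gem_obj_is_pinned()     */
	int pages_pin_count;	/* obj->pages_pin_count         */
};

static unsigned long sketch_shrinker_count(const struct sketch_obj *objs, int n)
{
	unsigned long count = 0;

	for (int i = 0; i < n; i++) {
		const struct sketch_obj *o = &objs[i];

		if (!o->bound && o->pages_pin_count == 0)
			count += o->npages;
		else if (o->bound && !o->active && !o->pinned &&
			 o->pages_pin_count == 0)
			count += o->npages;
	}
	return count;
}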
a70a3148
BW
4946
4947/* All the new VM stuff */
4948unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
4949 struct i915_address_space *vm)
4950{
4951 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4952 struct i915_vma *vma;
4953
6f425321
BW
4954 if (!dev_priv->mm.aliasing_ppgtt ||
4955 vm == &dev_priv->mm.aliasing_ppgtt->base)
a70a3148
BW
4956 vm = &dev_priv->gtt.base;
4957
4958 BUG_ON(list_empty(&o->vma_list));
4959 list_for_each_entry(vma, &o->vma_list, vma_link) {
4960 if (vma->vm == vm)
4961 return vma->node.start;
4962
4963 }
4964 return -1;
4965}
4966
4967bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4968 struct i915_address_space *vm)
4969{
4970 struct i915_vma *vma;
4971
4972 list_for_each_entry(vma, &o->vma_list, vma_link)
8b9c2b94 4973 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
a70a3148
BW
4974 return true;
4975
4976 return false;
4977}
4978
4979bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4980{
5a1d5eb0 4981 struct i915_vma *vma;
a70a3148 4982
5a1d5eb0
CW
4983 list_for_each_entry(vma, &o->vma_list, vma_link)
4984 if (drm_mm_node_allocated(&vma->node))
a70a3148
BW
4985 return true;
4986
4987 return false;
4988}
4989
4990unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
4991 struct i915_address_space *vm)
4992{
4993 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4994 struct i915_vma *vma;
4995
6f425321
BW
4996 if (!dev_priv->mm.aliasing_ppgtt ||
4997 vm == &dev_priv->mm.aliasing_ppgtt->base)
a70a3148
BW
4998 vm = &dev_priv->gtt.base;
4999
5000 BUG_ON(list_empty(&o->vma_list));
5001
5002 list_for_each_entry(vma, &o->vma_list, vma_link)
5003 if (vma->vm == vm)
5004 return vma->node.size;
5005
5006 return 0;
5007}
5008
7dc19d5a
DC
5009static unsigned long
5010i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
5011{
5012 struct drm_i915_private *dev_priv =
5013 container_of(shrinker,
5014 struct drm_i915_private,
5015 mm.inactive_shrinker);
5016 struct drm_device *dev = dev_priv->dev;
7dc19d5a
DC
5017 unsigned long freed;
5018 bool unlock = true;
5019
5020 if (!mutex_trylock(&dev->struct_mutex)) {
5021 if (!mutex_is_locked_by(&dev->struct_mutex, current))
d3227046 5022 return SHRINK_STOP;
7dc19d5a
DC
5023
5024 if (dev_priv->mm.shrinker_no_lock_stealing)
d3227046 5025 return SHRINK_STOP;
7dc19d5a
DC
5026
5027 unlock = false;
5028 }
5029
d9973b43
CW
5030 freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
5031 if (freed < sc->nr_to_scan)
5032 freed += __i915_gem_shrink(dev_priv,
5033 sc->nr_to_scan - freed,
5034 false);
5035 if (freed < sc->nr_to_scan)
7dc19d5a
DC
5036 freed += i915_gem_shrink_all(dev_priv);
5037
5038 if (unlock)
5039 mutex_unlock(&dev->struct_mutex);
d9973b43 5040
7dc19d5a
DC
5041 return freed;
5042}
5c2abbea
BW
5043
5044struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5045{
5046 struct i915_vma *vma;
5047
5048 if (WARN_ON(list_empty(&obj->vma_list)))
5049 return NULL;
5050
5051 vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
6e164c33 5052 if (vma->vm != obj_to_ggtt(obj))
5c2abbea
BW
5053 return NULL;
5054
5055 return vma;
5056}