drivers/gpu/drm/i915/i915_gem.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						    unsigned alignment,
						    bool map_and_fenceable,
						    bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error))
	if (EXIT_COND)
		return 0;

	/* GPU is already declared terminally dead, give up. */
	if (i915_terminally_wedged(error))
		return -EIO;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

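/*
 * Illustrative userspace sketch (not part of this file): querying the
 * aperture through the ioctl above. Assumes the i915 UAPI from
 * <drm/i915_drm.h>; the device node is a placeholder and error handling
 * is trimmed.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void print_aperture(void)
{
	struct drm_i915_gem_get_aperture aper = { 0 };
	int fd = open("/dev/dri/card0", O_RDWR);	/* placeholder node */

	if (fd < 0)
		return;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aper) == 0)
		printf("aperture: %llu bytes total, %llu available\n",
		       (unsigned long long)aper.aper_size,
		       (unsigned long long)aper.aper_available_size);
	close(fd);
}
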
void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		i915_gem_object_free(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

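/*
 * Illustrative userspace sketch (not part of this file): allocating a GEM
 * object through the create ioctl above. Assumes the i915 UAPI from
 * <drm/i915_drm.h>; error handling is trimmed and the returned handle
 * should eventually be released with DRM_IOCTL_GEM_CLOSE.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int bo_create(int drm_fd, uint64_t size, uint32_t *handle)
{
	struct drm_i915_gem_create create;

	memset(&create, 0, sizeof(create));
	create.size = size;	/* rounded up to PAGE_SIZE by the kernel */

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE, &create))
		return -1;

	*handle = create.handle;
	return 0;
}
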
8461d226
DV
272static inline int
273__copy_to_user_swizzled(char __user *cpu_vaddr,
274 const char *gpu_vaddr, int gpu_offset,
275 int length)
276{
277 int ret, cpu_offset = 0;
278
279 while (length > 0) {
280 int cacheline_end = ALIGN(gpu_offset + 1, 64);
281 int this_length = min(cacheline_end - gpu_offset, length);
282 int swizzled_gpu_offset = gpu_offset ^ 64;
283
284 ret = __copy_to_user(cpu_vaddr + cpu_offset,
285 gpu_vaddr + swizzled_gpu_offset,
286 this_length);
287 if (ret)
288 return ret + length;
289
290 cpu_offset += this_length;
291 gpu_offset += this_length;
292 length -= this_length;
293 }
294
295 return 0;
296}
297
8c59967c 298static inline int
4f0c7cfb
BW
299__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
300 const char __user *cpu_vaddr,
8c59967c
DV
301 int length)
302{
303 int ret, cpu_offset = 0;
304
305 while (length > 0) {
306 int cacheline_end = ALIGN(gpu_offset + 1, 64);
307 int this_length = min(cacheline_end - gpu_offset, length);
308 int swizzled_gpu_offset = gpu_offset ^ 64;
309
310 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
311 cpu_vaddr + cpu_offset,
312 this_length);
313 if (ret)
314 return ret + length;
315
316 cpu_offset += this_length;
317 gpu_offset += this_length;
318 length -= this_length;
319 }
320
321 return 0;
322}
323
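/*
 * Illustrative sketch (not part of this file): the cacheline walk used by
 * the swizzled copy helpers above, written as plain userspace C. Each
 * 64-byte chunk has its GPU-side offset XORed with 64, so the two
 * cachelines inside every 128-byte pair swap places when bit-17
 * swizzling is in effect.
 */
#include <stdio.h>

#define CACHELINE 64

int main(void)
{
	int gpu_offset = 0, length = 256;

	while (length > 0) {
		/* Equivalent of ALIGN(gpu_offset + 1, 64) in the helpers above. */
		int cacheline_end = (gpu_offset + CACHELINE) & ~(CACHELINE - 1);
		int this_length = cacheline_end - gpu_offset;

		if (this_length > length)
			this_length = length;

		printf("copy %3d bytes: gpu offset %3d -> swizzled %3d\n",
		       this_length, gpu_offset, gpu_offset ^ 64);

		gpu_offset += this_length;
		length -= this_length;
	}
	return 0;
}
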
d174bd64
DV
324/* Per-page copy function for the shmem pread fastpath.
325 * Flushes invalid cachelines before reading the target if
326 * needs_clflush is set. */
eb01459f 327static int
d174bd64
DV
328shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
329 char __user *user_data,
330 bool page_do_bit17_swizzling, bool needs_clflush)
331{
332 char *vaddr;
333 int ret;
334
e7e58eb5 335 if (unlikely(page_do_bit17_swizzling))
d174bd64
DV
336 return -EINVAL;
337
338 vaddr = kmap_atomic(page);
339 if (needs_clflush)
340 drm_clflush_virt_range(vaddr + shmem_page_offset,
341 page_length);
342 ret = __copy_to_user_inatomic(user_data,
343 vaddr + shmem_page_offset,
344 page_length);
345 kunmap_atomic(vaddr);
346
f60d7f0c 347 return ret ? -EFAULT : 0;
d174bd64
DV
348}
349
23c18c71
DV
350static void
351shmem_clflush_swizzled_range(char *addr, unsigned long length,
352 bool swizzled)
353{
e7e58eb5 354 if (unlikely(swizzled)) {
23c18c71
DV
355 unsigned long start = (unsigned long) addr;
356 unsigned long end = (unsigned long) addr + length;
357
358 /* For swizzling simply ensure that we always flush both
359 * channels. Lame, but simple and it works. Swizzled
360 * pwrite/pread is far from a hotpath - current userspace
361 * doesn't use it at all. */
362 start = round_down(start, 128);
363 end = round_up(end, 128);
364
365 drm_clflush_virt_range((void *)start, end - start);
366 } else {
367 drm_clflush_virt_range(addr, length);
368 }
369
370}
371
d174bd64
DV
372/* Only difference to the fast-path function is that this can handle bit17
373 * and uses non-atomic copy and kmap functions. */
374static int
375shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
376 char __user *user_data,
377 bool page_do_bit17_swizzling, bool needs_clflush)
378{
379 char *vaddr;
380 int ret;
381
382 vaddr = kmap(page);
383 if (needs_clflush)
23c18c71
DV
384 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
385 page_length,
386 page_do_bit17_swizzling);
d174bd64
DV
387
388 if (page_do_bit17_swizzling)
389 ret = __copy_to_user_swizzled(user_data,
390 vaddr, shmem_page_offset,
391 page_length);
392 else
393 ret = __copy_to_user(user_data,
394 vaddr + shmem_page_offset,
395 page_length);
396 kunmap(page);
397
f60d7f0c 398 return ret ? - EFAULT : 0;
d174bd64
DV
399}
400
eb01459f 401static int
402i915_gem_shmem_pread(struct drm_device *dev,
403 struct drm_i915_gem_object *obj,
404 struct drm_i915_gem_pread *args,
405 struct drm_file *file)
eb01459f 406{
8461d226 407 char __user *user_data;
eb01459f 408 ssize_t remain;
8461d226 409 loff_t offset;
eb2c0c81 410 int shmem_page_offset, page_length, ret = 0;
8461d226 411 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
96d79b52 412 int prefaulted = 0;
8489731c 413 int needs_clflush = 0;
9da3da66
CW
414 struct scatterlist *sg;
415 int i;
eb01459f 416
2bb4629a 417 user_data = to_user_ptr(args->data_ptr);
eb01459f
EA
418 remain = args->size;
419
8461d226 420 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
eb01459f 421
8489731c
DV
422 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
423 /* If we're not in the cpu read domain, set ourself into the gtt
424 * read domain and manually flush cachelines (if required). This
425 * optimizes for the case when the gpu will dirty the data
426 * anyway again before the next pread happens. */
427 if (obj->cache_level == I915_CACHE_NONE)
428 needs_clflush = 1;
6c085a72
CW
429 if (obj->gtt_space) {
430 ret = i915_gem_object_set_to_gtt_domain(obj, false);
431 if (ret)
432 return ret;
433 }
8489731c 434 }
eb01459f 435
f60d7f0c
CW
436 ret = i915_gem_object_get_pages(obj);
437 if (ret)
438 return ret;
439
440 i915_gem_object_pin_pages(obj);
441
8461d226 442 offset = args->offset;
eb01459f 443
9da3da66 444 for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
e5281ccd
CW
445 struct page *page;
446
9da3da66
CW
447 if (i < offset >> PAGE_SHIFT)
448 continue;
449
450 if (remain <= 0)
451 break;
452
eb01459f
EA
453 /* Operation in this page
454 *
eb01459f 455 * shmem_page_offset = offset within page in shmem file
eb01459f
EA
456 * page_length = bytes to copy for this page
457 */
c8cbbb8b 458 shmem_page_offset = offset_in_page(offset);
eb01459f
EA
459 page_length = remain;
460 if ((shmem_page_offset + page_length) > PAGE_SIZE)
461 page_length = PAGE_SIZE - shmem_page_offset;
eb01459f 462
9da3da66 463 page = sg_page(sg);
8461d226
DV
464 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
465 (page_to_phys(page) & (1 << 17)) != 0;
466
d174bd64
DV
467 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
468 user_data, page_do_bit17_swizzling,
469 needs_clflush);
470 if (ret == 0)
471 goto next_page;
dbf7bff0 472
dbf7bff0
DV
473 mutex_unlock(&dev->struct_mutex);
474
96d79b52 475 if (!prefaulted) {
f56f821f 476 ret = fault_in_multipages_writeable(user_data, remain);
96d79b52
DV
477 /* Userspace is tricking us, but we've already clobbered
478 * its pages with the prefault and promised to write the
479 * data up to the first fault. Hence ignore any errors
480 * and just continue. */
481 (void)ret;
482 prefaulted = 1;
483 }
eb01459f 484
d174bd64
DV
485 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
486 user_data, page_do_bit17_swizzling,
487 needs_clflush);
eb01459f 488
dbf7bff0 489 mutex_lock(&dev->struct_mutex);
f60d7f0c 490
dbf7bff0 491next_page:
e5281ccd 492 mark_page_accessed(page);
e5281ccd 493
f60d7f0c 494 if (ret)
8461d226 495 goto out;
8461d226 496
eb01459f 497 remain -= page_length;
8461d226 498 user_data += page_length;
eb01459f
EA
499 offset += page_length;
500 }
501
4f27b75d 502out:
f60d7f0c
CW
503 i915_gem_object_unpin_pages(obj);
504
eb01459f
EA
505 return ret;
506}
507
673a394b
EA
508/**
509 * Reads data from the object referenced by handle.
510 *
511 * On error, the contents of *data are undefined.
512 */
513int
514i915_gem_pread_ioctl(struct drm_device *dev, void *data,
05394f39 515 struct drm_file *file)
673a394b
EA
516{
517 struct drm_i915_gem_pread *args = data;
05394f39 518 struct drm_i915_gem_object *obj;
35b62a89 519 int ret = 0;
673a394b 520
51311d0a
CW
521 if (args->size == 0)
522 return 0;
523
524 if (!access_ok(VERIFY_WRITE,
2bb4629a 525 to_user_ptr(args->data_ptr),
51311d0a
CW
526 args->size))
527 return -EFAULT;
528
4f27b75d 529 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 530 if (ret)
4f27b75d 531 return ret;
673a394b 532
05394f39 533 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 534 if (&obj->base == NULL) {
1d7cfea1
CW
535 ret = -ENOENT;
536 goto unlock;
4f27b75d 537 }
673a394b 538
7dcd2499 539 /* Bounds check source. */
05394f39
CW
540 if (args->offset > obj->base.size ||
541 args->size > obj->base.size - args->offset) {
ce9d419d 542 ret = -EINVAL;
35b62a89 543 goto out;
ce9d419d
CW
544 }
545
1286ff73
DV
546 /* prime objects have no backing filp to GEM pread/pwrite
547 * pages from.
548 */
549 if (!obj->base.filp) {
550 ret = -EINVAL;
551 goto out;
552 }
553
db53a302
CW
554 trace_i915_gem_object_pread(obj, args->offset, args->size);
555
dbf7bff0 556 ret = i915_gem_shmem_pread(dev, obj, args, file);
673a394b 557
35b62a89 558out:
05394f39 559 drm_gem_object_unreference(&obj->base);
1d7cfea1 560unlock:
4f27b75d 561 mutex_unlock(&dev->struct_mutex);
eb01459f 562 return ret;
673a394b
EA
563}
564
0839ccb8
KP
565/* This is the fast write path which cannot handle
566 * page faults in the source data
9b7530cc 567 */
0839ccb8
KP
568
569static inline int
570fast_user_write(struct io_mapping *mapping,
571 loff_t page_base, int page_offset,
572 char __user *user_data,
573 int length)
9b7530cc 574{
4f0c7cfb
BW
575 void __iomem *vaddr_atomic;
576 void *vaddr;
0839ccb8 577 unsigned long unwritten;
9b7530cc 578
3e4d3af5 579 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
4f0c7cfb
BW
580 /* We can use the cpu mem copy function because this is X86. */
581 vaddr = (void __force*)vaddr_atomic + page_offset;
582 unwritten = __copy_from_user_inatomic_nocache(vaddr,
0839ccb8 583 user_data, length);
3e4d3af5 584 io_mapping_unmap_atomic(vaddr_atomic);
fbd5a26d 585 return unwritten;
0839ccb8
KP
586}
587
3de09aa3
EA
588/**
589 * This is the fast pwrite path, where we copy the data directly from the
590 * user into the GTT, uncached.
591 */
673a394b 592static int
05394f39
CW
593i915_gem_gtt_pwrite_fast(struct drm_device *dev,
594 struct drm_i915_gem_object *obj,
3de09aa3 595 struct drm_i915_gem_pwrite *args,
05394f39 596 struct drm_file *file)
673a394b 597{
0839ccb8 598 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b 599 ssize_t remain;
0839ccb8 600 loff_t offset, page_base;
673a394b 601 char __user *user_data;
935aaa69
DV
602 int page_offset, page_length, ret;
603
86a1ee26 604 ret = i915_gem_object_pin(obj, 0, true, true);
935aaa69
DV
605 if (ret)
606 goto out;
607
608 ret = i915_gem_object_set_to_gtt_domain(obj, true);
609 if (ret)
610 goto out_unpin;
611
612 ret = i915_gem_object_put_fence(obj);
613 if (ret)
614 goto out_unpin;
673a394b 615
2bb4629a 616 user_data = to_user_ptr(args->data_ptr);
673a394b 617 remain = args->size;
673a394b 618
05394f39 619 offset = obj->gtt_offset + args->offset;
673a394b
EA
620
621 while (remain > 0) {
622 /* Operation in this page
623 *
0839ccb8
KP
624 * page_base = page offset within aperture
625 * page_offset = offset within page
626 * page_length = bytes to copy for this page
673a394b 627 */
c8cbbb8b
CW
628 page_base = offset & PAGE_MASK;
629 page_offset = offset_in_page(offset);
0839ccb8
KP
630 page_length = remain;
631 if ((page_offset + remain) > PAGE_SIZE)
632 page_length = PAGE_SIZE - page_offset;
633
0839ccb8 634 /* If we get a fault while copying data, then (presumably) our
3de09aa3
EA
635 * source page isn't available. Return the error and we'll
636 * retry in the slow path.
0839ccb8 637 */
5d4545ae 638 if (fast_user_write(dev_priv->gtt.mappable, page_base,
639 page_offset, user_data, page_length)) {
640 ret = -EFAULT;
641 goto out_unpin;
642 }
673a394b 643
0839ccb8
KP
644 remain -= page_length;
645 user_data += page_length;
646 offset += page_length;
673a394b 647 }
673a394b 648
935aaa69
DV
649out_unpin:
650 i915_gem_object_unpin(obj);
651out:
3de09aa3 652 return ret;
673a394b
EA
653}
654
d174bd64
DV
655/* Per-page copy function for the shmem pwrite fastpath.
656 * Flushes invalid cachelines before writing to the target if
657 * needs_clflush_before is set and flushes out any written cachelines after
658 * writing if needs_clflush is set. */
3043c60c 659static int
d174bd64
DV
660shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
661 char __user *user_data,
662 bool page_do_bit17_swizzling,
663 bool needs_clflush_before,
664 bool needs_clflush_after)
673a394b 665{
d174bd64 666 char *vaddr;
673a394b 667 int ret;
3de09aa3 668
e7e58eb5 669 if (unlikely(page_do_bit17_swizzling))
d174bd64 670 return -EINVAL;
3de09aa3 671
d174bd64
DV
672 vaddr = kmap_atomic(page);
673 if (needs_clflush_before)
674 drm_clflush_virt_range(vaddr + shmem_page_offset,
675 page_length);
676 ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
677 user_data,
678 page_length);
679 if (needs_clflush_after)
680 drm_clflush_virt_range(vaddr + shmem_page_offset,
681 page_length);
682 kunmap_atomic(vaddr);
3de09aa3 683
755d2218 684 return ret ? -EFAULT : 0;
3de09aa3
EA
685}
686
d174bd64
DV
687/* Only difference to the fast-path function is that this can handle bit17
688 * and uses non-atomic copy and kmap functions. */
3043c60c 689static int
d174bd64
DV
690shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
691 char __user *user_data,
692 bool page_do_bit17_swizzling,
693 bool needs_clflush_before,
694 bool needs_clflush_after)
673a394b 695{
d174bd64
DV
696 char *vaddr;
697 int ret;
e5281ccd 698
d174bd64 699 vaddr = kmap(page);
e7e58eb5 700 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
23c18c71
DV
701 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
702 page_length,
703 page_do_bit17_swizzling);
d174bd64
DV
704 if (page_do_bit17_swizzling)
705 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
e5281ccd
CW
706 user_data,
707 page_length);
d174bd64
DV
708 else
709 ret = __copy_from_user(vaddr + shmem_page_offset,
710 user_data,
711 page_length);
712 if (needs_clflush_after)
23c18c71
DV
713 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
714 page_length,
715 page_do_bit17_swizzling);
d174bd64 716 kunmap(page);
40123c1f 717
755d2218 718 return ret ? -EFAULT : 0;
40123c1f
EA
719}
720
40123c1f 721static int
722i915_gem_shmem_pwrite(struct drm_device *dev,
723 struct drm_i915_gem_object *obj,
724 struct drm_i915_gem_pwrite *args,
725 struct drm_file *file)
40123c1f 726{
40123c1f 727 ssize_t remain;
8c59967c
DV
728 loff_t offset;
729 char __user *user_data;
eb2c0c81 730 int shmem_page_offset, page_length, ret = 0;
8c59967c 731 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
e244a443 732 int hit_slowpath = 0;
58642885
DV
733 int needs_clflush_after = 0;
734 int needs_clflush_before = 0;
9da3da66
CW
735 int i;
736 struct scatterlist *sg;
40123c1f 737
2bb4629a 738 user_data = to_user_ptr(args->data_ptr);
40123c1f
EA
739 remain = args->size;
740
8c59967c 741 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
40123c1f 742
58642885
DV
743 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
744 /* If we're not in the cpu write domain, set ourself into the gtt
745 * write domain and manually flush cachelines (if required). This
746 * optimizes for the case when the gpu will use the data
747 * right away and we therefore have to clflush anyway. */
748 if (obj->cache_level == I915_CACHE_NONE)
749 needs_clflush_after = 1;
6c085a72
CW
750 if (obj->gtt_space) {
751 ret = i915_gem_object_set_to_gtt_domain(obj, true);
752 if (ret)
753 return ret;
754 }
58642885
DV
755 }
756 /* Same trick applies for invalidate partially written cachelines before
757 * writing. */
758 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
759 && obj->cache_level == I915_CACHE_NONE)
760 needs_clflush_before = 1;
761
755d2218
CW
762 ret = i915_gem_object_get_pages(obj);
763 if (ret)
764 return ret;
765
766 i915_gem_object_pin_pages(obj);
767
673a394b 768 offset = args->offset;
05394f39 769 obj->dirty = 1;
673a394b 770
9da3da66 771 for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
e5281ccd 772 struct page *page;
58642885 773 int partial_cacheline_write;
e5281ccd 774
9da3da66
CW
775 if (i < offset >> PAGE_SHIFT)
776 continue;
777
778 if (remain <= 0)
779 break;
780
40123c1f
EA
781 /* Operation in this page
782 *
40123c1f 783 * shmem_page_offset = offset within page in shmem file
40123c1f
EA
784 * page_length = bytes to copy for this page
785 */
c8cbbb8b 786 shmem_page_offset = offset_in_page(offset);
40123c1f
EA
787
788 page_length = remain;
789 if ((shmem_page_offset + page_length) > PAGE_SIZE)
790 page_length = PAGE_SIZE - shmem_page_offset;
40123c1f 791
58642885
DV
792 /* If we don't overwrite a cacheline completely we need to be
793 * careful to have up-to-date data by first clflushing. Don't
794 * overcomplicate things and flush the entire page. */
795 partial_cacheline_write = needs_clflush_before &&
796 ((shmem_page_offset | page_length)
797 & (boot_cpu_data.x86_clflush_size - 1));
798
9da3da66 799 page = sg_page(sg);
8c59967c
DV
800 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
801 (page_to_phys(page) & (1 << 17)) != 0;
802
d174bd64
DV
803 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
804 user_data, page_do_bit17_swizzling,
805 partial_cacheline_write,
806 needs_clflush_after);
807 if (ret == 0)
808 goto next_page;
e244a443
DV
809
810 hit_slowpath = 1;
e244a443 811 mutex_unlock(&dev->struct_mutex);
d174bd64
DV
812 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
813 user_data, page_do_bit17_swizzling,
814 partial_cacheline_write,
815 needs_clflush_after);
40123c1f 816
e244a443 817 mutex_lock(&dev->struct_mutex);
755d2218 818
e244a443 819next_page:
e5281ccd
CW
820 set_page_dirty(page);
821 mark_page_accessed(page);
e5281ccd 822
755d2218 823 if (ret)
8c59967c 824 goto out;
8c59967c 825
40123c1f 826 remain -= page_length;
8c59967c 827 user_data += page_length;
40123c1f 828 offset += page_length;
673a394b
EA
829 }
830
fbd5a26d 831out:
755d2218
CW
832 i915_gem_object_unpin_pages(obj);
833
e244a443 834 if (hit_slowpath) {
8dcf015e
DV
835 /*
836 * Fixup: Flush cpu caches in case we didn't flush the dirty
837 * cachelines in-line while writing and the object moved
838 * out of the cpu write domain while we've dropped the lock.
839 */
840 if (!needs_clflush_after &&
841 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
e244a443 842 i915_gem_clflush_object(obj);
e76e9aeb 843 i915_gem_chipset_flush(dev);
e244a443 844 }
8c59967c 845 }
673a394b 846
58642885 847 if (needs_clflush_after)
e76e9aeb 848 i915_gem_chipset_flush(dev);
58642885 849
40123c1f 850 return ret;
673a394b
EA
851}
852
853/**
854 * Writes data to the object referenced by handle.
855 *
856 * On error, the contents of the buffer that were to be modified are undefined.
857 */
858int
859i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
fbd5a26d 860 struct drm_file *file)
673a394b
EA
861{
862 struct drm_i915_gem_pwrite *args = data;
05394f39 863 struct drm_i915_gem_object *obj;
51311d0a
CW
864 int ret;
865
866 if (args->size == 0)
867 return 0;
868
869 if (!access_ok(VERIFY_READ,
2bb4629a 870 to_user_ptr(args->data_ptr),
51311d0a
CW
871 args->size))
872 return -EFAULT;
873
2bb4629a 874 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
f56f821f 875 args->size);
51311d0a
CW
876 if (ret)
877 return -EFAULT;
673a394b 878
fbd5a26d 879 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 880 if (ret)
fbd5a26d 881 return ret;
1d7cfea1 882
05394f39 883 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 884 if (&obj->base == NULL) {
1d7cfea1
CW
885 ret = -ENOENT;
886 goto unlock;
fbd5a26d 887 }
673a394b 888
7dcd2499 889 /* Bounds check destination. */
05394f39
CW
890 if (args->offset > obj->base.size ||
891 args->size > obj->base.size - args->offset) {
ce9d419d 892 ret = -EINVAL;
35b62a89 893 goto out;
ce9d419d
CW
894 }
895
1286ff73
DV
896 /* prime objects have no backing filp to GEM pread/pwrite
897 * pages from.
898 */
899 if (!obj->base.filp) {
900 ret = -EINVAL;
901 goto out;
902 }
903
db53a302
CW
904 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
905
935aaa69 906 ret = -EFAULT;
673a394b
EA
907 /* We can only do the GTT pwrite on untiled buffers, as otherwise
908 * it would end up going through the fenced access, and we'll get
909 * different detiling behavior between reading and writing.
910 * pread/pwrite currently are reading and writing from the CPU
911 * perspective, requiring manual detiling by the client.
912 */
5c0480f2 913 if (obj->phys_obj) {
fbd5a26d 914 ret = i915_gem_phys_pwrite(dev, obj, args, file);
5c0480f2
DV
915 goto out;
916 }
917
86a1ee26 918 if (obj->cache_level == I915_CACHE_NONE &&
c07496fa 919 obj->tiling_mode == I915_TILING_NONE &&
5c0480f2 920 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
fbd5a26d 921 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
935aaa69
DV
922 /* Note that the gtt paths might fail with non-page-backed user
923 * pointers (e.g. gtt mappings when moving data between
924 * textures). Fallback to the shmem path in that case. */
fbd5a26d 925 }
673a394b 926
86a1ee26 927 if (ret == -EFAULT || ret == -ENOSPC)
935aaa69 928 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
5c0480f2 929
35b62a89 930out:
05394f39 931 drm_gem_object_unreference(&obj->base);
1d7cfea1 932unlock:
fbd5a26d 933 mutex_unlock(&dev->struct_mutex);
673a394b
EA
934 return ret;
935}
936
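/*
 * Illustrative userspace sketch (not part of this file): uploading data to
 * a GEM object through the pwrite ioctl handled above. Assumes the i915
 * UAPI from <drm/i915_drm.h>; the fd and handle come from the caller and
 * error handling is trimmed.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int bo_pwrite(int drm_fd, uint32_t handle,
		     uint64_t offset, const void *data, uint64_t size)
{
	struct drm_i915_gem_pwrite pwrite;

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = handle;
	pwrite.offset = offset;	/* byte offset into the object */
	pwrite.size = size;
	pwrite.data_ptr = (uintptr_t)data;

	/* The kernel chooses between the GTT fast path and the shmem path. */
	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}
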
b361237b 937int
33196ded 938i915_gem_check_wedge(struct i915_gpu_error *error,
b361237b
CW
939 bool interruptible)
940{
1f83fee0 941 if (i915_reset_in_progress(error)) {
b361237b
CW
942 /* Non-interruptible callers can't handle -EAGAIN, hence return
943 * -EIO unconditionally for these. */
944 if (!interruptible)
945 return -EIO;
946
1f83fee0
DV
947 /* Recovery complete, but the reset failed ... */
948 if (i915_terminally_wedged(error))
b361237b
CW
949 return -EIO;
950
951 return -EAGAIN;
952 }
953
954 return 0;
955}
956
957/*
958 * Compare seqno against outstanding lazy request. Emit a request if they are
959 * equal.
960 */
961static int
962i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
963{
964 int ret;
965
966 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
967
968 ret = 0;
969 if (seqno == ring->outstanding_lazy_request)
970 ret = i915_add_request(ring, NULL, NULL);
971
972 return ret;
973}
974
975/**
976 * __wait_seqno - wait until execution of seqno has finished
977 * @ring: the ring expected to report seqno
978 * @seqno: the sequence number to wait for
f69061be 979 * @reset_counter: reset sequence associated with the given seqno
b361237b
CW
980 * @interruptible: do an interruptible wait (normally yes)
981 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
982 *
f69061be
DV
983 * Note: It is of utmost importance that the passed in seqno and reset_counter
984 * values have been read by the caller in an smp safe manner. Where read-side
985 * locks are involved, it is sufficient to read the reset_counter before
986 * unlocking the lock that protects the seqno. For lockless tricks, the
987 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
988 * inserted.
989 *
b361237b
CW
990 * Returns 0 if the seqno was found within the allotted time. Else returns the
991 * errno with remaining time filled in timeout argument.
992 */
993static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
f69061be 994 unsigned reset_counter,
b361237b
CW
995 bool interruptible, struct timespec *timeout)
996{
997 drm_i915_private_t *dev_priv = ring->dev->dev_private;
998 struct timespec before, now, wait_time={1,0};
999 unsigned long timeout_jiffies;
1000 long end;
1001 bool wait_forever = true;
1002 int ret;
1003
1004 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1005 return 0;
1006
1007 trace_i915_gem_request_wait_begin(ring, seqno);
1008
1009 if (timeout != NULL) {
1010 wait_time = *timeout;
1011 wait_forever = false;
1012 }
1013
1014 timeout_jiffies = timespec_to_jiffies(&wait_time);
1015
1016 if (WARN_ON(!ring->irq_get(ring)))
1017 return -ENODEV;
1018
1019 /* Record current time in case interrupted by signal, or wedged */
1020 getrawmonotonic(&before);
1021
1022#define EXIT_COND \
1023 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1024 i915_reset_in_progress(&dev_priv->gpu_error) || \
1025 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
b361237b
CW
1026 do {
1027 if (interruptible)
1028 end = wait_event_interruptible_timeout(ring->irq_queue,
1029 EXIT_COND,
1030 timeout_jiffies);
1031 else
1032 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1033 timeout_jiffies);
1034
f69061be
DV
1035 /* We need to check whether any gpu reset happened in between
1036 * the caller grabbing the seqno and now ... */
1037 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1038 end = -EAGAIN;
1039
1040 /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
1041 * gone. */
33196ded 1042 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
b361237b
CW
1043 if (ret)
1044 end = ret;
1045 } while (end == 0 && wait_forever);
1046
1047 getrawmonotonic(&now);
1048
1049 ring->irq_put(ring);
1050 trace_i915_gem_request_wait_end(ring, seqno);
1051#undef EXIT_COND
1052
1053 if (timeout) {
1054 struct timespec sleep_time = timespec_sub(now, before);
1055 *timeout = timespec_sub(*timeout, sleep_time);
1056 }
1057
1058 switch (end) {
1059 case -EIO:
1060 case -EAGAIN: /* Wedged */
1061 case -ERESTARTSYS: /* Signal */
1062 return (int)end;
1063 case 0: /* Timeout */
1064 if (timeout)
1065 set_normalized_timespec(timeout, 0, 0);
1066 return -ETIME;
1067 default: /* Completed */
1068 WARN_ON(end < 0); /* We're not aware of other errors */
1069 return 0;
1070 }
1071}
1072
1073/**
1074 * Waits for a sequence number to be signaled, and cleans up the
1075 * request and object lists appropriately for that event.
1076 */
1077int
1078i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1079{
1080 struct drm_device *dev = ring->dev;
1081 struct drm_i915_private *dev_priv = dev->dev_private;
1082 bool interruptible = dev_priv->mm.interruptible;
1083 int ret;
1084
1085 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1086 BUG_ON(seqno == 0);
1087
33196ded 1088 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
b361237b
CW
1089 if (ret)
1090 return ret;
1091
1092 ret = i915_gem_check_olr(ring, seqno);
1093 if (ret)
1094 return ret;
1095
f69061be
DV
1096 return __wait_seqno(ring, seqno,
1097 atomic_read(&dev_priv->gpu_error.reset_counter),
1098 interruptible, NULL);
b361237b
CW
1099}
1100
1101/**
1102 * Ensures that all rendering to the object has completed and the object is
1103 * safe to unbind from the GTT or access from the CPU.
1104 */
1105static __must_check int
1106i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1107 bool readonly)
1108{
1109 struct intel_ring_buffer *ring = obj->ring;
1110 u32 seqno;
1111 int ret;
1112
1113 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1114 if (seqno == 0)
1115 return 0;
1116
1117 ret = i915_wait_seqno(ring, seqno);
1118 if (ret)
1119 return ret;
1120
1121 i915_gem_retire_requests_ring(ring);
1122
1123 /* Manually manage the write flush as we may have not yet
1124 * retired the buffer.
1125 */
1126 if (obj->last_write_seqno &&
1127 i915_seqno_passed(seqno, obj->last_write_seqno)) {
1128 obj->last_write_seqno = 0;
1129 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1130 }
1131
1132 return 0;
1133}
1134
3236f57a
CW
1135/* A nonblocking variant of the above wait. This is a highly dangerous routine
1136 * as the object state may change during this call.
1137 */
1138static __must_check int
1139i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1140 bool readonly)
1141{
1142 struct drm_device *dev = obj->base.dev;
1143 struct drm_i915_private *dev_priv = dev->dev_private;
1144 struct intel_ring_buffer *ring = obj->ring;
f69061be 1145 unsigned reset_counter;
3236f57a
CW
1146 u32 seqno;
1147 int ret;
1148
1149 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1150 BUG_ON(!dev_priv->mm.interruptible);
1151
1152 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1153 if (seqno == 0)
1154 return 0;
1155
33196ded 1156 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
3236f57a
CW
1157 if (ret)
1158 return ret;
1159
1160 ret = i915_gem_check_olr(ring, seqno);
1161 if (ret)
1162 return ret;
1163
f69061be 1164 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3236f57a 1165 mutex_unlock(&dev->struct_mutex);
f69061be 1166 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3236f57a
CW
1167 mutex_lock(&dev->struct_mutex);
1168
1169 i915_gem_retire_requests_ring(ring);
1170
1171 /* Manually manage the write flush as we may have not yet
1172 * retired the buffer.
1173 */
1174 if (obj->last_write_seqno &&
1175 i915_seqno_passed(seqno, obj->last_write_seqno)) {
1176 obj->last_write_seqno = 0;
1177 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1178 }
1179
1180 return ret;
1181}
1182
673a394b 1183/**
2ef7eeaa
EA
1184 * Called when user space prepares to use an object with the CPU, either
1185 * through the mmap ioctl's mapping or a GTT mapping.
673a394b
EA
1186 */
1187int
1188i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
05394f39 1189 struct drm_file *file)
673a394b
EA
1190{
1191 struct drm_i915_gem_set_domain *args = data;
05394f39 1192 struct drm_i915_gem_object *obj;
2ef7eeaa
EA
1193 uint32_t read_domains = args->read_domains;
1194 uint32_t write_domain = args->write_domain;
673a394b
EA
1195 int ret;
1196
2ef7eeaa 1197 /* Only handle setting domains to types used by the CPU. */
21d509e3 1198 if (write_domain & I915_GEM_GPU_DOMAINS)
2ef7eeaa
EA
1199 return -EINVAL;
1200
21d509e3 1201 if (read_domains & I915_GEM_GPU_DOMAINS)
2ef7eeaa
EA
1202 return -EINVAL;
1203
1204 /* Having something in the write domain implies it's in the read
1205 * domain, and only that read domain. Enforce that in the request.
1206 */
1207 if (write_domain != 0 && read_domains != write_domain)
1208 return -EINVAL;
1209
76c1dec1 1210 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 1211 if (ret)
76c1dec1 1212 return ret;
1d7cfea1 1213
05394f39 1214 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 1215 if (&obj->base == NULL) {
1d7cfea1
CW
1216 ret = -ENOENT;
1217 goto unlock;
76c1dec1 1218 }
673a394b 1219
3236f57a
CW
1220 /* Try to flush the object off the GPU without holding the lock.
1221 * We will repeat the flush holding the lock in the normal manner
1222 * to catch cases where we are gazumped.
1223 */
1224 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1225 if (ret)
1226 goto unref;
1227
2ef7eeaa
EA
1228 if (read_domains & I915_GEM_DOMAIN_GTT) {
1229 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
02354392
EA
1230
1231 /* Silently promote "you're not bound, there was nothing to do"
1232 * to success, since the client was just asking us to
1233 * make sure everything was done.
1234 */
1235 if (ret == -EINVAL)
1236 ret = 0;
2ef7eeaa 1237 } else {
e47c68e9 1238 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
2ef7eeaa
EA
1239 }
1240
3236f57a 1241unref:
05394f39 1242 drm_gem_object_unreference(&obj->base);
1d7cfea1 1243unlock:
673a394b
EA
1244 mutex_unlock(&dev->struct_mutex);
1245 return ret;
1246}
1247
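/*
 * Illustrative userspace sketch (not part of this file): moving an object
 * into the GTT domain before access, mirroring the read_domains/write_domain
 * rules enforced by the ioctl above. Assumes the i915 UAPI from
 * <drm/i915_drm.h>; error handling is trimmed.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int bo_set_to_gtt_domain(int drm_fd, uint32_t handle, int writing)
{
	struct drm_i915_gem_set_domain sd = {
		.handle = handle,
		.read_domains = I915_GEM_DOMAIN_GTT,
		/* A write domain implies the same read domain, per the check above. */
		.write_domain = writing ? I915_GEM_DOMAIN_GTT : 0,
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}
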
1248/**
1249 * Called when user space has done writes to this buffer
1250 */
1251int
1252i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
05394f39 1253 struct drm_file *file)
673a394b
EA
1254{
1255 struct drm_i915_gem_sw_finish *args = data;
05394f39 1256 struct drm_i915_gem_object *obj;
673a394b
EA
1257 int ret = 0;
1258
76c1dec1 1259 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 1260 if (ret)
76c1dec1 1261 return ret;
1d7cfea1 1262
05394f39 1263 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 1264 if (&obj->base == NULL) {
1d7cfea1
CW
1265 ret = -ENOENT;
1266 goto unlock;
673a394b
EA
1267 }
1268
673a394b 1269 /* Pinned buffers may be scanout, so flush the cache */
05394f39 1270 if (obj->pin_count)
e47c68e9
EA
1271 i915_gem_object_flush_cpu_write_domain(obj);
1272
05394f39 1273 drm_gem_object_unreference(&obj->base);
1d7cfea1 1274unlock:
673a394b
EA
1275 mutex_unlock(&dev->struct_mutex);
1276 return ret;
1277}
1278
1279/**
1280 * Maps the contents of an object, returning the address it is mapped
1281 * into.
1282 *
1283 * While the mapping holds a reference on the contents of the object, it doesn't
1284 * imply a ref on the object itself.
1285 */
1286int
1287i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
05394f39 1288 struct drm_file *file)
673a394b
EA
1289{
1290 struct drm_i915_gem_mmap *args = data;
1291 struct drm_gem_object *obj;
673a394b
EA
1292 unsigned long addr;
1293
05394f39 1294 obj = drm_gem_object_lookup(dev, file, args->handle);
673a394b 1295 if (obj == NULL)
bf79cb91 1296 return -ENOENT;
673a394b 1297
1286ff73
DV
1298 /* prime objects have no backing filp to GEM mmap
1299 * pages from.
1300 */
1301 if (!obj->filp) {
1302 drm_gem_object_unreference_unlocked(obj);
1303 return -EINVAL;
1304 }
1305
6be5ceb0 1306 addr = vm_mmap(obj->filp, 0, args->size,
673a394b
EA
1307 PROT_READ | PROT_WRITE, MAP_SHARED,
1308 args->offset);
bc9025bd 1309 drm_gem_object_unreference_unlocked(obj);
673a394b
EA
1310 if (IS_ERR((void *)addr))
1311 return addr;
1312
1313 args->addr_ptr = (uint64_t) addr;
1314
1315 return 0;
1316}
1317
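/*
 * Illustrative userspace sketch (not part of this file): obtaining a CPU
 * mapping of an object's shmem backing store via the mmap ioctl above.
 * Assumes the i915 UAPI from <drm/i915_drm.h>; error handling is trimmed.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void *bo_mmap_cpu(int drm_fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.offset = 0;
	arg.size = size;

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
		return NULL;

	/* addr_ptr carries the address that vm_mmap() returned in the handler above. */
	return (void *)(uintptr_t)arg.addr_ptr;
}
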
de151cf6
JB
1318/**
1319 * i915_gem_fault - fault a page into the GTT
1320 * @vma: VMA in question
1321 * @vmf: fault info
1322 *
1323 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1324 * from userspace. The fault handler takes care of binding the object to
1325 * the GTT (if needed), allocating and programming a fence register (again,
1326 * only if needed based on whether the old reg is still valid or the object
1327 * is tiled) and inserting a new PTE into the faulting process.
1328 *
1329 * Note that the faulting process may involve evicting existing objects
1330 * from the GTT and/or fence registers to make room. So performance may
1331 * suffer if the GTT working set is large or there are few fence registers
1332 * left.
1333 */
1334int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1335{
05394f39
CW
1336 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1337 struct drm_device *dev = obj->base.dev;
7d1c4804 1338 drm_i915_private_t *dev_priv = dev->dev_private;
de151cf6
JB
1339 pgoff_t page_offset;
1340 unsigned long pfn;
1341 int ret = 0;
0f973f27 1342 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
de151cf6
JB
1343
1344 /* We don't use vmf->pgoff since that has the fake offset */
1345 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1346 PAGE_SHIFT;
1347
d9bc7e9f
CW
1348 ret = i915_mutex_lock_interruptible(dev);
1349 if (ret)
1350 goto out;
a00b10c3 1351
db53a302
CW
1352 trace_i915_gem_object_fault(obj, page_offset, true, write);
1353
eb119bd6
CW
1354 /* Access to snoopable pages through the GTT is incoherent. */
1355 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1356 ret = -EINVAL;
1357 goto unlock;
1358 }
1359
d9bc7e9f 1360 /* Now bind it into the GTT if needed */
c9839303
CW
1361 ret = i915_gem_object_pin(obj, 0, true, false);
1362 if (ret)
1363 goto unlock;
4a684a41 1364
c9839303
CW
1365 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1366 if (ret)
1367 goto unpin;
74898d7e 1368
06d98131 1369 ret = i915_gem_object_get_fence(obj);
d9e86c0e 1370 if (ret)
c9839303 1371 goto unpin;
7d1c4804 1372
6299f992
CW
1373 obj->fault_mappable = true;
1374
5d4545ae 1375 pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
de151cf6
JB
1376 page_offset;
1377
1378 /* Finally, remap it using the new GTT offset */
1379 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
c9839303
CW
1380unpin:
1381 i915_gem_object_unpin(obj);
c715089f 1382unlock:
de151cf6 1383 mutex_unlock(&dev->struct_mutex);
d9bc7e9f 1384out:
de151cf6 1385 switch (ret) {
d9bc7e9f 1386 case -EIO:
a9340cca
DV
1387 /* If this -EIO is due to a gpu hang, give the reset code a
1388 * chance to clean up the mess. Otherwise return the proper
1389 * SIGBUS. */
1f83fee0 1390 if (i915_terminally_wedged(&dev_priv->gpu_error))
a9340cca 1391 return VM_FAULT_SIGBUS;
045e769a 1392 case -EAGAIN:
d9bc7e9f
CW
1393 /* Give the error handler a chance to run and move the
1394 * objects off the GPU active list. Next time we service the
1395 * fault, we should be able to transition the page into the
1396 * GTT without touching the GPU (and so avoid further
1397 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
1398 * with coherency, just lost writes.
1399 */
045e769a 1400 set_need_resched();
c715089f
CW
1401 case 0:
1402 case -ERESTARTSYS:
bed636ab 1403 case -EINTR:
e79e0fe3
DR
1404 case -EBUSY:
1405 /*
1406 * EBUSY is ok: this just means that another thread
1407 * already did the job.
1408 */
c715089f 1409 return VM_FAULT_NOPAGE;
de151cf6 1410 case -ENOMEM:
de151cf6 1411 return VM_FAULT_OOM;
a7c2e1aa
DV
1412 case -ENOSPC:
1413 return VM_FAULT_SIGBUS;
de151cf6 1414 default:
a7c2e1aa 1415 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
c715089f 1416 return VM_FAULT_SIGBUS;
de151cf6
JB
1417 }
1418}
1419
901782b2
CW
1420/**
1421 * i915_gem_release_mmap - remove physical page mappings
1422 * @obj: obj in question
1423 *
af901ca1 1424 * Preserve the reservation of the mmapping with the DRM core code, but
901782b2
CW
1425 * relinquish ownership of the pages back to the system.
1426 *
1427 * It is vital that we remove the page mapping if we have mapped a tiled
1428 * object through the GTT and then lose the fence register due to
1429 * resource pressure. Similarly if the object has been moved out of the
1430 * aperture, then pages mapped into userspace must be revoked. Removing the
1431 * mapping will then trigger a page fault on the next user access, allowing
1432 * fixup by i915_gem_fault().
1433 */
d05ca301 1434void
05394f39 1435i915_gem_release_mmap(struct drm_i915_gem_object *obj)
901782b2 1436{
6299f992
CW
1437 if (!obj->fault_mappable)
1438 return;
901782b2 1439
f6e47884
CW
1440 if (obj->base.dev->dev_mapping)
1441 unmap_mapping_range(obj->base.dev->dev_mapping,
1442 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1443 obj->base.size, 1);
fb7d516a 1444
6299f992 1445 obj->fault_mappable = false;
901782b2
CW
1446}
1447
0fa87796 1448uint32_t
e28f8711 1449i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
92b88aeb 1450{
e28f8711 1451 uint32_t gtt_size;
92b88aeb
CW
1452
1453 if (INTEL_INFO(dev)->gen >= 4 ||
e28f8711
CW
1454 tiling_mode == I915_TILING_NONE)
1455 return size;
92b88aeb
CW
1456
1457 /* Previous chips need a power-of-two fence region when tiling */
1458 if (INTEL_INFO(dev)->gen == 3)
e28f8711 1459 gtt_size = 1024*1024;
92b88aeb 1460 else
e28f8711 1461 gtt_size = 512*1024;
92b88aeb 1462
e28f8711
CW
1463 while (gtt_size < size)
1464 gtt_size <<= 1;
92b88aeb 1465
e28f8711 1466 return gtt_size;
92b88aeb
CW
1467}
1468
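/*
 * Worked example for the helper above (values assumed for illustration):
 * on gen3 the fence region starts at 1MB and doubles until it covers the
 * object, so a 1.5MB tiled object needs a 2MB region; on gen2 the same
 * object rounds up from 512KB through 1MB to the same 2MB. On gen4+, or
 * for untiled objects, the object size is returned unchanged.
 */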
de151cf6
JB
1469/**
1470 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1471 * @obj: object to check
1472 *
1473 * Return the required GTT alignment for an object, taking into account
5e783301 1474 * potential fence register mapping.
de151cf6 1475 */
d865110c
ID
1476uint32_t
1477i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1478 int tiling_mode, bool fenced)
de151cf6 1479{
de151cf6
JB
1480 /*
1481 * Minimum alignment is 4k (GTT page size), but might be greater
1482 * if a fence register is needed for the object.
1483 */
d865110c 1484 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
e28f8711 1485 tiling_mode == I915_TILING_NONE)
de151cf6
JB
1486 return 4096;
1487
a00b10c3
CW
1488 /*
1489 * Previous chips need to be aligned to the size of the smallest
1490 * fence register that can contain the object.
1491 */
e28f8711 1492 return i915_gem_get_gtt_size(dev, size, tiling_mode);
a00b10c3
CW
1493}
1494
d8cb5086
CW
1495static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1496{
1497 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1498 int ret;
1499
1500 if (obj->base.map_list.map)
1501 return 0;
1502
da494d7c
DV
1503 dev_priv->mm.shrinker_no_lock_stealing = true;
1504
d8cb5086
CW
1505 ret = drm_gem_create_mmap_offset(&obj->base);
1506 if (ret != -ENOSPC)
da494d7c 1507 goto out;
d8cb5086
CW
1508
1509 /* Badly fragmented mmap space? The only way we can recover
1510 * space is by destroying unwanted objects. We can't randomly release
1511 * mmap_offsets as userspace expects them to be persistent for the
1512 * lifetime of the objects. The closest we can is to release the
1513 * offsets on purgeable objects by truncating it and marking it purged,
1514 * which prevents userspace from ever using that object again.
1515 */
1516 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1517 ret = drm_gem_create_mmap_offset(&obj->base);
1518 if (ret != -ENOSPC)
da494d7c 1519 goto out;
d8cb5086
CW
1520
1521 i915_gem_shrink_all(dev_priv);
da494d7c
DV
1522 ret = drm_gem_create_mmap_offset(&obj->base);
1523out:
1524 dev_priv->mm.shrinker_no_lock_stealing = false;
1525
1526 return ret;
d8cb5086
CW
1527}
1528
1529static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1530{
1531 if (!obj->base.map_list.map)
1532 return;
1533
1534 drm_gem_free_mmap_offset(&obj->base);
1535}
1536
de151cf6 1537int
ff72145b
DA
1538i915_gem_mmap_gtt(struct drm_file *file,
1539 struct drm_device *dev,
1540 uint32_t handle,
1541 uint64_t *offset)
de151cf6 1542{
da761a6e 1543 struct drm_i915_private *dev_priv = dev->dev_private;
05394f39 1544 struct drm_i915_gem_object *obj;
de151cf6
JB
1545 int ret;
1546
76c1dec1 1547 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 1548 if (ret)
76c1dec1 1549 return ret;
de151cf6 1550
ff72145b 1551 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
c8725226 1552 if (&obj->base == NULL) {
1d7cfea1
CW
1553 ret = -ENOENT;
1554 goto unlock;
1555 }
de151cf6 1556
5d4545ae 1557 if (obj->base.size > dev_priv->gtt.mappable_end) {
da761a6e 1558 ret = -E2BIG;
ff56b0bc 1559 goto out;
da761a6e
CW
1560 }
1561
05394f39 1562 if (obj->madv != I915_MADV_WILLNEED) {
ab18282d 1563 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1d7cfea1
CW
1564 ret = -EINVAL;
1565 goto out;
ab18282d
CW
1566 }
1567
d8cb5086
CW
1568 ret = i915_gem_object_create_mmap_offset(obj);
1569 if (ret)
1570 goto out;
de151cf6 1571
ff72145b 1572 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
de151cf6 1573
1d7cfea1 1574out:
05394f39 1575 drm_gem_object_unreference(&obj->base);
1d7cfea1 1576unlock:
de151cf6 1577 mutex_unlock(&dev->struct_mutex);
1d7cfea1 1578 return ret;
de151cf6
JB
1579}
1580
ff72145b
DA
1581/**
1582 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1583 * @dev: DRM device
1584 * @data: GTT mapping ioctl data
1585 * @file: GEM object info
1586 *
1587 * Simply returns the fake offset to userspace so it can mmap it.
1588 * The mmap call will end up in drm_gem_mmap(), which will set things
1589 * up so we can get faults in the handler above.
1590 *
1591 * The fault handler will take care of binding the object into the GTT
1592 * (since it may have been evicted to make room for something), allocating
1593 * a fence register, and mapping the appropriate aperture address into
1594 * userspace.
1595 */
1596int
1597i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1598 struct drm_file *file)
1599{
1600 struct drm_i915_gem_mmap_gtt *args = data;
1601
ff72145b
DA
1602 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1603}
1604
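/*
 * Illustrative userspace sketch (not part of this file): the two-step GTT
 * mapping flow served by the ioctl above - fetch the fake offset, then
 * mmap() the DRM fd at that offset so faults land in i915_gem_fault().
 * Assumes the i915 UAPI from <drm/i915_drm.h>; error handling is trimmed.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *bo_mmap_gtt(int drm_fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap_gtt arg;
	void *ptr;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return NULL;

	/* Write-combined GTT mapping; the first access triggers the fault path. */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   drm_fd, arg.offset);
	return ptr == MAP_FAILED ? NULL : ptr;
}
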
225067ee
DV
1605/* Immediately discard the backing storage */
1606static void
1607i915_gem_object_truncate(struct drm_i915_gem_object *obj)
e5281ccd 1608{
e5281ccd 1609 struct inode *inode;
e5281ccd 1610
4d6294bf 1611 i915_gem_object_free_mmap_offset(obj);
1286ff73 1612
4d6294bf
CW
1613 if (obj->base.filp == NULL)
1614 return;
e5281ccd 1615
225067ee
DV
1616 /* Our goal here is to return as much of the memory as
1617 * is possible back to the system as we are called from OOM.
1618 * To do this we must instruct the shmfs to drop all of its
1619 * backing pages, *now*.
1620 */
05394f39 1621 inode = obj->base.filp->f_path.dentry->d_inode;
225067ee 1622 shmem_truncate_range(inode, 0, (loff_t)-1);
e5281ccd 1623
225067ee
DV
1624 obj->madv = __I915_MADV_PURGED;
1625}
e5281ccd 1626
225067ee
DV
1627static inline int
1628i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1629{
1630 return obj->madv == I915_MADV_DONTNEED;
e5281ccd
CW
1631}
1632
5cdf5881 1633static void
05394f39 1634i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
673a394b 1635{
05394f39 1636 int page_count = obj->base.size / PAGE_SIZE;
9da3da66 1637 struct scatterlist *sg;
6c085a72 1638 int ret, i;
1286ff73 1639
05394f39 1640 BUG_ON(obj->madv == __I915_MADV_PURGED);
673a394b 1641
6c085a72
CW
1642 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1643 if (ret) {
1644 /* In the event of a disaster, abandon all caches and
1645 * hope for the best.
1646 */
1647 WARN_ON(ret != -EIO);
1648 i915_gem_clflush_object(obj);
1649 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1650 }
1651
6dacfd2f 1652 if (i915_gem_object_needs_bit17_swizzle(obj))
280b713b
EA
1653 i915_gem_object_save_bit_17_swizzle(obj);
1654
05394f39
CW
1655 if (obj->madv == I915_MADV_DONTNEED)
1656 obj->dirty = 0;
3ef94daa 1657
9da3da66
CW
1658 for_each_sg(obj->pages->sgl, sg, page_count, i) {
1659 struct page *page = sg_page(sg);
1660
05394f39 1661 if (obj->dirty)
9da3da66 1662 set_page_dirty(page);
3ef94daa 1663
05394f39 1664 if (obj->madv == I915_MADV_WILLNEED)
9da3da66 1665 mark_page_accessed(page);
3ef94daa 1666
9da3da66 1667 page_cache_release(page);
3ef94daa 1668 }
05394f39 1669 obj->dirty = 0;
673a394b 1670
9da3da66
CW
1671 sg_free_table(obj->pages);
1672 kfree(obj->pages);
37e680a1 1673}
6c085a72 1674
dd624afd 1675int
37e680a1
CW
1676i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1677{
1678 const struct drm_i915_gem_object_ops *ops = obj->ops;
1679
2f745ad3 1680 if (obj->pages == NULL)
37e680a1
CW
1681 return 0;
1682
1683 BUG_ON(obj->gtt_space);
6c085a72 1684
a5570178
CW
1685 if (obj->pages_pin_count)
1686 return -EBUSY;
1687
a2165e31
CW
1688 /* ->put_pages might need to allocate memory for the bit17 swizzle
1689 * array, hence protect them from being reaped by removing them from gtt
1690 * lists early. */
1691 list_del(&obj->gtt_list);
1692
37e680a1 1693 ops->put_pages(obj);
05394f39 1694 obj->pages = NULL;
37e680a1 1695
6c085a72
CW
1696 if (i915_gem_object_is_purgeable(obj))
1697 i915_gem_object_truncate(obj);
1698
1699 return 0;
1700}
1701
1702static long
93927ca5
DV
1703__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1704 bool purgeable_only)
6c085a72
CW
1705{
1706 struct drm_i915_gem_object *obj, *next;
1707 long count = 0;
1708
1709 list_for_each_entry_safe(obj, next,
1710 &dev_priv->mm.unbound_list,
1711 gtt_list) {
93927ca5 1712 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
37e680a1 1713 i915_gem_object_put_pages(obj) == 0) {
6c085a72
CW
1714 count += obj->base.size >> PAGE_SHIFT;
1715 if (count >= target)
1716 return count;
1717 }
1718 }
1719
1720 list_for_each_entry_safe(obj, next,
1721 &dev_priv->mm.inactive_list,
1722 mm_list) {
93927ca5 1723 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
6c085a72 1724 i915_gem_object_unbind(obj) == 0 &&
37e680a1 1725 i915_gem_object_put_pages(obj) == 0) {
6c085a72
CW
1726 count += obj->base.size >> PAGE_SHIFT;
1727 if (count >= target)
1728 return count;
1729 }
1730 }
1731
1732 return count;
1733}
1734
93927ca5
DV
1735static long
1736i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1737{
1738 return __i915_gem_shrink(dev_priv, target, true);
1739}
1740
6c085a72
CW
1741static void
1742i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1743{
1744 struct drm_i915_gem_object *obj, *next;
1745
1746 i915_gem_evict_everything(dev_priv->dev);
1747
1748 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
37e680a1 1749 i915_gem_object_put_pages(obj);
225067ee
DV
1750}
1751
37e680a1 1752static int
6c085a72 1753i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
e5281ccd 1754{
6c085a72 1755 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
e5281ccd
CW
1756 int page_count, i;
1757 struct address_space *mapping;
9da3da66
CW
1758 struct sg_table *st;
1759 struct scatterlist *sg;
e5281ccd 1760 struct page *page;
6c085a72 1761 gfp_t gfp;
e5281ccd 1762
6c085a72
CW
1763 /* Assert that the object is not currently in any GPU domain. As it
1764 * wasn't in the GTT, there shouldn't be any way it could have been in
1765 * a GPU cache
1766 */
1767 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1768 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1769
9da3da66
CW
1770 st = kmalloc(sizeof(*st), GFP_KERNEL);
1771 if (st == NULL)
1772 return -ENOMEM;
1773
05394f39 1774 page_count = obj->base.size / PAGE_SIZE;
9da3da66
CW
1775 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1776 sg_free_table(st);
1777 kfree(st);
e5281ccd 1778 return -ENOMEM;
9da3da66 1779 }
e5281ccd 1780
9da3da66
CW
1781 /* Get the list of pages out of our struct file. They'll be pinned
1782 * at this point until we release them.
1783 *
1784 * Fail silently without starting the shrinker
1785 */
6c085a72
CW
1786 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
1787 gfp = mapping_gfp_mask(mapping);
caf49191 1788 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
6c085a72 1789 gfp &= ~(__GFP_IO | __GFP_WAIT);
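	/* First attempt each allocation without blocking or I/O; on failure
	 * we purge our own caches below and retry before falling back to a
	 * fully blocking allocation.
	 */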
9da3da66 1790 for_each_sg(st->sgl, sg, page_count, i) {
6c085a72
CW
1791 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1792 if (IS_ERR(page)) {
1793 i915_gem_purge(dev_priv, page_count);
1794 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1795 }
1796 if (IS_ERR(page)) {
1797 /* We've tried hard to allocate the memory by reaping
1798 * our own buffer, now let the real VM do its job and
1799 * go down in flames if truly OOM.
1800 */
caf49191 1801 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
6c085a72
CW
1802 gfp |= __GFP_IO | __GFP_WAIT;
1803
1804 i915_gem_shrink_all(dev_priv);
1805 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1806 if (IS_ERR(page))
1807 goto err_pages;
1808
caf49191 1809 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
6c085a72
CW
1810 gfp &= ~(__GFP_IO | __GFP_WAIT);
1811 }
e5281ccd 1812
9da3da66 1813 sg_set_page(sg, page, PAGE_SIZE, 0);
e5281ccd
CW
1814 }
1815
74ce6b6c
CW
1816 obj->pages = st;
1817
6dacfd2f 1818 if (i915_gem_object_needs_bit17_swizzle(obj))
e5281ccd
CW
1819 i915_gem_object_do_bit_17_swizzle(obj);
1820
1821 return 0;
1822
1823err_pages:
9da3da66
CW
1824 for_each_sg(st->sgl, sg, i, page_count)
1825 page_cache_release(sg_page(sg));
1826 sg_free_table(st);
1827 kfree(st);
e5281ccd 1828 return PTR_ERR(page);
673a394b
EA
1829}
1830
37e680a1
CW
1831/* Ensure that the associated pages are gathered from the backing storage
1832 * and pinned into our object. i915_gem_object_get_pages() may be called
1833 * multiple times before they are released by a single call to
1834 * i915_gem_object_put_pages() - once the pages are no longer referenced
1835 * either as a result of memory pressure (reaping pages under the shrinker)
1836 * or as the object is itself released.
1837 */
1838int
1839i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1840{
1841 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1842 const struct drm_i915_gem_object_ops *ops = obj->ops;
1843 int ret;
1844
2f745ad3 1845 if (obj->pages)
37e680a1
CW
1846 return 0;
1847
43e28f09
CW
1848 if (obj->madv != I915_MADV_WILLNEED) {
1849 DRM_ERROR("Attempting to obtain a purgeable object\n");
1850 return -EINVAL;
1851 }
1852
a5570178
CW
1853 BUG_ON(obj->pages_pin_count);
1854
37e680a1
CW
1855 ret = ops->get_pages(obj);
1856 if (ret)
1857 return ret;
1858
1859 list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
1860 return 0;
673a394b
EA
1861}
1862
54cf91dc 1863void
05394f39 1864i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
9d773091 1865 struct intel_ring_buffer *ring)
673a394b 1866{
05394f39 1867 struct drm_device *dev = obj->base.dev;
69dc4987 1868 struct drm_i915_private *dev_priv = dev->dev_private;
9d773091 1869 u32 seqno = intel_ring_get_seqno(ring);
617dbe27 1870
852835f3 1871 BUG_ON(ring == NULL);
05394f39 1872 obj->ring = ring;
673a394b
EA
1873
1874 /* Add a reference if we're newly entering the active list. */
05394f39
CW
1875 if (!obj->active) {
1876 drm_gem_object_reference(&obj->base);
1877 obj->active = 1;
673a394b 1878 }
e35a41de 1879
673a394b 1880 /* Move from whatever list we were on to the tail of execution. */
05394f39
CW
1881 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1882 list_move_tail(&obj->ring_list, &ring->active_list);
caea7476 1883
0201f1ec 1884 obj->last_read_seqno = seqno;
caea7476 1885
7dd49065 1886 if (obj->fenced_gpu_access) {
caea7476 1887 obj->last_fenced_seqno = seqno;
caea7476 1888
7dd49065
CW
1889 /* Bump MRU to take account of the delayed flush */
1890 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1891 struct drm_i915_fence_reg *reg;
1892
1893 reg = &dev_priv->fence_regs[obj->fence_reg];
1894 list_move_tail(&reg->lru_list,
1895 &dev_priv->mm.fence_list);
1896 }
caea7476
CW
1897 }
1898}
1899
1900static void
caea7476 1901i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
ce44b0ea 1902{
05394f39 1903 struct drm_device *dev = obj->base.dev;
caea7476 1904 struct drm_i915_private *dev_priv = dev->dev_private;
ce44b0ea 1905
65ce3027 1906 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
05394f39 1907 BUG_ON(!obj->active);
caea7476 1908
1b50247a 1909 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
caea7476 1910
65ce3027 1911 list_del_init(&obj->ring_list);
caea7476
CW
1912 obj->ring = NULL;
1913
65ce3027
CW
1914 obj->last_read_seqno = 0;
1915 obj->last_write_seqno = 0;
1916 obj->base.write_domain = 0;
1917
1918 obj->last_fenced_seqno = 0;
caea7476 1919 obj->fenced_gpu_access = false;
caea7476
CW
1920
1921 obj->active = 0;
1922 drm_gem_object_unreference(&obj->base);
1923
1924 WARN_ON(i915_verify_lists(dev));
ce44b0ea 1925}
673a394b 1926
9d773091 1927static int
fca26bb4 1928i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
53d227f2 1929{
9d773091
CW
1930 struct drm_i915_private *dev_priv = dev->dev_private;
1931 struct intel_ring_buffer *ring;
1932 int ret, i, j;
53d227f2 1933
107f27a5 1934 /* Carefully retire all requests without writing to the rings */
9d773091 1935 for_each_ring(ring, dev_priv, i) {
107f27a5
CW
1936 ret = intel_ring_idle(ring);
1937 if (ret)
1938 return ret;
9d773091 1939 }
9d773091 1940 i915_gem_retire_requests(dev);
107f27a5
CW
1941
1942 /* Finally reset hw state */
9d773091 1943 for_each_ring(ring, dev_priv, i) {
fca26bb4 1944 intel_ring_init_seqno(ring, seqno);
498d2ac1 1945
9d773091
CW
1946 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1947 ring->sync_seqno[j] = 0;
1948 }
53d227f2 1949
9d773091 1950 return 0;
53d227f2
DV
1951}
1952
fca26bb4
MK
1953int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
1954{
1955 struct drm_i915_private *dev_priv = dev->dev_private;
1956 int ret;
1957
1958 if (seqno == 0)
1959 return -EINVAL;
1960
1961 /* The seqno in the HWS page needs to be set to one less
1962 * than the value we will inject into the ring
1963 */
1964 ret = i915_gem_init_seqno(dev, seqno - 1);
1965 if (ret)
1966 return ret;
1967
1968 /* Carefully set the last_seqno value so that wrap
1969 * detection still works
1970 */
1971 dev_priv->next_seqno = seqno;
1972 dev_priv->last_seqno = seqno - 1;
1973 if (dev_priv->last_seqno == 0)
1974 dev_priv->last_seqno--;
1975
1976 return 0;
1977}
1978
9d773091
CW
1979int
1980i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
53d227f2 1981{
9d773091
CW
1982 struct drm_i915_private *dev_priv = dev->dev_private;
1983
1984 /* reserve 0 for non-seqno */
1985 if (dev_priv->next_seqno == 0) {
fca26bb4 1986 int ret = i915_gem_init_seqno(dev, 0);
9d773091
CW
1987 if (ret)
1988 return ret;
53d227f2 1989
9d773091
CW
1990 dev_priv->next_seqno = 1;
1991 }
53d227f2 1992
f72b3435 1993 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
9d773091 1994 return 0;
53d227f2
DV
1995}
1996
3cce469c 1997int
db53a302 1998i915_add_request(struct intel_ring_buffer *ring,
f787a5f5 1999 struct drm_file *file,
acb868d3 2000 u32 *out_seqno)
673a394b 2001{
db53a302 2002 drm_i915_private_t *dev_priv = ring->dev->dev_private;
acb868d3 2003 struct drm_i915_gem_request *request;
a71d8d94 2004 u32 request_ring_position;
673a394b 2005 int was_empty;
3cce469c
CW
2006 int ret;
2007
cc889e0f
DV
2008 /*
2009 * Emit any outstanding flushes - execbuf can fail to emit the flush
2010 * after having emitted the batchbuffer command. Hence we need to fix
2011 * things up similarly to emitting the lazy request. The difference here
2012 * is that the flush _must_ happen before the next request, no matter
2013 * what.
2014 */
a7b9761d
CW
2015 ret = intel_ring_flush_all_caches(ring);
2016 if (ret)
2017 return ret;
cc889e0f 2018
acb868d3
CW
2019 request = kmalloc(sizeof(*request), GFP_KERNEL);
2020 if (request == NULL)
2021 return -ENOMEM;
cc889e0f 2022
673a394b 2023
a71d8d94
CW
2024 /* Record the position of the start of the request so that
2025 * should we detect the updated seqno part-way through the
2026 * GPU processing the request, we never over-estimate the
2027 * position of the head.
2028 */
2029 request_ring_position = intel_ring_get_tail(ring);
2030
9d773091 2031 ret = ring->add_request(ring);
3bb73aba
CW
2032 if (ret) {
2033 kfree(request);
2034 return ret;
2035 }
673a394b 2036
9d773091 2037 request->seqno = intel_ring_get_seqno(ring);
852835f3 2038 request->ring = ring;
a71d8d94 2039 request->tail = request_ring_position;
673a394b 2040 request->emitted_jiffies = jiffies;
852835f3
ZN
2041 was_empty = list_empty(&ring->request_list);
2042 list_add_tail(&request->list, &ring->request_list);
3bb73aba 2043 request->file_priv = NULL;
852835f3 2044
db53a302
CW
2045 if (file) {
2046 struct drm_i915_file_private *file_priv = file->driver_priv;
2047
1c25595f 2048 spin_lock(&file_priv->mm.lock);
f787a5f5 2049 request->file_priv = file_priv;
b962442e 2050 list_add_tail(&request->client_list,
f787a5f5 2051 &file_priv->mm.request_list);
1c25595f 2052 spin_unlock(&file_priv->mm.lock);
b962442e 2053 }
673a394b 2054
9d773091 2055 trace_i915_gem_request_add(ring, request->seqno);
5391d0cf 2056 ring->outstanding_lazy_request = 0;
db53a302 2057
f65d9421 2058 if (!dev_priv->mm.suspended) {
3e0dc6b0 2059 if (i915_enable_hangcheck) {
99584db3 2060 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
cecc21fe 2061 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
3e0dc6b0 2062 }
f047e395 2063 if (was_empty) {
b3b079db 2064 queue_delayed_work(dev_priv->wq,
bcb45086
CW
2065 &dev_priv->mm.retire_work,
2066 round_jiffies_up_relative(HZ));
f047e395
CW
2067 intel_mark_busy(dev_priv->dev);
2068 }
f65d9421 2069 }
cc889e0f 2070
acb868d3 2071 if (out_seqno)
9d773091 2072 *out_seqno = request->seqno;
3cce469c 2073 return 0;
673a394b
EA
2074}
2075
f787a5f5
CW
2076static inline void
2077i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
673a394b 2078{
1c25595f 2079 struct drm_i915_file_private *file_priv = request->file_priv;
673a394b 2080
1c25595f
CW
2081 if (!file_priv)
2082 return;
1c5d22f7 2083
1c25595f 2084 spin_lock(&file_priv->mm.lock);
09bfa517
HRK
2085 if (request->file_priv) {
2086 list_del(&request->client_list);
2087 request->file_priv = NULL;
2088 }
1c25595f 2089 spin_unlock(&file_priv->mm.lock);
673a394b 2090}
673a394b 2091
dfaae392
CW
2092static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2093 struct intel_ring_buffer *ring)
9375e446 2094{
dfaae392
CW
2095 while (!list_empty(&ring->request_list)) {
2096 struct drm_i915_gem_request *request;
673a394b 2097
dfaae392
CW
2098 request = list_first_entry(&ring->request_list,
2099 struct drm_i915_gem_request,
2100 list);
de151cf6 2101
dfaae392 2102 list_del(&request->list);
f787a5f5 2103 i915_gem_request_remove_from_client(request);
dfaae392
CW
2104 kfree(request);
2105 }
673a394b 2106
dfaae392 2107 while (!list_empty(&ring->active_list)) {
05394f39 2108 struct drm_i915_gem_object *obj;
9375e446 2109
05394f39
CW
2110 obj = list_first_entry(&ring->active_list,
2111 struct drm_i915_gem_object,
2112 ring_list);
9375e446 2113
05394f39 2114 i915_gem_object_move_to_inactive(obj);
673a394b
EA
2115 }
2116}
2117
312817a3
CW
2118static void i915_gem_reset_fences(struct drm_device *dev)
2119{
2120 struct drm_i915_private *dev_priv = dev->dev_private;
2121 int i;
2122
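	/* After a reset the fence register contents are unreliable, so
	 * write every register back as disabled and drop our bookkeeping
	 * for any object that was relying on one.
	 */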
4b9de737 2123 for (i = 0; i < dev_priv->num_fence_regs; i++) {
312817a3 2124 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
7d2cb39c 2125
ada726c7 2126 i915_gem_write_fence(dev, i, NULL);
7d2cb39c 2127
ada726c7
CW
2128 if (reg->obj)
2129 i915_gem_object_fence_lost(reg->obj);
7d2cb39c 2130
ada726c7
CW
2131 reg->pin_count = 0;
2132 reg->obj = NULL;
2133 INIT_LIST_HEAD(&reg->lru_list);
312817a3 2134 }
ada726c7
CW
2135
2136 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
312817a3
CW
2137}
2138
069efc1d 2139void i915_gem_reset(struct drm_device *dev)
673a394b 2140{
77f01230 2141 struct drm_i915_private *dev_priv = dev->dev_private;
05394f39 2142 struct drm_i915_gem_object *obj;
b4519513 2143 struct intel_ring_buffer *ring;
1ec14ad3 2144 int i;
673a394b 2145
b4519513
CW
2146 for_each_ring(ring, dev_priv, i)
2147 i915_gem_reset_ring_lists(dev_priv, ring);
dfaae392 2148
dfaae392
CW
2149 /* Move everything out of the GPU domains to ensure we do any
2150 * necessary invalidation upon reuse.
2151 */
05394f39 2152 list_for_each_entry(obj,
77f01230 2153 &dev_priv->mm.inactive_list,
69dc4987 2154 mm_list)
77f01230 2155 {
05394f39 2156 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
77f01230 2157 }
069efc1d
CW
2158
2159 /* The fence registers are invalidated so clear them out */
312817a3 2160 i915_gem_reset_fences(dev);
673a394b
EA
2161}
2162
2163/**
2164 * This function clears the request list as sequence numbers are passed.
2165 */
a71d8d94 2166void
db53a302 2167i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
673a394b 2168{
673a394b
EA
2169 uint32_t seqno;
2170
db53a302 2171 if (list_empty(&ring->request_list))
6c0594a3
KW
2172 return;
2173
db53a302 2174 WARN_ON(i915_verify_lists(ring->dev));
673a394b 2175
b2eadbc8 2176 seqno = ring->get_seqno(ring, true);
1ec14ad3 2177
852835f3 2178 while (!list_empty(&ring->request_list)) {
673a394b 2179 struct drm_i915_gem_request *request;
673a394b 2180
852835f3 2181 request = list_first_entry(&ring->request_list,
673a394b
EA
2182 struct drm_i915_gem_request,
2183 list);
673a394b 2184
dfaae392 2185 if (!i915_seqno_passed(seqno, request->seqno))
b84d5f0c
CW
2186 break;
2187
db53a302 2188 trace_i915_gem_request_retire(ring, request->seqno);
a71d8d94
CW
2189 /* We know the GPU must have read the request to have
2190 * sent us the seqno + interrupt, so use the position
2191 * of the tail of the request to update the last known position
2192 * of the GPU head.
2193 */
2194 ring->last_retired_head = request->tail;
b84d5f0c
CW
2195
2196 list_del(&request->list);
f787a5f5 2197 i915_gem_request_remove_from_client(request);
b84d5f0c
CW
2198 kfree(request);
2199 }
673a394b 2200
b84d5f0c
CW
2201 /* Move any buffers on the active list that are no longer referenced
2202 * by the ringbuffer to the inactive list.
2203 */
2204 while (!list_empty(&ring->active_list)) {
05394f39 2205 struct drm_i915_gem_object *obj;
b84d5f0c 2206
0206e353 2207 obj = list_first_entry(&ring->active_list,
05394f39
CW
2208 struct drm_i915_gem_object,
2209 ring_list);
673a394b 2210
0201f1ec 2211 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
673a394b 2212 break;
b84d5f0c 2213
65ce3027 2214 i915_gem_object_move_to_inactive(obj);
673a394b 2215 }
9d34e5db 2216
db53a302
CW
2217 if (unlikely(ring->trace_irq_seqno &&
2218 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1ec14ad3 2219 ring->irq_put(ring);
db53a302 2220 ring->trace_irq_seqno = 0;
9d34e5db 2221 }
23bc5982 2222
db53a302 2223 WARN_ON(i915_verify_lists(ring->dev));
673a394b
EA
2224}
2225
b09a1fec
CW
2226void
2227i915_gem_retire_requests(struct drm_device *dev)
2228{
2229 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 2230 struct intel_ring_buffer *ring;
1ec14ad3 2231 int i;
b09a1fec 2232
b4519513
CW
2233 for_each_ring(ring, dev_priv, i)
2234 i915_gem_retire_requests_ring(ring);
b09a1fec
CW
2235}
2236
75ef9da2 2237static void
673a394b
EA
2238i915_gem_retire_work_handler(struct work_struct *work)
2239{
2240 drm_i915_private_t *dev_priv;
2241 struct drm_device *dev;
b4519513 2242 struct intel_ring_buffer *ring;
0a58705b
CW
2243 bool idle;
2244 int i;
673a394b
EA
2245
2246 dev_priv = container_of(work, drm_i915_private_t,
2247 mm.retire_work.work);
2248 dev = dev_priv->dev;
2249
891b48cf
CW
2250 /* Come back later if the device is busy... */
2251 if (!mutex_trylock(&dev->struct_mutex)) {
bcb45086
CW
2252 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2253 round_jiffies_up_relative(HZ));
891b48cf
CW
2254 return;
2255 }
673a394b 2256
b09a1fec 2257 i915_gem_retire_requests(dev);
673a394b 2258
0a58705b
CW
2259 /* Send a periodic flush down the ring so we don't hold onto GEM
2260 * objects indefinitely.
673a394b 2261 */
0a58705b 2262 idle = true;
b4519513 2263 for_each_ring(ring, dev_priv, i) {
3bb73aba
CW
2264 if (ring->gpu_caches_dirty)
2265 i915_add_request(ring, NULL, NULL);
0a58705b
CW
2266
2267 idle &= list_empty(&ring->request_list);
673a394b
EA
2268 }
2269
0a58705b 2270 if (!dev_priv->mm.suspended && !idle)
bcb45086
CW
2271 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2272 round_jiffies_up_relative(HZ));
f047e395
CW
2273 if (idle)
2274 intel_mark_idle(dev);
0a58705b 2275
673a394b 2276 mutex_unlock(&dev->struct_mutex);
673a394b
EA
2277}
2278
30dfebf3
DV
2279/**
2280 * Ensures that an object will eventually get non-busy by flushing any required
2281 * write domains, emitting any outstanding lazy request and retiring any
2282 * completed requests.
2283 */
2284static int
2285i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2286{
2287 int ret;
2288
2289 if (obj->active) {
0201f1ec 2290 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
30dfebf3
DV
2291 if (ret)
2292 return ret;
2293
30dfebf3
DV
2294 i915_gem_retire_requests_ring(obj->ring);
2295 }
2296
2297 return 0;
2298}
2299
23ba4fd0
BW
2300/**
2301 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2302 * @DRM_IOCTL_ARGS: standard ioctl arguments
2303 *
2304 * Returns 0 if successful, else an error is returned with the remaining time in
2305 * the timeout parameter.
2306 * -ETIME: object is still busy after timeout
2307 * -ERESTARTSYS: signal interrupted the wait
2308 * -ENOENT: object doesn't exist
2309 * Also possible, but rare:
2310 * -EAGAIN: GPU wedged
2311 * -ENOMEM: damn
2312 * -ENODEV: Internal IRQ fail
2313 * -E?: The add request failed
2314 *
2315 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2316 * non-zero timeout parameter the wait ioctl will wait for the given number of
2317 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2318 * without holding struct_mutex the object may become re-busied before this
2319 * function completes. A similar but shorter race condition exists in the busy
2320 * ioctl.
2321 */
2322int
2323i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2324{
f69061be 2325 drm_i915_private_t *dev_priv = dev->dev_private;
23ba4fd0
BW
2326 struct drm_i915_gem_wait *args = data;
2327 struct drm_i915_gem_object *obj;
2328 struct intel_ring_buffer *ring = NULL;
eac1f14f 2329 struct timespec timeout_stack, *timeout = NULL;
f69061be 2330 unsigned reset_counter;
23ba4fd0
BW
2331 u32 seqno = 0;
2332 int ret = 0;
2333
eac1f14f
BW
2334 if (args->timeout_ns >= 0) {
2335 timeout_stack = ns_to_timespec(args->timeout_ns);
2336 timeout = &timeout_stack;
2337 }
23ba4fd0
BW
2338
2339 ret = i915_mutex_lock_interruptible(dev);
2340 if (ret)
2341 return ret;
2342
2343 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2344 if (&obj->base == NULL) {
2345 mutex_unlock(&dev->struct_mutex);
2346 return -ENOENT;
2347 }
2348
30dfebf3
DV
2349 /* Need to make sure the object gets inactive eventually. */
2350 ret = i915_gem_object_flush_active(obj);
23ba4fd0
BW
2351 if (ret)
2352 goto out;
2353
2354 if (obj->active) {
0201f1ec 2355 seqno = obj->last_read_seqno;
23ba4fd0
BW
2356 ring = obj->ring;
2357 }
2358
2359 if (seqno == 0)
2360 goto out;
2361
23ba4fd0
BW
2362 /* Do this after OLR check to make sure we make forward progress polling
2363 * on this IOCTL with a 0 timeout (like busy ioctl)
2364 */
2365 if (!args->timeout_ns) {
2366 ret = -ETIME;
2367 goto out;
2368 }
2369
2370 drm_gem_object_unreference(&obj->base);
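	/* Snapshot the reset counter before dropping struct_mutex so that
	 * the unlocked wait can detect a GPU reset occurring in between.
	 */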
f69061be 2371 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
23ba4fd0
BW
2372 mutex_unlock(&dev->struct_mutex);
2373
f69061be 2374 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
eac1f14f
BW
2375 if (timeout) {
2376 WARN_ON(!timespec_valid(timeout));
2377 args->timeout_ns = timespec_to_ns(timeout);
2378 }
23ba4fd0
BW
2379 return ret;
2380
2381out:
2382 drm_gem_object_unreference(&obj->base);
2383 mutex_unlock(&dev->struct_mutex);
2384 return ret;
2385}
2386
5816d648
BW
2387/**
2388 * i915_gem_object_sync - sync an object to a ring.
2389 *
2390 * @obj: object which may be in use on another ring.
2391 * @to: ring we wish to use the object on. May be NULL.
2392 *
2393 * This code is meant to abstract object synchronization with the GPU.
2394 * Calling with NULL implies synchronizing the object with the CPU
2395 * rather than a particular GPU ring.
2396 *
2397 * Returns 0 if successful, else propagates up the lower layer error.
2398 */
2911a35b
BW
2399int
2400i915_gem_object_sync(struct drm_i915_gem_object *obj,
2401 struct intel_ring_buffer *to)
2402{
2403 struct intel_ring_buffer *from = obj->ring;
2404 u32 seqno;
2405 int ret, idx;
2406
2407 if (from == NULL || to == from)
2408 return 0;
2409
5816d648 2410 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
0201f1ec 2411 return i915_gem_object_wait_rendering(obj, false);
2911a35b
BW
2412
2413 idx = intel_ring_sync_index(from, to);
2414
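	/* If the destination ring has already waited for a seqno on this
	 * ring that is at least as new, no further semaphore is needed.
	 */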
0201f1ec 2415 seqno = obj->last_read_seqno;
2911a35b
BW
2416 if (seqno <= from->sync_seqno[idx])
2417 return 0;
2418
b4aca010
BW
2419 ret = i915_gem_check_olr(obj->ring, seqno);
2420 if (ret)
2421 return ret;
2911a35b 2422
1500f7ea 2423 ret = to->sync_to(to, from, seqno);
e3a5a225 2424 if (!ret)
7b01e260
MK
2425 /* We use last_read_seqno because sync_to()
2426 * might have just caused seqno wrap under
2427 * the radar.
2428 */
2429 from->sync_seqno[idx] = obj->last_read_seqno;
2911a35b 2430
e3a5a225 2431 return ret;
2911a35b
BW
2432}
2433
b5ffc9bc
CW
2434static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2435{
2436 u32 old_write_domain, old_read_domains;
2437
b5ffc9bc
CW
2438 /* Force a pagefault for domain tracking on next user access */
2439 i915_gem_release_mmap(obj);
2440
b97c3d9c
KP
2441 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2442 return;
2443
97c809fd
CW
2444 /* Wait for any direct GTT access to complete */
2445 mb();
2446
b5ffc9bc
CW
2447 old_read_domains = obj->base.read_domains;
2448 old_write_domain = obj->base.write_domain;
2449
2450 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2451 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2452
2453 trace_i915_gem_object_change_domain(obj,
2454 old_read_domains,
2455 old_write_domain);
2456}
2457
673a394b
EA
2458/**
2459 * Unbinds an object from the GTT aperture.
2460 */
0f973f27 2461int
05394f39 2462i915_gem_object_unbind(struct drm_i915_gem_object *obj)
673a394b 2463{
7bddb01f 2464 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
43e28f09 2465 int ret;
673a394b 2466
05394f39 2467 if (obj->gtt_space == NULL)
673a394b
EA
2468 return 0;
2469
31d8d651
CW
2470 if (obj->pin_count)
2471 return -EBUSY;
673a394b 2472
c4670ad0
CW
2473 BUG_ON(obj->pages == NULL);
2474
a8198eea 2475 ret = i915_gem_object_finish_gpu(obj);
1488fc08 2476 if (ret)
a8198eea
CW
2477 return ret;
2478 /* Continue on if we fail due to EIO: the GPU is hung, so we
2479 * should be safe, and we need to clean up or else we might
2480 * cause memory corruption through use-after-free.
2481 */
2482
b5ffc9bc 2483 i915_gem_object_finish_gtt(obj);
5323fd04 2484
96b47b65 2485 /* release the fence reg _after_ flushing */
d9e86c0e 2486 ret = i915_gem_object_put_fence(obj);
1488fc08 2487 if (ret)
d9e86c0e 2488 return ret;
96b47b65 2489
db53a302
CW
2490 trace_i915_gem_object_unbind(obj);
2491
74898d7e
DV
2492 if (obj->has_global_gtt_mapping)
2493 i915_gem_gtt_unbind_object(obj);
7bddb01f
DV
2494 if (obj->has_aliasing_ppgtt_mapping) {
2495 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2496 obj->has_aliasing_ppgtt_mapping = 0;
2497 }
74163907 2498 i915_gem_gtt_finish_object(obj);
7bddb01f 2499
6c085a72
CW
2500 list_del(&obj->mm_list);
2501 list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
75e9e915 2502 /* Avoid an unnecessary call to unbind on rebind. */
05394f39 2503 obj->map_and_fenceable = true;
673a394b 2504
05394f39
CW
2505 drm_mm_put_block(obj->gtt_space);
2506 obj->gtt_space = NULL;
2507 obj->gtt_offset = 0;
673a394b 2508
88241785 2509 return 0;
54cf91dc
CW
2510}
2511
b2da9fe5 2512int i915_gpu_idle(struct drm_device *dev)
4df2faf4
DV
2513{
2514 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 2515 struct intel_ring_buffer *ring;
1ec14ad3 2516 int ret, i;
4df2faf4 2517
4df2faf4 2518 /* Flush everything onto the inactive list. */
b4519513 2519 for_each_ring(ring, dev_priv, i) {
b6c7488d
BW
2520 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2521 if (ret)
2522 return ret;
2523
3e960501 2524 ret = intel_ring_idle(ring);
1ec14ad3
CW
2525 if (ret)
2526 return ret;
2527 }
4df2faf4 2528
8a1a49f9 2529 return 0;
4df2faf4
DV
2530}
2531
9ce079e4
CW
2532static void i965_write_fence_reg(struct drm_device *dev, int reg,
2533 struct drm_i915_gem_object *obj)
de151cf6 2534{
de151cf6 2535 drm_i915_private_t *dev_priv = dev->dev_private;
56c844e5
ID
2536 int fence_reg;
2537 int fence_pitch_shift;
de151cf6
JB
2538 uint64_t val;
2539
56c844e5
ID
2540 if (INTEL_INFO(dev)->gen >= 6) {
2541 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2542 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2543 } else {
2544 fence_reg = FENCE_REG_965_0;
2545 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2546 }
2547
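	/* Gen4+ fence registers encode the start and end addresses of the
	 * object in the GTT, its pitch, the tiling mode and a valid bit.
	 */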
9ce079e4
CW
2548 if (obj) {
2549 u32 size = obj->gtt_space->size;
de151cf6 2550
9ce079e4
CW
2551 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2552 0xfffff000) << 32;
2553 val |= obj->gtt_offset & 0xfffff000;
56c844e5 2554 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
9ce079e4
CW
2555 if (obj->tiling_mode == I915_TILING_Y)
2556 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2557 val |= I965_FENCE_REG_VALID;
2558 } else
2559 val = 0;
c6642782 2560
56c844e5
ID
2561 fence_reg += reg * 8;
2562 I915_WRITE64(fence_reg, val);
2563 POSTING_READ(fence_reg);
de151cf6
JB
2564}
2565
9ce079e4
CW
2566static void i915_write_fence_reg(struct drm_device *dev, int reg,
2567 struct drm_i915_gem_object *obj)
de151cf6 2568{
de151cf6 2569 drm_i915_private_t *dev_priv = dev->dev_private;
9ce079e4 2570 u32 val;
de151cf6 2571
9ce079e4
CW
2572 if (obj) {
2573 u32 size = obj->gtt_space->size;
2574 int pitch_val;
2575 int tile_width;
c6642782 2576
9ce079e4
CW
2577 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2578 (size & -size) != size ||
2579 (obj->gtt_offset & (size - 1)),
2580 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2581 obj->gtt_offset, obj->map_and_fenceable, size);
c6642782 2582
9ce079e4
CW
2583 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2584 tile_width = 128;
2585 else
2586 tile_width = 512;
2587
2588 /* Note: pitch better be a power of two tile widths */
2589 pitch_val = obj->stride / tile_width;
2590 pitch_val = ffs(pitch_val) - 1;
2591
2592 val = obj->gtt_offset;
2593 if (obj->tiling_mode == I915_TILING_Y)
2594 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2595 val |= I915_FENCE_SIZE_BITS(size);
2596 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2597 val |= I830_FENCE_REG_VALID;
2598 } else
2599 val = 0;
2600
2601 if (reg < 8)
2602 reg = FENCE_REG_830_0 + reg * 4;
2603 else
2604 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2605
2606 I915_WRITE(reg, val);
2607 POSTING_READ(reg);
de151cf6
JB
2608}
2609
9ce079e4
CW
2610static void i830_write_fence_reg(struct drm_device *dev, int reg,
2611 struct drm_i915_gem_object *obj)
de151cf6 2612{
de151cf6 2613 drm_i915_private_t *dev_priv = dev->dev_private;
de151cf6 2614 uint32_t val;
de151cf6 2615
9ce079e4
CW
2616 if (obj) {
2617 u32 size = obj->gtt_space->size;
2618 uint32_t pitch_val;
de151cf6 2619
9ce079e4
CW
2620 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2621 (size & -size) != size ||
2622 (obj->gtt_offset & (size - 1)),
2623 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2624 obj->gtt_offset, size);
e76a16de 2625
9ce079e4
CW
2626 pitch_val = obj->stride / 128;
2627 pitch_val = ffs(pitch_val) - 1;
de151cf6 2628
9ce079e4
CW
2629 val = obj->gtt_offset;
2630 if (obj->tiling_mode == I915_TILING_Y)
2631 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2632 val |= I830_FENCE_SIZE_BITS(size);
2633 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2634 val |= I830_FENCE_REG_VALID;
2635 } else
2636 val = 0;
c6642782 2637
9ce079e4
CW
2638 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2639 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2640}
2641
d0a57789
CW
2642inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2643{
2644 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2645}
2646
9ce079e4
CW
2647static void i915_gem_write_fence(struct drm_device *dev, int reg,
2648 struct drm_i915_gem_object *obj)
2649{
d0a57789
CW
2650 struct drm_i915_private *dev_priv = dev->dev_private;
2651
2652 /* Ensure that all CPU reads are completed before installing a fence
2653 * and all writes before removing the fence.
2654 */
2655 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2656 mb();
2657
9ce079e4
CW
2658 switch (INTEL_INFO(dev)->gen) {
2659 case 7:
56c844e5 2660 case 6:
9ce079e4
CW
2661 case 5:
2662 case 4: i965_write_fence_reg(dev, reg, obj); break;
2663 case 3: i915_write_fence_reg(dev, reg, obj); break;
2664 case 2: i830_write_fence_reg(dev, reg, obj); break;
7dbf9d6e 2665 default: BUG();
9ce079e4 2666 }
d0a57789
CW
2667
2668 /* And similarly be paranoid that no direct access to this region
2669 * is reordered to before the fence is installed.
2670 */
2671 if (i915_gem_object_needs_mb(obj))
2672 mb();
de151cf6
JB
2673}
2674
61050808
CW
2675static inline int fence_number(struct drm_i915_private *dev_priv,
2676 struct drm_i915_fence_reg *fence)
2677{
2678 return fence - dev_priv->fence_regs;
2679}
2680
2681static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2682 struct drm_i915_fence_reg *fence,
2683 bool enable)
2684{
2685 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2686 int reg = fence_number(dev_priv, fence);
2687
2688 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2689
2690 if (enable) {
2691 obj->fence_reg = reg;
2692 fence->obj = obj;
2693 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2694 } else {
2695 obj->fence_reg = I915_FENCE_REG_NONE;
2696 fence->obj = NULL;
2697 list_del_init(&fence->lru_list);
2698 }
2699}
2700
d9e86c0e 2701static int
d0a57789 2702i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
d9e86c0e 2703{
1c293ea3 2704 if (obj->last_fenced_seqno) {
86d5bc37 2705 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
18991845
CW
2706 if (ret)
2707 return ret;
d9e86c0e
CW
2708
2709 obj->last_fenced_seqno = 0;
d9e86c0e
CW
2710 }
2711
86d5bc37 2712 obj->fenced_gpu_access = false;
d9e86c0e
CW
2713 return 0;
2714}
2715
2716int
2717i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2718{
61050808 2719 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
d9e86c0e
CW
2720 int ret;
2721
d0a57789 2722 ret = i915_gem_object_wait_fence(obj);
d9e86c0e
CW
2723 if (ret)
2724 return ret;
2725
61050808
CW
2726 if (obj->fence_reg == I915_FENCE_REG_NONE)
2727 return 0;
d9e86c0e 2728
61050808
CW
2729 i915_gem_object_update_fence(obj,
2730 &dev_priv->fence_regs[obj->fence_reg],
2731 false);
2732 i915_gem_object_fence_lost(obj);
d9e86c0e
CW
2733
2734 return 0;
2735}
2736
2737static struct drm_i915_fence_reg *
a360bb1a 2738i915_find_fence_reg(struct drm_device *dev)
ae3db24a 2739{
ae3db24a 2740 struct drm_i915_private *dev_priv = dev->dev_private;
8fe301ad 2741 struct drm_i915_fence_reg *reg, *avail;
d9e86c0e 2742 int i;
ae3db24a
DV
2743
2744 /* First try to find a free reg */
d9e86c0e 2745 avail = NULL;
ae3db24a
DV
2746 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2747 reg = &dev_priv->fence_regs[i];
2748 if (!reg->obj)
d9e86c0e 2749 return reg;
ae3db24a 2750
1690e1eb 2751 if (!reg->pin_count)
d9e86c0e 2752 avail = reg;
ae3db24a
DV
2753 }
2754
d9e86c0e
CW
2755 if (avail == NULL)
2756 return NULL;
ae3db24a
DV
2757
2758 /* None available, try to steal one or wait for a user to finish */
d9e86c0e 2759 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
1690e1eb 2760 if (reg->pin_count)
ae3db24a
DV
2761 continue;
2762
8fe301ad 2763 return reg;
ae3db24a
DV
2764 }
2765
8fe301ad 2766 return NULL;
ae3db24a
DV
2767}
2768
de151cf6 2769/**
9a5a53b3 2770 * i915_gem_object_get_fence - set up fencing for an object
de151cf6
JB
2771 * @obj: object to map through a fence reg
2772 *
2773 * When mapping objects through the GTT, userspace wants to be able to write
2774 * to them without having to worry about swizzling if the object is tiled.
de151cf6
JB
2775 * This function walks the fence regs looking for a free one for @obj,
2776 * stealing one if it can't find any.
2777 *
2778 * It then sets up the reg based on the object's properties: address, pitch
2779 * and tiling format.
9a5a53b3
CW
2780 *
2781 * For an untiled surface, this removes any existing fence.
de151cf6 2782 */
8c4b8c3f 2783int
06d98131 2784i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
de151cf6 2785{
05394f39 2786 struct drm_device *dev = obj->base.dev;
79e53945 2787 struct drm_i915_private *dev_priv = dev->dev_private;
14415745 2788 bool enable = obj->tiling_mode != I915_TILING_NONE;
d9e86c0e 2789 struct drm_i915_fence_reg *reg;
ae3db24a 2790 int ret;
de151cf6 2791
14415745
CW
2792 /* Have we updated the tiling parameters upon the object and so
2793 * will need to serialise the write to the associated fence register?
2794 */
5d82e3e6 2795 if (obj->fence_dirty) {
d0a57789 2796 ret = i915_gem_object_wait_fence(obj);
14415745
CW
2797 if (ret)
2798 return ret;
2799 }
9a5a53b3 2800
d9e86c0e 2801 /* Just update our place in the LRU if our fence is getting reused. */
05394f39
CW
2802 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2803 reg = &dev_priv->fence_regs[obj->fence_reg];
5d82e3e6 2804 if (!obj->fence_dirty) {
14415745
CW
2805 list_move_tail(&reg->lru_list,
2806 &dev_priv->mm.fence_list);
2807 return 0;
2808 }
2809 } else if (enable) {
2810 reg = i915_find_fence_reg(dev);
2811 if (reg == NULL)
2812 return -EDEADLK;
d9e86c0e 2813
14415745
CW
2814 if (reg->obj) {
2815 struct drm_i915_gem_object *old = reg->obj;
2816
d0a57789 2817 ret = i915_gem_object_wait_fence(old);
29c5a587
CW
2818 if (ret)
2819 return ret;
2820
14415745 2821 i915_gem_object_fence_lost(old);
29c5a587 2822 }
14415745 2823 } else
a09ba7fa 2824 return 0;
a09ba7fa 2825
14415745 2826 i915_gem_object_update_fence(obj, reg, enable);
5d82e3e6 2827 obj->fence_dirty = false;
14415745 2828
9ce079e4 2829 return 0;
de151cf6
JB
2830}
2831
42d6ab48
CW
2832static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2833 struct drm_mm_node *gtt_space,
2834 unsigned long cache_level)
2835{
2836 struct drm_mm_node *other;
2837
2838 /* On non-LLC machines we have to be careful when putting differing
2839 * types of snoopable memory together to avoid the prefetcher
4239ca77 2840 * crossing memory domains and dying.
42d6ab48
CW
2841 */
2842 if (HAS_LLC(dev))
2843 return true;
2844
2845 if (gtt_space == NULL)
2846 return true;
2847
2848 if (list_empty(&gtt_space->node_list))
2849 return true;
2850
2851 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2852 if (other->allocated && !other->hole_follows && other->color != cache_level)
2853 return false;
2854
2855 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2856 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2857 return false;
2858
2859 return true;
2860}
2861
2862static void i915_gem_verify_gtt(struct drm_device *dev)
2863{
2864#if WATCH_GTT
2865 struct drm_i915_private *dev_priv = dev->dev_private;
2866 struct drm_i915_gem_object *obj;
2867 int err = 0;
2868
2869 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
2870 if (obj->gtt_space == NULL) {
2871 printk(KERN_ERR "object found on GTT list with no space reserved\n");
2872 err++;
2873 continue;
2874 }
2875
2876 if (obj->cache_level != obj->gtt_space->color) {
2877 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
2878 obj->gtt_space->start,
2879 obj->gtt_space->start + obj->gtt_space->size,
2880 obj->cache_level,
2881 obj->gtt_space->color);
2882 err++;
2883 continue;
2884 }
2885
2886 if (!i915_gem_valid_gtt_space(dev,
2887 obj->gtt_space,
2888 obj->cache_level)) {
2889 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
2890 obj->gtt_space->start,
2891 obj->gtt_space->start + obj->gtt_space->size,
2892 obj->cache_level);
2893 err++;
2894 continue;
2895 }
2896 }
2897
2898 WARN_ON(err);
2899#endif
2900}
2901
673a394b
EA
2902/**
2903 * Finds free space in the GTT aperture and binds the object there.
2904 */
2905static int
05394f39 2906i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
920afa77 2907 unsigned alignment,
86a1ee26
CW
2908 bool map_and_fenceable,
2909 bool nonblocking)
673a394b 2910{
05394f39 2911 struct drm_device *dev = obj->base.dev;
673a394b 2912 drm_i915_private_t *dev_priv = dev->dev_private;
dc9dd7a2 2913 struct drm_mm_node *node;
5e783301 2914 u32 size, fence_size, fence_alignment, unfenced_alignment;
75e9e915 2915 bool mappable, fenceable;
07f73f69 2916 int ret;
673a394b 2917
e28f8711
CW
2918 fence_size = i915_gem_get_gtt_size(dev,
2919 obj->base.size,
2920 obj->tiling_mode);
2921 fence_alignment = i915_gem_get_gtt_alignment(dev,
2922 obj->base.size,
d865110c 2923 obj->tiling_mode, true);
e28f8711 2924 unfenced_alignment =
d865110c 2925 i915_gem_get_gtt_alignment(dev,
e28f8711 2926 obj->base.size,
d865110c 2927 obj->tiling_mode, false);
a00b10c3 2928
673a394b 2929 if (alignment == 0)
5e783301
DV
2930 alignment = map_and_fenceable ? fence_alignment :
2931 unfenced_alignment;
75e9e915 2932 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
673a394b
EA
2933 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2934 return -EINVAL;
2935 }
2936
05394f39 2937 size = map_and_fenceable ? fence_size : obj->base.size;
a00b10c3 2938
654fc607
CW
2939 /* If the object is bigger than the entire aperture, reject it early
2940 * before evicting everything in a vain attempt to find space.
2941 */
05394f39 2942 if (obj->base.size >
5d4545ae 2943 (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
654fc607
CW
2944 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2945 return -E2BIG;
2946 }
2947
37e680a1 2948 ret = i915_gem_object_get_pages(obj);
6c085a72
CW
2949 if (ret)
2950 return ret;
2951
fbdda6fb
CW
2952 i915_gem_object_pin_pages(obj);
2953
dc9dd7a2
CW
2954 node = kzalloc(sizeof(*node), GFP_KERNEL);
2955 if (node == NULL) {
2956 i915_gem_object_unpin_pages(obj);
2957 return -ENOMEM;
2958 }
2959
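	/* Search for free space in the GTT; on failure evict something
	 * suitable and retry from search_free.
	 */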
673a394b 2960 search_free:
75e9e915 2961 if (map_and_fenceable)
dc9dd7a2
CW
2962 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
2963 size, alignment, obj->cache_level,
5d4545ae 2964 0, dev_priv->gtt.mappable_end);
920afa77 2965 else
dc9dd7a2
CW
2966 ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
2967 size, alignment, obj->cache_level);
2968 if (ret) {
75e9e915 2969 ret = i915_gem_evict_something(dev, size, alignment,
42d6ab48 2970 obj->cache_level,
86a1ee26
CW
2971 map_and_fenceable,
2972 nonblocking);
dc9dd7a2
CW
2973 if (ret == 0)
2974 goto search_free;
9731129c 2975
dc9dd7a2
CW
2976 i915_gem_object_unpin_pages(obj);
2977 kfree(node);
2978 return ret;
673a394b 2979 }
dc9dd7a2 2980 if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
fbdda6fb 2981 i915_gem_object_unpin_pages(obj);
dc9dd7a2 2982 drm_mm_put_block(node);
42d6ab48 2983 return -EINVAL;
673a394b
EA
2984 }
2985
74163907 2986 ret = i915_gem_gtt_prepare_object(obj);
7c2e6fdf 2987 if (ret) {
fbdda6fb 2988 i915_gem_object_unpin_pages(obj);
dc9dd7a2 2989 drm_mm_put_block(node);
6c085a72 2990 return ret;
673a394b 2991 }
673a394b 2992
6c085a72 2993 list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
05394f39 2994 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
bf1a1092 2995
dc9dd7a2
CW
2996 obj->gtt_space = node;
2997 obj->gtt_offset = node->start;
1c5d22f7 2998
75e9e915 2999 fenceable =
dc9dd7a2
CW
3000 node->size == fence_size &&
3001 (node->start & (fence_alignment - 1)) == 0;
a00b10c3 3002
75e9e915 3003 mappable =
5d4545ae 3004 obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
a00b10c3 3005
05394f39 3006 obj->map_and_fenceable = mappable && fenceable;
75e9e915 3007
fbdda6fb 3008 i915_gem_object_unpin_pages(obj);
db53a302 3009 trace_i915_gem_object_bind(obj, map_and_fenceable);
42d6ab48 3010 i915_gem_verify_gtt(dev);
673a394b
EA
3011 return 0;
3012}
3013
3014void
05394f39 3015i915_gem_clflush_object(struct drm_i915_gem_object *obj)
673a394b 3016{
673a394b
EA
3017 /* If we don't have a page list set up, then we're not pinned
3018 * to GPU, and we can ignore the cache flush because it'll happen
3019 * again at bind time.
3020 */
05394f39 3021 if (obj->pages == NULL)
673a394b
EA
3022 return;
3023
769ce464
ID
3024 /*
3025 * Stolen memory is always coherent with the GPU as it is explicitly
3026 * marked as wc by the system, or the system is cache-coherent.
3027 */
3028 if (obj->stolen)
3029 return;
3030
9c23f7fc
CW
3031 /* If the GPU is snooping the contents of the CPU cache,
3032 * we do not need to manually clear the CPU cache lines. However,
3033 * the caches are only snooped when the render cache is
3034 * flushed/invalidated. As we always have to emit invalidations
3035 * and flushes when moving into and out of the RENDER domain, correct
3036 * snooping behaviour occurs naturally as the result of our domain
3037 * tracking.
3038 */
3039 if (obj->cache_level != I915_CACHE_NONE)
3040 return;
3041
1c5d22f7 3042 trace_i915_gem_object_clflush(obj);
cfa16a0d 3043
9da3da66 3044 drm_clflush_sg(obj->pages);
e47c68e9
EA
3045}
3046
3047/** Flushes the GTT write domain for the object if it's dirty. */
3048static void
05394f39 3049i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
e47c68e9 3050{
1c5d22f7
CW
3051 uint32_t old_write_domain;
3052
05394f39 3053 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
e47c68e9
EA
3054 return;
3055
63256ec5 3056 /* No actual flushing is required for the GTT write domain. Writes
e47c68e9
EA
3057 * to it immediately go to main memory as far as we know, so there's
3058 * no chipset flush. It also doesn't land in render cache.
63256ec5
CW
3059 *
3060 * However, we do have to enforce the order so that all writes through
3061 * the GTT land before any writes to the device, such as updates to
3062 * the GATT itself.
e47c68e9 3063 */
63256ec5
CW
3064 wmb();
3065
05394f39
CW
3066 old_write_domain = obj->base.write_domain;
3067 obj->base.write_domain = 0;
1c5d22f7
CW
3068
3069 trace_i915_gem_object_change_domain(obj,
05394f39 3070 obj->base.read_domains,
1c5d22f7 3071 old_write_domain);
e47c68e9
EA
3072}
3073
3074/** Flushes the CPU write domain for the object if it's dirty. */
3075static void
05394f39 3076i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
e47c68e9 3077{
1c5d22f7 3078 uint32_t old_write_domain;
e47c68e9 3079
05394f39 3080 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
e47c68e9
EA
3081 return;
3082
3083 i915_gem_clflush_object(obj);
e76e9aeb 3084 i915_gem_chipset_flush(obj->base.dev);
05394f39
CW
3085 old_write_domain = obj->base.write_domain;
3086 obj->base.write_domain = 0;
1c5d22f7
CW
3087
3088 trace_i915_gem_object_change_domain(obj,
05394f39 3089 obj->base.read_domains,
1c5d22f7 3090 old_write_domain);
e47c68e9
EA
3091}
3092
2ef7eeaa
EA
3093/**
3094 * Moves a single object to the GTT read, and possibly write domain.
3095 *
3096 * This function returns when the move is complete, including waiting on
3097 * flushes to occur.
3098 */
79e53945 3099int
2021746e 3100i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2ef7eeaa 3101{
8325a09d 3102 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
1c5d22f7 3103 uint32_t old_write_domain, old_read_domains;
e47c68e9 3104 int ret;
2ef7eeaa 3105
02354392 3106 /* Not valid to be called on unbound objects. */
05394f39 3107 if (obj->gtt_space == NULL)
02354392
EA
3108 return -EINVAL;
3109
8d7e3de1
CW
3110 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3111 return 0;
3112
0201f1ec 3113 ret = i915_gem_object_wait_rendering(obj, !write);
88241785
CW
3114 if (ret)
3115 return ret;
3116
7213342d 3117 i915_gem_object_flush_cpu_write_domain(obj);
1c5d22f7 3118
d0a57789
CW
3119 /* Serialise direct access to this object with the barriers for
3120 * coherent writes from the GPU, by effectively invalidating the
3121 * GTT domain upon first access.
3122 */
3123 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3124 mb();
3125
05394f39
CW
3126 old_write_domain = obj->base.write_domain;
3127 old_read_domains = obj->base.read_domains;
1c5d22f7 3128
e47c68e9
EA
3129 /* It should now be out of any other write domains, and we can update
3130 * the domain values for our changes.
3131 */
05394f39
CW
3132 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3133 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
e47c68e9 3134 if (write) {
05394f39
CW
3135 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3136 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3137 obj->dirty = 1;
2ef7eeaa
EA
3138 }
3139
1c5d22f7
CW
3140 trace_i915_gem_object_change_domain(obj,
3141 old_read_domains,
3142 old_write_domain);
3143
8325a09d
CW
3144 /* And bump the LRU for this access */
3145 if (i915_gem_object_is_inactive(obj))
3146 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3147
e47c68e9
EA
3148 return 0;
3149}
3150
e4ffd173
CW
3151int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3152 enum i915_cache_level cache_level)
3153{
7bddb01f
DV
3154 struct drm_device *dev = obj->base.dev;
3155 drm_i915_private_t *dev_priv = dev->dev_private;
e4ffd173
CW
3156 int ret;
3157
3158 if (obj->cache_level == cache_level)
3159 return 0;
3160
3161 if (obj->pin_count) {
3162 DRM_DEBUG("can not change the cache level of pinned objects\n");
3163 return -EBUSY;
3164 }
3165
42d6ab48
CW
3166 if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
3167 ret = i915_gem_object_unbind(obj);
3168 if (ret)
3169 return ret;
3170 }
3171
e4ffd173
CW
3172 if (obj->gtt_space) {
3173 ret = i915_gem_object_finish_gpu(obj);
3174 if (ret)
3175 return ret;
3176
3177 i915_gem_object_finish_gtt(obj);
3178
3179 /* Before SandyBridge, you could not use tiling or fence
3180 * registers with snooped memory, so relinquish any fences
3181 * currently pointing to our region in the aperture.
3182 */
42d6ab48 3183 if (INTEL_INFO(dev)->gen < 6) {
e4ffd173
CW
3184 ret = i915_gem_object_put_fence(obj);
3185 if (ret)
3186 return ret;
3187 }
3188
74898d7e
DV
3189 if (obj->has_global_gtt_mapping)
3190 i915_gem_gtt_bind_object(obj, cache_level);
7bddb01f
DV
3191 if (obj->has_aliasing_ppgtt_mapping)
3192 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3193 obj, cache_level);
42d6ab48
CW
3194
3195 obj->gtt_space->color = cache_level;
e4ffd173
CW
3196 }
3197
3198 if (cache_level == I915_CACHE_NONE) {
3199 u32 old_read_domains, old_write_domain;
3200
3201 /* If we're coming from LLC cached, then we haven't
3202 * actually been tracking whether the data is in the
3203 * CPU cache or not, since we only allow one bit set
3204 * in obj->write_domain and have been skipping the clflushes.
3205 * Just set it to the CPU cache for now.
3206 */
3207 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3208 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3209
3210 old_read_domains = obj->base.read_domains;
3211 old_write_domain = obj->base.write_domain;
3212
3213 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3214 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3215
3216 trace_i915_gem_object_change_domain(obj,
3217 old_read_domains,
3218 old_write_domain);
3219 }
3220
3221 obj->cache_level = cache_level;
42d6ab48 3222 i915_gem_verify_gtt(dev);
e4ffd173
CW
3223 return 0;
3224}
3225
199adf40
BW
3226int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3227 struct drm_file *file)
e6994aee 3228{
199adf40 3229 struct drm_i915_gem_caching *args = data;
e6994aee
CW
3230 struct drm_i915_gem_object *obj;
3231 int ret;
3232
3233 ret = i915_mutex_lock_interruptible(dev);
3234 if (ret)
3235 return ret;
3236
3237 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3238 if (&obj->base == NULL) {
3239 ret = -ENOENT;
3240 goto unlock;
3241 }
3242
199adf40 3243 args->caching = obj->cache_level != I915_CACHE_NONE;
e6994aee
CW
3244
3245 drm_gem_object_unreference(&obj->base);
3246unlock:
3247 mutex_unlock(&dev->struct_mutex);
3248 return ret;
3249}
3250
199adf40
BW
3251int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3252 struct drm_file *file)
e6994aee 3253{
199adf40 3254 struct drm_i915_gem_caching *args = data;
e6994aee
CW
3255 struct drm_i915_gem_object *obj;
3256 enum i915_cache_level level;
3257 int ret;
3258
199adf40
BW
3259 switch (args->caching) {
3260 case I915_CACHING_NONE:
e6994aee
CW
3261 level = I915_CACHE_NONE;
3262 break;
199adf40 3263 case I915_CACHING_CACHED:
e6994aee
CW
3264 level = I915_CACHE_LLC;
3265 break;
3266 default:
3267 return -EINVAL;
3268 }
3269
3bc2913e
BW
3270 ret = i915_mutex_lock_interruptible(dev);
3271 if (ret)
3272 return ret;
3273
e6994aee
CW
3274 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3275 if (&obj->base == NULL) {
3276 ret = -ENOENT;
3277 goto unlock;
3278 }
3279
3280 ret = i915_gem_object_set_cache_level(obj, level);
3281
3282 drm_gem_object_unreference(&obj->base);
3283unlock:
3284 mutex_unlock(&dev->struct_mutex);
3285 return ret;
3286}
3287
b9241ea3 3288/*
2da3b9b9
CW
3289 * Prepare buffer for display plane (scanout, cursors, etc).
3290 * Can be called from an uninterruptible phase (modesetting) and allows
3291 * any flushes to be pipelined (for pageflips).
b9241ea3
ZW
3292 */
3293int
2da3b9b9
CW
3294i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3295 u32 alignment,
919926ae 3296 struct intel_ring_buffer *pipelined)
b9241ea3 3297{
2da3b9b9 3298 u32 old_read_domains, old_write_domain;
b9241ea3
ZW
3299 int ret;
3300
0be73284 3301 if (pipelined != obj->ring) {
2911a35b
BW
3302 ret = i915_gem_object_sync(obj, pipelined);
3303 if (ret)
b9241ea3
ZW
3304 return ret;
3305 }
3306
a7ef0640
EA
3307 /* The display engine is not coherent with the LLC cache on gen6. As
3308 * a result, we make sure that the pinning that is about to occur is
3309 * done with uncached PTEs. This is the lowest common denominator for all
3310 * chipsets.
3311 *
3312 * However for gen6+, we could do better by using the GFDT bit instead
3313 * of uncaching, which would allow us to flush all the LLC-cached data
3314 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3315 */
3316 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3317 if (ret)
3318 return ret;
3319
2da3b9b9
CW
3320 /* As the user may map the buffer once pinned in the display plane
3321 * (e.g. libkms for the bootup splash), we have to ensure that we
3322 * always use map_and_fenceable for all scanout buffers.
3323 */
86a1ee26 3324 ret = i915_gem_object_pin(obj, alignment, true, false);
2da3b9b9
CW
3325 if (ret)
3326 return ret;
3327
b118c1e3
CW
3328 i915_gem_object_flush_cpu_write_domain(obj);
3329
2da3b9b9 3330 old_write_domain = obj->base.write_domain;
05394f39 3331 old_read_domains = obj->base.read_domains;
2da3b9b9
CW
3332
3333 /* It should now be out of any other write domains, and we can update
3334 * the domain values for our changes.
3335 */
e5f1d962 3336 obj->base.write_domain = 0;
05394f39 3337 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
b9241ea3
ZW
3338
3339 trace_i915_gem_object_change_domain(obj,
3340 old_read_domains,
2da3b9b9 3341 old_write_domain);
b9241ea3
ZW
3342
3343 return 0;
3344}
3345
85345517 3346int
a8198eea 3347i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
85345517 3348{
88241785
CW
3349 int ret;
3350
a8198eea 3351 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
85345517
CW
3352 return 0;
3353
0201f1ec 3354 ret = i915_gem_object_wait_rendering(obj, false);
c501ae7f
CW
3355 if (ret)
3356 return ret;
3357
a8198eea
CW
3358 /* Ensure that we invalidate the GPU's caches and TLBs. */
3359 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
c501ae7f 3360 return 0;
85345517
CW
3361}
3362
e47c68e9
EA
3363/**
3364 * Moves a single object to the CPU read, and possibly write domain.
3365 *
3366 * This function returns when the move is complete, including waiting on
3367 * flushes to occur.
3368 */
dabdfe02 3369int
919926ae 3370i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
e47c68e9 3371{
1c5d22f7 3372 uint32_t old_write_domain, old_read_domains;
e47c68e9
EA
3373 int ret;
3374
8d7e3de1
CW
3375 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3376 return 0;
3377
0201f1ec 3378 ret = i915_gem_object_wait_rendering(obj, !write);
88241785
CW
3379 if (ret)
3380 return ret;
3381
e47c68e9 3382 i915_gem_object_flush_gtt_write_domain(obj);
2ef7eeaa 3383
05394f39
CW
3384 old_write_domain = obj->base.write_domain;
3385 old_read_domains = obj->base.read_domains;
1c5d22f7 3386
e47c68e9 3387 /* Flush the CPU cache if it's still invalid. */
05394f39 3388 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2ef7eeaa 3389 i915_gem_clflush_object(obj);
2ef7eeaa 3390
05394f39 3391 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
2ef7eeaa
EA
3392 }
3393
3394 /* It should now be out of any other write domains, and we can update
3395 * the domain values for our changes.
3396 */
05394f39 3397 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
e47c68e9
EA
3398
3399 /* If we're writing through the CPU, then the GPU read domains will
3400 * need to be invalidated at next use.
3401 */
3402 if (write) {
05394f39
CW
3403 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3404 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
e47c68e9 3405 }
2ef7eeaa 3406
1c5d22f7
CW
3407 trace_i915_gem_object_change_domain(obj,
3408 old_read_domains,
3409 old_write_domain);
3410
2ef7eeaa
EA
3411 return 0;
3412}
3413
673a394b
EA
3414/* Throttle our rendering by waiting until the ring has completed our requests
3415 * emitted over 20 msec ago.
3416 *
b962442e
EA
3417 * Note that if we were to use the current jiffies each time around the loop,
3418 * we wouldn't escape the function with any frames outstanding if the time to
3419 * render a frame was over 20ms.
3420 *
673a394b
EA
3421 * This should get us reasonable parallelism between CPU and GPU but also
3422 * relatively low latency when blocking on a particular request to finish.
3423 */
40a5f0de 3424static int
f787a5f5 3425i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
40a5f0de 3426{
f787a5f5
CW
3427 struct drm_i915_private *dev_priv = dev->dev_private;
3428 struct drm_i915_file_private *file_priv = file->driver_priv;
b962442e 3429 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
f787a5f5
CW
3430 struct drm_i915_gem_request *request;
3431 struct intel_ring_buffer *ring = NULL;
f69061be 3432 unsigned reset_counter;
f787a5f5
CW
3433 u32 seqno = 0;
3434 int ret;
93533c29 3435
308887aa
DV
3436 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3437 if (ret)
3438 return ret;
3439
3440 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3441 if (ret)
3442 return ret;
e110e8d6 3443
1c25595f 3444 spin_lock(&file_priv->mm.lock);
f787a5f5 3445 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
b962442e
EA
3446 if (time_after_eq(request->emitted_jiffies, recent_enough))
3447 break;
40a5f0de 3448
f787a5f5
CW
3449 ring = request->ring;
3450 seqno = request->seqno;
b962442e 3451 }
f69061be 3452 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1c25595f 3453 spin_unlock(&file_priv->mm.lock);
40a5f0de 3454
f787a5f5
CW
3455 if (seqno == 0)
3456 return 0;
2bc43b5c 3457
f69061be 3458 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
f787a5f5
CW
3459 if (ret == 0)
3460 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
40a5f0de
EA
3461
3462 return ret;
3463}
3464
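i915_gem_ring_throttle() is exposed to userspace through the I915_GEM_THROTTLE ioctl (see i915_gem_throttle_ioctl() further down); a client that issues it once per frame roughly bounds how far ahead of the GPU it can run, about 20 ms per the comment above. A hedged sketch of that usage, with fd standing in for an open i915 DRM device:

/* Illustrative userspace sketch: call once per frame so the client never
 * queues much more than ~20 ms of work ahead of the GPU.  The ioctl takes
 * no argument. */
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void frame_throttle(int fd)
{
	ioctl(fd, DRM_IOCTL_I915_GEM_THROTTLE);
}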
673a394b 3465int
05394f39
CW
3466i915_gem_object_pin(struct drm_i915_gem_object *obj,
3467 uint32_t alignment,
86a1ee26
CW
3468 bool map_and_fenceable,
3469 bool nonblocking)
673a394b 3470{
673a394b
EA
3471 int ret;
3472
7e81a42e
CW
3473 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3474 return -EBUSY;
ac0c6b5a 3475
05394f39
CW
3476 if (obj->gtt_space != NULL) {
3477 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3478 (map_and_fenceable && !obj->map_and_fenceable)) {
3479 WARN(obj->pin_count,
ae7d49d8 3480 "bo is already pinned with incorrect alignment:"
75e9e915
DV
3481 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3482 " obj->map_and_fenceable=%d\n",
05394f39 3483 obj->gtt_offset, alignment,
75e9e915 3484 map_and_fenceable,
05394f39 3485 obj->map_and_fenceable);
ac0c6b5a
CW
3486 ret = i915_gem_object_unbind(obj);
3487 if (ret)
3488 return ret;
3489 }
3490 }
3491
05394f39 3492 if (obj->gtt_space == NULL) {
8742267a
CW
3493 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3494
a00b10c3 3495 ret = i915_gem_object_bind_to_gtt(obj, alignment,
86a1ee26
CW
3496 map_and_fenceable,
3497 nonblocking);
9731129c 3498 if (ret)
673a394b 3499 return ret;
8742267a
CW
3500
3501 if (!dev_priv->mm.aliasing_ppgtt)
3502 i915_gem_gtt_bind_object(obj, obj->cache_level);
22c344e9 3503 }
76446cac 3504
74898d7e
DV
3505 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3506 i915_gem_gtt_bind_object(obj, obj->cache_level);
3507
1b50247a 3508 obj->pin_count++;
6299f992 3509 obj->pin_mappable |= map_and_fenceable;
673a394b
EA
3510
3511 return 0;
3512}
3513
3514void
05394f39 3515i915_gem_object_unpin(struct drm_i915_gem_object *obj)
673a394b 3516{
05394f39
CW
3517 BUG_ON(obj->pin_count == 0);
3518 BUG_ON(obj->gtt_space == NULL);
673a394b 3519
1b50247a 3520 if (--obj->pin_count == 0)
6299f992 3521 obj->pin_mappable = false;
673a394b
EA
3522}
3523
3524int
3525i915_gem_pin_ioctl(struct drm_device *dev, void *data,
05394f39 3526 struct drm_file *file)
673a394b
EA
3527{
3528 struct drm_i915_gem_pin *args = data;
05394f39 3529 struct drm_i915_gem_object *obj;
673a394b
EA
3530 int ret;
3531
1d7cfea1
CW
3532 ret = i915_mutex_lock_interruptible(dev);
3533 if (ret)
3534 return ret;
673a394b 3535
05394f39 3536 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3537 if (&obj->base == NULL) {
1d7cfea1
CW
3538 ret = -ENOENT;
3539 goto unlock;
673a394b 3540 }
673a394b 3541
05394f39 3542 if (obj->madv != I915_MADV_WILLNEED) {
bb6baf76 3543 DRM_ERROR("Attempting to pin a purgeable buffer\n");
1d7cfea1
CW
3544 ret = -EINVAL;
3545 goto out;
3ef94daa
CW
3546 }
3547
05394f39 3548 if (obj->pin_filp != NULL && obj->pin_filp != file) {
79e53945
JB
3549 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3550 args->handle);
1d7cfea1
CW
3551 ret = -EINVAL;
3552 goto out;
79e53945
JB
3553 }
3554
93be8788 3555 if (obj->user_pin_count == 0) {
86a1ee26 3556 ret = i915_gem_object_pin(obj, args->alignment, true, false);
1d7cfea1
CW
3557 if (ret)
3558 goto out;
673a394b
EA
3559 }
3560
93be8788
CW
3561 obj->user_pin_count++;
3562 obj->pin_filp = file;
3563
673a394b
EA
3564 /* XXX - flush the CPU caches for pinned objects
3565 * as the X server doesn't manage domains yet
3566 */
e47c68e9 3567 i915_gem_object_flush_cpu_write_domain(obj);
05394f39 3568 args->offset = obj->gtt_offset;
1d7cfea1 3569out:
05394f39 3570 drm_gem_object_unreference(&obj->base);
1d7cfea1 3571unlock:
673a394b 3572 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3573 return ret;
673a394b
EA
3574}
3575
3576int
3577i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
05394f39 3578 struct drm_file *file)
673a394b
EA
3579{
3580 struct drm_i915_gem_pin *args = data;
05394f39 3581 struct drm_i915_gem_object *obj;
76c1dec1 3582 int ret;
673a394b 3583
1d7cfea1
CW
3584 ret = i915_mutex_lock_interruptible(dev);
3585 if (ret)
3586 return ret;
673a394b 3587
05394f39 3588 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3589 if (&obj->base == NULL) {
1d7cfea1
CW
3590 ret = -ENOENT;
3591 goto unlock;
673a394b 3592 }
76c1dec1 3593
05394f39 3594 if (obj->pin_filp != file) {
79e53945
JB
3595 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3596 args->handle);
1d7cfea1
CW
3597 ret = -EINVAL;
3598 goto out;
79e53945 3599 }
05394f39
CW
3600 obj->user_pin_count--;
3601 if (obj->user_pin_count == 0) {
3602 obj->pin_filp = NULL;
79e53945
JB
3603 i915_gem_object_unpin(obj);
3604 }
673a394b 3605
1d7cfea1 3606out:
05394f39 3607 drm_gem_object_unreference(&obj->base);
1d7cfea1 3608unlock:
673a394b 3609 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3610 return ret;
673a394b
EA
3611}
3612
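The pin/unpin ioctl pair above is the legacy interface that lets a privileged client (typically the X server under UMS) wire a buffer object into the GTT at a stable offset. A hedged userspace sketch, with placeholder fd/handle values and an assumed 4 KiB alignment; in this era the pin ioctl is normally restricted to the DRM master/root.

/* Illustrative userspace sketch: pin a GEM object into the GTT, read back
 * the offset the kernel chose, and drop the pin again. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static uint64_t pin_object(int fd, uint32_t handle)
{
	struct drm_i915_gem_pin pin = {
		.handle    = handle,
		.alignment = 4096,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin))
		return 0;
	return pin.offset;		/* GTT offset, valid while pinned */
}

static void unpin_object(int fd, uint32_t handle)
{
	struct drm_i915_gem_unpin unpin = { .handle = handle };

	ioctl(fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
}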
3613int
3614i915_gem_busy_ioctl(struct drm_device *dev, void *data,
05394f39 3615 struct drm_file *file)
673a394b
EA
3616{
3617 struct drm_i915_gem_busy *args = data;
05394f39 3618 struct drm_i915_gem_object *obj;
30dbf0c0
CW
3619 int ret;
3620
76c1dec1 3621 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 3622 if (ret)
76c1dec1 3623 return ret;
673a394b 3624
05394f39 3625 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3626 if (&obj->base == NULL) {
1d7cfea1
CW
3627 ret = -ENOENT;
3628 goto unlock;
673a394b 3629 }
d1b851fc 3630
0be555b6
CW
3631 /* Count all active objects as busy, even if they are currently not used
3632 * by the gpu. Users of this interface expect objects to eventually
3633 * become non-busy without any further actions, therefore emit any
3634 * necessary flushes here.
c4de0a5d 3635 */
30dfebf3 3636 ret = i915_gem_object_flush_active(obj);
0be555b6 3637
30dfebf3 3638 args->busy = obj->active;
e9808edd
CW
3639 if (obj->ring) {
3640 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3641 args->busy |= intel_ring_flag(obj->ring) << 16;
3642 }
673a394b 3643
05394f39 3644 drm_gem_object_unreference(&obj->base);
1d7cfea1 3645unlock:
673a394b 3646 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3647 return ret;
673a394b
EA
3648}
3649
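As the comment in i915_gem_busy_ioctl() explains, the returned busy word carries the object's active flag in the low bits and the ring that last used it shifted into the upper 16 bits. A minimal decode sketch; the helper name is illustrative, not from the original file.

/* Illustrative userspace sketch: returns 1 if the object is still busy,
 * 0 if idle, -1 on error; *ring_flag receives intel_ring_flag() of the
 * last ring to use the object, as packed by i915_gem_busy_ioctl() above. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int object_is_busy(int fd, uint32_t handle, uint32_t *ring_flag)
{
	struct drm_i915_gem_busy busy = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
		return -1;

	if (ring_flag)
		*ring_flag = busy.busy >> 16;
	return (busy.busy & 0xffff) ? 1 : 0;
}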
3650int
3651i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3652 struct drm_file *file_priv)
3653{
0206e353 3654 return i915_gem_ring_throttle(dev, file_priv);
673a394b
EA
3655}
3656
3ef94daa
CW
3657int
3658i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3659 struct drm_file *file_priv)
3660{
3661 struct drm_i915_gem_madvise *args = data;
05394f39 3662 struct drm_i915_gem_object *obj;
76c1dec1 3663 int ret;
3ef94daa
CW
3664
3665 switch (args->madv) {
3666 case I915_MADV_DONTNEED:
3667 case I915_MADV_WILLNEED:
3668 break;
3669 default:
3670 return -EINVAL;
3671 }
3672
1d7cfea1
CW
3673 ret = i915_mutex_lock_interruptible(dev);
3674 if (ret)
3675 return ret;
3676
05394f39 3677 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
c8725226 3678 if (&obj->base == NULL) {
1d7cfea1
CW
3679 ret = -ENOENT;
3680 goto unlock;
3ef94daa 3681 }
3ef94daa 3682
05394f39 3683 if (obj->pin_count) {
1d7cfea1
CW
3684 ret = -EINVAL;
3685 goto out;
3ef94daa
CW
3686 }
3687
05394f39
CW
3688 if (obj->madv != __I915_MADV_PURGED)
3689 obj->madv = args->madv;
3ef94daa 3690
6c085a72
CW
3691 /* if the object is no longer attached, discard its backing storage */
3692 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
2d7ef395
CW
3693 i915_gem_object_truncate(obj);
3694
05394f39 3695 args->retained = obj->madv != __I915_MADV_PURGED;
bb6baf76 3696
1d7cfea1 3697out:
05394f39 3698 drm_gem_object_unreference(&obj->base);
1d7cfea1 3699unlock:
3ef94daa 3700 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3701 return ret;
3ef94daa
CW
3702}
3703
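The madvise ioctl above lets a client mark an idle buffer I915_MADV_DONTNEED so the shrinker may discard its backing pages under memory pressure, and later switch it back to WILLNEED; the retained field reports whether the contents survived. A hedged userspace sketch with placeholder fd/handle:

/* Illustrative userspace sketch: returns the 'retained' flag (1 if the
 * backing storage is still there, 0 if it was purged) or -1 on error. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int gem_madvise(int fd, uint32_t handle, uint32_t advice)
{
	struct drm_i915_gem_madvise madv = {
		.handle = handle,
		.madv   = advice,	/* I915_MADV_WILLNEED or I915_MADV_DONTNEED */
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
		return -1;
	return madv.retained;
}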
37e680a1
CW
3704void i915_gem_object_init(struct drm_i915_gem_object *obj,
3705 const struct drm_i915_gem_object_ops *ops)
0327d6ba 3706{
0327d6ba
CW
3707 INIT_LIST_HEAD(&obj->mm_list);
3708 INIT_LIST_HEAD(&obj->gtt_list);
3709 INIT_LIST_HEAD(&obj->ring_list);
3710 INIT_LIST_HEAD(&obj->exec_list);
3711
37e680a1
CW
3712 obj->ops = ops;
3713
0327d6ba
CW
3714 obj->fence_reg = I915_FENCE_REG_NONE;
3715 obj->madv = I915_MADV_WILLNEED;
3716 /* Avoid an unnecessary call to unbind on the first bind. */
3717 obj->map_and_fenceable = true;
3718
3719 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3720}
3721
37e680a1
CW
3722static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3723 .get_pages = i915_gem_object_get_pages_gtt,
3724 .put_pages = i915_gem_object_put_pages_gtt,
3725};
3726
05394f39
CW
3727struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3728 size_t size)
ac52bc56 3729{
c397b908 3730 struct drm_i915_gem_object *obj;
5949eac4 3731 struct address_space *mapping;
1a240d4d 3732 gfp_t mask;
ac52bc56 3733
42dcedd4 3734 obj = i915_gem_object_alloc(dev);
c397b908
DV
3735 if (obj == NULL)
3736 return NULL;
673a394b 3737
c397b908 3738 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
42dcedd4 3739 i915_gem_object_free(obj);
c397b908
DV
3740 return NULL;
3741 }
673a394b 3742
bed1ea95
CW
3743 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3744 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3745 /* 965gm cannot relocate objects above 4GiB. */
3746 mask &= ~__GFP_HIGHMEM;
3747 mask |= __GFP_DMA32;
3748 }
3749
5949eac4 3750 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
bed1ea95 3751 mapping_set_gfp_mask(mapping, mask);
5949eac4 3752
37e680a1 3753 i915_gem_object_init(obj, &i915_gem_object_ops);
73aa808f 3754
c397b908
DV
3755 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3756 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
673a394b 3757
3d29b842
ED
3758 if (HAS_LLC(dev)) {
3759 /* On some devices, we can have the GPU use the LLC (the CPU
a1871112
EA
3760 * cache) for about a 10% performance improvement
3761 * compared to uncached. Graphics requests other than
3762 * display scanout are coherent with the CPU in
3763 * accessing this cache. This means in this mode we
3764 * don't need to clflush on the CPU side, and on the
3765 * GPU side we only need to flush internal caches to
3766 * get data visible to the CPU.
3767 *
3768 * However, we maintain the display planes as UC, and so
3769 * need to rebind when first used as such.
3770 */
3771 obj->cache_level = I915_CACHE_LLC;
3772 } else
3773 obj->cache_level = I915_CACHE_NONE;
3774
05394f39 3775 return obj;
c397b908
DV
3776}
3777
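i915_gem_alloc_object() together with i915_gem_object_pin() further up is the usual in-kernel path for buffers the driver itself needs resident (ring buffers, contexts, scanout). A speculative in-kernel sketch under those assumptions: alloc_pinned_bo() and the 4 KiB size are hypothetical and not part of this file, and the caller is assumed to hold dev->struct_mutex.

/* Hypothetical in-kernel helper (illustration only): allocate a GEM object
 * and pin it into the mappable GTT, roughly as ring/context setup code does.
 * Caller must hold dev->struct_mutex. */
static struct drm_i915_gem_object *alloc_pinned_bo(struct drm_device *dev)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL)
		return NULL;

	/* map_and_fenceable so the CPU can reach it through the aperture */
	if (i915_gem_object_pin(obj, 4096, true, false)) {
		drm_gem_object_unreference(&obj->base);
		return NULL;
	}
	return obj;
}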
3778int i915_gem_init_object(struct drm_gem_object *obj)
3779{
3780 BUG();
de151cf6 3781
673a394b
EA
3782 return 0;
3783}
3784
1488fc08 3785void i915_gem_free_object(struct drm_gem_object *gem_obj)
673a394b 3786{
1488fc08 3787 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
05394f39 3788 struct drm_device *dev = obj->base.dev;
be72615b 3789 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b 3790
26e12f89
CW
3791 trace_i915_gem_object_destroy(obj);
3792
1488fc08
CW
3793 if (obj->phys_obj)
3794 i915_gem_detach_phys_object(dev, obj);
3795
3796 obj->pin_count = 0;
3797 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3798 bool was_interruptible;
3799
3800 was_interruptible = dev_priv->mm.interruptible;
3801 dev_priv->mm.interruptible = false;
3802
3803 WARN_ON(i915_gem_object_unbind(obj));
3804
3805 dev_priv->mm.interruptible = was_interruptible;
3806 }
3807
a5570178 3808 obj->pages_pin_count = 0;
37e680a1 3809 i915_gem_object_put_pages(obj);
d8cb5086 3810 i915_gem_object_free_mmap_offset(obj);
0104fdbb 3811 i915_gem_object_release_stolen(obj);
de151cf6 3812
9da3da66
CW
3813 BUG_ON(obj->pages);
3814
2f745ad3
CW
3815 if (obj->base.import_attach)
3816 drm_prime_gem_destroy(&obj->base, NULL);
de151cf6 3817
05394f39
CW
3818 drm_gem_object_release(&obj->base);
3819 i915_gem_info_remove_obj(dev_priv, obj->base.size);
c397b908 3820
05394f39 3821 kfree(obj->bit_17);
42dcedd4 3822 i915_gem_object_free(obj);
673a394b
EA
3823}
3824
29105ccc
CW
3825int
3826i915_gem_idle(struct drm_device *dev)
3827{
3828 drm_i915_private_t *dev_priv = dev->dev_private;
3829 int ret;
28dfe52a 3830
29105ccc 3831 mutex_lock(&dev->struct_mutex);
1c5d22f7 3832
87acb0a5 3833 if (dev_priv->mm.suspended) {
29105ccc
CW
3834 mutex_unlock(&dev->struct_mutex);
3835 return 0;
28dfe52a
EA
3836 }
3837
b2da9fe5 3838 ret = i915_gpu_idle(dev);
6dbe2772
KP
3839 if (ret) {
3840 mutex_unlock(&dev->struct_mutex);
673a394b 3841 return ret;
6dbe2772 3842 }
b2da9fe5 3843 i915_gem_retire_requests(dev);
673a394b 3844
29105ccc 3845 /* Under UMS, be paranoid and evict. */
a39d7efc 3846 if (!drm_core_check_feature(dev, DRIVER_MODESET))
6c085a72 3847 i915_gem_evict_everything(dev);
29105ccc 3848
312817a3
CW
3849 i915_gem_reset_fences(dev);
3850
29105ccc
CW
3851 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3852 * We need to replace this with a semaphore, or something.
3853 * And not confound mm.suspended!
3854 */
3855 dev_priv->mm.suspended = 1;
99584db3 3856 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
29105ccc
CW
3857
3858 i915_kernel_lost_context(dev);
6dbe2772 3859 i915_gem_cleanup_ringbuffer(dev);
29105ccc 3860
6dbe2772
KP
3861 mutex_unlock(&dev->struct_mutex);
3862
29105ccc
CW
3863 /* Cancel the retire work handler, which should be idle now. */
3864 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3865
673a394b
EA
3866 return 0;
3867}
3868
b9524a1e
BW
3869void i915_gem_l3_remap(struct drm_device *dev)
3870{
3871 drm_i915_private_t *dev_priv = dev->dev_private;
3872 u32 misccpctl;
3873 int i;
3874
eb32e458 3875 if (!HAS_L3_GPU_CACHE(dev))
b9524a1e
BW
3876 return;
3877
a4da4fa4 3878 if (!dev_priv->l3_parity.remap_info)
b9524a1e
BW
3879 return;
3880
3881 misccpctl = I915_READ(GEN7_MISCCPCTL);
3882 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
3883 POSTING_READ(GEN7_MISCCPCTL);
3884
3885 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
3886 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
a4da4fa4 3887 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
b9524a1e
BW
3888 DRM_DEBUG("0x%x was already programmed to %x\n",
3889 GEN7_L3LOG_BASE + i, remap);
a4da4fa4 3890 if (remap && !dev_priv->l3_parity.remap_info[i/4])
b9524a1e 3891 DRM_DEBUG_DRIVER("Clearing remapped register\n");
a4da4fa4 3892 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
b9524a1e
BW
3893 }
3894
3895 /* Make sure all the writes land before disabling dop clock gating */
3896 POSTING_READ(GEN7_L3LOG_BASE);
3897
3898 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
3899}
3900
f691e2f4
DV
3901void i915_gem_init_swizzling(struct drm_device *dev)
3902{
3903 drm_i915_private_t *dev_priv = dev->dev_private;
3904
11782b02 3905 if (INTEL_INFO(dev)->gen < 5 ||
f691e2f4
DV
3906 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3907 return;
3908
3909 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3910 DISP_TILE_SURFACE_SWIZZLING);
3911
11782b02
DV
3912 if (IS_GEN5(dev))
3913 return;
3914
f691e2f4
DV
3915 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3916 if (IS_GEN6(dev))
6b26c86d 3917 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
8782e26c 3918 else if (IS_GEN7(dev))
6b26c86d 3919 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
8782e26c
BW
3920 else
3921 BUG();
f691e2f4 3922}
e21af88d 3923
67b1b571
CW
3924static bool
3925intel_enable_blt(struct drm_device *dev)
3926{
3927 if (!HAS_BLT(dev))
3928 return false;
3929
3930 /* The blitter was dysfunctional on early prototypes */
3931 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
3932 DRM_INFO("BLT not supported on this pre-production hardware;"
3933 " graphics performance will be degraded.\n");
3934 return false;
3935 }
3936
3937 return true;
3938}
3939
4fc7c971 3940static int i915_gem_init_rings(struct drm_device *dev)
8187a2b7 3941{
4fc7c971 3942 struct drm_i915_private *dev_priv = dev->dev_private;
8187a2b7 3943 int ret;
68f95ba9 3944
5c1143bb 3945 ret = intel_init_render_ring_buffer(dev);
68f95ba9 3946 if (ret)
b6913e4b 3947 return ret;
68f95ba9
CW
3948
3949 if (HAS_BSD(dev)) {
5c1143bb 3950 ret = intel_init_bsd_ring_buffer(dev);
68f95ba9
CW
3951 if (ret)
3952 goto cleanup_render_ring;
d1b851fc 3953 }
68f95ba9 3954
67b1b571 3955 if (intel_enable_blt(dev)) {
549f7365
CW
3956 ret = intel_init_blt_ring_buffer(dev);
3957 if (ret)
3958 goto cleanup_bsd_ring;
3959 }
3960
99433931 3961 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4fc7c971
BW
3962 if (ret)
3963 goto cleanup_blt_ring;
3964
3965 return 0;
3966
3967cleanup_blt_ring:
3968 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
3969cleanup_bsd_ring:
3970 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3971cleanup_render_ring:
3972 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3973
3974 return ret;
3975}
3976
3977int
3978i915_gem_init_hw(struct drm_device *dev)
3979{
3980 drm_i915_private_t *dev_priv = dev->dev_private;
3981 int ret;
3982
3983 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
3984 return -EIO;
3985
3986 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
3987 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
3988
3989 i915_gem_l3_remap(dev);
3990
3991 i915_gem_init_swizzling(dev);
3992
3993 ret = i915_gem_init_rings(dev);
99433931
MK
3994 if (ret)
3995 return ret;
3996
254f965c
BW
3997 /*
3998 * XXX: There was some w/a described somewhere suggesting loading
3999 * contexts before PPGTT.
4000 */
4001 i915_gem_context_init(dev);
e21af88d
DV
4002 i915_gem_init_ppgtt(dev);
4003
68f95ba9 4004 return 0;
8187a2b7
ZN
4005}
4006
1070a42b
CW
4007int i915_gem_init(struct drm_device *dev)
4008{
4009 struct drm_i915_private *dev_priv = dev->dev_private;
1070a42b
CW
4010 int ret;
4011
1070a42b 4012 mutex_lock(&dev->struct_mutex);
d7e5008f 4013 i915_gem_init_global_gtt(dev);
1070a42b
CW
4014 ret = i915_gem_init_hw(dev);
4015 mutex_unlock(&dev->struct_mutex);
4016 if (ret) {
4017 i915_gem_cleanup_aliasing_ppgtt(dev);
4018 return ret;
4019 }
4020
53ca26ca
DV
4021 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4022 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4023 dev_priv->dri1.allow_batchbuffer = 1;
1070a42b
CW
4024 return 0;
4025}
4026
8187a2b7
ZN
4027void
4028i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4029{
4030 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 4031 struct intel_ring_buffer *ring;
1ec14ad3 4032 int i;
8187a2b7 4033
b4519513
CW
4034 for_each_ring(ring, dev_priv, i)
4035 intel_cleanup_ring_buffer(ring);
8187a2b7
ZN
4036}
4037
673a394b
EA
4038int
4039i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4040 struct drm_file *file_priv)
4041{
4042 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 4043 int ret;
673a394b 4044
79e53945
JB
4045 if (drm_core_check_feature(dev, DRIVER_MODESET))
4046 return 0;
4047
1f83fee0 4048 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
673a394b 4049 DRM_ERROR("Reenabling wedged hardware, good luck\n");
1f83fee0 4050 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
673a394b
EA
4051 }
4052
673a394b 4053 mutex_lock(&dev->struct_mutex);
9bb2d6f9
EA
4054 dev_priv->mm.suspended = 0;
4055
f691e2f4 4056 ret = i915_gem_init_hw(dev);
d816f6ac
WF
4057 if (ret != 0) {
4058 mutex_unlock(&dev->struct_mutex);
9bb2d6f9 4059 return ret;
d816f6ac 4060 }
9bb2d6f9 4061
69dc4987 4062 BUG_ON(!list_empty(&dev_priv->mm.active_list));
673a394b 4063 mutex_unlock(&dev->struct_mutex);
dbb19d30 4064
5f35308b
CW
4065 ret = drm_irq_install(dev);
4066 if (ret)
4067 goto cleanup_ringbuffer;
dbb19d30 4068
673a394b 4069 return 0;
5f35308b
CW
4070
4071cleanup_ringbuffer:
4072 mutex_lock(&dev->struct_mutex);
4073 i915_gem_cleanup_ringbuffer(dev);
4074 dev_priv->mm.suspended = 1;
4075 mutex_unlock(&dev->struct_mutex);
4076
4077 return ret;
673a394b
EA
4078}
4079
4080int
4081i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4082 struct drm_file *file_priv)
4083{
79e53945
JB
4084 if (drm_core_check_feature(dev, DRIVER_MODESET))
4085 return 0;
4086
dbb19d30 4087 drm_irq_uninstall(dev);
e6890f6f 4088 return i915_gem_idle(dev);
673a394b
EA
4089}
4090
4091void
4092i915_gem_lastclose(struct drm_device *dev)
4093{
4094 int ret;
673a394b 4095
e806b495
EA
4096 if (drm_core_check_feature(dev, DRIVER_MODESET))
4097 return;
4098
6dbe2772
KP
4099 ret = i915_gem_idle(dev);
4100 if (ret)
4101 DRM_ERROR("failed to idle hardware: %d\n", ret);
673a394b
EA
4102}
4103
64193406
CW
4104static void
4105init_ring_lists(struct intel_ring_buffer *ring)
4106{
4107 INIT_LIST_HEAD(&ring->active_list);
4108 INIT_LIST_HEAD(&ring->request_list);
64193406
CW
4109}
4110
673a394b
EA
4111void
4112i915_gem_load(struct drm_device *dev)
4113{
4114 drm_i915_private_t *dev_priv = dev->dev_private;
42dcedd4
CW
4115 int i;
4116
4117 dev_priv->slab =
4118 kmem_cache_create("i915_gem_object",
4119 sizeof(struct drm_i915_gem_object), 0,
4120 SLAB_HWCACHE_ALIGN,
4121 NULL);
673a394b 4122
69dc4987 4123 INIT_LIST_HEAD(&dev_priv->mm.active_list);
673a394b 4124 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
6c085a72
CW
4125 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4126 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
a09ba7fa 4127 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
1ec14ad3
CW
4128 for (i = 0; i < I915_NUM_RINGS; i++)
4129 init_ring_lists(&dev_priv->ring[i]);
4b9de737 4130 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
007cc8ac 4131 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
673a394b
EA
4132 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4133 i915_gem_retire_work_handler);
1f83fee0 4134 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
31169714 4135
94400120
DA
4136 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4137 if (IS_GEN3(dev)) {
50743298
DV
4138 I915_WRITE(MI_ARB_STATE,
4139 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
94400120
DA
4140 }
4141
72bfa19c
CW
4142 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4143
de151cf6 4144 /* Old X drivers will take 0-2 for front, back, depth buffers */
b397c836
EA
4145 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4146 dev_priv->fence_reg_start = 3;
de151cf6 4147
a6c45cf0 4148 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
de151cf6
JB
4149 dev_priv->num_fence_regs = 16;
4150 else
4151 dev_priv->num_fence_regs = 8;
4152
b5aa8a0f 4153 /* Initialize fence registers to zero */
ada726c7 4154 i915_gem_reset_fences(dev);
10ed13e4 4155
673a394b 4156 i915_gem_detect_bit_6_swizzle(dev);
6b95a207 4157 init_waitqueue_head(&dev_priv->pending_flip_queue);
17250b71 4158
ce453d81
CW
4159 dev_priv->mm.interruptible = true;
4160
17250b71
CW
4161 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4162 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4163 register_shrinker(&dev_priv->mm.inactive_shrinker);
673a394b 4164}
71acb5eb
DA
4165
4166/*
4167 * Create a physically contiguous memory object for this object
4168 * e.g. for cursor + overlay regs
4169 */
995b6762
CW
4170static int i915_gem_init_phys_object(struct drm_device *dev,
4171 int id, int size, int align)
71acb5eb
DA
4172{
4173 drm_i915_private_t *dev_priv = dev->dev_private;
4174 struct drm_i915_gem_phys_object *phys_obj;
4175 int ret;
4176
4177 if (dev_priv->mm.phys_objs[id - 1] || !size)
4178 return 0;
4179
9a298b2a 4180 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
71acb5eb
DA
4181 if (!phys_obj)
4182 return -ENOMEM;
4183
4184 phys_obj->id = id;
4185
6eeefaf3 4186 phys_obj->handle = drm_pci_alloc(dev, size, align);
71acb5eb
DA
4187 if (!phys_obj->handle) {
4188 ret = -ENOMEM;
4189 goto kfree_obj;
4190 }
4191#ifdef CONFIG_X86
4192 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4193#endif
4194
4195 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4196
4197 return 0;
4198kfree_obj:
9a298b2a 4199 kfree(phys_obj);
71acb5eb
DA
4200 return ret;
4201}
4202
995b6762 4203static void i915_gem_free_phys_object(struct drm_device *dev, int id)
71acb5eb
DA
4204{
4205 drm_i915_private_t *dev_priv = dev->dev_private;
4206 struct drm_i915_gem_phys_object *phys_obj;
4207
4208 if (!dev_priv->mm.phys_objs[id - 1])
4209 return;
4210
4211 phys_obj = dev_priv->mm.phys_objs[id - 1];
4212 if (phys_obj->cur_obj) {
4213 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4214 }
4215
4216#ifdef CONFIG_X86
4217 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4218#endif
4219 drm_pci_free(dev, phys_obj->handle);
4220 kfree(phys_obj);
4221 dev_priv->mm.phys_objs[id - 1] = NULL;
4222}
4223
4224void i915_gem_free_all_phys_object(struct drm_device *dev)
4225{
4226 int i;
4227
260883c8 4228 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
71acb5eb
DA
4229 i915_gem_free_phys_object(dev, i);
4230}
4231
4232void i915_gem_detach_phys_object(struct drm_device *dev,
05394f39 4233 struct drm_i915_gem_object *obj)
71acb5eb 4234{
05394f39 4235 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
e5281ccd 4236 char *vaddr;
71acb5eb 4237 int i;
71acb5eb
DA
4238 int page_count;
4239
05394f39 4240 if (!obj->phys_obj)
71acb5eb 4241 return;
05394f39 4242 vaddr = obj->phys_obj->handle->vaddr;
71acb5eb 4243
05394f39 4244 page_count = obj->base.size / PAGE_SIZE;
71acb5eb 4245 for (i = 0; i < page_count; i++) {
5949eac4 4246 struct page *page = shmem_read_mapping_page(mapping, i);
e5281ccd
CW
4247 if (!IS_ERR(page)) {
4248 char *dst = kmap_atomic(page);
4249 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4250 kunmap_atomic(dst);
4251
4252 drm_clflush_pages(&page, 1);
4253
4254 set_page_dirty(page);
4255 mark_page_accessed(page);
4256 page_cache_release(page);
4257 }
71acb5eb 4258 }
e76e9aeb 4259 i915_gem_chipset_flush(dev);
d78b47b9 4260
05394f39
CW
4261 obj->phys_obj->cur_obj = NULL;
4262 obj->phys_obj = NULL;
71acb5eb
DA
4263}
4264
4265int
4266i915_gem_attach_phys_object(struct drm_device *dev,
05394f39 4267 struct drm_i915_gem_object *obj,
6eeefaf3
CW
4268 int id,
4269 int align)
71acb5eb 4270{
05394f39 4271 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
71acb5eb 4272 drm_i915_private_t *dev_priv = dev->dev_private;
71acb5eb
DA
4273 int ret = 0;
4274 int page_count;
4275 int i;
4276
4277 if (id > I915_MAX_PHYS_OBJECT)
4278 return -EINVAL;
4279
05394f39
CW
4280 if (obj->phys_obj) {
4281 if (obj->phys_obj->id == id)
71acb5eb
DA
4282 return 0;
4283 i915_gem_detach_phys_object(dev, obj);
4284 }
4285
71acb5eb
DA
4286 /* create a new object */
4287 if (!dev_priv->mm.phys_objs[id - 1]) {
4288 ret = i915_gem_init_phys_object(dev, id,
05394f39 4289 obj->base.size, align);
71acb5eb 4290 if (ret) {
05394f39
CW
4291 DRM_ERROR("failed to init phys object %d size: %zu\n",
4292 id, obj->base.size);
e5281ccd 4293 return ret;
71acb5eb
DA
4294 }
4295 }
4296
4297 /* bind to the object */
05394f39
CW
4298 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4299 obj->phys_obj->cur_obj = obj;
71acb5eb 4300
05394f39 4301 page_count = obj->base.size / PAGE_SIZE;
71acb5eb
DA
4302
4303 for (i = 0; i < page_count; i++) {
e5281ccd
CW
4304 struct page *page;
4305 char *dst, *src;
4306
5949eac4 4307 page = shmem_read_mapping_page(mapping, i);
e5281ccd
CW
4308 if (IS_ERR(page))
4309 return PTR_ERR(page);
71acb5eb 4310
ff75b9bc 4311 src = kmap_atomic(page);
05394f39 4312 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
71acb5eb 4313 memcpy(dst, src, PAGE_SIZE);
3e4d3af5 4314 kunmap_atomic(src);
71acb5eb 4315
e5281ccd
CW
4316 mark_page_accessed(page);
4317 page_cache_release(page);
4318 }
d78b47b9 4319
71acb5eb 4320 return 0;
71acb5eb
DA
4321}
4322
4323static int
05394f39
CW
4324i915_gem_phys_pwrite(struct drm_device *dev,
4325 struct drm_i915_gem_object *obj,
71acb5eb
DA
4326 struct drm_i915_gem_pwrite *args,
4327 struct drm_file *file_priv)
4328{
05394f39 4329 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
2bb4629a 4330 char __user *user_data = to_user_ptr(args->data_ptr);
71acb5eb 4331
b47b30cc
CW
4332 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4333 unsigned long unwritten;
4334
4335 /* The physical object once assigned is fixed for the lifetime
4336 * of the obj, so we can safely drop the lock and continue
4337 * to access vaddr.
4338 */
4339 mutex_unlock(&dev->struct_mutex);
4340 unwritten = copy_from_user(vaddr, user_data, args->size);
4341 mutex_lock(&dev->struct_mutex);
4342 if (unwritten)
4343 return -EFAULT;
4344 }
71acb5eb 4345
e76e9aeb 4346 i915_gem_chipset_flush(dev);
71acb5eb
DA
4347 return 0;
4348}
b962442e 4349
f787a5f5 4350void i915_gem_release(struct drm_device *dev, struct drm_file *file)
b962442e 4351{
f787a5f5 4352 struct drm_i915_file_private *file_priv = file->driver_priv;
b962442e
EA
4353
4354 /* Clean up our request list when the client is going away, so that
4355 * later retire_requests won't dereference our soon-to-be-gone
4356 * file_priv.
4357 */
1c25595f 4358 spin_lock(&file_priv->mm.lock);
f787a5f5
CW
4359 while (!list_empty(&file_priv->mm.request_list)) {
4360 struct drm_i915_gem_request *request;
4361
4362 request = list_first_entry(&file_priv->mm.request_list,
4363 struct drm_i915_gem_request,
4364 client_list);
4365 list_del(&request->client_list);
4366 request->file_priv = NULL;
4367 }
1c25595f 4368 spin_unlock(&file_priv->mm.lock);
b962442e 4369}
31169714 4370
5774506f
CW
4371static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4372{
4373 if (!mutex_is_locked(mutex))
4374 return false;
4375
4376#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4377 return mutex->owner == task;
4378#else
4379 /* Since UP may be pre-empted, we cannot assume that we own the lock */
4380 return false;
4381#endif
4382}
4383
31169714 4384static int
1495f230 4385i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
31169714 4386{
17250b71
CW
4387 struct drm_i915_private *dev_priv =
4388 container_of(shrinker,
4389 struct drm_i915_private,
4390 mm.inactive_shrinker);
4391 struct drm_device *dev = dev_priv->dev;
6c085a72 4392 struct drm_i915_gem_object *obj;
1495f230 4393 int nr_to_scan = sc->nr_to_scan;
5774506f 4394 bool unlock = true;
17250b71
CW
4395 int cnt;
4396
5774506f
CW
4397 if (!mutex_trylock(&dev->struct_mutex)) {
4398 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4399 return 0;
4400
677feac2
DV
4401 if (dev_priv->mm.shrinker_no_lock_stealing)
4402 return 0;
4403
5774506f
CW
4404 unlock = false;
4405 }
31169714 4406
6c085a72
CW
4407 if (nr_to_scan) {
4408 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
93927ca5
DV
4409 if (nr_to_scan > 0)
4410 nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
4411 false);
6c085a72
CW
4412 if (nr_to_scan > 0)
4413 i915_gem_shrink_all(dev_priv);
31169714
CW
4414 }
4415
17250b71 4416 cnt = 0;
6c085a72 4417 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
a5570178
CW
4418 if (obj->pages_pin_count == 0)
4419 cnt += obj->base.size >> PAGE_SHIFT;
93927ca5 4420 list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
a5570178 4421 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
6c085a72 4422 cnt += obj->base.size >> PAGE_SHIFT;
17250b71 4423
5774506f
CW
4424 if (unlock)
4425 mutex_unlock(&dev->struct_mutex);
6c085a72 4426 return cnt;
31169714 4427}