/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>

static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						    unsigned alignment,
						    bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_device *dev,
				     struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

static int
i915_gem_wait_for_error(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	ret = wait_for_completion_interruptible(x);
	if (ret)
		return ret;

	if (atomic_read(&dev_priv->mm.wedged)) {
		/* GPU is hung, bump the completion count to account for
		 * the token we just consumed so that we never hit zero and
		 * end up waiting upon a subsequent completion event that
		 * will never happen.
		 */
		spin_lock_irqsave(&x->wait.lock, flags);
		x->done++;
		spin_unlock_irqrestore(&x->wait.lock, flags);
	}
	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_wait_for_error(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space && !obj->active && obj->pin_count == 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_init *args = data;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_init_global_gtt(dev, args->gtt_start,
				 args->gtt_end, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
		pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

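/* Worked example for the pitch computation in i915_gem_dumb_create()
 * above (illustrative only): a 1024x768 dumb buffer at bpp = 24 rounds
 * bytes-per-pixel up via (24 + 7) / 8 = 3, giving a raw stride of
 * 1024 * 3 = 3072 bytes, which is already 64-byte aligned, so
 * pitch = 3072 and size = 3072 * 768 = 2359296 bytes. At bpp = 15 the
 * raw stride would be 1024 * 2 = 2048 (also aligned); odd widths are
 * where ALIGN(, 64) actually pads the stride.
 */
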
int i915_gem_dumb_destroy(struct drm_file *file,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

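/* Userspace sketch for i915_gem_create_ioctl() above (illustrative,
 * not part of the driver): creating a buffer object through this
 * ioctl, assuming an fd from open() on a DRM node such as
 * /dev/dri/card0. drmIoctl() is libdrm's EINTR-safe ioctl wrapper;
 * use_handle() is a hypothetical consumer of the returned handle:
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use_handle(create.handle);
 */
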
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
			  const char *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

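/* Worked example of the gpu_offset ^ 64 step in the two helpers above
 * (illustrative): bit-17 swizzling exchanges the two 64-byte halves of
 * each 128-byte span, so a copy starting at gpu_offset 0 of length 128
 * proceeds as two cacheline chunks: bytes 0..63 are accessed at
 * 0 ^ 64 = 64, and bytes 64..127 at 64 ^ 64 = 0. ALIGN(gpu_offset + 1, 64)
 * caps each chunk at the next cacheline boundary, so the XOR target is
 * constant within a chunk.
 */
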
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int prefaulted = 0;
	int needs_clflush = 0;
	int release_page;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush = 1;
		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret)
			return ret;
	}

	offset = args->offset;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		if (obj->pages) {
			page = obj->pages[offset >> PAGE_SHIFT];
			release_page = 0;
		} else {
			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto out;
			}
			release_page = 1;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		page_cache_get(page);
		mutex_unlock(&dev->struct_mutex);

		if (!prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);
		page_cache_release(page);
next_page:
		mark_page_accessed(page);
		if (release_page)
			page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	if (hit_slowpath) {
		/* Fixup: Kill any reinstated backing storage pages */
		if (obj->madv == __I915_MADV_PURGED)
			i915_gem_object_truncate(obj);
	}

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

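/* Userspace sketch for i915_gem_pread_ioctl() above (illustrative, not
 * part of the driver): reading the first page of an object back into a
 * local buffer. fd and handle are assumed to come from the create
 * example earlier:
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 */
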
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_object_pin(obj, 0, true);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines
 * after writing if needs_clflush_after is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	int release_page;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush_after = 1;
		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ret;
	}
	/* Same trick applies to invalidating partially written cachelines
	 * before writing. */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
	    && obj->cache_level == I915_CACHE_NONE)
		needs_clflush_before = 1;

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;
		int partial_cacheline_write;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		if (obj->pages) {
			page = obj->pages[offset >> PAGE_SHIFT];
			release_page = 0;
		} else {
			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto out;
			}
			release_page = 1;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		page_cache_get(page);
		mutex_unlock(&dev->struct_mutex);

		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);
		page_cache_release(page);
next_page:
		set_page_dirty(page);
		mark_page_accessed(page);
		if (release_page)
			page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	if (hit_slowpath) {
		/* Fixup: Kill any reinstated backing storage pages */
		if (obj->madv == __I915_MADV_PURGED)
			i915_gem_object_truncate(obj);
		/* and flush dirty cachelines in case the object isn't in the cpu write
		 * domain anymore. */
		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			intel_gtt_chipset_flush();
		}
	}

	if (needs_clflush_after)
		intel_gtt_chipset_flush();

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
					   args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->gtt_space &&
	    obj->cache_level == I915_CACHE_NONE &&
	    obj->map_and_fenceable &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fall back to the shmem path in that case. */
	}

	if (ret == -EFAULT)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

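/* Userspace sketch for i915_gem_set_domain_ioctl() above (illustrative
 * only): one common pattern is to move an object to the CPU domain
 * before writing it through an mmap. fd and handle are assumed from
 * the earlier create example:
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */
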
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

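/* Userspace sketch for i915_gem_mmap_ioctl() above (illustrative, not
 * part of the driver): the kernel returns the CPU address in addr_ptr
 * rather than the caller going through mmap(2) on the DRM fd. handle,
 * fd and src are assumed from earlier examples:
 *
 *	struct drm_i915_gem_mmap map = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = 4096,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &map) == 0)
 *		memcpy((void *)(uintptr_t)map.addr_ptr, src, 4096);
 */
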
/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Now bind it into the GTT if needed */
	if (!obj->map_and_fenceable) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			goto unlock;
	}
	if (!obj->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
		if (ret)
			goto unlock;

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	if (!obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	ret = i915_gem_object_get_fence(obj, NULL);
	if (ret)
		goto unlock;

	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	obj->fault_mappable = true;

	pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
	case -EAGAIN:
		/* Give the error handler a chance to run and move the
		 * objects off the GPU active list. Next time we service the
		 * fault, we should be able to transition the page into the
		 * GTT without touching the GPU (and so avoid further
		 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
		 * with coherency, just lost writes.
		 */
		set_need_resched();
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	if (obj->base.dev->dev_mapping)
		unmap_mapping_range(obj->base.dev->dev_mapping,
				    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
				    obj->base.size, 1);

	obj->fault_mappable = false;
}

static uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;
}

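/* Worked example for i915_gem_get_gtt_size() above (illustrative): a
 * 3MiB tiled object on gen3 starts from the 1MiB minimum and doubles
 * until it covers the object, 1MiB -> 2MiB -> 4MiB, so the fence
 * region is 4MiB. The same object on pre-gen3 hardware starts at
 * 512KiB and likewise ends at 4MiB; on gen4+ or for untiled objects
 * the size is returned unchanged.
 */
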
/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @dev: the device
 * @size: size of the object
 * @tiling_mode: tiling mode of the object
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev,
			   uint32_t size,
			   int tiling_mode)
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

/**
 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
 *					 unfenced object
 * @dev: the device
 * @size: size of the object
 * @tiling_mode: tiling mode of the object
 *
 * Return the required GTT alignment for an object, only taking into account
 * unfenced tiled surface requirements.
 */
uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
				    uint32_t size,
				    int tiling_mode)
{
	/*
	 * Minimum alignment is 4k (GTT page size) for sane hw.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/* Previous hardware however needs to be aligned to a power-of-two
	 * tile height. The simplest method for determining this is to reuse
	 * the power-of-two object size.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
		ret = -E2BIG;
		goto out;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (!obj->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(&obj->base);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}

static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
			      gfp_t gfpmask)
{
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;

	/* Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->base.size / PAGE_SIZE;
	BUG_ON(obj->pages != NULL);
	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
	if (obj->pages == NULL)
		return -ENOMEM;

	inode = obj->base.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(page))
			goto err_pages;

		obj->pages[i] = page;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	while (i--)
		page_cache_release(obj->pages[i]);

	drm_free_large(obj->pages);
	obj->pages = NULL;
	return PTR_ERR(page);
}

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int i;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for (i = 0; i < page_count; i++) {
		if (obj->dirty)
			set_page_dirty(obj->pages[i]);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(obj->pages[i]);

		page_cache_release(obj->pages[i]);
	}
	obj->dirty = 0;

	drm_free_large(obj->pages);
	obj->pages = NULL;
}

void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring,
			       u32 seqno)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	BUG_ON(ring == NULL);
	obj->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj->active) {
		drm_gem_object_reference(&obj->base);
		obj->active = 1;
	}

	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
	list_move_tail(&obj->ring_list, &ring->active_list);

	obj->last_rendering_seqno = seqno;

	if (obj->fenced_gpu_access) {
		obj->last_fenced_seqno = seqno;
		obj->last_fenced_ring = ring;

		/* Bump MRU to take account of the delayed flush */
		if (obj->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg;

			reg = &dev_priv->fence_regs[obj->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}
	}
}

static void
i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
{
	list_del_init(&obj->ring_list);
	obj->last_rendering_seqno = 0;
}

static void
i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	BUG_ON(!obj->active);
	list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);

	i915_gem_object_move_off_active(obj);
}

static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (obj->pin_count != 0)
		list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
	else
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	BUG_ON(!list_empty(&obj->gpu_write_list));
	BUG_ON(!obj->active);
	obj->ring = NULL;

	i915_gem_object_move_off_active(obj);
	obj->fenced_gpu_access = false;

	obj->active = 0;
	obj->pending_gpu_write = false;
	drm_gem_object_unreference(&obj->base);

	WARN_ON(i915_verify_lists(dev));
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	struct inode *inode;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	inode = obj->base.filp->f_path.dentry->d_inode;
	shmem_truncate_range(inode, 0, (loff_t)-1);

	if (obj->base.map_list.map)
		drm_gem_free_mmap_offset(&obj->base);

	obj->madv = __I915_MADV_PURGED;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
	return obj->madv == I915_MADV_DONTNEED;
}

static void
i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
			       uint32_t flush_domains)
{
	struct drm_i915_gem_object *obj, *next;

	list_for_each_entry_safe(obj, next,
				 &ring->gpu_write_list,
				 gpu_write_list) {
		if (obj->base.write_domain & flush_domains) {
			uint32_t old_write_domain = obj->base.write_domain;

			obj->base.write_domain = 0;
			list_del_init(&obj->gpu_write_list);
			i915_gem_object_move_to_active(obj, ring,
						       i915_gem_next_request_seqno(ring));

			trace_i915_gem_object_change_domain(obj,
							    obj->base.read_domains,
							    old_write_domain);
		}
	}
}

static u32
i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

u32
i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
{
	if (ring->outstanding_lazy_request == 0)
		ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);

	return ring->outstanding_lazy_request;
}

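/* Worked example for the wrap handling in i915_gem_get_seqno() above
 * (illustrative): with next_seqno == 0xffffffff, the caller is handed
 * 0xffffffff and the pre-increment wraps next_seqno to 0, which the
 * check immediately bumps to 1 - so 0 is never handed out and can
 * safely mean "no outstanding request" elsewhere.
 */
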
int
i915_add_request(struct intel_ring_buffer *ring,
		 struct drm_file *file,
		 struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	uint32_t seqno;
	u32 request_ring_position;
	int was_empty;
	int ret;

	BUG_ON(request == NULL);
	seqno = i915_gem_next_request_seqno(ring);

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request_ring_position = intel_ring_get_tail(ring);

	ret = ring->add_request(ring, &seqno);
	if (ret)
		return ret;

	trace_i915_gem_request_add(ring, seqno);

	request->seqno = seqno;
	request->ring = ring;
	request->tail = request_ring_position;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&ring->request_list);
	list_add_tail(&request->list, &ring->request_list);

	if (file) {
		struct drm_i915_file_private *file_priv = file->driver_priv;

		spin_lock(&file_priv->mm.lock);
		request->file_priv = file_priv;
		list_add_tail(&request->client_list,
			      &file_priv->mm.request_list);
		spin_unlock(&file_priv->mm.lock);
	}

	ring->outstanding_lazy_request = 0;

	if (!dev_priv->mm.suspended) {
		if (i915_enable_hangcheck) {
			mod_timer(&dev_priv->hangcheck_timer,
				  jiffies +
				  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
		}
		if (was_empty)
			queue_delayed_work(dev_priv->wq,
					   &dev_priv->mm.retire_work, HZ);
	}
	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
				      struct intel_ring_buffer *ring)
{
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		list_del(&request->list);
		i915_gem_request_remove_from_client(request);
		kfree(request);
	}

	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);

		obj->base.write_domain = 0;
		list_del_init(&obj->gpu_write_list);
		i915_gem_object_move_to_inactive(obj);
	}
}

static void i915_gem_reset_fences(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
		struct drm_i915_gem_object *obj = reg->obj;

		if (!obj)
			continue;

		if (obj->tiling_mode)
			i915_gem_release_mmap(obj);

		reg->obj->fence_reg = I915_FENCE_REG_NONE;
		reg->obj->fenced_gpu_access = false;
		reg->obj->last_fenced_seqno = 0;
		reg->obj->last_fenced_ring = NULL;
		i915_gem_clear_fence_reg(dev, reg);
	}
}

void i915_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++)
		i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);

	/* Remove anything from the flushing lists. The GPU cache is likely
	 * to be lost on reset along with the data, so simply move the
	 * lost bo to the inactive list.
	 */
	while (!list_empty(&dev_priv->mm.flushing_list)) {
		obj = list_first_entry(&dev_priv->mm.flushing_list,
				       struct drm_i915_gem_object,
				       mm_list);

		obj->base.write_domain = 0;
		list_del_init(&obj->gpu_write_list);
		i915_gem_object_move_to_inactive(obj);
	}

	/* Move everything out of the GPU domains to ensure we do any
	 * necessary invalidation upon reuse.
	 */
	list_for_each_entry(obj,
			    &dev_priv->mm.inactive_list,
			    mm_list)
	{
		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
	}

	/* The fence registers are invalidated so clear them out */
	i915_gem_reset_fences(dev);
}

1692/**
1693 * This function clears the request list as sequence numbers are passed.
1694 */
a71d8d94 1695void
db53a302 1696i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
673a394b 1697{
673a394b 1698 uint32_t seqno;
1ec14ad3 1699 int i;
673a394b 1700
db53a302 1701 if (list_empty(&ring->request_list))
6c0594a3
KW
1702 return;
1703
db53a302 1704 WARN_ON(i915_verify_lists(ring->dev));
673a394b 1705
78501eac 1706 seqno = ring->get_seqno(ring);
1ec14ad3 1707
076e2c0e 1708 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
1ec14ad3
CW
1709 if (seqno >= ring->sync_seqno[i])
1710 ring->sync_seqno[i] = 0;
1711
852835f3 1712 while (!list_empty(&ring->request_list)) {
673a394b 1713 struct drm_i915_gem_request *request;
673a394b 1714
852835f3 1715 request = list_first_entry(&ring->request_list,
673a394b
EA
1716 struct drm_i915_gem_request,
1717 list);
673a394b 1718
dfaae392 1719 if (!i915_seqno_passed(seqno, request->seqno))
b84d5f0c
CW
1720 break;
1721
db53a302 1722 trace_i915_gem_request_retire(ring, request->seqno);
a71d8d94
CW
1723 /* We know the GPU must have read the request to have
1724 * sent us the seqno + interrupt, so use the position
1725 * of the tail of the request to update the last known position
1726 * of the GPU head.
1727 */
1728 ring->last_retired_head = request->tail;
b84d5f0c
CW
1729
1730 list_del(&request->list);
f787a5f5 1731 i915_gem_request_remove_from_client(request);
b84d5f0c
CW
1732 kfree(request);
1733 }
673a394b 1734
b84d5f0c
CW
1735 /* Move any buffers on the active list that are no longer referenced
1736 * by the ringbuffer to the flushing/inactive lists as appropriate.
1737 */
1738 while (!list_empty(&ring->active_list)) {
05394f39 1739 struct drm_i915_gem_object *obj;
b84d5f0c 1740
0206e353 1741 obj = list_first_entry(&ring->active_list,
05394f39
CW
1742 struct drm_i915_gem_object,
1743 ring_list);
673a394b 1744
05394f39 1745 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
673a394b 1746 break;
b84d5f0c 1747
05394f39 1748 if (obj->base.write_domain != 0)
b84d5f0c
CW
1749 i915_gem_object_move_to_flushing(obj);
1750 else
1751 i915_gem_object_move_to_inactive(obj);
673a394b 1752 }
9d34e5db 1753
db53a302
CW
1754 if (unlikely(ring->trace_irq_seqno &&
1755 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1ec14ad3 1756 ring->irq_put(ring);
db53a302 1757 ring->trace_irq_seqno = 0;
9d34e5db 1758 }
23bc5982 1759
db53a302 1760 WARN_ON(i915_verify_lists(ring->dev));
673a394b
EA
1761}
1762
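The retirement loop above leans on i915_seqno_passed() for ordering 32-bit sequence numbers safely across wraparound. For reference, a sketch of its definition (it lives in i915_drv.h, not this file), with a worked wraparound case:

static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* Signed subtraction keeps ordering correct across u32 wraparound:
	 * e.g. seq1 = 0x00000002, seq2 = 0xfffffffe gives (int32_t)4 >= 0,
	 * so a post-wrap seqno correctly "passes" a pre-wrap one.
	 */
	return (int32_t)(seq1 - seq2) >= 0;
}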
b09a1fec
CW
1763void
1764i915_gem_retire_requests(struct drm_device *dev)
1765{
1766 drm_i915_private_t *dev_priv = dev->dev_private;
1ec14ad3 1767 int i;
b09a1fec 1768
be72615b 1769 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
05394f39 1770 struct drm_i915_gem_object *obj, *next;
be72615b
CW
1771
1772 /* We must be careful that during unbind() we do not
1773 * accidentally infinitely recurse into retire requests.
1774 * Currently:
1775 * retire -> free -> unbind -> wait -> retire_ring
1776 */
05394f39 1777 list_for_each_entry_safe(obj, next,
be72615b 1778 &dev_priv->mm.deferred_free_list,
69dc4987 1779 mm_list)
05394f39 1780 i915_gem_free_object_tail(obj);
be72615b
CW
1781 }
1782
1ec14ad3 1783 for (i = 0; i < I915_NUM_RINGS; i++)
db53a302 1784 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
b09a1fec
CW
1785}
1786
75ef9da2 1787static void
673a394b
EA
1788i915_gem_retire_work_handler(struct work_struct *work)
1789{
1790 drm_i915_private_t *dev_priv;
1791 struct drm_device *dev;
0a58705b
CW
1792 bool idle;
1793 int i;
673a394b
EA
1794
1795 dev_priv = container_of(work, drm_i915_private_t,
1796 mm.retire_work.work);
1797 dev = dev_priv->dev;
1798
891b48cf
CW
1799 /* Come back later if the device is busy... */
1800 if (!mutex_trylock(&dev->struct_mutex)) {
1801 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1802 return;
1803 }
1804
b09a1fec 1805 i915_gem_retire_requests(dev);
d1b851fc 1806
0a58705b
CW
1807 /* Send a periodic flush down the ring so we don't hold onto GEM
1808 * objects indefinitely.
1809 */
1810 idle = true;
1811 for (i = 0; i < I915_NUM_RINGS; i++) {
1812 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1813
1814 if (!list_empty(&ring->gpu_write_list)) {
1815 struct drm_i915_gem_request *request;
1816 int ret;
1817
db53a302
CW
1818 ret = i915_gem_flush_ring(ring,
1819 0, I915_GEM_GPU_DOMAINS);
0a58705b
CW
1820 request = kzalloc(sizeof(*request), GFP_KERNEL);
1821 if (ret || request == NULL ||
db53a302 1822 i915_add_request(ring, NULL, request))
0a58705b
CW
1823 kfree(request);
1824 }
1825
1826 idle &= list_empty(&ring->request_list);
1827 }
1828
1829 if (!dev_priv->mm.suspended && !idle)
9c9fe1f8 1830 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
0a58705b 1831
673a394b
EA
1832 mutex_unlock(&dev->struct_mutex);
1833}
1834
db53a302
CW
1835/**
1836 * Waits for a sequence number to be signaled, and cleans up the
1837 * request and object lists appropriately for that event.
1838 */
5a5a0c64 1839int
db53a302 1840i915_wait_request(struct intel_ring_buffer *ring,
b93f9cf1
BW
1841 uint32_t seqno,
1842 bool do_retire)
673a394b 1843{
db53a302 1844 drm_i915_private_t *dev_priv = ring->dev->dev_private;
802c7eb6 1845 u32 ier;
673a394b
EA
1846 int ret = 0;
1847
1848 BUG_ON(seqno == 0);
1849
d9bc7e9f
CW
1850 if (atomic_read(&dev_priv->mm.wedged)) {
1851 struct completion *x = &dev_priv->error_completion;
1852 bool recovery_complete;
1853 unsigned long flags;
1854
1855 /* Give the error handler a chance to run. */
1856 spin_lock_irqsave(&x->wait.lock, flags);
1857 recovery_complete = x->done > 0;
1858 spin_unlock_irqrestore(&x->wait.lock, flags);
1859
1860 return recovery_complete ? -EIO : -EAGAIN;
1861 }
30dbf0c0 1862
5d97eb69 1863 if (seqno == ring->outstanding_lazy_request) {
3cce469c
CW
1864 struct drm_i915_gem_request *request;
1865
1866 request = kzalloc(sizeof(*request), GFP_KERNEL);
1867 if (request == NULL)
e35a41de 1868 return -ENOMEM;
3cce469c 1869
db53a302 1870 ret = i915_add_request(ring, NULL, request);
3cce469c
CW
1871 if (ret) {
1872 kfree(request);
1873 return ret;
1874 }
1875
1876 seqno = request->seqno;
e35a41de 1877 }
ffed1d09 1878
78501eac 1879 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
db53a302 1880 if (HAS_PCH_SPLIT(ring->dev))
036a4a7d 1881 ier = I915_READ(DEIER) | I915_READ(GTIER);
23e3f9b3
JB
1882 else if (IS_VALLEYVIEW(ring->dev))
1883 ier = I915_READ(GTIER) | I915_READ(VLV_IER);
036a4a7d
ZW
1884 else
1885 ier = I915_READ(IER);
802c7eb6
JB
1886 if (!ier) {
1887 DRM_ERROR("something (likely vbetool) disabled "
1888 "interrupts, re-enabling\n");
f01c22fd
CW
1889 ring->dev->driver->irq_preinstall(ring->dev);
1890 ring->dev->driver->irq_postinstall(ring->dev);
802c7eb6
JB
1891 }
1892
db53a302 1893 trace_i915_gem_request_wait_begin(ring, seqno);
1c5d22f7 1894
b2223497 1895 ring->waiting_seqno = seqno;
b13c2b96 1896 if (ring->irq_get(ring)) {
ce453d81 1897 if (dev_priv->mm.interruptible)
b13c2b96
CW
1898 ret = wait_event_interruptible(ring->irq_queue,
1899 i915_seqno_passed(ring->get_seqno(ring), seqno)
1900 || atomic_read(&dev_priv->mm.wedged));
1901 else
1902 wait_event(ring->irq_queue,
1903 i915_seqno_passed(ring->get_seqno(ring), seqno)
1904 || atomic_read(&dev_priv->mm.wedged));
1905
1906 ring->irq_put(ring);
e959b5db
EA
1907 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
1908 seqno) ||
1909 atomic_read(&dev_priv->mm.wedged), 3000))
b5ba177d 1910 ret = -EBUSY;
b2223497 1911 ring->waiting_seqno = 0;
1c5d22f7 1912
db53a302 1913 trace_i915_gem_request_wait_end(ring, seqno);
673a394b 1914 }
ba1234d1 1915 if (atomic_read(&dev_priv->mm.wedged))
30dbf0c0 1916 ret = -EAGAIN;
673a394b 1917
673a394b
EA
1918 /* Directly dispatch request retiring. While we have the work queue
1919 * to handle this, the waiter on a request often wants an associated
1920 * buffer to have made it to the inactive list, and we would need
1921 * a separate wait queue to handle that.
1922 */
b93f9cf1 1923 if (ret == 0 && do_retire)
db53a302 1924 i915_gem_retire_requests_ring(ring);
673a394b
EA
1925
1926 return ret;
1927}
1928
673a394b
EA
1929/**
1930 * Ensures that all rendering to the object has completed and the object is
1931 * safe to unbind from the GTT or access from the CPU.
1932 */
54cf91dc 1933int
ce453d81 1934i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
673a394b 1935{
673a394b
EA
1936 int ret;
1937
e47c68e9
EA
1938 /* This function only exists to support waiting for existing rendering,
1939 * not for emitting required flushes.
673a394b 1940 */
05394f39 1941 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
673a394b
EA
1942
1943 /* If there is rendering queued on the buffer being evicted, wait for
1944 * it.
1945 */
05394f39 1946 if (obj->active) {
b93f9cf1
BW
1947 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
1948 true);
2cf34d7b 1949 if (ret)
673a394b
EA
1950 return ret;
1951 }
1952
1953 return 0;
1954}
1955
2911a35b
BW
1956int
1957i915_gem_object_sync(struct drm_i915_gem_object *obj,
1958 struct intel_ring_buffer *to)
1959{
1960 struct intel_ring_buffer *from = obj->ring;
1961 u32 seqno;
1962 int ret, idx;
1963
1964 if (from == NULL || to == from)
1965 return 0;
1966
1967 if (!i915_semaphore_is_enabled(obj->base.dev))
1968 return i915_gem_object_wait_rendering(obj);
1969
1970 idx = intel_ring_sync_index(from, to);
1971
1972 seqno = obj->last_rendering_seqno;
1973 if (seqno <= from->sync_seqno[idx])
1974 return 0;
1975
1976 if (seqno == from->outstanding_lazy_request) {
1977 struct drm_i915_gem_request *request;
1978
1979 request = kzalloc(sizeof(*request), GFP_KERNEL);
1980 if (request == NULL)
1981 return -ENOMEM;
1982
1983 ret = i915_add_request(from, NULL, request);
1984 if (ret) {
1985 kfree(request);
1986 return ret;
1987 }
1988
1989 seqno = request->seqno;
1990 }
1991
1992 from->sync_seqno[idx] = seqno;
1993
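	/* Note: the hardware semaphore comparison is believed to be strictly
	 * greater-than, so waiting on seqno - 1 below unblocks once seqno
	 * itself has been written; see the MBOX emission in intel_ringbuffer.c.
	 */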
1994 return to->sync_to(to, from, seqno - 1);
1995
1996}
1997
b5ffc9bc
CW
1998static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
1999{
2000 u32 old_write_domain, old_read_domains;
2001
b5ffc9bc
CW
2002 /* Act as a barrier for all accesses through the GTT */
2003 mb();
2004
2005 /* Force a pagefault for domain tracking on next user access */
2006 i915_gem_release_mmap(obj);
2007
b97c3d9c
KP
2008 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2009 return;
2010
b5ffc9bc
CW
2011 old_read_domains = obj->base.read_domains;
2012 old_write_domain = obj->base.write_domain;
2013
2014 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2015 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2016
2017 trace_i915_gem_object_change_domain(obj,
2018 old_read_domains,
2019 old_write_domain);
2020}
2021
673a394b
EA
2022/**
2023 * Unbinds an object from the GTT aperture.
2024 */
0f973f27 2025int
05394f39 2026i915_gem_object_unbind(struct drm_i915_gem_object *obj)
673a394b 2027{
7bddb01f 2028 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
673a394b
EA
2029 int ret = 0;
2030
05394f39 2031 if (obj->gtt_space == NULL)
673a394b
EA
2032 return 0;
2033
05394f39 2034 if (obj->pin_count != 0) {
673a394b
EA
2035 DRM_ERROR("Attempting to unbind pinned buffer\n");
2036 return -EINVAL;
2037 }
2038
a8198eea
CW
2039 ret = i915_gem_object_finish_gpu(obj);
2040 if (ret == -ERESTARTSYS)
2041 return ret;
2042 /* Continue on if we fail due to EIO, the GPU is hung so we
2043 * should be safe and we need to cleanup or else we might
2044 * cause memory corruption through use-after-free.
2045 */
2046
b5ffc9bc 2047 i915_gem_object_finish_gtt(obj);
5323fd04 2048
673a394b
EA
2049 /* Move the object to the CPU domain to ensure that
2050 * any possible CPU writes while it's not in the GTT
a8198eea 2051 * are flushed when we go to remap it.
673a394b 2052 */
a8198eea
CW
2053 if (ret == 0)
2054 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
8dc1775d 2055 if (ret == -ERESTARTSYS)
673a394b 2056 return ret;
812ed492 2057 if (ret) {
a8198eea
CW
2058 /* In the event of a disaster, abandon all caches and
2059 * hope for the best.
2060 */
812ed492 2061 i915_gem_clflush_object(obj);
05394f39 2062 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
812ed492 2063 }
673a394b 2064
96b47b65 2065 /* release the fence reg _after_ flushing */
d9e86c0e
CW
2066 ret = i915_gem_object_put_fence(obj);
2067 if (ret == -ERESTARTSYS)
2068 return ret;
96b47b65 2069
db53a302
CW
2070 trace_i915_gem_object_unbind(obj);
2071
74898d7e
DV
2072 if (obj->has_global_gtt_mapping)
2073 i915_gem_gtt_unbind_object(obj);
7bddb01f
DV
2074 if (obj->has_aliasing_ppgtt_mapping) {
2075 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2076 obj->has_aliasing_ppgtt_mapping = 0;
2077 }
74163907 2078 i915_gem_gtt_finish_object(obj);
7bddb01f 2079
e5281ccd 2080 i915_gem_object_put_pages_gtt(obj);
673a394b 2081
6299f992 2082 list_del_init(&obj->gtt_list);
05394f39 2083 list_del_init(&obj->mm_list);
75e9e915 2084 /* Avoid an unnecessary call to unbind on rebind. */
05394f39 2085 obj->map_and_fenceable = true;
673a394b 2086
05394f39
CW
2087 drm_mm_put_block(obj->gtt_space);
2088 obj->gtt_space = NULL;
2089 obj->gtt_offset = 0;
673a394b 2090
05394f39 2091 if (i915_gem_object_is_purgeable(obj))
963b4836
CW
2092 i915_gem_object_truncate(obj);
2093
8dc1775d 2094 return ret;
673a394b
EA
2095}
2096
88241785 2097int
db53a302 2098i915_gem_flush_ring(struct intel_ring_buffer *ring,
54cf91dc
CW
2099 uint32_t invalidate_domains,
2100 uint32_t flush_domains)
2101{
88241785
CW
2102 int ret;
2103
36d527de
CW
2104 if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2105 return 0;
2106
db53a302
CW
2107 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2108
88241785
CW
2109 ret = ring->flush(ring, invalidate_domains, flush_domains);
2110 if (ret)
2111 return ret;
2112
36d527de
CW
2113 if (flush_domains & I915_GEM_GPU_DOMAINS)
2114 i915_gem_process_flushing_list(ring, flush_domains);
2115
88241785 2116 return 0;
54cf91dc
CW
2117}
2118
b93f9cf1 2119static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
a56ba56c 2120{
88241785
CW
2121 int ret;
2122
395b70be 2123 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
64193406
CW
2124 return 0;
2125
88241785 2126 if (!list_empty(&ring->gpu_write_list)) {
db53a302 2127 ret = i915_gem_flush_ring(ring,
0ac74c6b 2128 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
88241785
CW
2129 if (ret)
2130 return ret;
2131 }
2132
b93f9cf1
BW
2133 return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
2134 do_retire);
a56ba56c
CW
2135}
2136
b93f9cf1 2137int i915_gpu_idle(struct drm_device *dev, bool do_retire)
4df2faf4
DV
2138{
2139 drm_i915_private_t *dev_priv = dev->dev_private;
1ec14ad3 2140 int ret, i;
4df2faf4 2141
4df2faf4 2142 /* Flush everything onto the inactive list. */
1ec14ad3 2143 for (i = 0; i < I915_NUM_RINGS; i++) {
b93f9cf1 2144 ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
1ec14ad3
CW
2145 if (ret)
2146 return ret;
2147 }
4df2faf4 2148
8a1a49f9 2149 return 0;
4df2faf4
DV
2150}
2151
c6642782
DV
2152static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2153 struct intel_ring_buffer *pipelined)
4e901fdc 2154{
05394f39 2155 struct drm_device *dev = obj->base.dev;
4e901fdc 2156 drm_i915_private_t *dev_priv = dev->dev_private;
05394f39
CW
2157 u32 size = obj->gtt_space->size;
2158 int regnum = obj->fence_reg;
4e901fdc
EA
2159 uint64_t val;
2160
05394f39 2161 val = (uint64_t)((obj->gtt_offset + size - 4096) &
c6642782 2162 0xfffff000) << 32;
05394f39
CW
2163 val |= obj->gtt_offset & 0xfffff000;
2164 val |= (uint64_t)((obj->stride / 128) - 1) <<
4e901fdc
EA
2165 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2166
05394f39 2167 if (obj->tiling_mode == I915_TILING_Y)
4e901fdc
EA
2168 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2169 val |= I965_FENCE_REG_VALID;
2170
c6642782
DV
2171 if (pipelined) {
2172 int ret = intel_ring_begin(pipelined, 6);
2173 if (ret)
2174 return ret;
2175
2176 intel_ring_emit(pipelined, MI_NOOP);
2177 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2178 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2179 intel_ring_emit(pipelined, (u32)val);
2180 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2181 intel_ring_emit(pipelined, (u32)(val >> 32));
2182 intel_ring_advance(pipelined);
2183 } else
2184 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2185
2186 return 0;
4e901fdc
EA
2187}
2188
c6642782
DV
2189static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2190 struct intel_ring_buffer *pipelined)
de151cf6 2191{
05394f39 2192 struct drm_device *dev = obj->base.dev;
de151cf6 2193 drm_i915_private_t *dev_priv = dev->dev_private;
05394f39
CW
2194 u32 size = obj->gtt_space->size;
2195 int regnum = obj->fence_reg;
de151cf6
JB
2196 uint64_t val;
2197
05394f39 2198 val = (uint64_t)((obj->gtt_offset + size - 4096) &
de151cf6 2199 0xfffff000) << 32;
05394f39
CW
2200 val |= obj->gtt_offset & 0xfffff000;
2201 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2202 if (obj->tiling_mode == I915_TILING_Y)
de151cf6
JB
2203 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2204 val |= I965_FENCE_REG_VALID;
2205
c6642782
DV
2206 if (pipelined) {
2207 int ret = intel_ring_begin(pipelined, 6);
2208 if (ret)
2209 return ret;
2210
2211 intel_ring_emit(pipelined, MI_NOOP);
2212 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2213 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2214 intel_ring_emit(pipelined, (u32)val);
2215 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2216 intel_ring_emit(pipelined, (u32)(val >> 32));
2217 intel_ring_advance(pipelined);
2218 } else
2219 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2220
2221 return 0;
de151cf6
JB
2222}
2223
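To make the fence layout concrete, a worked example under stated assumptions (hypothetical object: 1 MiB Y-tiled at GTT offset 1 MiB, stride 4096; shift and bit positions as defined in i915_reg.h of this era):

	uint64_t val;

	val  = (uint64_t)((0x00100000 + 0x00100000 - 4096) &
			  0xfffff000) << 32;	/* fence end: high dword 0x001ff000 */
	val |= 0x00100000 & 0xfffff000;		/* fence start: 0x00100000 */
	val |= (uint64_t)((4096 / 128) - 1) << 2; /* pitch 31 at I965_FENCE_PITCH_SHIFT */
	val |= 1 << 1;				/* I965_FENCE_TILING_Y_SHIFT */
	val |= 1;				/* I965_FENCE_REG_VALID */
	/* val == 0x001ff0000010007f */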
c6642782
DV
2224static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2225 struct intel_ring_buffer *pipelined)
de151cf6 2226{
05394f39 2227 struct drm_device *dev = obj->base.dev;
de151cf6 2228 drm_i915_private_t *dev_priv = dev->dev_private;
05394f39 2229 u32 size = obj->gtt_space->size;
c6642782 2230 u32 fence_reg, val, pitch_val;
0f973f27 2231 int tile_width;
de151cf6 2232
c6642782
DV
2233 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2234 (size & -size) != size ||
2235 (obj->gtt_offset & (size - 1)),
2236 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2237 obj->gtt_offset, obj->map_and_fenceable, size))
2238 return -EINVAL;
de151cf6 2239
c6642782 2240 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
0f973f27 2241 tile_width = 128;
de151cf6 2242 else
0f973f27
JB
2243 tile_width = 512;
2244
2245 /* Note: the pitch must be a power-of-two number of tile widths */
05394f39 2246 pitch_val = obj->stride / tile_width;
0f973f27 2247 pitch_val = ffs(pitch_val) - 1;
de151cf6 2248
05394f39
CW
2249 val = obj->gtt_offset;
2250 if (obj->tiling_mode == I915_TILING_Y)
de151cf6 2251 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
a00b10c3 2252 val |= I915_FENCE_SIZE_BITS(size);
de151cf6
JB
2253 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2254 val |= I830_FENCE_REG_VALID;
2255
05394f39 2256 fence_reg = obj->fence_reg;
a00b10c3
CW
2257 if (fence_reg < 8)
2258 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
dc529a4f 2259 else
a00b10c3 2260 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
c6642782
DV
2261
2262 if (pipelined) {
2263 int ret = intel_ring_begin(pipelined, 4);
2264 if (ret)
2265 return ret;
2266
2267 intel_ring_emit(pipelined, MI_NOOP);
2268 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2269 intel_ring_emit(pipelined, fence_reg);
2270 intel_ring_emit(pipelined, val);
2271 intel_ring_advance(pipelined);
2272 } else
2273 I915_WRITE(fence_reg, val);
2274
2275 return 0;
de151cf6
JB
2276}
2277
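The ffs() step above encodes the pitch as a log2 field; a small worked example (hypothetical X-tiled object, stride 2048, 512-byte tile width):

	u32 pitch_val = 2048 / 512;	/* 4 tile widths per row */
	pitch_val = ffs(pitch_val) - 1;	/* ffs(4) == 3, so the field is 2, */
					/* i.e. log2(4), as the hw expects */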
c6642782
DV
2278static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2279 struct intel_ring_buffer *pipelined)
de151cf6 2280{
05394f39 2281 struct drm_device *dev = obj->base.dev;
de151cf6 2282 drm_i915_private_t *dev_priv = dev->dev_private;
05394f39
CW
2283 u32 size = obj->gtt_space->size;
2284 int regnum = obj->fence_reg;
de151cf6
JB
2285 uint32_t val;
2286 uint32_t pitch_val;
2287
c6642782
DV
2288 if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2289 (size & -size) != size ||
2290 (obj->gtt_offset & (size - 1)),
2291 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2292 obj->gtt_offset, size))
2293 return -EINVAL;
de151cf6 2294
05394f39 2295 pitch_val = obj->stride / 128;
e76a16de 2296 pitch_val = ffs(pitch_val) - 1;
e76a16de 2297
05394f39
CW
2298 val = obj->gtt_offset;
2299 if (obj->tiling_mode == I915_TILING_Y)
de151cf6 2300 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
c6642782 2301 val |= I830_FENCE_SIZE_BITS(size);
de151cf6
JB
2302 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2303 val |= I830_FENCE_REG_VALID;
2304
c6642782
DV
2305 if (pipelined) {
2306 int ret = intel_ring_begin(pipelined, 4);
2307 if (ret)
2308 return ret;
2309
2310 intel_ring_emit(pipelined, MI_NOOP);
2311 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2312 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2313 intel_ring_emit(pipelined, val);
2314 intel_ring_advance(pipelined);
2315 } else
2316 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2317
2318 return 0;
de151cf6
JB
2319}
2320
d9e86c0e
CW
2321static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2322{
2323 return i915_seqno_passed(ring->get_seqno(ring), seqno);
2324}
2325
2326static int
2327i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
ce453d81 2328 struct intel_ring_buffer *pipelined)
d9e86c0e
CW
2329{
2330 int ret;
2331
2332 if (obj->fenced_gpu_access) {
88241785 2333 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
db53a302 2334 ret = i915_gem_flush_ring(obj->last_fenced_ring,
88241785
CW
2335 0, obj->base.write_domain);
2336 if (ret)
2337 return ret;
2338 }
d9e86c0e
CW
2339
2340 obj->fenced_gpu_access = false;
2341 }
2342
2343 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2344 if (!ring_passed_seqno(obj->last_fenced_ring,
2345 obj->last_fenced_seqno)) {
db53a302 2346 ret = i915_wait_request(obj->last_fenced_ring,
b93f9cf1
BW
2347 obj->last_fenced_seqno,
2348 true);
d9e86c0e
CW
2349 if (ret)
2350 return ret;
2351 }
2352
2353 obj->last_fenced_seqno = 0;
2354 obj->last_fenced_ring = NULL;
2355 }
2356
63256ec5
CW
2357 /* Ensure that all CPU reads are completed before installing a fence
2358 * and all writes before removing the fence.
2359 */
2360 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2361 mb();
2362
d9e86c0e
CW
2363 return 0;
2364}
2365
2366int
2367i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2368{
2369 int ret;
2370
2371 if (obj->tiling_mode)
2372 i915_gem_release_mmap(obj);
2373
ce453d81 2374 ret = i915_gem_object_flush_fence(obj, NULL);
d9e86c0e
CW
2375 if (ret)
2376 return ret;
2377
2378 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2379 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1690e1eb
CW
2380
2381 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
d9e86c0e
CW
2382 i915_gem_clear_fence_reg(obj->base.dev,
2383 &dev_priv->fence_regs[obj->fence_reg]);
2384
2385 obj->fence_reg = I915_FENCE_REG_NONE;
2386 }
2387
2388 return 0;
2389}
2390
2391static struct drm_i915_fence_reg *
2392i915_find_fence_reg(struct drm_device *dev,
2393 struct intel_ring_buffer *pipelined)
ae3db24a 2394{
ae3db24a 2395 struct drm_i915_private *dev_priv = dev->dev_private;
d9e86c0e
CW
2396 struct drm_i915_fence_reg *reg, *first, *avail;
2397 int i;
ae3db24a
DV
2398
2399 /* First try to find a free reg */
d9e86c0e 2400 avail = NULL;
ae3db24a
DV
2401 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2402 reg = &dev_priv->fence_regs[i];
2403 if (!reg->obj)
d9e86c0e 2404 return reg;
ae3db24a 2405
1690e1eb 2406 if (!reg->pin_count)
d9e86c0e 2407 avail = reg;
ae3db24a
DV
2408 }
2409
d9e86c0e
CW
2410 if (avail == NULL)
2411 return NULL;
ae3db24a
DV
2412
2413 /* None available, try to steal one or wait for a user to finish */
d9e86c0e
CW
2414 avail = first = NULL;
2415 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
1690e1eb 2416 if (reg->pin_count)
ae3db24a
DV
2417 continue;
2418
d9e86c0e
CW
2419 if (first == NULL)
2420 first = reg;
2421
2422 if (!pipelined ||
2423 !reg->obj->last_fenced_ring ||
2424 reg->obj->last_fenced_ring == pipelined) {
2425 avail = reg;
2426 break;
2427 }
ae3db24a
DV
2428 }
2429
d9e86c0e
CW
2430 if (avail == NULL)
2431 avail = first;
ae3db24a 2432
a00b10c3 2433 return avail;
ae3db24a
DV
2434}
2435
de151cf6 2436/**
9a5a53b3 2437 * i915_gem_object_get_fence - set up fencing for an object
de151cf6 2438 * @obj: object to map through a fence reg
d9e86c0e 2439 * @pipelined: ring on which to queue the change, or NULL for CPU access
de151cf6
JB
2440 *
2441 * When mapping objects through the GTT, userspace wants to be able to write
2442 * to them without having to worry about swizzling if the object is tiled.
de151cf6
JB
2443 * This function walks the fence regs looking for a free one for @obj,
2444 * stealing one if it can't find any.
2445 *
2446 * It then sets up the reg based on the object's properties: address, pitch
2447 * and tiling format.
9a5a53b3
CW
2448 *
2449 * For an untiled surface, this removes any existing fence.
de151cf6 2450 */
8c4b8c3f 2451int
d9e86c0e 2452i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
ce453d81 2453 struct intel_ring_buffer *pipelined)
de151cf6 2454{
05394f39 2455 struct drm_device *dev = obj->base.dev;
79e53945 2456 struct drm_i915_private *dev_priv = dev->dev_private;
d9e86c0e 2457 struct drm_i915_fence_reg *reg;
ae3db24a 2458 int ret;
de151cf6 2459
9a5a53b3
CW
2460 if (obj->tiling_mode == I915_TILING_NONE)
2461 return i915_gem_object_put_fence(obj);
2462
6bda10d1
CW
2463 /* XXX disable pipelining. There are bugs. Shocking. */
2464 pipelined = NULL;
2465
d9e86c0e 2466 /* Just update our place in the LRU if our fence is getting reused. */
05394f39
CW
2467 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2468 reg = &dev_priv->fence_regs[obj->fence_reg];
007cc8ac 2469 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
d9e86c0e 2470
29c5a587
CW
2471 if (obj->tiling_changed) {
2472 ret = i915_gem_object_flush_fence(obj, pipelined);
2473 if (ret)
2474 return ret;
2475
2476 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2477 pipelined = NULL;
2478
2479 if (pipelined) {
2480 reg->setup_seqno =
2481 i915_gem_next_request_seqno(pipelined);
2482 obj->last_fenced_seqno = reg->setup_seqno;
2483 obj->last_fenced_ring = pipelined;
2484 }
2485
2486 goto update;
2487 }
d9e86c0e
CW
2488
2489 if (!pipelined) {
2490 if (reg->setup_seqno) {
2491 if (!ring_passed_seqno(obj->last_fenced_ring,
2492 reg->setup_seqno)) {
db53a302 2493 ret = i915_wait_request(obj->last_fenced_ring,
b93f9cf1
BW
2494 reg->setup_seqno,
2495 true);
d9e86c0e
CW
2496 if (ret)
2497 return ret;
2498 }
2499
2500 reg->setup_seqno = 0;
2501 }
2502 } else if (obj->last_fenced_ring &&
2503 obj->last_fenced_ring != pipelined) {
ce453d81 2504 ret = i915_gem_object_flush_fence(obj, pipelined);
d9e86c0e
CW
2505 if (ret)
2506 return ret;
d9e86c0e
CW
2507 }
2508
a09ba7fa
EA
2509 return 0;
2510 }
2511
d9e86c0e
CW
2512 reg = i915_find_fence_reg(dev, pipelined);
2513 if (reg == NULL)
39965b37 2514 return -EDEADLK;
de151cf6 2515
ce453d81 2516 ret = i915_gem_object_flush_fence(obj, pipelined);
d9e86c0e 2517 if (ret)
ae3db24a 2518 return ret;
de151cf6 2519
d9e86c0e
CW
2520 if (reg->obj) {
2521 struct drm_i915_gem_object *old = reg->obj;
2522
2523 drm_gem_object_reference(&old->base);
2524
2525 if (old->tiling_mode)
2526 i915_gem_release_mmap(old);
2527
ce453d81 2528 ret = i915_gem_object_flush_fence(old, pipelined);
d9e86c0e
CW
2529 if (ret) {
2530 drm_gem_object_unreference(&old->base);
2531 return ret;
2532 }
2533
2534 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2535 pipelined = NULL;
2536
2537 old->fence_reg = I915_FENCE_REG_NONE;
2538 old->last_fenced_ring = pipelined;
2539 old->last_fenced_seqno =
db53a302 2540 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
d9e86c0e
CW
2541
2542 drm_gem_object_unreference(&old->base);
2543 } else if (obj->last_fenced_seqno == 0)
2544 pipelined = NULL;
a09ba7fa 2545
de151cf6 2546 reg->obj = obj;
d9e86c0e
CW
2547 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2548 obj->fence_reg = reg - dev_priv->fence_regs;
2549 obj->last_fenced_ring = pipelined;
de151cf6 2550
d9e86c0e 2551 reg->setup_seqno =
db53a302 2552 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
d9e86c0e
CW
2553 obj->last_fenced_seqno = reg->setup_seqno;
2554
2555update:
2556 obj->tiling_changed = false;
e259befd 2557 switch (INTEL_INFO(dev)->gen) {
25aebfc3 2558 case 7:
e259befd 2559 case 6:
c6642782 2560 ret = sandybridge_write_fence_reg(obj, pipelined);
e259befd
CW
2561 break;
2562 case 5:
2563 case 4:
c6642782 2564 ret = i965_write_fence_reg(obj, pipelined);
e259befd
CW
2565 break;
2566 case 3:
c6642782 2567 ret = i915_write_fence_reg(obj, pipelined);
e259befd
CW
2568 break;
2569 case 2:
c6642782 2570 ret = i830_write_fence_reg(obj, pipelined);
e259befd
CW
2571 break;
2572 }
d9ddcb96 2573
c6642782 2574 return ret;
de151cf6
JB
2575}
2576
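A sketch of the typical caller pattern (hypothetical; locking and error paths abbreviated): pin a tiled object as map-and-fenceable, then take a fence so CPU access through the GTT is detiled:

	ret = i915_gem_object_pin(obj, 4096, true);	/* map_and_fenceable */
	if (ret)
		return ret;

	ret = i915_gem_object_get_fence(obj, NULL);	/* NULL: CPU access, no ring */
	if (ret) {
		i915_gem_object_unpin(obj);
		return ret;
	}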
2577/**
2578 * i915_gem_clear_fence_reg - clear out fence register info
2579 * @obj: object to clear
2580 *
2581 * Zeroes out the fence register itself and clears out the associated
05394f39 2582 * data structures in dev_priv and obj.
de151cf6
JB
2583 */
2584static void
d9e86c0e
CW
2585i915_gem_clear_fence_reg(struct drm_device *dev,
2586 struct drm_i915_fence_reg *reg)
de151cf6 2587{
79e53945 2588 drm_i915_private_t *dev_priv = dev->dev_private;
d9e86c0e 2589 uint32_t fence_reg = reg - dev_priv->fence_regs;
de151cf6 2590
e259befd 2591 switch (INTEL_INFO(dev)->gen) {
25aebfc3 2592 case 7:
e259befd 2593 case 6:
d9e86c0e 2594 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
e259befd
CW
2595 break;
2596 case 5:
2597 case 4:
d9e86c0e 2598 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
e259befd
CW
2599 break;
2600 case 3:
d9e86c0e
CW
2601 if (fence_reg >= 8)
2602 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
dc529a4f 2603 else
e259befd 2604 case 2:
d9e86c0e 2605 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
dc529a4f
EA
2606
2607 I915_WRITE(fence_reg, 0);
e259befd 2608 break;
dc529a4f 2609 }
de151cf6 2610
007cc8ac 2611 list_del_init(&reg->lru_list);
d9e86c0e
CW
2612 reg->obj = NULL;
2613 reg->setup_seqno = 0;
1690e1eb 2614 reg->pin_count = 0;
52dc7d32
CW
2615}
2616
673a394b
EA
2617/**
2618 * Finds free space in the GTT aperture and binds the object there.
2619 */
2620static int
05394f39 2621i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
920afa77 2622 unsigned alignment,
75e9e915 2623 bool map_and_fenceable)
673a394b 2624{
05394f39 2625 struct drm_device *dev = obj->base.dev;
673a394b 2626 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b 2627 struct drm_mm_node *free_space;
a00b10c3 2628 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
5e783301 2629 u32 size, fence_size, fence_alignment, unfenced_alignment;
75e9e915 2630 bool mappable, fenceable;
07f73f69 2631 int ret;
673a394b 2632
05394f39 2633 if (obj->madv != I915_MADV_WILLNEED) {
3ef94daa
CW
2634 DRM_ERROR("Attempting to bind a purgeable object\n");
2635 return -EINVAL;
2636 }
2637
e28f8711
CW
2638 fence_size = i915_gem_get_gtt_size(dev,
2639 obj->base.size,
2640 obj->tiling_mode);
2641 fence_alignment = i915_gem_get_gtt_alignment(dev,
2642 obj->base.size,
2643 obj->tiling_mode);
2644 unfenced_alignment =
2645 i915_gem_get_unfenced_gtt_alignment(dev,
2646 obj->base.size,
2647 obj->tiling_mode);
a00b10c3 2648
673a394b 2649 if (alignment == 0)
5e783301
DV
2650 alignment = map_and_fenceable ? fence_alignment :
2651 unfenced_alignment;
75e9e915 2652 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
673a394b
EA
2653 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2654 return -EINVAL;
2655 }
2656
05394f39 2657 size = map_and_fenceable ? fence_size : obj->base.size;
a00b10c3 2658
654fc607
CW
2659 /* If the object is bigger than the entire aperture, reject it early
2660 * before evicting everything in a vain attempt to find space.
2661 */
05394f39 2662 if (obj->base.size >
75e9e915 2663 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
654fc607
CW
2664 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2665 return -E2BIG;
2666 }
2667
673a394b 2668 search_free:
75e9e915 2669 if (map_and_fenceable)
920afa77
DV
2670 free_space =
2671 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
a00b10c3 2672 size, alignment, 0,
920afa77
DV
2673 dev_priv->mm.gtt_mappable_end,
2674 0);
2675 else
2676 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
a00b10c3 2677 size, alignment, 0);
920afa77
DV
2678
2679 if (free_space != NULL) {
75e9e915 2680 if (map_and_fenceable)
05394f39 2681 obj->gtt_space =
920afa77 2682 drm_mm_get_block_range_generic(free_space,
a00b10c3 2683 size, alignment, 0,
920afa77
DV
2684 dev_priv->mm.gtt_mappable_end,
2685 0);
2686 else
05394f39 2687 obj->gtt_space =
a00b10c3 2688 drm_mm_get_block(free_space, size, alignment);
920afa77 2689 }
05394f39 2690 if (obj->gtt_space == NULL) {
673a394b
EA
2691 /* If the gtt is empty and we're still having trouble
2692 * fitting our object in, we're out of memory.
2693 */
75e9e915
DV
2694 ret = i915_gem_evict_something(dev, size, alignment,
2695 map_and_fenceable);
9731129c 2696 if (ret)
673a394b 2697 return ret;
9731129c 2698
673a394b
EA
2699 goto search_free;
2700 }
2701
e5281ccd 2702 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
673a394b 2703 if (ret) {
05394f39
CW
2704 drm_mm_put_block(obj->gtt_space);
2705 obj->gtt_space = NULL;
07f73f69
CW
2706
2707 if (ret == -ENOMEM) {
809b6334
CW
2708 /* first try to reclaim some memory by clearing the GTT */
2709 ret = i915_gem_evict_everything(dev, false);
07f73f69 2710 if (ret) {
07f73f69 2711 /* now try to shrink everyone else */
4bdadb97
CW
2712 if (gfpmask) {
2713 gfpmask = 0;
2714 goto search_free;
07f73f69
CW
2715 }
2716
809b6334 2717 return -ENOMEM;
07f73f69
CW
2718 }
2719
2720 goto search_free;
2721 }
2722
673a394b
EA
2723 return ret;
2724 }
2725
74163907 2726 ret = i915_gem_gtt_prepare_object(obj);
7c2e6fdf 2727 if (ret) {
e5281ccd 2728 i915_gem_object_put_pages_gtt(obj);
05394f39
CW
2729 drm_mm_put_block(obj->gtt_space);
2730 obj->gtt_space = NULL;
07f73f69 2731
809b6334 2732 if (i915_gem_evict_everything(dev, false))
07f73f69 2733 return ret;
07f73f69
CW
2734
2735 goto search_free;
673a394b 2736 }
673a394b 2737
0ebb9829
DV
2738 if (!dev_priv->mm.aliasing_ppgtt)
2739 i915_gem_gtt_bind_object(obj, obj->cache_level);
673a394b 2740
6299f992 2741 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
05394f39 2742 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
bf1a1092 2743
673a394b
EA
2744 /* Assert that the object is not currently in any GPU domain. As it
2745 * wasn't in the GTT, there shouldn't be any way it could have been in
2746 * a GPU cache.
2747 */
05394f39
CW
2748 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2749 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
673a394b 2750
6299f992 2751 obj->gtt_offset = obj->gtt_space->start;
1c5d22f7 2752
75e9e915 2753 fenceable =
05394f39 2754 obj->gtt_space->size == fence_size &&
0206e353 2755 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
a00b10c3 2756
75e9e915 2757 mappable =
05394f39 2758 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
a00b10c3 2759
05394f39 2760 obj->map_and_fenceable = mappable && fenceable;
75e9e915 2761
db53a302 2762 trace_i915_gem_object_bind(obj, map_and_fenceable);
673a394b
EA
2763 return 0;
2764}
2765
2766void
05394f39 2767i915_gem_clflush_object(struct drm_i915_gem_object *obj)
673a394b 2768{
673a394b
EA
2769 /* If we don't have a page list set up, then we're not pinned
2770 * to the GPU, and we can ignore the cache flush because it'll happen
2771 * again at bind time.
2772 */
05394f39 2773 if (obj->pages == NULL)
673a394b
EA
2774 return;
2775
9c23f7fc
CW
2776 /* If the GPU is snooping the contents of the CPU cache,
2777 * we do not need to manually clear the CPU cache lines. However,
2778 * the caches are only snooped when the render cache is
2779 * flushed/invalidated. As we always have to emit invalidations
2780 * and flushes when moving into and out of the RENDER domain, correct
2781 * snooping behaviour occurs naturally as the result of our domain
2782 * tracking.
2783 */
2784 if (obj->cache_level != I915_CACHE_NONE)
2785 return;
2786
1c5d22f7 2787 trace_i915_gem_object_clflush(obj);
cfa16a0d 2788
05394f39 2789 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
673a394b
EA
2790}
2791
e47c68e9 2792/** Flushes any GPU write domain for the object if it's dirty. */
88241785 2793static int
3619df03 2794i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
e47c68e9 2795{
05394f39 2796 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
88241785 2797 return 0;
e47c68e9
EA
2798
2799 /* Queue the GPU write cache flushing we need. */
db53a302 2800 return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
e47c68e9
EA
2801}
2802
2803/** Flushes the GTT write domain for the object if it's dirty. */
2804static void
05394f39 2805i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
e47c68e9 2806{
1c5d22f7
CW
2807 uint32_t old_write_domain;
2808
05394f39 2809 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
e47c68e9
EA
2810 return;
2811
63256ec5 2812 /* No actual flushing is required for the GTT write domain. Writes
e47c68e9
EA
2813 * to it immediately go to main memory as far as we know, so there's
2814 * no chipset flush. It also doesn't land in render cache.
63256ec5
CW
2815 *
2816 * However, we do have to enforce the order so that all writes through
2817 * the GTT land before any writes to the device, such as updates to
2818 * the GATT itself.
e47c68e9 2819 */
63256ec5
CW
2820 wmb();
2821
05394f39
CW
2822 old_write_domain = obj->base.write_domain;
2823 obj->base.write_domain = 0;
1c5d22f7
CW
2824
2825 trace_i915_gem_object_change_domain(obj,
05394f39 2826 obj->base.read_domains,
1c5d22f7 2827 old_write_domain);
e47c68e9
EA
2828}
2829
2830/** Flushes the CPU write domain for the object if it's dirty. */
2831static void
05394f39 2832i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
e47c68e9 2833{
1c5d22f7 2834 uint32_t old_write_domain;
e47c68e9 2835
05394f39 2836 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
e47c68e9
EA
2837 return;
2838
2839 i915_gem_clflush_object(obj);
40ce6575 2840 intel_gtt_chipset_flush();
05394f39
CW
2841 old_write_domain = obj->base.write_domain;
2842 obj->base.write_domain = 0;
1c5d22f7
CW
2843
2844 trace_i915_gem_object_change_domain(obj,
05394f39 2845 obj->base.read_domains,
1c5d22f7 2846 old_write_domain);
e47c68e9
EA
2847}
2848
2ef7eeaa
EA
2849/**
2850 * Moves a single object to the GTT read, and possibly write domain.
2851 *
2852 * This function returns when the move is complete, including waiting on
2853 * flushes to occur.
2854 */
79e53945 2855int
2021746e 2856i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2ef7eeaa 2857{
1c5d22f7 2858 uint32_t old_write_domain, old_read_domains;
e47c68e9 2859 int ret;
2ef7eeaa 2860
02354392 2861 /* Not valid to be called on unbound objects. */
05394f39 2862 if (obj->gtt_space == NULL)
02354392
EA
2863 return -EINVAL;
2864
8d7e3de1
CW
2865 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2866 return 0;
2867
88241785
CW
2868 ret = i915_gem_object_flush_gpu_write_domain(obj);
2869 if (ret)
2870 return ret;
2871
87ca9c8a 2872 if (obj->pending_gpu_write || write) {
ce453d81 2873 ret = i915_gem_object_wait_rendering(obj);
87ca9c8a
CW
2874 if (ret)
2875 return ret;
2876 }
2dafb1e0 2877
7213342d 2878 i915_gem_object_flush_cpu_write_domain(obj);
1c5d22f7 2879
05394f39
CW
2880 old_write_domain = obj->base.write_domain;
2881 old_read_domains = obj->base.read_domains;
1c5d22f7 2882
e47c68e9
EA
2883 /* It should now be out of any other write domains, and we can update
2884 * the domain values for our changes.
2885 */
05394f39
CW
2886 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2887 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
e47c68e9 2888 if (write) {
05394f39
CW
2889 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2890 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2891 obj->dirty = 1;
2ef7eeaa
EA
2892 }
2893
1c5d22f7
CW
2894 trace_i915_gem_object_change_domain(obj,
2895 old_read_domains,
2896 old_write_domain);
2897
e47c68e9
EA
2898 return 0;
2899}
2900
e4ffd173
CW
2901int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2902 enum i915_cache_level cache_level)
2903{
7bddb01f
DV
2904 struct drm_device *dev = obj->base.dev;
2905 drm_i915_private_t *dev_priv = dev->dev_private;
e4ffd173
CW
2906 int ret;
2907
2908 if (obj->cache_level == cache_level)
2909 return 0;
2910
2911 if (obj->pin_count) {
2912 DRM_DEBUG("can not change the cache level of pinned objects\n");
2913 return -EBUSY;
2914 }
2915
2916 if (obj->gtt_space) {
2917 ret = i915_gem_object_finish_gpu(obj);
2918 if (ret)
2919 return ret;
2920
2921 i915_gem_object_finish_gtt(obj);
2922
2923 /* Before SandyBridge, you could not use tiling or fence
2924 * registers with snooped memory, so relinquish any fences
2925 * currently pointing to our region in the aperture.
2926 */
2927 if (INTEL_INFO(obj->base.dev)->gen < 6) {
2928 ret = i915_gem_object_put_fence(obj);
2929 if (ret)
2930 return ret;
2931 }
2932
74898d7e
DV
2933 if (obj->has_global_gtt_mapping)
2934 i915_gem_gtt_bind_object(obj, cache_level);
7bddb01f
DV
2935 if (obj->has_aliasing_ppgtt_mapping)
2936 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2937 obj, cache_level);
e4ffd173
CW
2938 }
2939
2940 if (cache_level == I915_CACHE_NONE) {
2941 u32 old_read_domains, old_write_domain;
2942
2943 /* If we're coming from LLC cached, then we haven't
2944 * actually been tracking whether the data is in the
2945 * CPU cache or not, since we only allow one bit set
2946 * in obj->write_domain and have been skipping the clflushes.
2947 * Just set it to the CPU cache for now.
2948 */
2949 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
2950 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
2951
2952 old_read_domains = obj->base.read_domains;
2953 old_write_domain = obj->base.write_domain;
2954
2955 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2956 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2957
2958 trace_i915_gem_object_change_domain(obj,
2959 old_read_domains,
2960 old_write_domain);
2961 }
2962
2963 obj->cache_level = cache_level;
2964 return 0;
2965}
2966
b9241ea3 2967/*
2da3b9b9
CW
2968 * Prepare buffer for display plane (scanout, cursors, etc).
2969 * Can be called from an uninterruptible phase (modesetting) and allows
2970 * any flushes to be pipelined (for pageflips).
b9241ea3
ZW
2971 */
2972int
2da3b9b9
CW
2973i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2974 u32 alignment,
919926ae 2975 struct intel_ring_buffer *pipelined)
b9241ea3 2976{
2da3b9b9 2977 u32 old_read_domains, old_write_domain;
b9241ea3
ZW
2978 int ret;
2979
88241785
CW
2980 ret = i915_gem_object_flush_gpu_write_domain(obj);
2981 if (ret)
2982 return ret;
2983
0be73284 2984 if (pipelined != obj->ring) {
2911a35b
BW
2985 ret = i915_gem_object_sync(obj, pipelined);
2986 if (ret)
b9241ea3
ZW
2987 return ret;
2988 }
2989
a7ef0640
EA
2990 /* The display engine is not coherent with the LLC cache on gen6. As
2991 * a result, we make sure that the pinning that is about to occur is
2992 * done with uncached PTEs. This is the lowest common denominator for all
2993 * chipsets.
2994 *
2995 * However for gen6+, we could do better by using the GFDT bit instead
2996 * of uncaching, which would allow us to flush all the LLC-cached data
2997 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
2998 */
2999 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3000 if (ret)
3001 return ret;
3002
2da3b9b9
CW
3003 /* As the user may map the buffer once pinned in the display plane
3004 * (e.g. libkms for the bootup splash), we have to ensure that we
3005 * always use map_and_fenceable for all scanout buffers.
3006 */
3007 ret = i915_gem_object_pin(obj, alignment, true);
3008 if (ret)
3009 return ret;
3010
b118c1e3
CW
3011 i915_gem_object_flush_cpu_write_domain(obj);
3012
2da3b9b9 3013 old_write_domain = obj->base.write_domain;
05394f39 3014 old_read_domains = obj->base.read_domains;
2da3b9b9
CW
3015
3016 /* It should now be out of any other write domains, and we can update
3017 * the domain values for our changes.
3018 */
3019 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
05394f39 3020 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
b9241ea3
ZW
3021
3022 trace_i915_gem_object_change_domain(obj,
3023 old_read_domains,
2da3b9b9 3024 old_write_domain);
b9241ea3
ZW
3025
3026 return 0;
3027}
3028
85345517 3029int
a8198eea 3030i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
85345517 3031{
88241785
CW
3032 int ret;
3033
a8198eea 3034 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
85345517
CW
3035 return 0;
3036
88241785 3037 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
db53a302 3038 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
88241785
CW
3039 if (ret)
3040 return ret;
3041 }
85345517 3042
c501ae7f
CW
3043 ret = i915_gem_object_wait_rendering(obj);
3044 if (ret)
3045 return ret;
3046
a8198eea
CW
3047 /* Ensure that we invalidate the GPU's caches and TLBs. */
3048 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
c501ae7f 3049 return 0;
85345517
CW
3050}
3051
e47c68e9
EA
3052/**
3053 * Moves a single object to the CPU read, and possibly write domain.
3054 *
3055 * This function returns when the move is complete, including waiting on
3056 * flushes to occur.
3057 */
dabdfe02 3058int
919926ae 3059i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
e47c68e9 3060{
1c5d22f7 3061 uint32_t old_write_domain, old_read_domains;
e47c68e9
EA
3062 int ret;
3063
8d7e3de1
CW
3064 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3065 return 0;
3066
88241785
CW
3067 ret = i915_gem_object_flush_gpu_write_domain(obj);
3068 if (ret)
3069 return ret;
3070
f8413190
CW
3071 if (write || obj->pending_gpu_write) {
3072 ret = i915_gem_object_wait_rendering(obj);
3073 if (ret)
3074 return ret;
3075 }
2ef7eeaa 3076
e47c68e9 3077 i915_gem_object_flush_gtt_write_domain(obj);
2ef7eeaa 3078
05394f39
CW
3079 old_write_domain = obj->base.write_domain;
3080 old_read_domains = obj->base.read_domains;
1c5d22f7 3081
e47c68e9 3082 /* Flush the CPU cache if it's still invalid. */
05394f39 3083 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2ef7eeaa 3084 i915_gem_clflush_object(obj);
2ef7eeaa 3085
05394f39 3086 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
2ef7eeaa
EA
3087 }
3088
3089 /* It should now be out of any other write domains, and we can update
3090 * the domain values for our changes.
3091 */
05394f39 3092 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
e47c68e9
EA
3093
3094 /* If we're writing through the CPU, then the GPU read domains will
3095 * need to be invalidated at next use.
3096 */
3097 if (write) {
05394f39
CW
3098 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3099 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
e47c68e9 3100 }
2ef7eeaa 3101
1c5d22f7
CW
3102 trace_i915_gem_object_change_domain(obj,
3103 old_read_domains,
3104 old_write_domain);
3105
2ef7eeaa
EA
3106 return 0;
3107}
3108
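A usage sketch (hypothetical caller): before reading back a GPU-rendered buffer, move it to the CPU read domain so stale cache lines are invalidated by the clflush above:

	ret = i915_gem_object_set_to_cpu_domain(obj, false);	/* read-only */
	if (ret)
		return ret;
	/* CPU reads of the object's pages now observe the GPU's writes */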
673a394b
EA
3109/* Throttle our rendering by waiting until the ring has completed our requests
3110 * emitted over 20 msec ago.
3111 *
b962442e
EA
3112 * Note that if we were to use the current jiffies each time around the loop,
3113 * we wouldn't escape the function with any frames outstanding if the time to
3114 * render a frame was over 20ms.
3115 *
673a394b
EA
3116 * This should get us reasonable parallelism between CPU and GPU but also
3117 * relatively low latency when blocking on a particular request to finish.
3118 */
40a5f0de 3119static int
f787a5f5 3120i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
40a5f0de 3121{
f787a5f5
CW
3122 struct drm_i915_private *dev_priv = dev->dev_private;
3123 struct drm_i915_file_private *file_priv = file->driver_priv;
b962442e 3124 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
f787a5f5
CW
3125 struct drm_i915_gem_request *request;
3126 struct intel_ring_buffer *ring = NULL;
3127 u32 seqno = 0;
3128 int ret;
93533c29 3129
e110e8d6
CW
3130 if (atomic_read(&dev_priv->mm.wedged))
3131 return -EIO;
3132
1c25595f 3133 spin_lock(&file_priv->mm.lock);
f787a5f5 3134 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
b962442e
EA
3135 if (time_after_eq(request->emitted_jiffies, recent_enough))
3136 break;
40a5f0de 3137
f787a5f5
CW
3138 ring = request->ring;
3139 seqno = request->seqno;
b962442e 3140 }
1c25595f 3141 spin_unlock(&file_priv->mm.lock);
40a5f0de 3142
f787a5f5
CW
3143 if (seqno == 0)
3144 return 0;
2bc43b5c 3145
f787a5f5 3146 ret = 0;
78501eac 3147 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
f787a5f5
CW
3148 /* And wait for the seqno passing without holding any locks and
3149 * causing extra latency for others. This is safe as the irq
3150 * generation is designed to be run atomically and so is
3151 * lockless.
3152 */
b13c2b96
CW
3153 if (ring->irq_get(ring)) {
3154 ret = wait_event_interruptible(ring->irq_queue,
3155 i915_seqno_passed(ring->get_seqno(ring), seqno)
3156 || atomic_read(&dev_priv->mm.wedged));
3157 ring->irq_put(ring);
40a5f0de 3158
b13c2b96
CW
3159 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3160 ret = -EIO;
e959b5db
EA
3161 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
3162 seqno) ||
7ea29b13
EA
3163 atomic_read(&dev_priv->mm.wedged), 3000)) {
3164 ret = -EBUSY;
b13c2b96 3165 }
40a5f0de
EA
3166 }
3167
f787a5f5
CW
3168 if (ret == 0)
3169 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
40a5f0de
EA
3170
3171 return ret;
3172}
3173
673a394b 3174int
05394f39
CW
3175i915_gem_object_pin(struct drm_i915_gem_object *obj,
3176 uint32_t alignment,
75e9e915 3177 bool map_and_fenceable)
673a394b 3178{
05394f39 3179 struct drm_device *dev = obj->base.dev;
f13d3f73 3180 struct drm_i915_private *dev_priv = dev->dev_private;
673a394b
EA
3181 int ret;
3182
05394f39 3183 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
23bc5982 3184 WARN_ON(i915_verify_lists(dev));
ac0c6b5a 3185
05394f39
CW
3186 if (obj->gtt_space != NULL) {
3187 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3188 (map_and_fenceable && !obj->map_and_fenceable)) {
3189 WARN(obj->pin_count,
ae7d49d8 3190 "bo is already pinned with incorrect alignment:"
75e9e915
DV
3191 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3192 " obj->map_and_fenceable=%d\n",
05394f39 3193 obj->gtt_offset, alignment,
75e9e915 3194 map_and_fenceable,
05394f39 3195 obj->map_and_fenceable);
ac0c6b5a
CW
3196 ret = i915_gem_object_unbind(obj);
3197 if (ret)
3198 return ret;
3199 }
3200 }
3201
05394f39 3202 if (obj->gtt_space == NULL) {
a00b10c3 3203 ret = i915_gem_object_bind_to_gtt(obj, alignment,
75e9e915 3204 map_and_fenceable);
9731129c 3205 if (ret)
673a394b 3206 return ret;
22c344e9 3207 }
76446cac 3208
74898d7e
DV
3209 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3210 i915_gem_gtt_bind_object(obj, obj->cache_level);
3211
05394f39 3212 if (obj->pin_count++ == 0) {
05394f39
CW
3213 if (!obj->active)
3214 list_move_tail(&obj->mm_list,
f13d3f73 3215 &dev_priv->mm.pinned_list);
673a394b 3216 }
6299f992 3217 obj->pin_mappable |= map_and_fenceable;
673a394b 3218
23bc5982 3219 WARN_ON(i915_verify_lists(dev));
673a394b
EA
3220 return 0;
3221}
3222
3223void
05394f39 3224i915_gem_object_unpin(struct drm_i915_gem_object *obj)
673a394b 3225{
05394f39 3226 struct drm_device *dev = obj->base.dev;
673a394b 3227 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b 3228
23bc5982 3229 WARN_ON(i915_verify_lists(dev));
05394f39
CW
3230 BUG_ON(obj->pin_count == 0);
3231 BUG_ON(obj->gtt_space == NULL);
673a394b 3232
05394f39
CW
3233 if (--obj->pin_count == 0) {
3234 if (!obj->active)
3235 list_move_tail(&obj->mm_list,
673a394b 3236 &dev_priv->mm.inactive_list);
6299f992 3237 obj->pin_mappable = false;
673a394b 3238 }
23bc5982 3239 WARN_ON(i915_verify_lists(dev));
673a394b
EA
3240}
3241
3242int
3243i915_gem_pin_ioctl(struct drm_device *dev, void *data,
05394f39 3244 struct drm_file *file)
673a394b
EA
3245{
3246 struct drm_i915_gem_pin *args = data;
05394f39 3247 struct drm_i915_gem_object *obj;
673a394b
EA
3248 int ret;
3249
1d7cfea1
CW
3250 ret = i915_mutex_lock_interruptible(dev);
3251 if (ret)
3252 return ret;
673a394b 3253
05394f39 3254 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3255 if (&obj->base == NULL) {
1d7cfea1
CW
3256 ret = -ENOENT;
3257 goto unlock;
673a394b 3258 }
673a394b 3259
05394f39 3260 if (obj->madv != I915_MADV_WILLNEED) {
bb6baf76 3261 DRM_ERROR("Attempting to pin a purgeable buffer\n");
1d7cfea1
CW
3262 ret = -EINVAL;
3263 goto out;
3ef94daa
CW
3264 }
3265
05394f39 3266 if (obj->pin_filp != NULL && obj->pin_filp != file) {
79e53945
JB
3267 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3268 args->handle);
1d7cfea1
CW
3269 ret = -EINVAL;
3270 goto out;
79e53945
JB
3271 }
3272
05394f39
CW
3273 obj->user_pin_count++;
3274 obj->pin_filp = file;
3275 if (obj->user_pin_count == 1) {
75e9e915 3276 ret = i915_gem_object_pin(obj, args->alignment, true);
1d7cfea1
CW
3277 if (ret)
3278 goto out;
673a394b
EA
3279 }
3280
3281 /* XXX - flush the CPU caches for pinned objects
3282 * as the X server doesn't manage domains yet
3283 */
e47c68e9 3284 i915_gem_object_flush_cpu_write_domain(obj);
05394f39 3285 args->offset = obj->gtt_offset;
1d7cfea1 3286out:
05394f39 3287 drm_gem_object_unreference(&obj->base);
1d7cfea1 3288unlock:
673a394b 3289 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3290 return ret;
673a394b
EA
3291}
3292
3293int
3294i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
05394f39 3295 struct drm_file *file)
673a394b
EA
3296{
3297 struct drm_i915_gem_pin *args = data;
05394f39 3298 struct drm_i915_gem_object *obj;
76c1dec1 3299 int ret;
673a394b 3300
1d7cfea1
CW
3301 ret = i915_mutex_lock_interruptible(dev);
3302 if (ret)
3303 return ret;
673a394b 3304
05394f39 3305 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3306 if (&obj->base == NULL) {
1d7cfea1
CW
3307 ret = -ENOENT;
3308 goto unlock;
673a394b 3309 }
76c1dec1 3310
05394f39 3311 if (obj->pin_filp != file) {
79e53945
JB
3312 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3313 args->handle);
1d7cfea1
CW
3314 ret = -EINVAL;
3315 goto out;
79e53945 3316 }
05394f39
CW
3317 obj->user_pin_count--;
3318 if (obj->user_pin_count == 0) {
3319 obj->pin_filp = NULL;
79e53945
JB
3320 i915_gem_object_unpin(obj);
3321 }
673a394b 3322
1d7cfea1 3323out:
05394f39 3324 drm_gem_object_unreference(&obj->base);
1d7cfea1 3325unlock:
673a394b 3326 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3327 return ret;
673a394b
EA
3328}
3329
3330int
3331i915_gem_busy_ioctl(struct drm_device *dev, void *data,
05394f39 3332 struct drm_file *file)
673a394b
EA
3333{
3334 struct drm_i915_gem_busy *args = data;
05394f39 3335 struct drm_i915_gem_object *obj;
30dbf0c0
CW
3336 int ret;
3337
76c1dec1 3338 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 3339 if (ret)
76c1dec1 3340 return ret;
673a394b 3341
05394f39 3342 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3343 if (&obj->base == NULL) {
1d7cfea1
CW
3344 ret = -ENOENT;
3345 goto unlock;
673a394b 3346 }
d1b851fc 3347
0be555b6
CW
3348 /* Count all active objects as busy, even if they are currently not used
3349 * by the gpu. Users of this interface expect objects to eventually
3350 * become non-busy without any further action, so emit any
3351 * necessary flushes here.
c4de0a5d 3352 */
05394f39 3353 args->busy = obj->active;
0be555b6
CW
3354 if (args->busy) {
3355 /* Unconditionally flush objects, even when the gpu still uses this
3356 * object. Userspace calling this function indicates that it wants to
3357 * use this buffer sooner rather than later, so issuing the required
3358 * flush earlier is beneficial.
3359 */
1a1c6976 3360 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
db53a302 3361 ret = i915_gem_flush_ring(obj->ring,
88241785 3362 0, obj->base.write_domain);
1a1c6976
CW
3363 } else if (obj->ring->outstanding_lazy_request ==
3364 obj->last_rendering_seqno) {
3365 struct drm_i915_gem_request *request;
3366
7a194876
CW
3367 /* This ring is not being cleared by active usage,
3368 * so emit a request to do so.
3369 */
1a1c6976 3370 request = kzalloc(sizeof(*request), GFP_KERNEL);
457eafce 3371 if (request) {
0206e353 3372 ret = i915_add_request(obj->ring, NULL, request);
457eafce
RM
3373 if (ret)
3374 kfree(request);
3375 } else
7a194876
CW
3376 ret = -ENOMEM;
3377 }
0be555b6
CW
3378
3379 /* Update the active list for the hardware's current position.
3380 * Otherwise this only updates on a delayed timer or when irqs
3381 * are actually unmasked, and our working set ends up being
3382 * larger than required.
3383 */
db53a302 3384 i915_gem_retire_requests_ring(obj->ring);
0be555b6 3385
05394f39 3386 args->busy = obj->active;
0be555b6 3387 }
673a394b 3388
05394f39 3389 drm_gem_object_unreference(&obj->base);
1d7cfea1 3390unlock:
673a394b 3391 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3392 return ret;
673a394b
EA
3393}
3394
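From userspace this surfaces as DRM_IOCTL_I915_GEM_BUSY; a minimal sketch (hypothetical fd and bo_handle, using libdrm's drmIoctl):

	struct drm_i915_gem_busy busy = { .handle = bo_handle };

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0 && !busy.busy) {
		/* idle: the buffer can be reused or mapped without stalling */
	}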
3395int
3396i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3397 struct drm_file *file_priv)
3398{
0206e353 3399 return i915_gem_ring_throttle(dev, file_priv);
673a394b
EA
3400}
3401
3ef94daa
CW
3402int
3403i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3404 struct drm_file *file_priv)
3405{
3406 struct drm_i915_gem_madvise *args = data;
05394f39 3407 struct drm_i915_gem_object *obj;
76c1dec1 3408 int ret;
3ef94daa
CW
3409
3410 switch (args->madv) {
3411 case I915_MADV_DONTNEED:
3412 case I915_MADV_WILLNEED:
3413 break;
3414 default:
3415 return -EINVAL;
3416 }
3417
1d7cfea1
CW
3418 ret = i915_mutex_lock_interruptible(dev);
3419 if (ret)
3420 return ret;
3421
05394f39 3422 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
c8725226 3423 if (&obj->base == NULL) {
1d7cfea1
CW
3424 ret = -ENOENT;
3425 goto unlock;
3ef94daa 3426 }
3ef94daa 3427
05394f39 3428 if (obj->pin_count) {
1d7cfea1
CW
3429 ret = -EINVAL;
3430 goto out;
3ef94daa
CW
3431 }
3432
05394f39
CW
3433 if (obj->madv != __I915_MADV_PURGED)
3434 obj->madv = args->madv;
3ef94daa 3435
2d7ef395 3436 /* if the object is no longer bound, discard its backing storage */
05394f39
CW
3437 if (i915_gem_object_is_purgeable(obj) &&
3438 obj->gtt_space == NULL)
2d7ef395
CW
3439 i915_gem_object_truncate(obj);
3440
05394f39 3441 args->retained = obj->madv != __I915_MADV_PURGED;
bb6baf76 3442
1d7cfea1 3443out:
05394f39 3444 drm_gem_object_unreference(&obj->base);
1d7cfea1 3445unlock:
3ef94daa 3446 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3447 return ret;
3ef94daa
CW
3448}
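/*
 * A minimal userspace sketch of a purgeable-buffer cache built on this
 * ioctl, assuming an open DRM fd "fd", a GEM handle "handle" and a
 * regenerate_contents() helper (all hypothetical names). retained
 * reports whether the pages survived while the buffer was DONTNEED:
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		regenerate_contents(handle);
 */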
3449
05394f39
CW
3450struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3451 size_t size)
ac52bc56 3452{
73aa808f 3453 struct drm_i915_private *dev_priv = dev->dev_private;
c397b908 3454 struct drm_i915_gem_object *obj;
5949eac4 3455 struct address_space *mapping;
ac52bc56 3456
c397b908
DV
3457 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3458 if (obj == NULL)
3459 return NULL;
673a394b 3460
c397b908
DV
3461 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3462 kfree(obj);
3463 return NULL;
3464 }
673a394b 3465
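	/* Back the object with highmem-capable user pages and mark them
	 * __GFP_RECLAIMABLE so the page allocator groups them with other
	 * reclaimable allocations when fighting fragmentation.
	 */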
5949eac4
HD
3466 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3467 mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
3468
73aa808f
CW
3469 i915_gem_info_add_obj(dev_priv, size);
3470
c397b908
DV
3471 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3472 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
673a394b 3473
3d29b842
ED
3474 if (HAS_LLC(dev)) {
3475 /* On some devices, we can have the GPU use the LLC (the CPU
a1871112
EA
3476 * cache) for about a 10% performance improvement
3477 * compared to uncached. Graphics requests other than
3478 * display scanout are coherent with the CPU in
3479 * accessing this cache. This means in this mode we
3480 * don't need to clflush on the CPU side, and on the
3481 * GPU side we only need to flush internal caches to
3482 * get data visible to the CPU.
3483 *
3484 * However, we maintain the display planes as UC, and so
3485 * need to rebind when first used as such.
3486 */
3487 obj->cache_level = I915_CACHE_LLC;
3488 } else
3489 obj->cache_level = I915_CACHE_NONE;
3490
62b8b215 3491 obj->base.driver_private = NULL;
c397b908 3492 obj->fence_reg = I915_FENCE_REG_NONE;
69dc4987 3493 INIT_LIST_HEAD(&obj->mm_list);
93a37f20 3494 INIT_LIST_HEAD(&obj->gtt_list);
69dc4987 3495 INIT_LIST_HEAD(&obj->ring_list);
432e58ed 3496 INIT_LIST_HEAD(&obj->exec_list);
c397b908 3497 INIT_LIST_HEAD(&obj->gpu_write_list);
c397b908 3498 obj->madv = I915_MADV_WILLNEED;
75e9e915
DV
3499 /* Avoid an unnecessary call to unbind on the first bind. */
3500 obj->map_and_fenceable = true;
de151cf6 3501
05394f39 3502 return obj;
c397b908
DV
3503}
3504
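/*
 * All i915 objects are allocated through i915_gem_alloc_object() above,
 * so the generic GEM object-init hook must never be reached.
 */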
3505int i915_gem_init_object(struct drm_gem_object *obj)
3506{
3507 BUG();
de151cf6 3508
673a394b
EA
3509 return 0;
3510}
3511
05394f39 3512static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
673a394b 3513{
05394f39 3514 struct drm_device *dev = obj->base.dev;
be72615b 3515 drm_i915_private_t *dev_priv = dev->dev_private;
be72615b 3516 int ret;
673a394b 3517
be72615b
CW
3518 ret = i915_gem_object_unbind(obj);
3519 if (ret == -ERESTARTSYS) {
05394f39 3520 list_move(&obj->mm_list,
be72615b
CW
3521 &dev_priv->mm.deferred_free_list);
3522 return;
3523 }
673a394b 3524
26e12f89
CW
3525 trace_i915_gem_object_destroy(obj);
3526
05394f39 3527 if (obj->base.map_list.map)
b464e9a2 3528 drm_gem_free_mmap_offset(&obj->base);
de151cf6 3529
05394f39
CW
3530 drm_gem_object_release(&obj->base);
3531 i915_gem_info_remove_obj(dev_priv, obj->base.size);
c397b908 3532
05394f39
CW
3533 kfree(obj->bit_17);
3534 kfree(obj);
673a394b
EA
3535}
3536
05394f39 3537void i915_gem_free_object(struct drm_gem_object *gem_obj)
be72615b 3538{
05394f39
CW
3539 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3540 struct drm_device *dev = obj->base.dev;
be72615b 3541
05394f39 3542 while (obj->pin_count > 0)
be72615b
CW
3543 i915_gem_object_unpin(obj);
3544
05394f39 3545 if (obj->phys_obj)
be72615b
CW
3546 i915_gem_detach_phys_object(dev, obj);
3547
3548 i915_gem_free_object_tail(obj);
3549}
3550
29105ccc
CW
3551int
3552i915_gem_idle(struct drm_device *dev)
3553{
3554 drm_i915_private_t *dev_priv = dev->dev_private;
3555 int ret;
28dfe52a 3556
29105ccc 3557 mutex_lock(&dev->struct_mutex);
1c5d22f7 3558
87acb0a5 3559 if (dev_priv->mm.suspended) {
29105ccc
CW
3560 mutex_unlock(&dev->struct_mutex);
3561 return 0;
28dfe52a
EA
3562 }
3563
b93f9cf1 3564 ret = i915_gpu_idle(dev, true);
6dbe2772
KP
3565 if (ret) {
3566 mutex_unlock(&dev->struct_mutex);
673a394b 3567 return ret;
6dbe2772 3568 }
673a394b 3569
29105ccc
CW
3570 /* Under UMS, be paranoid and evict. */
3571 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
5eac3ab4 3572 ret = i915_gem_evict_inactive(dev, false);
29105ccc
CW
3573 if (ret) {
3574 mutex_unlock(&dev->struct_mutex);
3575 return ret;
3576 }
3577 }
3578
312817a3
CW
3579 i915_gem_reset_fences(dev);
3580
29105ccc
CW
3581 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3582 * We need to replace this with a semaphore, or something.
3583 * And not confound mm.suspended!
3584 */
3585 dev_priv->mm.suspended = 1;
bc0c7f14 3586 del_timer_sync(&dev_priv->hangcheck_timer);
29105ccc
CW
3587
3588 i915_kernel_lost_context(dev);
6dbe2772 3589 i915_gem_cleanup_ringbuffer(dev);
29105ccc 3590
6dbe2772
KP
3591 mutex_unlock(&dev->struct_mutex);
3592
29105ccc
CW
3593 /* Cancel the retire work handler, which should be idle now. */
3594 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3595
673a394b
EA
3596 return 0;
3597}
3598
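/*
 * Teach the hardware to apply bit-6 swizzling itself when the detected
 * swizzle mode requires it: gen5 only needs the display arbiter bit,
 * while gen6/gen7 additionally need TILECTL and the per-generation
 * ARB_MODE swizzle setting programmed below.
 */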
f691e2f4
DV
3599void i915_gem_init_swizzling(struct drm_device *dev)
3600{
3601 drm_i915_private_t *dev_priv = dev->dev_private;
3602
11782b02 3603 if (INTEL_INFO(dev)->gen < 5 ||
f691e2f4
DV
3604 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3605 return;
3606
3607 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3608 DISP_TILE_SURFACE_SWIZZLING);
3609
11782b02
DV
3610 if (IS_GEN5(dev))
3611 return;
3612
f691e2f4
DV
3613 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3614 if (IS_GEN6(dev))
3615 I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
3616 else
3617 I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
3618}
e21af88d
DV
3619
3620void i915_gem_init_ppgtt(struct drm_device *dev)
3621{
3622 drm_i915_private_t *dev_priv = dev->dev_private;
3623 uint32_t pd_offset;
3624 struct intel_ring_buffer *ring;
55a254ac
DV
3625 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
3626 uint32_t __iomem *pd_addr;
3627 uint32_t pd_entry;
e21af88d
DV
3628 int i;
3629
3630 if (!dev_priv->mm.aliasing_ppgtt)
3631 return;
3632
55a254ac
DV
3633
3634 pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
3635 for (i = 0; i < ppgtt->num_pd_entries; i++) {
3636 dma_addr_t pt_addr;
3637
3638 if (dev_priv->mm.gtt->needs_dmar)
3639 pt_addr = ppgtt->pt_dma_addr[i];
3640 else
3641 pt_addr = page_to_phys(ppgtt->pt_pages[i]);
3642
3643 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
3644 pd_entry |= GEN6_PDE_VALID;
3645
3646 writel(pd_entry, pd_addr + i);
3647 }
3648 readl(pd_addr);
3649
3650 pd_offset = ppgtt->pd_offset;
e21af88d
DV
3651 pd_offset /= 64; /* in cachelines */
3652 pd_offset <<= 16;
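	/* Worked example: a page directory at GTT offset 0x10000 (64KiB)
	 * is 0x10000 / 64 = 0x400 cachelines, so PP_DIR_BASE below gets
	 * programmed with 0x400 << 16 = 0x04000000.
	 */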
3653
3654 if (INTEL_INFO(dev)->gen == 6) {
3655 uint32_t ecochk = I915_READ(GAM_ECOCHK);
3656 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
3657 ECOCHK_PPGTT_CACHE64B);
3658 I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
3659 } else if (INTEL_INFO(dev)->gen >= 7) {
3660 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
3661 /* GFX_MODE is per-ring on gen7+ */
3662 }
3663
3664 for (i = 0; i < I915_NUM_RINGS; i++) {
3665 ring = &dev_priv->ring[i];
3666
3667 if (INTEL_INFO(dev)->gen >= 7)
3668 I915_WRITE(RING_MODE_GEN7(ring),
3669 GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
3670
3671 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
3672 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
3673 }
3674}
3675
8187a2b7 3676int
f691e2f4 3677i915_gem_init_hw(struct drm_device *dev)
8187a2b7
ZN
3678{
3679 drm_i915_private_t *dev_priv = dev->dev_private;
3680 int ret;
68f95ba9 3681
f691e2f4
DV
3682 i915_gem_init_swizzling(dev);
3683
5c1143bb 3684 ret = intel_init_render_ring_buffer(dev);
68f95ba9 3685 if (ret)
b6913e4b 3686 return ret;
68f95ba9
CW
3687
3688 if (HAS_BSD(dev)) {
5c1143bb 3689 ret = intel_init_bsd_ring_buffer(dev);
68f95ba9
CW
3690 if (ret)
3691 goto cleanup_render_ring;
d1b851fc 3692 }
68f95ba9 3693
549f7365
CW
3694 if (HAS_BLT(dev)) {
3695 ret = intel_init_blt_ring_buffer(dev);
3696 if (ret)
3697 goto cleanup_bsd_ring;
3698 }
3699
6f392d54
CW
3700 dev_priv->next_seqno = 1;
3701
e21af88d
DV
3702 i915_gem_init_ppgtt(dev);
3703
68f95ba9
CW
3704 return 0;
3705
549f7365 3706cleanup_bsd_ring:
1ec14ad3 3707 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
68f95ba9 3708cleanup_render_ring:
1ec14ad3 3709 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
8187a2b7
ZN
3710 return ret;
3711}
3712
3713void
3714i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3715{
3716 drm_i915_private_t *dev_priv = dev->dev_private;
1ec14ad3 3717 int i;
8187a2b7 3718
1ec14ad3
CW
3719 for (i = 0; i < I915_NUM_RINGS; i++)
3720 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
8187a2b7
ZN
3721}
3722
673a394b
EA
3723int
3724i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3725 struct drm_file *file_priv)
3726{
3727 drm_i915_private_t *dev_priv = dev->dev_private;
1ec14ad3 3728 int ret, i;
673a394b 3729
79e53945
JB
3730 if (drm_core_check_feature(dev, DRIVER_MODESET))
3731 return 0;
3732
ba1234d1 3733 if (atomic_read(&dev_priv->mm.wedged)) {
673a394b 3734 DRM_ERROR("Reenabling wedged hardware, good luck\n");
ba1234d1 3735 atomic_set(&dev_priv->mm.wedged, 0);
673a394b
EA
3736 }
3737
673a394b 3738 mutex_lock(&dev->struct_mutex);
9bb2d6f9
EA
3739 dev_priv->mm.suspended = 0;
3740
f691e2f4 3741 ret = i915_gem_init_hw(dev);
d816f6ac
WF
3742 if (ret != 0) {
3743 mutex_unlock(&dev->struct_mutex);
9bb2d6f9 3744 return ret;
d816f6ac 3745 }
9bb2d6f9 3746
69dc4987 3747 BUG_ON(!list_empty(&dev_priv->mm.active_list));
673a394b
EA
3748 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3749 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
1ec14ad3
CW
3750 for (i = 0; i < I915_NUM_RINGS; i++) {
3751 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3752 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3753 }
673a394b 3754 mutex_unlock(&dev->struct_mutex);
dbb19d30 3755
5f35308b
CW
3756 ret = drm_irq_install(dev);
3757 if (ret)
3758 goto cleanup_ringbuffer;
dbb19d30 3759
673a394b 3760 return 0;
5f35308b
CW
3761
3762cleanup_ringbuffer:
3763 mutex_lock(&dev->struct_mutex);
3764 i915_gem_cleanup_ringbuffer(dev);
3765 dev_priv->mm.suspended = 1;
3766 mutex_unlock(&dev->struct_mutex);
3767
3768 return ret;
673a394b
EA
3769}
3770
3771int
3772i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3773 struct drm_file *file_priv)
3774{
79e53945
JB
3775 if (drm_core_check_feature(dev, DRIVER_MODESET))
3776 return 0;
3777
dbb19d30 3778 drm_irq_uninstall(dev);
e6890f6f 3779 return i915_gem_idle(dev);
673a394b
EA
3780}
3781
3782void
3783i915_gem_lastclose(struct drm_device *dev)
3784{
3785 int ret;
673a394b 3786
e806b495
EA
3787 if (drm_core_check_feature(dev, DRIVER_MODESET))
3788 return;
3789
6dbe2772
KP
3790 ret = i915_gem_idle(dev);
3791 if (ret)
3792 DRM_ERROR("failed to idle hardware: %d\n", ret);
673a394b
EA
3793}
3794
64193406
CW
3795static void
3796init_ring_lists(struct intel_ring_buffer *ring)
3797{
3798 INIT_LIST_HEAD(&ring->active_list);
3799 INIT_LIST_HEAD(&ring->request_list);
3800 INIT_LIST_HEAD(&ring->gpu_write_list);
3801}
3802
673a394b
EA
3803void
3804i915_gem_load(struct drm_device *dev)
3805{
b5aa8a0f 3806 int i;
673a394b
EA
3807 drm_i915_private_t *dev_priv = dev->dev_private;
3808
69dc4987 3809 INIT_LIST_HEAD(&dev_priv->mm.active_list);
673a394b
EA
3810 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3811 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
f13d3f73 3812 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
a09ba7fa 3813 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
be72615b 3814 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
93a37f20 3815 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
1ec14ad3
CW
3816 for (i = 0; i < I915_NUM_RINGS; i++)
3817 init_ring_lists(&dev_priv->ring[i]);
4b9de737 3818 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
007cc8ac 3819 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
673a394b
EA
3820 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3821 i915_gem_retire_work_handler);
30dbf0c0 3822 init_completion(&dev_priv->error_completion);
31169714 3823
94400120
DA
3824 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3825 if (IS_GEN3(dev)) {
3826 u32 tmp = I915_READ(MI_ARB_STATE);
3827 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
3828 /* arb state is a masked write, so set bit + bit in mask */
3829 tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3830 I915_WRITE(MI_ARB_STATE, tmp);
3831 }
3832 }
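	/* MI_ARB_STATE is a masked register: the high 16 bits select
	 * which of the low 16 bits are actually written, leaving the
	 * rest untouched. Hence the enable bit is written together with
	 * its mask bit shifted up by MI_ARB_MASK_SHIFT.
	 */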
3833
72bfa19c
CW
3834 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3835
de151cf6 3836 /* Old X drivers will take 0-2 for front, back, depth buffers */
b397c836
EA
3837 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3838 dev_priv->fence_reg_start = 3;
de151cf6 3839
a6c45cf0 3840 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
de151cf6
JB
3841 dev_priv->num_fence_regs = 16;
3842 else
3843 dev_priv->num_fence_regs = 8;
3844
b5aa8a0f 3845 /* Initialize fence registers to zero */
10ed13e4
EA
3846 for (i = 0; i < dev_priv->num_fence_regs; i++) {
3847 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
b5aa8a0f 3848 }
10ed13e4 3849
673a394b 3850 i915_gem_detect_bit_6_swizzle(dev);
6b95a207 3851 init_waitqueue_head(&dev_priv->pending_flip_queue);
17250b71 3852
ce453d81
CW
3853 dev_priv->mm.interruptible = true;
3854
17250b71
CW
3855 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3856 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3857 register_shrinker(&dev_priv->mm.inactive_shrinker);
673a394b 3858}
71acb5eb
DA
3859
3860/*
3861 * Create a physically contiguous memory object for this object
3862 * e.g. for cursor + overlay regs
3863 */
995b6762
CW
3864static int i915_gem_init_phys_object(struct drm_device *dev,
3865 int id, int size, int align)
71acb5eb
DA
3866{
3867 drm_i915_private_t *dev_priv = dev->dev_private;
3868 struct drm_i915_gem_phys_object *phys_obj;
3869 int ret;
3870
3871 if (dev_priv->mm.phys_objs[id - 1] || !size)
3872 return 0;
3873
9a298b2a 3874 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
71acb5eb
DA
3875 if (!phys_obj)
3876 return -ENOMEM;
3877
3878 phys_obj->id = id;
3879
6eeefaf3 3880 phys_obj->handle = drm_pci_alloc(dev, size, align);
71acb5eb
DA
3881 if (!phys_obj->handle) {
3882 ret = -ENOMEM;
3883 goto kfree_obj;
3884 }
3885#ifdef CONFIG_X86
3886 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3887#endif
3888
3889 dev_priv->mm.phys_objs[id - 1] = phys_obj;
3890
3891 return 0;
3892kfree_obj:
9a298b2a 3893 kfree(phys_obj);
71acb5eb
DA
3894 return ret;
3895}
3896
995b6762 3897static void i915_gem_free_phys_object(struct drm_device *dev, int id)
71acb5eb
DA
3898{
3899 drm_i915_private_t *dev_priv = dev->dev_private;
3900 struct drm_i915_gem_phys_object *phys_obj;
3901
3902 if (!dev_priv->mm.phys_objs[id - 1])
3903 return;
3904
3905 phys_obj = dev_priv->mm.phys_objs[id - 1];
3906 if (phys_obj->cur_obj) {
3907 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3908 }
3909
3910#ifdef CONFIG_X86
3911 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3912#endif
3913 drm_pci_free(dev, phys_obj->handle);
3914 kfree(phys_obj);
3915 dev_priv->mm.phys_objs[id - 1] = NULL;
3916}
3917
3918void i915_gem_free_all_phys_object(struct drm_device *dev)
3919{
3920 int i;
3921
260883c8 3922 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
71acb5eb
DA
3923 i915_gem_free_phys_object(dev, i);
3924}
3925
3926void i915_gem_detach_phys_object(struct drm_device *dev,
05394f39 3927 struct drm_i915_gem_object *obj)
71acb5eb 3928{
05394f39 3929 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
e5281ccd 3930 char *vaddr;
71acb5eb 3931 int i;
71acb5eb
DA
3932 int page_count;
3933
05394f39 3934 if (!obj->phys_obj)
71acb5eb 3935 return;
05394f39 3936 vaddr = obj->phys_obj->handle->vaddr;
71acb5eb 3937
05394f39 3938 page_count = obj->base.size / PAGE_SIZE;
71acb5eb 3939 for (i = 0; i < page_count; i++) {
5949eac4 3940 struct page *page = shmem_read_mapping_page(mapping, i);
e5281ccd
CW
3941 if (!IS_ERR(page)) {
3942 char *dst = kmap_atomic(page);
3943 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
3944 kunmap_atomic(dst);
3945
3946 drm_clflush_pages(&page, 1);
3947
3948 set_page_dirty(page);
3949 mark_page_accessed(page);
3950 page_cache_release(page);
3951 }
71acb5eb 3952 }
40ce6575 3953 intel_gtt_chipset_flush();
d78b47b9 3954
05394f39
CW
3955 obj->phys_obj->cur_obj = NULL;
3956 obj->phys_obj = NULL;
71acb5eb
DA
3957}
3958
3959int
3960i915_gem_attach_phys_object(struct drm_device *dev,
05394f39 3961 struct drm_i915_gem_object *obj,
6eeefaf3
CW
3962 int id,
3963 int align)
71acb5eb 3964{
05394f39 3965 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
71acb5eb 3966 drm_i915_private_t *dev_priv = dev->dev_private;
71acb5eb
DA
3967 int ret = 0;
3968 int page_count;
3969 int i;
3970
3971 if (id > I915_MAX_PHYS_OBJECT)
3972 return -EINVAL;
3973
05394f39
CW
3974 if (obj->phys_obj) {
3975 if (obj->phys_obj->id == id)
71acb5eb
DA
3976 return 0;
3977 i915_gem_detach_phys_object(dev, obj);
3978 }
3979
71acb5eb
DA
3980 /* create a new object */
3981 if (!dev_priv->mm.phys_objs[id - 1]) {
3982 ret = i915_gem_init_phys_object(dev, id,
05394f39 3983 obj->base.size, align);
71acb5eb 3984 if (ret) {
05394f39
CW
3985 DRM_ERROR("failed to init phys object %d size: %zu\n",
3986 id, obj->base.size);
e5281ccd 3987 return ret;
71acb5eb
DA
3988 }
3989 }
3990
3991 /* bind to the object */
05394f39
CW
3992 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
3993 obj->phys_obj->cur_obj = obj;
71acb5eb 3994
05394f39 3995 page_count = obj->base.size / PAGE_SIZE;
71acb5eb
DA
3996
3997 for (i = 0; i < page_count; i++) {
e5281ccd
CW
3998 struct page *page;
3999 char *dst, *src;
4000
5949eac4 4001 page = shmem_read_mapping_page(mapping, i);
e5281ccd
CW
4002 if (IS_ERR(page))
4003 return PTR_ERR(page);
71acb5eb 4004
ff75b9bc 4005 src = kmap_atomic(page);
05394f39 4006 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
71acb5eb 4007 memcpy(dst, src, PAGE_SIZE);
3e4d3af5 4008 kunmap_atomic(src);
71acb5eb 4009
e5281ccd
CW
4010 mark_page_accessed(page);
4011 page_cache_release(page);
4012 }
d78b47b9 4013
71acb5eb 4014 return 0;
71acb5eb
DA
4015}
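/*
 * Taken together, attach copies the object's shmem pages into the
 * contiguous drm_pci_alloc() buffer and points further access at it,
 * while detach (above) copies the contents back out and returns the
 * object to ordinary shmem backing. Both copy loops rely on GEM object
 * sizes always being page-aligned.
 */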
4016
4017static int
05394f39
CW
4018i915_gem_phys_pwrite(struct drm_device *dev,
4019 struct drm_i915_gem_object *obj,
71acb5eb
DA
4020 struct drm_i915_gem_pwrite *args,
4021 struct drm_file *file_priv)
4022{
05394f39 4023 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
b47b30cc 4024 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
71acb5eb 4025
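	/* Fast path: try the non-temporal, atomic copy while still
	 * holding the mutex; it returns non-zero if it would have to
	 * fault pages in, in which case we drop the lock and fall back
	 * to the sleeping copy_from_user() below.
	 */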
b47b30cc
CW
4026 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4027 unsigned long unwritten;
4028
4029 /* The physical object once assigned is fixed for the lifetime
4030 * of the obj, so we can safely drop the lock and continue
4031 * to access vaddr.
4032 */
4033 mutex_unlock(&dev->struct_mutex);
4034 unwritten = copy_from_user(vaddr, user_data, args->size);
4035 mutex_lock(&dev->struct_mutex);
4036 if (unwritten)
4037 return -EFAULT;
4038 }
71acb5eb 4039
40ce6575 4040 intel_gtt_chipset_flush();
71acb5eb
DA
4041 return 0;
4042}
b962442e 4043
f787a5f5 4044void i915_gem_release(struct drm_device *dev, struct drm_file *file)
b962442e 4045{
f787a5f5 4046 struct drm_i915_file_private *file_priv = file->driver_priv;
b962442e
EA
4047
4048 /* Clean up our request list when the client is going away, so that
4049 * later retire_requests won't dereference our soon-to-be-gone
4050 * file_priv.
4051 */
1c25595f 4052 spin_lock(&file_priv->mm.lock);
f787a5f5
CW
4053 while (!list_empty(&file_priv->mm.request_list)) {
4054 struct drm_i915_gem_request *request;
4055
4056 request = list_first_entry(&file_priv->mm.request_list,
4057 struct drm_i915_gem_request,
4058 client_list);
4059 list_del(&request->client_list);
4060 request->file_priv = NULL;
4061 }
1c25595f 4062 spin_unlock(&file_priv->mm.lock);
b962442e 4063}
31169714 4064
1637ef41
CW
4065static int
4066i915_gpu_is_active(struct drm_device *dev)
4067{
4068 drm_i915_private_t *dev_priv = dev->dev_private;
4069 int lists_empty;
4070
1637ef41 4071 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
17250b71 4072 list_empty(&dev_priv->mm.active_list);
1637ef41
CW
4073
4074 return !lists_empty;
4075}
4076
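/*
 * Shrinker callback: a sc->nr_to_scan of zero is only a query, and we
 * report the size of the inactive list as our reclaim potential;
 * otherwise we unbind up to nr_to_scan inactive objects and report what
 * is left. With the default sysctl_vfs_cache_pressure of 100 the return
 * value is simply that count.
 */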
31169714 4077static int
1495f230 4078i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
31169714 4079{
17250b71
CW
4080 struct drm_i915_private *dev_priv =
4081 container_of(shrinker,
4082 struct drm_i915_private,
4083 mm.inactive_shrinker);
4084 struct drm_device *dev = dev_priv->dev;
4085 struct drm_i915_gem_object *obj, *next;
1495f230 4086 int nr_to_scan = sc->nr_to_scan;
17250b71
CW
4087 int cnt;
4088
4089 if (!mutex_trylock(&dev->struct_mutex))
bbe2e11a 4090 return 0;
31169714
CW
4091
4092 /* "fast-path" to count number of available objects */
4093 if (nr_to_scan == 0) {
17250b71
CW
4094 cnt = 0;
4095 list_for_each_entry(obj,
4096 &dev_priv->mm.inactive_list,
4097 mm_list)
4098 cnt++;
4099 mutex_unlock(&dev->struct_mutex);
4100 return cnt / 100 * sysctl_vfs_cache_pressure;
31169714
CW
4101 }
4102
1637ef41 4103rescan:
31169714 4104 /* first scan for clean buffers */
17250b71 4105 i915_gem_retire_requests(dev);
31169714 4106
17250b71
CW
4107 list_for_each_entry_safe(obj, next,
4108 &dev_priv->mm.inactive_list,
4109 mm_list) {
4110 if (i915_gem_object_is_purgeable(obj)) {
2021746e
CW
4111 if (i915_gem_object_unbind(obj) == 0 &&
4112 --nr_to_scan == 0)
17250b71 4113 break;
31169714 4114 }
31169714
CW
4115 }
4116
4117 /* second pass, evict/count anything still on the inactive list */
17250b71
CW
4118 cnt = 0;
4119 list_for_each_entry_safe(obj, next,
4120 &dev_priv->mm.inactive_list,
4121 mm_list) {
2021746e
CW
4122 if (nr_to_scan &&
4123 i915_gem_object_unbind(obj) == 0)
17250b71 4124 nr_to_scan--;
2021746e 4125 else
17250b71
CW
4126 cnt++;
4127 }
4128
4129 if (nr_to_scan && i915_gpu_is_active(dev)) {
1637ef41
CW
4130 /*
4131 * We are desperate for pages, so as a last resort, wait
4132 * for the GPU to finish and discard whatever we can.
4133 * This dramatically reduces the number of
4134 * OOM-killer events whilst running the GPU aggressively.
4135 */
b93f9cf1 4136 if (i915_gpu_idle(dev, true) == 0)
1637ef41
CW
4137 goto rescan;
4138 }
17250b71
CW
4139 mutex_unlock(&dev->struct_mutex);
4140 return cnt / 100 * sysctl_vfs_cache_pressure;
31169714 4141}